From 3299adae9e95b53357cafda852588eb9d87c35ed Mon Sep 17 00:00:00 2001 From: arhag Date: Tue, 8 Jan 2019 19:19:43 -0500 Subject: [PATCH 001/680] refactor block_header_state --- libraries/chain/apply_context.cpp | 2 +- libraries/chain/block_header_state.cpp | 392 +++++++++------ libraries/chain/block_state.cpp | 34 +- libraries/chain/controller.cpp | 475 +++++++++++------- libraries/chain/fork_database.cpp | 12 +- .../include/eosio/chain/block_header.hpp | 10 +- .../eosio/chain/block_header_state.hpp | 51 +- .../chain/include/eosio/chain/block_state.hpp | 20 +- .../chain/include/eosio/chain/controller.hpp | 8 +- .../include/eosio/chain/fork_database.hpp | 4 +- libraries/chain/transaction_context.cpp | 2 +- libraries/testing/tester.cpp | 13 +- plugins/history_plugin/history_plugin.cpp | 19 +- plugins/producer_plugin/producer_plugin.cpp | 68 ++- .../test_control_plugin.cpp | 20 +- unittests/forked_tests.cpp | 14 +- unittests/producer_schedule_tests.cpp | 8 +- 17 files changed, 700 insertions(+), 452 deletions(-) diff --git a/libraries/chain/apply_context.cpp b/libraries/chain/apply_context.cpp index 1beb647eed6..550c26b8f34 100644 --- a/libraries/chain/apply_context.cpp +++ b/libraries/chain/apply_context.cpp @@ -38,7 +38,7 @@ void apply_context::exec_one( action_trace& trace ) r.act_digest = digest_type::hash(act); trace.trx_id = trx_context.id; - trace.block_num = control.pending_block_state()->block_num; + trace.block_num = control.head_block_num() + 1; trace.block_time = control.pending_block_time(); trace.producer_block_id = control.pending_producer_block_id(); trace.act = act; diff --git a/libraries/chain/block_header_state.cpp b/libraries/chain/block_header_state.cpp index 7189073f975..9fc95885dae 100644 --- a/libraries/chain/block_header_state.cpp +++ b/libraries/chain/block_header_state.cpp @@ -15,113 +15,245 @@ namespace eosio { namespace chain { return active_schedule.producers[index]; } - uint32_t block_header_state::calc_dpos_last_irreversible()const { + uint32_t block_header_state::calc_dpos_last_irreversible( account_name producer_of_next_block )const { vector blocknums; blocknums.reserve( producer_to_last_implied_irb.size() ); for( auto& i : producer_to_last_implied_irb ) { - blocknums.push_back(i.second); + blocknums.push_back( (i.first == producer_of_next_block) ? dpos_proposed_irreversible_blocknum : i.second); } /// 2/3 must be greater, so if I go 1/3 into the list sorted from low to high, then 2/3 are greater if( blocknums.size() == 0 ) return 0; - /// TODO: update to nth_element + /// TODO: update to nth_element std::sort( blocknums.begin(), blocknums.end() ); return blocknums[ (blocknums.size()-1) / 3 ]; } - /** - * Generate a template block header state for a given block time, it will not - * contain a transaction mroot, action mroot, or new_producers as those components - * are derived from chain state. 
- */ - block_header_state block_header_state::generate_next( block_timestamp_type when )const { - block_header_state result; - - if( when != block_timestamp_type() ) { - EOS_ASSERT( when > header.timestamp, block_validate_exception, "next block must be in the future" ); - } else { - (when = header.timestamp).slot++; - } - result.header.timestamp = when; - result.header.previous = id; - result.header.schedule_version = active_schedule.version; - - auto prokey = get_scheduled_producer(when); - result.block_signing_key = prokey.block_signing_key; - result.header.producer = prokey.producer_name; - - result.pending_schedule_lib_num = pending_schedule_lib_num; - result.pending_schedule_hash = pending_schedule_hash; - result.block_num = block_num + 1; - result.producer_to_last_produced = producer_to_last_produced; - result.producer_to_last_implied_irb = producer_to_last_implied_irb; - result.producer_to_last_produced[prokey.producer_name] = result.block_num; - result.blockroot_merkle = blockroot_merkle; - result.blockroot_merkle.append( id ); - - result.active_schedule = active_schedule; - result.pending_schedule = pending_schedule; - result.dpos_proposed_irreversible_blocknum = dpos_proposed_irreversible_blocknum; - result.bft_irreversible_blocknum = bft_irreversible_blocknum; - - result.producer_to_last_implied_irb[prokey.producer_name] = result.dpos_proposed_irreversible_blocknum; - result.dpos_irreversible_blocknum = result.calc_dpos_last_irreversible(); - - /// grow the confirmed count - static_assert(std::numeric_limits::max() >= (config::max_producers * 2 / 3) + 1, "8bit confirmations may not be able to hold all of the needed confirmations"); - - // This uses the previous block active_schedule because thats the "schedule" that signs and therefore confirms _this_ block - auto num_active_producers = active_schedule.producers.size(); - uint32_t required_confs = (uint32_t)(num_active_producers * 2 / 3) + 1; - - if( confirm_count.size() < config::maximum_tracked_dpos_confirmations ) { - result.confirm_count.reserve( confirm_count.size() + 1 ); - result.confirm_count = confirm_count; - result.confirm_count.resize( confirm_count.size() + 1 ); - result.confirm_count.back() = (uint8_t)required_confs; - } else { - result.confirm_count.resize( confirm_count.size() ); - memcpy( &result.confirm_count[0], &confirm_count[1], confirm_count.size() - 1 ); - result.confirm_count.back() = (uint8_t)required_confs; - } - - return result; - } /// generate_next - - bool block_header_state::maybe_promote_pending() { + pending_block_header_state block_header_state::next( block_timestamp_type when, + uint16_t num_prev_blocks_to_confirm )const + { + pending_block_header_state result; + + if( when != block_timestamp_type() ) { + EOS_ASSERT( when > header.timestamp, block_validate_exception, "next block must be in the future" ); + } else { + (when = header.timestamp).slot++; + } + + auto prokey = get_scheduled_producer(when); + + auto itr = producer_to_last_produced.find( prokey.producer_name ); + if( itr != producer_to_last_produced.end() ) { + EOS_ASSERT( itr->second < (block_num+1) - num_prev_blocks_to_confirm, producer_double_confirm, + "producer ${prod} double-confirming known range", + ("prod", prokey.producer_name)("num", block_num+1) + ("confirmed", num_prev_blocks_to_confirm)("last_produced", itr->second) ); + } + + result.block_num = block_num + 1; + result.previous = id; + result.timestamp = when; + result.confirmed = num_prev_blocks_to_confirm; + result.active_schedule_version = active_schedule.version; + + 
result.block_signing_key = prokey.block_signing_key; + result.producer = prokey.producer_name; + + result.blockroot_merkle = blockroot_merkle; + result.blockroot_merkle.append( id ); + + /// grow the confirmed count + static_assert(std::numeric_limits::max() >= (config::max_producers * 2 / 3) + 1, "8bit confirmations may not be able to hold all of the needed confirmations"); + + // This uses the previous block active_schedule because thats the "schedule" that signs and therefore confirms _this_ block + auto num_active_producers = active_schedule.producers.size(); + uint32_t required_confs = (uint32_t)(num_active_producers * 2 / 3) + 1; + + if( confirm_count.size() < config::maximum_tracked_dpos_confirmations ) { + result.confirm_count.reserve( confirm_count.size() + 1 ); + result.confirm_count = confirm_count; + result.confirm_count.resize( confirm_count.size() + 1 ); + result.confirm_count.back() = (uint8_t)required_confs; + } else { + result.confirm_count.resize( confirm_count.size() ); + memcpy( &result.confirm_count[0], &confirm_count[1], confirm_count.size() - 1 ); + result.confirm_count.back() = (uint8_t)required_confs; + } + + auto new_dpos_proposed_irreversible_blocknum = dpos_proposed_irreversible_blocknum; + + int32_t i = (int32_t)(result.confirm_count.size() - 1); + uint32_t blocks_to_confirm = num_prev_blocks_to_confirm + 1; /// confirm the head block too + while( i >= 0 && blocks_to_confirm ) { + --result.confirm_count[i]; + //idump((confirm_count[i])); + if( result.confirm_count[i] == 0 ) + { + uint32_t block_num_for_i = result.block_num - (uint32_t)(result.confirm_count.size() - 1 - i); + new_dpos_proposed_irreversible_blocknum = block_num_for_i; + //idump((dpos2_lib)(block_num)(dpos_irreversible_blocknum)); + + if (i == result.confirm_count.size() - 1) { + result.confirm_count.resize(0); + } else { + memmove( &result.confirm_count[0], &result.confirm_count[i + 1], result.confirm_count.size() - i - 1); + result.confirm_count.resize( result.confirm_count.size() - i - 1 ); + } + + break; + } + --i; + --blocks_to_confirm; + } + + result.dpos_proposed_irreversible_blocknum = new_dpos_proposed_irreversible_blocknum; + result.dpos_irreversible_blocknum = calc_dpos_last_irreversible( prokey.producer_name ); + + result.prev_pending_schedule = pending_schedule; + result.prev_pending_schedule_lib_num = pending_schedule_lib_num; + result.prev_pending_schedule_hash = pending_schedule_hash; + if( pending_schedule.producers.size() && - dpos_irreversible_blocknum >= pending_schedule_lib_num ) + result.dpos_irreversible_blocknum >= pending_schedule_lib_num ) { - active_schedule = move( pending_schedule ); + result.active_schedule = pending_schedule; flat_map new_producer_to_last_produced; - for( const auto& pro : active_schedule.producers ) { - auto existing = producer_to_last_produced.find( pro.producer_name ); - if( existing != producer_to_last_produced.end() ) { - new_producer_to_last_produced[pro.producer_name] = existing->second; + + for( const auto& pro : result.active_schedule.producers ) { + if( pro.producer_name == prokey.producer_name ) { + new_producer_to_last_produced[pro.producer_name] = result.block_num; } else { - new_producer_to_last_produced[pro.producer_name] = dpos_irreversible_blocknum; + auto existing = producer_to_last_produced.find( pro.producer_name ); + if( existing != producer_to_last_produced.end() ) { + new_producer_to_last_produced[pro.producer_name] = existing->second; + } else { + new_producer_to_last_produced[pro.producer_name] = 
result.dpos_irreversible_blocknum; + } } } + result.producer_to_last_produced = std::move( new_producer_to_last_produced ); + flat_map new_producer_to_last_implied_irb; - for( const auto& pro : active_schedule.producers ) { - auto existing = producer_to_last_implied_irb.find( pro.producer_name ); - if( existing != producer_to_last_implied_irb.end() ) { - new_producer_to_last_implied_irb[pro.producer_name] = existing->second; + + for( const auto& pro : result.active_schedule.producers ) { + if( pro.producer_name == prokey.producer_name ) { + new_producer_to_last_implied_irb[pro.producer_name] = dpos_proposed_irreversible_blocknum; } else { - new_producer_to_last_implied_irb[pro.producer_name] = dpos_irreversible_blocknum; + auto existing = producer_to_last_implied_irb.find( pro.producer_name ); + if( existing != producer_to_last_implied_irb.end() ) { + new_producer_to_last_implied_irb[pro.producer_name] = existing->second; + } else { + new_producer_to_last_implied_irb[pro.producer_name] = result.dpos_irreversible_blocknum; + } } } - producer_to_last_produced = move( new_producer_to_last_produced ); - producer_to_last_implied_irb = move( new_producer_to_last_implied_irb); - producer_to_last_produced[header.producer] = block_num; + result.producer_to_last_implied_irb = std::move( new_producer_to_last_implied_irb ); + + result.was_pending_promoted = true; + } else { + result.active_schedule = active_schedule; + result.producer_to_last_produced = producer_to_last_produced; + result.producer_to_last_produced[prokey.producer_name] = block_num; + result.producer_to_last_implied_irb = producer_to_last_implied_irb; + result.producer_to_last_implied_irb[prokey.producer_name] = dpos_proposed_irreversible_blocknum; + } + + return result; + } + + signed_block_header pending_block_header_state::make_block_header( const checksum256_type& transaction_mroot, + const checksum256_type& action_mroot, + optional&& new_producers )const + { + signed_block_header h; + + h.timestamp = timestamp; + h.producer = producer; + h.confirmed = confirmed; + h.previous = previous; + h.transaction_mroot = transaction_mroot; + h.action_mroot = action_mroot; + h.schedule_version = active_schedule_version; + h.new_producers = std::move(new_producers); + + return h; + } + + block_header_state pending_block_header_state::_finish_next( const signed_block_header& h )&& + { + EOS_ASSERT( h.timestamp == timestamp, block_validate_exception, "timestamp mismatch" ); + EOS_ASSERT( h.previous == previous, unlinkable_block_exception, "previous mismatch" ); + EOS_ASSERT( h.confirmed == confirmed, block_validate_exception, "confirmed mismatch" ); + EOS_ASSERT( h.producer == producer, wrong_producer, "wrong producer specified" ); + EOS_ASSERT( h.schedule_version == active_schedule_version, producer_schedule_exception, "schedule_version in signed block is corrupted" ); + + EOS_ASSERT( h.header_extensions.size() == 0, block_validate_exception, "no supported extensions" ); + + if( h.new_producers ) { + EOS_ASSERT( !was_pending_promoted, producer_schedule_exception, "cannot set pending producer schedule in the same block in which pending was promoted to active" ); + EOS_ASSERT( h.new_producers->version == active_schedule.version + 1, producer_schedule_exception, "wrong producer schedule version specified" ); + EOS_ASSERT( prev_pending_schedule.producers.size() == 0, producer_schedule_exception, + "cannot set new pending producers until last pending is confirmed" ); + } + + block_header_state result; + + result.id = h.id(); + result.block_num = 
block_num; + result.header = h; + + result.dpos_proposed_irreversible_blocknum = dpos_proposed_irreversible_blocknum; + result.dpos_irreversible_blocknum = dpos_irreversible_blocknum; + + if( h.new_producers ) { + result.pending_schedule = *h.new_producers; + result.pending_schedule_hash = digest_type::hash( result.pending_schedule ); + result.pending_schedule_lib_num = block_num; + } else { + if( was_pending_promoted ) { + result.pending_schedule.version = prev_pending_schedule.version; + } else { + result.pending_schedule = prev_pending_schedule; + } + result.pending_schedule_hash = std::move(prev_pending_schedule_hash); + result.pending_schedule_lib_num = prev_pending_schedule_lib_num; + } + + result.active_schedule = std::move(active_schedule); + result.blockroot_merkle = std::move(blockroot_merkle); + result.producer_to_last_produced = std::move(producer_to_last_produced); + result.producer_to_last_implied_irb = std::move(producer_to_last_implied_irb); + result.block_signing_key = std::move(block_signing_key); + result.confirm_count = std::move(confirm_count); + + return result; + } + + block_header_state pending_block_header_state::finish_next( const signed_block_header& h, + bool skip_validate_signee )&& + { + auto result = std::move(*this)._finish_next( h ); - return true; + // ASSUMPTION FROM controller_impl::apply_block = all untrusted blocks will have their signatures pre-validated here + if( !skip_validate_signee ) { + result.verify_signee( result.signee() ); } - return false; + + return result; + } + + block_header_state pending_block_header_state::finish_next( signed_block_header& h, + const std::function& signer )&& + { + auto result = std::move(*this)._finish_next( h ); + result.sign( signer ); + h.producer_signature = result.header.producer_signature; + return result; } + /* void block_header_state::set_new_producers( producer_schedule_type pending ) { EOS_ASSERT( pending.version == active_schedule.version + 1, producer_schedule_exception, "wrong producer schedule version specified" ); EOS_ASSERT( pending_schedule.producers.size() == 0, producer_schedule_exception, @@ -131,92 +263,19 @@ namespace eosio { namespace chain { pending_schedule = *header.new_producers; pending_schedule_lib_num = block_num; } - - - /** - * Transitions the current header state into the next header state given the supplied signed block header. - * - * Given a signed block header, generate the expected template based upon the header time, - * then validate that the provided header matches the template. - * - * If the header specifies new_producers then apply them accordingly. 
*/ - block_header_state block_header_state::next( const signed_block_header& h, bool skip_validate_signee )const { - EOS_ASSERT( h.timestamp != block_timestamp_type(), block_validate_exception, "", ("h",h) ); - EOS_ASSERT( h.header_extensions.size() == 0, block_validate_exception, "no supported extensions" ); - - EOS_ASSERT( h.timestamp > header.timestamp, block_validate_exception, "block must be later in time" ); - EOS_ASSERT( h.previous == id, unlinkable_block_exception, "block must link to current state" ); - auto result = generate_next( h.timestamp ); - EOS_ASSERT( result.header.producer == h.producer, wrong_producer, "wrong producer specified" ); - EOS_ASSERT( result.header.schedule_version == h.schedule_version, producer_schedule_exception, "schedule_version in signed block is corrupted" ); - - auto itr = producer_to_last_produced.find(h.producer); - if( itr != producer_to_last_produced.end() ) { - EOS_ASSERT( itr->second < result.block_num - h.confirmed, producer_double_confirm, "producer ${prod} double-confirming known range", ("prod", h.producer) ); - } - - // FC_ASSERT( result.header.block_mroot == h.block_mroot, "mismatch block merkle root" ); - - /// below this point is state changes that cannot be validated with headers alone, but never-the-less, - /// must result in header state changes - - result.set_confirmed( h.confirmed ); - - auto was_pending_promoted = result.maybe_promote_pending(); - - if( h.new_producers ) { - EOS_ASSERT( !was_pending_promoted, producer_schedule_exception, "cannot set pending producer schedule in the same block in which pending was promoted to active" ); - result.set_new_producers( *h.new_producers ); - } - - result.header.action_mroot = h.action_mroot; - result.header.transaction_mroot = h.transaction_mroot; - result.header.producer_signature = h.producer_signature; - result.id = result.header.id(); - - // ASSUMPTION FROM controller_impl::apply_block = all untrusted blocks will have their signatures pre-validated here - if( !skip_validate_signee ) { - result.verify_signee( result.signee() ); - } - - return result; - } /// next - - void block_header_state::set_confirmed( uint16_t num_prev_blocks ) { - /* - idump((num_prev_blocks)(confirm_count.size())); - - for( uint32_t i = 0; i < confirm_count.size(); ++i ) { - std::cerr << "confirm_count["<= 0 && blocks_to_confirm ) { - --confirm_count[i]; - //idump((confirm_count[i])); - if( confirm_count[i] == 0 ) - { - uint32_t block_num_for_i = block_num - (uint32_t)(confirm_count.size() - 1 - i); - dpos_proposed_irreversible_blocknum = block_num_for_i; - //idump((dpos2_lib)(block_num)(dpos_irreversible_blocknum)); - - if (i == confirm_count.size() - 1) { - confirm_count.resize(0); - } else { - memmove( &confirm_count[0], &confirm_count[i + 1], confirm_count.size() - i - 1); - confirm_count.resize( confirm_count.size() - i - 1 ); - } - - return; - } - --i; - --blocks_to_confirm; - } - } + + /** + * Transitions the current header state into the next header state given the supplied signed block header. + * + * Given a signed block header, generate the expected template based upon the header time, + * then validate that the provided header matches the template. + * + * If the header specifies new_producers then apply them accordingly. 
+ */ + block_header_state block_header_state::next( const signed_block_header& h, bool skip_validate_signee )const { + return next( h.timestamp, h.confirmed ).finish_next( h, skip_validate_signee ); + } digest_type block_header_state::sig_digest()const { auto header_bmroot = digest_type::hash( std::make_pair( header.digest(), blockroot_merkle.get_root() ) ); @@ -238,6 +297,7 @@ namespace eosio { namespace chain { ("block_signing_key", block_signing_key)( "signee", signee ) ); } + /* void block_header_state::add_confirmation( const header_confirmation& conf ) { for( const auto& c : confirmations ) EOS_ASSERT( c.producer != conf.producer, producer_double_confirm, "block already confirmed by this producer" ); @@ -249,6 +309,6 @@ namespace eosio { namespace chain { confirmations.emplace_back( conf ); } - + */ } } /// namespace eosio::chain diff --git a/libraries/chain/block_state.cpp b/libraries/chain/block_state.cpp index b4834775951..ecd7fceefc8 100644 --- a/libraries/chain/block_state.cpp +++ b/libraries/chain/block_state.cpp @@ -3,17 +3,33 @@ namespace eosio { namespace chain { - block_state::block_state( const block_header_state& prev, block_timestamp_type when ) - :block_header_state( prev.generate_next( when ) ), - block( std::make_shared() ) - { - static_cast(*block) = header; - } + block_state::block_state( const block_header_state& prev, + signed_block_ptr b, + bool skip_validate_signee + ) + :block_header_state( prev.next( *b, skip_validate_signee ) ) + ,block( std::move(b) ) + {} - block_state::block_state( const block_header_state& prev, signed_block_ptr b, bool skip_validate_signee ) - :block_header_state( prev.next( *b, skip_validate_signee )), block( move(b) ) - { } + block_state::block_state( pending_block_header_state&& cur, + signed_block_ptr&& b, + vector&& trx_metas, + const std::function& signer + ) + :block_header_state( std::move(cur).finish_next( *b, signer ) ) + ,block( std::move(b) ) + ,trxs( std::move(trx_metas) ) + {} + block_state::block_state( pending_block_header_state&& cur, + const signed_block_ptr& b, + vector&& trx_metas, + bool skip_validate_signee + ) + :block_header_state( std::move(cur).finish_next( *b, skip_validate_signee ) ) + ,block( b ) + ,trxs( std::move(trx_metas) ) + {} } } /// eosio::chain diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp index 98b2065d1e1..2a3071b5752 100644 --- a/libraries/chain/controller.cpp +++ b/libraries/chain/controller.cpp @@ -94,19 +94,69 @@ class maybe_session { optional _session; }; +struct building_block { + building_block( const block_header_state& prev, block_timestamp_type when, uint16_t num_prev_blocks_to_confirm ) + :_pending_block_header_state( prev.next( when, num_prev_blocks_to_confirm ) ) + {} + + pending_block_header_state _pending_block_header_state; + optional _new_pending_producer_schedule; + vector _pending_trx_metas; + vector _pending_trx_receipts; + vector _actions; +}; + +struct assembled_block { + pending_block_header_state _pending_block_header_state; + vector _trx_metas; + signed_block_ptr _unsigned_block; +}; + +struct completed_block { + block_state_ptr _block_state; +}; + +using block_stage_type = fc::static_variant; + struct pending_state { - pending_state( maybe_session&& s ) - :_db_session( move(s) ){} + pending_state( maybe_session&& s, const block_header_state& prev, + block_timestamp_type when, uint16_t num_prev_blocks_to_confirm ) + :_db_session( move(s) ) + ,_block_stage( building_block( prev, when, num_prev_blocks_to_confirm ) ) + {} maybe_session 
_db_session; + block_stage_type _block_stage; + controller::block_status _block_status = controller::block_status::incomplete; + optional _producer_block_id; - block_state_ptr _pending_block_state; + /** @pre _block_stage cannot hold completed_block alternative */ + const pending_block_header_state& get_pending_block_header_state()const { + if( _block_stage.contains() ) + return _block_stage.get()._pending_block_header_state; - vector _actions; + return _block_stage.get()._pending_block_header_state; + } - controller::block_status _block_status = controller::block_status::incomplete; + const vector& get_trx_receipts()const { + if( _block_stage.contains() ) + return _block_stage.get()._pending_trx_receipts; - optional _producer_block_id; + if( _block_stage.contains() ) + return _block_stage.get()._unsigned_block->transactions; + + return _block_stage.get()._block_state->block->transactions; + } + + const vector& get_trx_metas()const { + if( _block_stage.contains() ) + return _block_stage.get()._pending_trx_metas; + + if( _block_stage.contains() ) + return _block_stage.get()._trx_metas; + + return _block_stage.get()._block_state->trxs; + } void push() { _db_session.push(); @@ -534,7 +584,8 @@ struct controller_impl { block_header_state head_header_state; section.read_row(head_header_state, db); - auto head_state = std::make_shared(head_header_state); + auto head_state = std::make_shared(); + static_cast(*head_state) = head_header_state; fork_db.set(head_state); fork_db.set_validity(head_state, true); fork_db.mark_in_current_chain(head_state, true); @@ -594,7 +645,8 @@ struct controller_impl { genheader.id = genheader.header.id(); genheader.block_num = genheader.header.block_num(); - head = std::make_shared( genheader ); + head = std::make_shared(); + static_cast(*head) = genheader; head->block = std::make_shared(genheader.header); fork_db.set( head ); db.set_revision( head->block_num ); @@ -673,58 +725,22 @@ struct controller_impl { conf.genesis.initial_timestamp ); } - - - /** - * @post regardless of the success of commit block there is no active pending block - */ - void commit_block( bool add_to_fork_db ) { - auto reset_pending_on_exit = fc::make_scoped_exit([this]{ - pending.reset(); - }); - - try { - if (add_to_fork_db) { - pending->_pending_block_state->validated = true; - auto new_bsp = fork_db.add(pending->_pending_block_state, true); - emit(self.accepted_block_header, pending->_pending_block_state); - head = fork_db.head(); - EOS_ASSERT(new_bsp == head, fork_database_exception, "committed block did not become the new head in fork database"); - } - - if( !replaying ) { - reversible_blocks.create( [&]( auto& ubo ) { - ubo.blocknum = pending->_pending_block_state->block_num; - ubo.set_block( pending->_pending_block_state->block ); - }); - } - - emit( self.accepted_block, pending->_pending_block_state ); - } catch (...) { - // dont bother resetting pending, instead abort the block - reset_pending_on_exit.cancel(); - abort_block(); - throw; - } - - // push the state for pending. - pending->push(); - } - // The returned scoped_exit should not exceed the lifetime of the pending which existed when make_block_restore_point was called. 
fc::scoped_exit> make_block_restore_point() { - auto orig_block_transactions_size = pending->_pending_block_state->block->transactions.size(); - auto orig_state_transactions_size = pending->_pending_block_state->trxs.size(); - auto orig_state_actions_size = pending->_actions.size(); + auto& bb = pending->_block_stage.get(); + auto orig_block_transactions_size = bb._pending_trx_receipts.size(); + auto orig_state_transactions_size = bb._pending_trx_metas.size(); + auto orig_state_actions_size = bb._actions.size(); std::function callback = [this, orig_block_transactions_size, orig_state_transactions_size, orig_state_actions_size]() { - pending->_pending_block_state->block->transactions.resize(orig_block_transactions_size); - pending->_pending_block_state->trxs.resize(orig_state_transactions_size); - pending->_actions.resize(orig_state_actions_size); + auto& bb = pending->_block_stage.get(); + bb._pending_trx_receipts.resize(orig_block_transactions_size); + bb._pending_trx_metas.resize(orig_state_transactions_size); + bb._actions.resize(orig_state_actions_size); }; return fc::make_scoped_exit( std::move(callback) ); @@ -762,7 +778,7 @@ struct controller_impl { auto restore = make_block_restore_point(); trace->receipt = push_receipt( gtrx.trx_id, transaction_receipt::soft_fail, trx_context.billed_cpu_time_us, trace->net_usage ); - fc::move_append( pending->_actions, move(trx_context.executed) ); + fc::move_append( pending->_block_stage.get()._actions, move(trx_context.executed) ); trx_context.squash(); restore.cancel(); @@ -846,7 +862,7 @@ struct controller_impl { if( gtrx.expiration < self.pending_block_time() ) { trace = std::make_shared(); trace->id = gtrx.trx_id; - trace->block_num = self.pending_block_state()->block_num; + trace->block_num = self.head_block_num() + 1; trace->block_time = self.pending_block_time(); trace->producer_block_id = self.pending_producer_block_id(); trace->scheduled = true; @@ -888,7 +904,7 @@ struct controller_impl { trx_context.billed_cpu_time_us, trace->net_usage ); - fc::move_append( pending->_actions, move(trx_context.executed) ); + fc::move_append( pending->_block_stage.get()._actions, move(trx_context.executed) ); emit( self.accepted_transaction, trx ); emit( self.applied_transaction, trace ); @@ -976,8 +992,9 @@ struct controller_impl { uint64_t cpu_usage_us, uint64_t net_usage ) { uint64_t net_usage_words = net_usage / 8; EOS_ASSERT( net_usage_words*8 == net_usage, transaction_exception, "net_usage is not divisible by 8" ); - pending->_pending_block_state->block->transactions.emplace_back( trx ); - transaction_receipt& r = pending->_pending_block_state->block->transactions.back(); + auto& receipts = pending->_block_stage.get()._pending_trx_receipts; + receipts.emplace_back( trx ); + transaction_receipt& r = receipts.back(); r.cpu_usage_us = cpu_usage_us; r.net_usage_words = net_usage_words; r.status = status; @@ -1053,7 +1070,7 @@ struct controller_impl { ? 
transaction_receipt::executed : transaction_receipt::delayed; trace->receipt = push_receipt(*trx->packed_trx, s, trx_context.billed_cpu_time_us, trace->net_usage); - pending->_pending_block_state->trxs.emplace_back(trx); + pending->_block_stage.get()._pending_trx_metas.emplace_back(trx); } else { transaction_receipt_header r; r.status = transaction_receipt::executed; @@ -1062,7 +1079,7 @@ struct controller_impl { trace->receipt = r; } - fc::move_append(pending->_actions, move(trx_context.executed)); + fc::move_append(pending->_block_stage.get()._actions, move(trx_context.executed)); // call the accept signal but only once for this transaction if (!trx->accepted) { @@ -1115,44 +1132,43 @@ struct controller_impl { EOS_ASSERT( db.revision() == head->block_num, database_exception, "db revision is not on par with head block", ("db.revision()", db.revision())("controller_head_block", head->block_num)("fork_db_head_block", fork_db.head()->block_num) ); - pending.emplace(maybe_session(db)); + pending.emplace( maybe_session(db), *head, when, confirm_block_count ); } else { - pending.emplace(maybe_session()); + pending.emplace( maybe_session(), *head, when, confirm_block_count ); } pending->_block_status = s; pending->_producer_block_id = producer_block_id; - pending->_pending_block_state = std::make_shared( *head, when ); // promotes pending schedule (if any) to active - pending->_pending_block_state->in_current_chain = true; - pending->_pending_block_state->set_confirmed(confirm_block_count); - - auto was_pending_promoted = pending->_pending_block_state->maybe_promote_pending(); + const auto& pbhs = pending->get_pending_block_header_state(); //modify state in speculative block only if we are speculative reads mode (other wise we need clean state for head or irreversible reads) - if ( read_mode == db_read_mode::SPECULATIVE || pending->_block_status != controller::block_status::incomplete ) { - + if ( read_mode == db_read_mode::SPECULATIVE || pending->_block_status != controller::block_status::incomplete ) + { const auto& gpo = db.get(); if( gpo.proposed_schedule_block_num.valid() && // if there is a proposed schedule that was proposed in a block ... - ( *gpo.proposed_schedule_block_num <= pending->_pending_block_state->dpos_irreversible_blocknum ) && // ... that has now become irreversible ... - pending->_pending_block_state->pending_schedule.producers.size() == 0 && // ... and there is room for a new pending schedule ... - !was_pending_promoted // ... and not just because it was promoted to active at the start of this block, then: + ( *gpo.proposed_schedule_block_num <= pbhs.dpos_irreversible_blocknum ) && // ... that has now become irreversible ... + pbhs.prev_pending_schedule.producers.size() == 0 // ... and there was room for a new pending schedule prior to any possible promotion ) - { - // Promote proposed schedule to pending schedule. - if( !replaying ) { - ilog( "promoting proposed schedule (set in block ${proposed_num}) to pending; current block: ${n} lib: ${lib} schedule: ${schedule} ", - ("proposed_num", *gpo.proposed_schedule_block_num)("n", pending->_pending_block_state->block_num) - ("lib", pending->_pending_block_state->dpos_irreversible_blocknum) - ("schedule", static_cast(gpo.proposed_schedule) ) ); - } - pending->_pending_block_state->set_new_producers( gpo.proposed_schedule ); - db.modify( gpo, [&]( auto& gp ) { - gp.proposed_schedule_block_num = optional(); - gp.proposed_schedule.clear(); - }); + { + // Promote proposed schedule to pending schedule. 
+ if( !replaying ) { + ilog( "promoting proposed schedule (set in block ${proposed_num}) to pending; current block: ${n} lib: ${lib} schedule: ${schedule} ", + ("proposed_num", *gpo.proposed_schedule_block_num)("n", pbhs.block_num) + ("lib", pbhs.dpos_irreversible_blocknum) + ("schedule", static_cast(gpo.proposed_schedule) ) ); } + EOS_ASSERT( gpo.proposed_schedule.version == pbhs.active_schedule_version + 1, + producer_schedule_exception, "wrong producer schedule version specified" ); + + pending->_block_stage.get()._new_pending_producer_schedule = gpo.proposed_schedule; + db.modify( gpo, [&]( auto& gp ) { + gp.proposed_schedule_block_num = optional(); + gp.proposed_schedule.clear(); + }); + } + try { auto onbtrx = std::make_shared( get_on_block_transaction() ); onbtrx->implicit = true; @@ -1175,17 +1191,106 @@ struct controller_impl { } guard_pending.cancel(); - } // start_block + } /// start_block + signed_block_ptr finalize_block() + { + EOS_ASSERT( pending, block_validate_exception, "it is not valid to finalize when there is no pending block"); + EOS_ASSERT( pending->_block_stage.contains(), block_validate_exception, "already called finalize_block"); + try { - void sign_block( const std::function& signer_callback ) { - auto p = pending->_pending_block_state; + auto& pbhs = pending->get_pending_block_header_state(); - p->sign( signer_callback ); + // Update resource limits: + resource_limits.process_account_limit_updates(); + const auto& chain_config = self.get_global_properties().configuration; + uint32_t max_virtual_mult = 1000; + uint64_t CPU_TARGET = EOS_PERCENT(chain_config.max_block_cpu_usage, chain_config.target_block_cpu_usage_pct); + resource_limits.set_block_parameters( + { CPU_TARGET, chain_config.max_block_cpu_usage, config::block_cpu_usage_average_window_ms / config::block_interval_ms, max_virtual_mult, {99, 100}, {1000, 999}}, + {EOS_PERCENT(chain_config.max_block_net_usage, chain_config.target_block_net_usage_pct), chain_config.max_block_net_usage, config::block_size_average_window_ms / config::block_interval_ms, max_virtual_mult, {99, 100}, {1000, 999}} + ); + resource_limits.process_block_usage(pbhs.block_num); + + auto& bb = pending->_block_stage.get(); + + // Create (unsigned) block: + auto block_ptr = std::make_shared( pbhs.make_block_header( + calculate_trx_merkle(), + calculate_action_merkle(), + std::move( bb._new_pending_producer_schedule ) + ) ); - static_cast(*p->block) = p->header; - } /// sign_block + block_ptr->transactions = std::move( bb._pending_trx_receipts ); + + // Update TaPoS table: + create_block_summary( block_ptr->id() ); + + /* + ilog( "finalized block ${n} (${id}) at ${t} by ${p} (${signing_key}); schedule_version: ${v} lib: ${lib} #dtrxs: ${ndtrxs} ${np}", + ("n",pbhs.block_num) + ("id",block_ptr->id()) + ("t",pbhs.timestamp) + ("p",pbhs.producer) + ("signing_key", pbhs.block_signing_key) + ("v",pbhs.active_schedule_version) + ("lib",pbhs.dpos_irreversible_blocknum) + ("ndtrxs",db.get_index().size()) + ("np",block_ptr->new_producers) + ); + */ + + pending->_block_stage = assembled_block{ + std::move( bb._pending_block_header_state ), + std::move( bb._pending_trx_metas ), + block_ptr + }; + + return block_ptr; + } FC_CAPTURE_AND_RETHROW() } /// finalize_block + + /** + * @post regardless of the success of commit block there is no active pending block + */ + void commit_block( bool add_to_fork_db ) { + auto reset_pending_on_exit = fc::make_scoped_exit([this]{ + pending.reset(); + }); + + try { + EOS_ASSERT( pending->_block_stage.contains(), 
block_validate_exception, + "cannot call commit_block until pending block is completed" ); + + auto bsp = pending->_block_stage.get()._block_state; + + if (add_to_fork_db) { + bsp->in_current_chain = true; + bsp->validated = true; + auto new_bsp = fork_db.add(bsp, true); + emit(self.accepted_block_header, bsp); + head = fork_db.head(); + EOS_ASSERT(new_bsp == head, fork_database_exception, "committed block did not become the new head in fork database"); + } + + if( !replaying ) { + reversible_blocks.create( [&]( auto& ubo ) { + ubo.blocknum = bsp->block_num; + ubo.set_block( bsp->block ); + }); + } + + emit( self.accepted_block, bsp ); + } catch (...) { + // dont bother resetting pending, instead abort the block + reset_pending_on_exit.cancel(); + abort_block(); + throw; + } + + // push the state for pending. + pending->push(); + } void apply_block( const signed_block_ptr& b, controller::block_status s ) { try { try { @@ -1210,7 +1315,8 @@ struct controller_impl { size_t packed_idx = 0; for( const auto& receipt : b->transactions ) { - auto num_pending_receipts = pending->_pending_block_state->block->transactions.size(); + const auto& trx_receipts = pending->_block_stage.get()._pending_trx_receipts; + auto num_pending_receipts = trx_receipts.size(); if( receipt.trx.contains() ) { trace = push_transaction( packed_transactions.at(packed_idx++), fc::time_point::maximum(), receipt.cpu_usage_us, true ); } else if( receipt.trx.contains() ) { @@ -1226,36 +1332,37 @@ struct controller_impl { throw *trace->except; } - EOS_ASSERT( pending->_pending_block_state->block->transactions.size() > 0, + EOS_ASSERT( trx_receipts.size() > 0, block_validate_exception, "expected a receipt", ("block", *b)("expected_receipt", receipt) ); - EOS_ASSERT( pending->_pending_block_state->block->transactions.size() == num_pending_receipts + 1, + EOS_ASSERT( trx_receipts.size() == num_pending_receipts + 1, block_validate_exception, "expected receipt was not added", ("block", *b)("expected_receipt", receipt) ); - const transaction_receipt_header& r = pending->_pending_block_state->block->transactions.back(); + const transaction_receipt_header& r = trx_receipts.back(); EOS_ASSERT( r == static_cast(receipt), block_validate_exception, "receipt does not match", - ("producer_receipt", receipt)("validator_receipt", pending->_pending_block_state->block->transactions.back()) ); + ("producer_receipt", receipt)("validator_receipt", trx_receipts.back()) ); } - finalize_block(); + auto block_ptr = finalize_block(); // this implicitly asserts that all header fields (less the signature) are identical - EOS_ASSERT(producer_block_id == pending->_pending_block_state->header.id(), - block_validate_exception, "Block ID does not match", - ("producer_block_id",producer_block_id)("validator_block_id",pending->_pending_block_state->header.id())); + auto id = block_ptr->id(); + EOS_ASSERT( producer_block_id == id, block_validate_exception, "Block ID does not match", + ("producer_block_id",producer_block_id)("validator_block_id",id) ); - // We need to fill out the pending block state's block because that gets serialized in the reversible block log - // in the future we can optimize this by serializing the original and not the copy + auto&& ab = pending->_block_stage.get(); - // we can always trust this signature because, - // - prior to apply_block, we call fork_db.add which does a signature check IFF the block is untrusted - // - OTHERWISE the block is trusted and therefore we trust that the signature is valid - // Also, as ::sign_block does not 
lazily calculate the digest of the block, we can just short-circuit to save cycles - pending->_pending_block_state->header.producer_signature = b->producer_signature; - static_cast(*pending->_pending_block_state->block) = pending->_pending_block_state->header; + auto bsp = std::make_shared( + std::move( ab._pending_block_header_state ), + b, + std::move( ab._trx_metas ), + true // signature should have already been verified (assuming untrusted) prior to apply_block + ); + + pending->_block_stage = completed_block{ bsp }; commit_block(false); return; @@ -1404,7 +1511,7 @@ struct controller_impl { void abort_block() { if( pending ) { if ( read_mode == db_read_mode::SPECULATIVE ) { - for( const auto& t : pending->_pending_block_state->trxs ) + for( const auto& t : pending->get_trx_metas() ) unapplied_transactions[t->signed_id] = t; } pending.reset(); @@ -1416,69 +1523,28 @@ struct controller_impl { return false; } - void set_action_merkle() { + checksum256_type calculate_action_merkle() { vector action_digests; - action_digests.reserve( pending->_actions.size() ); - for( const auto& a : pending->_actions ) + const auto& actions = pending->_block_stage.get()._actions; + action_digests.reserve( actions.size() ); + for( const auto& a : actions ) action_digests.emplace_back( a.digest() ); - pending->_pending_block_state->header.action_mroot = merkle( move(action_digests) ); + return merkle( move(action_digests) ); } - void set_trx_merkle() { + checksum256_type calculate_trx_merkle() { vector trx_digests; - const auto& trxs = pending->_pending_block_state->block->transactions; + const auto& trxs = pending->_block_stage.get()._pending_trx_receipts; trx_digests.reserve( trxs.size() ); for( const auto& a : trxs ) trx_digests.emplace_back( a.digest() ); - pending->_pending_block_state->header.transaction_mroot = merkle( move(trx_digests) ); + return merkle( move(trx_digests) ); } - - void finalize_block() - { - EOS_ASSERT(pending, block_validate_exception, "it is not valid to finalize when there is no pending block"); - try { - - - /* - ilog( "finalize block ${n} (${id}) at ${t} by ${p} (${signing_key}); schedule_version: ${v} lib: ${lib} #dtrxs: ${ndtrxs} ${np}", - ("n",pending->_pending_block_state->block_num) - ("id",pending->_pending_block_state->header.id()) - ("t",pending->_pending_block_state->header.timestamp) - ("p",pending->_pending_block_state->header.producer) - ("signing_key", pending->_pending_block_state->block_signing_key) - ("v",pending->_pending_block_state->header.schedule_version) - ("lib",pending->_pending_block_state->dpos_irreversible_blocknum) - ("ndtrxs",db.get_index().size()) - ("np",pending->_pending_block_state->header.new_producers) - ); - */ - - // Update resource limits: - resource_limits.process_account_limit_updates(); - const auto& chain_config = self.get_global_properties().configuration; - uint32_t max_virtual_mult = 1000; - uint64_t CPU_TARGET = EOS_PERCENT(chain_config.max_block_cpu_usage, chain_config.target_block_cpu_usage_pct); - resource_limits.set_block_parameters( - { CPU_TARGET, chain_config.max_block_cpu_usage, config::block_cpu_usage_average_window_ms / config::block_interval_ms, max_virtual_mult, {99, 100}, {1000, 999}}, - {EOS_PERCENT(chain_config.max_block_net_usage, chain_config.target_block_net_usage_pct), chain_config.max_block_net_usage, config::block_size_average_window_ms / config::block_interval_ms, max_virtual_mult, {99, 100}, {1000, 999}} - ); - resource_limits.process_block_usage(pending->_pending_block_state->block_num); - - 
set_action_merkle(); - set_trx_merkle(); - - auto p = pending->_pending_block_state; - p->id = p->header.id(); - - create_block_summary(p->id); - - } FC_CAPTURE_AND_RETHROW() } - void update_producers_authority() { - const auto& producers = pending->_pending_block_state->active_schedule.producers; + const auto& producers = pending->get_pending_block_header_state().active_schedule.producers; auto update_permission = [&]( auto& permission, auto threshold ) { auto auth = authority( threshold, {}, {}); @@ -1753,13 +1819,23 @@ void controller::start_block( block_timestamp_type when, uint16_t confirm_block_ my->start_block(when, confirm_block_count, block_status::incomplete, optional() ); } -void controller::finalize_block() { +block_state_ptr controller::finalize_block( const std::function& signer_callback ) { validate_db_available_size(); - my->finalize_block(); -} -void controller::sign_block( const std::function& signer_callback ) { - my->sign_block( signer_callback ); + auto block_ptr = my->finalize_block(); + + auto& ab = my->pending->_block_stage.get(); + + auto bsp = std::make_shared( + std::move( ab._pending_block_header_state ), + std::move( block_ptr ), + std::move( ab._trx_metas ), + signer_callback + ); + + my->pending->_block_stage = completed_block{ bsp }; + + return bsp; } void controller::commit_block() { @@ -1876,13 +1952,31 @@ account_name controller::fork_db_head_block_producer()const { return my->fork_db.head()->header.producer; } -block_state_ptr controller::pending_block_state()const { - if( my->pending ) return my->pending->_pending_block_state; - return block_state_ptr(); -} time_point controller::pending_block_time()const { EOS_ASSERT( my->pending, block_validate_exception, "no pending block" ); - return my->pending->_pending_block_state->header.timestamp; + + if( my->pending->_block_stage.contains() ) + return my->pending->_block_stage.get()._block_state->header.timestamp; + + return my->pending->get_pending_block_header_state().timestamp; +} + +account_name controller::pending_block_producer()const { + EOS_ASSERT( my->pending, block_validate_exception, "no pending block" ); + + if( my->pending->_block_stage.contains() ) + return my->pending->_block_stage.get()._block_state->header.producer; + + return my->pending->get_pending_block_header_state().producer; +} + +public_key_type controller::pending_block_signing_key()const { + EOS_ASSERT( my->pending, block_validate_exception, "no pending block" ); + + if( my->pending->_block_stage.contains() ) + return my->pending->_block_stage.get()._block_state->block_signing_key; + + return my->pending->get_pending_block_header_state().block_signing_key; } optional controller::pending_producer_block_id()const { @@ -1890,6 +1984,11 @@ optional controller::pending_producer_block_id()const { return my->pending->_producer_block_id; } +const vector& controller::get_pending_trx_receipts()const { + EOS_ASSERT( my->pending, block_validate_exception, "no pending block" ); + return my->pending->get_trx_receipts(); +} + uint32_t controller::last_irreversible_block_num() const { return std::max(std::max(my->head->bft_irreversible_blocknum, my->head->dpos_irreversible_blocknum), my->snapshot_head_block); } @@ -1984,13 +2083,14 @@ int64_t controller::set_proposed_producers( vector producers ) { decltype(sch.producers.cend()) end; decltype(end) begin; - if( my->pending->_pending_block_state->pending_schedule.producers.size() == 0 ) { - const auto& active_sch = my->pending->_pending_block_state->active_schedule; + const auto& pending_sch = 
pending_producers(); + + if( pending_sch.producers.size() == 0 ) { + const auto& active_sch = active_producers(); begin = active_sch.producers.begin(); end = active_sch.producers.end(); sch.version = active_sch.version + 1; } else { - const auto& pending_sch = my->pending->_pending_block_state->pending_schedule; begin = pending_sch.producers.begin(); end = pending_sch.producers.end(); sch.version = pending_sch.version + 1; @@ -2003,6 +2103,8 @@ int64_t controller::set_proposed_producers( vector producers ) { int64_t version = sch.version; + wlog( "proposed producer schedule with version ${v}", ("v", version) ); + my->db.modify( gpo, [&]( auto& gp ) { gp.proposed_schedule_block_num = cur_block_num; gp.proposed_schedule = std::move(sch); @@ -2011,15 +2113,34 @@ int64_t controller::set_proposed_producers( vector producers ) { } const producer_schedule_type& controller::active_producers()const { - if ( !(my->pending) ) + if( !(my->pending) ) return my->head->active_schedule; - return my->pending->_pending_block_state->active_schedule; + + if( my->pending->_block_stage.contains() ) + return my->pending->_block_stage.get()._block_state->active_schedule; + + return my->pending->get_pending_block_header_state().active_schedule; } const producer_schedule_type& controller::pending_producers()const { - if ( !(my->pending) ) + if( !(my->pending) ) return my->head->pending_schedule; - return my->pending->_pending_block_state->pending_schedule; + + if( my->pending->_block_stage.contains() ) + return my->pending->_block_stage.get()._block_state->pending_schedule; + + if( my->pending->_block_stage.contains() ) { + const auto& np = my->pending->_block_stage.get()._unsigned_block->new_producers; + if( np ) + return *np; + } + + const auto& bb = my->pending->_block_stage.get(); + + if( bb._new_pending_producer_schedule ) + return *bb._new_pending_producer_schedule; + + return bb._pending_block_header_state.prev_pending_schedule; } optional controller::proposed_producers()const { @@ -2163,6 +2284,10 @@ void controller::check_key_list( const public_key_type& key )const { my->check_key_list( key ); } +bool controller::is_building_block()const { + return my->pending.valid(); +} + bool controller::is_producing_block()const { if( !my->pending ) return false; diff --git a/libraries/chain/fork_database.cpp b/libraries/chain/fork_database.cpp index 52b49fff449..1a6518cfa1b 100644 --- a/libraries/chain/fork_database.cpp +++ b/libraries/chain/fork_database.cpp @@ -110,7 +110,7 @@ namespace eosio { namespace chain { void fork_database::set( block_state_ptr s ) { auto result = my->index.insert( s ); - EOS_ASSERT( s->id == s->header.id(), fork_database_exception, + EOS_ASSERT( s->id == s->header.id(), fork_database_exception, "block state id (${id}) is different from block state header id (${hid})", ("id", string(s->id))("hid", string(s->header.id())) ); //FC_ASSERT( s->block_num == s->header.block_num() ); @@ -196,8 +196,8 @@ namespace eosio { namespace chain { result.second.push_back(second_branch); first_branch = get_block( first_branch->header.previous ); second_branch = get_block( second_branch->header.previous ); - EOS_ASSERT( first_branch && second_branch, fork_db_block_not_found, - "either block ${fid} or ${sid} does not exist", + EOS_ASSERT( first_branch && second_branch, fork_db_block_not_found, + "either block ${fid} or ${sid} does not exist", ("fid", string(first_branch->header.previous))("sid", string(second_branch->header.previous)) ); } @@ -297,6 +297,7 @@ namespace eosio { namespace chain { return *nitr; } 
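A note on the branch-switch helper above: fetch_branch_from walks both fork tips back one block at a time, collecting each branch until the two walkers meet at the common ancestor. Below is a minimal standalone sketch of that two-pointer walk; block_node, fork_index, and lookup via std::unordered_map are simplified stand-ins for block_state and the fork database's multi-index, and the initial height-equalization step is assumed from context since the excerpt above only shows the joint loop.

#include <cstdint>
#include <unordered_map>
#include <utility>
#include <vector>

// Illustrative stand-in for block_state: an id, a link to the parent, and a height.
struct block_node {
   uint64_t id        = 0;
   uint64_t previous  = 0;
   uint32_t block_num = 0;
};

using fork_index = std::unordered_map<uint64_t, block_node>;

// Walk two fork tips back to their common ancestor, collecting each branch
// tip-first; at() throws if a link is missing, playing the role of the
// EOS_ASSERT( first_branch && second_branch, ... ) check above.
std::pair<std::vector<block_node>, std::vector<block_node>>
fetch_branch_from( const fork_index& idx, uint64_t first_id, uint64_t second_id ) {
   std::pair<std::vector<block_node>, std::vector<block_node>> result;
   block_node first  = idx.at( first_id );
   block_node second = idx.at( second_id );

   // Bring both walkers to the same height first.
   while( first.block_num > second.block_num ) {
      result.first.push_back( first );
      first = idx.at( first.previous );
   }
   while( second.block_num > first.block_num ) {
      result.second.push_back( second );
      second = idx.at( second.previous );
   }
   // Then step back in lock-step until the ids converge.
   while( first.id != second.id ) {
      result.first.push_back( first );
      result.second.push_back( second );
      first  = idx.at( first.previous );
      second = idx.at( second.previous );
   }
   return result; // at this point first.id == second.id is the common ancestor
}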
+ /* void fork_database::add( const header_confirmation& c ) { auto b = get_block( c.block_id ); EOS_ASSERT( b, fork_db_block_not_found, "unable to find block id ${id}", ("id",c.block_id)); @@ -307,7 +308,8 @@ namespace eosio { namespace chain { set_bft_irreversible( c.block_id ); } } - + */ + /** * This method will set this block as being BFT irreversible and will update * all blocks which build off of it to have the same bft_irb if their existing @@ -315,6 +317,7 @@ namespace eosio { namespace chain { * * This will require a search over all forks */ +#if 0 void fork_database::set_bft_irreversible( block_id_type id ) { auto& idx = my->index.get(); auto itr = idx.find(id); @@ -354,5 +357,6 @@ namespace eosio { namespace chain { queue = update( queue ); } } +#endif } } /// eosio::chain diff --git a/libraries/chain/include/eosio/chain/block_header.hpp b/libraries/chain/include/eosio/chain/block_header.hpp index bf9cf0bedb8..53de64eba67 100644 --- a/libraries/chain/include/eosio/chain/block_header.hpp +++ b/libraries/chain/include/eosio/chain/block_header.hpp @@ -10,15 +10,15 @@ namespace eosio { namespace chain { account_name producer; /** - * By signing this block this producer is confirming blocks [block_num() - confirmed, blocknum()) + * By signing this block this producer is confirming blocks [block_num() - confirmed, blocknum()) * as being the best blocks for that range and that he has not signed any other - * statements that would contradict. + * statements that would contradict. * * No producer should sign a block with overlapping ranges or it is proof of byzantine * behavior. When producing a block a producer is always confirming at least the block he * is building off of. A producer cannot confirm "this" block, only prior blocks. */ - uint16_t confirmed = 1; + uint16_t confirmed = 1; block_id_type previous; @@ -35,6 +35,8 @@ namespace eosio { namespace chain { extensions_type header_extensions; + block_header() = default; + digest_type digest()const; block_id_type id() const; uint32_t block_num() const { return num_from_id(previous) + 1; } @@ -55,7 +57,7 @@ namespace eosio { namespace chain { } } /// namespace eosio::chain -FC_REFLECT(eosio::chain::block_header, +FC_REFLECT(eosio::chain::block_header, (timestamp)(producer)(confirmed)(previous) (transaction_mroot)(action_mroot) (schedule_version)(new_producers)(header_extensions)) diff --git a/libraries/chain/include/eosio/chain/block_header_state.hpp b/libraries/chain/include/eosio/chain/block_header_state.hpp index c318843d5df..6319355a936 100644 --- a/libraries/chain/include/eosio/chain/block_header_state.hpp +++ b/libraries/chain/include/eosio/chain/block_header_state.hpp @@ -5,6 +5,42 @@ namespace eosio { namespace chain { +struct block_header_state; + +struct pending_block_header_state { + uint32_t block_num = 0; + block_id_type previous; + block_timestamp_type timestamp; + account_name producer; + uint16_t confirmed = 1; + uint32_t dpos_proposed_irreversible_blocknum = 0; + uint32_t dpos_irreversible_blocknum = 0; + uint32_t active_schedule_version = 0; + uint32_t prev_pending_schedule_lib_num = 0; /// last irr block num + digest_type prev_pending_schedule_hash; + producer_schedule_type prev_pending_schedule; + producer_schedule_type active_schedule; + incremental_merkle blockroot_merkle; + flat_map producer_to_last_produced; + flat_map producer_to_last_implied_irb; + public_key_type block_signing_key; + vector confirm_count; + bool was_pending_promoted = false; + + signed_block_header make_block_header( const 
checksum256_type& transaction_mroot, + const checksum256_type& action_mroot, + optional&& new_producers )const; + + block_header_state finish_next( const signed_block_header& h, bool skip_validate_signee = false )&&; + + block_header_state finish_next( signed_block_header& h, + const std::function& signer )&&; + +private: + block_header_state _finish_next( const signed_block_header& h )&&; +}; + + /** * @struct block_header_state * @brief defines the minimum state necessary to validate transaction headers @@ -27,17 +63,18 @@ struct block_header_state { vector confirm_count; vector confirmations; - block_header_state next( const signed_block_header& h, bool trust = false )const; - block_header_state generate_next( block_timestamp_type when )const; + pending_block_header_state next( block_timestamp_type when, uint16_t num_prev_blocks_to_confirm )const; + + block_header_state next( const signed_block_header& h, bool skip_validate_signee = false )const; - void set_new_producers( producer_schedule_type next_pending ); - void set_confirmed( uint16_t num_prev_blocks ); - void add_confirmation( const header_confirmation& c ); - bool maybe_promote_pending(); + //void set_new_producers( producer_schedule_type next_pending ); + //void set_confirmed( uint16_t num_prev_blocks ); + //void add_confirmation( const header_confirmation& c ); + //bool maybe_promote_pending(); bool has_pending_producers()const { return pending_schedule.producers.size(); } - uint32_t calc_dpos_last_irreversible()const; + uint32_t calc_dpos_last_irreversible( account_name producer_of_next_block )const; bool is_active_producer( account_name n )const; /* diff --git a/libraries/chain/include/eosio/chain/block_state.hpp b/libraries/chain/include/eosio/chain/block_state.hpp index 2292392ade4..98fb1594299 100644 --- a/libraries/chain/include/eosio/chain/block_state.hpp +++ b/libraries/chain/include/eosio/chain/block_state.hpp @@ -12,9 +12,23 @@ namespace eosio { namespace chain { struct block_state : public block_header_state { - explicit block_state( const block_header_state& cur ):block_header_state(cur){} - block_state( const block_header_state& prev, signed_block_ptr b, bool skip_validate_signee ); - block_state( const block_header_state& prev, block_timestamp_type when ); + block_state( const block_header_state& prev, + signed_block_ptr b, + bool skip_validate_signee + ); + + block_state( pending_block_header_state&& cur, + signed_block_ptr&& b, // unsigned block + vector&& trx_metas, + const std::function& signer + ); + + block_state( pending_block_header_state&& cur, + const signed_block_ptr& b, // signed block + vector&& trx_metas, + bool skip_validate_signee + ); + block_state() = default; /// weak_ptr prev_block_state.... 
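Taken together, the declarations above split a header-state transition into three explicit steps: block_header_state::next() derives a pending_block_header_state for the coming slot, make_block_header() assembles the unsigned header once the merkle roots are known, and one of the finish_next() overloads (signing for producers, signature verification for validators) yields the finalized block_header_state. Here is a sketch of the producer-side call sequence against these declarations; the empty merkle roots and zero confirmation count are placeholder inputs, and prev, when, and signer are assumed to be supplied by the caller.

#include <eosio/chain/block_header_state.hpp>
#include <functional>

using namespace eosio::chain;

block_header_state produce_header( const block_header_state& prev,
                                   block_timestamp_type when,
                                   const std::function<signature_type(const digest_type&)>& signer )
{
   // Step 1: derive the mutable "pending" state for the next slot,
   // confirming zero previous blocks in this example.
   pending_block_header_state pbhs = prev.next( when, 0 );

   // Step 2: assemble the unsigned header; the empty checksums stand in for
   // the transaction/action merkle roots computed from the block body.
   signed_block_header h = pbhs.make_block_header( checksum256_type(), // transaction_mroot
                                                   checksum256_type(), // action_mroot
                                                   optional<producer_schedule_type>() );

   // Step 3: finish the transition; this overload signs h in place and
   // returns the immutable header state for the new head.
   return std::move( pbhs ).finish_next( h, signer );
}

A validator replaying a received block takes the other overload instead: prev.next( h.timestamp, h.confirmed ).finish_next( h, skip_validate_signee ), which is exactly what the compatibility wrapper block_header_state::next( const signed_block_header&, bool ) in this patch does.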
diff --git a/libraries/chain/include/eosio/chain/controller.hpp b/libraries/chain/include/eosio/chain/controller.hpp index f0d5a53f52d..00e9f54d8db 100644 --- a/libraries/chain/include/eosio/chain/controller.hpp +++ b/libraries/chain/include/eosio/chain/controller.hpp @@ -139,7 +139,7 @@ namespace eosio { namespace chain { */ transaction_trace_ptr push_scheduled_transaction( const transaction_id_type& scheduled, fc::time_point deadline, uint32_t billed_cpu_time_us = 0 ); - void finalize_block(); + block_state_ptr finalize_block( const std::function& signer_callback ); void sign_block( const std::function& signer_callback ); void commit_block(); void pop_block(); @@ -188,9 +188,12 @@ namespace eosio { namespace chain { account_name fork_db_head_block_producer()const; time_point pending_block_time()const; - block_state_ptr pending_block_state()const; + account_name pending_block_producer()const; + public_key_type pending_block_signing_key()const; optional pending_producer_block_id()const; + const vector& get_pending_trx_receipts()const; + const producer_schedule_type& active_producers()const; const producer_schedule_type& pending_producers()const; optional proposed_producers()const; @@ -214,6 +217,7 @@ namespace eosio { namespace chain { void check_contract_list( account_name code )const; void check_action_list( account_name code, action_name action )const; void check_key_list( const public_key_type& key )const; + bool is_building_block()const; bool is_producing_block()const; bool is_ram_billing_in_notify_allowed()const; diff --git a/libraries/chain/include/eosio/chain/fork_database.hpp b/libraries/chain/include/eosio/chain/fork_database.hpp index 998157ab41a..7473de2d582 100644 --- a/libraries/chain/include/eosio/chain/fork_database.hpp +++ b/libraries/chain/include/eosio/chain/fork_database.hpp @@ -44,7 +44,7 @@ namespace eosio { namespace chain { block_state_ptr add( const block_state_ptr& next_block, bool skip_validate_previous ); void remove( const block_id_type& id ); - void add( const header_confirmation& c ); + //void add( const header_confirmation& c ); const block_state_ptr& head()const; @@ -71,7 +71,7 @@ namespace eosio { namespace chain { signal irreversible; private: - void set_bft_irreversible( block_id_type id ); + //void set_bft_irreversible( block_id_type id ); unique_ptr my; }; diff --git a/libraries/chain/transaction_context.cpp b/libraries/chain/transaction_context.cpp index d5da8ca279b..226e6863a16 100644 --- a/libraries/chain/transaction_context.cpp +++ b/libraries/chain/transaction_context.cpp @@ -162,7 +162,7 @@ namespace bacc = boost::accumulators; undo_session = c.mutable_db().start_undo_session(true); } trace->id = id; - trace->block_num = c.pending_block_state()->block_num; + trace->block_num = c.head_block_num() + 1; trace->block_time = c.pending_block_time(); trace->producer_block_id = c.pending_producer_block_id(); executed.reserve( trx.total_actions() ); diff --git a/libraries/testing/tester.cpp b/libraries/testing/tester.cpp index bcf811434d5..311a8dc3685 100644 --- a/libraries/testing/tester.cpp +++ b/libraries/testing/tester.cpp @@ -158,7 +158,7 @@ namespace eosio { namespace testing { auto head_time = control->head_block_time(); auto next_time = head_time + skip_time; - if( !control->pending_block_state() || control->pending_block_state()->header.timestamp != next_time ) { + if( !control->is_building_block() || control->pending_block_time() != next_time ) { _start_block( next_time ); } @@ -203,7 +203,7 @@ namespace eosio { namespace testing { } 
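With controller::finalize_block now taking the signer callback and returning the completed block_state_ptr (replacing the old finalize_block/sign_block pair), a full production round becomes start, push, finalize, commit. A minimal sketch of that round against the interface declared above, mirroring base_tester::_finish_block just below; chain and priv_key are assumed to be set up by the caller, and the zero confirm count is a placeholder.

#include <eosio/chain/controller.hpp>

using namespace eosio::chain;

block_state_ptr produce_one_block( controller& chain, const private_key_type& priv_key )
{
   // Opens the building_block stage of the pending state.
   chain.start_block( chain.head_block_time() + fc::microseconds( config::block_interval_us ),
                      0 /* confirm_block_count */ );

   // ... push transactions here; receipts accumulate in the building_block ...

   // Assembles the block, signs it, and moves the pending state to completed_block.
   block_state_ptr bsp = chain.finalize_block( [&]( const digest_type& d ) {
      return priv_key.sign( d );
   } );

   // Adds the completed block to the fork database and pushes the db session.
   chain.commit_block();
   return bsp;
}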
signed_block_ptr base_tester::_finish_block() { - FC_ASSERT( control->pending_block_state(), "must first start a block before it can be finished" ); + FC_ASSERT( control->is_building_block(), "must first start a block before it can be finished" ); auto producer = control->head_block_state()->get_scheduled_producer( control->pending_block_time() ); private_key_type priv_key; @@ -216,10 +216,9 @@ namespace eosio { namespace testing { priv_key = private_key_itr->second; } - control->finalize_block(); - control->sign_block( [&]( digest_type d ) { + control->finalize_block( [&]( digest_type d ) { return priv_key.sign(d); - }); + } ); control->commit_block(); last_produced_block[control->head_block_state()->header.producer] = control->head_block_state()->id; @@ -331,7 +330,7 @@ namespace eosio { namespace testing { uint32_t billed_cpu_time_us ) { try { - if( !control->pending_block_state() ) + if( !control->is_building_block() ) _start_block(control->head_block_time() + fc::microseconds(config::block_interval_us)); auto r = control->push_transaction( std::make_shared(std::make_shared(trx)), deadline, billed_cpu_time_us ); if( r->except_ptr ) std::rethrow_exception( r->except_ptr ); @@ -344,7 +343,7 @@ namespace eosio { namespace testing { uint32_t billed_cpu_time_us ) { try { - if( !control->pending_block_state() ) + if( !control->is_building_block() ) _start_block(control->head_block_time() + fc::microseconds(config::block_interval_us)); auto c = packed_transaction::none; diff --git a/plugins/history_plugin/history_plugin.cpp b/plugins/history_plugin/history_plugin.cpp index 0be3d9f11ca..4371a910388 100644 --- a/plugins/history_plugin/history_plugin.cpp +++ b/plugins/history_plugin/history_plugin.cpp @@ -264,7 +264,7 @@ namespace eosio { datastream ds( aho.packed_action_trace.data(), ps ); fc::raw::pack( ds, at ); aho.action_sequence_num = at.receipt.global_sequence; - aho.block_num = chain.pending_block_state()->block_num; + aho.block_num = chain.head_block_num() + 1; aho.block_time = chain.pending_block_time(); aho.trx_id = at.trx_id; }); @@ -348,7 +348,7 @@ namespace eosio { auto& chain = my->chain_plug->chain(); chainbase::database& db = const_cast( chain.db() ); // Override read-only access to state DB (highly unrecommended practice!) 
- // TODO: Use separate chainbase database for managing the state of the history_plugin (or remove deprecated history_plugin entirely) + // TODO: Use separate chainbase database for managing the state of the history_plugin (or remove deprecated history_plugin entirely) db.add_index(); db.add_index(); db.add_index(); @@ -493,15 +493,16 @@ namespace eosio { ++itr; } + + const vector* receipts = nullptr; auto blk = chain.fetch_block_by_number( result.block_num ); - if( blk == nullptr ) { // still in pending - auto blk_state = chain.pending_block_state(); - if( blk_state != nullptr ) { - blk = blk_state->block; - } + if( blk ) { + receipts = &blk->transactions; + } else if( chain.is_building_block() ) { // still in pending + receipts = &chain.get_pending_trx_receipts(); } - if( blk != nullptr ) { - for (const auto &receipt: blk->transactions) { + if( receipts ) { + for (const auto &receipt: *receipts) { if (receipt.trx.contains()) { auto &pt = receipt.trx.get(); if (pt.id() == result.id) { diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp index 18d7f2b795a..8ead8107b11 100644 --- a/plugins/producer_plugin/producer_plugin.cpp +++ b/plugins/producer_plugin/producer_plugin.cpp @@ -219,16 +219,13 @@ class producer_plugin_impl : public std::enable_shared_from_thischain(); const auto hbn = bsp->block_num; - auto new_block_header = bsp->header; - new_block_header.timestamp = new_block_header.timestamp.next(); - new_block_header.previous = bsp->id; - auto new_bs = bsp->generate_next(new_block_header.timestamp); + auto new_pbhs = bsp->next(bsp->header.timestamp.next(), 0); // for newly installed producers we can set their watermarks to the block they became active - if (new_bs.maybe_promote_pending() && bsp->active_schedule.version != new_bs.active_schedule.version) { + if( bsp->active_schedule.version != new_pbhs.active_schedule.version ) { flat_set new_producers; - new_producers.reserve(new_bs.active_schedule.producers.size()); - for( const auto& p: new_bs.active_schedule.producers) { + new_producers.reserve(new_pbhs.active_schedule.producers.size()); + for( const auto& p: new_pbhs.active_schedule.producers) { if (_producers.count(p.producer_name) > 0) new_producers.insert(p.producer_name); } @@ -361,12 +358,12 @@ class producer_plugin_impl : public std::enable_shared_from_this next) { chain::controller& chain = chain_plug->chain(); - if (!chain.pending_block_state()) { + if (!chain.is_building_block()) { _pending_incoming_transactions.emplace_back(trx, persist_until_expired, next); return; } - auto block_time = chain.pending_block_state()->header.timestamp.to_time_point(); + auto block_time = chain.pending_block_time(); auto send_response = [this, &trx, &chain, &next](const fc::static_variant& response) { next(response); @@ -375,7 +372,7 @@ class producer_plugin_impl : public std::enable_shared_from_thisheader.producer) + ("prod", chain.pending_block_producer()) ("txid", trx->id) ("why",response.get()->what())); } else { @@ -388,7 +385,7 @@ class producer_plugin_impl : public std::enable_shared_from_thisheader.producer) + ("prod", chain.pending_block_producer()) ("txid", trx->id)); } else { fc_dlog(_trx_trace_log, "[TRX_TRACE] Speculative execution is ACCEPTING tx: ${txid}", @@ -423,7 +420,7 @@ class producer_plugin_impl : public std::enable_shared_from_thisheader.producer) + ("prod", chain.pending_block_producer()) ("txid", trx->id)); } else { fc_dlog(_trx_trace_log, "[TRX_TRACE] Speculative execution COULD NOT FIT tx: ${txid} RETRYING", @@ 
-910,7 +907,7 @@ producer_plugin::integrity_hash_information producer_plugin::get_integrity_hash( my->schedule_production_loop(); }); - if (chain.pending_block_state()) { + if (chain.is_building_block()) { // abort the pending block chain.abort_block(); } else { @@ -927,7 +924,7 @@ producer_plugin::snapshot_information producer_plugin::create_snapshot() const { my->schedule_production_loop(); }); - if (chain.pending_block_state()) { + if (chain.is_building_block()) { // abort the pending block chain.abort_block(); } else { @@ -975,7 +972,7 @@ optional producer_plugin_impl::calculate_next_block_time(const a if (current_watermark_itr != _producer_watermarks.end()) { auto watermark = current_watermark_itr->second; auto block_num = chain.head_block_state()->block_num; - if (chain.pending_block_state()) { + if (chain.is_building_block()) { ++block_num; } if (watermark > block_num) { @@ -1107,11 +1104,13 @@ producer_plugin_impl::start_block_result producer_plugin_impl::start_block(bool chain.start_block(block_time, blocks_to_confirm); } FC_LOG_AND_DROP(); - const auto& pbs = chain.pending_block_state(); - if (pbs) { - if (_pending_block_mode == pending_block_mode::producing && pbs->block_signing_key != scheduled_producer.block_signing_key) { - elog("Block Signing Key is not expected value, reverting to speculative mode! [expected: \"${expected}\", actual: \"${actual\"", ("expected", scheduled_producer.block_signing_key)("actual", pbs->block_signing_key)); + if( chain.is_building_block() ) { + auto pending_block_time = chain.pending_block_time(); + auto pending_block_signing_key = chain.pending_block_signing_key(); + + if (_pending_block_mode == pending_block_mode::producing && pending_block_signing_key != scheduled_producer.block_signing_key) { + elog("Block Signing Key is not expected value, reverting to speculative mode! 
[expected: \"${expected}\", actual: \"${actual}\"", ("expected", scheduled_producer.block_signing_key)("actual", pending_block_signing_key));
            _pending_block_mode = pending_block_mode::speculating;
         }

@@ -1125,12 +1124,12 @@ producer_plugin_impl::start_block_result producer_plugin_impl::start_block(bool
      int num_expired_persistent = 0;
      int orig_count = _persistent_transactions.size();

-      while(!persisted_by_expiry.empty() && persisted_by_expiry.begin()->expiry <= pbs->header.timestamp.to_time_point()) {
+      while(!persisted_by_expiry.empty() && persisted_by_expiry.begin()->expiry <= pending_block_time) {
         auto const& txid = persisted_by_expiry.begin()->trx_id;
         if (_pending_block_mode == pending_block_mode::producing) {
            fc_dlog(_trx_trace_log, "[TRX_TRACE] Block ${block_num} for producer ${prod} is EXPIRING PERSISTED tx: ${txid}",
                    ("block_num", chain.head_block_num() + 1)
-                    ("prod", chain.pending_block_state()->header.producer)
+                    ("prod", chain.pending_block_producer())
                    ("txid", txid));
         } else {
            fc_dlog(_trx_trace_log, "[TRX_TRACE] Speculative execution is EXPIRING PERSISTED tx: ${txid}",
@@ -1162,7 +1161,7 @@ producer_plugin_impl::start_block_result producer_plugin_impl::start_block(bool
         apply_trxs.reserve(unapplied_trxs.size());

         auto calculate_transaction_category = [&](const transaction_metadata_ptr& trx) {
-            if (trx->packed_trx->expiration() < pbs->header.timestamp.to_time_point()) {
+            if (trx->packed_trx->expiration() < pending_block_time) {
              return tx_category::EXPIRED;
           } else if (persisted_by_id.find(trx->id) != persisted_by_id.end()) {
              return tx_category::PERSISTED;
@@ -1384,20 +1383,20 @@ void producer_plugin_impl::schedule_production_loop() {
      static const boost::posix_time::ptime epoch(boost::gregorian::date(1970, 1, 1));
      if (result == start_block_result::succeeded) {
         // ship this block off no later than its deadline
-         EOS_ASSERT( chain.pending_block_state(), missing_pending_block_state, "producing without pending_block_state, start_block succeeded" );
+         EOS_ASSERT( chain.is_building_block(), missing_pending_block_state, "producing without pending_block_state, start_block succeeded" );
         auto deadline = chain.pending_block_time().time_since_epoch().count() + (last_block ?
_last_block_time_offset_us : _produce_time_offset_us); _timer.expires_at( epoch + boost::posix_time::microseconds( deadline )); - fc_dlog(_log, "Scheduling Block Production on Normal Block #${num} for ${time}", ("num", chain.pending_block_state()->block_num)("time",deadline)); + fc_dlog(_log, "Scheduling Block Production on Normal Block #${num} for ${time}", ("num", chain.head_block_num()+1)("time",deadline)); } else { - EOS_ASSERT( chain.pending_block_state(), missing_pending_block_state, "producing without pending_block_state" ); + EOS_ASSERT( chain.is_building_block(), missing_pending_block_state, "producing without pending_block_state" ); auto expect_time = chain.pending_block_time() - fc::microseconds(config::block_interval_us); // ship this block off up to 1 block time earlier or immediately if (fc::time_point::now() >= expect_time) { _timer.expires_from_now( boost::posix_time::microseconds( 0 )); - fc_dlog(_log, "Scheduling Block Production on Exhausted Block #${num} immediately", ("num", chain.pending_block_state()->block_num)); + fc_dlog(_log, "Scheduling Block Production on Exhausted Block #${num} immediately", ("num", chain.head_block_num()+1)); } else { _timer.expires_at(epoch + boost::posix_time::microseconds(expect_time.time_since_epoch().count())); - fc_dlog(_log, "Scheduling Block Production on Exhausted Block #${num} at ${time}", ("num", chain.pending_block_state()->block_num)("time",expect_time)); + fc_dlog(_log, "Scheduling Block Production on Exhausted Block #${num} at ${time}", ("num", chain.head_block_num()+1)("time",expect_time)); } } @@ -1405,16 +1404,15 @@ void producer_plugin_impl::schedule_production_loop() { auto self = weak_this.lock(); if (self && ec != boost::asio::error::operation_aborted && cid == self->_timer_corelation_id) { // pending_block_state expected, but can't assert inside async_wait - auto block_num = chain.pending_block_state() ? chain.pending_block_state()->block_num : 0; + auto block_num = chain.is_building_block() ? 
chain.head_block_num() + 1 : 0;
            auto res = self->maybe_produce_block();
            fc_dlog(_log, "Producing Block #${num} returned: ${res}", ("num", block_num)("res", res));
         }
      });
   } else if (_pending_block_mode == pending_block_mode::speculating && !_producers.empty() && !production_disabled_by_policy()){
      fc_dlog(_log, "Speculative Block Created; Scheduling Speculative/Production Change");
-      EOS_ASSERT( chain.pending_block_state(), missing_pending_block_state, "speculating without pending_block_state" );
-      const auto& pbs = chain.pending_block_state();
-      schedule_delayed_production_loop(weak_this, pbs->header.timestamp);
+      EOS_ASSERT( chain.is_building_block(), missing_pending_block_state, "speculating without pending_block_state" );
+      schedule_delayed_production_loop(weak_this, chain.pending_block_time());
   } else {
      fc_dlog(_log, "Speculative Block Created");
   }
@@ -1495,16 +1493,14 @@ void producer_plugin_impl::produce_block() {
   //ilog("produce_block ${t}", ("t", fc::time_point::now())); // for testing _produce_time_offset_us
   EOS_ASSERT(_pending_block_mode == pending_block_mode::producing, producer_exception, "called produce_block while not actually producing");
   chain::controller& chain = chain_plug->chain();
-   const auto& pbs = chain.pending_block_state();
   const auto& hbs = chain.head_block_state();
-   EOS_ASSERT(pbs, missing_pending_block_state, "pending_block_state does not exist but it should, another plugin may have corrupted it");
-   auto signature_provider_itr = _signature_providers.find( pbs->block_signing_key );
+   EOS_ASSERT(chain.is_building_block(), missing_pending_block_state, "pending_block_state does not exist but it should, another plugin may have corrupted it");
+   auto signature_provider_itr = _signature_providers.find( chain.pending_block_signing_key() );
   EOS_ASSERT(signature_provider_itr != _signature_providers.end(), producer_priv_key_not_found, "Attempting to produce a block for which we don't have the private key");

   //idump( (fc::time_point::now() - chain.pending_block_time()) );
-   chain.finalize_block();
-   chain.sign_block( [&]( const digest_type& d ) {
+   chain.finalize_block( [&]( const digest_type& d ) {
      auto debug_logger = maybe_make_debug_time_logger();
      return signature_provider_itr->second(d);
   } );
diff --git a/plugins/test_control_plugin/test_control_plugin.cpp b/plugins/test_control_plugin/test_control_plugin.cpp
index 483e859c30f..170079a8016 100644
--- a/plugins/test_control_plugin/test_control_plugin.cpp
+++ b/plugins/test_control_plugin/test_control_plugin.cpp
@@ -23,8 +23,7 @@ class test_control_plugin_impl {
private:
   void accepted_block(const chain::block_state_ptr& bsp);
   void applied_irreversible_block(const chain::block_state_ptr& bsp);
-   void retrieve_next_block_state(const chain::block_state_ptr& bsp);
-   void process_next_block_state(const chain::block_header_state& bhs);
+   void process_next_block_state(const chain::block_state_ptr& bsp);

   fc::optional _accepted_block_connection;
   fc::optional _irreversible_block_connection;
@@ -55,26 +54,17 @@ void test_control_plugin_impl::disconnect() {

void test_control_plugin_impl::applied_irreversible_block(const chain::block_state_ptr& bsp) {
   if (_track_lib)
-      retrieve_next_block_state(bsp);
+      process_next_block_state(bsp);
}

void test_control_plugin_impl::accepted_block(const chain::block_state_ptr& bsp) {
   if (_track_head)
-      retrieve_next_block_state(bsp);
+      process_next_block_state(bsp);
}

-void test_control_plugin_impl::retrieve_next_block_state(const chain::block_state_ptr& bsp) {
-   const auto hbn = bsp->block_num;
-   auto
new_block_header = bsp->header; - new_block_header.timestamp = new_block_header.timestamp.next(); - new_block_header.previous = bsp->id; - auto new_bs = bsp->generate_next(new_block_header.timestamp); - process_next_block_state(new_bs); -} - -void test_control_plugin_impl::process_next_block_state(const chain::block_header_state& bhs) { +void test_control_plugin_impl::process_next_block_state(const chain::block_state_ptr& bsp) { const auto block_time = _chain.head_block_time() + fc::microseconds(chain::config::block_interval_us); - const auto& producer_name = bhs.get_scheduled_producer(block_time).producer_name; + auto producer_name = bsp->get_scheduled_producer(block_time).producer_name; // start counting sequences for this producer (once we if (producer_name == _producer && _clean_producer_sequence) { _producer_sequence += 1; diff --git a/unittests/forked_tests.cpp b/unittests/forked_tests.cpp index 9543a791174..9ebab8e4c3b 100644 --- a/unittests/forked_tests.cpp +++ b/unittests/forked_tests.cpp @@ -43,7 +43,7 @@ BOOST_AUTO_TEST_CASE( irrblock ) try { wlog("set producer schedule to [dan,sam,pam]"); c.produce_blocks(50); -} FC_LOG_AND_RETHROW() +} FC_LOG_AND_RETHROW() struct fork_tracker { vector blocks; @@ -60,7 +60,7 @@ BOOST_AUTO_TEST_CASE( fork_with_bad_block ) try { auto res = bios.set_producers( {N(a),N(b),N(c),N(d),N(e)} ); // run until the producers are installed and its the start of "a's" round - while( bios.control->pending_block_state()->header.producer.to_string() != "a" || bios.control->head_block_state()->header.producer.to_string() != "e") { + while( bios.control->pending_block_producer().to_string() != "a" || bios.control->head_block_state()->header.producer.to_string() != "e") { bios.produce_block(); } @@ -313,7 +313,7 @@ BOOST_AUTO_TEST_CASE( prune_remove_branch ) try { auto nextproducer = [](tester &c, int skip_interval) ->account_name { auto head_time = c.control->head_block_time(); auto next_time = head_time + fc::milliseconds(config::block_interval_ms * skip_interval); - return c.control->head_block_state()->get_scheduled_producer(next_time).producer_name; + return c.control->head_block_state()->get_scheduled_producer(next_time).producer_name; }; // fork c: 2 producers: dan, sam @@ -323,18 +323,18 @@ BOOST_AUTO_TEST_CASE( prune_remove_branch ) try { account_name next1 = nextproducer(c, skip1); if (next1 == N(dan) || next1 == N(sam)) { c.produce_block(fc::milliseconds(config::block_interval_ms * skip1)); skip1 = 1; - } + } else ++skip1; account_name next2 = nextproducer(c2, skip2); if (next2 == N(scott)) { c2.produce_block(fc::milliseconds(config::block_interval_ms * skip2)); skip2 = 1; - } + } else ++skip2; } BOOST_REQUIRE_EQUAL(87, c.control->head_block_num()); BOOST_REQUIRE_EQUAL(73, c2.control->head_block_num()); - + // push fork from c2 => c int p = fork_num; while ( p < c2.control->head_block_num()) { @@ -344,7 +344,7 @@ BOOST_AUTO_TEST_CASE( prune_remove_branch ) try { BOOST_REQUIRE_EQUAL(73, c.control->head_block_num()); -} FC_LOG_AND_RETHROW() +} FC_LOG_AND_RETHROW() BOOST_AUTO_TEST_CASE( read_modes ) try { diff --git a/unittests/producer_schedule_tests.cpp b/unittests/producer_schedule_tests.cpp index a7279499656..07435f97d96 100644 --- a/unittests/producer_schedule_tests.cpp +++ b/unittests/producer_schedule_tests.cpp @@ -239,7 +239,7 @@ BOOST_FIXTURE_TEST_CASE( producer_schedule_promotion_test, TESTER ) try { produce_blocks(23); // Alice produces the last block of her first round. 
// Bob's first block (which advances LIB to Alice's last block) is started but not finalized. BOOST_REQUIRE_EQUAL( control->head_block_producer(), N(alice) ); - BOOST_REQUIRE_EQUAL( control->pending_block_state()->header.producer, N(bob) ); + BOOST_REQUIRE_EQUAL( control->pending_block_producer(), N(bob) ); BOOST_CHECK_EQUAL( control->pending_producers().version, 2 ); produce_blocks(12); // Bob produces his first 11 blocks @@ -247,7 +247,7 @@ BOOST_FIXTURE_TEST_CASE( producer_schedule_promotion_test, TESTER ) try { produce_blocks(12); // Bob produces his 12th block. // Alice's first block of the second round is started but not finalized (which advances LIB to Bob's last block). BOOST_REQUIRE_EQUAL( control->head_block_producer(), N(alice) ); - BOOST_REQUIRE_EQUAL( control->pending_block_state()->header.producer, N(bob) ); + BOOST_REQUIRE_EQUAL( control->pending_block_producer(), N(bob) ); BOOST_CHECK_EQUAL( control->active_producers().version, 2 ); BOOST_CHECK_EQUAL( true, compare_schedules( sch2, control->active_producers() ) ); @@ -299,7 +299,7 @@ BOOST_FIXTURE_TEST_CASE( producer_schedule_reduction, tester ) try { produce_blocks(48); BOOST_REQUIRE_EQUAL( control->head_block_producer(), N(bob) ); - BOOST_REQUIRE_EQUAL( control->pending_block_state()->header.producer, N(carol) ); + BOOST_REQUIRE_EQUAL( control->pending_block_producer(), N(carol) ); BOOST_CHECK_EQUAL( control->pending_producers().version, 2 ); produce_blocks(47); @@ -307,7 +307,7 @@ BOOST_FIXTURE_TEST_CASE( producer_schedule_reduction, tester ) try { produce_blocks(1); BOOST_REQUIRE_EQUAL( control->head_block_producer(), N(carol) ); - BOOST_REQUIRE_EQUAL( control->pending_block_state()->header.producer, N(alice) ); + BOOST_REQUIRE_EQUAL( control->pending_block_producer(), N(alice) ); BOOST_CHECK_EQUAL( control->active_producers().version, 2 ); BOOST_CHECK_EQUAL( true, compare_schedules( sch2, control->active_producers() ) ); From 9463efdfe2672e3058ac5701f01fbdd5525afada Mon Sep 17 00:00:00 2001 From: arhag Date: Wed, 16 Jan 2019 21:23:27 -0500 Subject: [PATCH 002/680] fork database refactor (merge + further changes) --- libraries/chain/block_log.cpp | 3 + libraries/chain/controller.cpp | 457 +++++++++++------- libraries/chain/fork_database.cpp | 436 +++++++++-------- .../eosio/chain/block_header_state.hpp | 19 +- .../chain/include/eosio/chain/block_state.hpp | 7 +- .../chain/include/eosio/chain/config.hpp | 2 +- .../chain/include/eosio/chain/controller.hpp | 5 + .../include/eosio/chain/fork_database.hpp | 59 ++- plugins/bnet_plugin/bnet_plugin.cpp | 4 +- plugins/chain_plugin/chain_plugin.cpp | 12 +- plugins/net_plugin/net_plugin.cpp | 20 +- unittests/forked_tests.cpp | 18 +- 12 files changed, 597 insertions(+), 445 deletions(-) diff --git a/libraries/chain/block_log.cpp b/libraries/chain/block_log.cpp index 41e9756483e..a2bac301efc 100644 --- a/libraries/chain/block_log.cpp +++ b/libraries/chain/block_log.cpp @@ -247,6 +247,9 @@ namespace eosio { namespace chain { if (first_block) { append(first_block); + } else { + my->head.reset(); + my->head_id = {}; } auto pos = my->block_stream.tellp(); diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp index 2a3071b5752..9836980ba1e 100644 --- a/libraries/chain/controller.cpp +++ b/libraries/chain/controller.cpp @@ -176,7 +176,6 @@ struct controller_impl { authorization_manager authorization; controller::config conf; chain_id_type chain_id; - bool replaying= false; optional replay_head_time; db_read_mode read_mode = db_read_mode::SPECULATIVE; bool 
in_trx_requiring_checks = false; ///< if true, checks that are normally skipped on replay (e.g. auth checks) cannot be skipped @@ -197,7 +196,11 @@ struct controller_impl { void pop_block() { auto prev = fork_db.get_block( head->header.previous ); - EOS_ASSERT( prev, block_validate_exception, "attempt to pop beyond last irreversible block" ); + + if( !prev ) { + EOS_ASSERT( fork_db.root()->id == head->header.previous, block_validate_exception, "attempt to pop beyond last irreversible block" ); + prev = fork_db.root(); + } if( const auto* b = reversible_blocks.find(head->block_num) ) { @@ -209,9 +212,9 @@ struct controller_impl { for( const auto& t : head->trxs ) unapplied_transactions[t->signed_id] = t; } + head = prev; db.undo(); - } @@ -255,11 +258,6 @@ struct controller_impl { */ SET_APP_HANDLER( eosio, eosio, canceldelay ); - - fork_db.irreversible.connect( [&]( auto b ) { - on_irreversible(b); - }); - } /** @@ -288,90 +286,131 @@ struct controller_impl { } } - void on_irreversible( const block_state_ptr& s ) { - if( !blog.head() ) - blog.read_head(); + void log_irreversible() { + EOS_ASSERT( fork_db.root(), fork_database_exception, "fork database not properly initialized" ); const auto& log_head = blog.head(); - bool append_to_blog = false; - if (!log_head) { - if (s->block) { - EOS_ASSERT(s->block_num == blog.first_block_num(), block_log_exception, "block log has no blocks and is appending the wrong first block. Expected ${expected}, but received: ${actual}", - ("expected", blog.first_block_num())("actual", s->block_num)); - append_to_blog = true; - } else { - EOS_ASSERT(s->block_num == blog.first_block_num() - 1, block_log_exception, "block log has no blocks and is not properly set up to start after the snapshot"); - } + + auto lib_num = log_head ? log_head->block_num() : (blog.first_block_num() - 1); + + auto root_id = fork_db.root()->id; + + if( log_head ) { + EOS_ASSERT( root_id == log_head->id(), fork_database_exception, "fork database root does not match block log head" ); } else { - auto lh_block_num = log_head->block_num(); - if (s->block_num > lh_block_num) { - EOS_ASSERT(s->block_num - 1 == lh_block_num, unlinkable_block_exception, "unlinkable block", ("s->block_num", s->block_num)("lh_block_num", lh_block_num)); - EOS_ASSERT(s->block->previous == log_head->id(), unlinkable_block_exception, "irreversible doesn't link to block log head"); - append_to_blog = true; - } + EOS_ASSERT( fork_db.root()->block_num == lib_num, fork_database_exception, + "empty block log expects the first appended block to build off a block that is not the fork database root" ); } + auto fork_head = (read_mode == db_read_mode::IRREVERSIBLE) ? 
fork_db.pending_head() : fork_db.head(); - db.commit( s->block_num ); + if( fork_head->dpos_irreversible_blocknum <= lib_num ) + return; - if( append_to_blog ) { - blog.append(s->block); - } + /* + const auto& rbi = reversible_blocks.get_index(); + auto libitr = rbi.find( fork_head->dpos_irreversible_blocknum ); + EOS_ASSERT( libitr != rbi.end(), fork_database_exception, + "new LIB according to fork database is not in reversible block database" ); + + fc::datastream ds( libitr->packedblock.data(), libitr->packedblock.size() ); + block_header h; + fc::raw::unpack( ds, h ); + auto lib_id = h.id(); + */ - const auto& ubi = reversible_blocks.get_index(); - auto objitr = ubi.begin(); - while( objitr != ubi.end() && objitr->blocknum <= s->block_num ) { - reversible_blocks.remove( *objitr ); - objitr = ubi.begin(); - } + const auto branch = fork_db.fetch_branch( fork_head->id, fork_head->dpos_irreversible_blocknum ); //fork_db.fetch_branch( lib_id ); + try { + const auto& rbi = reversible_blocks.get_index(); - // the "head" block when a snapshot is loaded is virtual and has no block data, all of its effects - // should already have been loaded from the snapshot so, it cannot be applied - if (s->block) { - if (read_mode == db_read_mode::IRREVERSIBLE) { - // when applying a snapshot, head may not be present - // when not applying a snapshot, make sure this is the next block - if (!head || s->block_num == head->block_num + 1) { - apply_block(s->block, controller::block_status::complete); - head = s; - } else { - // otherwise, assert the one odd case where initializing a chain - // from genesis creates and applies the first block automatically. - // when syncing from another chain, this is pushed in again - EOS_ASSERT(!head || head->block_num == 1, block_validate_exception, "Attempting to re-apply an irreversible block that was not the implied genesis block"); + for( auto bitr = branch.rbegin(); bitr != branch.rend(); ++bitr ) { + if( read_mode == db_read_mode::IRREVERSIBLE ) { + apply_block( (*bitr)->block, controller::block_status::complete ); + head = (*bitr); + fork_db.mark_valid( head ); } - fork_db.mark_in_current_chain(head, true); - fork_db.set_validity(head, true); + emit( self.irreversible_block, *bitr ); + + db.commit( (*bitr)->block_num ); + root_id = (*bitr)->id; + + blog.append( (*bitr)->block ); + + auto rbitr = rbi.begin(); + while( rbitr != rbi.end() && rbitr->blocknum <= (*bitr)->block_num ) { + reversible_blocks.remove( *rbitr ); + rbitr = rbi.begin(); + } + } + } catch( fc::exception& ) { + if( root_id != fork_db.root()->id ) { + fork_db.advance_root( root_id ); } - emit(self.irreversible_block, s); + throw; + } + + //db.commit( fork_head->dpos_irreversible_blocknum ); // redundant + + if( root_id != fork_db.root()->id ) { + fork_db.advance_root( root_id ); } } + /** + * Sets fork database head to the genesis state. 
+    */
+   void initialize_blockchain_state() {
+      wlog( "Initializing new blockchain with genesis state" );
+      producer_schedule_type initial_schedule{ 0, {{config::system_account_name, conf.genesis.initial_key}} };
+
+      block_header_state genheader;
+      genheader.active_schedule = initial_schedule;
+      genheader.pending_schedule = initial_schedule;
+      genheader.pending_schedule_hash = fc::sha256::hash(initial_schedule);
+      genheader.header.timestamp = conf.genesis.initial_timestamp;
+      genheader.header.action_mroot = conf.genesis.compute_chain_id();
+      genheader.id = genheader.header.id();
+      genheader.block_num = genheader.header.block_num();
+
+      head = std::make_shared();
+      static_cast(*head) = genheader;
+      head->block = std::make_shared(genheader.header);
+      fork_db.reset( *head );
+      db.set_revision( head->block_num );
+
+      initialize_database();
+   }
+
   void replay(std::function shutdown) {
-      auto blog_head = blog.read_head();
+      auto blog_head = blog.head();
      auto blog_head_time = blog_head->timestamp.to_time_point();
-      replaying = true;
      replay_head_time = blog_head_time;
      auto start_block_num = head->block_num + 1;
-      ilog( "existing block log, attempting to replay from ${s} to ${n} blocks",
-            ("s", start_block_num)("n", blog_head->block_num()) );
-
      auto start = fc::time_point::now();
-      while( auto next = blog.read_block_by_num( head->block_num + 1 ) ) {
-         replay_push_block( next, controller::block_status::irreversible );
-         if( next->block_num() % 100 == 0 ) {
-            std::cerr << std::setw(10) << next->block_num() << " of " << blog_head->block_num() <<"\r";
-            if( shutdown() ) break;
+
+      if( start_block_num <= blog_head->block_num() ) {
+         ilog( "existing block log, attempting to replay from ${s} to ${n} blocks",
+               ("s", start_block_num)("n", blog_head->block_num()) );
+         while( auto next = blog.read_block_by_num( head->block_num + 1 ) ) {
+            replay_push_block( next, controller::block_status::irreversible );
+            if( next->block_num() % 100 == 0 ) {
+               std::cerr << std::setw(10) << next->block_num() << " of " << blog_head->block_num() <<"\r";
+               if( shutdown() ) break;
+            }
          }
-      }
-      std::cerr<< "\n";
-      ilog( "${n} blocks replayed", ("n", head->block_num - start_block_num) );
+         std::cerr<< "\n";
+         ilog( "${n} irreversible blocks replayed", ("n", 1 + head->block_num - start_block_num) );

-      // if the irreverible log is played without undo sessions enabled, we need to sync the
-      // revision ordinal to the appropriate expected value here.
-      if( self.skip_db_sessions( controller::block_status::irreversible ) )
-         db.set_revision(head->block_num);
+         fork_db.reset( *head );
+
+         // if the irreversible log is played without undo sessions enabled, we need to sync the
+         // revision ordinal to the appropriate expected value here.
+         if( self.skip_db_sessions( controller::block_status::irreversible ) )
+            db.set_revision( head->block_num );
+      } else {
+         ilog( "no irreversible blocks need to be replayed" );
+      }

      int rev = 0;
      while( auto obj = reversible_blocks.find(head->block_num+1) ) {
@@ -384,59 +423,88 @@ struct controller_impl {
      ilog( "replayed ${n} blocks in ${duration} seconds, ${mspb} ms/block",
            ("n", head->block_num - start_block_num)("duration", (end-start).count()/1000000)
            ("mspb", ((end-start).count()/1000.0)/(head->block_num-start_block_num)) );
-      replaying = false;
      replay_head_time.reset();
   }

   void init(std::function shutdown, const snapshot_reader_ptr& snapshot) {
+      auto blog_head = blog.head();
+      auto lib_num = (blog_head ?
blog_head->block_num() : 1); + auto last_block_num = lib_num; + + const auto& rbi = reversible_blocks.get_index(); + + { + auto rbitr = rbi.rbegin(); + if( rbitr != rbi.rend() ) { + EOS_ASSERT( blog_head, fork_database_exception, + "non-empty reversible blocks despite empty irreversible block log" ); + EOS_ASSERT( rbitr->blocknum > lib_num, fork_database_exception, + "reversible block database is inconsistent with the block log" ); + last_block_num = rbitr->blocknum; + } + } - bool report_integrity_hash = !!snapshot; - if (snapshot) { + // Setup state if necessary (or in the default case stay with already loaded state) + if( snapshot ) { EOS_ASSERT( !head, fork_database_exception, "" ); snapshot->validate(); read_from_snapshot( snapshot ); - auto end = blog.read_head(); - if( !end ) { - blog.reset( conf.genesis, signed_block_ptr(), head->block_num + 1 ); - } else if( end->block_num() > head->block_num ) { - replay( shutdown ); - } else { - EOS_ASSERT( end->block_num() == head->block_num, fork_database_exception, - "Block log is provided with snapshot but does not contain the head block from the snapshot" ); + if( !blog_head ) { + lib_num = head->block_num; + blog.reset( conf.genesis, signed_block_ptr(), lib_num + 1 ); } + + EOS_ASSERT( lib_num >= head->block_num, fork_database_exception, + "Block log is provided with snapshot but does not contain the head block from the snapshot" ); } else { if( !head ) { - initialize_fork_db(); // set head to genesis state + initialize_blockchain_state(); // set head to genesis state + } else { + EOS_ASSERT( last_block_num == head->block_num, fork_database_exception, + "reversible block database is inconsistent with fork database, replay blockchain", + ("head", head->block_num)("last_block_num", last_block_num) + ); } - auto end = blog.read_head(); - if( !end ) { - blog.reset( conf.genesis, head->block ); - } else if( end->block_num() > head->block_num ) { - replay( shutdown ); - report_integrity_hash = true; + if( !blog_head ) { + if( blog.first_block_num() > 1 ) { + lib_num = blog.first_block_num() - 1; + last_block_num = lib_num; + EOS_ASSERT( lib_num == head->block_num, fork_database_exception, + "Empty block log requires the next block to be appended to it to be at height ${first_block_num} which is not compatible with the head block", + ("first_block_num", blog.first_block_num())("head", head->block_num) + ); + } else { + blog.reset( conf.genesis, head->block ); + } } } - if( shutdown() ) return; + bool report_integrity_hash = !!snapshot || (lib_num > head->block_num); - const auto& ubi = reversible_blocks.get_index(); - auto objitr = ubi.rbegin(); - if( objitr != ubi.rend() ) { - EOS_ASSERT( objitr->blocknum == head->block_num, fork_database_exception, - "reversible block database is inconsistent with fork database, replay blockchain", - ("head",head->block_num)("unconfimed", objitr->blocknum) ); - } else { - auto end = blog.read_head(); - EOS_ASSERT( !end || end->block_num() == head->block_num, fork_database_exception, - "fork database exists but reversible block database does not, replay blockchain", - ("blog_head",end->block_num())("head",head->block_num) ); + // Trim any irreversible blocks from start of reversible blocks database + if( lib_num >= last_block_num ) { + last_block_num = lib_num; + auto rbitr = rbi.begin(); + while( rbitr != rbi.end() && rbitr->blocknum <= lib_num ) { + reversible_blocks.remove( *rbitr ); + rbitr = rbi.begin(); + } + } + + if( lib_num > head->block_num ) { + replay( shutdown ); // replay irreversible blocks and 
any reversible blocks + } else if( last_block_num > lib_num ) { + replay( shutdown ); // replay reversible blocks } - EOS_ASSERT( db.revision() >= head->block_num, fork_database_exception, "fork database is inconsistent with shared memory", - ("db",db.revision())("head",head->block_num) ); + if( shutdown() ) return; + + EOS_ASSERT( db.revision() >= head->block_num, fork_database_exception, + "fork database is inconsistent with state", + ("db",db.revision())("head",head->block_num) ); if( db.revision() > head->block_num ) { wlog( "warning: database revision (${db}) is greater than head block number (${head}), " @@ -451,7 +519,6 @@ struct controller_impl { const auto hash = calculate_integrity_hash(); ilog( "database initialized with hash: ${hash}", ("hash", hash) ); } - } ~controller_impl() { @@ -584,11 +651,9 @@ struct controller_impl { block_header_state head_header_state; section.read_row(head_header_state, db); + fork_db.reset( head_header_state ); auto head_state = std::make_shared(); static_cast(*head_state) = head_header_state; - fork_db.set(head_state); - fork_db.set_validity(head_state, true); - fork_db.mark_in_current_chain(head_state, true); head = head_state; snapshot_head_block = head->block_num; }); @@ -628,32 +693,6 @@ struct controller_impl { return enc.result(); } - - /** - * Sets fork database head to the genesis state. - */ - void initialize_fork_db() { - wlog( " Initializing new blockchain with genesis state " ); - producer_schedule_type initial_schedule{ 0, {{config::system_account_name, conf.genesis.initial_key}} }; - - block_header_state genheader; - genheader.active_schedule = initial_schedule; - genheader.pending_schedule = initial_schedule; - genheader.pending_schedule_hash = fc::sha256::hash(initial_schedule); - genheader.header.timestamp = conf.genesis.initial_timestamp; - genheader.header.action_mroot = conf.genesis.compute_chain_id(); - genheader.id = genheader.header.id(); - genheader.block_num = genheader.header.block_num(); - - head = std::make_shared(); - static_cast(*head) = genheader; - head->block = std::make_shared(genheader.header); - fork_db.set( head ); - db.set_revision( head->block_num ); - - initialize_database(); - } - void create_native_account( account_name name, const authority& owner, const authority& active, bool is_privileged = false ) { db.create([&](auto& a) { a.name = name; @@ -1152,7 +1191,7 @@ struct controller_impl { ) { // Promote proposed schedule to pending schedule. 
- if( !replaying ) { + if( !replay_head_time ) { ilog( "promoting proposed schedule (set in block ${proposed_num}) to pending; current block: ${n} lib: ${lib} schedule: ${schedule} ", ("proposed_num", *gpo.proposed_schedule_block_num)("n", pbhs.block_num) ("lib", pbhs.dpos_irreversible_blocknum) @@ -1264,22 +1303,25 @@ struct controller_impl { auto bsp = pending->_block_stage.get()._block_state; - if (add_to_fork_db) { - bsp->in_current_chain = true; - bsp->validated = true; - auto new_bsp = fork_db.add(bsp, true); - emit(self.accepted_block_header, bsp); + if( add_to_fork_db ) { + fork_db.add( bsp ); + fork_db.mark_valid( bsp ); + emit( self.accepted_block_header, bsp ); head = fork_db.head(); - EOS_ASSERT(new_bsp == head, fork_database_exception, "committed block did not become the new head in fork database"); + EOS_ASSERT( bsp == head, fork_database_exception, "committed block did not become the new head in fork database"); } - if( !replaying ) { + if( !replay_head_time && read_mode != db_read_mode::IRREVERSIBLE ) { reversible_blocks.create( [&]( auto& ubo ) { ubo.blocknum = bsp->block_num; ubo.set_block( bsp->block ); }); } + if( add_to_fork_db ) { + log_irreversible(); + } + emit( self.accepted_block, bsp ); } catch (...) { // dont bother resetting pending, instead abort the block @@ -1382,8 +1424,9 @@ struct controller_impl { auto existing = fork_db.get_block( id ); EOS_ASSERT( !existing, fork_database_exception, "we already know about this block: ${id}", ("id", id) ); - auto prev = fork_db.get_block( b->previous ); - EOS_ASSERT( prev, unlinkable_block_exception, "unlinkable block ${id}", ("id", id)("previous", b->previous) ); + auto prev = fork_db.get_block_header( b->previous ); + EOS_ASSERT( prev, unlinkable_block_exception, + "unlinkable block ${id}", ("id", id)("previous", b->previous) ); return async_thread_pool( thread_pool, [b, prev]() { const bool skip_validate_signee = false; @@ -1403,15 +1446,25 @@ struct controller_impl { auto& b = new_header_state->block; emit( self.pre_accepted_block, b ); - fork_db.add( new_header_state, false ); + block_state_ptr bsp; + + auto prior = fork_db.get_block_header( b->previous ); + EOS_ASSERT( prior, unlinkable_block_exception, + "unlinkable block", ("id", string(b->id()))("previous", string(b->previous)) ); + bsp = std::make_shared( *prior, b, false ); + + fork_db.add( bsp ); if (conf.trusted_producers.count(b->producer)) { trusted_producer_light_validation = true; }; - emit( self.accepted_block_header, new_header_state ); - if ( read_mode != db_read_mode::IRREVERSIBLE ) { - maybe_switch_forks( s ); + emit( self.accepted_block_header, bsp ); + + if( read_mode != db_read_mode::IRREVERSIBLE ) { + maybe_switch_forks( fork_db.pending_head(), s ); + } else { + log_irreversible(); } } FC_LOG_AND_RETHROW( ) @@ -1429,41 +1482,49 @@ struct controller_impl { block_validate_exception, "invalid block status for replay" ); emit( self.pre_accepted_block, b ); const bool skip_validate_signee = !conf.force_all_checks; - auto new_header_state = fork_db.add( b, skip_validate_signee ); - emit( self.accepted_block_header, new_header_state ); + auto bsp = std::make_shared( *head, b, skip_validate_signee ); - if ( read_mode != db_read_mode::IRREVERSIBLE ) { - maybe_switch_forks( s ); + if( s != controller::block_status::irreversible ) { + fork_db.add( bsp ); } - // on replay irreversible is not emitted by fork database, so emit it explicitly here - if( s == controller::block_status::irreversible ) - emit( self.irreversible_block, new_header_state ); + emit( 
self.accepted_block_header, bsp );
+
+         if( s == controller::block_status::irreversible ) {
+            apply_block( bsp->block, s );
+            head = bsp;
+
+            // On replay, log_irreversible is not called and so no irreversible_block signal is emitted.
+            // So emit it explicitly here.
+            emit( self.irreversible_block, bsp );
+         } else if( read_mode != db_read_mode::IRREVERSIBLE ) {
+            maybe_switch_forks( bsp, s );
+         } else {
+            log_irreversible();
+         }

      } FC_LOG_AND_RETHROW( )
   }

-   void maybe_switch_forks( controller::block_status s ) {
-      auto new_head = fork_db.head();
-
+   void maybe_switch_forks( const block_state_ptr& new_head, controller::block_status s ) {
+      bool head_changed = true;
      if( new_head->header.previous == head->id ) {
         try {
            apply_block( new_head->block, s );
-            fork_db.mark_in_current_chain( new_head, true );
-            fork_db.set_validity( new_head, true );
+            fork_db.mark_valid( new_head );
            head = new_head;
         } catch ( const fc::exception& e ) {
-            fork_db.set_validity( new_head, false ); // Removes new_head from fork_db index, so no need to mark it as not in the current chain.
+            fork_db.remove( new_head->id );
            throw;
         }
      } else if( new_head->id != head->id ) {
+         auto old_head = head;
         ilog("switching forks from ${current_head_id} (block number ${current_head_num}) to ${new_head_id} (block number ${new_head_num})",
              ("current_head_id", head->id)("current_head_num", head->block_num)("new_head_id", new_head->id)("new_head_num", new_head->block_num) );
         auto branches = fork_db.fetch_branch_from( new_head->id, head->id );

         for( auto itr = branches.second.begin(); itr != branches.second.end(); ++itr ) {
-            fork_db.mark_in_current_chain( *itr, false );
            pop_block();
         }
         EOS_ASSERT( self.head_block_id() == branches.second.back()->header.previous, fork_database_exception,
@@ -1472,24 +1533,26 @@ struct controller_impl {
         for( auto ritr = branches.first.rbegin(); ritr != branches.first.rend(); ++ritr ) {
            optional except;
            try {
-               apply_block( (*ritr)->block, (*ritr)->validated ? controller::block_status::validated : controller::block_status::complete );
+               apply_block( (*ritr)->block, (*ritr)->is_valid() ? controller::block_status::validated : controller::block_status::complete );
+               fork_db.mark_valid( *ritr );
               head = *ritr;
-               fork_db.mark_in_current_chain( *ritr, true );
-               (*ritr)->validated = true;
+            } catch (const fc::exception& e) {
+               except = e;
            }
-            catch (const fc::exception& e) { except = e; }
-            if (except) {
+            if( except ) {
               elog("exception thrown while switching forks ${e}", ("e", except->to_detail_string()));

               // ritr currently points to the block that threw
-               // if we mark it invalid it will automatically remove all forks built off it.
-               fork_db.set_validity( *ritr, false );
+               // Remove the block that threw and all forks built off it.
+ fork_db.remove( (*ritr)->id ); + + EOS_ASSERT( head->id == fork_db.head()->id, fork_database_exception, + "loss of sync between fork_db and controller head during fork switch error" ); // pop all blocks from the bad fork // ritr base is a forward itr to the last block successfully applied auto applied_itr = ritr.base(); for( auto itr = applied_itr; itr != branches.first.end(); ++itr ) { - fork_db.mark_in_current_chain( *itr, false ); pop_block(); } EOS_ASSERT( self.head_block_id() == branches.second.back()->header.previous, fork_database_exception, @@ -1499,13 +1562,17 @@ struct controller_impl { for( auto ritr = branches.second.rbegin(); ritr != branches.second.rend(); ++ritr ) { apply_block( (*ritr)->block, controller::block_status::validated /* we previously validated these blocks*/ ); head = *ritr; - fork_db.mark_in_current_chain( *ritr, true ); } throw *except; } // end if exception } /// end for each block in branch - ilog("successfully switched fork to new head ${new_head_id}", ("new_head_id", new_head->id) ); + ilog("successfully switched fork to new head ${new_head_id}", ("new_head_id", new_head->id)); + } else { + head_changed = false; } + + if( head_changed ) + log_irreversible(); } /// push_block void abort_block() { @@ -1789,10 +1856,12 @@ controller::controller( const controller::config& cfg ) controller::~controller() { my->abort_block(); + /* Shouldn't be needed anymore. //close fork_db here, because it can generate "irreversible" signal to this controller, //in case if read-mode == IRREVERSIBLE, we will apply latest irreversible block //for that we need 'my' to be valid pointer pointing to valid controller_impl. my->fork_db.close(); + */ } void controller::add_indices() { @@ -1952,6 +2021,22 @@ account_name controller::fork_db_head_block_producer()const { return my->fork_db.head()->header.producer; } +uint32_t controller::fork_db_pending_head_block_num()const { + return my->fork_db.pending_head()->block_num; +} + +block_id_type controller::fork_db_pending_head_block_id()const { + return my->fork_db.pending_head()->id; +} + +time_point controller::fork_db_pending_head_block_time()const { + return my->fork_db.pending_head()->header.timestamp; +} + +account_name controller::fork_db_pending_head_block_producer()const { + return my->fork_db.pending_head()->header.producer; +} + time_point controller::pending_block_time()const { EOS_ASSERT( my->pending, block_validate_exception, "no pending block" ); @@ -1990,7 +2075,7 @@ const vector& controller::get_pending_trx_receipts()const { } uint32_t controller::last_irreversible_block_num() const { - return std::max(std::max(my->head->bft_irreversible_blocknum, my->head->dpos_irreversible_blocknum), my->snapshot_head_block); + return std::max(my->head->dpos_irreversible_blocknum, my->snapshot_head_block); } block_id_type controller::last_irreversible_block_id() const { @@ -2020,8 +2105,8 @@ signed_block_ptr controller::fetch_block_by_id( block_id_type id )const { } signed_block_ptr controller::fetch_block_by_number( uint32_t block_num )const { try { - auto blk_state = my->fork_db.get_block_in_current_chain_by_num( block_num ); - if( blk_state && blk_state->block ) { + auto blk_state = fetch_block_state_by_number( block_num ); + if( blk_state ) { return blk_state->block; } @@ -2034,14 +2119,30 @@ block_state_ptr controller::fetch_block_state_by_id( block_id_type id )const { } block_state_ptr controller::fetch_block_state_by_number( uint32_t block_num )const { try { - auto blk_state = my->fork_db.get_block_in_current_chain_by_num( 
block_num ); - return blk_state; + const auto& rev_blocks = my->reversible_blocks.get_index(); + auto objitr = rev_blocks.find(block_num); + + if( objitr == rev_blocks.end() ) + return block_state_ptr(); + + fc::datastream ds( objitr->packedblock.data(), objitr->packedblock.size() ); + block_header h; + fc::raw::unpack( ds, h ); + // Only need the block id to then look up the block state in fork database, so just unpack the block_header from the stored packed data. + // Avoid calling objitr->get_block() since that constructs a new signed_block in heap memory and unpacks the full signed_block from the stored packed data. + + return my->fork_db.get_block( h.id() ); } FC_CAPTURE_AND_RETHROW( (block_num) ) } block_id_type controller::get_block_id_for_num( uint32_t block_num )const { try { - auto blk_state = my->fork_db.get_block_in_current_chain_by_num( block_num ); - if( blk_state ) { - return blk_state->id; + const auto& rev_blocks = my->reversible_blocks.get_index(); + auto objitr = rev_blocks.find(block_num); + + if( objitr != rev_blocks.end() ) { + fc::datastream ds( objitr->packedblock.data(), objitr->packedblock.size() ); + block_header h; + fc::raw::unpack( ds, h ); + return h.id(); } auto signed_blk = my->blog.read_block_by_num(block_num); diff --git a/libraries/chain/fork_database.cpp b/libraries/chain/fork_database.cpp index 1a6518cfa1b..09f5568e162 100644 --- a/libraries/chain/fork_database.cpp +++ b/libraries/chain/fork_database.cpp @@ -13,6 +13,7 @@ namespace eosio { namespace chain { using boost::multi_index_container; using namespace boost::multi_index; + const uint32_t fork_database::supported_version = 1; struct by_block_id; struct by_block_num; @@ -23,27 +24,31 @@ namespace eosio { namespace chain { indexed_by< hashed_unique< tag, member, std::hash>, ordered_non_unique< tag, const_mem_fun >, - ordered_non_unique< tag, + ordered_unique< tag, composite_key< block_state, - member, - member + member, + member, + member, + member >, - composite_key_compare< std::less, std::greater > - >, - ordered_non_unique< tag, - composite_key< block_header_state, - member, - member, - member - >, - composite_key_compare< std::greater, std::greater, std::greater > + composite_key_compare< + std::greater, + std::greater, + std::greater, + std::less + > > > > fork_multi_index_type; + bool first_preferred( const block_header_state& lhs, const block_header_state& rhs ) { + return std::tie( lhs.dpos_irreversible_blocknum, lhs.block_num ) + > std::tie( rhs.dpos_irreversible_blocknum, rhs.block_num ); + } struct fork_database_impl { fork_multi_index_type index; + block_state_ptr root; // Only uses the block_header_state portion block_state_ptr head; fc::path datadir; }; @@ -57,48 +62,116 @@ namespace eosio { namespace chain { auto fork_db_dat = my->datadir / config::forkdb_filename; if( fc::exists( fork_db_dat ) ) { - string content; - fc::read_file_contents( fork_db_dat, content ); - - fc::datastream ds( content.data(), content.size() ); - unsigned_int size; fc::raw::unpack( ds, size ); - for( uint32_t i = 0, n = size.value; i < n; ++i ) { - block_state s; - fc::raw::unpack( ds, s ); - set( std::make_shared( move( s ) ) ); - } - block_id_type head_id; - fc::raw::unpack( ds, head_id ); + try { + string content; + fc::read_file_contents( fork_db_dat, content ); + + fc::datastream ds( content.data(), content.size() ); + + uint32_t version = 0; + fc::raw::unpack( ds, version ); + EOS_ASSERT( version == fork_database::supported_version, fork_database_exception, + "Unsupported version of ${filename}. 
Fork database version is ${version} while code supports version ${supported}",
+                        ("filename", config::forkdb_filename)("version", version)("supported", fork_database::supported_version) );
+
+            block_header_state bhs;
+            fc::raw::unpack( ds, bhs );
+            reset( bhs );
+
+            unsigned_int size; fc::raw::unpack( ds, size );
+            for( uint32_t i = 0, n = size.value; i < n; ++i ) {
+               block_state s;
+               fc::raw::unpack( ds, s );
+               for( const auto& receipt : s.block->transactions ) {
+                  if( receipt.trx.contains() ) {
+                     auto& pt = receipt.trx.get();
+                     s.trxs.push_back( std::make_shared( std::make_shared(pt) ) );
+                  }
+               }
+               add( std::make_shared( move( s ) ) );
+            }
+            block_id_type head_id;
+            fc::raw::unpack( ds, head_id );
+
+            if( my->root->id == head_id ) {
+               my->head = my->root;
+            } else {
+               my->head = get_block( head_id );
+               EOS_ASSERT( my->head, fork_database_exception,
+                           "could not find head while reconstructing fork database from file; ${filename} is likely corrupted",
+                           ("filename", config::forkdb_filename) );
+            }
+
+            auto candidate = my->index.get().begin();
+            if( candidate == my->index.get().end() || !(*candidate)->is_valid() ) {
+               EOS_ASSERT( my->head->id == my->root->id, fork_database_exception,
+                           "head not set to root despite no better option available; ${filename} is likely corrupted",
+                           ("filename", config::forkdb_filename) );
+            } else {
+               EOS_ASSERT( !first_preferred( **candidate, *my->head ), fork_database_exception,
+                           "head not set to best available option; ${filename} is likely corrupted",
+                           ("filename", config::forkdb_filename) );
+            }
+         } FC_CAPTURE_AND_RETHROW( (fork_db_dat) )

         fc::remove( fork_db_dat );
      }
   }

   void fork_database::close() {
-      if( my->index.size() == 0 ) return;
+      if( !my->root ) {
+         if( my->index.size() > 0 ) {
+            elog( "fork_database is in a bad state when closing; not writing out ${filename}", ("filename", config::forkdb_filename) );
+         }
+         return;
+      }

      auto fork_db_dat = my->datadir / config::forkdb_filename;
      std::ofstream out( fork_db_dat.generic_string().c_str(), std::ios::out | std::ios::binary | std::ofstream::trunc );
+      fc::raw::pack( out, fork_database::supported_version );
+      fc::raw::pack( out, *static_cast(&*my->root) );
      uint32_t num_blocks_in_fork_db = my->index.size();
      fc::raw::pack( out, unsigned_int{num_blocks_in_fork_db} );
-      for( const auto& s : my->index ) {
-         fc::raw::pack( out, *s );
+
+      const auto& indx = my->index.get();
+
+      auto unvalidated_itr = indx.rbegin();
+      auto unvalidated_end = boost::make_reverse_iterator( indx.lower_bound( false ) );
+
+      auto validated_itr = unvalidated_end;
+      auto validated_end = indx.rend();
+
+      for(  bool unvalidated_remaining = (unvalidated_itr != unvalidated_end),
+                 validated_remaining = (validated_itr != validated_end);
+
+            unvalidated_remaining || validated_remaining;
+
+            unvalidated_remaining = (unvalidated_itr != unvalidated_end),
+            validated_remaining = (validated_itr != validated_end)
+         )
+      {
+         auto itr = (validated_remaining ?
validated_itr : unvalidated_itr);
+
+         if( unvalidated_remaining && validated_remaining ) {
+            if( first_preferred( **unvalidated_itr, **validated_itr ) ) {
+               itr = unvalidated_itr;
+               ++unvalidated_itr;
+            } else {
+               ++validated_itr;
+            }
+         } else if( unvalidated_remaining ) {
+            ++unvalidated_itr;
+         } else {
+            ++validated_itr;
+         }
+
+         fc::raw::pack( out, *(*itr) );
      }

-      if( my->head )
+
+      if( my->head ) {
         fc::raw::pack( out, my->head->id );
-      else
-         fc::raw::pack( out, block_id_type() );
-
-      /// we don't normally indicate the head block as irreversible
-      /// we cannot normally prune the lib if it is the head block because
-      /// the next block needs to build off of the head block. We are exiting
-      /// now so we can prune this block as irreversible before exiting.
-      auto lib = my->head->dpos_irreversible_blocknum;
-      auto oldest = *my->index.get().begin();
-      if( oldest->block_num <= lib ) {
-         prune( oldest );
+      } else {
+         elog( "head not set in fork database; ${filename} will be corrupted", ("filename", config::forkdb_filename) );
      }

      my->index.clear();
@@ -108,64 +181,132 @@ namespace eosio { namespace chain {
      close();
   }

-   void fork_database::set( block_state_ptr s ) {
-      auto result = my->index.insert( s );
-      EOS_ASSERT( s->id == s->header.id(), fork_database_exception,
-                  "block state id (${id}) is different from block state header id (${hid})", ("id", string(s->id))("hid", string(s->header.id())) );
+   void fork_database::reset( const block_header_state& root_bhs ) {
+      my->index.clear();
+      my->root = std::make_shared();
+      static_cast(*my->root) = root_bhs;
+      my->root->validated = true;
+      my->head = my->root;
+   }

-      //FC_ASSERT( s->block_num == s->header.block_num() );
+   void fork_database::advance_root( const block_id_type& id ) {
+      EOS_ASSERT( my->root, fork_database_exception, "root not yet set" );

-      EOS_ASSERT( result.second, fork_database_exception, "unable to insert block state, duplicate state detected" );
-      if( !my->head ) {
-         my->head = s;
-      } else if( my->head->block_num < s->block_num ) {
-         my->head = s;
+      auto new_root = get_block( id );
+      EOS_ASSERT( new_root, fork_database_exception,
+                  "cannot advance root to a block that does not exist in the fork database" );
+      EOS_ASSERT( new_root->is_valid(), fork_database_exception,
+                  "cannot advance root to a block that has not yet been validated" );
+
+
+      vector blocks_to_remove;
+      for( auto b = new_root; b; ) {
+         blocks_to_remove.push_back( b->header.previous );
+         b = get_block( blocks_to_remove.back() );
+         EOS_ASSERT( b || blocks_to_remove.back() == my->root->id, fork_database_exception, "invariant violation: orphaned branch was present in fork database" );
+      }
+
+      // The new root block should be erased from the fork database index individually rather than with the remove method,
+      // because we do not want the blocks branching off of it to be removed from the fork database.
+ for( const auto& block_id : blocks_to_remove ) { + remove( block_id ); } + + new_root->block.reset(); // This would be clearing out the block pointer in the head block state iff id == my->head->id + new_root->trxs.clear(); + my->root = new_root; } - block_state_ptr fork_database::add( const block_state_ptr& n, bool skip_validate_previous ) { - EOS_ASSERT( n, fork_database_exception, "attempt to add null block state" ); - EOS_ASSERT( my->head, fork_db_block_not_found, "no head block set" ); + /* + void fork_database::retreat_head( const block_id_type& id ) { + if( my->head->id == id ) return; + + EOS_ASSERT( my->head->id != my->root->id, fork_database_exception, + "invalid id to retreat head to", ("id", string(id)) ); - if( !skip_validate_previous ) { - auto prior = my->index.find( n->block->previous ); - EOS_ASSERT( prior != my->index.end(), unlinkable_block_exception, - "unlinkable block", ("id", n->block->id())("previous", n->block->previous) ); + if( my->root->id == id ) { + my->index.clear(); + my->head = my->root; + return; } - auto inserted = my->index.insert(n); - EOS_ASSERT( inserted.second, fork_database_exception, "duplicate block added?" ); + vector blocks_to_remove; - my->head = *my->index.get().begin(); + auto s = my->head; + for( ; s && s->id == id; s = get_block( s->header.previous ) ) { + blocks_to_remove.push_back( s->id ); + } + EOS_ASSERT( s, fork_database_exception, + "invalid id to retreat head to", ("id", string(id)) ); - auto lib = my->head->dpos_irreversible_blocknum; - auto oldest = *my->index.get().begin(); + my->head = s; - if( oldest->block_num < lib ) { - prune( oldest ); + for( const auto& block_id : blocks_to_remove ) { + remove( block_id ); } - return n; } + */ - block_state_ptr fork_database::add( signed_block_ptr b, bool skip_validate_signee ) { - EOS_ASSERT( b, fork_database_exception, "attempt to add null block" ); - EOS_ASSERT( my->head, fork_db_block_not_found, "no head block set" ); - + block_header_state_ptr fork_database::get_block_header( const block_id_type& id )const { const auto& by_id_idx = my->index.get(); - auto existing = by_id_idx.find( b->id() ); - EOS_ASSERT( existing == by_id_idx.end(), fork_database_exception, "we already know about this block" ); - auto prior = by_id_idx.find( b->previous ); - EOS_ASSERT( prior != by_id_idx.end(), unlinkable_block_exception, "unlinkable block", ("id", string(b->id()))("previous", string(b->previous)) ); + if( my->root->id == id ) { + return my->root; + } - auto result = std::make_shared( **prior, move(b), skip_validate_signee ); - EOS_ASSERT( result, fork_database_exception , "fail to add new block state" ); - return add(result, true); + auto itr = my->index.find( id ); + if( itr != my->index.end() ) + return *itr; + + return block_header_state_ptr(); } + void fork_database::add( block_state_ptr n ) { + EOS_ASSERT( my->root, fork_database_exception, "root not yet set" ); + EOS_ASSERT( n, fork_database_exception, "attempt to add null block state" ); + + EOS_ASSERT( get_block_header( n->header.previous ), unlinkable_block_exception, + "unlinkable block", ("id", string(n->id))("previous", string(n->header.previous)) ); + + auto inserted = my->index.insert(n); + EOS_ASSERT( inserted.second, fork_database_exception, "duplicate block added" ); + + auto candidate = my->index.get().begin(); + if( (*candidate)->is_valid() ) { + my->head = *candidate; + } + } + + const block_state_ptr& fork_database::root()const { return my->root; } + const block_state_ptr& fork_database::head()const { return my->head; } + 
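+   // Returns the most preferred head candidate even if it has not been validated yet;
+   // falls back to the currently applied head when no unvalidated candidate is preferred.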
block_state_ptr fork_database::pending_head()const { + const auto& indx = my->index.get(); + + auto itr = indx.lower_bound( false ); + if( itr != indx.end() && !(*itr)->is_valid() ) { + if( first_preferred( **itr, *my->head ) ) + return *itr; + } + + return my->head; + } + + branch_type fork_database::fetch_branch( const block_id_type& h, uint32_t trim_after_block_num )const { + branch_type result; + for( auto s = get_block(h); s; s = get_block( s->header.previous ) ) { + if( s->block_num <= trim_after_block_num ) + result.push_back( s ); + } + + return result; + } + /** * Given two head blocks, return two branches of the fork graph that * end with a common ancestor (same prior block) @@ -209,71 +350,47 @@ namespace eosio { namespace chain { return result; } /// fetch_branch_from - /// remove all of the invalid forks built of this id including this id + /// remove all of the invalid forks built off of this id including this id void fork_database::remove( const block_id_type& id ) { vector remove_queue{id}; + auto& previdx = my->index.get(); + const auto head_id = my->head->id; for( uint32_t i = 0; i < remove_queue.size(); ++i ) { - auto itr = my->index.find( remove_queue[i] ); - if( itr != my->index.end() ) - my->index.erase(itr); + EOS_ASSERT( remove_queue[i] != head_id, fork_database_exception, + "removing the block and its descendants would remove the current head block" ); - auto& previdx = my->index.get(); - auto previtr = previdx.lower_bound(remove_queue[i]); + auto previtr = previdx.lower_bound( remove_queue[i] ); while( previtr != previdx.end() && (*previtr)->header.previous == remove_queue[i] ) { remove_queue.push_back( (*previtr)->id ); ++previtr; } } - //wdump((my->index.size())); - my->head = *my->index.get().begin(); - } - void fork_database::set_validity( const block_state_ptr& h, bool valid ) { - if( !valid ) { - remove( h->id ); - } else { - /// remove older than irreversible and mark block as valid - h->validated = true; + for( const auto& block_id : remove_queue ) { + auto itr = my->index.find( block_id ); + if( itr != my->index.end() ) + my->index.erase(itr); } } - void fork_database::mark_in_current_chain( const block_state_ptr& h, bool in_current_chain ) { - if( h->in_current_chain == in_current_chain ) - return; + void fork_database::mark_valid( const block_state_ptr& h ) { + if( h->validated ) return; auto& by_id_idx = my->index.get(); - auto itr = by_id_idx.find( h->id ); - EOS_ASSERT( itr != by_id_idx.end(), fork_db_block_not_found, "could not find block in fork database" ); - - by_id_idx.modify( itr, [&]( auto& bsp ) { // Need to modify this way rather than directly so that Boost MultiIndex can re-sort - bsp->in_current_chain = in_current_chain; - }); - } - - void fork_database::prune( const block_state_ptr& h ) { - auto num = h->block_num; - auto& by_bn = my->index.get(); - auto bni = by_bn.begin(); - while( bni != by_bn.end() && (*bni)->block_num < num ) { - prune( *bni ); - bni = by_bn.begin(); - } + auto itr = by_id_idx.find( h->id ); + EOS_ASSERT( itr != by_id_idx.end(), fork_database_exception, + "block state not in fork database; cannot mark as valid", + ("id", h->id) ); - auto itr = my->index.find( h->id ); - if( itr != my->index.end() ) { - irreversible(*itr); - my->index.erase(itr); - } + by_id_idx.modify( itr, []( block_state_ptr& bsp ) { + bsp->validated = true; + } ); - auto& numidx = my->index.get(); - auto nitr = numidx.lower_bound( num ); - while( nitr != numidx.end() && (*nitr)->block_num == num ) { - auto itr_to_remove = nitr; - ++nitr; - auto id 
= (*itr_to_remove)->id; - remove( id ); + auto candidate = my->index.get().begin(); + if( first_preferred( **candidate, *my->head ) ) { + my->head = *candidate; } } @@ -284,79 +401,4 @@ namespace eosio { namespace chain { return block_state_ptr(); } - block_state_ptr fork_database::get_block_in_current_chain_by_num( uint32_t n )const { - const auto& numidx = my->index.get(); - auto nitr = numidx.lower_bound( n ); - // following asserts removed so null can be returned - //FC_ASSERT( nitr != numidx.end() && (*nitr)->block_num == n, - // "could not find block in fork database with block number ${block_num}", ("block_num", n) ); - //FC_ASSERT( (*nitr)->in_current_chain == true, - // "block (with block number ${block_num}) found in fork database is not in the current chain", ("block_num", n) ); - if( nitr == numidx.end() || (*nitr)->block_num != n || (*nitr)->in_current_chain != true ) - return block_state_ptr(); - return *nitr; - } - - /* - void fork_database::add( const header_confirmation& c ) { - auto b = get_block( c.block_id ); - EOS_ASSERT( b, fork_db_block_not_found, "unable to find block id ${id}", ("id",c.block_id)); - b->add_confirmation( c ); - - if( b->bft_irreversible_blocknum < b->block_num && - b->confirmations.size() >= ((b->active_schedule.producers.size() * 2) / 3 + 1) ) { - set_bft_irreversible( c.block_id ); - } - } - */ - - /** - * This method will set this block as being BFT irreversible and will update - * all blocks which build off of it to have the same bft_irb if their existing - * bft irb is less than this block num. - * - * This will require a search over all forks - */ -#if 0 - void fork_database::set_bft_irreversible( block_id_type id ) { - auto& idx = my->index.get(); - auto itr = idx.find(id); - uint32_t block_num = (*itr)->block_num; - idx.modify( itr, [&]( auto& bsp ) { - bsp->bft_irreversible_blocknum = bsp->block_num; - }); - - /** to prevent stack-overflow, we perform a bredth-first traversal of the - * fork database. At each stage we iterate over the leafs from the prior stage - * and find all nodes that link their previous. If we update the bft lib then we - * add it to a queue for the next layer. This lambda takes one layer and returns - * all block ids that need to be iterated over for next layer. 
- */ - auto update = [&]( const vector& in ) { - vector updated; - - for( const auto& i : in ) { - auto& pidx = my->index.get(); - auto pitr = pidx.lower_bound( i ); - auto epitr = pidx.upper_bound( i ); - while( pitr != epitr ) { - pidx.modify( pitr, [&]( auto& bsp ) { - if( bsp->bft_irreversible_blocknum < block_num ) { - bsp->bft_irreversible_blocknum = block_num; - updated.push_back( bsp->id ); - } - }); - ++pitr; - } - } - return updated; - }; - - vector queue{id}; - while( queue.size() ) { - queue = update( queue ); - } - } -#endif - } } /// eosio::chain diff --git a/libraries/chain/include/eosio/chain/block_header_state.hpp b/libraries/chain/include/eosio/chain/block_header_state.hpp index 6319355a936..2ee31e6664d 100644 --- a/libraries/chain/include/eosio/chain/block_header_state.hpp +++ b/libraries/chain/include/eosio/chain/block_header_state.hpp @@ -51,7 +51,6 @@ struct block_header_state { signed_block_header header; uint32_t dpos_proposed_irreversible_blocknum = 0; uint32_t dpos_irreversible_blocknum = 0; - uint32_t bft_irreversible_blocknum = 0; uint32_t pending_schedule_lib_num = 0; /// last irr block num digest_type pending_schedule_hash; producer_schedule_type pending_schedule; @@ -67,23 +66,10 @@ struct block_header_state { block_header_state next( const signed_block_header& h, bool skip_validate_signee = false )const; - //void set_new_producers( producer_schedule_type next_pending ); - //void set_confirmed( uint16_t num_prev_blocks ); - //void add_confirmation( const header_confirmation& c ); - //bool maybe_promote_pending(); - - bool has_pending_producers()const { return pending_schedule.producers.size(); } uint32_t calc_dpos_last_irreversible( account_name producer_of_next_block )const; bool is_active_producer( account_name n )const; - /* - block_timestamp_type get_slot_time( uint32_t slot_num )const; - uint32_t get_slot_at_time( block_timestamp_type t )const; - producer_key get_scheduled_producer( uint32_t slot_num )const; - uint32_t producer_participation_rate()const; - */ - producer_key get_scheduled_producer( block_timestamp_type t )const; const block_id_type& prev()const { return header.previous; } digest_type sig_digest()const; @@ -92,13 +78,12 @@ struct block_header_state { void verify_signee(const public_key_type& signee)const; }; - +using block_header_state_ptr = std::shared_ptr; } } /// namespace eosio::chain FC_REFLECT( eosio::chain::block_header_state, - (id)(block_num)(header)(dpos_proposed_irreversible_blocknum)(dpos_irreversible_blocknum)(bft_irreversible_blocknum) - (pending_schedule_lib_num)(pending_schedule_hash) + (id)(block_num)(header)(dpos_proposed_irreversible_blocknum)(dpos_irreversible_blocknum) (pending_schedule_lib_num)(pending_schedule_hash) (pending_schedule)(active_schedule)(blockroot_merkle) (producer_to_last_produced)(producer_to_last_implied_irb)(block_signing_key) (confirm_count)(confirmations) ) diff --git a/libraries/chain/include/eosio/chain/block_state.hpp b/libraries/chain/include/eosio/chain/block_state.hpp index 98fb1594299..94defc13cfd 100644 --- a/libraries/chain/include/eosio/chain/block_state.hpp +++ b/libraries/chain/include/eosio/chain/block_state.hpp @@ -31,10 +31,11 @@ namespace eosio { namespace chain { block_state() = default; - /// weak_ptr prev_block_state.... 
+ bool is_valid()const { return validated; } + + signed_block_ptr block; bool validated = false; - bool in_current_chain = false; /// this data is redundant with the data stored in block, but facilitates /// recapturing transactions when we pop a block @@ -45,4 +46,4 @@ namespace eosio { namespace chain { } } /// namespace eosio::chain -FC_REFLECT_DERIVED( eosio::chain::block_state, (eosio::chain::block_header_state), (block)(validated)(in_current_chain) ) +FC_REFLECT_DERIVED( eosio::chain::block_state, (eosio::chain::block_header_state), (block)(validated) ) diff --git a/libraries/chain/include/eosio/chain/config.hpp b/libraries/chain/include/eosio/chain/config.hpp index 0d5ff9e9469..a780cf42832 100644 --- a/libraries/chain/include/eosio/chain/config.hpp +++ b/libraries/chain/include/eosio/chain/config.hpp @@ -18,7 +18,7 @@ const static auto default_reversible_cache_size = 340*1024*1024ll;/// 1MB * 340 const static auto default_reversible_guard_size = 2*1024*1024ll;/// 1MB * 340 blocks based on 21 producer BFT delay const static auto default_state_dir_name = "state"; -const static auto forkdb_filename = "forkdb.dat"; +const static auto forkdb_filename = "fork_db.dat"; const static auto default_state_size = 1*1024*1024*1024ll; const static auto default_state_guard_size = 128*1024*1024ll; diff --git a/libraries/chain/include/eosio/chain/controller.hpp b/libraries/chain/include/eosio/chain/controller.hpp index 00e9f54d8db..08b1bc6b2ba 100644 --- a/libraries/chain/include/eosio/chain/controller.hpp +++ b/libraries/chain/include/eosio/chain/controller.hpp @@ -187,6 +187,11 @@ namespace eosio { namespace chain { time_point fork_db_head_block_time()const; account_name fork_db_head_block_producer()const; + uint32_t fork_db_pending_head_block_num()const; + block_id_type fork_db_pending_head_block_id()const; + time_point fork_db_pending_head_block_time()const; + account_name fork_db_pending_head_block_producer()const; + time_point pending_block_time()const; account_name pending_block_producer()const; public_key_type pending_block_signing_key()const; diff --git a/libraries/chain/include/eosio/chain/fork_database.hpp b/libraries/chain/include/eosio/chain/fork_database.hpp index 7473de2d582..c07dfa4733f 100644 --- a/libraries/chain/include/eosio/chain/fork_database.hpp +++ b/libraries/chain/include/eosio/chain/fork_database.hpp @@ -27,26 +27,49 @@ namespace eosio { namespace chain { void close(); - block_state_ptr get_block(const block_id_type& id)const; - block_state_ptr get_block_in_current_chain_by_num( uint32_t n )const; -// vector get_blocks_by_number(uint32_t n)const; + block_header_state_ptr get_block_header( const block_id_type& id )const; + block_state_ptr get_block( const block_id_type& id )const; /** - * Provides a "valid" blockstate upon which other forks may build. + * Purges any existing blocks from the fork database and resets the root block_header_state to the provided value. + * The head will also be reset to point to the root. */ - void set( block_state_ptr s ); + void reset( const block_header_state& root_bhs ); - /** this method will attempt to append the block to an existing - * block_state and will return a pointer to the new block state or - * throw on error. + /** + * Advance root block forward to some other block in the tree. 
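+       *  Blocks that do not build off of the new root are pruned as part of advancing it.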
*/ - block_state_ptr add( signed_block_ptr b, bool skip_validate_signee ); - block_state_ptr add( const block_state_ptr& next_block, bool skip_validate_previous ); - void remove( const block_id_type& id ); + void advance_root( const block_id_type& id ); + + /** + * Retreat head from the current head to some ancestor block within the tree. + * Also remove all blocks building off of the new head. + */ + //void retreat_head( const block_id_type& id ); + + /** + * Add block state to fork database. + * Must link to existing block in fork database or the root. + */ + void add( block_state_ptr next_block ); //void add( const header_confirmation& c ); + void remove( const block_id_type& id ); + + const block_state_ptr& root()const; const block_state_ptr& head()const; + block_state_ptr pending_head()const; + + /** + * Returns the sequence of block states resulting from trimming the branch from the + * root block (exclusive) to the block with an id of `h` (inclusive) by removing any + * block states corresponding to block numbers greater than `trim_after_block_num`. + * + * The order of the sequence is in descending block number order. + * A block with an id of `h` must exist in the fork database otherwise this method will throw an exception. + */ + branch_type fetch_branch( const block_id_type& h, uint32_t trim_after_block_num = std::numeric_limits::max() )const; /** * Given two head blocks, return two branches of the fork graph that @@ -56,19 +79,9 @@ namespace eosio { namespace chain { const block_id_type& second )const; - /** - * If the block is invalid, it will be removed. If it is valid, then blocks older - * than the LIB are pruned after emitting irreversible signal. - */ - void set_validity( const block_state_ptr& h, bool valid ); - void mark_in_current_chain( const block_state_ptr& h, bool in_current_chain ); - void prune( const block_state_ptr& h ); + void mark_valid( const block_state_ptr& h ); - /** - * This signal is emited when a block state becomes irreversible, once irreversible - * it is removed unless it is the head block. - */ - signal irreversible; + static const uint32_t supported_version; private: //void set_bft_irreversible( block_id_type id ); diff --git a/plugins/bnet_plugin/bnet_plugin.cpp b/plugins/bnet_plugin/bnet_plugin.cpp index b25631cbabc..3f887bb51a2 100644 --- a/plugins/bnet_plugin/bnet_plugin.cpp +++ b/plugins/bnet_plugin/bnet_plugin.cpp @@ -572,7 +572,7 @@ namespace eosio { _app_ios.post( [self = shared_from_this(),callback]{ auto& control = app().get_plugin().chain(); auto lib = control.last_irreversible_block_num(); - auto head = control.fork_db_head_block_id(); + auto head = control.fork_db_pending_head_block_id(); auto head_num = block_header::num_from_id(head); @@ -918,7 +918,7 @@ namespace eosio { * the connection from being closed. 
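+       * (Rescheduling the read through the session's _strand keeps it serialized with the
+       *  connection's other handlers.)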
*/ void wait_on_app() { - app().get_io_service().post( + app().get_io_service().post( boost::asio::bind_executor( _strand, [self=shared_from_this()]{ self->do_read(); } ) ); } diff --git a/plugins/chain_plugin/chain_plugin.cpp b/plugins/chain_plugin/chain_plugin.cpp index 91d130d3ec5..501d4099645 100644 --- a/plugins/chain_plugin/chain_plugin.cpp +++ b/plugins/chain_plugin/chain_plugin.cpp @@ -1032,12 +1032,12 @@ read_only::get_info_results read_only::get_info(const read_only::get_info_params return { itoh(static_cast(app().version())), db.get_chain_id(), - db.fork_db_head_block_num(), + db.fork_db_pending_head_block_num(), db.last_irreversible_block_num(), db.last_irreversible_block_id(), - db.fork_db_head_block_id(), - db.fork_db_head_block_time(), - db.fork_db_head_block_producer(), + db.fork_db_pending_head_block_id(), + db.fork_db_pending_head_block_time(), + db.fork_db_pending_head_block_producer(), rm.get_virtual_block_cpu_limit(), rm.get_virtual_block_net_limit(), rm.get_block_cpu_limit(), @@ -1099,7 +1099,7 @@ uint64_t convert_to_type(const string& str, const string& desc) { try { return boost::lexical_cast(str.c_str(), str.size()); } catch( ... ) { } - + try { auto trimmed_str = str; boost::trim(trimmed_str); @@ -1113,7 +1113,7 @@ uint64_t convert_to_type(const string& str, const string& desc) { return symb.value(); } catch( ... ) { } } - + try { return ( eosio::chain::string_to_symbol( 0, str.c_str() ) >> 8 ); } catch( ... ) { diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 0c00495c48d..656ff01f2cf 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -764,7 +764,7 @@ namespace eosio { void connection::blk_send_branch() { controller& cc = my_impl->chain_plug->chain(); - uint32_t head_num = cc.fork_db_head_block_num(); + uint32_t head_num = cc.fork_db_pending_head_block_num(); notice_message note; note.known_blocks.mode = normal; note.known_blocks.pending = 0; @@ -791,7 +791,7 @@ namespace eosio { } else { lib_id = cc.last_irreversible_block_id(); } - head_id = cc.fork_db_head_block_id(); + head_id = cc.fork_db_pending_head_block_id(); } catch (const assert_exception& ex) { elog( "unable to retrieve block info: ${n} for ${p}",("n",ex.to_string())("p",peer_name())); @@ -1219,7 +1219,7 @@ namespace eosio { bool fhset = c->fork_head != block_id_type(); fc_dlog(logger, "fork_head_num = ${fn} fork_head set = ${s}", ("fn", c->fork_head_num)("s", fhset)); - return c->fork_head != block_id_type() && c->fork_head_num < chain_plug->chain().fork_db_head_block_num(); + return c->fork_head != block_id_type() && c->fork_head_num < chain_plug->chain().fork_db_pending_head_block_num(); } return state != in_sync; } @@ -1240,14 +1240,14 @@ namespace eosio { bool sync_manager::sync_required() { fc_dlog(logger, "last req = ${req}, last recv = ${recv} known = ${known} our head = ${head}", - ("req",sync_last_requested_num)("recv",sync_next_expected_num)("known",sync_known_lib_num)("head",chain_plug->chain().fork_db_head_block_num())); + ("req",sync_last_requested_num)("recv",sync_next_expected_num)("known",sync_known_lib_num)("head",chain_plug->chain().fork_db_pending_head_block_num())); return( sync_last_requested_num < sync_known_lib_num || - chain_plug->chain().fork_db_head_block_num() < sync_last_requested_num ); + chain_plug->chain().fork_db_pending_head_block_num() < sync_last_requested_num ); } void sync_manager::request_next_chunk( const connection_ptr& conn ) { - uint32_t head_block = 
chain_plug->chain().fork_db_head_block_num(); + uint32_t head_block = chain_plug->chain().fork_db_pending_head_block_num(); if (head_block < sync_last_requested_num && source && source->current()) { fc_ilog(logger, "ignoring request, head is ${h} last req = ${r} source is ${p}", @@ -1345,7 +1345,7 @@ namespace eosio { if (!sync_required()) { uint32_t bnum = chain_plug->chain().last_irreversible_block_num(); - uint32_t hnum = chain_plug->chain().fork_db_head_block_num(); + uint32_t hnum = chain_plug->chain().fork_db_pending_head_block_num(); fc_dlog( logger, "We are already caught up, my irr = ${b}, head = ${h}, target = ${t}", ("b",bnum)("h",hnum)("t",target)); return; @@ -1392,8 +1392,8 @@ namespace eosio { // //----------------------------- - uint32_t head = cc.fork_db_head_block_num(); - block_id_type head_id = cc.fork_db_head_block_id(); + uint32_t head = cc.fork_db_pending_head_block_num(); + block_id_type head_id = cc.fork_db_pending_head_block_id(); if (head_id == msg.head_id) { fc_dlog(logger, "sync check state 0"); // notify peer of our pending transactions @@ -2692,7 +2692,7 @@ namespace eosio { controller& cc = my_impl->chain_plug->chain(); hello.head_id = fc::sha256(); hello.last_irreversible_block_id = fc::sha256(); - hello.head_num = cc.fork_db_head_block_num(); + hello.head_num = cc.fork_db_pending_head_block_num(); hello.last_irreversible_block_num = cc.last_irreversible_block_num(); if( hello.last_irreversible_block_num ) { try { diff --git a/unittests/forked_tests.cpp b/unittests/forked_tests.cpp index 9ebab8e4c3b..8e06798b0ea 100644 --- a/unittests/forked_tests.cpp +++ b/unittests/forked_tests.cpp @@ -22,8 +22,8 @@ public_key_type get_public_key( name keyname, string role ){ } void push_blocks( tester& from, tester& to ) { - while( to.control->fork_db_head_block_num() < from.control->fork_db_head_block_num() ) { - auto fb = from.control->fetch_block_by_number( to.control->fork_db_head_block_num()+1 ); + while( to.control->fork_db_pending_head_block_num() < from.control->fork_db_pending_head_block_num() ) { + auto fb = from.control->fetch_block_by_number( to.control->fork_db_pending_head_block_num()+1 ); to.push_block( fb ); } } @@ -356,21 +356,23 @@ BOOST_AUTO_TEST_CASE( read_modes ) try { auto res = c.set_producers( {N(dan),N(sam),N(pam)} ); c.produce_blocks(200); auto head_block_num = c.control->head_block_num(); + auto last_irreversible_block_num = c.control->last_irreversible_block_num(); tester head(true, db_read_mode::HEAD); push_blocks(c, head); - BOOST_REQUIRE_EQUAL(head_block_num, head.control->fork_db_head_block_num()); - BOOST_REQUIRE_EQUAL(head_block_num, head.control->head_block_num()); + BOOST_CHECK_EQUAL(head_block_num, head.control->fork_db_head_block_num()); + BOOST_CHECK_EQUAL(head_block_num, head.control->head_block_num()); tester read_only(false, db_read_mode::READ_ONLY); push_blocks(c, read_only); - BOOST_REQUIRE_EQUAL(head_block_num, read_only.control->fork_db_head_block_num()); - BOOST_REQUIRE_EQUAL(head_block_num, read_only.control->head_block_num()); + BOOST_CHECK_EQUAL(head_block_num, read_only.control->fork_db_head_block_num()); + BOOST_CHECK_EQUAL(head_block_num, read_only.control->head_block_num()); tester irreversible(true, db_read_mode::IRREVERSIBLE); push_blocks(c, irreversible); - BOOST_REQUIRE_EQUAL(head_block_num, irreversible.control->fork_db_head_block_num()); - BOOST_REQUIRE_EQUAL(head_block_num - 49, irreversible.control->head_block_num()); + BOOST_CHECK_EQUAL(head_block_num, irreversible.control->fork_db_pending_head_block_num()); 
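+   // In IRREVERSIBLE mode the fork database pending head still tracks the network head,
+   // while the applied chain head (and fork_db_head) lag at the last irreversible block: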
+   BOOST_CHECK_EQUAL(last_irreversible_block_num, irreversible.control->fork_db_head_block_num());
+   BOOST_CHECK_EQUAL(last_irreversible_block_num, irreversible.control->head_block_num());
 } FC_LOG_AND_RETHROW()

From 07f75e2feb00866411d52d7e3dd82889e59e467a Mon Sep 17 00:00:00 2001
From: arhag
Date: Wed, 16 Jan 2019 21:24:16 -0500
Subject: [PATCH 003/680] improve handling of pending trx receipts in
 history_plugin

---
 plugins/history_plugin/history_plugin.cpp | 14 ++++----------
 1 file changed, 4 insertions(+), 10 deletions(-)

diff --git a/plugins/history_plugin/history_plugin.cpp b/plugins/history_plugin/history_plugin.cpp
index 4371a910388..a44321ca0bc 100644
--- a/plugins/history_plugin/history_plugin.cpp
+++ b/plugins/history_plugin/history_plugin.cpp
@@ -493,16 +493,10 @@ namespace eosio {
                  ++itr;
               }
-
-               const vector* receipts = nullptr;
               auto blk = chain.fetch_block_by_number( result.block_num );
-               if( blk ) {
-                  receipts = &blk->transactions;
-               } else if( chain.is_building_block() ) { // still in pending
-                  receipts = &chain.get_pending_trx_receipts();
-               }
-               if( receipts ) {
-                  for (const auto &receipt: *receipts) {
+               if( blk || chain.is_building_block() ) {
+                  const vector& receipts = blk ? blk->transactions : chain.get_pending_trx_receipts();
+                  for (const auto &receipt: receipts) {
                     if (receipt.trx.contains()) {
                        auto &pt = receipt.trx.get();
                        if (pt.id() == result.id) {
@@ -519,7 +513,7 @@ namespace eosio {
                              break;
                           }
                        }
-                    }
+                     }
                  }
              } else {
                 auto blk = chain.fetch_block_by_number(*p.block_num_hint);

From 71fc3a2deabf9f1b5af370c010e96f30d80c6aa8 Mon Sep 17 00:00:00 2001
From: arhag
Date: Thu, 17 Jan 2019 11:18:21 -0500
Subject: [PATCH 004/680] fix bug in fork_database::advance_root which caused
 non-deterministic failures in plugins that do async processing such as
 mongodb and bnet

---
 libraries/chain/fork_database.cpp             | 37 ++-----------------
 .../include/eosio/chain/fork_database.hpp     |  7 ----
 2 files changed, 4 insertions(+), 40 deletions(-)

diff --git a/libraries/chain/fork_database.cpp b/libraries/chain/fork_database.cpp
index 09f5568e162..a9387168504 100644
--- a/libraries/chain/fork_database.cpp
+++ b/libraries/chain/fork_database.cpp
@@ -215,41 +215,12 @@ namespace eosio { namespace chain {
         remove( block_id );
      }

-      new_root->block.reset(); // This would be clearing out the block pointer in the head block state iff id == my->head->id
-      new_root->trxs.clear();
-      my->root = new_root;
-   }
-
-   /*
-   void fork_database::retreat_head( const block_id_type& id ) {
-      if( my->head->id == id ) return;
-
-      EOS_ASSERT( my->head->id != my->root->id, fork_database_exception,
-                  "invalid id to retreat head to", ("id", string(id)) );

-      if( my->root->id == id ) {
-         my->index.clear();
-         my->head = my->root;
-         return;
      }

-      vector blocks_to_remove;

-      auto s = my->head;
-      for( ; s && s->id == id; s = get_block( s->header.previous ) ) {
-         blocks_to_remove.push_back( s->id );
-      }
-      EOS_ASSERT( s, fork_database_exception,
-                  "invalid id to retreat head to", ("id", string(id)) );

-      my->head = s;

-      for( const auto& block_id : blocks_to_remove ) {
-         remove( block_id );
-      }
+      // Even though fork database no longer needs block or trxs when a block state becomes a root of the tree,
+      // avoid mutating the block state at all, for example clearing the block shared pointer, because other
+      // parts of the code which run asynchronously (e.g. mongo_db_plugin) may later expect it to remain unmodified.
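+      // (In particular the new root keeps its signed block and transaction metadata intact.)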
+ my->root = new_root; } - */ block_header_state_ptr fork_database::get_block_header( const block_id_type& id )const { const auto& by_id_idx = my->index.get(); diff --git a/libraries/chain/include/eosio/chain/fork_database.hpp b/libraries/chain/include/eosio/chain/fork_database.hpp index c07dfa4733f..15a26d3a3fc 100644 --- a/libraries/chain/include/eosio/chain/fork_database.hpp +++ b/libraries/chain/include/eosio/chain/fork_database.hpp @@ -41,12 +41,6 @@ namespace eosio { namespace chain { */ void advance_root( const block_id_type& id ); - /** - * Retreat head from the current head to some ancestor block within the tree. - * Also remove all blocks building off of the new head. - */ - //void retreat_head( const block_id_type& id ); - /** * Add block state to fork database. * Must link to existing block in fork database or the root. @@ -84,7 +78,6 @@ namespace eosio { namespace chain { static const uint32_t supported_version; private: - //void set_bft_irreversible( block_id_type id ); unique_ptr my; }; From b7b4603a36279ebacd250b5949453de204bab451 Mon Sep 17 00:00:00 2001 From: arhag Date: Thu, 17 Jan 2019 11:55:36 -0500 Subject: [PATCH 005/680] add magic number to start of fork_db.dat file --- libraries/chain/fork_database.cpp | 55 ++++++++++++++----- .../include/eosio/chain/fork_database.hpp | 5 +- 2 files changed, 45 insertions(+), 15 deletions(-) diff --git a/libraries/chain/fork_database.cpp b/libraries/chain/fork_database.cpp index a9387168504..1aec473b214 100644 --- a/libraries/chain/fork_database.cpp +++ b/libraries/chain/fork_database.cpp @@ -13,7 +13,15 @@ namespace eosio { namespace chain { using boost::multi_index_container; using namespace boost::multi_index; - const uint32_t fork_database::supported_version = 1; + const uint32_t fork_database::magic_number = 0x30510FDB; + + const uint32_t fork_database::min_supported_version = 1; + const uint32_t fork_database::max_supported_version = 1; + + /** + * History: + * Version 1: initial version of the new refactored fork database portable format + */ struct by_block_id; struct by_block_num; @@ -68,11 +76,26 @@ namespace eosio { namespace chain { fc::datastream ds( content.data(), content.size() ); + // validate totem + uint32_t totem = 0; + fc::raw::unpack( ds, totem ); + EOS_ASSERT( totem == magic_number, fork_database_exception, + "Fork database file '${filename}' has unexpected magic number!", + ("filename", fork_db_dat.generic_string()) + ); + + // validate version uint32_t version = 0; fc::raw::unpack( ds, version ); - EOS_ASSERT( version == fork_database::supported_version, fork_database_exception, - "Unsupported version of ${filename}. Fork database version is ${version} while code supports version ${supported}", - ("filename", config::forkdb_filename)("version", version)("supported", fork_database::supported_version) ); + EOS_ASSERT( version >= min_supported_version && version <= max_supported_version, + fork_database_exception, + "Unsupported version of fork database file '${filename}'. 
" + "Fork database version is ${version} while code supports version(s) [${min},${max}]", + ("filename", fork_db_dat.generic_string()) + ("version", version) + ("min", min_supported_version) + ("max", max_supported_version) + ); block_header_state bhs; fc::raw::unpack( ds, bhs ); @@ -98,19 +121,19 @@ namespace eosio { namespace chain { } else { my->head = get_block( head_id ); EOS_ASSERT( my->head, fork_database_exception, - "could not find head while reconstructing fork database from file; ${filename} is likely corrupted", - ("filename", config::forkdb_filename) ); + "could not find head while reconstructing fork database from file; '${filename}' is likely corrupted", + ("filename", fork_db_dat.generic_string()) ); } auto candidate = my->index.get().begin(); if( candidate == my->index.get().end() || !(*candidate)->is_valid() ) { EOS_ASSERT( my->head->id == my->root->id, fork_database_exception, - "head not set to root despite no better option available; ${filename} is likely corrupted", - ("filename", config::forkdb_filename) ); + "head not set to root despite no better option available; '${filename}' is likely corrupted", + ("filename", fork_db_dat.generic_string()) ); } else { EOS_ASSERT( !first_preferred( **candidate, *my->head ), fork_database_exception, - "head not set to best available option available; ${filename} is likely corrupted", - ("filename", config::forkdb_filename) ); + "head not set to best available option available; '${filename}' is likely corrupted", + ("filename", fork_db_dat.generic_string()) ); } } FC_CAPTURE_AND_RETHROW( (fork_db_dat) ) @@ -119,16 +142,19 @@ namespace eosio { namespace chain { } void fork_database::close() { + auto fork_db_dat = my->datadir / config::forkdb_filename; + if( !my->root ) { if( my->index.size() > 0 ) { - elog( "fork_database is in a bad state when closing; not writing out ${filename}", ("filename", config::forkdb_filename) ); + elog( "fork_database is in a bad state when closing; not writing out '${filename}'", + ("filename", fork_db_dat.generic_string()) ); } return; } - auto fork_db_dat = my->datadir / config::forkdb_filename; std::ofstream out( fork_db_dat.generic_string().c_str(), std::ios::out | std::ios::binary | std::ofstream::trunc ); - fc::raw::pack( out, fork_database::supported_version ); + fc::raw::pack( out, magic_number ); + fc::raw::pack( out, max_supported_version ); fc::raw::pack( out, *static_cast(&*my->root) ); uint32_t num_blocks_in_fork_db = my->index.size(); fc::raw::pack( out, unsigned_int{num_blocks_in_fork_db} ); @@ -171,7 +197,8 @@ namespace eosio { namespace chain { if( my->head ) { fc::raw::pack( out, my->head->id ); } else { - elog( "head not set in fork database; ${filename} will be corrupted", ("filename", config::forkdb_filename) ); + elog( "head not set in fork database; '${filename}' will be corrupted", + ("filename", fork_db_dat.generic_string()) ); } my->index.clear(); diff --git a/libraries/chain/include/eosio/chain/fork_database.hpp b/libraries/chain/include/eosio/chain/fork_database.hpp index 15a26d3a3fc..da91255d342 100644 --- a/libraries/chain/include/eosio/chain/fork_database.hpp +++ b/libraries/chain/include/eosio/chain/fork_database.hpp @@ -75,7 +75,10 @@ namespace eosio { namespace chain { void mark_valid( const block_state_ptr& h ); - static const uint32_t supported_version; + static const uint32_t magic_number; + + static const uint32_t min_supported_version; + static const uint32_t max_supported_version; private: unique_ptr my; From 5961a38c7abb418eae4342c1f8a6d5e92eb61170 Mon Sep 17 
00:00:00 2001 From: arhag Date: Wed, 16 Jan 2019 21:24:16 -0500 Subject: [PATCH 006/680] improve handling of pending trx receipts in history_plugin --- plugins/history_plugin/history_plugin.cpp | 14 ++++---------- 1 file changed, 4 insertions(+), 10 deletions(-) diff --git a/plugins/history_plugin/history_plugin.cpp b/plugins/history_plugin/history_plugin.cpp index 4371a910388..a44321ca0bc 100644 --- a/plugins/history_plugin/history_plugin.cpp +++ b/plugins/history_plugin/history_plugin.cpp @@ -493,16 +493,10 @@ namespace eosio { ++itr; } - - const vector* receipts = nullptr; auto blk = chain.fetch_block_by_number( result.block_num ); - if( blk ) { - receipts = &blk->transactions; - } else if( chain.is_building_block() ) { // still in pending - receipts = &chain.get_pending_trx_receipts(); - } - if( receipts ) { - for (const auto &receipt: *receipts) { + if( blk || chain.is_building_block() ) { + const vector& receipts = blk ? blk->transactions : chain.get_pending_trx_receipts(); + for (const auto &receipt: receipts) { if (receipt.trx.contains()) { auto &pt = receipt.trx.get(); if (pt.id() == result.id) { @@ -519,7 +513,7 @@ namespace eosio { break; } } - } + } } } else { auto blk = chain.fetch_block_by_number(*p.block_num_hint); From 2cb4dbc8825af74f45c18f5b35d030d2888bfd3c Mon Sep 17 00:00:00 2001 From: arhag Date: Thu, 17 Jan 2019 12:12:24 -0500 Subject: [PATCH 007/680] cleanup unused code --- libraries/chain/block_header_state.cpp | 26 -------- libraries/chain/fork_database.cpp | 62 ------------------- .../include/eosio/chain/block_header.hpp | 7 --- .../eosio/chain/block_header_state.hpp | 16 +---- .../chain/include/eosio/chain/controller.hpp | 1 - .../include/eosio/chain/fork_database.hpp | 2 - .../include/eosio/chain/plugin_interface.hpp | 2 - plugins/chain_plugin/chain_plugin.cpp | 14 +---- 8 files changed, 3 insertions(+), 127 deletions(-) diff --git a/libraries/chain/block_header_state.cpp b/libraries/chain/block_header_state.cpp index 9fc95885dae..470be0c2ff5 100644 --- a/libraries/chain/block_header_state.cpp +++ b/libraries/chain/block_header_state.cpp @@ -253,18 +253,6 @@ namespace eosio { namespace chain { return result; } - /* - void block_header_state::set_new_producers( producer_schedule_type pending ) { - EOS_ASSERT( pending.version == active_schedule.version + 1, producer_schedule_exception, "wrong producer schedule version specified" ); - EOS_ASSERT( pending_schedule.producers.size() == 0, producer_schedule_exception, - "cannot set new pending producers until last pending is confirmed" ); - header.new_producers = move(pending); - pending_schedule_hash = digest_type::hash( *header.new_producers ); - pending_schedule = *header.new_producers; - pending_schedule_lib_num = block_num; - } - */ - /** * Transitions the current header state into the next header state given the supplied signed block header. 
* @@ -297,18 +285,4 @@ namespace eosio { namespace chain { ("block_signing_key", block_signing_key)( "signee", signee ) ); } - /* - void block_header_state::add_confirmation( const header_confirmation& conf ) { - for( const auto& c : confirmations ) - EOS_ASSERT( c.producer != conf.producer, producer_double_confirm, "block already confirmed by this producer" ); - - auto key = active_schedule.get_producer_key( conf.producer ); - EOS_ASSERT( key != public_key_type(), producer_not_in_schedule, "producer not in current schedule" ); - auto signer = fc::crypto::public_key( conf.producer_signature, sig_digest(), true ); - EOS_ASSERT( signer == key, wrong_signing_key, "confirmation not signed by expected key" ); - - confirmations.emplace_back( conf ); - } - */ - } } /// namespace eosio::chain diff --git a/libraries/chain/fork_database.cpp b/libraries/chain/fork_database.cpp index 1a6518cfa1b..6adcea6f65e 100644 --- a/libraries/chain/fork_database.cpp +++ b/libraries/chain/fork_database.cpp @@ -297,66 +297,4 @@ namespace eosio { namespace chain { return *nitr; } - /* - void fork_database::add( const header_confirmation& c ) { - auto b = get_block( c.block_id ); - EOS_ASSERT( b, fork_db_block_not_found, "unable to find block id ${id}", ("id",c.block_id)); - b->add_confirmation( c ); - - if( b->bft_irreversible_blocknum < b->block_num && - b->confirmations.size() >= ((b->active_schedule.producers.size() * 2) / 3 + 1) ) { - set_bft_irreversible( c.block_id ); - } - } - */ - - /** - * This method will set this block as being BFT irreversible and will update - * all blocks which build off of it to have the same bft_irb if their existing - * bft irb is less than this block num. - * - * This will require a search over all forks - */ -#if 0 - void fork_database::set_bft_irreversible( block_id_type id ) { - auto& idx = my->index.get(); - auto itr = idx.find(id); - uint32_t block_num = (*itr)->block_num; - idx.modify( itr, [&]( auto& bsp ) { - bsp->bft_irreversible_blocknum = bsp->block_num; - }); - - /** to prevent stack-overflow, we perform a bredth-first traversal of the - * fork database. At each stage we iterate over the leafs from the prior stage - * and find all nodes that link their previous. If we update the bft lib then we - * add it to a queue for the next layer. This lambda takes one layer and returns - * all block ids that need to be iterated over for next layer. 
- */ - auto update = [&]( const vector& in ) { - vector updated; - - for( const auto& i : in ) { - auto& pidx = my->index.get(); - auto pitr = pidx.lower_bound( i ); - auto epitr = pidx.upper_bound( i ); - while( pitr != epitr ) { - pidx.modify( pitr, [&]( auto& bsp ) { - if( bsp->bft_irreversible_blocknum < block_num ) { - bsp->bft_irreversible_blocknum = block_num; - updated.push_back( bsp->id ); - } - }); - ++pitr; - } - } - return updated; - }; - - vector queue{id}; - while( queue.size() ) { - queue = update( queue ); - } - } -#endif - } } /// eosio::chain diff --git a/libraries/chain/include/eosio/chain/block_header.hpp b/libraries/chain/include/eosio/chain/block_header.hpp index 53de64eba67..2849ee00f31 100644 --- a/libraries/chain/include/eosio/chain/block_header.hpp +++ b/libraries/chain/include/eosio/chain/block_header.hpp @@ -49,12 +49,6 @@ namespace eosio { namespace chain { signature_type producer_signature; }; - struct header_confirmation { - block_id_type block_id; - account_name producer; - signature_type producer_signature; - }; - } } /// namespace eosio::chain FC_REFLECT(eosio::chain::block_header, @@ -63,4 +57,3 @@ FC_REFLECT(eosio::chain::block_header, (schedule_version)(new_producers)(header_extensions)) FC_REFLECT_DERIVED(eosio::chain::signed_block_header, (eosio::chain::block_header), (producer_signature)) -FC_REFLECT(eosio::chain::header_confirmation, (block_id)(producer)(producer_signature) ) diff --git a/libraries/chain/include/eosio/chain/block_header_state.hpp b/libraries/chain/include/eosio/chain/block_header_state.hpp index 6319355a936..81c925dfec5 100644 --- a/libraries/chain/include/eosio/chain/block_header_state.hpp +++ b/libraries/chain/include/eosio/chain/block_header_state.hpp @@ -61,29 +61,15 @@ struct block_header_state { flat_map producer_to_last_implied_irb; public_key_type block_signing_key; vector confirm_count; - vector confirmations; pending_block_header_state next( block_timestamp_type when, uint16_t num_prev_blocks_to_confirm )const; block_header_state next( const signed_block_header& h, bool skip_validate_signee = false )const; - //void set_new_producers( producer_schedule_type next_pending ); - //void set_confirmed( uint16_t num_prev_blocks ); - //void add_confirmation( const header_confirmation& c ); - //bool maybe_promote_pending(); - - bool has_pending_producers()const { return pending_schedule.producers.size(); } uint32_t calc_dpos_last_irreversible( account_name producer_of_next_block )const; bool is_active_producer( account_name n )const; - /* - block_timestamp_type get_slot_time( uint32_t slot_num )const; - uint32_t get_slot_at_time( block_timestamp_type t )const; - producer_key get_scheduled_producer( uint32_t slot_num )const; - uint32_t producer_participation_rate()const; - */ - producer_key get_scheduled_producer( block_timestamp_type t )const; const block_id_type& prev()const { return header.previous; } digest_type sig_digest()const; @@ -101,4 +87,4 @@ FC_REFLECT( eosio::chain::block_header_state, (pending_schedule_lib_num)(pending_schedule_hash) (pending_schedule)(active_schedule)(blockroot_merkle) (producer_to_last_produced)(producer_to_last_implied_irb)(block_signing_key) - (confirm_count)(confirmations) ) + (confirm_count) ) diff --git a/libraries/chain/include/eosio/chain/controller.hpp b/libraries/chain/include/eosio/chain/controller.hpp index 00e9f54d8db..f1e28bba78d 100644 --- a/libraries/chain/include/eosio/chain/controller.hpp +++ b/libraries/chain/include/eosio/chain/controller.hpp @@ -257,7 +257,6 @@ namespace eosio { 
namespace chain { signal irreversible_block; signal accepted_transaction; signal applied_transaction; - signal accepted_confirmation; signal bad_alloc; /* diff --git a/libraries/chain/include/eosio/chain/fork_database.hpp b/libraries/chain/include/eosio/chain/fork_database.hpp index 7473de2d582..2ac957f4a65 100644 --- a/libraries/chain/include/eosio/chain/fork_database.hpp +++ b/libraries/chain/include/eosio/chain/fork_database.hpp @@ -44,8 +44,6 @@ namespace eosio { namespace chain { block_state_ptr add( const block_state_ptr& next_block, bool skip_validate_previous ); void remove( const block_id_type& id ); - //void add( const header_confirmation& c ); - const block_state_ptr& head()const; /** diff --git a/plugins/chain_interface/include/eosio/chain/plugin_interface.hpp b/plugins/chain_interface/include/eosio/chain/plugin_interface.hpp index b62915b5220..cdec12008ef 100644 --- a/plugins/chain_interface/include/eosio/chain/plugin_interface.hpp +++ b/plugins/chain_interface/include/eosio/chain/plugin_interface.hpp @@ -29,8 +29,6 @@ namespace eosio { namespace chain { namespace plugin_interface { using irreversible_block = channel_decl; using accepted_transaction = channel_decl; using applied_transaction = channel_decl; - using accepted_confirmation = channel_decl; - } namespace methods { diff --git a/plugins/chain_plugin/chain_plugin.cpp b/plugins/chain_plugin/chain_plugin.cpp index 91d130d3ec5..8b26838e44d 100644 --- a/plugins/chain_plugin/chain_plugin.cpp +++ b/plugins/chain_plugin/chain_plugin.cpp @@ -146,7 +146,6 @@ class chain_plugin_impl { ,irreversible_block_channel(app().get_channel()) ,accepted_transaction_channel(app().get_channel()) ,applied_transaction_channel(app().get_channel()) - ,accepted_confirmation_channel(app().get_channel()) ,incoming_block_channel(app().get_channel()) ,incoming_block_sync_method(app().get_method()) ,incoming_transaction_async_method(app().get_method()) @@ -174,7 +173,6 @@ class chain_plugin_impl { channels::irreversible_block::channel_type& irreversible_block_channel; channels::accepted_transaction::channel_type& accepted_transaction_channel; channels::applied_transaction::channel_type& applied_transaction_channel; - channels::accepted_confirmation::channel_type& accepted_confirmation_channel; incoming::channels::block::channel_type& incoming_block_channel; // retained references to methods for easy calling @@ -194,8 +192,6 @@ class chain_plugin_impl { fc::optional irreversible_block_connection; fc::optional accepted_transaction_connection; fc::optional applied_transaction_connection; - fc::optional accepted_confirmation_connection; - }; @@ -698,11 +694,6 @@ void chain_plugin::plugin_initialize(const variables_map& options) { my->applied_transaction_channel.publish( trace ); } ); - my->accepted_confirmation_connection = my->chain->accepted_confirmation.connect( - [this]( const header_confirmation& conf ) { - my->accepted_confirmation_channel.publish( conf ); - } ); - my->chain->add_indices(); } FC_LOG_AND_RETHROW() @@ -744,7 +735,6 @@ void chain_plugin::plugin_shutdown() { my->irreversible_block_connection.reset(); my->accepted_transaction_connection.reset(); my->applied_transaction_connection.reset(); - my->accepted_confirmation_connection.reset(); my->chain->get_thread_pool().stop(); my->chain->get_thread_pool().join(); my->chain.reset(); @@ -1099,7 +1089,7 @@ uint64_t convert_to_type(const string& str, const string& desc) { try { return boost::lexical_cast(str.c_str(), str.size()); } catch( ... 
) { } - + try { auto trimmed_str = str; boost::trim(trimmed_str); @@ -1113,7 +1103,7 @@ uint64_t convert_to_type(const string& str, const string& desc) { return symb.value(); } catch( ... ) { } } - + try { return ( eosio::chain::string_to_symbol( 0, str.c_str() ) >> 8 ); } catch( ... ) { From be795a8fad651857a9b467eff2695fd1cac3195f Mon Sep 17 00:00:00 2001 From: arhag Date: Thu, 17 Jan 2019 16:04:56 -0500 Subject: [PATCH 008/680] reduce duplication in block header state structs (addresses review comments) --- libraries/chain/block_header_state.cpp | 43 +++---- libraries/chain/controller.cpp | 24 ++-- libraries/chain/fork_database.cpp | 9 +- .../eosio/chain/block_header_state.hpp | 116 ++++++++++-------- unittests/block_tests.cpp | 6 +- unittests/forked_tests.cpp | 2 +- 6 files changed, 104 insertions(+), 96 deletions(-) diff --git a/libraries/chain/block_header_state.cpp b/libraries/chain/block_header_state.cpp index 470be0c2ff5..3f3fefd15dd 100644 --- a/libraries/chain/block_header_state.cpp +++ b/libraries/chain/block_header_state.cpp @@ -109,13 +109,11 @@ namespace eosio { namespace chain { result.dpos_irreversible_blocknum = calc_dpos_last_irreversible( prokey.producer_name ); result.prev_pending_schedule = pending_schedule; - result.prev_pending_schedule_lib_num = pending_schedule_lib_num; - result.prev_pending_schedule_hash = pending_schedule_hash; - if( pending_schedule.producers.size() && - result.dpos_irreversible_blocknum >= pending_schedule_lib_num ) + if( pending_schedule.schedule.producers.size() && + result.dpos_irreversible_blocknum >= pending_schedule.schedule_lib_num ) { - result.active_schedule = pending_schedule; + result.active_schedule = pending_schedule.schedule; flat_map new_producer_to_last_produced; @@ -194,40 +192,31 @@ namespace eosio { namespace chain { if( h.new_producers ) { EOS_ASSERT( !was_pending_promoted, producer_schedule_exception, "cannot set pending producer schedule in the same block in which pending was promoted to active" ); EOS_ASSERT( h.new_producers->version == active_schedule.version + 1, producer_schedule_exception, "wrong producer schedule version specified" ); - EOS_ASSERT( prev_pending_schedule.producers.size() == 0, producer_schedule_exception, + EOS_ASSERT( prev_pending_schedule.schedule.producers.size() == 0, producer_schedule_exception, "cannot set new pending producers until last pending is confirmed" ); } - block_header_state result; + auto block_number = block_num; - result.id = h.id(); - result.block_num = block_num; - result.header = h; + block_header_state result( std::move( *static_cast(this) ) ); - result.dpos_proposed_irreversible_blocknum = dpos_proposed_irreversible_blocknum; - result.dpos_irreversible_blocknum = dpos_irreversible_blocknum; + result.id = h.id(); + result.header = h; if( h.new_producers ) { - result.pending_schedule = *h.new_producers; - result.pending_schedule_hash = digest_type::hash( result.pending_schedule ); - result.pending_schedule_lib_num = block_num; + result.pending_schedule.schedule = *h.new_producers; + result.pending_schedule.schedule_hash = digest_type::hash( result.pending_schedule ); + result.pending_schedule.schedule_lib_num = block_number; } else { if( was_pending_promoted ) { - result.pending_schedule.version = prev_pending_schedule.version; + result.pending_schedule.schedule.version = prev_pending_schedule.schedule.version; } else { - result.pending_schedule = prev_pending_schedule; + result.pending_schedule.schedule = std::move( prev_pending_schedule.schedule ); } - 
result.pending_schedule_hash = std::move(prev_pending_schedule_hash); - result.pending_schedule_lib_num = prev_pending_schedule_lib_num; + result.pending_schedule.schedule_hash = std::move( prev_pending_schedule.schedule_hash ); + result.pending_schedule.schedule_lib_num = prev_pending_schedule.schedule_lib_num; } - result.active_schedule = std::move(active_schedule); - result.blockroot_merkle = std::move(blockroot_merkle); - result.producer_to_last_produced = std::move(producer_to_last_produced); - result.producer_to_last_implied_irb = std::move(producer_to_last_implied_irb); - result.block_signing_key = std::move(block_signing_key); - result.confirm_count = std::move(confirm_count); - return result; } @@ -267,7 +256,7 @@ namespace eosio { namespace chain { digest_type block_header_state::sig_digest()const { auto header_bmroot = digest_type::hash( std::make_pair( header.digest(), blockroot_merkle.get_root() ) ); - return digest_type::hash( std::make_pair(header_bmroot, pending_schedule_hash) ); + return digest_type::hash( std::make_pair(header_bmroot, pending_schedule.schedule_hash) ); } void block_header_state::sign( const std::function& signer ) { diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp index 2a3071b5752..4d76e0da363 100644 --- a/libraries/chain/controller.cpp +++ b/libraries/chain/controller.cpp @@ -637,13 +637,13 @@ struct controller_impl { producer_schedule_type initial_schedule{ 0, {{config::system_account_name, conf.genesis.initial_key}} }; block_header_state genheader; - genheader.active_schedule = initial_schedule; - genheader.pending_schedule = initial_schedule; - genheader.pending_schedule_hash = fc::sha256::hash(initial_schedule); - genheader.header.timestamp = conf.genesis.initial_timestamp; - genheader.header.action_mroot = conf.genesis.compute_chain_id(); - genheader.id = genheader.header.id(); - genheader.block_num = genheader.header.block_num(); + genheader.active_schedule = initial_schedule; + genheader.pending_schedule.schedule = initial_schedule; + genheader.pending_schedule.schedule_hash = fc::sha256::hash(initial_schedule); + genheader.header.timestamp = conf.genesis.initial_timestamp; + genheader.header.action_mroot = conf.genesis.compute_chain_id(); + genheader.id = genheader.header.id(); + genheader.block_num = genheader.header.block_num(); head = std::make_shared(); static_cast(*head) = genheader; @@ -1148,7 +1148,7 @@ struct controller_impl { const auto& gpo = db.get(); if( gpo.proposed_schedule_block_num.valid() && // if there is a proposed schedule that was proposed in a block ... ( *gpo.proposed_schedule_block_num <= pbhs.dpos_irreversible_blocknum ) && // ... that has now become irreversible ... - pbhs.prev_pending_schedule.producers.size() == 0 // ... and there was room for a new pending schedule prior to any possible promotion + pbhs.prev_pending_schedule.schedule.producers.size() == 0 // ... and there was room for a new pending schedule prior to any possible promotion ) { // Promote proposed schedule to pending schedule. 
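The hunk above captures the whole scheduling handoff rule: a proposed producer schedule may only be promoted to pending once the block that proposed it has become irreversible, and only if no pending schedule is already awaiting promotion. A minimal standalone sketch of that predicate, using hypothetical parameter names rather than the controller's actual state objects:

   #include <cstddef>
   #include <cstdint>

   // Sketch only: mirrors the promotion condition in the controller.cpp hunk above.
   inline bool may_promote_proposed_schedule( uint32_t proposed_in_block_num,
                                              uint32_t dpos_irreversible_blocknum,
                                              std::size_t pending_schedule_producer_count ) {
      // The proposing block must be irreversible, and no pending schedule may be in flight.
      return proposed_in_block_num <= dpos_irreversible_blocknum &&
             pending_schedule_producer_count == 0;
   }

Keeping the predicate this small lets the controller evaluate it once per block using nothing beyond the global property object and the pending block header state.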
@@ -1990,7 +1990,7 @@ const vector& controller::get_pending_trx_receipts()const { } uint32_t controller::last_irreversible_block_num() const { - return std::max(std::max(my->head->bft_irreversible_blocknum, my->head->dpos_irreversible_blocknum), my->snapshot_head_block); + return std::max( my->head->dpos_irreversible_blocknum, my->snapshot_head_block); } block_id_type controller::last_irreversible_block_id() const { @@ -2124,10 +2124,10 @@ const producer_schedule_type& controller::active_producers()const { const producer_schedule_type& controller::pending_producers()const { if( !(my->pending) ) - return my->head->pending_schedule; + return my->head->pending_schedule.schedule; if( my->pending->_block_stage.contains() ) - return my->pending->_block_stage.get()._block_state->pending_schedule; + return my->pending->_block_stage.get()._block_state->pending_schedule.schedule; if( my->pending->_block_stage.contains() ) { const auto& np = my->pending->_block_stage.get()._unsigned_block->new_producers; @@ -2140,7 +2140,7 @@ const producer_schedule_type& controller::pending_producers()const { if( bb._new_pending_producer_schedule ) return *bb._new_pending_producer_schedule; - return bb._pending_block_header_state.prev_pending_schedule; + return bb._pending_block_header_state.prev_pending_schedule.schedule; } optional controller::proposed_producers()const { diff --git a/libraries/chain/fork_database.cpp b/libraries/chain/fork_database.cpp index 6adcea6f65e..f73b3aec07d 100644 --- a/libraries/chain/fork_database.cpp +++ b/libraries/chain/fork_database.cpp @@ -25,18 +25,17 @@ namespace eosio { namespace chain { ordered_non_unique< tag, const_mem_fun >, ordered_non_unique< tag, composite_key< block_state, - member, + member, member >, composite_key_compare< std::less, std::greater > >, ordered_non_unique< tag, composite_key< block_header_state, - member, - member, - member + member, + member >, - composite_key_compare< std::greater, std::greater, std::greater > + composite_key_compare< std::greater, std::greater > > > > fork_multi_index_type; diff --git a/libraries/chain/include/eosio/chain/block_header_state.hpp b/libraries/chain/include/eosio/chain/block_header_state.hpp index 81c925dfec5..85a09047fdd 100644 --- a/libraries/chain/include/eosio/chain/block_header_state.hpp +++ b/libraries/chain/include/eosio/chain/block_header_state.hpp @@ -7,25 +7,34 @@ namespace eosio { namespace chain { struct block_header_state; -struct pending_block_header_state { - uint32_t block_num = 0; +namespace detail { + struct block_header_state_common { + uint32_t block_num = 0; + uint32_t dpos_proposed_irreversible_blocknum = 0; + uint32_t dpos_irreversible_blocknum = 0; + producer_schedule_type active_schedule; + incremental_merkle blockroot_merkle; + flat_map producer_to_last_produced; + flat_map producer_to_last_implied_irb; + public_key_type block_signing_key; + vector confirm_count; + }; + + struct schedule_info { + uint32_t schedule_lib_num = 0; /// last irr block num + digest_type schedule_hash; + producer_schedule_type schedule; + }; +} + +struct pending_block_header_state : public detail::block_header_state_common { + detail::schedule_info prev_pending_schedule; + bool was_pending_promoted = false; block_id_type previous; - block_timestamp_type timestamp; account_name producer; - uint16_t confirmed = 1; - uint32_t dpos_proposed_irreversible_blocknum = 0; - uint32_t dpos_irreversible_blocknum = 0; + block_timestamp_type timestamp; uint32_t active_schedule_version = 0; - uint32_t prev_pending_schedule_lib_num = 0; 
/// last irr block num - digest_type prev_pending_schedule_hash; - producer_schedule_type prev_pending_schedule; - producer_schedule_type active_schedule; - incremental_merkle blockroot_merkle; - flat_map producer_to_last_produced; - flat_map producer_to_last_implied_irb; - public_key_type block_signing_key; - vector confirm_count; - bool was_pending_promoted = false; + uint16_t confirmed = 1; signed_block_header make_block_header( const checksum256_type& transaction_mroot, const checksum256_type& action_mroot, @@ -36,7 +45,7 @@ struct pending_block_header_state { block_header_state finish_next( signed_block_header& h, const std::function& signer )&&; -private: +protected: block_header_state _finish_next( const signed_block_header& h )&&; }; @@ -45,46 +54,57 @@ struct pending_block_header_state { * @struct block_header_state * @brief defines the minimum state necessary to validate transaction headers */ -struct block_header_state { - block_id_type id; - uint32_t block_num = 0; - signed_block_header header; - uint32_t dpos_proposed_irreversible_blocknum = 0; - uint32_t dpos_irreversible_blocknum = 0; - uint32_t bft_irreversible_blocknum = 0; - uint32_t pending_schedule_lib_num = 0; /// last irr block num - digest_type pending_schedule_hash; - producer_schedule_type pending_schedule; - producer_schedule_type active_schedule; - incremental_merkle blockroot_merkle; - flat_map producer_to_last_produced; - flat_map producer_to_last_implied_irb; - public_key_type block_signing_key; - vector confirm_count; +struct block_header_state : public detail::block_header_state_common { + block_id_type id; + signed_block_header header; + detail::schedule_info pending_schedule; + + block_header_state() = default; + + block_header_state( detail::block_header_state_common&& base ) + :detail::block_header_state_common( std::move(base) ) + {} pending_block_header_state next( block_timestamp_type when, uint16_t num_prev_blocks_to_confirm )const; block_header_state next( const signed_block_header& h, bool skip_validate_signee = false )const; - bool has_pending_producers()const { return pending_schedule.producers.size(); } - uint32_t calc_dpos_last_irreversible( account_name producer_of_next_block )const; - bool is_active_producer( account_name n )const; + bool has_pending_producers()const { return pending_schedule.schedule.producers.size(); } + uint32_t calc_dpos_last_irreversible( account_name producer_of_next_block )const; + bool is_active_producer( account_name n )const; - producer_key get_scheduled_producer( block_timestamp_type t )const; - const block_id_type& prev()const { return header.previous; } - digest_type sig_digest()const; - void sign( const std::function& signer ); - public_key_type signee()const; - void verify_signee(const public_key_type& signee)const; + producer_key get_scheduled_producer( block_timestamp_type t )const; + const block_id_type& prev()const { return header.previous; } + digest_type sig_digest()const; + void sign( const std::function& signer ); + public_key_type signee()const; + void verify_signee(const public_key_type& signee)const; }; } } /// namespace eosio::chain -FC_REFLECT( eosio::chain::block_header_state, - (id)(block_num)(header)(dpos_proposed_irreversible_blocknum)(dpos_irreversible_blocknum)(bft_irreversible_blocknum) - (pending_schedule_lib_num)(pending_schedule_hash) - (pending_schedule)(active_schedule)(blockroot_merkle) - (producer_to_last_produced)(producer_to_last_implied_irb)(block_signing_key) - (confirm_count) ) +FC_REFLECT( 
eosio::chain::detail::block_header_state_common, + (block_num) + (dpos_proposed_irreversible_blocknum) + (dpos_irreversible_blocknum) + (active_schedule) + (blockroot_merkle) + (producer_to_last_produced) + (producer_to_last_implied_irb) + (block_signing_key) + (confirm_count) +) + +FC_REFLECT( eosio::chain::detail::schedule_info, + (schedule_lib_num) + (schedule_hash) + (schedule) +) + +FC_REFLECT_DERIVED( eosio::chain::block_header_state, (eosio::chain::detail::block_header_state_common), + (id) + (header) + (pending_schedule) +) diff --git a/unittests/block_tests.cpp b/unittests/block_tests.cpp index 045255da6d9..76e5746e1fa 100644 --- a/unittests/block_tests.cpp +++ b/unittests/block_tests.cpp @@ -31,13 +31,13 @@ BOOST_AUTO_TEST_CASE(block_with_invalid_tx_test) // Re-sign the transaction signed_tx.signatures.clear(); signed_tx.sign(main.get_private_key(config::system_account_name, "active"), main.control->get_chain_id()); - // Replace the valid transaction with the invalid transaction + // Replace the valid transaction with the invalid transaction auto invalid_packed_tx = packed_transaction(signed_tx); copy_b->transactions.back().trx = invalid_packed_tx; // Re-sign the block auto header_bmroot = digest_type::hash( std::make_pair( copy_b->digest(), main.control->head_block_state()->blockroot_merkle.get_root() ) ); - auto sig_digest = digest_type::hash( std::make_pair(header_bmroot, main.control->head_block_state()->pending_schedule_hash) ); + auto sig_digest = digest_type::hash( std::make_pair(header_bmroot, main.control->head_block_state()->pending_schedule.schedule_hash) ); copy_b->producer_signature = main.get_private_key(config::system_account_name, "active").sign(sig_digest); // Push block with invalid transaction to other chain @@ -78,7 +78,7 @@ std::pair corrupt_trx_in_block(validating_te // Re-sign the block auto header_bmroot = digest_type::hash( std::make_pair( copy_b->digest(), main.control->head_block_state()->blockroot_merkle.get_root() ) ); - auto sig_digest = digest_type::hash( std::make_pair(header_bmroot, main.control->head_block_state()->pending_schedule_hash) ); + auto sig_digest = digest_type::hash( std::make_pair(header_bmroot, main.control->head_block_state()->pending_schedule.schedule_hash) ); copy_b->producer_signature = main.get_private_key(b->producer, "active").sign(sig_digest); return std::pair(b, copy_b); } diff --git a/unittests/forked_tests.cpp b/unittests/forked_tests.cpp index 9ebab8e4c3b..9a07999f7a8 100644 --- a/unittests/forked_tests.cpp +++ b/unittests/forked_tests.cpp @@ -100,7 +100,7 @@ BOOST_AUTO_TEST_CASE( fork_with_bad_block ) try { // re-sign the block auto header_bmroot = digest_type::hash( std::make_pair( copy_b->digest(), fork.block_merkle.get_root() ) ); - auto sig_digest = digest_type::hash( std::make_pair(header_bmroot, remote.control->head_block_state()->pending_schedule_hash) ); + auto sig_digest = digest_type::hash( std::make_pair(header_bmroot, remote.control->head_block_state()->pending_schedule.schedule_hash) ); copy_b->producer_signature = remote.get_private_key(N(b), "active").sign(sig_digest); // add this new block to our corrupted block merkle From b2c6fda13587041d766fc07ad868e10f9f554607 Mon Sep 17 00:00:00 2001 From: arhag Date: Fri, 18 Jan 2019 19:51:45 -0500 Subject: [PATCH 009/680] Allow irreversible mode. Fix bugs in fetching recent blocks when in irreversible mode (also fixes an issue that prevented a node in irreversible mode from properly syncing). 
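For reference, the lookup order this patch establishes when fetching a
recent block can be condensed roughly as follows (a sketch only:
find_in_reversible_index is a hypothetical stand-in for the
reversible_blocks index lookup in the hunks below, and error handling is
elided):

    block_state_ptr fetch_block_state_by_number( uint32_t block_num )const {
       // speculative/head modes keep recent blocks in the reversible index
       if( auto bsp = find_in_reversible_index( block_num ) )
          return bsp;
       // in irreversible mode, blocks newer than the irreversible head only
       // exist in the fork database, on the branch of its pending head
       if( my->read_mode == db_read_mode::IRREVERSIBLE )
          return my->fork_db.search_on_branch( my->fork_db.pending_head()->id, block_num );
       return block_state_ptr();
    }
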
Change meaning of head in get info to be the controller's head and add new fields fork_db_head_block_num and fork_db_head_block_id to follow the previous definition of head (change only matters in irreversible mode). --- libraries/chain/controller.cpp | 45 ++++++++++++++----- libraries/chain/fork_database.cpp | 9 ++++ .../include/eosio/chain/fork_database.hpp | 7 +++ plugins/chain_plugin/chain_plugin.cpp | 17 +++---- .../eosio/chain_plugin/chain_plugin.hpp | 4 +- 5 files changed, 62 insertions(+), 20 deletions(-) diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp index 8de7fa9c864..245d228aaae 100644 --- a/libraries/chain/controller.cpp +++ b/libraries/chain/controller.cpp @@ -2075,7 +2075,10 @@ const vector& controller::get_pending_trx_receipts()const { } uint32_t controller::last_irreversible_block_num() const { - return std::max( my->head->dpos_irreversible_blocknum, my->snapshot_head_block ); + uint32_t lib_num = (my->read_mode == db_read_mode::IRREVERSIBLE) + ? my->fork_db.pending_head()->dpos_irreversible_blocknum + : my->head->dpos_irreversible_blocknum; + return std::max( lib_num, my->snapshot_head_block ); } block_id_type controller::last_irreversible_block_id() const { @@ -2085,8 +2088,12 @@ block_id_type controller::last_irreversible_block_id() const { if( block_header::num_from_id(tapos_block_summary.block_id) == lib_num ) return tapos_block_summary.block_id; - return fetch_block_by_number(lib_num)->id(); + auto signed_blk = my->blog.read_block_by_num( lib_num ); + EOS_ASSERT( BOOST_LIKELY( signed_blk != nullptr ), unknown_block_exception, + "Could not find block: ${block}", ("block", lib_num) ); + + return signed_blk->id(); } const dynamic_global_property_object& controller::get_dynamic_global_properties()const { @@ -2122,8 +2129,13 @@ block_state_ptr controller::fetch_block_state_by_number( uint32_t block_num )con const auto& rev_blocks = my->reversible_blocks.get_index(); auto objitr = rev_blocks.find(block_num); - if( objitr == rev_blocks.end() ) - return block_state_ptr(); + if( objitr == rev_blocks.end() ) { + if( my->read_mode == db_read_mode::IRREVERSIBLE ) { + return my->fork_db.search_on_branch( my->fork_db.pending_head()->id, block_num ); + } else { + return block_state_ptr(); + } + } fc::datastream ds( objitr->packedblock.data(), objitr->packedblock.size() ); block_header h; @@ -2135,14 +2147,25 @@ block_state_ptr controller::fetch_block_state_by_number( uint32_t block_num )con } FC_CAPTURE_AND_RETHROW( (block_num) ) } block_id_type controller::get_block_id_for_num( uint32_t block_num )const { try { - const auto& rev_blocks = my->reversible_blocks.get_index(); - auto objitr = rev_blocks.find(block_num); + const auto& blog_head = my->blog.head(); + + bool find_in_blog = (blog_head && block_num <= blog_head->block_num()); + + if( !find_in_blog ) { + if( my->read_mode != db_read_mode::IRREVERSIBLE ) { + const auto& rev_blocks = my->reversible_blocks.get_index(); + auto objitr = rev_blocks.find(block_num); + if( objitr != rev_blocks.end() ) { + fc::datastream ds( objitr->packedblock.data(), objitr->packedblock.size() ); + block_header h; + fc::raw::unpack( ds, h ); + return h.id(); + } + } else { + auto bsp = my->fork_db.search_on_branch( my->fork_db.pending_head()->id, block_num ); - if( objitr != rev_blocks.end() ) { - fc::datastream ds( objitr->packedblock.data(), objitr->packedblock.size() ); - block_header h; - fc::raw::unpack( ds, h ); - return h.id(); + if( bsp ) return bsp->id; + } } auto signed_blk = 
my->blog.read_block_by_num(block_num); diff --git a/libraries/chain/fork_database.cpp b/libraries/chain/fork_database.cpp index e4d3c64bcb2..88456b542fe 100644 --- a/libraries/chain/fork_database.cpp +++ b/libraries/chain/fork_database.cpp @@ -305,6 +305,15 @@ namespace eosio { namespace chain { return result; } + block_state_ptr fork_database::search_on_branch( const block_id_type& h, uint32_t block_num )const { + for( auto s = get_block(h); s; s = get_block( s->header.previous ) ) { + if( s->block_num == block_num ) + return s; + } + + return {}; + } + /** * Given two head blocks, return two branches of the fork graph that * end with a common ancestor (same prior block) diff --git a/libraries/chain/include/eosio/chain/fork_database.hpp b/libraries/chain/include/eosio/chain/fork_database.hpp index c96d4f85fb1..ed025671383 100644 --- a/libraries/chain/include/eosio/chain/fork_database.hpp +++ b/libraries/chain/include/eosio/chain/fork_database.hpp @@ -63,6 +63,13 @@ namespace eosio { namespace chain { */ branch_type fetch_branch( const block_id_type& h, uint32_t trim_after_block_num = std::numeric_limits::max() )const; + + /** + * Returns the block state with a block number of `block_num` that is on the branch that + * contains a block with an id of`h`, or the empty shared pointer if no such block can be found. + */ + block_state_ptr search_on_branch( const block_id_type& h, uint32_t block_num )const; + /** * Given two head blocks, return two branches of the fork graph that * end with a common ancestor (same prior block) diff --git a/plugins/chain_plugin/chain_plugin.cpp b/plugins/chain_plugin/chain_plugin.cpp index a8a2cc9b3cb..fdfd7cf83ea 100644 --- a/plugins/chain_plugin/chain_plugin.cpp +++ b/plugins/chain_plugin/chain_plugin.cpp @@ -237,12 +237,12 @@ void chain_plugin::set_program_options(options_description& cli, options_descrip ("sender-bypass-whiteblacklist", boost::program_options::value>()->composing()->multitoken(), "Deferred transactions sent by accounts in this list do not have any of the subjective whitelist/blacklist checks applied to them (may specify multiple times)") ("read-mode", boost::program_options::value()->default_value(eosio::chain::db_read_mode::SPECULATIVE), - "Database read mode (\"speculative\", \"head\", or \"read-only\").\n"// or \"irreversible\").\n" + "Database read mode (\"speculative\", \"head\", \"read-only\", \"irreversible\").\n" "In \"speculative\" mode database contains changes done up to the head block plus changes made by transactions not yet included to the blockchain.\n" "In \"head\" mode database contains changes done up to the current head block.\n" - "In \"read-only\" mode database contains incoming block changes but no speculative transaction processing.\n" + "In \"read-only\" mode database contains changes done up to the current head block and transactions cannot be pushed to the chain API.\n" + "In \"irreversible\" mode database contains changes done up to the last irreversible block and transactions cannot be pushed to the chain API.\n" ) - //"In \"irreversible\" mode database contains changes done up the current irreversible block.\n") ("validation-mode", boost::program_options::value()->default_value(eosio::chain::validation_mode::FULL), "Chain validation mode (\"full\" or \"light\").\n" "In \"full\" mode all incoming blocks will be fully validated.\n" @@ -629,7 +629,6 @@ void chain_plugin::plugin_initialize(const variables_map& options) { if ( options.count("read-mode") ) { my->chain_config->read_mode = options.at("read-mode").as(); - 
EOS_ASSERT( my->chain_config->read_mode != db_read_mode::IRREVERSIBLE, plugin_config_exception, "irreversible mode not currently supported." ); } if ( options.count("validation-mode") ) { @@ -1024,12 +1023,12 @@ read_only::get_info_results read_only::get_info(const read_only::get_info_params return { itoh(static_cast(app().version())), db.get_chain_id(), - db.fork_db_pending_head_block_num(), + db.head_block_num(), db.last_irreversible_block_num(), db.last_irreversible_block_id(), - db.fork_db_pending_head_block_id(), - db.fork_db_pending_head_block_time(), - db.fork_db_pending_head_block_producer(), + db.head_block_id(), + db.head_block_time(), + db.head_block_producer(), rm.get_virtual_block_cpu_limit(), rm.get_virtual_block_net_limit(), rm.get_block_cpu_limit(), @@ -1037,6 +1036,8 @@ read_only::get_info_results read_only::get_info(const read_only::get_info_params //std::bitset<64>(db.get_dynamic_global_properties().recent_slots_filled).to_string(), //__builtin_popcountll(db.get_dynamic_global_properties().recent_slots_filled) / 64.0, app().version_string(), + db.fork_db_pending_head_block_num(), + db.fork_db_pending_head_block_id() }; } diff --git a/plugins/chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp b/plugins/chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp index 4faf1fdb4bd..2052f712cb3 100644 --- a/plugins/chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp +++ b/plugins/chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp @@ -104,6 +104,8 @@ class read_only { //string recent_slots; //double participation_rate = 0; optional server_version_string; + uint32_t fork_db_head_block_num = 0; + chain::block_id_type fork_db_head_block_id; }; get_info_results get_info(const get_info_params&) const; @@ -705,7 +707,7 @@ class chain_plugin : public plugin { FC_REFLECT( eosio::chain_apis::permission, (perm_name)(parent)(required_auth) ) FC_REFLECT(eosio::chain_apis::empty, ) FC_REFLECT(eosio::chain_apis::read_only::get_info_results, -(server_version)(chain_id)(head_block_num)(last_irreversible_block_num)(last_irreversible_block_id)(head_block_id)(head_block_time)(head_block_producer)(virtual_block_cpu_limit)(virtual_block_net_limit)(block_cpu_limit)(block_net_limit)(server_version_string) ) +(server_version)(chain_id)(head_block_num)(last_irreversible_block_num)(last_irreversible_block_id)(head_block_id)(head_block_time)(head_block_producer)(virtual_block_cpu_limit)(virtual_block_net_limit)(block_cpu_limit)(block_net_limit)(server_version_string)(fork_db_head_block_num)(fork_db_head_block_id) ) FC_REFLECT(eosio::chain_apis::read_only::get_block_params, (block_num_or_id)) FC_REFLECT(eosio::chain_apis::read_only::get_block_header_state_params, (block_num_or_id)) From 7d06096382eeec3743f7893bd8987b022e0ef4f0 Mon Sep 17 00:00:00 2001 From: arhag Date: Thu, 24 Jan 2019 10:51:00 -0500 Subject: [PATCH 010/680] add unit test for irreversible mode --- unittests/forked_tests.cpp | 140 +++++++++++++++++++++++++++++++++++-- 1 file changed, 135 insertions(+), 5 deletions(-) diff --git a/unittests/forked_tests.cpp b/unittests/forked_tests.cpp index 8ef00efabf9..d2981e3249c 100644 --- a/unittests/forked_tests.cpp +++ b/unittests/forked_tests.cpp @@ -27,13 +27,35 @@ public_key_type get_public_key( name keyname, string role ){ return get_private_key( keyname, role ).get_public_key(); } -void push_blocks( tester& from, tester& to ) { - while( to.control->fork_db_pending_head_block_num() < from.control->fork_db_pending_head_block_num() ) { +void push_blocks( tester& from, tester& to, 
uint32_t block_num_limit = std::numeric_limits::max() ) { + while( to.control->fork_db_pending_head_block_num() + < std::min( from.control->fork_db_pending_head_block_num(), block_num_limit ) ) + { auto fb = from.control->fetch_block_by_number( to.control->fork_db_pending_head_block_num()+1 ); to.push_block( fb ); } } +bool produce_empty_blocks_until( tester& t, + account_name last_producer, + account_name next_producer, + uint32_t max_num_blocks_to_produce = std::numeric_limits::max() ) +{ + auto condition_satisfied = [&t, last_producer, next_producer]() { + return t.control->pending_block_producer() == next_producer && t.control->head_block_producer() == last_producer; + }; + + for( uint32_t blocks_produced = 0; + blocks_produced < max_num_blocks_to_produce; + t.produce_block(), ++blocks_produced ) + { + if( condition_satisfied() ) + return true; + } + + return condition_satisfied(); +} + BOOST_AUTO_TEST_SUITE(forked_tests) BOOST_AUTO_TEST_CASE( irrblock ) try { @@ -66,9 +88,7 @@ BOOST_AUTO_TEST_CASE( fork_with_bad_block ) try { auto res = bios.set_producers( {N(a),N(b),N(c),N(d),N(e)} ); // run until the producers are installed and its the start of "a's" round - while( bios.control->pending_block_producer().to_string() != "a" || bios.control->head_block_state()->header.producer.to_string() != "e") { - bios.produce_block(); - } + BOOST_REQUIRE( produce_empty_blocks_until( bios, N(e), N(a) ) ); // sync remote node tester remote; @@ -382,4 +402,114 @@ BOOST_AUTO_TEST_CASE( read_modes ) try { } FC_LOG_AND_RETHROW() + +BOOST_AUTO_TEST_CASE( irreversible_mode ) try { + auto does_account_exist = []( const tester& t, account_name n ) { + const auto& db = t.control->db(); + return (db.find( n ) != nullptr); + }; + + tester main; + + main.create_accounts( {N(producer1), N(producer2)} ); + main.produce_block(); + main.set_producers( {N(producer1), N(producer2)} ); + main.produce_block(); + BOOST_REQUIRE( produce_empty_blocks_until( main, N(producer1), N(producer2), 26) ); + + main.create_accounts( {N(alice)} ); + main.produce_block(); + auto hbn1 = main.control->head_block_num(); + auto lib1 = main.control->last_irreversible_block_num(); + + BOOST_REQUIRE( produce_empty_blocks_until( main, N(producer2), N(producer1), 11) ); + + auto hbn2 = main.control->head_block_num(); + auto lib2 = main.control->last_irreversible_block_num(); + + BOOST_REQUIRE( lib2 < hbn1 ); + + tester other; + + push_blocks( main, other ); + BOOST_CHECK_EQUAL( other.control->head_block_num(), hbn2 ); + + BOOST_REQUIRE( produce_empty_blocks_until( main, N(producer1), N(producer2), 12) ); + BOOST_REQUIRE( produce_empty_blocks_until( main, N(producer2), N(producer1), 12) ); + + auto hbn3 = main.control->head_block_num(); + auto lib3 = main.control->last_irreversible_block_num(); + + BOOST_REQUIRE( lib3 >= hbn1 ); + + BOOST_CHECK_EQUAL( does_account_exist( main, N(alice) ), true ); + + // other forks away from main after hbn2 + BOOST_REQUIRE_EQUAL( other.control->head_block_producer().to_string(), "producer2" ); + + other.produce_block( fc::milliseconds( 13 * config::block_interval_ms ) ); // skip over producer1's round + BOOST_REQUIRE_EQUAL( other.control->head_block_producer().to_string(), "producer2" ); + auto fork_first_block_id = other.control->head_block_id(); + wlog( "{w}", ("w", fork_first_block_id)); + + BOOST_REQUIRE( produce_empty_blocks_until( other, N(producer2), N(producer1), 11) ); // finish producer2's round + BOOST_REQUIRE_EQUAL( other.control->pending_block_producer().to_string(), "producer1" ); + + // Repeat 
two more times to ensure other has a longer chain than main + other.produce_block( fc::milliseconds( 13 * config::block_interval_ms ) ); // skip over producer1's round + BOOST_REQUIRE( produce_empty_blocks_until( other, N(producer2), N(producer1), 11) ); // finish producer2's round + + other.produce_block( fc::milliseconds( 13 * config::block_interval_ms ) ); // skip over producer1's round + BOOST_REQUIRE( produce_empty_blocks_until( other, N(producer2), N(producer1), 11) ); // finish producer2's round + + auto hbn4 = other.control->head_block_num(); + auto lib4 = other.control->last_irreversible_block_num(); + + BOOST_REQUIRE( hbn4 > hbn3 ); + BOOST_REQUIRE( lib4 < hbn1 ); + + tester irreversible(false, db_read_mode::IRREVERSIBLE); + + push_blocks( main, irreversible, hbn1 ); + + BOOST_CHECK_EQUAL( irreversible.control->fork_db_pending_head_block_num(), hbn1 ); + BOOST_CHECK_EQUAL( irreversible.control->head_block_num(), lib1 ); + BOOST_CHECK_EQUAL( does_account_exist( irreversible, N(alice) ), false ); + + push_blocks( other, irreversible, hbn4 ); + + BOOST_CHECK_EQUAL( irreversible.control->fork_db_pending_head_block_num(), hbn4 ); + BOOST_CHECK_EQUAL( irreversible.control->head_block_num(), lib4 ); + BOOST_CHECK_EQUAL( does_account_exist( irreversible, N(alice) ), false ); + + // force push blocks from main to irreversible creating a new branch in irreversible's fork database + for( uint32_t n = hbn2 + 1; n <= hbn3; ++n ) { + auto fb = main.control->fetch_block_by_number( n ); + irreversible.push_block( fb ); + } + + BOOST_CHECK_EQUAL( irreversible.control->fork_db_pending_head_block_num(), hbn3 ); + BOOST_CHECK_EQUAL( irreversible.control->head_block_num(), lib3 ); + BOOST_CHECK_EQUAL( does_account_exist( irreversible, N(alice) ), true ); + + { + auto bs = irreversible.control->fetch_block_state_by_id( fork_first_block_id ); + BOOST_REQUIRE( bs && bs->id == fork_first_block_id ); + } + + main.produce_block(); + auto hbn5 = main.control->head_block_num(); + auto lib5 = main.control->last_irreversible_block_num(); + + BOOST_REQUIRE( lib5 > lib3 ); + + push_blocks( main, irreversible, hbn5 ); + + { + auto bs = irreversible.control->fetch_block_state_by_id( fork_first_block_id ); + BOOST_REQUIRE( !bs ); + } + +} FC_LOG_AND_RETHROW() + BOOST_AUTO_TEST_SUITE_END() From f7ecc99da69dd473155b1069e8d09ec10729d569 Mon Sep 17 00:00:00 2001 From: arhag Date: Fri, 25 Jan 2019 21:22:42 -0500 Subject: [PATCH 011/680] changes from review --- libraries/chain/controller.cpp | 45 +++++++++---------- libraries/chain/fork_database.cpp | 30 ++++++++----- .../eosio/chain/block_header_state.hpp | 2 +- .../include/eosio/chain/fork_database.hpp | 4 +- libraries/chain/include/eosio/chain/types.hpp | 8 ++++ .../eosio/chain_plugin/chain_plugin.hpp | 4 +- plugins/net_plugin/net_plugin.cpp | 9 +--- plugins/producer_plugin/producer_plugin.cpp | 2 +- 8 files changed, 54 insertions(+), 50 deletions(-) diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp index 245d228aaae..349da0a621c 100644 --- a/libraries/chain/controller.cpp +++ b/libraries/chain/controller.cpp @@ -107,6 +107,7 @@ struct building_block { }; struct assembled_block { + block_id_type _id; pending_block_header_state _pending_block_header_state; vector _trx_metas; signed_block_ptr _unsigned_block; @@ -1232,7 +1233,7 @@ struct controller_impl { guard_pending.cancel(); } /// start_block - signed_block_ptr finalize_block() + void finalize_block() { EOS_ASSERT( pending, block_validate_exception, "it is not valid to finalize when there 
is no pending block"); EOS_ASSERT( pending->_block_stage.contains(), block_validate_exception, "already called finalize_block"); @@ -1263,13 +1264,15 @@ struct controller_impl { block_ptr->transactions = std::move( bb._pending_trx_receipts ); + auto id = block_ptr->id(); + // Update TaPoS table: - create_block_summary( block_ptr->id() ); + create_block_summary( id ); /* ilog( "finalized block ${n} (${id}) at ${t} by ${p} (${signing_key}); schedule_version: ${v} lib: ${lib} #dtrxs: ${ndtrxs} ${np}", ("n",pbhs.block_num) - ("id",block_ptr->id()) + ("id",id) ("t",pbhs.timestamp) ("p",pbhs.producer) ("signing_key", pbhs.block_signing_key) @@ -1281,12 +1284,11 @@ struct controller_impl { */ pending->_block_stage = assembled_block{ + id, std::move( bb._pending_block_header_state ), std::move( bb._pending_trx_metas ), - block_ptr + std::move( block_ptr ) }; - - return block_ptr; } FC_CAPTURE_AND_RETHROW() } /// finalize_block /** @@ -1388,18 +1390,17 @@ struct controller_impl { ("producer_receipt", receipt)("validator_receipt", trx_receipts.back()) ); } - auto block_ptr = finalize_block(); + finalize_block(); - // this implicitly asserts that all header fields (less the signature) are identical - auto id = block_ptr->id(); - EOS_ASSERT( producer_block_id == id, block_validate_exception, "Block ID does not match", - ("producer_block_id",producer_block_id)("validator_block_id",id) ); + auto& ab = pending->_block_stage.get(); - auto&& ab = pending->_block_stage.get(); + // this implicitly asserts that all header fields (less the signature) are identical + EOS_ASSERT( producer_block_id == ab._id, block_validate_exception, "Block ID does not match", + ("producer_block_id",producer_block_id)("validator_block_id",ab._id) ); auto bsp = std::make_shared( std::move( ab._pending_block_header_state ), - b, + std::move( ab._unsigned_block ), std::move( ab._trx_metas ), true // signature should have already been verified (assuming untrusted) prior to apply_block ); @@ -1442,16 +1443,10 @@ struct controller_impl { trusted_producer_light_validation = old_value; }); try { - block_state_ptr new_header_state = block_state_future.get(); - auto& b = new_header_state->block; - emit( self.pre_accepted_block, b ); - - block_state_ptr bsp; + block_state_ptr bsp = block_state_future.get(); + const auto& b = bsp->block; - auto prior = fork_db.get_block_header( b->previous ); - EOS_ASSERT( prior, unlinkable_block_exception, - "unlinkable block", ("id", string(b->id()))("previous", string(b->previous)) ); - bsp = std::make_shared( *prior, b, false ); + emit( self.pre_accepted_block, b ); fork_db.add( bsp ); @@ -1891,13 +1886,13 @@ void controller::start_block( block_timestamp_type when, uint16_t confirm_block_ block_state_ptr controller::finalize_block( const std::function& signer_callback ) { validate_db_available_size(); - auto block_ptr = my->finalize_block(); + my->finalize_block(); auto& ab = my->pending->_block_stage.get(); auto bsp = std::make_shared( std::move( ab._pending_block_header_state ), - std::move( block_ptr ), + std::move( ab._unsigned_block ), std::move( ab._trx_metas ), signer_callback ); @@ -2227,7 +2222,7 @@ int64_t controller::set_proposed_producers( vector producers ) { int64_t version = sch.version; - wlog( "proposed producer schedule with version ${v}", ("v", version) ); + ilog( "proposed producer schedule with version ${v}", ("v", version) ); my->db.modify( gpo, [&]( auto& gp ) { gp.proposed_schedule_block_num = cur_block_num; diff --git a/libraries/chain/fork_database.cpp 
b/libraries/chain/fork_database.cpp index 88456b542fe..68238ccddda 100644 --- a/libraries/chain/fork_database.cpp +++ b/libraries/chain/fork_database.cpp @@ -43,7 +43,7 @@ namespace eosio { namespace chain { std::greater, std::greater, std::greater, - std::less + sha256_less > > > @@ -80,8 +80,10 @@ namespace eosio { namespace chain { uint32_t totem = 0; fc::raw::unpack( ds, totem ); EOS_ASSERT( totem == magic_number, fork_database_exception, - "Fork database file '${filename}' has unexpected magic number!", + "Fork database file '${filename}' has unexpected magic number: ${actual_totem}. Expected ${expected_totem}", ("filename", fork_db_dat.generic_string()) + ("actual_totem", totem) + ("expected_totem", magic_number) ); // validate version @@ -107,7 +109,7 @@ namespace eosio { namespace chain { fc::raw::unpack( ds, s ); for( const auto& receipt : s.block->transactions ) { if( receipt.trx.contains() ) { - auto& pt = receipt.trx.get(); + const auto& pt = receipt.trx.get(); s.trxs.push_back( std::make_shared( std::make_shared(pt) ) ); } } @@ -154,7 +156,7 @@ namespace eosio { namespace chain { std::ofstream out( fork_db_dat.generic_string().c_str(), std::ios::out | std::ios::binary | std::ofstream::trunc ); fc::raw::pack( out, magic_number ); - fc::raw::pack( out, max_supported_version ); + fc::raw::pack( out, max_supported_version ); // write out current version which is always max_supported_version fc::raw::pack( out, *static_cast(&*my->root) ); uint32_t num_blocks_in_fork_db = my->index.size(); fc::raw::pack( out, unsigned_int{num_blocks_in_fork_db} ); @@ -263,15 +265,15 @@ namespace eosio { namespace chain { return block_header_state_ptr(); } - void fork_database::add( block_state_ptr n ) { + void fork_database::add( const block_state_ptr& n ) { EOS_ASSERT( my->root, fork_database_exception, "root not yet set" ); EOS_ASSERT( n, fork_database_exception, "attempt to add null block state" ); EOS_ASSERT( get_block_header( n->header.previous ), unlinkable_block_exception, - "unlinkable block", ("id", string(n->id))("previous", string(n->header.previous)) ); + "unlinkable block", ("id", n->id)("previous", n->header.previous) ); auto inserted = my->index.insert(n); - EOS_ASSERT( inserted.second, fork_database_exception, "duplicate block added" ); + EOS_ASSERT( inserted.second, fork_database_exception, "duplicate block added", ("id", n->id) ); auto candidate = my->index.get().begin(); if( (*candidate)->is_valid() ) { @@ -328,14 +330,18 @@ namespace eosio { namespace chain { { result.first.push_back(first_branch); first_branch = get_block( first_branch->header.previous ); - EOS_ASSERT( first_branch, fork_db_block_not_found, "block ${id} does not exist", ("id", string(first_branch->header.previous)) ); + EOS_ASSERT( first_branch, fork_db_block_not_found, + "block ${id} does not exist", + ("id", first_branch->header.previous) ); } while( second_branch->block_num > first_branch->block_num ) { result.second.push_back( second_branch ); second_branch = get_block( second_branch->header.previous ); - EOS_ASSERT( second_branch, fork_db_block_not_found, "block ${id} does not exist", ("id", string(second_branch->header.previous)) ); + EOS_ASSERT( second_branch, fork_db_block_not_found, + "block ${id} does not exist", + ("id", second_branch->header.previous) ); } while( first_branch->header.previous != second_branch->header.previous ) @@ -346,7 +352,9 @@ namespace eosio { namespace chain { second_branch = get_block( second_branch->header.previous ); EOS_ASSERT( first_branch && second_branch, 
fork_db_block_not_found, "either block ${fid} or ${sid} does not exist", - ("fid", string(first_branch->header.previous))("sid", string(second_branch->header.previous)) ); + ("fid", first_branch->header.previous) + ("sid", second_branch->header.previous) + ); } if( first_branch && second_branch ) @@ -360,7 +368,7 @@ namespace eosio { namespace chain { /// remove all of the invalid forks built off of this id including this id void fork_database::remove( const block_id_type& id ) { vector remove_queue{id}; - auto& previdx = my->index.get(); + const auto& previdx = my->index.get(); const auto head_id = my->head->id; for( uint32_t i = 0; i < remove_queue.size(); ++i ) { diff --git a/libraries/chain/include/eosio/chain/block_header_state.hpp b/libraries/chain/include/eosio/chain/block_header_state.hpp index 4f4215cb8bf..2ec87664c28 100644 --- a/libraries/chain/include/eosio/chain/block_header_state.hpp +++ b/libraries/chain/include/eosio/chain/block_header_state.hpp @@ -61,7 +61,7 @@ struct block_header_state : public detail::block_header_state_common { block_header_state() = default; - block_header_state( detail::block_header_state_common&& base ) + explicit block_header_state( detail::block_header_state_common&& base ) :detail::block_header_state_common( std::move(base) ) {} diff --git a/libraries/chain/include/eosio/chain/fork_database.hpp b/libraries/chain/include/eosio/chain/fork_database.hpp index ed025671383..8e4d9176431 100644 --- a/libraries/chain/include/eosio/chain/fork_database.hpp +++ b/libraries/chain/include/eosio/chain/fork_database.hpp @@ -22,7 +22,7 @@ namespace eosio { namespace chain { class fork_database { public: - fork_database( const fc::path& data_dir ); + explicit fork_database( const fc::path& data_dir ); ~fork_database(); void close(); @@ -45,7 +45,7 @@ namespace eosio { namespace chain { * Add block state to fork database. * Must link to existing block in fork database or the root. 
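       *
       * A minimal usage sketch (illustrative only; `data_dir` and `bsp` are
       * assumed to be supplied by the caller, with `bsp` linking to a block
       * already in the fork database or to its root):
       *
       *    fork_database fdb( data_dir ); // root must be set (via open/reset)
       *    fdb.add( bsp );                // throws unlinkable_block_exception
       *                                   // if bsp->header.previous is unknown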
*/ - void add( block_state_ptr next_block ); + void add( const block_state_ptr& next_block ); void remove( const block_id_type& id ); diff --git a/libraries/chain/include/eosio/chain/types.hpp b/libraries/chain/include/eosio/chain/types.hpp index 21fbf216c43..8ee6827efe2 100644 --- a/libraries/chain/include/eosio/chain/types.hpp +++ b/libraries/chain/include/eosio/chain/types.hpp @@ -207,6 +207,14 @@ namespace eosio { namespace chain { using uint128_t = unsigned __int128; using bytes = vector; + struct sha256_less { + bool operator()( const fc::sha256& lhs, const fc::sha256& rhs ) const { + return + std::tie(lhs._hash[0], lhs._hash[1], lhs._hash[2], lhs._hash[3]) < + std::tie(rhs._hash[0], rhs._hash[1], rhs._hash[2], rhs._hash[3]); + } + }; + /** * Extentions are prefixed with type and are a buffer that can be diff --git a/plugins/chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp b/plugins/chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp index 2052f712cb3..9fad2e2c7b6 100644 --- a/plugins/chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp +++ b/plugins/chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp @@ -104,8 +104,8 @@ class read_only { //string recent_slots; //double participation_rate = 0; optional server_version_string; - uint32_t fork_db_head_block_num = 0; - chain::block_id_type fork_db_head_block_id; + optional fork_db_head_block_num = 0; + optional fork_db_head_block_id; }; get_info_results get_info(const get_info_params&) const; diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 596f7289b70..3204793b701 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -45,6 +45,7 @@ namespace eosio { using fc::time_point; using fc::time_point_sec; using eosio::chain::transaction_id_type; + using eosio::chain::sha256_less; class connection; @@ -66,14 +67,6 @@ namespace eosio { struct by_expiry; struct by_block_num; - struct sha256_less { - bool operator()( const sha256& lhs, const sha256& rhs ) const { - return - std::tie(lhs._hash[0], lhs._hash[1], lhs._hash[2], lhs._hash[3]) < - std::tie(rhs._hash[0], rhs._hash[1], rhs._hash[2], rhs._hash[3]); - } - }; - typedef multi_index_container< node_transaction_state, indexed_by< diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp index df63feb8b35..e7d0600deac 100644 --- a/plugins/producer_plugin/producer_plugin.cpp +++ b/plugins/producer_plugin/producer_plugin.cpp @@ -1444,7 +1444,7 @@ void producer_plugin_impl::schedule_production_loop() { } } ) ); } else if (_pending_block_mode == pending_block_mode::speculating && !_producers.empty() && !production_disabled_by_policy()){ - fc_dlog(_log, "Specualtive Block Created; Scheduling Speculative/Production Change"); + fc_dlog(_log, "Speculative Block Created; Scheduling Speculative/Production Change"); EOS_ASSERT( chain.is_building_block(), missing_pending_block_state, "speculating without pending_block_state" ); schedule_delayed_production_loop(weak_this, chain.pending_block_time()); } else { From 8dc5a3aa8f7cc4e5a4667dea6d67553d8337952f Mon Sep 17 00:00:00 2001 From: arhag Date: Mon, 28 Jan 2019 10:29:03 -0500 Subject: [PATCH 012/680] use faster sha256_less for unapplied_transactions map --- libraries/chain/controller.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp index 349da0a621c..b2a390fe648 100644 --- a/libraries/chain/controller.cpp +++ 
b/libraries/chain/controller.cpp @@ -193,7 +193,7 @@ struct controller_impl { * are removed from this list if they are re-applied in other blocks. Producers * can query this list when scheduling new transactions into blocks. */ - map unapplied_transactions; + map unapplied_transactions; void pop_block() { auto prev = fork_db.get_block( head->header.previous ); From 57992927e1b3e631dfe850f0a61d0f89b03e1309 Mon Sep 17 00:00:00 2001 From: arhag Date: Wed, 30 Jan 2019 17:53:33 -0500 Subject: [PATCH 013/680] initial work on #6429 --- libraries/chain/CMakeLists.txt | 2 + libraries/chain/block_header.cpp | 39 +++ libraries/chain/block_header_state.cpp | 131 ++++++-- libraries/chain/block_state.cpp | 15 +- libraries/chain/controller.cpp | 311 ++++++++++++++++-- libraries/chain/fork_database.cpp | 64 +++- .../include/eosio/chain/block_header.hpp | 54 +++ .../eosio/chain/block_header_state.hpp | 54 ++- .../chain/include/eosio/chain/block_state.hpp | 9 + .../chain/include/eosio/chain/controller.hpp | 18 +- .../chain/include/eosio/chain/exceptions.hpp | 10 +- .../include/eosio/chain/fork_database.hpp | 3 + .../eosio/chain/global_property_object.hpp | 5 +- .../chain/protocol_feature_activation.hpp | 39 +++ .../eosio/chain/protocol_feature_manager.hpp | 139 ++++++++ .../chain/protocol_feature_activation.cpp | 83 +++++ libraries/chain/protocol_feature_manager.cpp | 237 +++++++++++++ 17 files changed, 1126 insertions(+), 87 deletions(-) create mode 100644 libraries/chain/include/eosio/chain/protocol_feature_activation.hpp create mode 100644 libraries/chain/include/eosio/chain/protocol_feature_manager.hpp create mode 100644 libraries/chain/protocol_feature_activation.cpp create mode 100644 libraries/chain/protocol_feature_manager.cpp diff --git a/libraries/chain/CMakeLists.txt b/libraries/chain/CMakeLists.txt index 2c430fecea0..52b4181a030 100644 --- a/libraries/chain/CMakeLists.txt +++ b/libraries/chain/CMakeLists.txt @@ -47,6 +47,8 @@ add_library( eosio_chain transaction_metadata.cpp + protocol_feature_activation.cpp + protocol_feature_manager.cpp ${HEADERS} ) diff --git a/libraries/chain/block_header.cpp b/libraries/chain/block_header.cpp index d623406fd25..692089dc9e6 100644 --- a/libraries/chain/block_header.cpp +++ b/libraries/chain/block_header.cpp @@ -28,5 +28,44 @@ namespace eosio { namespace chain { return result; } + vector block_header::validate_and_extract_header_extensions()const { + using block_header_extensions_t = block_header_extension_types::block_header_extensions_t; + using decompose_t = block_header_extension_types::decompose_t; + + static_assert( std::is_same::value, + "block_header_extensions is not setup as expected" ); + + vector results; + + uint16_t id_type_lower_bound = 0; + + for( size_t i = 0; i < header_extensions.size(); ++i ) { + const auto& e = header_extensions[i]; + auto id = e.first; + + EOS_ASSERT( id >= id_type_lower_bound, invalid_block_header_extension, + "Block header extensions are not in the correct order (ascending id types required)" + ); + + results.emplace_back(); + + auto match = decompose_t::extract( id, e.second, results.back() ); + EOS_ASSERT( match, invalid_block_header_extension, + "Block header extension with id type ${id} is not supported", + ("id", id) + ); + + if( match->enforce_unique ) { + EOS_ASSERT( i == 0 || id > id_type_lower_bound, invalid_block_header_extension, + "Block header extension with id type ${id} is not allowed to repeat", + ("id", id) + ); + } + + id_type_lower_bound = id; + } + + return results; + } } } diff --git 
a/libraries/chain/block_header_state.cpp b/libraries/chain/block_header_state.cpp index 3f3fefd15dd..569a470c0df 100644 --- a/libraries/chain/block_header_state.cpp +++ b/libraries/chain/block_header_state.cpp @@ -54,6 +54,7 @@ namespace eosio { namespace chain { result.timestamp = when; result.confirmed = num_prev_blocks_to_confirm; result.active_schedule_version = active_schedule.version; + result.prev_activated_protocol_features = activated_protocol_features; result.block_signing_key = prokey.block_signing_key; result.producer = prokey.producer_name; @@ -161,9 +162,12 @@ namespace eosio { namespace chain { return result; } - signed_block_header pending_block_header_state::make_block_header( const checksum256_type& transaction_mroot, - const checksum256_type& action_mroot, - optional&& new_producers )const + signed_block_header pending_block_header_state::make_block_header( + const checksum256_type& transaction_mroot, + const checksum256_type& action_mroot, + optional&& new_producers, + vector&& new_protocol_feature_activations + )const { signed_block_header h; @@ -176,10 +180,22 @@ namespace eosio { namespace chain { h.schedule_version = active_schedule_version; h.new_producers = std::move(new_producers); + if( new_protocol_feature_activations.size() > 0 ) { + h.header_extensions.emplace_back( + protocol_feature_activation::extension_id(), + fc::raw::pack( protocol_feature_activation{ std::move(new_protocol_feature_activations) } ) + ); + } + return h; } - block_header_state pending_block_header_state::_finish_next( const signed_block_header& h )&& + block_header_state pending_block_header_state::_finish_next( + const signed_block_header& h, + const std::function&, + const vector& )>& validator + )&& { EOS_ASSERT( h.timestamp == timestamp, block_validate_exception, "timestamp mismatch" ); EOS_ASSERT( h.previous == previous, unlinkable_block_exception, "previous mismatch" ); @@ -187,8 +203,6 @@ namespace eosio { namespace chain { EOS_ASSERT( h.producer == producer, wrong_producer, "wrong producer specified" ); EOS_ASSERT( h.schedule_version == active_schedule_version, producer_schedule_exception, "schedule_version in signed block is corrupted" ); - EOS_ASSERT( h.header_extensions.size() == 0, block_validate_exception, "no supported extensions" ); - if( h.new_producers ) { EOS_ASSERT( !was_pending_promoted, producer_schedule_exception, "cannot set pending producer schedule in the same block in which pending was promoted to active" ); EOS_ASSERT( h.new_producers->version == active_schedule.version + 1, producer_schedule_exception, "wrong producer schedule version specified" ); @@ -196,6 +210,23 @@ namespace eosio { namespace chain { "cannot set new pending producers until last pending is confirmed" ); } + protocol_feature_activation_set_ptr new_activated_protocol_features; + + auto exts = h.validate_and_extract_header_extensions(); + { + if( exts.size() > 0 ) { + auto& new_protocol_features = exts.front().get().protocol_features; + validator( timestamp, prev_activated_protocol_features->protocol_features, new_protocol_features ); + + new_activated_protocol_features = std::make_shared( + *prev_activated_protocol_features, + std::move( new_protocol_features ) + ); + } else { + new_activated_protocol_features = std::move( prev_activated_protocol_features ); + } + } + auto block_number = block_num; block_header_state result( std::move( *static_cast(this) ) ); @@ -203,6 +234,8 @@ namespace eosio { namespace chain { result.id = h.id(); result.header = h; + result.header_exts = std::move(exts); + 
if( h.new_producers ) { result.pending_schedule.schedule = *h.new_producers; result.pending_schedule.schedule_hash = digest_type::hash( result.pending_schedule ); @@ -217,13 +250,20 @@ namespace eosio { namespace chain { result.pending_schedule.schedule_lib_num = prev_pending_schedule.schedule_lib_num; } + result.activated_protocol_features = std::move( new_activated_protocol_features ); + return result; } - block_header_state pending_block_header_state::finish_next( const signed_block_header& h, - bool skip_validate_signee )&& + block_header_state pending_block_header_state::finish_next( + const signed_block_header& h, + const std::function&, + const vector& )>& validator, + bool skip_validate_signee + )&& { - auto result = std::move(*this)._finish_next( h ); + auto result = std::move(*this)._finish_next( h, validator ); // ASSUMPTION FROM controller_impl::apply_block = all untrusted blocks will have their signatures pre-validated here if( !skip_validate_signee ) { @@ -233,10 +273,15 @@ namespace eosio { namespace chain { return result; } - block_header_state pending_block_header_state::finish_next( signed_block_header& h, - const std::function& signer )&& + block_header_state pending_block_header_state::finish_next( + signed_block_header& h, + const std::function&, + const vector& )>& validator, + const std::function& signer + )&& { - auto result = std::move(*this)._finish_next( h ); + auto result = std::move(*this)._finish_next( h, validator ); result.sign( signer ); h.producer_signature = result.header.producer_signature; return result; @@ -250,28 +295,48 @@ namespace eosio { namespace chain { * * If the header specifies new_producers then apply them accordingly. */ - block_header_state block_header_state::next( const signed_block_header& h, bool skip_validate_signee )const { - return next( h.timestamp, h.confirmed ).finish_next( h, skip_validate_signee ); + block_header_state block_header_state::next( + const signed_block_header& h, + const std::function&, + const vector& )>& validator, + bool skip_validate_signee )const + { + return next( h.timestamp, h.confirmed ).finish_next( h, validator, skip_validate_signee ); } - digest_type block_header_state::sig_digest()const { - auto header_bmroot = digest_type::hash( std::make_pair( header.digest(), blockroot_merkle.get_root() ) ); - return digest_type::hash( std::make_pair(header_bmroot, pending_schedule.schedule_hash) ); - } - - void block_header_state::sign( const std::function& signer ) { - auto d = sig_digest(); - header.producer_signature = signer( d ); - EOS_ASSERT( block_signing_key == fc::crypto::public_key( header.producer_signature, d ), wrong_signing_key, "block is signed with unexpected key" ); - } - - public_key_type block_header_state::signee()const { - return fc::crypto::public_key( header.producer_signature, sig_digest(), true ); - } - - void block_header_state::verify_signee( const public_key_type& signee )const { - EOS_ASSERT( block_signing_key == signee, wrong_signing_key, "block not signed by expected key", - ("block_signing_key", block_signing_key)( "signee", signee ) ); - } + digest_type block_header_state::sig_digest()const { + auto header_bmroot = digest_type::hash( std::make_pair( header.digest(), blockroot_merkle.get_root() ) ); + return digest_type::hash( std::make_pair(header_bmroot, pending_schedule.schedule_hash) ); + } + + void block_header_state::sign( const std::function& signer ) { + auto d = sig_digest(); + header.producer_signature = signer( d ); + EOS_ASSERT( block_signing_key == 
fc::crypto::public_key( header.producer_signature, d ), + wrong_signing_key, "block is signed with unexpected key" ); + } + + public_key_type block_header_state::signee()const { + return fc::crypto::public_key( header.producer_signature, sig_digest(), true ); + } + + void block_header_state::verify_signee( const public_key_type& signee )const { + EOS_ASSERT( block_signing_key == signee, wrong_signing_key, + "block not signed by expected key", + ("block_signing_key", block_signing_key)( "signee", signee ) ); + } + + /** + * Reference cannot outlive *this. Assumes header_exts is not mutated after instatiation. + */ + const vector& block_header_state::get_new_protocol_feature_activations()const { + static const vector no_activations{}; + + if( header_exts.size() == 0 || !header_exts.front().contains() ) + return no_activations; + + return header_exts.front().get().protocol_features; + } } } /// namespace eosio::chain diff --git a/libraries/chain/block_state.cpp b/libraries/chain/block_state.cpp index ecd7fceefc8..3a246038149 100644 --- a/libraries/chain/block_state.cpp +++ b/libraries/chain/block_state.cpp @@ -5,18 +5,24 @@ namespace eosio { namespace chain { block_state::block_state( const block_header_state& prev, signed_block_ptr b, + const std::function&, + const vector& )>& validator, bool skip_validate_signee ) - :block_header_state( prev.next( *b, skip_validate_signee ) ) + :block_header_state( prev.next( *b, validator, skip_validate_signee ) ) ,block( std::move(b) ) {} block_state::block_state( pending_block_header_state&& cur, signed_block_ptr&& b, vector&& trx_metas, + const std::function&, + const vector& )>& validator, const std::function& signer ) - :block_header_state( std::move(cur).finish_next( *b, signer ) ) + :block_header_state( std::move(cur).finish_next( *b, validator, signer ) ) ,block( std::move(b) ) ,trxs( std::move(trx_metas) ) {} @@ -25,9 +31,12 @@ namespace eosio { namespace chain { block_state::block_state( pending_block_header_state&& cur, const signed_block_ptr& b, vector&& trx_metas, + const std::function&, + const vector& )>& validator, bool skip_validate_signee ) - :block_header_state( std::move(cur).finish_next( *b, skip_validate_signee ) ) + :block_header_state( std::move(cur).finish_next( *b, validator, skip_validate_signee ) ) ,block( b ) ,trxs( std::move(trx_metas) ) {} diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp index b2a390fe648..ff7ba474cee 100644 --- a/libraries/chain/controller.cpp +++ b/libraries/chain/controller.cpp @@ -14,6 +14,7 @@ #include #include +#include #include #include #include @@ -95,12 +96,18 @@ class maybe_session { }; struct building_block { - building_block( const block_header_state& prev, block_timestamp_type when, uint16_t num_prev_blocks_to_confirm ) + building_block( const block_header_state& prev, + block_timestamp_type when, + uint16_t num_prev_blocks_to_confirm, + const vector& new_protocol_feature_activations ) :_pending_block_header_state( prev.next( when, num_prev_blocks_to_confirm ) ) + ,_new_protocol_feature_activations( new_protocol_feature_activations ) {} pending_block_header_state _pending_block_header_state; optional _new_pending_producer_schedule; + vector _new_protocol_feature_activations; + size_t _num_new_protocol_features_that_have_activated = 0; vector _pending_trx_metas; vector _pending_trx_receipts; vector _actions; @@ -121,9 +128,11 @@ using block_stage_type = fc::static_variant& new_protocol_feature_activations ) :_db_session( move(s) ) - ,_block_stage( building_block( prev, 
when, num_prev_blocks_to_confirm ) ) + ,_block_stage( building_block( prev, when, num_prev_blocks_to_confirm, new_protocol_feature_activations ) ) {} maybe_session _db_session; @@ -159,6 +168,30 @@ struct pending_state { return _block_stage.get()._block_state->trxs; } + bool is_protocol_feature_activated( const digest_type& feature_digest )const { + if( _block_stage.contains() ) { + auto& bb = _block_stage.get(); + const auto& activated_features = bb._pending_block_header_state.prev_activated_protocol_features->protocol_features; + + if( activated_features.find( feature_digest ) != activated_features.end() ) return true; + + auto end = bb._new_protocol_feature_activations.begin() + bb._num_new_protocol_features_that_have_activated; + return (std::find( bb._new_protocol_feature_activations.begin(), end, feature_digest ) != end); + } + + if( _block_stage.contains() ) { + // Calling is_protocol_feature_activated during the assembled_block stage is not efficient. + // We should avoid doing it. + // In fact for now it isn't even implemented. + EOS_THROW( misc_exception, + "checking if protocol feature is activated in the assembled_block stage is not yet supported" ); + // TODO: implement this + } + + const auto& activated_features = _block_stage.get()._block_state->activated_protocol_features->protocol_features; + return (activated_features.find( feature_digest ) != activated_features.end()); + } + void push() { _db_session.push(); } @@ -175,6 +208,7 @@ struct controller_impl { wasm_interface wasmif; resource_limits_manager resource_limits; authorization_manager authorization; + protocol_feature_manager protocol_features; controller::config conf; chain_id_type chain_id; optional replay_head_time; @@ -187,6 +221,7 @@ struct controller_impl { typedef pair handler_key; map< account_name, map > apply_handlers; + map< builtin_protocol_feature_t, std::function > protocol_feature_activation_handlers; /** * Transactions that were undone by pop_block or abort_block, transactions @@ -216,8 +251,24 @@ struct controller_impl { head = prev; db.undo(); + + protocol_features.popped_blocks_to( prev->block_num ); } + template + void on_activation(); + + template + inline void set_activation_handler() { + auto res = protocol_feature_activation_handlers.emplace( F, &controller_impl::on_activation ); + EOS_ASSERT( res.second, misc_exception, "attempting to set activation handler twice" ); + } + + inline void trigger_activation_handler( builtin_protocol_feature_t f ) { + auto itr = protocol_feature_activation_handlers.find( f ); + if( itr == protocol_feature_activation_handlers.end() ) return; + (itr->second)( *this ); + } void set_apply_handler( account_name receiver, account_name contract, action_name action, apply_handler v ) { apply_handlers[receiver][make_pair(contract,action)] = v; @@ -242,6 +293,15 @@ struct controller_impl { thread_pool( cfg.thread_pool_size ) { + fork_db.open( [this]( block_timestamp_type timestamp, + const flat_set& cur_features, + const vector& new_features ) + { check_protocol_features( timestamp, cur_features, new_features ); } + ); + + set_activation_handler(); + + #define SET_APP_HANDLER( receiver, contract, action) \ set_apply_handler( #receiver, #contract, #action, &BOOST_PP_CAT(apply_, BOOST_PP_CAT(contract, BOOST_PP_CAT(_,action) ) ) ) @@ -326,7 +386,7 @@ struct controller_impl { for( auto bitr = branch.rbegin(); bitr != branch.rend(); ++bitr ) { if( read_mode == db_read_mode::IRREVERSIBLE ) { - apply_block( (*bitr)->block, controller::block_status::complete ); + apply_block( 
*bitr, controller::block_status::complete ); head = (*bitr); fork_db.mark_valid( head ); } @@ -376,6 +436,7 @@ struct controller_impl { head = std::make_shared(); static_cast(*head) = genheader; + head->activated_protocol_features = std::make_shared(); head->block = std::make_shared(genheader.header); fork_db.reset( *head ); db.set_revision( head->block_num ); @@ -1158,8 +1219,10 @@ struct controller_impl { } FC_CAPTURE_AND_RETHROW((trace)) } /// push_transaction - - void start_block( block_timestamp_type when, uint16_t confirm_block_count, controller::block_status s, + void start_block( block_timestamp_type when, + uint16_t confirm_block_count, + const vector& new_protocol_feature_activations, + controller::block_status s, const optional& producer_block_id ) { EOS_ASSERT( !pending, block_validate_exception, "pending block already exists" ); @@ -1172,20 +1235,61 @@ struct controller_impl { EOS_ASSERT( db.revision() == head->block_num, database_exception, "db revision is not on par with head block", ("db.revision()", db.revision())("controller_head_block", head->block_num)("fork_db_head_block", fork_db.head()->block_num) ); - pending.emplace( maybe_session(db), *head, when, confirm_block_count ); + pending.emplace( maybe_session(db), *head, when, confirm_block_count, new_protocol_feature_activations ); } else { - pending.emplace( maybe_session(), *head, when, confirm_block_count ); + pending.emplace( maybe_session(), *head, when, confirm_block_count, new_protocol_feature_activations ); } pending->_block_status = s; pending->_producer_block_id = producer_block_id; - const auto& pbhs = pending->get_pending_block_header_state(); + auto& bb = pending->_block_stage.get(); + const auto& pbhs = bb._pending_block_header_state; - //modify state in speculative block only if we are speculative reads mode (other wise we need clean state for head or irreversible reads) + // modify state of speculative block only if we are in speculative read mode (otherwise we need clean state for head or read-only modes) if ( read_mode == db_read_mode::SPECULATIVE || pending->_block_status != controller::block_status::incomplete ) { const auto& gpo = db.get(); + + bool handled_all_preactivated_features = (gpo.preactivated_protocol_features.size() == 0); + + if( new_protocol_feature_activations.size() > 0 ) { + flat_map preactivated_protocol_features; + preactivated_protocol_features.reserve( gpo.preactivated_protocol_features.size() ); + for( const auto& feature_digest : gpo.preactivated_protocol_features ) { + preactivated_protocol_features.emplace( feature_digest, false ); + } + + size_t num_preactivated_features_that_have_activated = 0; + + for( const auto& feature_digest : new_protocol_feature_activations ) { + const auto& f = protocol_features.get_protocol_feature( feature_digest ); + + if( f.preactivation_required ) { + auto itr = preactivated_protocol_features.find( feature_digest ); + if( itr != preactivated_protocol_features.end() && !itr->second ) { + itr->second = true; + ++num_preactivated_features_that_have_activated; + } + } + + if( f.builtin_feature ) { + trigger_activation_handler( *f.builtin_feature ); + protocol_features.activate_feature( feature_digest, pbhs.block_num ); + } + + ++bb._num_new_protocol_features_that_have_activated; + } + + if( num_preactivated_features_that_have_activated == gpo.preactivated_protocol_features.size() ) { + handled_all_preactivated_features = true; + } + } + + EOS_ASSERT( handled_all_preactivated_features, block_validate_exception, + "There are pre-activated protocol 
features that were not activated at the start of this block" + ); + if( gpo.proposed_schedule_block_num.valid() && // if there is a proposed schedule that was proposed in a block ... ( *gpo.proposed_schedule_block_num <= pbhs.dpos_irreversible_blocknum ) && // ... that has now become irreversible ... pbhs.prev_pending_schedule.schedule.producers.size() == 0 // ... and there was room for a new pending schedule prior to any possible promotion @@ -1206,6 +1310,13 @@ struct controller_impl { db.modify( gpo, [&]( auto& gp ) { gp.proposed_schedule_block_num = optional(); gp.proposed_schedule.clear(); + if( gp.preactivated_protocol_features.size() > 0 ) { + gp.preactivated_protocol_features.clear(); + } + }); + } else if( gpo.preactivated_protocol_features.size() > 0 ) { + db.modify( gpo, [&]( auto& gp ) { + gp.preactivated_protocol_features.clear(); }); } @@ -1259,7 +1370,8 @@ struct controller_impl { auto block_ptr = std::make_shared( pbhs.make_block_header( calculate_trx_merkle(), calculate_action_merkle(), - std::move( bb._new_pending_producer_schedule ) + std::move( bb._new_pending_producer_schedule ), + std::move( bb._new_protocol_feature_activations ) ) ); block_ptr->transactions = std::move( bb._pending_trx_receipts ); @@ -1336,11 +1448,75 @@ struct controller_impl { pending->push(); } - void apply_block( const signed_block_ptr& b, controller::block_status s ) { try { + /** + * This method is called from other threads. The controller_impl should outlive those threads. + * However, to avoid race conditions, it means that the behavior of this function should not change + * after controller_impl construction. + + * This should not be an issue since the purpose of this function is to ensure all of the protocol features + * in the supplied vector are recognized by the software, and the set of recognized protocol features is + * determined at startup and cannot be changed without a restart. 
+ */ + void check_protocol_features( block_timestamp_type timestamp, + const flat_set<digest_type>& currently_activated_protocol_features, + const vector<digest_type>& new_protocol_features ) + { + for( auto itr = new_protocol_features.begin(); itr != new_protocol_features.end(); ++itr ) { + const auto& f = *itr; + + auto status = protocol_features.is_recognized( f, timestamp ); + switch( status ) { + case protocol_feature_manager::recognized_t::unrecognized: + EOS_THROW( protocol_feature_exception, + "protocol feature with digest '${digest}' is unrecognized", ("digest", f) ); + break; + case protocol_feature_manager::recognized_t::disabled: + EOS_THROW( protocol_feature_exception, + "protocol feature with digest '${digest}' is disabled", ("digest", f) ); + break; + case protocol_feature_manager::recognized_t::too_early: + EOS_THROW( protocol_feature_exception, + "${timestamp} is too early for the earliest allowed activation time of the protocol feature with digest '${digest}'", ("digest", f)("timestamp", timestamp) ); + break; + case protocol_feature_manager::recognized_t::ready_if_preactivated: + case protocol_feature_manager::recognized_t::ready: + break; + default: + EOS_THROW( protocol_feature_exception, "unexpected recognized_t status" ); + break; + } + + EOS_ASSERT( currently_activated_protocol_features.find( f ) == currently_activated_protocol_features.end(), + protocol_feature_exception, + "protocol feature with digest '${digest}' has already been activated", + ("digest", f) + ); + + auto dependency_checker = [&currently_activated_protocol_features, &new_protocol_features, &itr] + ( const digest_type& f ) -> bool + { + if( currently_activated_protocol_features.find( f ) != currently_activated_protocol_features.end() ) + return true; + + return (std::find( new_protocol_features.begin(), itr, f ) != itr); + }; + + EOS_ASSERT( protocol_features.validate_dependencies( f, dependency_checker ), protocol_feature_exception, + "not all dependencies of protocol feature with digest '${digest}' have been activated", + ("digest", f) + ); + } + } + + void apply_block( const block_state_ptr& bsp, controller::block_status s ) + { try { try { - EOS_ASSERT( b->block_extensions.size() == 0, block_validate_exception, "no supported extensions" ); + const signed_block_ptr& b = bsp->block; + const auto& new_protocol_feature_activations = bsp->get_new_protocol_feature_activations(); + + EOS_ASSERT( b->block_extensions.size() == 0, block_validate_exception, "no supported block extensions" ); auto producer_block_id = b->id(); - start_block( b->timestamp, b->confirmed, s , producer_block_id); + start_block( b->timestamp, b->confirmed, new_protocol_feature_activations, s, producer_block_id); std::vector<transaction_metadata_ptr> packed_transactions; packed_transactions.reserve( b->transactions.size() ); @@ -1402,6 +1578,10 @@ struct controller_impl { std::move( ab._pending_block_header_state ), std::move( ab._unsigned_block ), std::move( ab._trx_metas ), + []( block_timestamp_type timestamp, + const flat_set<digest_type>& cur_features, + const vector<digest_type>& new_features ) + {}, // validation of any new protocol features should have already occurred prior to apply_block true // signature should have already been verified (assuming untrusted) prior to apply_block ); @@ -1429,9 +1609,17 @@ struct controller_impl { EOS_ASSERT( prev, unlinkable_block_exception, "unlinkable block ${id}", ("id", id)("previous", b->previous) ); - return async_thread_pool( thread_pool, [b, prev]() { + return async_thread_pool( thread_pool, [b, prev, control=this]() { const bool skip_validate_signee = false; - 
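// Editorial sketch of the dependency rule enforced by the dependency_checker above: a
// dependency digest is satisfied only if it is already activated, or is scheduled
// *earlier* in the same activation list. Self-contained model (plain ints stand in
// for digest_type):
#include <algorithm>
#include <set>
#include <vector>

using digest = int;

bool dependency_satisfied( const std::set<digest>& activated,
                           const std::vector<digest>& pending,           // new activations, in order
                           std::vector<digest>::const_iterator current,  // feature being checked
                           digest dep )
{
   if( activated.count( dep ) ) return true;
   return std::find( pending.begin(), current, dep ) != current;         // strictly before `current`
}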
return std::make_shared<block_state>( *prev, move( b ), skip_validate_signee ); + return std::make_shared<block_state>( + *prev, + move( b ), + [control]( block_timestamp_type timestamp, + const flat_set<digest_type>& cur_features, + const vector<digest_type>& new_features ) + { control->check_protocol_features( timestamp, cur_features, new_features ); }, + skip_validate_signee + ); } ); } @@ -1478,7 +1666,15 @@ struct controller_impl { emit( self.pre_accepted_block, b ); const bool skip_validate_signee = !conf.force_all_checks; - auto bsp = std::make_shared<block_state>( *head, b, skip_validate_signee ); + auto bsp = std::make_shared<block_state>( + *head, + b, + [this]( block_timestamp_type timestamp, + const flat_set<digest_type>& cur_features, + const vector<digest_type>& new_features ) + { check_protocol_features( timestamp, cur_features, new_features ); }, + skip_validate_signee + ); if( s != controller::block_status::irreversible ) { fork_db.add( bsp ); @@ -1487,7 +1683,7 @@ struct controller_impl { emit( self.accepted_block_header, bsp ); if( s == controller::block_status::irreversible ) { - apply_block( bsp->block, s ); + apply_block( bsp, s ); head = bsp; // On replay, log_irreversible is not called and so no irreversible_block signal is emitted. @@ -1506,7 +1702,7 @@ struct controller_impl { bool head_changed = true; if( new_head->header.previous == head->id ) { try { - apply_block( new_head->block, s ); + apply_block( new_head, s ); fork_db.mark_valid( new_head ); head = new_head; } catch ( const fc::exception& e ) { @@ -1528,7 +1724,8 @@ struct controller_impl { for( auto ritr = branches.first.rbegin(); ritr != branches.first.rend(); ++ritr ) { optional<fc::exception> except; try { - apply_block( *ritr, (*ritr)->is_valid() ? 
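// Editorial outline of the fork switch this hunk sits in (session/undo plumbing
// omitted; the branch pair is assumed to come from the fork database with each branch
// ordered newest-first, as the rbegin() iteration implies):
//   1. pop blocks back to the common ancestor of head and new_head;
//   2. apply the new branch (branches.first) oldest-first, marking each block valid;
//   3. if any block fails, remove it and its descendants from the fork database,
//      rewind again, and re-apply the previously validated branch (branches.second).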
controller::block_status::validated + : controller::block_status::complete ); fork_db.mark_valid( *ritr ); head = *ritr; } catch (const fc::exception& e) { @@ -1555,7 +1752,7 @@ struct controller_impl { // re-apply good blocks for( auto ritr = branches.second.rbegin(); ritr != branches.second.rend(); ++ritr ) { - apply_block( (*ritr)->block, controller::block_status::validated /* we previously validated these blocks*/ ); + apply_block( *ritr, controller::block_status::validated /* we previously validated these blocks*/ ); head = *ritr; } throw *except; @@ -1577,6 +1774,7 @@ struct controller_impl { unapplied_transactions[t->signed_id] = t; } pending.reset(); + protocol_features.popped_blocks_to( head->block_num ); } } @@ -1877,10 +2075,46 @@ chainbase::database& controller::mutable_db()const { return my->db; } const fork_database& controller::fork_db()const { return my->fork_db; } +vector controller::get_preactivated_protocol_features()const { + const auto& gpo = my->db.get(); + + if( gpo.preactivated_protocol_features.size() == 0 ) return {}; + + vector preactivated_protocol_features; + + for( const auto& f : gpo.preactivated_protocol_features ) { + preactivated_protocol_features.emplace_back( f ); + } + + return preactivated_protocol_features; +} + +void controller::start_block( block_timestamp_type when, uint16_t confirm_block_count ) +{ + validate_db_available_size(); + + EOS_ASSERT( !my->pending, block_validate_exception, "pending block already exists" ); + + vector new_protocol_feature_activations; + + const auto& gpo = my->db.get(); + if( gpo.preactivated_protocol_features.size() > 0 ) { + for( const auto& f : gpo.preactivated_protocol_features ) { + new_protocol_feature_activations.emplace_back( f ); + } + } -void controller::start_block( block_timestamp_type when, uint16_t confirm_block_count) { + my->start_block( when, confirm_block_count, new_protocol_feature_activations, + block_status::incomplete, optional() ); +} + +void controller::start_block( block_timestamp_type when, + uint16_t confirm_block_count, + const vector& new_protocol_feature_activations ) +{ validate_db_available_size(); - my->start_block(when, confirm_block_count, block_status::incomplete, optional() ); + my->start_block( when, confirm_block_count, new_protocol_feature_activations, + block_status::incomplete, optional() ); } block_state_ptr controller::finalize_block( const std::function& signer_callback ) { @@ -1894,6 +2128,10 @@ block_state_ptr controller::finalize_block( const std::function& cur_features, + const vector& new_features ) + { control->check_protocol_features( timestamp, cur_features, new_features ); }, signer_callback ); @@ -2454,6 +2692,24 @@ void controller::validate_reversible_available_size() const { EOS_ASSERT(free >= guard, reversible_guard_exception, "reversible free: ${f}, guard size: ${g}", ("f", free)("g",guard)); } +bool controller::is_protocol_feature_activated( const digest_type& feature_digest )const { + if( my->pending ) + return my->pending->is_protocol_feature_activated( feature_digest ); + + const auto& activated_features = my->head->activated_protocol_features->protocol_features; + return (activated_features.find( feature_digest ) != activated_features.end()); +} + +bool controller::is_builtin_activated( builtin_protocol_feature_t f )const { + uint32_t current_block_num = head_block_num(); + + if( my->pending ) { + ++current_block_num; + } + + my->protocol_features.is_builtin_activated( f, current_block_num ); +} + bool controller::is_known_unexpired_transaction( const 
transaction_id_type& id) const { return db().find(id); } @@ -2478,4 +2734,13 @@ const flat_set &controller::get_resource_greylist() const { return my->conf.resource_greylist; } +/// Protocol feature activation handlers: + +template<> +void controller_impl::on_activation() { + +} + +/// End of protocol feature activation handlers + } } /// eosio::chain diff --git a/libraries/chain/fork_database.cpp b/libraries/chain/fork_database.cpp index 68238ccddda..f021dd892a6 100644 --- a/libraries/chain/fork_database.cpp +++ b/libraries/chain/fork_database.cpp @@ -55,16 +55,34 @@ namespace eosio { namespace chain { } struct fork_database_impl { + fork_database_impl( fork_database& self, const fc::path& data_dir ) + :self(self) + ,datadir(data_dir) + {} + + fork_database& self; fork_multi_index_type index; block_state_ptr root; // Only uses the block_header_state portion block_state_ptr head; fc::path datadir; + + void add( const block_state_ptr& n, + bool validate, + const std::function&, + const vector& )>& validator ); }; - fork_database::fork_database( const fc::path& data_dir ):my( new fork_database_impl() ) { - my->datadir = data_dir; + fork_database::fork_database( const fc::path& data_dir ) + :my( new fork_database_impl( *this, data_dir ) ) + {} + + void fork_database::open( const std::function&, + const vector& )>& validator ) + { if (!fc::is_directory(my->datadir)) fc::create_directories(my->datadir); @@ -113,7 +131,8 @@ namespace eosio { namespace chain { s.trxs.push_back( std::make_shared( std::make_shared(pt) ) ); } } - add( std::make_shared( move( s ) ) ); + s.header_exts = s.block->validate_and_extract_header_extensions(); + my->add( std::make_shared( move( s ) ), true, validator ); } block_id_type head_id; fc::raw::unpack( ds, head_id ); @@ -265,22 +284,49 @@ namespace eosio { namespace chain { return block_header_state_ptr(); } - void fork_database::add( const block_state_ptr& n ) { - EOS_ASSERT( my->root, fork_database_exception, "root not yet set" ); + void fork_database_impl::add( const block_state_ptr& n, + bool validate, + const std::function&, + const vector& )>& validator ) + { + EOS_ASSERT( root, fork_database_exception, "root not yet set" ); EOS_ASSERT( n, fork_database_exception, "attempt to add null block state" ); - EOS_ASSERT( get_block_header( n->header.previous ), unlinkable_block_exception, + auto prev_bh = self.get_block_header( n->header.previous ); + + EOS_ASSERT( prev_bh, unlinkable_block_exception, "unlinkable block", ("id", n->id)("previous", n->header.previous) ); - auto inserted = my->index.insert(n); + if( validate ) { + try { + auto exts = n->block->validate_and_extract_header_extensions(); + + if( exts.size() > 0 ) { + auto& new_protocol_features = exts.front().get().protocol_features; + validator( n->header.timestamp, prev_bh->activated_protocol_features->protocol_features, new_protocol_features ); + } + } EOS_RETHROW_EXCEPTIONS( fork_database_exception, "serialized fork database is incompatible with configured protocol features" ) + } + + auto inserted = index.insert(n); EOS_ASSERT( inserted.second, fork_database_exception, "duplicate block added", ("id", n->id) ); - auto candidate = my->index.get().begin(); + auto candidate = index.get().begin(); if( (*candidate)->is_valid() ) { - my->head = *candidate; + head = *candidate; } } + void fork_database::add( const block_state_ptr& n ) { + my->add( n, false, + []( block_timestamp_type timestamp, + const flat_set& cur_features, + const vector& new_features ) + {} + ); + } + const block_state_ptr& 
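// Minimal model (hypothetical `node`/`store` types, strings for ids, root assumed
// pre-seeded into the index) of the linking discipline fork_database_impl::add
// enforces above: a block may only be inserted if its parent is already known, and
// duplicate ids are rejected:
#include <map>
#include <stdexcept>
#include <string>

struct node { std::string id, previous; };

struct store {
   std::map<std::string, node> index;

   void add( const node& n ) {
      if( index.find( n.previous ) == index.end() )
         throw std::runtime_error( "unlinkable block " + n.id );      // parent must be known
      if( !index.emplace( n.id, n ).second )
         throw std::runtime_error( "duplicate block added: " + n.id );
   }
};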
fork_database::root()const { return my->root; } const block_state_ptr& fork_database::head()const { return my->head; } diff --git a/libraries/chain/include/eosio/chain/block_header.hpp b/libraries/chain/include/eosio/chain/block_header.hpp index 2849ee00f31..adbdb7d3def 100644 --- a/libraries/chain/include/eosio/chain/block_header.hpp +++ b/libraries/chain/include/eosio/chain/block_header.hpp @@ -1,9 +1,61 @@ #pragma once #include #include +#include + +#include namespace eosio { namespace chain { + namespace detail { + struct extract_match { + bool enforce_unique = false; + }; + + template + struct decompose; + + template<> + struct decompose<> { + template + static auto extract( uint16_t id, const vector& data, ResultVariant& result ) + -> fc::optional + { + return {}; + } + }; + + template + struct decompose { + using head_t = T; + using tail_t = decompose< Rest... >; + + template + static auto extract( uint16_t id, const vector& data, ResultVariant& result ) + -> fc::optional + { + if( id == head_t::extension_id() ) { + result = fc::raw::unpack( data ); + return { extract_match{ head_t::enforce_unique() } }; + } + + return tail_t::template extract( id, data, result ); + } + }; + + template + struct block_header_extension_types { + using block_header_extensions_t = fc::static_variant< Ts... >; + using decompose_t = decompose< Ts... >; + }; + } + + using block_header_extension_types = detail::block_header_extension_types< + protocol_feature_activation + >; + + using block_header_extensions = block_header_extension_types::block_header_extensions_t; + struct block_header { block_timestamp_type timestamp; @@ -41,6 +93,8 @@ namespace eosio { namespace chain { block_id_type id() const; uint32_t block_num() const { return num_from_id(previous) + 1; } static uint32_t num_from_id(const block_id_type& id); + + vector validate_and_extract_header_extensions()const; }; diff --git a/libraries/chain/include/eosio/chain/block_header_state.hpp b/libraries/chain/include/eosio/chain/block_header_state.hpp index 2ec87664c28..41e19253138 100644 --- a/libraries/chain/include/eosio/chain/block_header_state.hpp +++ b/libraries/chain/include/eosio/chain/block_header_state.hpp @@ -28,25 +28,37 @@ namespace detail { } struct pending_block_header_state : public detail::block_header_state_common { - detail::schedule_info prev_pending_schedule; - bool was_pending_promoted = false; - block_id_type previous; - account_name producer; - block_timestamp_type timestamp; - uint32_t active_schedule_version = 0; - uint16_t confirmed = 1; + protocol_feature_activation_set_ptr prev_activated_protocol_features; + detail::schedule_info prev_pending_schedule; + bool was_pending_promoted = false; + block_id_type previous; + account_name producer; + block_timestamp_type timestamp; + uint32_t active_schedule_version = 0; + uint16_t confirmed = 1; signed_block_header make_block_header( const checksum256_type& transaction_mroot, const checksum256_type& action_mroot, - optional&& new_producers )const; + optional&& new_producers, + vector&& new_protocol_feature_activations )const; - block_header_state finish_next( const signed_block_header& h, bool skip_validate_signee = false )&&; + block_header_state finish_next( const signed_block_header& h, + const std::function&, + const vector& )>& validator, + bool skip_validate_signee = false )&&; block_header_state finish_next( signed_block_header& h, - const std::function& signer )&&; + const std::function&, + const vector& )>& validator, + const std::function& signer )&&; protected: - 
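// Editorial sketch of the decompose<> dispatch introduced in block_header.hpp above,
// using std::variant in place of fc::static_variant and leaving payload decoding
// stubbed out (my_activation, header_ext and match are illustrative names):
#include <cstdint>
#include <optional>
#include <variant>
#include <vector>

struct my_activation {                               // stand-in for protocol_feature_activation
   static constexpr uint16_t extension_id() { return 0; }
   static constexpr bool     enforce_unique() { return true; }
   std::vector<char> raw;                            // fc::raw::unpack would decode this
};

using header_ext = std::variant<my_activation>;
struct match { bool enforce_unique = false; };

template<typename... Ts> struct decompose;

template<> struct decompose<> {                      // end of the type list: no match
   static std::optional<match> extract( uint16_t, const std::vector<char>&, header_ext& ) {
      return {};
   }
};

template<typename T, typename... Rest>
struct decompose<T, Rest...> {                       // try T, then recurse on the tail
   static std::optional<match> extract( uint16_t id, const std::vector<char>& data, header_ext& out ) {
      if( id == T::extension_id() ) {
         out = T{ data };
         return match{ T::enforce_unique() };
      }
      return decompose<Rest...>::extract( id, data, out );
   }
};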
block_header_state _finish_next( const signed_block_header& h )&&; + block_header_state _finish_next( const signed_block_header& h, + const std::function&, + const vector& )>& validator )&&; }; @@ -55,9 +67,14 @@ struct pending_block_header_state : public detail::block_header_state_common { * @brief defines the minimum state necessary to validate transaction headers */ struct block_header_state : public detail::block_header_state_common { - block_id_type id; - signed_block_header header; - detail::schedule_info pending_schedule; + block_id_type id; + signed_block_header header; + detail::schedule_info pending_schedule; + protocol_feature_activation_set_ptr activated_protocol_features; + + /// this data is redundant with the data stored in header, but it acts as a cache that avoids + /// duplication of work + vector header_exts; block_header_state() = default; @@ -67,7 +84,11 @@ struct block_header_state : public detail::block_header_state_common { pending_block_header_state next( block_timestamp_type when, uint16_t num_prev_blocks_to_confirm )const; - block_header_state next( const signed_block_header& h, bool skip_validate_signee = false )const; + block_header_state next( const signed_block_header& h, + const std::function&, + const vector& )>& validator, + bool skip_validate_signee = false )const; bool has_pending_producers()const { return pending_schedule.schedule.producers.size(); } uint32_t calc_dpos_last_irreversible( account_name producer_of_next_block )const; @@ -79,6 +100,8 @@ struct block_header_state : public detail::block_header_state_common { void sign( const std::function& signer ); public_key_type signee()const; void verify_signee(const public_key_type& signee)const; + + const vector& get_new_protocol_feature_activations()const; }; using block_header_state_ptr = std::shared_ptr; @@ -107,4 +130,5 @@ FC_REFLECT_DERIVED( eosio::chain::block_header_state, (eosio::chain::detail::bl (id) (header) (pending_schedule) + (activated_protocol_features) ) diff --git a/libraries/chain/include/eosio/chain/block_state.hpp b/libraries/chain/include/eosio/chain/block_state.hpp index 94defc13cfd..1352399c825 100644 --- a/libraries/chain/include/eosio/chain/block_state.hpp +++ b/libraries/chain/include/eosio/chain/block_state.hpp @@ -14,18 +14,27 @@ namespace eosio { namespace chain { struct block_state : public block_header_state { block_state( const block_header_state& prev, signed_block_ptr b, + const std::function&, + const vector& )>& validator, bool skip_validate_signee ); block_state( pending_block_header_state&& cur, signed_block_ptr&& b, // unsigned block vector&& trx_metas, + const std::function&, + const vector& )>& validator, const std::function& signer ); block_state( pending_block_header_state&& cur, const signed_block_ptr& b, // signed block vector&& trx_metas, + const std::function&, + const vector& )>& validator, bool skip_validate_signee ); diff --git a/libraries/chain/include/eosio/chain/controller.hpp b/libraries/chain/include/eosio/chain/controller.hpp index 641b2d1e42d..b04c166cfd1 100644 --- a/libraries/chain/include/eosio/chain/controller.hpp +++ b/libraries/chain/include/eosio/chain/controller.hpp @@ -7,6 +7,7 @@ #include #include #include +#include namespace chainbase { class database; @@ -96,11 +97,23 @@ namespace eosio { namespace chain { void add_indices(); void startup( std::function shutdown, const snapshot_reader_ptr& snapshot = nullptr ); + vector get_preactivated_protocol_features()const; + + /** + * Starts a new pending block session upon which new 
transactions can + * be pushed. + * + * Will only activate protocol features that have been pre-activated. + */ + void start_block( block_timestamp_type time = block_timestamp_type(), uint16_t confirm_block_count = 0 ); + /** * Starts a new pending block session upon which new transactions can * be pushed. */ - void start_block( block_timestamp_type time = block_timestamp_type(), uint16_t confirm_block_count = 0 ); + void start_block( block_timestamp_type time, + uint16_t confirm_block_count, + const vector& new_protocol_feature_activations ); void abort_block(); @@ -237,6 +250,9 @@ namespace eosio { namespace chain { void validate_db_available_size() const; void validate_reversible_available_size() const; + bool is_protocol_feature_activated( const digest_type& feature_digest )const; + bool is_builtin_activated( builtin_protocol_feature_t f )const; + bool is_known_unexpired_transaction( const transaction_id_type& id) const; int64_t set_proposed_producers( vector producers ); diff --git a/libraries/chain/include/eosio/chain/exceptions.hpp b/libraries/chain/include/eosio/chain/exceptions.hpp index 6c3e504d349..6c71f08ed5a 100644 --- a/libraries/chain/include/eosio/chain/exceptions.hpp +++ b/libraries/chain/include/eosio/chain/exceptions.hpp @@ -158,7 +158,10 @@ namespace eosio { namespace chain { 3030008, "Block is not signed with expected key" ) FC_DECLARE_DERIVED_EXCEPTION( wrong_producer, block_validate_exception, 3030009, "Block is not signed by expected producer" ) - + FC_DECLARE_DERIVED_EXCEPTION( invalid_block_header_extension, block_validate_exception, + 3030010, "Invalid block header extension" ) + FC_DECLARE_DERIVED_EXCEPTION( ill_formed_protocol_feature_activation, block_validate_exception, + 3030011, "Block includes an ill-formed protocol feature activation extension" ) @@ -519,4 +522,9 @@ namespace eosio { namespace chain { 3240000, "Snapshot exception" ) FC_DECLARE_DERIVED_EXCEPTION( snapshot_validation_exception, snapshot_exception, 3240001, "Snapshot Validation Exception" ) + + FC_DECLARE_DERIVED_EXCEPTION( protocol_feature_exception, chain_exception, + 3250000, "Protocol feature exception" ) + FC_DECLARE_DERIVED_EXCEPTION( protocol_feature_validation_exception, snapshot_exception, + 3250001, "Protocol feature validation exception" ) } } // eosio::chain diff --git a/libraries/chain/include/eosio/chain/fork_database.hpp b/libraries/chain/include/eosio/chain/fork_database.hpp index 8e4d9176431..159fd45a1f3 100644 --- a/libraries/chain/include/eosio/chain/fork_database.hpp +++ b/libraries/chain/include/eosio/chain/fork_database.hpp @@ -25,6 +25,9 @@ namespace eosio { namespace chain { explicit fork_database( const fc::path& data_dir ); ~fork_database(); + void open( const std::function&, + const vector& )>& validator ); void close(); block_header_state_ptr get_block_header( const block_id_type& id )const; diff --git a/libraries/chain/include/eosio/chain/global_property_object.hpp b/libraries/chain/include/eosio/chain/global_property_object.hpp index 7f3c09cccf5..e513045c0b2 100644 --- a/libraries/chain/include/eosio/chain/global_property_object.hpp +++ b/libraries/chain/include/eosio/chain/global_property_object.hpp @@ -26,12 +26,13 @@ namespace eosio { namespace chain { */ class global_property_object : public chainbase::object { - OBJECT_CTOR(global_property_object, (proposed_schedule)) + OBJECT_CTOR(global_property_object, (proposed_schedule)(preactivated_protocol_features)) id_type id; optional proposed_schedule_block_num; shared_producer_schedule_type proposed_schedule; 
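// Editorial usage sketch of the two start_block() overloads declared above (assumes a
// constructed controller `chain`; signing and transaction processing omitted):
vector<digest_type> feature_digests = chain.get_preactivated_protocol_features();
chain.start_block( block_timestamp_type(), 0, feature_digests );   // explicit activation set
// ...or simply chain.start_block(); to activate whatever was pre-activated.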
chain_config configuration; + shared_vector preactivated_protocol_features; }; @@ -82,5 +83,5 @@ FC_REFLECT(eosio::chain::dynamic_global_property_object, ) FC_REFLECT(eosio::chain::global_property_object, - (proposed_schedule_block_num)(proposed_schedule)(configuration) + (proposed_schedule_block_num)(proposed_schedule)(configuration)(preactivated_protocol_features) ) diff --git a/libraries/chain/include/eosio/chain/protocol_feature_activation.hpp b/libraries/chain/include/eosio/chain/protocol_feature_activation.hpp new file mode 100644 index 00000000000..b7e9b2ca5a0 --- /dev/null +++ b/libraries/chain/include/eosio/chain/protocol_feature_activation.hpp @@ -0,0 +1,39 @@ +/** + * @file + * @copyright defined in eos/LICENSE + */ +#pragma once + +#include + +namespace eosio { namespace chain { + +struct protocol_feature_activation { + static constexpr uint16_t extension_id() { return 0; } + static constexpr bool enforce_unique() { return true; } + + void reflector_init(); + + vector protocol_features; +}; + +struct protocol_feature_activation_set; + +using protocol_feature_activation_set_ptr = std::shared_ptr; + +struct protocol_feature_activation_set { + flat_set protocol_features; + + protocol_feature_activation_set() = default; + + protocol_feature_activation_set( const protocol_feature_activation_set& orig_pfa_set, + vector additional_features, + bool enforce_disjoint = true + ); +}; + + +} } // namespace eosio::chain + +FC_REFLECT(eosio::chain::protocol_feature_activation, (protocol_features)) +FC_REFLECT(eosio::chain::protocol_feature_activation_set, (protocol_features)) diff --git a/libraries/chain/include/eosio/chain/protocol_feature_manager.hpp b/libraries/chain/include/eosio/chain/protocol_feature_manager.hpp new file mode 100644 index 00000000000..c81d226fbf8 --- /dev/null +++ b/libraries/chain/include/eosio/chain/protocol_feature_manager.hpp @@ -0,0 +1,139 @@ +/** + * @file + * @copyright defined in eos/LICENSE + */ +#pragma once + +#include + +namespace eosio { namespace chain { + +enum class protocol_feature_t : uint32_t { + builtin +}; + +enum class builtin_protocol_feature_t : uint32_t { + preactivate_feature, +}; + +struct protocol_feature_subjective_restrictions { + time_point earliest_allowed_activation_time; + bool preactivation_required = false; + bool enabled = false; +}; + +class protocol_feature_base { +public: + protocol_feature_base() = default; + + protocol_feature_base( protocol_feature_t feature_type, + const protocol_feature_subjective_restrictions& restrictions ); + + void reflector_init(); + +public: + std::string protocol_feature_type; + flat_set dependencies; + digest_type description_digest; + protocol_feature_subjective_restrictions subjective_restrictions; +protected: + protocol_feature_t _type; +}; + +class builtin_protocol_feature : public protocol_feature_base { +public: + static constexpr const char* feature_type_string = "builtin"; + + builtin_protocol_feature() = default; + + builtin_protocol_feature( builtin_protocol_feature_t codename, + const protocol_feature_subjective_restrictions& restrictions ); + + void reflector_init(); + + digest_type digest()const; + + friend class protocol_feature_manager; + +public: + std::string builtin_feature_codename; +protected: + builtin_protocol_feature_t _codename; +}; + +class protocol_feature_manager { +public: + + protocol_feature_manager(); + + enum class recognized_t { + unrecognized, + disabled, + too_early, + ready_if_preactivated, + ready + }; + + struct protocol_feature { + digest_type feature_digest; + 
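// Usage sketch of the protocol_feature_activation_set merging constructor declared
// above: each block derives its activated-feature set from its parent's set plus the
// block's new activations, and any overlap throws because a feature cannot activate
// twice. The genesis set starts empty, matching the head->activated_protocol_features
// assignment earlier in this patch. d1 and d2 are hypothetical, distinct digests:
digest_type d1, d2;
auto parent = std::make_shared<protocol_feature_activation_set>();
auto child  = std::make_shared<protocol_feature_activation_set>(
                 *parent, vector<digest_type>{ d1, d2 } );   // enforce_disjoint defaults to true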
flat_set dependencies; + time_point earliest_allowed_activation_time; + bool preactivation_required = false; + bool enabled = false; + optional builtin_feature; + + friend bool operator <( const protocol_feature& lhs, const protocol_feature& rhs ) { + return lhs.feature_digest < rhs.feature_digest; + } + + friend bool operator <( const digest_type& lhs, const protocol_feature& rhs ) { + return lhs < rhs.feature_digest; + } + + friend bool operator <( const protocol_feature& lhs, const digest_type& rhs ) { + return lhs.feature_digest < rhs; + } + }; + + recognized_t is_recognized( const digest_type& feature_digest, time_point now )const; + + const protocol_feature& get_protocol_feature( const digest_type& feature_digest )const; + + bool is_builtin_activated( builtin_protocol_feature_t feature_codename, uint32_t current_block_num )const; + + bool validate_dependencies( const digest_type& feature_digest, + const std::function& validator )const; + + void add_feature( const builtin_protocol_feature& f ); + + void activate_feature( const digest_type& feature_digest, uint32_t current_block_num ); + void popped_blocks_to( uint32_t block_num ); + +protected: + using protocol_feature_set_type = std::set>; + + struct builtin_protocol_feature_entry { + static constexpr uint32_t not_active = std::numeric_limits::max(); + static constexpr size_t no_previous = std::numeric_limits::max(); + + protocol_feature_set_type::iterator iterator_to_protocol_feature; + uint32_t activation_block_num = not_active; + size_t previous = no_previous; + }; + +protected: + protocol_feature_set_type _recognized_protocol_features; + vector _builtin_protocol_features; + size_t _head_of_builtin_activation_list = builtin_protocol_feature_entry::no_previous; +}; + +} } // namespace eosio::chain + +FC_REFLECT(eosio::chain::protocol_feature_subjective_restrictions, + (earliest_allowed_activation_time)(preactivation_required)(enabled)) + +FC_REFLECT(eosio::chain::protocol_feature_base, + (protocol_feature_type)(dependencies)(description_digest)(subjective_restrictions)) + +FC_REFLECT_DERIVED(eosio::chain::builtin_protocol_feature, (eosio::chain::protocol_feature_base), + (builtin_feature_codename)) diff --git a/libraries/chain/protocol_feature_activation.cpp b/libraries/chain/protocol_feature_activation.cpp new file mode 100644 index 00000000000..36afd6d6aa3 --- /dev/null +++ b/libraries/chain/protocol_feature_activation.cpp @@ -0,0 +1,83 @@ +/** + * @file + * @copyright defined in eos/LICENSE + */ + +#include +#include + +#include + +namespace eosio { namespace chain { + + void protocol_feature_activation::reflector_init() { + static_assert( fc::raw::has_feature_reflector_init_on_unpacked_reflected_types, + "protocol_feature_activation expects FC to support reflector_init" ); + + + EOS_ASSERT( protocol_features.size() > 0, ill_formed_protocol_feature_activation, + "Protocol feature activation extension must have at least one protocol feature digest", + ); + + set s; + + for( const auto& d : protocol_features ) { + auto res = s.insert( d ); + EOS_ASSERT( res.second, ill_formed_protocol_feature_activation, + "Protocol feature digest ${d} was repeated in the protocol feature activation extension", + ("d", d) + ); + } + } + + template + class end_insert_iterator : public std::iterator< std::output_iterator_tag, void, void, void, void > + { + protected: + Container* container; + + public: + using container_type = Container; + + explicit end_insert_iterator( Container& c ) + :container(&c) + {} + + end_insert_iterator& operator=( 
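// The friend operator< overloads above (protocol_feature against protocol_feature,
// plus both mixed orderings against digest_type) enable heterogeneous lookup:
// protocol_feature_set_type is a std::set ordered by the transparent comparator
// std::less<>, so _recognized_protocol_features.find( feature_digest ) can search by
// digest directly without constructing a protocol_feature key. Minimal model (int as
// the digest):
#include <set>
struct feature { int feature_digest; };
bool operator<( const feature& a, const feature& b ) { return a.feature_digest < b.feature_digest; }
bool operator<( int a, const feature& b ) { return a < b.feature_digest; }
bool operator<( const feature& a, int b ) { return a.feature_digest < b; }

std::set<feature, std::less<>> features;   // std::less<void>: transparent comparator
// features.find( 42 ) now compiles and performs a digest-keyed lookup.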
typename Container::const_reference value ) { + container->insert( container->cend(), value ); + return *this; + } + + end_insert_iterator& operator*() { return *this; } + end_insert_iterator& operator++() { return *this; } + end_insert_iterator operator++(int) { return *this; } + }; + + template + inline end_insert_iterator end_inserter( Container& c ) { + return end_insert_iterator( c ); + } + + protocol_feature_activation_set::protocol_feature_activation_set( + const protocol_feature_activation_set& orig_pfa_set, + vector additional_features, + bool enforce_disjoint + ) + { + std::sort( additional_features.begin(), additional_features.end() ); + + const auto& s1 = orig_pfa_set.protocol_features; + const auto& s2 = additional_features; + + auto expected_size = s1.size() + s2.size(); + protocol_features.reserve( expected_size ); + + std::set_union( s1.cbegin(), s1.cend(), s2.cbegin(), s2.cend(), end_inserter( protocol_features ) ); + + EOS_ASSERT( !enforce_disjoint || protocol_features.size() == expected_size, + invalid_block_header_extension, + "duplication of protocol feature digests" + ); + } + +} } // eosio::chain diff --git a/libraries/chain/protocol_feature_manager.cpp b/libraries/chain/protocol_feature_manager.cpp new file mode 100644 index 00000000000..2f83490cc8f --- /dev/null +++ b/libraries/chain/protocol_feature_manager.cpp @@ -0,0 +1,237 @@ +/** + * @file + * @copyright defined in eos/LICENSE + */ + +#include +#include + +#include +#include + +namespace eosio { namespace chain { + + const std::unordered_map builtin_codenames = boost::assign::map_list_of + (builtin_protocol_feature_t::preactivate_feature, "PREACTIVATE_FEATURE"); + + protocol_feature_base::protocol_feature_base( protocol_feature_t feature_type, + const protocol_feature_subjective_restrictions& restrictions ) + :subjective_restrictions( restrictions ) + ,_type( feature_type ) + { + switch( feature_type ) { + case protocol_feature_t::builtin: + protocol_feature_type = builtin_protocol_feature::feature_type_string; + break; + default: + { + EOS_THROW( protocol_feature_validation_exception, + "Unsupported protocol_feature_t passed to constructor: ${type}", + ("type", static_cast(feature_type)) ); + } + break; + } + } + + void protocol_feature_base::reflector_init() { + static_assert( fc::raw::has_feature_reflector_init_on_unpacked_reflected_types, + "protocol_feature_activation expects FC to support reflector_init" ); + + if( protocol_feature_type == builtin_protocol_feature::feature_type_string ) { + _type = protocol_feature_t::builtin; + } else { + EOS_THROW( protocol_feature_validation_exception, + "Unsupported protocol feature type: ${type}", ("type", protocol_feature_type) ); + } + } + + builtin_protocol_feature::builtin_protocol_feature( builtin_protocol_feature_t codename, + const protocol_feature_subjective_restrictions& restrictions ) + :protocol_feature_base( protocol_feature_t::builtin, restrictions ) + ,_codename(codename) + { + auto itr = builtin_codenames.find( codename ); + EOS_ASSERT( itr != builtin_codenames.end(), protocol_feature_validation_exception, + "Unsupported builtin_protocol_feature_t passed to constructor: ${codename}", + ("codename", static_cast(codename)) ); + + builtin_feature_codename = itr->second; + } + + void builtin_protocol_feature::reflector_init() { + protocol_feature_base::reflector_init(); + + for( const auto& p : builtin_codenames ) { + if( builtin_feature_codename.compare( p.second ) == 0 ) { + _codename = p.first; + return; + } + } + + EOS_THROW( 
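// Editorial model of the merge idiom in the protocol_feature_activation_set
// constructor above: both inputs sorted, one set_union pass writes the result, and a
// size comparison detects overlap without a second scan. std::back_inserter on a
// vector replaces end_inserter here only because flat_set lacks push_back():
#include <algorithm>
#include <iterator>
#include <stdexcept>
#include <vector>

std::vector<int> merge_disjoint( const std::vector<int>& a /*sorted*/, std::vector<int> b ) {
   std::sort( b.begin(), b.end() );
   std::vector<int> out;
   out.reserve( a.size() + b.size() );
   std::set_union( a.begin(), a.end(), b.begin(), b.end(), std::back_inserter( out ) );
   if( out.size() != a.size() + b.size() )        // union shrank => duplicates existed
      throw std::invalid_argument( "duplication of protocol feature digests" );
   return out;
}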
protocol_feature_validation_exception, + "Unsupported protocol feature type: ${type}", ("type", protocol_feature_type) ); + } + + + digest_type builtin_protocol_feature::digest()const { + digest_type::encoder enc; + fc::raw::pack( enc, _type ); + fc::raw::pack( enc, description_digest ); + fc::raw::pack( enc, _codename ); + + return enc.result(); + } + + protocol_feature_manager::protocol_feature_manager() { + _builtin_protocol_features.reserve( builtin_codenames.size() ); + } + + protocol_feature_manager::recognized_t + protocol_feature_manager::is_recognized( const digest_type& feature_digest, time_point now )const { + auto itr = _recognized_protocol_features.find( feature_digest ); + + if( itr == _recognized_protocol_features.end() ) + return recognized_t::unrecognized; + + if( !itr->enabled ) + return recognized_t::disabled; + + if( itr->earliest_allowed_activation_time > now ) + return recognized_t::too_early; + + if( itr->preactivation_required ) + return recognized_t::ready_if_preactivated; + + return recognized_t::ready; + } + + const protocol_feature_manager::protocol_feature& + protocol_feature_manager::get_protocol_feature( const digest_type& feature_digest )const { + auto itr = _recognized_protocol_features.find( feature_digest ); + + EOS_ASSERT( itr != _recognized_protocol_features.end(), protocol_feature_exception, + "unrecognized protocol feature with digest: ${digest}", + ("digest", feature_digest) + ); + + return *itr; + } + + bool protocol_feature_manager::is_builtin_activated( builtin_protocol_feature_t feature_codename, + uint32_t current_block_num )const + { + uint32_t indx = static_cast<uint32_t>( feature_codename ); + + if( indx >= _builtin_protocol_features.size() ) return false; + + return (_builtin_protocol_features[indx].activation_block_num <= current_block_num); + } + + bool protocol_feature_manager::validate_dependencies( + const digest_type& feature_digest, + const std::function<bool(const digest_type&)>& validator + )const { + auto itr = _recognized_protocol_features.find( feature_digest ); + + if( itr == _recognized_protocol_features.end() ) return false; + + for( const auto& d : itr->dependencies ) { + if( !validator(d) ) return false; + } + + return true; + } + + void protocol_feature_manager::add_feature( const builtin_protocol_feature& f ) { + EOS_ASSERT( _head_of_builtin_activation_list == builtin_protocol_feature_entry::no_previous, + protocol_feature_exception, + "new builtin protocol features cannot be added after a protocol feature has already been activated" ); + + uint32_t indx = static_cast<uint32_t>( f._codename ); + + if( indx < _builtin_protocol_features.size() ) { + EOS_ASSERT( _builtin_protocol_features[indx].iterator_to_protocol_feature == _recognized_protocol_features.end(), + protocol_feature_exception, + "builtin protocol feature with codename '${codename}' already added", + ("codename", f.builtin_feature_codename) ); + } + + auto feature_digest = f.digest(); + + auto res = _recognized_protocol_features.insert( protocol_feature{ + feature_digest, + f.dependencies, + f.subjective_restrictions.earliest_allowed_activation_time, + f.subjective_restrictions.preactivation_required, + f.subjective_restrictions.enabled, + f._codename + } ); + + EOS_ASSERT( res.second, protocol_feature_exception, + "builtin protocol feature with codename '${codename}' has a digest of ${digest} but another protocol feature with the same digest has already been added", + ("codename", f.builtin_feature_codename)("digest", feature_digest) ); + + if( indx >= _builtin_protocol_features.size() ) { + for( auto i 
=_builtin_protocol_features.size(); i <= indx; ++i ) { + _builtin_protocol_features.push_back( builtin_protocol_feature_entry{ + _recognized_protocol_features.end(), + builtin_protocol_feature_entry::not_active + } ); + } + } + + _builtin_protocol_features[indx].iterator_to_protocol_feature = res.first; + } + + void protocol_feature_manager::activate_feature( const digest_type& feature_digest, + uint32_t current_block_num ) + { + auto itr = _recognized_protocol_features.find( feature_digest ); + + EOS_ASSERT( itr != _recognized_protocol_features.end(), protocol_feature_exception, + "unrecognized protocol feature digest: ${digest}", ("digest", feature_digest) ); + + if( itr->builtin_feature ) { + if( _head_of_builtin_activation_list != builtin_protocol_feature_entry::no_previous ) { + auto largest_block_num_of_activated_builtins = _builtin_protocol_features[_head_of_builtin_activation_list].activation_block_num; + EOS_ASSERT( largest_block_num_of_activated_builtins <= current_block_num, + protocol_feature_exception, + "trying to activate a builtin protocol feature with a current block number of " + "${current_block_num} when the largest activation block number of all activated builtin " + "protocol features is ${largest_block_num_of_activated_builtins}", + ("current_block_num", current_block_num) + ("largest_block_num_of_activated_builtins", largest_block_num_of_activated_builtins) + ); + } + + uint32_t indx = static_cast( *itr->builtin_feature ); + + EOS_ASSERT( indx < _builtin_protocol_features.size() && + _builtin_protocol_features[indx].iterator_to_protocol_feature != _recognized_protocol_features.end(), + protocol_feature_exception, + "invariant failure: problem with activating builtin protocol feature with digest: ${digest}", + ("digest", feature_digest) ); + + EOS_ASSERT( _builtin_protocol_features[indx].activation_block_num == builtin_protocol_feature_entry::not_active, + protocol_feature_exception, + "cannot activate already activated builtin feature with digest: ${digest}", + ("digest", feature_digest) ); + + _builtin_protocol_features[indx].activation_block_num = current_block_num; + _builtin_protocol_features[indx].previous = _head_of_builtin_activation_list; + _head_of_builtin_activation_list = indx; + } + } + + void protocol_feature_manager::popped_blocks_to( uint32_t block_num ) { + while( _head_of_builtin_activation_list != builtin_protocol_feature_entry::no_previous ) { + auto& e = _builtin_protocol_features[_head_of_builtin_activation_list]; + if( e.activation_block_num <= block_num ) break; + + _head_of_builtin_activation_list = e.previous; + e.activation_block_num = builtin_protocol_feature_entry::not_active; + e.previous = builtin_protocol_feature_entry::no_previous; + } + } + +} } // eosio::chain From 1d2e24b1165b17c6405749096110a84c6f05966b Mon Sep 17 00:00:00 2001 From: arhag Date: Fri, 15 Feb 2019 21:16:41 -0500 Subject: [PATCH 014/680] more work on #6429 --- libraries/chain/controller.cpp | 12 +- .../chain/include/eosio/chain/controller.hpp | 1 + .../eosio/chain/protocol_feature_manager.hpp | 29 +++- libraries/chain/include/eosio/chain/types.hpp | 28 ++++ .../chain/protocol_feature_activation.cpp | 28 ---- libraries/chain/protocol_feature_manager.cpp | 141 ++++++++++++++++-- .../testing/include/eosio/testing/tester.hpp | 6 + libraries/testing/tester.cpp | 11 +- plugins/chain_plugin/chain_plugin.cpp | 93 +++++++++++- unittests/fork_test_utilities.cpp | 42 ++++++ unittests/fork_test_utilities.hpp | 21 +++ unittests/forked_tests.cpp | 39 +---- 
unittests/protocol_feature_tests.cpp | 33 ++++ 13 files changed, 401 insertions(+), 83 deletions(-) create mode 100644 unittests/fork_test_utilities.cpp create mode 100644 unittests/fork_test_utilities.hpp create mode 100644 unittests/protocol_feature_tests.cpp diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp index ff7ba474cee..52f3064ae87 100644 --- a/libraries/chain/controller.cpp +++ b/libraries/chain/controller.cpp @@ -274,7 +274,7 @@ struct controller_impl { apply_handlers[receiver][make_pair(contract,action)] = v; } - controller_impl( const controller::config& cfg, controller& s ) + controller_impl( const controller::config& cfg, controller& s, protocol_feature_manager&& pfm ) :self(s), db( cfg.state_dir, cfg.read_only ? database::read_only : database::read_write, @@ -287,6 +287,7 @@ struct controller_impl { wasmif( cfg.wasm_runtime ), resource_limits( db ), authorization( s, db ), + protocol_features( std::move(pfm) ), conf( cfg ), chain_id( cfg.genesis.compute_chain_id() ), read_mode( cfg.read_mode ), @@ -2043,7 +2044,12 @@ authorization_manager& controller::get_mutable_authorization_manager() } controller::controller( const controller::config& cfg ) -:my( new controller_impl( cfg, *this ) ) +:my( new controller_impl( cfg, *this, protocol_feature_manager{} ) ) +{ +} + +controller::controller( const config& cfg, protocol_feature_manager&& pfm ) +:my( new controller_impl( cfg, *this, std::move(pfm) ) ) { } @@ -2707,7 +2713,7 @@ bool controller::is_builtin_activated( builtin_protocol_feature_t f )const { ++current_block_num; } - my->protocol_features.is_builtin_activated( f, current_block_num ); + return my->protocol_features.is_builtin_activated( f, current_block_num ); } bool controller::is_known_unexpired_transaction( const transaction_id_type& id) const { diff --git a/libraries/chain/include/eosio/chain/controller.hpp b/libraries/chain/include/eosio/chain/controller.hpp index b04c166cfd1..0903b1ef37b 100644 --- a/libraries/chain/include/eosio/chain/controller.hpp +++ b/libraries/chain/include/eosio/chain/controller.hpp @@ -92,6 +92,7 @@ namespace eosio { namespace chain { }; explicit controller( const config& cfg ); + controller( const config& cfg, protocol_feature_manager&& pfm ); ~controller(); void add_indices(); diff --git a/libraries/chain/include/eosio/chain/protocol_feature_manager.hpp b/libraries/chain/include/eosio/chain/protocol_feature_manager.hpp index c81d226fbf8..66dd14c0d94 100644 --- a/libraries/chain/include/eosio/chain/protocol_feature_manager.hpp +++ b/libraries/chain/include/eosio/chain/protocol_feature_manager.hpp @@ -18,23 +18,36 @@ enum class builtin_protocol_feature_t : uint32_t { struct protocol_feature_subjective_restrictions { time_point earliest_allowed_activation_time; - bool preactivation_required = false; - bool enabled = false; + bool preactivation_required = true; + bool enabled = true; }; +struct builtin_protocol_feature_spec { + const char* codename = nullptr; + digest_type description_digest; + flat_set builtin_dependencies; + protocol_feature_subjective_restrictions subjective_restrictions; +}; + +const char* builtin_protocol_feature_codename( builtin_protocol_feature_t ); + class protocol_feature_base { public: protocol_feature_base() = default; protocol_feature_base( protocol_feature_t feature_type, + const digest_type& description_digest, + flat_set&& dependencies, const protocol_feature_subjective_restrictions& restrictions ); void reflector_init(); + protocol_feature_t get_type() { return _type; } + public: 
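// Usage sketch of the construction path added in this commit (assumes a populated
// controller::config `cfg`): seed a protocol_feature_manager with the default builtin
// definitions, then hand it to the controller by move -- mirroring what
// chain_plugin::plugin_initialize and base_tester::open do later in this patch:
protocol_feature_manager pfm;
pfm.add_feature( pfm.make_default_builtin_protocol_feature(
                    builtin_protocol_feature_t::preactivate_feature ) );
controller chain( cfg, std::move(pfm) );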
std::string protocol_feature_type; - flat_set dependencies; digest_type description_digest; + flat_set dependencies; protocol_feature_subjective_restrictions subjective_restrictions; protected: protocol_feature_t _type; @@ -47,12 +60,16 @@ class builtin_protocol_feature : public protocol_feature_base { builtin_protocol_feature() = default; builtin_protocol_feature( builtin_protocol_feature_t codename, + const digest_type& description_digest, + flat_set&& dependencies, const protocol_feature_subjective_restrictions& restrictions ); void reflector_init(); digest_type digest()const; + builtin_protocol_feature_t get_codename() { return _codename; } + friend class protocol_feature_manager; public: @@ -61,6 +78,8 @@ class builtin_protocol_feature : public protocol_feature_base { builtin_protocol_feature_t _codename; }; +extern const std::unordered_map builtin_protocol_feature_codenames; + class protocol_feature_manager { public: @@ -101,9 +120,13 @@ class protocol_feature_manager { bool is_builtin_activated( builtin_protocol_feature_t feature_codename, uint32_t current_block_num )const; + optional get_builtin_digest( builtin_protocol_feature_t feature_codename )const; + bool validate_dependencies( const digest_type& feature_digest, const std::function& validator )const; + builtin_protocol_feature make_default_builtin_protocol_feature( builtin_protocol_feature_t codename )const; + void add_feature( const builtin_protocol_feature& f ); void activate_feature( const digest_type& feature_digest, uint32_t current_block_num ); diff --git a/libraries/chain/include/eosio/chain/types.hpp b/libraries/chain/include/eosio/chain/types.hpp index 8ee6827efe2..93a80a0b7f3 100644 --- a/libraries/chain/include/eosio/chain/types.hpp +++ b/libraries/chain/include/eosio/chain/types.hpp @@ -223,6 +223,34 @@ namespace eosio { namespace chain { typedef vector>> extensions_type; + template + class end_insert_iterator : public std::iterator< std::output_iterator_tag, void, void, void, void > + { + protected: + Container* container; + + public: + using container_type = Container; + + explicit end_insert_iterator( Container& c ) + :container(&c) + {} + + end_insert_iterator& operator=( typename Container::const_reference value ) { + container->insert( container->cend(), value ); + return *this; + } + + end_insert_iterator& operator*() { return *this; } + end_insert_iterator& operator++() { return *this; } + end_insert_iterator operator++(int) { return *this; } + }; + + template + inline end_insert_iterator end_inserter( Container& c ) { + return end_insert_iterator( c ); + } + } } // eosio::chain FC_REFLECT( eosio::chain::void_t, ) diff --git a/libraries/chain/protocol_feature_activation.cpp b/libraries/chain/protocol_feature_activation.cpp index 36afd6d6aa3..b0b7a563073 100644 --- a/libraries/chain/protocol_feature_activation.cpp +++ b/libraries/chain/protocol_feature_activation.cpp @@ -30,34 +30,6 @@ namespace eosio { namespace chain { } } - template - class end_insert_iterator : public std::iterator< std::output_iterator_tag, void, void, void, void > - { - protected: - Container* container; - - public: - using container_type = Container; - - explicit end_insert_iterator( Container& c ) - :container(&c) - {} - - end_insert_iterator& operator=( typename Container::const_reference value ) { - container->insert( container->cend(), value ); - return *this; - } - - end_insert_iterator& operator*() { return *this; } - end_insert_iterator& operator++() { return *this; } - end_insert_iterator operator++(int) { return *this; } - 
}; - - template - inline end_insert_iterator end_inserter( Container& c ) { - return end_insert_iterator( c ); - } - protocol_feature_activation_set::protocol_feature_activation_set( const protocol_feature_activation_set& orig_pfa_set, vector additional_features, diff --git a/libraries/chain/protocol_feature_manager.cpp b/libraries/chain/protocol_feature_manager.cpp index 2f83490cc8f..7650bd69f26 100644 --- a/libraries/chain/protocol_feature_manager.cpp +++ b/libraries/chain/protocol_feature_manager.cpp @@ -11,12 +11,34 @@ namespace eosio { namespace chain { - const std::unordered_map builtin_codenames = boost::assign::map_list_of - (builtin_protocol_feature_t::preactivate_feature, "PREACTIVATE_FEATURE"); + const std::unordered_map + builtin_protocol_feature_codenames = + boost::assign::map_list_of + ( builtin_protocol_feature_t::preactivate_feature, { + "PREACTIVATE_FEATURE", + digest_type{}, + {}, + {time_point{}, false, true} // enabled without preactivation and ready to go at any time + } ) + ; + + + const char* builtin_protocol_feature_codename( builtin_protocol_feature_t codename ) { + auto itr = builtin_protocol_feature_codenames.find( codename ); + EOS_ASSERT( itr != builtin_protocol_feature_codenames.end(), protocol_feature_validation_exception, + "Unsupported builtin_protocol_feature_t passed to builtin_protocol_feature_codename: ${codename}", + ("codename", static_cast(codename)) ); + + return itr->second.codename; + } protocol_feature_base::protocol_feature_base( protocol_feature_t feature_type, + const digest_type& description_digest, + flat_set&& dependencies, const protocol_feature_subjective_restrictions& restrictions ) - :subjective_restrictions( restrictions ) + :description_digest( description_digest ) + ,dependencies( std::move(dependencies) ) + ,subjective_restrictions( restrictions ) ,_type( feature_type ) { switch( feature_type ) { @@ -46,23 +68,32 @@ namespace eosio { namespace chain { } builtin_protocol_feature::builtin_protocol_feature( builtin_protocol_feature_t codename, + const digest_type& description_digest, + flat_set&& dependencies, const protocol_feature_subjective_restrictions& restrictions ) - :protocol_feature_base( protocol_feature_t::builtin, restrictions ) + :protocol_feature_base( protocol_feature_t::builtin, description_digest, std::move(dependencies), restrictions ) ,_codename(codename) { - auto itr = builtin_codenames.find( codename ); - EOS_ASSERT( itr != builtin_codenames.end(), protocol_feature_validation_exception, + auto itr = builtin_protocol_feature_codenames.find( codename ); + EOS_ASSERT( itr != builtin_protocol_feature_codenames.end(), protocol_feature_validation_exception, "Unsupported builtin_protocol_feature_t passed to constructor: ${codename}", ("codename", static_cast(codename)) ); - builtin_feature_codename = itr->second; + builtin_feature_codename = itr->second.codename; } void builtin_protocol_feature::reflector_init() { protocol_feature_base::reflector_init(); - for( const auto& p : builtin_codenames ) { - if( builtin_feature_codename.compare( p.second ) == 0 ) { + auto codename = get_codename(); + for( const auto& p : builtin_protocol_feature_codenames ) { + if( p.first == codename ) { + EOS_ASSERT( builtin_feature_codename == p.second.codename, + protocol_feature_validation_exception, + "Deserialized protocol feature has invalid codename. Expected: ${expected}. 
Actual: ${actual}.", + ("expected", p.second.codename) + ("actual", protocol_feature_type) + ); _codename = p.first; return; } @@ -83,7 +114,7 @@ namespace eosio { namespace chain { } protocol_feature_manager::protocol_feature_manager() { - _builtin_protocol_features.reserve( builtin_codenames.size() ); + _builtin_protocol_features.reserve( builtin_protocol_feature_codenames.size() ); } protocol_feature_manager::recognized_t @@ -127,6 +158,20 @@ namespace eosio { namespace chain { return (_builtin_protocol_features[indx].activation_block_num <= current_block_num); } + optional + protocol_feature_manager::get_builtin_digest( builtin_protocol_feature_t feature_codename )const + { + uint32_t indx = static_cast( feature_codename ); + + if( indx >= _builtin_protocol_features.size() ) + return {}; + + if( _builtin_protocol_features[indx].iterator_to_protocol_feature == _recognized_protocol_features.end() ) + return {}; + + return _builtin_protocol_features[indx].iterator_to_protocol_feature->feature_digest; + } + bool protocol_feature_manager::validate_dependencies( const digest_type& feature_digest, const std::function& validator @@ -142,11 +187,41 @@ namespace eosio { namespace chain { return true; } + builtin_protocol_feature + protocol_feature_manager::make_default_builtin_protocol_feature( builtin_protocol_feature_t codename )const + { + auto itr = builtin_protocol_feature_codenames.find( codename ); + + EOS_ASSERT( itr != builtin_protocol_feature_codenames.end(), protocol_feature_validation_exception, + "Unsupported builtin_protocol_feature_t: ${codename}", + ("codename", static_cast(codename)) ); + + flat_set dependencies; + dependencies.reserve( itr->second.builtin_dependencies.size() ); + + for( const auto& d : itr->second.builtin_dependencies ) { + auto dependency_digest = get_builtin_digest( d ); + EOS_ASSERT( dependency_digest, protocol_feature_exception, + "cannot make default builtin protocol feature with codename '${codename}' since it has a dependency that has not been added yet: ${dependency_codename}", + ("codename", static_cast(itr->first)) + ("dependency_codename", static_cast(d)) + ); + dependencies.insert( *dependency_digest ); + } + + return {itr->first, itr->second.description_digest, std::move(dependencies), itr->second.subjective_restrictions}; + } + void protocol_feature_manager::add_feature( const builtin_protocol_feature& f ) { EOS_ASSERT( _head_of_builtin_activation_list == builtin_protocol_feature_entry::no_previous, protocol_feature_exception, "new builtin protocol features cannot be added after a protocol feature has already been activated" ); + auto builtin_itr = builtin_protocol_feature_codenames.find( f._codename ); + EOS_ASSERT( builtin_itr != builtin_protocol_feature_codenames.end(), protocol_feature_validation_exception, + "Builtin protocol feature has unsupported builtin_protocol_feature_t: ${codename}", + ("codename", static_cast( f._codename )) ); + uint32_t indx = static_cast( f._codename ); if( indx < _builtin_protocol_features.size() ) { @@ -158,6 +233,52 @@ namespace eosio { namespace chain { auto feature_digest = f.digest(); + const auto& expected_builtin_dependencies = builtin_itr->second.builtin_dependencies; + flat_set satisfied_builtin_dependencies; + satisfied_builtin_dependencies.reserve( expected_builtin_dependencies.size() ); + + for( const auto& d : f.dependencies ) { + auto itr = _recognized_protocol_features.find( d ); + EOS_ASSERT( itr != _recognized_protocol_features.end(), protocol_feature_exception, + "builtin protocol feature with 
codename '${codename}' and digest of ${digest} has a dependency on a protocol feature with digest ${dependency_digest} that is not recognized", + ("codename", f.builtin_feature_codename) + ("digest", feature_digest) + ("dependency_digest", d ) + ); + + if( itr->builtin_feature + && expected_builtin_dependencies.find( *itr->builtin_feature ) + != expected_builtin_dependencies.end() ) + { + satisfied_builtin_dependencies.insert( *itr->builtin_feature ); + } + } + + if( expected_builtin_dependencies.size() > satisfied_builtin_dependencies.size() ) { + flat_set missing_builtins; + missing_builtins.reserve( expected_builtin_dependencies.size() - satisfied_builtin_dependencies.size() ); + std::set_difference( expected_builtin_dependencies.begin(), expected_builtin_dependencies.end(), + satisfied_builtin_dependencies.begin(), satisfied_builtin_dependencies.end(), + end_inserter( missing_builtins ) + ); + + vector missing_builtins_with_names; + missing_builtins_with_names.reserve( missing_builtins.size() ); + for( const auto& builtin_codename : missing_builtins ) { + auto itr = builtin_protocol_feature_codenames.find( builtin_codename ); + EOS_ASSERT( itr != builtin_protocol_feature_codenames.end(), + protocol_feature_exception, + "Unexpected error" + ); + missing_builtins_with_names.emplace_back( itr->second.codename ); + } + + EOS_THROW( protocol_feature_validation_exception, + "Not all the builtin dependencies of the builtin protocol feature with codename '${codename}' and digest of ${digest} were satisfied.", + ("missing_dependencies", missing_builtins_with_names) + ); + } + auto res = _recognized_protocol_features.insert( protocol_feature{ feature_digest, f.dependencies, diff --git a/libraries/testing/include/eosio/testing/tester.hpp b/libraries/testing/include/eosio/testing/tester.hpp index 10e7d4499e2..d6a1962ea4f 100644 --- a/libraries/testing/include/eosio/testing/tester.hpp +++ b/libraries/testing/include/eosio/testing/tester.hpp @@ -84,8 +84,10 @@ namespace eosio { namespace testing { void init(bool push_genesis = true, db_read_mode read_mode = db_read_mode::SPECULATIVE); void init(controller::config config, const snapshot_reader_ptr& snapshot = nullptr); + void init(controller::config config, protocol_feature_manager&& pfm, const snapshot_reader_ptr& snapshot = nullptr); void close(); + void open( protocol_feature_manager&& pfm, const snapshot_reader_ptr& snapshot ); void open( const snapshot_reader_ptr& snapshot ); bool is_same_chain( base_tester& other ); @@ -297,6 +299,10 @@ namespace eosio { namespace testing { init(config); } + tester(controller::config config, protocol_feature_manager&& pfm) { + init(config, std::move(pfm)); + } + signed_block_ptr produce_block( fc::microseconds skip_time = fc::milliseconds(config::block_interval_ms), uint32_t skip_flag = 0/*skip_missed_block_penalty*/ )override { return _produce_block(skip_time, false, skip_flag); } diff --git a/libraries/testing/tester.cpp b/libraries/testing/tester.cpp index fe492eb5fc9..2b448fddadc 100644 --- a/libraries/testing/tester.cpp +++ b/libraries/testing/tester.cpp @@ -113,6 +113,11 @@ namespace eosio { namespace testing { open(snapshot); } + void base_tester::init(controller::config config, protocol_feature_manager&& pfm, const snapshot_reader_ptr& snapshot ) { + cfg = config; + open(std::move(pfm), snapshot); + } + void base_tester::close() { control.reset(); @@ -121,7 +126,11 @@ namespace eosio { namespace testing { void base_tester::open( const snapshot_reader_ptr& snapshot) { - control.reset( new controller(cfg) 
); + open( protocol_feature_manager{}, snapshot ); + } + + void base_tester::open( protocol_feature_manager&& pfm, const snapshot_reader_ptr& snapshot ) { + control.reset( new controller(cfg, std::move(pfm)) ); control->add_indices(); control->startup( []() { return false; }, snapshot); chain_transactions.clear(); diff --git a/plugins/chain_plugin/chain_plugin.cpp b/plugins/chain_plugin/chain_plugin.cpp index fdfd7cf83ea..89386811496 100644 --- a/plugins/chain_plugin/chain_plugin.cpp +++ b/plugins/chain_plugin/chain_plugin.cpp @@ -208,6 +208,8 @@ void chain_plugin::set_program_options(options_description& cli, options_descrip cfg.add_options() ("blocks-dir", bpo::value()->default_value("blocks"), "the location of the blocks directory (absolute path or relative to application data dir)") + ("protocol-features-dir", bpo::value()->default_value("protocol_features"), + "the location of the protocol_features directory (absolute path or relative to application config dir)") ("checkpoint", bpo::value>()->composing(), "Pairs of [BLOCK_NUM,BLOCK_ID] that should be enforced as checkpoints.") ("wasm-runtime", bpo::value()->value_name("wavm/wabt"), "Override default WASM runtime") ("abi-serializer-max-time-ms", bpo::value()->default_value(config::default_abi_serializer_max_time_ms), @@ -329,6 +331,83 @@ void clear_directory_contents( const fc::path& p ) { } } +optional read_builtin_protocol_feature( const fc::path& p ) { + return {}; +} + +protocol_feature_manager initialize_protocol_features( const fc::path& p, bool populate_missing_builtins = true ) { + using boost::filesystem::directory_iterator; + + protocol_feature_manager pfm; + + if( !fc::is_directory( p ) ) + return pfm; + + map found_builtin_protocol_features; + map > builtin_protocol_features_to_add; + // The bool in the pair is set to true if the builtin protocol feature has already been visited to add + + // Read all builtin protocol features + for( directory_iterator enditr, itr{p}; itr != enditr; ++itr ) { + auto file_path = itr->path(); + if( !fc::is_regular_file( file_path ) || file_path.extension().generic_string() == ".json" ) continue; + + auto f = read_builtin_protocol_feature( file_path ); + + if( !f ) continue; + + auto res = found_builtin_protocol_features.emplace( f->get_codename(), file_path ); + + EOS_ASSERT( res.second, plugin_exception, + "Builtin protocol feature '${codename}' was already included from a previous_file", + ("codename", builtin_protocol_feature_codename(f->get_codename())) + ("current_file", file_path.generic_string()) + ("previous_file", res.first->second.generic_string()) + ); + + builtin_protocol_features_to_add.emplace( std::piecewise_construct, + std::forward_as_tuple( f->digest() ), + std::forward_as_tuple( *f, false ) ); + //pfm.add_feature( *f ); + } + + // Add builtin protocol features to the protocol feature manager in the right order (to satisfy dependencies) + using itr_type = map>::iterator; + std::function add_protocol_feature = + [&pfm, &builtin_protocol_features_to_add, &add_protocol_feature]( const itr_type& itr ) -> void { + if( itr->second.second ) { + return; + } else { + itr->second.second = true; + } + + for( const auto& d : itr->second.first.dependencies ) { + auto itr2 = builtin_protocol_features_to_add.find( d ); + if( itr2 != builtin_protocol_features_to_add.end() ) { + add_protocol_feature( itr2 ); + } + } + + pfm.add_feature( itr->second.first ); + }; + + for( auto itr = builtin_protocol_features_to_add.begin(); itr != builtin_protocol_features_to_add.end(); ++itr ) { + 
add_protocol_feature( itr ); + } + + if( populate_missing_builtins ) { + for( const auto& p : builtin_protocol_feature_codenames ) { + auto itr = found_builtin_protocol_features.find( p.first ); + if( itr != found_builtin_protocol_features.end() ) continue; + + pfm.make_default_builtin_protocol_feature( p.first ); + // TODO: write it out to the protocol-features directory + } + } + + return pfm; +} + void chain_plugin::plugin_initialize(const variables_map& options) { ilog("initializing chain plugin"); @@ -379,6 +458,18 @@ void chain_plugin::plugin_initialize(const variables_map& options) { my->blocks_dir = bld; } + protocol_feature_manager pfm; + { + fc::path protocol_features_dir; + auto pfd = options.at( "protocol-features-dir" ).as(); + if( pfd.is_relative()) + protocol_features_dir = app().config_dir() / pfd; + else + protocol_features_dir = pfd; + + pfm = initialize_protocol_features( protocol_features_dir ); + } + if( options.count("checkpoint") ) { auto cps = options.at("checkpoint").as>(); my->loaded_checkpoints.reserve(cps.size()); @@ -635,7 +726,7 @@ void chain_plugin::plugin_initialize(const variables_map& options) { my->chain_config->block_validation_mode = options.at("validation-mode").as(); } - my->chain.emplace( *my->chain_config ); + my->chain.emplace( *my->chain_config, std::move(pfm) ); my->chain_id.emplace( my->chain->get_chain_id()); // set up method providers diff --git a/unittests/fork_test_utilities.cpp b/unittests/fork_test_utilities.cpp new file mode 100644 index 00000000000..a8caaeeb233 --- /dev/null +++ b/unittests/fork_test_utilities.cpp @@ -0,0 +1,42 @@ +/** + * @file + * @copyright defined in eos/LICENSE + */ +#include "fork_test_utilities.hpp" + +private_key_type get_private_key( name keyname, string role ) { + return private_key_type::regenerate(fc::sha256::hash(string(keyname)+role)); +} + +public_key_type get_public_key( name keyname, string role ){ + return get_private_key( keyname, role ).get_public_key(); +} + +void push_blocks( tester& from, tester& to, uint32_t block_num_limit ) { + while( to.control->fork_db_pending_head_block_num() + < std::min( from.control->fork_db_pending_head_block_num(), block_num_limit ) ) + { + auto fb = from.control->fetch_block_by_number( to.control->fork_db_pending_head_block_num()+1 ); + to.push_block( fb ); + } +} + +bool produce_empty_blocks_until( tester& t, + account_name last_producer, + account_name next_producer, + uint32_t max_num_blocks_to_produce ) +{ + auto condition_satisfied = [&t, last_producer, next_producer]() { + return t.control->pending_block_producer() == next_producer && t.control->head_block_producer() == last_producer; + }; + + for( uint32_t blocks_produced = 0; + blocks_produced < max_num_blocks_to_produce; + t.produce_block(), ++blocks_produced ) + { + if( condition_satisfied() ) + return true; + } + + return condition_satisfied(); +} diff --git a/unittests/fork_test_utilities.hpp b/unittests/fork_test_utilities.hpp new file mode 100644 index 00000000000..f5ae33ae718 --- /dev/null +++ b/unittests/fork_test_utilities.hpp @@ -0,0 +1,21 @@ +/** + * @file + * @copyright defined in eos/LICENSE + */ +#pragma once + +#include + +using namespace eosio::chain; +using namespace eosio::testing; + +private_key_type get_private_key( name keyname, string role ); + +public_key_type get_public_key( name keyname, string role ); + +void push_blocks( tester& from, tester& to, uint32_t block_num_limit = std::numeric_limits::max() ); + +bool produce_empty_blocks_until( tester& t, + account_name last_producer, + 
account_name next_producer, + uint32_t max_num_blocks_to_produce = std::numeric_limits::max() ); diff --git a/unittests/forked_tests.cpp b/unittests/forked_tests.cpp index d2981e3249c..21190445426 100644 --- a/unittests/forked_tests.cpp +++ b/unittests/forked_tests.cpp @@ -16,46 +16,11 @@ #include +#include "fork_test_utilities.hpp" + using namespace eosio::chain; using namespace eosio::testing; -private_key_type get_private_key( name keyname, string role ) { - return private_key_type::regenerate(fc::sha256::hash(string(keyname)+role)); -} - -public_key_type get_public_key( name keyname, string role ){ - return get_private_key( keyname, role ).get_public_key(); -} - -void push_blocks( tester& from, tester& to, uint32_t block_num_limit = std::numeric_limits::max() ) { - while( to.control->fork_db_pending_head_block_num() - < std::min( from.control->fork_db_pending_head_block_num(), block_num_limit ) ) - { - auto fb = from.control->fetch_block_by_number( to.control->fork_db_pending_head_block_num()+1 ); - to.push_block( fb ); - } -} - -bool produce_empty_blocks_until( tester& t, - account_name last_producer, - account_name next_producer, - uint32_t max_num_blocks_to_produce = std::numeric_limits::max() ) -{ - auto condition_satisfied = [&t, last_producer, next_producer]() { - return t.control->pending_block_producer() == next_producer && t.control->head_block_producer() == last_producer; - }; - - for( uint32_t blocks_produced = 0; - blocks_produced < max_num_blocks_to_produce; - t.produce_block(), ++blocks_produced ) - { - if( condition_satisfied() ) - return true; - } - - return condition_satisfied(); -} - BOOST_AUTO_TEST_SUITE(forked_tests) BOOST_AUTO_TEST_CASE( irrblock ) try { diff --git a/unittests/protocol_feature_tests.cpp b/unittests/protocol_feature_tests.cpp new file mode 100644 index 00000000000..c5f4f73da7e --- /dev/null +++ b/unittests/protocol_feature_tests.cpp @@ -0,0 +1,33 @@ +/** + * @file + * @copyright defined in eos/LICENSE.txt + */ +#include +#include +#include + +#include + +#include + +#include + +#include + +#include + +#include "fork_test_utilities.hpp" + +using namespace eosio::chain; +using namespace eosio::testing; + + +BOOST_AUTO_TEST_SUITE(protocol_feature_tests) + +BOOST_AUTO_TEST_CASE( unaccepted_protocol_activation ) try { + +} FC_LOG_AND_RETHROW() + + + +BOOST_AUTO_TEST_SUITE_END() From 19e77204c4b981bf22b23f0f7715efa9a69cc021 Mon Sep 17 00:00:00 2001 From: arhag Date: Tue, 19 Feb 2019 21:30:20 -0500 Subject: [PATCH 015/680] some fixes for #6429 --- .../eosio/chain/protocol_feature_manager.hpp | 13 ++- libraries/chain/protocol_feature_manager.cpp | 21 ++-- libraries/chain/transaction.cpp | 1 - libraries/fc | 2 +- plugins/chain_plugin/chain_plugin.cpp | 108 +++++++++++++----- 5 files changed, 99 insertions(+), 46 deletions(-) diff --git a/libraries/chain/include/eosio/chain/protocol_feature_manager.hpp b/libraries/chain/include/eosio/chain/protocol_feature_manager.hpp index 66dd14c0d94..6eba182639b 100644 --- a/libraries/chain/include/eosio/chain/protocol_feature_manager.hpp +++ b/libraries/chain/include/eosio/chain/protocol_feature_manager.hpp @@ -29,6 +29,8 @@ struct builtin_protocol_feature_spec { protocol_feature_subjective_restrictions subjective_restrictions; }; +extern const std::unordered_map builtin_protocol_feature_codenames; + const char* builtin_protocol_feature_codename( builtin_protocol_feature_t ); class protocol_feature_base { @@ -42,7 +44,7 @@ class protocol_feature_base { void reflector_init(); - protocol_feature_t get_type() { return _type; 
}
+   protocol_feature_t get_type()const { return _type; }
 
 public:
    std::string protocol_feature_type;
@@ -68,7 +70,7 @@ class builtin_protocol_feature : public protocol_feature_base {
 
    digest_type digest()const;
 
-   builtin_protocol_feature_t get_codename() { return _codename; }
+   builtin_protocol_feature_t get_codename()const { return _codename; }
 
    friend class protocol_feature_manager;
 
@@ -78,8 +80,6 @@
    builtin_protocol_feature_t _codename;
 };
 
-extern const std::unordered_map<builtin_protocol_feature_t, builtin_protocol_feature_spec> builtin_protocol_feature_codenames;
-
 class protocol_feature_manager {
 public:
@@ -125,7 +125,10 @@
    bool validate_dependencies( const digest_type& feature_digest,
                                const std::function<bool(const digest_type&)>& validator )const;
 
-   builtin_protocol_feature make_default_builtin_protocol_feature( builtin_protocol_feature_t codename )const;
+   builtin_protocol_feature make_default_builtin_protocol_feature(
+                               builtin_protocol_feature_t codename,
+                               const std::function<void(builtin_protocol_feature_t)>& handle_dependency
+                            )const;
 
    void add_feature( const builtin_protocol_feature& f );
 
diff --git a/libraries/chain/protocol_feature_manager.cpp b/libraries/chain/protocol_feature_manager.cpp
index 7650bd69f26..0c915e73295 100644
--- a/libraries/chain/protocol_feature_manager.cpp
+++ b/libraries/chain/protocol_feature_manager.cpp
@@ -83,24 +83,18 @@ namespace eosio { namespace chain {
    }
 
    void builtin_protocol_feature::reflector_init() {
-      protocol_feature_base::reflector_init();
+      //protocol_feature_base::reflector_init();
 
-      auto codename = get_codename();
       for( const auto& p : builtin_protocol_feature_codenames ) {
-         if( p.first == codename ) {
-            EOS_ASSERT( builtin_feature_codename == p.second.codename,
-                        protocol_feature_validation_exception,
-                        "Deserialized protocol feature has invalid codename. Expected: ${expected}. Actual: ${actual}.",
-                        ("expected", p.second.codename)
-                        ("actual", protocol_feature_type)
-            );
+         if( builtin_feature_codename.compare( p.second.codename ) == 0 ) {
             _codename = p.first;
             return;
          }
       }
 
       EOS_THROW( protocol_feature_validation_exception,
-                 "Unsupported protocol feature type: ${type}", ("type", protocol_feature_type) );
+                 "Unsupported builtin protocol feature codename: ${codename}",
+                 ("codename", builtin_feature_codename) );
    }
 
 
@@ -188,8 +182,10 @@ namespace eosio { namespace chain {
    }
 
    builtin_protocol_feature
-   protocol_feature_manager::make_default_builtin_protocol_feature( builtin_protocol_feature_t codename )const
-   {
+   protocol_feature_manager::make_default_builtin_protocol_feature(
+                                 builtin_protocol_feature_t codename,
+                                 const std::function<void(builtin_protocol_feature_t)>& handle_dependency
+   )const {
       auto itr = builtin_protocol_feature_codenames.find( codename );
       EOS_ASSERT( itr != builtin_protocol_feature_codenames.end(), protocol_feature_validation_exception,
 
      dependencies.reserve( itr->second.builtin_dependencies.size() );
 
      for( const auto& d : itr->second.builtin_dependencies ) {
+        handle_dependency( d );
         auto dependency_digest = get_builtin_digest( d );
         EOS_ASSERT( dependency_digest, protocol_feature_exception,
                     "cannot make default builtin protocol feature with codename '${codename}' since it has a dependency that has not been added yet: ${dependency_codename}",
diff --git a/libraries/chain/transaction.cpp b/libraries/chain/transaction.cpp
index 2724a31b28d..0bbbf848f3e 100644
--- a/libraries/chain/transaction.cpp
+++ b/libraries/chain/transaction.cpp
@@ -327,7 +327,6 @@ packed_transaction::packed_transaction( transaction&& t, vector&
 void packed_transaction::reflector_init() {
    // called after construction, but always on the same thread and before packed_transaction passed to any other threads
-   static_assert(&fc::reflector_init_visitor::reflector_init, "FC with reflector_init required");
    static_assert(fc::raw::has_feature_reflector_init_on_unpacked_reflected_types,
                  "FC unpack needs to call reflector_init otherwise unpacked_trx will not be initialized");
    EOS_ASSERT( unpacked_trx.expiration == time_point_sec(), tx_decompression_error, "packed_transaction already unpacked" );
diff --git a/libraries/fc b/libraries/fc
index d321bf498ba..5b615cdac4e 160000
--- a/libraries/fc
+++ b/libraries/fc
@@ -1 +1 @@
-Subproject commit d321bf498ba8a10fc9ed6cee5636004413f7ff7b
+Subproject commit 5b615cdac4ef098b3400431f4ce0639b01233a01
diff --git a/plugins/chain_plugin/chain_plugin.cpp b/plugins/chain_plugin/chain_plugin.cpp
index 89386811496..2afef2173a7 100644
--- a/plugins/chain_plugin/chain_plugin.cpp
+++ b/plugins/chain_plugin/chain_plugin.cpp
@@ -332,7 +332,11 @@ void clear_directory_contents( const fc::path& p ) {
 }
 
 optional<builtin_protocol_feature> read_builtin_protocol_feature( const fc::path& p ) {
-   return {};
+   try {
+      return fc::json::from_file<builtin_protocol_feature>( p );
+   } catch( ... ) {
+      return {};
+   }
 }
 
 protocol_feature_manager initialize_protocol_features( const fc::path& p, bool populate_missing_builtins = true ) {
@@ -340,45 +344,60 @@ protocol_feature_manager initialize_protocol_features( const fc::path& p, bool p
 
    protocol_feature_manager pfm;
 
-   if( !fc::is_directory( p ) )
-      return pfm;
+   bool directory_exists = true;
+
+   if( fc::exists( p ) ) {
+      EOS_ASSERT( fc::is_directory( p ), plugin_exception,
+                  "Path to protocol-features is not a directory: ${path}",
+                  ("path", p.generic_string())
+      );
+   } else {
+      if( populate_missing_builtins )
+         bfs::create_directory( p );
+      else
+         directory_exists = false;
+   }
 
    map<builtin_protocol_feature_t, fc::path> found_builtin_protocol_features;
    map<digest_type, std::pair<builtin_protocol_feature, bool> > builtin_protocol_features_to_add;
    // The bool in the pair is set to true if the builtin protocol feature has already been visited to add
+   set<builtin_protocol_feature_t> visited_builtins;
 
    // Read all builtin protocol features
-   for( directory_iterator enditr, itr{p}; itr != enditr; ++itr ) {
-      auto file_path = itr->path();
-      if( !fc::is_regular_file( file_path ) || file_path.extension().generic_string() == ".json" ) continue;
+   if( directory_exists ) {
+      for( directory_iterator enditr, itr{p}; itr != enditr; ++itr ) {
+         auto file_path = itr->path();
+         if( !fc::is_regular_file( file_path ) || file_path.extension().generic_string().compare( ".json" ) != 0 )
+            continue;
 
-      auto f = read_builtin_protocol_feature( file_path );
+         auto f = read_builtin_protocol_feature( file_path );
 
-      if( !f ) continue;
+         if( !f ) continue;
 
-      auto res = found_builtin_protocol_features.emplace( f->get_codename(), file_path );
+         auto res = found_builtin_protocol_features.emplace( f->get_codename(), file_path );
 
-      EOS_ASSERT( res.second, plugin_exception,
-                  "Builtin protocol feature '${codename}' was already included from a previous_file",
-                  ("codename", builtin_protocol_feature_codename(f->get_codename()))
-                  ("current_file", file_path.generic_string())
-                  ("previous_file", res.first->second.generic_string())
-      );
+         EOS_ASSERT( res.second, plugin_exception,
+                     "Builtin protocol feature '${codename}' was already included from a previous_file",
+                     ("codename", builtin_protocol_feature_codename(f->get_codename()))
+                     ("current_file", file_path.generic_string())
+                     ("previous_file", res.first->second.generic_string())
+         );
 
-      builtin_protocol_features_to_add.emplace( std::piecewise_construct,
-                                                std::forward_as_tuple( f->digest() ),
-                                                std::forward_as_tuple( *f, false ) );
+         builtin_protocol_features_to_add.emplace( std::piecewise_construct,
+                                                   std::forward_as_tuple( f->digest() ),
+                                                   std::forward_as_tuple( *f, false ) );
+      }
    }
 
    // Add builtin protocol features to the protocol feature manager in the right order (to satisfy dependencies)
    using itr_type = map<digest_type, std::pair<builtin_protocol_feature, bool>>::iterator;
    std::function<void(const itr_type&)> add_protocol_feature =
-   [&pfm, &builtin_protocol_features_to_add, &add_protocol_feature]( const itr_type& itr ) -> void {
+   [&pfm, &builtin_protocol_features_to_add, &visited_builtins, &add_protocol_feature]( const itr_type& itr ) -> void {
       if( itr->second.second ) {
          return;
       } else {
         itr->second.second = true;
+        visited_builtins.insert( itr->second.first.get_codename() );
      }
 
      for( const auto& d : itr->second.first.dependencies ) {
@@ -395,14 +414,49 @@ protocol_feature_manager initialize_protocol_features( const fc::path& p, bool p
       add_protocol_feature( itr );
    }
 
-   if( populate_missing_builtins ) {
-      for( const auto& p : builtin_protocol_feature_codenames ) {
-         auto itr = found_builtin_protocol_features.find( p.first );
-         if( itr != found_builtin_protocol_features.end() ) continue;
+
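The ordering pass that ends above treats the bool in each builtin_protocol_features_to_add entry as a visited flag: a feature is handed to pfm.add_feature only after the recursion has first added every dependency it can find in the map, and the output helper continuing below then covers the builtins that were missing on disk. A minimal standalone sketch of that visited-flag, depth-first pattern follows; the feature struct, string codenames, and printed output are illustrative stand-ins, not code from this patch:

   #include <functional>
   #include <iostream>
   #include <map>
   #include <string>
   #include <utility>
   #include <vector>

   struct feature {
      std::string codename;
      std::vector<std::string> dependencies; // must be added before this feature
   };

   int main() {
      // 'c' depends on 'b', which depends on 'a'; the bool is the visited flag.
      std::map<std::string, std::pair<feature, bool>> to_add{
         { "a", { feature{ "a", {} },      false } },
         { "b", { feature{ "b", { "a" } }, false } },
         { "c", { feature{ "c", { "b" } }, false } }
      };

      std::vector<std::string> added; // stands in for pfm.add_feature(...)

      using itr_type = std::map<std::string, std::pair<feature, bool>>::iterator;
      std::function<void(const itr_type&)> add = [&]( const itr_type& itr ) {
         if( itr->second.second ) return; // already visited
         itr->second.second = true;

         for( const auto& d : itr->second.first.dependencies ) {
            auto itr2 = to_add.find( d );
            if( itr2 != to_add.end() ) add( itr2 ); // recurse into dependencies first
         }

         added.push_back( itr->second.first.codename ); // safe: dependencies already added
      };

      for( auto itr = to_add.begin(); itr != to_add.end(); ++itr )
         add( itr );

      for( const auto& c : added )
         std::cout << c << '\n'; // prints a, b, c regardless of visiting order
   }

Because the real map is keyed by digest and a feature's digest covers the digests of its dependencies, a dependency cycle cannot be expressed, which is presumably why a visited flag suffices here and no explicit cycle check appears.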
auto output_protocol_feature = [&p]( const builtin_protocol_feature& f ) { + static constexpr int max_tries = 10; - pfm.make_default_builtin_protocol_feature( p.first ); - // TODO: write it out to the protocol-features directory - } + string filename_base( "BUILTIN-" ); + filename_base += builtin_protocol_feature_codename( f.get_codename() ); + + string filename = filename_base + ".json"; + int i = 0; + for( ; + i < max_tries && fc::exists( p / filename ); + ++i, filename = filename_base + "-" + std::to_string(i) + ".json" ) + ; + + EOS_ASSERT( i < max_tries, plugin_exception, + "Could not save builtin protocol feature with codename '${codename}' due to file name conflicts", + ("codename", builtin_protocol_feature_codename( f.get_codename() )) + ); + + fc::json::save_to_file( f, p / filename ); + }; + + std::function add_missing_builtins = + [&pfm, &visited_builtins, &output_protocol_feature, &add_missing_builtins, populate_missing_builtins] + ( builtin_protocol_feature_t codename ) -> void { + auto res = visited_builtins.emplace( codename ); + if( !res.second ) return; + + auto f = pfm.make_default_builtin_protocol_feature( codename, + [&add_missing_builtins]( builtin_protocol_feature_t d ) { + add_missing_builtins( d ); + } ); + + pfm.add_feature( f ); + + if( populate_missing_builtins ) + output_protocol_feature( f ); + }; + + for( const auto& p : builtin_protocol_feature_codenames ) { + auto itr = found_builtin_protocol_features.find( p.first ); + if( itr != found_builtin_protocol_features.end() ) continue; + + add_missing_builtins( p.first ); } return pfm; From f89afe31b2ffae803b6699e3d98b8cf72a4ae0ef Mon Sep 17 00:00:00 2001 From: arhag Date: Thu, 21 Feb 2019 19:09:48 -0500 Subject: [PATCH 016/680] bump version to 1.8.0-develop --- CMakeLists.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index ced9cf8fd92..997ae2c1e65 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -36,9 +36,9 @@ set( CMAKE_CXX_EXTENSIONS ON ) set( CXX_STANDARD_REQUIRED ON) set(VERSION_MAJOR 1) -set(VERSION_MINOR 7) +set(VERSION_MINOR 8) set(VERSION_PATCH 0) -set(VERSION_SUFFIX rc1) +set(VERSION_SUFFIX develop) if(VERSION_SUFFIX) set(VERSION_FULL "${VERSION_MAJOR}.${VERSION_MINOR}.${VERSION_PATCH}-${VERSION_SUFFIX}") From 797f1948e6942b3c42c528b900e724cd84ad36c9 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Fri, 22 Feb 2019 10:12:07 -0600 Subject: [PATCH 017/680] Add remote endpoint peer name to dup connection check --- plugins/net_plugin/net_plugin.cpp | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 7777949a7b9..43cb6326fc3 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -1124,7 +1124,8 @@ namespace eosio { connection_ptr conn = weak_this.lock(); if (conn) { if (close_after_send != no_reason) { - elog ("sent a go away message: ${r}, closing connection to ${p}",("r", reason_str(close_after_send))("p", conn->peer_name())); + fc_elog( logger, "sent a go away message: ${r}, closing connection to ${p}", + ("r", reason_str(close_after_send))("p", conn->peer_name()) ); my_impl->close(conn); return; } @@ -1190,6 +1191,13 @@ namespace eosio { if( !peer_addr.empty() ) { return peer_addr; } + if( socket != nullptr ) { + boost::system::error_code ec; + auto rep = socket->remote_endpoint(ec); + if( !ec ) { + return rep.address().to_string() + ':' + std::to_string( rep.port() ); + } + } return "connecting client"; } 
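The net_plugin change in the commit above gives connection::peer_name() a fallback when no configured peer address exists: ask the socket for its remote endpoint, but through the error_code overload so that an already-disconnected socket cannot throw while a log line is being assembled. A condensed, self-contained sketch of that fallback; endpoint_name is a made-up wrapper name, and only boost::asio is assumed:

   #include <boost/asio.hpp>
   #include <memory>
   #include <string>

   using boost::asio::ip::tcp;

   std::string endpoint_name( const std::shared_ptr<tcp::socket>& socket ) {
      if( socket != nullptr ) {
         boost::system::error_code ec;
         auto rep = socket->remote_endpoint( ec ); // error_code overload: no exception on a closed socket
         if( !ec )
            return rep.address().to_string() + ':' + std::to_string( rep.port() );
      }
      return "connecting client"; // same default the patch falls back to
   }

The no-argument remote_endpoint() throws boost::system::system_error once the peer has gone away, and peer_name() runs exactly when connections are being torn down (for example in the go-away log message above), so the non-throwing overload plus the "connecting client" default keeps the logging path safe.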
From e3f3b5dc52effdf2f1b35a0cbd970257095e6347 Mon Sep 17 00:00:00 2001 From: arhag Date: Fri, 22 Feb 2019 12:08:14 -0500 Subject: [PATCH 018/680] Merge branch 'develop' into protocol-feature-foundations --- .buildkite/docker.yml | 33 +- .buildkite/pipeline.yml | 552 +++-- .gitignore | 1 - .gitmodules | 3 + CMakeLists.txt | 33 +- CMakeModules/EosioTester.cmake.in | 13 +- CMakeModules/EosioTesterBuild.cmake.in | 11 +- Docker/README.md | 4 +- README.md | 28 +- eosio_build.sh | 307 --- eosio_install.sh | 116 - eosio_uninstall.sh | 36 - libraries/CMakeLists.txt | 14 + libraries/appbase | 2 +- libraries/chain/block_header_state.cpp | 2 +- libraries/chain/controller.cpp | 63 +- .../include/eosio/chain/abi_serializer.hpp | 14 +- .../include/eosio/chain/apply_context.hpp | 5 +- libraries/chain/include/eosio/chain/asset.hpp | 2 +- .../chain/include/eosio/chain/controller.hpp | 18 +- .../chain/include/eosio/chain/symbol.hpp | 2 +- .../chain/include/eosio/chain/transaction.hpp | 3 +- .../eosio/chain/wasm_eosio_injection.hpp | 15 +- libraries/chain/merkle.cpp | 2 +- libraries/chain/resource_limits.cpp | 6 +- libraries/chain/transaction.cpp | 3 +- libraries/chain/wasm_eosio_injection.cpp | 2 +- libraries/chain/wasm_interface.cpp | 17 +- libraries/fc | 2 +- .../testing/include/eosio/testing/tester.hpp | 13 +- libraries/testing/tester.cpp | 28 +- libraries/yubihsm | 1 + plugins/bnet_plugin/bnet_plugin.cpp | 8 +- .../include/eosio/bnet_plugin/bnet_plugin.hpp | 1 + plugins/chain_plugin/chain_plugin.cpp | 30 +- .../eosio/chain_plugin/chain_plugin.hpp | 3 + plugins/http_plugin/http_plugin.cpp | 96 +- .../include/eosio/http_plugin/http_plugin.hpp | 25 +- plugins/mongo_db_plugin/CMakeLists.txt | 28 +- .../include/eosio/net_plugin/net_plugin.hpp | 1 + plugins/net_plugin/net_plugin.cpp | 918 ++++---- .../eosio/producer_plugin/producer_plugin.hpp | 1 + plugins/producer_plugin/producer_plugin.cpp | 296 +-- .../state_history_log.hpp | 2 +- .../state_history_plugin.cpp | 5 +- .../test_control_plugin.cpp | 15 +- .../txn_test_gen_plugin.cpp | 88 +- plugins/wallet_plugin/CMakeLists.txt | 5 +- .../eosio/wallet_plugin/wallet_manager.hpp | 2 + .../include/eosio/wallet_plugin/yubihsm.h | 1933 ----------------- plugins/wallet_plugin/wallet_manager.cpp | 23 +- plugins/wallet_plugin/yubihsm_wallet.cpp | 125 +- programs/cleos/main.cpp | 461 +++- programs/eosio-launcher/main.cpp | 2 +- programs/nodeos/main.cpp | 26 +- scripts/clean_old_install.sh | 70 - scripts/eosio_build.sh | 317 +++ scripts/eosio_build_amazon.sh | 852 ++------ scripts/eosio_build_centos.sh | 983 +++------ scripts/eosio_build_darwin.sh | 617 ++---- scripts/eosio_build_darwin_deps | 13 + scripts/eosio_build_dep | 12 - scripts/eosio_build_fedora.sh | 720 ++---- scripts/eosio_build_ubuntu.sh | 641 ++---- scripts/eosio_install.sh | 86 + scripts/eosio_uninstall.sh | 87 + scripts/full_uninstaller.sh | 134 ++ scripts/generate_bottle.sh | 17 +- scripts/generate_deb.sh | 38 +- scripts/generate_package.sh.in | 18 +- scripts/generate_rpm.sh | 18 +- scripts/generate_tarball.sh | 18 +- scripts/mongod.conf | 3 + tests/CMakeLists.txt | 2 +- tests/Cluster.py | 53 +- tests/Node.py | 29 +- tests/TestHelper.py | 3 + tests/chain_plugin_tests.cpp | 2 +- tests/get_table_tests.cpp | 44 +- tests/nodeos_forked_chain_test.py | 85 +- tests/nodeos_run_test.py | 1 + tests/nodeos_under_min_avail_ram.py | 2 +- tests/nodeos_voting_test.py | 27 +- tests/restart-scenarios-test.py | 4 +- tests/testUtils.py | 2 + tests/wallet_tests.cpp | 44 +- tutorials/exchange-tutorial-python/README.md | 36 - 
.../exchange_tutorial.py | 187 -- unittests/CMakeLists.txt | 95 +- unittests/abi_tests.cpp | 32 +- unittests/api_tests.cpp | 650 +++--- unittests/auth_tests.cpp | 34 +- unittests/block_timestamp_tests.cpp | 4 +- unittests/bootseq_tests.cpp | 2 +- unittests/contracts.hpp.in | 7 +- unittests/currency_tests.cpp | 15 +- unittests/delay_tests.cpp | 7 +- unittests/forked_tests.cpp | 17 +- unittests/include/config.hpp.in | 11 - unittests/message_buffer_tests.cpp | 63 +- unittests/misc_tests.cpp | 438 +++- unittests/multi_index_tests.cpp | 82 - unittests/multisig_tests.cpp | 632 ------ unittests/producer_schedule_tests.cpp | 56 +- unittests/resource_limits_test.cpp | 6 +- unittests/special_accounts_tests.cpp | 16 +- unittests/test-contracts/CMakeLists.txt | 14 +- unittests/test-contracts/README.md | 5 + unittests/test-contracts/Readme.txt | 3 - .../test-contracts/asserter/CMakeLists.txt | 10 +- .../test-contracts/asserter/asserter.abi | 76 +- .../test-contracts/asserter/asserter.cpp | 26 +- .../test-contracts/asserter/asserter.hpp | 20 +- .../test-contracts/asserter/asserter.wasm | Bin 4641 -> 3625 bytes .../deferred_test/CMakeLists.txt | 8 +- .../deferred_test/deferred_test.abi | 7 +- .../deferred_test/deferred_test.cpp | 70 +- .../deferred_test/deferred_test.hpp | 26 + .../deferred_test/deferred_test.wasm | Bin 10790 -> 8204 bytes .../integration_test/CMakeLists.txt | 6 - .../integration_test/integration_test.abi | 58 - .../integration_test/integration_test.cpp | 44 - .../integration_test/integration_test.wasm | Bin 6889 -> 0 bytes .../multi_index_test/CMakeLists.txt | 6 - .../multi_index_test/multi_index_test.abi | 28 - .../multi_index_test/multi_index_test.cpp | 194 -- .../multi_index_test/multi_index_test.wasm | Bin 16825 -> 0 bytes unittests/test-contracts/noop/CMakeLists.txt | 8 +- unittests/test-contracts/noop/noop.abi | 7 +- unittests/test-contracts/noop/noop.cpp | 26 +- unittests/test-contracts/noop/noop.hpp | 17 + unittests/test-contracts/noop/noop.wasm | Bin 2228 -> 778 bytes .../test-contracts/payloadless/CMakeLists.txt | 8 +- .../payloadless/payloadless.abi | 7 +- .../payloadless/payloadless.cpp | 17 +- .../payloadless/payloadless.doit_rc.md | 14 - .../payloadless/payloadless.hpp | 15 + .../payloadless/payloadless.wasm | Bin 2179 -> 698 bytes .../payloadless/payloadless_rc.md | 40 - unittests/test-contracts/proxy/CMakeLists.txt | 9 +- unittests/test-contracts/proxy/proxy.abi | 96 +- unittests/test-contracts/proxy/proxy.cpp | 126 +- unittests/test-contracts/proxy/proxy.hpp | 65 +- unittests/test-contracts/proxy/proxy.wasm | Bin 16013 -> 18398 bytes .../snapshot_test/CMakeLists.txt | 8 +- .../snapshot_test/snapshot_test.abi | 7 +- .../snapshot_test/snapshot_test.cpp | 99 +- .../snapshot_test/snapshot_test.hpp | 46 + .../snapshot_test/snapshot_test.wasm | Bin 8380 -> 7286 bytes .../test-contracts/test.inline/CMakeLists.txt | 6 - .../test.inline/test.inline.abi | 11 - .../test.inline/test.inline.cpp | 7 - .../test.inline/test.inline.hpp | 23 - .../test.inline/test.inline.wasm | Bin 5068 -> 0 bytes .../test-contracts/test_api/CMakeLists.txt | 6 +- .../test-contracts/test_api/test_action.cpp | 4 - .../test-contracts/test_api/test_api.cpp | 27 - .../test-contracts/test_api/test_api.hpp | 2 - .../test-contracts/test_api/test_api.wasm | Bin 74342 -> 67533 bytes .../test_api/test_compiler_builtins.cpp | 395 ---- .../test_api/test_fixedpoint.cpp | 158 -- .../test_api/test_permission.cpp | 1 - .../test-contracts/test_api/test_print.cpp | 9 +- .../test_api/test_transaction.cpp | 26 +- 
.../test-contracts/test_api_db/CMakeLists.txt | 7 +- .../test_api_db/test_api_db.abi | 144 ++ .../test_api_db/test_api_db.cpp | 493 ++++- .../test_api_db/test_api_db.hpp | 46 + .../test_api_db/test_api_db.wasm | Bin 14681 -> 12721 bytes .../test-contracts/test_api_db/test_db.cpp | 556 ----- .../test_api_mem/CMakeLists.txt | 5 - .../test_api_mem/test_api_mem.cpp | 49 - .../test_api_mem/test_api_mem.wasm | Bin 15119 -> 0 bytes .../test_api_mem/test_extended_memory.cpp | 112 - .../test_api_mem/test_memory.cpp | 386 ---- .../test_api_multi_index/CMakeLists.txt | 8 +- .../test_api_multi_index.abi | 332 +++ .../test_api_multi_index.cpp | 963 +++++++- .../test_api_multi_index.hpp | 109 + .../test_api_multi_index.wasm | Bin 68589 -> 72891 bytes .../test_api_multi_index/test_multi_index.cpp | 957 -------- .../test_ram_limit/CMakeLists.txt | 2 +- unittests/wasm_tests.cpp | 67 +- unittests/whitelist_blacklist_tests.cpp | 6 +- 184 files changed, 7412 insertions(+), 11498 deletions(-) delete mode 100755 eosio_build.sh delete mode 100755 eosio_install.sh delete mode 100755 eosio_uninstall.sh create mode 160000 libraries/yubihsm mode change 100644 => 100755 plugins/txn_test_gen_plugin/txn_test_gen_plugin.cpp delete mode 100755 plugins/wallet_plugin/include/eosio/wallet_plugin/yubihsm.h delete mode 100755 scripts/clean_old_install.sh create mode 100755 scripts/eosio_build.sh mode change 100644 => 100755 scripts/eosio_build_amazon.sh mode change 100644 => 100755 scripts/eosio_build_centos.sh mode change 100644 => 100755 scripts/eosio_build_darwin.sh create mode 100755 scripts/eosio_build_darwin_deps delete mode 100644 scripts/eosio_build_dep mode change 100644 => 100755 scripts/eosio_build_fedora.sh mode change 100644 => 100755 scripts/eosio_build_ubuntu.sh create mode 100755 scripts/eosio_install.sh create mode 100755 scripts/eosio_uninstall.sh create mode 100755 scripts/full_uninstaller.sh mode change 100644 => 100755 scripts/generate_deb.sh create mode 100644 scripts/mongod.conf delete mode 100644 tutorials/exchange-tutorial-python/README.md delete mode 100644 tutorials/exchange-tutorial-python/exchange_tutorial.py delete mode 100644 unittests/include/config.hpp.in delete mode 100644 unittests/multi_index_tests.cpp delete mode 100644 unittests/multisig_tests.cpp create mode 100644 unittests/test-contracts/README.md delete mode 100644 unittests/test-contracts/Readme.txt create mode 100644 unittests/test-contracts/deferred_test/deferred_test.hpp delete mode 100644 unittests/test-contracts/integration_test/CMakeLists.txt delete mode 100644 unittests/test-contracts/integration_test/integration_test.abi delete mode 100644 unittests/test-contracts/integration_test/integration_test.cpp delete mode 100755 unittests/test-contracts/integration_test/integration_test.wasm delete mode 100644 unittests/test-contracts/multi_index_test/CMakeLists.txt delete mode 100644 unittests/test-contracts/multi_index_test/multi_index_test.abi delete mode 100644 unittests/test-contracts/multi_index_test/multi_index_test.cpp delete mode 100755 unittests/test-contracts/multi_index_test/multi_index_test.wasm create mode 100644 unittests/test-contracts/noop/noop.hpp delete mode 100644 unittests/test-contracts/payloadless/payloadless.doit_rc.md create mode 100644 unittests/test-contracts/payloadless/payloadless.hpp delete mode 100644 unittests/test-contracts/payloadless/payloadless_rc.md create mode 100644 unittests/test-contracts/snapshot_test/snapshot_test.hpp delete mode 100644 unittests/test-contracts/test.inline/CMakeLists.txt 
delete mode 100644 unittests/test-contracts/test.inline/test.inline.abi delete mode 100644 unittests/test-contracts/test.inline/test.inline.cpp delete mode 100644 unittests/test-contracts/test.inline/test.inline.hpp delete mode 100755 unittests/test-contracts/test.inline/test.inline.wasm delete mode 100644 unittests/test-contracts/test_api/test_compiler_builtins.cpp delete mode 100644 unittests/test-contracts/test_api/test_fixedpoint.cpp create mode 100644 unittests/test-contracts/test_api_db/test_api_db.abi create mode 100644 unittests/test-contracts/test_api_db/test_api_db.hpp delete mode 100644 unittests/test-contracts/test_api_db/test_db.cpp delete mode 100644 unittests/test-contracts/test_api_mem/CMakeLists.txt delete mode 100644 unittests/test-contracts/test_api_mem/test_api_mem.cpp delete mode 100755 unittests/test-contracts/test_api_mem/test_api_mem.wasm delete mode 100644 unittests/test-contracts/test_api_mem/test_extended_memory.cpp delete mode 100644 unittests/test-contracts/test_api_mem/test_memory.cpp create mode 100644 unittests/test-contracts/test_api_multi_index/test_api_multi_index.abi create mode 100644 unittests/test-contracts/test_api_multi_index/test_api_multi_index.hpp delete mode 100644 unittests/test-contracts/test_api_multi_index/test_multi_index.cpp diff --git a/.buildkite/docker.yml b/.buildkite/docker.yml index f8f6d8e0a12..9be30a77cef 100644 --- a/.buildkite/docker.yml +++ b/.buildkite/docker.yml @@ -5,16 +5,25 @@ steps: docker-credential-gcr configure-docker && \ echo "BUILDING BUILD IMAGE" && \ cd Docker/builder && \ - docker build -t eosio/builder:latest -t eosio/builder:$BUILDKITE_COMMIT . --build-arg branch=$BUILDKITE_COMMIT && \ + docker build -t eosio/builder:latest -t eosio/builder:$BUILDKITE_COMMIT -t eosio/builder:$BUILDKITE_BRANCH . 
--build-arg branch=$BUILDKITE_COMMIT && \ + [[ "$BUILDKITE_TAG" != "" ]] && docker tag eosio/builder:latest eosio/builder:$BUILDKITE_TAG || : && \ docker tag eosio/builder:$BUILDKITE_COMMIT gcr.io/b1-automation-dev/eosio/builder:$BUILDKITE_COMMIT && \ + docker tag eosio/builder:$BUILDKITE_BRANCH gcr.io/b1-automation-dev/eosio/builder:$BUILDKITE_BRANCH && \ + [[ "$BUILDKITE_TAG" != "" ]] && docker tag eosio/builder:$BUILDKITE_TAG gcr.io/b1-automation-dev/eosio/builder:$BUILDKITE_TAG || : && \ docker tag eosio/builder:latest gcr.io/b1-automation-dev/eosio/builder:latest && \ echo "PUSHING DOCKER IMAGES" && \ docker push gcr.io/b1-automation-dev/eosio/builder:$BUILDKITE_COMMIT && \ + docker push gcr.io/b1-automation-dev/eosio/builder:$BUILDKITE_BRANCH && \ + [[ "$BUILDKITE_TAG" != "" ]] && docker push gcr.io/b1-automation-dev/eosio/builder:$BUILDKITE_TAG || : && \ docker push gcr.io/b1-automation-dev/eosio/builder:latest && \ echo "TRASHING OLD IMAGES" && \ docker rmi eosio/builder:$BUILDKITE_COMMIT && \ + docker rmi eosio/builder:$BUILDKITE_BRANCH && \ + [[ "$BUILDKITE_TAG" != "" ]] && docker rmi eosio/builder:$BUILDKITE_TAG || : && \ docker rmi eosio/builder:latest && \ docker rmi gcr.io/b1-automation-dev/eosio/builder:$BUILDKITE_COMMIT && \ + docker rmi gcr.io/b1-automation-dev/eosio/builder:$BUILDKITE_BRANCH && \ + [[ "$BUILDKITE_TAG" != "" ]] && docker rmi gcr.io/b1-automation-dev/eosio/builder:$BUILDKITE_TAG || : && \ docker rmi gcr.io/b1-automation-dev/eosio/builder:latest label: "Docker build builder" agents: @@ -30,16 +39,25 @@ steps: echo "BUILDING EOS IMAGE" && \ docker pull gcr.io/b1-automation-dev/eosio/builder:$BUILDKITE_COMMIT && \ cd Docker && \ - docker build -t eosio/eos:latest -t eosio/eos:$BUILDKITE_COMMIT . --build-arg branch=$BUILDKITE_BRANCH && \ + docker build -t eosio/eos:latest -t eosio/eos:$BUILDKITE_COMMIT -t eosio/eos:$BUILDKITE_BRANCH . 
--build-arg branch=$BUILDKITE_BRANCH && \ + [[ "$BUILDKITE_TAG" != "" ]] && docker tag eosio/eos:latest eosio/eos:$BUILDKITE_TAG || : && \ docker tag eosio/eos:$BUILDKITE_COMMIT gcr.io/b1-automation-dev/eosio/eos:$BUILDKITE_COMMIT && \ + docker tag eosio/eos:$BUILDKITE_BRANCH gcr.io/b1-automation-dev/eosio/eos:$BUILDKITE_BRANCH && \ + [[ "$BUILDKITE_TAG" != "" ]] && docker tag eosio/eos:$BUILDKITE_TAG gcr.io/b1-automation-dev/eosio/eos:$BUILDKITE_TAG || : && \ docker tag eosio/eos:latest gcr.io/b1-automation-dev/eosio/eos:latest && \ echo "PUSHING DOCKER IMAGES" && \ docker push gcr.io/b1-automation-dev/eosio/eos:$BUILDKITE_COMMIT && \ + docker push gcr.io/b1-automation-dev/eosio/eos:$BUILDKITE_BRANCH && \ + [[ "$BUILDKITE_TAG" != "" ]] && docker push gcr.io/b1-automation-dev/eosio/eos:$BUILDKITE_TAG || : && \ docker push gcr.io/b1-automation-dev/eosio/eos:latest && \ echo "TRASHING OLD IMAGES" && \ docker rmi eosio/eos:$BUILDKITE_COMMIT && \ + docker rmi eosio/eos:$BUILDKITE_BRANCH && \ + [[ "$BUILDKITE_TAG" != "" ]] && docker rmi eosio/eos:$BUILDKITE_TAG || : && \ docker rmi eosio/eos:latest && \ docker rmi gcr.io/b1-automation-dev/eosio/eos:$BUILDKITE_COMMIT && \ + docker rmi gcr.io/b1-automation-dev/eosio/eos:$BUILDKITE_BRANCH && \ + [[ "$BUILDKITE_TAG" != "" ]] && docker rmi gcr.io/b1-automation-dev/eosio/eos:$BUILDKITE_TAG || : && \ docker rmi gcr.io/b1-automation-dev/eosio/eos:latest && \ docker rmi gcr.io/b1-automation-dev/eosio/builder:$BUILDKITE_COMMIT label: "Docker build eos" @@ -54,16 +72,25 @@ steps: echo "BUILDING EOS DEV IMAGE" && \ docker pull gcr.io/b1-automation-dev/eosio/builder:$BUILDKITE_COMMIT && \ cd Docker/dev && \ - docker build -t eosio/eos-dev:latest -t eosio/eos-dev:$BUILDKITE_COMMIT . --build-arg branch=$BUILDKITE_BRANCH && \ + docker build -t eosio/eos-dev:latest -t eosio/eos-dev:$BUILDKITE_COMMIT -t eosio/eos-dev:$BUILDKITE_BRANCH . 
--build-arg branch=$BUILDKITE_BRANCH && \ + [[ "$BUILDKITE_TAG" != "" ]] && docker tag eosio/eos-dev:latest eosio/eos-dev:$BUILDKITE_TAG || : && \ docker tag eosio/eos-dev:$BUILDKITE_COMMIT gcr.io/b1-automation-dev/eosio/eos-dev:$BUILDKITE_COMMIT && \ + docker tag eosio/eos-dev:$BUILDKITE_BRANCH gcr.io/b1-automation-dev/eosio/eos-dev:$BUILDKITE_BRANCH && \ + [[ "$BUILDKITE_TAG" != "" ]] && docker tag eosio/eos-dev:$BUILDKITE_TAG gcr.io/b1-automation-dev/eosio/eos-dev:$BUILDKITE_TAG || : && \ docker tag eosio/eos-dev:latest gcr.io/b1-automation-dev/eosio/eos-dev:latest && \ echo "PUSHING DOCKER IMAGES" && \ docker push gcr.io/b1-automation-dev/eosio/eos-dev:$BUILDKITE_COMMIT && \ + docker push gcr.io/b1-automation-dev/eosio/eos-dev:$BUILDKITE_BRANCH && \ + [[ "$BUILDKITE_TAG" != "" ]] && docker push gcr.io/b1-automation-dev/eosio/eos-dev:$BUILDKITE_TAG || : && \ docker push gcr.io/b1-automation-dev/eosio/eos-dev:latest && \ echo "TRASHING OLD IMAGES" && \ docker rmi eosio/eos-dev:$BUILDKITE_COMMIT && \ + docker rmi eosio/eos-dev:$BUILDKITE_BRANCH && \ + [[ "$BUILDKITE_TAG" != "" ]] && docker rmi eosio/eos-dev:$BUILDKITE_TAG || : && \ docker rmi eosio/eos-dev:latest && \ docker rmi gcr.io/b1-automation-dev/eosio/eos-dev:$BUILDKITE_COMMIT && \ + docker rmi gcr.io/b1-automation-dev/eosio/eos-dev:$BUILDKITE_BRANCH && \ + [[ "$BUILDKITE_TAG" != "" ]] && docker rmi gcr.io/b1-automation-dev/eosio/eos-dev:$BUILDKITE_TAG || : && \ docker rmi gcr.io/b1-automation-dev/eosio/eos-dev:latest && \ docker rmi gcr.io/b1-automation-dev/eosio/builder:$BUILDKITE_COMMIT label: "Docker build eos-dev" diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml index 2d38281560e..6bfebd3e0c6 100644 --- a/.buildkite/pipeline.yml +++ b/.buildkite/pipeline.yml @@ -1,57 +1,29 @@ steps: - - command: | - echo "--- Creating symbolic link to job directory :file_folder:" && \ - sleep 5 && ln -s "$(pwd)" /data/job && cd /data/job && \ - echo "+++ Building :hammer:" && \ - echo 1 | ./eosio_build.sh && \ - echo "+++ Validating:" && \ - echo 1 | ./tools/validate_reflection.py plugins/ programs/ libraries/ --recurse --extension "cpp" --extension "hpp" -e && \ - echo "--- Compressing build directory :compression:" && \ - tar -pczf build.tar.gz build/ - label: ":darwin: High Sierra Build" - agents: - - "role=macos-builder" - artifact_paths: "build.tar.gz" - timeout: 60 - - - command: | - echo "--- Creating symbolic link to job directory :file_folder:" && \ - sleep 5 && ln -s "$(pwd)" /data/job && cd /data/job && \ - echo "+++ Building :hammer:" && \ - echo 1 | ./eosio_build.sh && \ - echo "+++ Validating:" && \ - echo 1 | ./tools/validate_reflection.py plugins/ programs/ libraries/ --recurse --extension "cpp" --extension "hpp" -e && \ - echo "--- Compressing build directory :compression:" && \ - tar -pczf build.tar.gz build/ - label: ":darwin: Mojave Build" - agents: - - "role=builder" - - "os=mojave" - artifact_paths: "build.tar.gz" - timeout: 60 - command: | echo "+++ :hammer: Building" && \ - echo 1 | ./eosio_build.sh && \ - echo "+++ Validating:" && \ - echo 1 | ./tools/validate_reflection.py plugins/ programs/ libraries/ --recurse --extension "cpp" --extension "hpp" -e && \ + ./scripts/eosio_build.sh -y && \ echo "--- :compression: Compressing build directory" && \ tar -pczf build.tar.gz build/ - label: ":ubuntu: Build" + label: ":ubuntu: 16.04 Build" agents: queue: "automation-large-builder-fleet" artifact_paths: "build.tar.gz" plugins: - docker#v1.4.0: - image: "eosio/ci:ubuntu" + ecr#v1.1.4: + login: true + 
account_ids: "436617320021" + no-include-email: true + region: "us-west-2" + docker#v2.1.0: + debug: true + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu16_2-1" workdir: /data/job timeout: 60 - + - command: | echo "+++ :hammer: Building" && \ - echo 1 | ./eosio_build.sh && \ - echo "+++ Validating:" && \ - echo 1 | ./tools/validate_reflection.py plugins/ programs/ libraries/ --recurse --extension "cpp" --extension "hpp" -e && \ + ./scripts/eosio_build.sh -y && \ echo "--- :compression: Compressing build directory" && \ tar -pczf build.tar.gz build/ label: ":ubuntu: 18.04 Build" @@ -59,144 +31,249 @@ steps: queue: "automation-large-builder-fleet" artifact_paths: "build.tar.gz" plugins: - docker#v1.4.0: - image: "eosio/ci:ubuntu18" + ecr#v1.1.4: + login: true + account_ids: "436617320021" + no-include-email: true + region: "us-west-2" + docker#v2.1.0: + debug: true + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-1" workdir: /data/job timeout: 60 - command: | echo "+++ :hammer: Building" && \ - echo 1 | ./eosio_build.sh && \ - echo "+++ Validating:" && \ - echo 1 | ./tools/validate_reflection.py plugins/ programs/ libraries/ --recurse --extension "cpp" --extension "hpp" -e && \ + ./scripts/eosio_build.sh -y && \ echo "--- :compression: Compressing build directory" && \ tar -pczf build.tar.gz build/ - label: ":fedora: Build" + label: ":centos: 7 Build" agents: queue: "automation-large-builder-fleet" artifact_paths: "build.tar.gz" plugins: - docker#v1.4.0: - image: "eosio/ci:fedora" + ecr#v1.1.4: + login: true + account_ids: "436617320021" + no-include-email: true + region: "us-west-2" + docker#v2.1.0: + debug: true + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-1" workdir: /data/job timeout: 60 - + - command: | echo "+++ :hammer: Building" && \ - echo 1 | ./eosio_build.sh && \ - echo "+++ Validating:" && \ - echo 1 | ./tools/validate_reflection.py plugins/ programs/ libraries/ --recurse --extension "cpp" --extension "hpp" -e && \ + ./scripts/eosio_build.sh -y && \ echo "--- :compression: Compressing build directory" && \ tar -pczf build.tar.gz build/ - label: ":centos: Build" + label: ":aws: 1 Build" agents: queue: "automation-large-builder-fleet" artifact_paths: "build.tar.gz" plugins: - docker#v1.4.0: - image: "eosio/ci:centos" + ecr#v1.1.4: + login: true + account_ids: "436617320021" + no-include-email: true + region: "us-west-2" + docker#v2.1.0: + debug: true + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux1_2-1" workdir: /data/job timeout: 60 + # - command: | + # echo "+++ :hammer: Building" && \ + # ./scripts/eosio_build.sh -y && \ + # echo "--- :compression: Compressing build directory" && \ + # tar -pczf build.tar.gz build/ + # label: ":aws: 2 Build" + # agents: + # queue: "automation-large-builder-fleet" + # artifact_paths: "build.tar.gz" + # plugins: + # ecr#v1.1.4: + # login: true + # account_ids: "436617320021" + # no-include-email: true + # region: "us-west-2" + # docker#v2.1.0: + # debug: true + # image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-1" + # workdir: /data/job + # timeout: 60 + - command: | echo "+++ :hammer: Building" && \ - echo 1 | ./eosio_build.sh && \ - echo "+++ Validating:" && \ - echo 1 | ./tools/validate_reflection.py plugins/ programs/ libraries/ --recurse --extension "cpp" --extension "hpp" -e && \ + ./scripts/eosio_build.sh -y && \ echo "--- :compression: Compressing build directory" && \ tar -pczf build.tar.gz build/ - label: ":aws: Build" + label: 
":fedora: 27 Build" agents: queue: "automation-large-builder-fleet" artifact_paths: "build.tar.gz" plugins: - docker#v1.4.0: - image: "eosio/ci:amazonlinux" + ecr#v1.1.4: + login: true + account_ids: "436617320021" + no-include-email: true + region: "us-west-2" + docker#v2.1.0: + debug: true + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:fedora27_2-1" workdir: /data/job timeout: 60 + - command: | + echo "--- Creating symbolic link to job directory :file_folder:" && \ + sleep 5 && ln -s "$(pwd)" /data/job && cd /data/job && \ + echo "+++ Building :hammer:" && \ + ./scripts/eosio_build.sh -y && \ + echo "--- Compressing build directory :compression:" && \ + tar -pczf build.tar.gz build/ + label: ":darwin: Mojave Build" + agents: + - "role=builder-v2-1" + - "os=mojave" + artifact_paths: "build.tar.gz" + timeout: 60 + + - command: | + echo "--- Creating symbolic link to job directory :file_folder:" && \ + sleep 5 && ln -s "$(pwd)" /data/job && cd /data/job && \ + echo "+++ Building :hammer:" && \ + ./scripts/eosio_build.sh -y && \ + echo "--- Compressing build directory :compression:" && \ + tar -pczf build.tar.gz build/ + label: ":darwin: High Sierra Build" + agents: + - "role=builder-v2-1" + - "os=high-sierra" + artifact_paths: "build.tar.gz" + timeout: 60 + - wait - command: | echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":darwin: High Sierra Build" && \ + buildkite-agent artifact download "build.tar.gz" . --step ":ubuntu: 16.04 Build" && \ tar -zxf build.tar.gz && \ echo "--- :m: Starting MongoDB" && \ - $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ + ~/bin/mongod --fork --dbpath ~/data/mongodb -f ~/etc/mongod.conf --logpath "$(pwd)"/mongod.log && \ echo "+++ :microscope: Running tests" && \ - ln -s "$(pwd)" /data/job && cd /data/job/build && ctest -j8 -LE _tests --output-on-failure - label: ":darwin: High Sierra Tests" + cd /data/job/build && PATH=\$PATH:~/opt/mongodb/bin ~/bin/ctest -j8 -LE _tests --output-on-failure + label: ":ubuntu: 16.04 Tests" agents: - - "role=macos-tester" - - "os=high-sierra" + queue: "automation-large-builder-fleet" artifact_paths: - "mongod.log" - "build/genesis.json" - "build/config.ini" + plugins: + ecr#v1.1.4: + login: true + account_ids: "436617320021" + no-include-email: true + region: "us-west-2" + docker#v2.1.0: + debug: true + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu16_2-1" + workdir: /data/job timeout: 60 - command: | echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":darwin: High Sierra Build" && \ + buildkite-agent artifact download "build.tar.gz" . 
--step ":ubuntu: 16.04 Build" && \ tar -zxf build.tar.gz && \ echo "--- :m: Starting MongoDB" && \ - $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ + ~/bin/mongod --fork --dbpath ~/data/mongodb -f ~/etc/mongod.conf --logpath "$(pwd)"/mongod.log && \ echo "+++ :microscope: Running tests" && \ - ln -s "$(pwd)" /data/job && cd /data/job/build && ctest -L nonparallelizable_tests --output-on-failure - label: ":darwin: High Sierra NP Tests" + cd /data/job/build && PATH=\$PATH:~/opt/mongodb/bin ~/bin/ctest -L nonparallelizable_tests --output-on-failure + label: ":ubuntu: 16.04 NP Tests" agents: - - "role=macos-tester" + queue: "automation-large-builder-fleet" artifact_paths: - "mongod.log" - "build/genesis.json" - "build/config.ini" + plugins: + ecr#v1.1.4: + login: true + account_ids: "436617320021" + no-include-email: true + region: "us-west-2" + docker#v2.1.0: + debug: true + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu16_2-1" + workdir: /data/job timeout: 60 - + - command: | echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":darwin: Mojave Build" && \ + buildkite-agent artifact download "build.tar.gz" . --step ":ubuntu: 18.04 Build" && \ tar -zxf build.tar.gz && \ echo "--- :m: Starting MongoDB" && \ - $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ + ~/bin/mongod --fork --dbpath ~/data/mongodb -f ~/etc/mongod.conf --logpath "$(pwd)"/mongod.log && \ echo "+++ :microscope: Running tests" && \ - ln -s "$(pwd)" /data/job && cd /data/job/build && ctest -j8 -LE _tests --output-on-failure - label: ":darwin: Mojave Tests" + cd /data/job/build && PATH=\$PATH:~/opt/mongodb/bin ~/bin/ctest -j8 -LE _tests --output-on-failure + label: ":ubuntu: 18.04 Tests" agents: - - "role=tester" - - "os=mojave" + queue: "automation-large-builder-fleet" artifact_paths: - "mongod.log" - "build/genesis.json" - "build/config.ini" + plugins: + ecr#v1.1.4: + login: true + account_ids: "436617320021" + no-include-email: true + region: "us-west-2" + docker#v2.1.0: + debug: true + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-1" + workdir: /data/job timeout: 60 - command: | echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":darwin: Mojave Build" && \ + buildkite-agent artifact download "build.tar.gz" . --step ":ubuntu: 18.04 Build" && \ tar -zxf build.tar.gz && \ echo "--- :m: Starting MongoDB" && \ - $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ + ~/bin/mongod --fork --dbpath ~/data/mongodb -f ~/etc/mongod.conf --logpath "$(pwd)"/mongod.log && \ echo "+++ :microscope: Running tests" && \ - ln -s "$(pwd)" /data/job && cd /data/job/build && ctest -L nonparallelizable_tests --output-on-failure - label: ":darwin: Mojave NP Tests" + cd /data/job/build && PATH=\$PATH:~/opt/mongodb/bin ~/bin/ctest -L nonparallelizable_tests --output-on-failure + label: ":ubuntu: 18.04 NP Tests" agents: - - "role=tester" - - "os=mojave" + queue: "automation-large-builder-fleet" artifact_paths: - "mongod.log" - "build/genesis.json" - "build/config.ini" + plugins: + ecr#v1.1.4: + login: true + account_ids: "436617320021" + no-include-email: true + region: "us-west-2" + docker#v2.1.0: + debug: true + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-1" + workdir: /data/job timeout: 60 - + + - command: | echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . 
--step ":ubuntu: Build" && \ + buildkite-agent artifact download "build.tar.gz" . --step ":centos: 7 Build" && \ tar -zxf build.tar.gz && \ echo "--- :m: Starting MongoDB" && \ - $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ + ~/bin/mongod --fork --dbpath ~/data/mongodb -f ~/etc/mongod.conf --logpath "$(pwd)"/mongod.log && \ echo "+++ :microscope: Running tests" && \ - cd /data/job/build && ctest -j8 -LE _tests --output-on-failure - label: ":ubuntu: Tests" + cd /data/job/build && PATH=\$PATH:~/opt/mongodb/bin ~/bin/ctest -j8 -LE _tests --output-on-failure + label: ":centos: 7 Tests" agents: queue: "automation-large-builder-fleet" artifact_paths: @@ -204,20 +281,26 @@ steps: - "build/genesis.json" - "build/config.ini" plugins: - docker#v1.4.0: - image: "eosio/ci:ubuntu" + ecr#v1.1.4: + login: true + account_ids: "436617320021" + no-include-email: true + region: "us-west-2" + docker#v2.1.0: + debug: true + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-1" workdir: /data/job timeout: 60 - command: | echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":ubuntu: Build" && \ + buildkite-agent artifact download "build.tar.gz" . --step ":centos: 7 Build" && \ tar -zxf build.tar.gz && \ echo "--- :m: Starting MongoDB" && \ - $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ + ~/bin/mongod --fork --dbpath ~/data/mongodb -f ~/etc/mongod.conf --logpath "$(pwd)"/mongod.log && \ echo "+++ :microscope: Running tests" && \ - cd /data/job/build && ctest -L nonparallelizable_tests --output-on-failure - label: ":ubuntu: NP Tests" + cd /data/job/build && PATH=\$PATH:~/opt/mongodb/bin ~/bin/ctest -L nonparallelizable_tests --output-on-failure + label: ":centos: 7 NP Tests" agents: queue: "automation-large-builder-fleet" artifact_paths: @@ -225,20 +308,26 @@ steps: - "build/genesis.json" - "build/config.ini" plugins: - docker#v1.4.0: - image: "eosio/ci:ubuntu" + ecr#v1.1.4: + login: true + account_ids: "436617320021" + no-include-email: true + region: "us-west-2" + docker#v2.1.0: + debug: true + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-1" workdir: /data/job timeout: 60 - + - command: | echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":ubuntu: 18.04 Build" && \ + buildkite-agent artifact download "build.tar.gz" . --step ":aws: 1 Build" && \ tar -zxf build.tar.gz && \ echo "--- :m: Starting MongoDB" && \ - $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ + ~/bin/mongod --fork --dbpath ~/data/mongodb -f ~/etc/mongod.conf --logpath "$(pwd)"/mongod.log && \ echo "+++ :microscope: Running tests" && \ - cd /data/job/build && ctest -j8 -LE _tests --output-on-failure - label: ":ubuntu: 18.04 Tests" + cd /data/job/build && PATH=\$PATH:~/opt/mongodb/bin ~/bin/ctest -j8 -LE _tests --output-on-failure + label: ":aws: 1 Tests" agents: queue: "automation-large-builder-fleet" artifact_paths: @@ -246,20 +335,26 @@ steps: - "build/genesis.json" - "build/config.ini" plugins: - docker#v1.4.0: - image: "eosio/ci:ubuntu18" + ecr#v1.1.4: + login: true + account_ids: "436617320021" + no-include-email: true + region: "us-west-2" + docker#v2.1.0: + debug: true + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux1_2-1" workdir: /data/job timeout: 60 - command: | echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . 
--step ":ubuntu: 18.04 Build" && \ + buildkite-agent artifact download "build.tar.gz" . --step ":aws: 1 Build" && \ tar -zxf build.tar.gz && \ echo "--- :m: Starting MongoDB" && \ - $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ + ~/bin/mongod --fork --dbpath ~/data/mongodb -f ~/etc/mongod.conf --logpath "$(pwd)"/mongod.log && \ echo "+++ :microscope: Running tests" && \ - cd /data/job/build && ctest -L nonparallelizable_tests --output-on-failure - label: ":ubuntu: 18.04 NP Tests" + cd /data/job/build && PATH=\$PATH:~/opt/mongodb/bin ~/bin/ctest -L nonparallelizable_tests --output-on-failure + label: ":aws: 1 NP Tests" agents: queue: "automation-large-builder-fleet" artifact_paths: @@ -267,20 +362,80 @@ steps: - "build/genesis.json" - "build/config.ini" plugins: - docker#v1.4.0: - image: "eosio/ci:ubuntu18" + ecr#v1.1.4: + login: true + account_ids: "436617320021" + no-include-email: true + region: "us-west-2" + docker#v2.1.0: + debug: true + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux1_2-1" workdir: /data/job timeout: 60 + # - command: | + # echo "--- :arrow_down: Downloading build directory" && \ + # buildkite-agent artifact download "build.tar.gz" . --step ":aws: 2 Build" && \ + # tar -zxf build.tar.gz && \ + # echo "--- :m: Starting MongoDB" && \ + # ~/bin/mongod --fork --dbpath ~/data/mongodb -f ~/etc/mongod.conf --logpath "$(pwd)"/mongod.log && \ + # echo "+++ :microscope: Running tests" && \ + # cd /data/job/build && PATH=\$PATH:~/opt/mongodb/bin ~/bin/ctest -j8 -LE _tests --output-on-failure + # label: ":aws: 2 Tests" + # agents: + # queue: "automation-large-builder-fleet" + # artifact_paths: + # - "mongod.log" + # - "build/genesis.json" + # - "build/config.ini" + # plugins: + # ecr#v1.1.4: + # login: true + # account_ids: "436617320021" + # no-include-email: true + # region: "us-west-2" + # docker#v2.1.0: + # debug: true + # image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-1" + # workdir: /data/job + # timeout: 60 + + # - command: | + # echo "--- :arrow_down: Downloading build directory" && \ + # buildkite-agent artifact download "build.tar.gz" . --step ":aws: 2 Build" && \ + # tar -zxf build.tar.gz && \ + # echo "--- :m: Starting MongoDB" && \ + # ~/bin/mongod --fork --dbpath ~/data/mongodb -f ~/etc/mongod.conf --logpath "$(pwd)"/mongod.log && \ + # echo "+++ :microscope: Running tests" && \ + # cd /data/job/build && PATH=\$PATH:~/opt/mongodb/bin ~/bin/ctest -L nonparallelizable_tests --output-on-failure + # label: ":aws: 2 NP Tests" + # agents: + # queue: "automation-large-builder-fleet" + # artifact_paths: + # - "mongod.log" + # - "build/genesis.json" + # - "build/config.ini" + # plugins: + # ecr#v1.1.4: + # login: true + # account_ids: "436617320021" + # no-include-email: true + # region: "us-west-2" + # docker#v2.1.0: + # debug: true + # image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-1" + # workdir: /data/job + # timeout: 60 + - command: | echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":fedora: Build" && \ + buildkite-agent artifact download "build.tar.gz" . 
--step ":fedora: 27 Build" && \ tar -zxf build.tar.gz && \ echo "--- :m: Starting MongoDB" && \ - $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ + ~/bin/mongod --fork --dbpath ~/data/mongodb -f ~/etc/mongod.conf --logpath "$(pwd)"/mongod.log && \ echo "+++ :microscope: Running tests" && \ - cd /data/job/build && ctest -j8 -LE _tests --output-on-failure - label: ":fedora: Tests" + cd /data/job/build && PATH=\$PATH:~/opt/mongodb/bin ~/bin/ctest -j8 -LE _tests --output-on-failure + label: ":fedora: 27 Tests" agents: queue: "automation-large-builder-fleet" artifact_paths: @@ -288,20 +443,26 @@ steps: - "build/genesis.json" - "build/config.ini" plugins: - docker#v1.4.0: - image: "eosio/ci:fedora" + ecr#v1.1.4: + login: true + account_ids: "436617320021" + no-include-email: true + region: "us-west-2" + docker#v2.1.0: + debug: true + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:fedora27_2-1" workdir: /data/job timeout: 60 - command: | echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":fedora: Build" && \ + buildkite-agent artifact download "build.tar.gz" . --step ":fedora: 27 Build" && \ tar -zxf build.tar.gz && \ echo "--- :m: Starting MongoDB" && \ - $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ + ~/bin/mongod --fork --dbpath ~/data/mongodb -f ~/etc/mongod.conf --logpath "$(pwd)"/mongod.log && \ echo "+++ :microscope: Running tests" && \ - cd /data/job/build && ctest -L nonparallelizable_tests --output-on-failure - label: ":fedora: NP Tests" + cd /data/job/build && PATH=\$PATH:~/opt/mongodb/bin ~/bin/ctest -L nonparallelizable_tests --output-on-failure + label: ":fedora: 27 NP Tests" agents: queue: "automation-large-builder-fleet" artifact_paths: @@ -309,95 +470,89 @@ steps: - "build/genesis.json" - "build/config.ini" plugins: - docker#v1.4.0: - image: "eosio/ci:fedora" + ecr#v1.1.4: + login: true + account_ids: "436617320021" + no-include-email: true + region: "us-west-2" + docker#v2.1.0: + debug: true + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:fedora27_2-1" workdir: /data/job timeout: 60 - command: | echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":centos: Build" && \ + buildkite-agent artifact download "build.tar.gz" . --step ":darwin: High Sierra Build" && \ tar -zxf build.tar.gz && \ echo "--- :m: Starting MongoDB" && \ - $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ + ~/bin/mongod --fork --dbpath ~/data/mongodb -f ~/etc/mongod.conf --logpath "$(pwd)"/mongod.log && \ echo "+++ :microscope: Running tests" && \ - cd /data/job/build && ctest -j8 -LE _tests --output-on-failure - label: ":centos: Tests" + ln -s "$(pwd)" /data/job && cd /data/job/build && PATH=\$PATH:~/opt/mongodb/bin ctest -j8 -LE _tests --output-on-failure + label: ":darwin: High Sierra Tests" agents: - queue: "automation-large-builder-fleet" + - "role=tester-v2-1" + - "os=high-sierra" artifact_paths: - "mongod.log" - "build/genesis.json" - "build/config.ini" - plugins: - docker#v1.4.0: - image: "eosio/ci:centos" - workdir: /data/job timeout: 60 - command: | echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":centos: Build" && \ + buildkite-agent artifact download "build.tar.gz" . 
--step ":darwin: High Sierra Build" && \ tar -zxf build.tar.gz && \ echo "--- :m: Starting MongoDB" && \ - $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ + ~/bin/mongod --fork --dbpath ~/data/mongodb -f ~/etc/mongod.conf --logpath "$(pwd)"/mongod.log && \ echo "+++ :microscope: Running tests" && \ - cd /data/job/build && ctest -L nonparallelizable_tests --output-on-failure - label: ":centos: NP Tests" + ln -s "$(pwd)" /data/job && cd /data/job/build && PATH=\$PATH:~/opt/mongodb/bin ctest -L nonparallelizable_tests --output-on-failure + label: ":darwin: High Sierra NP Tests" agents: - queue: "automation-large-builder-fleet" + - "role=tester-v2-1" + - "os=high-sierra" artifact_paths: - "mongod.log" - "build/genesis.json" - "build/config.ini" - plugins: - docker#v1.4.0: - image: "eosio/ci:centos" - workdir: /data/job timeout: 60 - command: | echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":aws: Build" && \ + buildkite-agent artifact download "build.tar.gz" . --step ":darwin: Mojave Build" && \ tar -zxf build.tar.gz && \ echo "--- :m: Starting MongoDB" && \ - $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ + ~/bin/mongod --fork --dbpath ~/data/mongodb -f ~/etc/mongod.conf --logpath "$(pwd)"/mongod.log && \ echo "+++ :microscope: Running tests" && \ - cd /data/job/build && ctest -j8 -LE _tests --output-on-failure - label: ":aws: Tests" + ln -s "$(pwd)" /data/job && cd /data/job/build && PATH=\$PATH:~/opt/mongodb/bin ctest -j8 -LE _tests --output-on-failure + label: ":darwin: Mojave Tests" agents: - queue: "automation-large-builder-fleet" + - "role=tester-v2-1" + - "os=mojave" artifact_paths: - "mongod.log" - "build/genesis.json" - "build/config.ini" - plugins: - docker#v1.4.0: - image: "eosio/ci:amazonlinux" - workdir: /data/job timeout: 60 - command: | echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":aws: Build" && \ + buildkite-agent artifact download "build.tar.gz" . 
--step ":darwin: Mojave Build" && \ tar -zxf build.tar.gz && \ echo "--- :m: Starting MongoDB" && \ - $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ + ~/bin/mongod --fork --dbpath ~/data/mongodb -f ~/etc/mongod.conf --logpath "$(pwd)"/mongod.log && \ echo "+++ :microscope: Running tests" && \ - cd /data/job/build && ctest -L nonparallelizable_tests --output-on-failure - label: ":aws: NP Tests" + ln -s "$(pwd)" /data/job && cd /data/job/build && PATH=\$PATH:~/opt/mongodb/bin ctest -L nonparallelizable_tests --output-on-failure + label: ":darwin: Mojave NP Tests" agents: - queue: "automation-large-builder-fleet" + - "role=tester-v2-1" + - "os=mojave" artifact_paths: - "mongod.log" - "build/genesis.json" - "build/config.ini" - plugins: - docker#v1.4.0: - image: "eosio/ci:amazonlinux" - workdir: /data/job timeout: 60 - + - wait - command: | @@ -408,10 +563,11 @@ steps: ln -s "$(pwd)" /data/job && cd /data/job/build/packages && bash generate_package.sh brew label: ":darwin: High Sierra Package Builder" agents: - - "role=macos-builder" + - "role=builder-v2-1" - "os=high-sierra" artifact_paths: - "build/packages/*.tar.gz" + - "build/packages/*.rb" timeout: 60 - command: | @@ -422,26 +578,33 @@ steps: ln -s "$(pwd)" /data/job && cd /data/job/build/packages && bash generate_package.sh brew label: ":darwin: Mojave Package Builder" agents: - - "role=builder" + - "role=builder-v2-1" - "os=mojave" artifact_paths: - "build/packages/*.tar.gz" + - "build/packages/*.rb" timeout: 60 - command: | echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":ubuntu: Build" && \ + buildkite-agent artifact download "build.tar.gz" . --step ":ubuntu: 16.04 Build" && \ tar -zxf build.tar.gz && \ echo "+++ :microscope: Starting package build" && \ cd /data/job/build/packages && bash generate_package.sh deb - label: ":ubuntu: Package builder" + label: ":ubuntu: 16.04 Package builder" agents: queue: "automation-large-builder-fleet" artifact_paths: - "build/packages/*.deb" plugins: - docker#v1.4.0: - image: "eosio/ci:ubuntu" + ecr#v1.1.4: + login: true + account_ids: "436617320021" + no-include-email: true + region: "us-west-2" + docker#v2.1.0: + debug: true + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu16_2-1" workdir: /data/job env: OS: "ubuntu-16.04" @@ -460,8 +623,14 @@ steps: artifact_paths: - "build/packages/*.deb" plugins: - docker#v1.4.0: - image: "eosio/ci:ubuntu18" + ecr#v1.1.4: + login: true + account_ids: "436617320021" + no-include-email: true + region: "us-west-2" + docker#v2.1.0: + debug: true + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-1" workdir: /data/job env: OS: "ubuntu-18.04" @@ -470,7 +639,7 @@ steps: - command: | echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":fedora: Build" && \ + buildkite-agent artifact download "build.tar.gz" . 
--step ":fedora: 27 Build" && \ tar -zxf build.tar.gz && \ echo "+++ :microscope: Starting package build" && \ yum install -y rpm-build && \ @@ -481,14 +650,20 @@ steps: mkdir -p /root/rpmbuild/SPECS && \ mkdir -p /root/rpmbuild/SRPMS && \ cd /data/job/build/packages && bash generate_package.sh rpm - label: ":fedora: Package builder" + label: ":fedora: 27 Package builder" agents: queue: "automation-large-builder-fleet" artifact_paths: - - "build/packages/x86_64/*.rpm" + - "build/packages/*.rpm" plugins: - docker#v1.4.0: - image: "eosio/ci:fedora" + ecr#v1.1.4: + login: true + account_ids: "436617320021" + no-include-email: true + region: "us-west-2" + docker#v2.1.0: + debug: true + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:fedora27_2-1" workdir: /data/job env: OS: "fc27" @@ -497,7 +672,7 @@ steps: - command: | echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":centos: Build" && \ + buildkite-agent artifact download "build.tar.gz" . --step ":centos: 7 Build" && \ tar -zxf build.tar.gz && \ echo "+++ :microscope: Starting package build" && \ yum install -y rpm-build && \ @@ -508,16 +683,37 @@ steps: mkdir -p /root/rpmbuild/SPECS && \ mkdir -p /root/rpmbuild/SRPMS && \ cd /data/job/build/packages && bash generate_package.sh rpm - label: ":centos: Package builder" + label: ":centos: 7 Package builder" agents: queue: "automation-large-builder-fleet" artifact_paths: - - "build/packages/x86_64/*.rpm" + - "build/packages/*.rpm" plugins: - docker#v1.4.0: - image: "eosio/ci:centos" + ecr#v1.1.4: + login: true + account_ids: "436617320021" + no-include-email: true + region: "us-west-2" + docker#v2.1.0: + debug: true + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-1" workdir: /data/job env: OS: "el7" PKGTYPE: "rpm" timeout: 60 + + - wait + + - command: | + echo "--- :arrow_down: Downloading brew files" && \ + buildkite-agent artifact download "build/packages/eosio.rb" . --step ":darwin: High Sierra Package Builder" && \ + mv build/packages/eosio.rb build/packages/eosio_highsierra.rb && \ + buildkite-agent artifact download "build/packages/eosio.rb" . 
--step ":darwin: Mojave Package Builder" + label: ":darwin: Brew Updater" + agents: + queue: "automation-large-builder-fleet" + artifact_paths: + - "build/packages/eosio_highsierra.rb" + - "build/packages/eosio.rb" + timeout: 60 diff --git a/.gitignore b/.gitignore index 6ab1edbaa7e..e7a67332996 100644 --- a/.gitignore +++ b/.gitignore @@ -3,7 +3,6 @@ *.dylib *.ll *.bc -*.wasm *.wast *.wast.hpp *.s diff --git a/.gitmodules b/.gitmodules index 6ac601cdcc2..80fe86f626d 100644 --- a/.gitmodules +++ b/.gitmodules @@ -15,3 +15,6 @@ [submodule "libraries/wabt"] path = libraries/wabt url = https://github.com/EOSIO/wabt +[submodule "libraries/yubihsm"] + path = libraries/yubihsm + url = https://github.com/Yubico/yubihsm-shell diff --git a/CMakeLists.txt b/CMakeLists.txt index 127c7b919e4..997ae2c1e65 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -16,9 +16,11 @@ list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/CMakeModules") if (UNIX) if (APPLE) - if (LLVM_DIR STREQUAL "" OR NOT LLVM_DIR) - set(LLVM_DIR "/usr/local/Cellar/llvm@4/4.0.1/lib/cmake/llvm") - endif() + execute_process(COMMAND xcrun --show-sdk-path + OUTPUT_VARIABLE CMAKE_OSX_SYSROOT + OUTPUT_STRIP_TRAILING_WHITESPACE) + list(APPEND CMAKE_PREFIX_PATH "/usr/local/opt/llvm@4") + list(APPEND CMAKE_PREFIX_PATH "/usr/local/opt/gettext") endif() endif() @@ -34,7 +36,7 @@ set( CMAKE_CXX_EXTENSIONS ON ) set( CXX_STANDARD_REQUIRED ON) set(VERSION_MAJOR 1) -set(VERSION_MINOR 7) +set(VERSION_MINOR 8) set(VERSION_PATCH 0) set(VERSION_SUFFIX develop) @@ -113,7 +115,6 @@ FIND_PACKAGE(Boost 1.67 REQUIRED COMPONENTS filesystem system program_options - signals serialization chrono unit_test_framework @@ -175,10 +176,6 @@ else( WIN32 ) # Apple AND Linux endif() endif( APPLE ) - if( "${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU" ) - set( CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fno-builtin-memcmp" ) - endif() - if( "${CMAKE_GENERATOR}" STREQUAL "Ninja" ) if( "${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang" ) set( CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fcolor-diagnostics" ) @@ -249,18 +246,20 @@ install(FILES ${CMAKE_BINARY_DIR}/modules/eosio-config.cmake DESTINATION ${CMAKE configure_file(${CMAKE_SOURCE_DIR}/CMakeModules/EosioTester.cmake.in ${CMAKE_BINARY_DIR}/modules/EosioTester.cmake @ONLY) install(FILES ${CMAKE_BINARY_DIR}/modules/EosioTester.cmake DESTINATION ${CMAKE_INSTALL_FULL_LIBDIR}/cmake/eosio) -configure_file(${CMAKE_SOURCE_DIR}/LICENSE +configure_file(${CMAKE_SOURCE_DIR}/LICENSE ${CMAKE_BINARY_DIR}/licenses/eosio/LICENSE COPYONLY) -configure_file(${CMAKE_SOURCE_DIR}/libraries/wabt/LICENSE +configure_file(${CMAKE_SOURCE_DIR}/libraries/wabt/LICENSE ${CMAKE_BINARY_DIR}/licenses/eosio/LICENSE.wabt COPYONLY) -configure_file(${CMAKE_SOURCE_DIR}/libraries/softfloat/COPYING.txt +configure_file(${CMAKE_SOURCE_DIR}/libraries/softfloat/COPYING.txt ${CMAKE_BINARY_DIR}/licenses/eosio/LICENSE.softfloat COPYONLY) -configure_file(${CMAKE_SOURCE_DIR}/libraries/wasm-jit/LICENSE +configure_file(${CMAKE_SOURCE_DIR}/libraries/wasm-jit/LICENSE ${CMAKE_BINARY_DIR}/licenses/eosio/LICENSE.wavm COPYONLY) -configure_file(${CMAKE_SOURCE_DIR}/libraries/fc/secp256k1/upstream/COPYING +configure_file(${CMAKE_SOURCE_DIR}/libraries/fc/secp256k1/upstream/COPYING ${CMAKE_BINARY_DIR}/licenses/eosio/LICENSE.secp256k1 COPYONLY) -configure_file(${CMAKE_SOURCE_DIR}/libraries/fc/src/network/LICENSE.go +configure_file(${CMAKE_SOURCE_DIR}/libraries/fc/src/network/LICENSE.go ${CMAKE_BINARY_DIR}/licenses/eosio/LICENSE.go COPYONLY) 
+configure_file(${CMAKE_SOURCE_DIR}/libraries/yubihsm/LICENSE + ${CMAKE_BINARY_DIR}/licenses/eosio/LICENSE.yubihsm COPYONLY) install(FILES LICENSE DESTINATION ${CMAKE_INSTALL_FULL_DATAROOTDIR}/licenses/eosio/ COMPONENT base) install(FILES libraries/wabt/LICENSE DESTINATION ${CMAKE_INSTALL_FULL_DATAROOTDIR}/licenses/eosio/ RENAME LICENSE.wabt COMPONENT base) @@ -268,6 +267,7 @@ install(FILES libraries/softfloat/COPYING.txt DESTINATION ${CMAKE_INSTALL_FULL_D install(FILES libraries/wasm-jit/LICENSE DESTINATION ${CMAKE_INSTALL_FULL_DATAROOTDIR}/licenses/eosio/ RENAME LICENSE.wavm COMPONENT base) install(FILES libraries/fc/secp256k1/upstream/COPYING DESTINATION ${CMAKE_INSTALL_FULL_DATAROOTDIR}/licenses/eosio/ RENAME LICENSE.secp256k1 COMPONENT base) install(FILES libraries/fc/src/network/LICENSE.go DESTINATION ${CMAKE_INSTALL_FULL_DATAROOTDIR}/licenses/eosio/ COMPONENT base) +install(FILES libraries/yubihsm/LICENSE DESTINATION ${CMAKE_INSTALL_FULL_DATAROOTDIR}/licenses/eosio/ RENAME LICENSE.yubihsm COMPONENT base) add_custom_target(base-install COMMAND "${CMAKE_COMMAND}" --build "${CMAKE_BINARY_DIR}" @@ -275,5 +275,8 @@ add_custom_target(base-install USES_TERMINAL ) +get_property(_CTEST_CUSTOM_TESTS_IGNORE GLOBAL PROPERTY CTEST_CUSTOM_TESTS_IGNORE) +file(WRITE "${CMAKE_BINARY_DIR}/CTestCustom.cmake" "SET(CTEST_CUSTOM_TESTS_IGNORE ${_CTEST_CUSTOM_TESTS_IGNORE})") + include(package) include(doxygen) diff --git a/CMakeModules/EosioTester.cmake.in b/CMakeModules/EosioTester.cmake.in index d767251cf54..5469c053d17 100644 --- a/CMakeModules/EosioTester.cmake.in +++ b/CMakeModules/EosioTester.cmake.in @@ -1,6 +1,8 @@ cmake_minimum_required( VERSION 3.5 ) message(STATUS "Setting up Eosio Tester @VERSION_FULL@ at @EOS_ROOT_DIR@") +SET(CMAKE_INSTALL_RPATH "${HOME}/lib;${HOME}/lib64") + set(CMAKE_CXX_COMPILER @CMAKE_CXX_COMPILER@) set(CMAKE_C_COMPILER @CMAKE_C_COMPILER@) @@ -8,15 +10,10 @@ set(EOSIO_VERSION "@VERSION_FULL@") enable_testing() -if (UNIX) - if (APPLE) - if (LLVM_DIR STREQUAL "" OR NOT LLVM_DIR) - set(LLVM_DIR "/usr/local/opt/llvm@4/lib/cmake/llvm") - endif() - endif() +if (LLVM_DIR STREQUAL "" OR NOT LLVM_DIR) + set(LLVM_DIR @LLVM_DIR@) endif() - find_package( Gperftools QUIET ) if( GPERFTOOLS_FOUND ) message( STATUS "Found gperftools; compiling tests with TCMalloc") @@ -33,11 +30,9 @@ set( CXX_STANDARD_REQUIRED ON ) if ( APPLE ) set( CMAKE_CXX_FLAGS "${CMAKE_C_FLAGS} -Wall -Wno-deprecated-declarations" ) - set( BOOST_ROOT "/usr/local/boost" ) else ( APPLE ) set( CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall") set( CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -static-libstdc++ -static-libgcc") - set( BOOST_ROOT "~/opt/boost" ) endif ( APPLE ) set( Boost_USE_STATIC_LIBS ON CACHE STRING "ON or OFF" ) diff --git a/CMakeModules/EosioTesterBuild.cmake.in b/CMakeModules/EosioTesterBuild.cmake.in index 290cefa576c..053ad6fa4f4 100644 --- a/CMakeModules/EosioTesterBuild.cmake.in +++ b/CMakeModules/EosioTesterBuild.cmake.in @@ -8,15 +8,10 @@ set(EOSIO_VERSION "@VERSION_FULL@") enable_testing() -if (UNIX) - if (APPLE) - if (LLVM_DIR STREQUAL "" OR NOT LLVM_DIR) - set(LLVM_DIR "/usr/local/opt/llvm@4/lib/cmake/llvm") - endif() - endif() +if (LLVM_DIR STREQUAL "" OR NOT LLVM_DIR) + set(LLVM_DIR @LLVM_DIR@) endif() - find_package( Gperftools QUIET ) if( GPERFTOOLS_FOUND ) message( STATUS "Found gperftools; compiling tests with TCMalloc") @@ -33,11 +28,9 @@ set( CXX_STANDARD_REQUIRED ON ) if ( APPLE ) set( CMAKE_CXX_FLAGS "${CMAKE_C_FLAGS} -Wall -Wno-deprecated-declarations" ) - set( BOOST_ROOT 
"/usr/local/boost" ) else ( APPLE ) set( CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall") set( CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -static-libstdc++ -static-libgcc") - set( BOOST_ROOT "~/opt/boost" ) endif ( APPLE ) set( Boost_USE_STATIC_LIBS ON CACHE STRING "ON or OFF" ) diff --git a/Docker/README.md b/Docker/README.md index 036182f4f0c..1aa0513cca9 100644 --- a/Docker/README.md +++ b/Docker/README.md @@ -20,10 +20,10 @@ cd eos/Docker docker build . -t eosio/eos ``` -The above will build off the most recent commit to the master branch by default. If you would like to target a specific branch/tag, you may use a build argument. For example, if you wished to generate a docker image based off of the v1.7.0-develop tag, you could do the following: +The above will build off the most recent commit to the master branch by default. If you would like to target a specific branch/tag, you may use a build argument. For example, if you wished to generate a docker image based off of the v1.7.0-rc1 tag, you could do the following: ```bash -docker build -t eosio/eos:v1.7.0-develop --build-arg branch=v1.7.0-develop . +docker build -t eosio/eos:v1.7.0-rc1 --build-arg branch=v1.7.0-rc1 . ``` By default, the symbol in eosio.system is set to SYS. You can override this using the symbol argument while building the docker image. diff --git a/README.md b/README.md index 454d3ed56d7..c36b2c6d1a5 100644 --- a/README.md +++ b/README.md @@ -7,22 +7,22 @@ Welcome to the EOSIO source code repository! This software enables businesses to Some of the groundbreaking features of EOSIO include: -1. Free Rate Limited Transactions +1. Free Rate Limited Transactions 1. Low Latency Block confirmation (0.5 seconds) 1. Low-overhead Byzantine Fault Tolerant Finality -1. Designed for optional high-overhead, low-latency BFT finality +1. Designed for optional high-overhead, low-latency BFT finality 1. Smart contract platform powered by WebAssembly 1. Designed for Sparse Header Light Client Validation -1. Scheduled Recurring Transactions +1. Scheduled Recurring Transactions 1. Time Delay Security 1. Hierarchical Role Based Permissions 1. Support for Biometric Hardware Secured Keys (e.g. Apple Secure Enclave) 1. Designed for Parallel Execution of Context Free Validation Logic -1. Designed for Inter Blockchain Communication +1. Designed for Inter Blockchain Communication -EOSIO is released under the open source MIT license and is offered “AS IS” without warranty of any kind, express or implied. Any security provided by the EOSIO software depends in part on how it is used, configured, and deployed. EOSIO is built upon many third-party libraries such as Binaryen (Apache License) and WAVM (BSD 3-clause) which are also provided “AS IS” without warranty of any kind. Without limiting the generality of the foregoing, Block.one makes no representation or guarantee that EOSIO or any third-party libraries will perform as intended or will be free of errors, bugs or faulty code. Both may fail in large or small ways that could completely or partially limit functionality or compromise computer systems. If you use or implement EOSIO, you do so at your own risk. In no event will Block.one be liable to any party for any damages whatsoever, even if it had been advised of the possibility of damage. +EOSIO is released under the open source MIT license and is offered “AS IS” without warranty of any kind, express or implied. Any security provided by the EOSIO software depends in part on how it is used, configured, and deployed. 
EOSIO is built upon many third-party libraries such as WABT (Apache License) and WAVM (BSD 3-clause) which are also provided “AS IS” without warranty of any kind. Without limiting the generality of the foregoing, Block.one makes no representation or guarantee that EOSIO or any third-party libraries will perform as intended or will be free of errors, bugs or faulty code. Both may fail in large or small ways that could completely or partially limit functionality or compromise computer systems. If you use or implement EOSIO, you do so at your own risk. In no event will Block.one be liable to any party for any damages whatsoever, even if it had been advised of the possibility of damage. -Block.one is neither launching nor operating any initial public blockchains based upon the EOSIO software. This release refers only to version 1.0 of our open source software. We caution those who wish to use blockchains built on EOSIO to carefully vet the companies and organizations launching blockchains based on EOSIO before disclosing any private keys to their derivative software. +Block.one is neither launching nor operating any initial public blockchains based upon the EOSIO software. This release refers only to version 1.0 of our open source software. We caution those who wish to use blockchains built on EOSIO to carefully vet the companies and organizations launching blockchains based on EOSIO before disclosing any private keys to their derivative software. There is no public testnet running currently. @@ -39,13 +39,13 @@ $ brew remove eosio ``` #### Ubuntu 18.04 Debian Package Install ```sh -$ wget https://github.com/eosio/eos/releases/download/v1.6.0/eosio_1.7.0-develop-ubuntu-18.04_amd64.deb -$ sudo apt install ./eosio_1.7.0-develop-ubuntu-18.04_amd64.deb +$ wget https://github.com/eosio/eos/releases/download/v1.6.0/eosio_1.7.0-rc1-ubuntu-18.04_amd64.deb +$ sudo apt install ./eosio_1.7.0-rc1-ubuntu-18.04_amd64.deb ``` #### Ubuntu 16.04 Debian Package Install ```sh -$ wget https://github.com/eosio/eos/releases/download/v1.6.0/eosio_1.7.0-develop-ubuntu-16.04_amd64.deb -$ sudo apt install ./eosio_1.7.0-develop-ubuntu-16.04_amd64.deb +$ wget https://github.com/eosio/eos/releases/download/v1.6.0/eosio_1.7.0-rc1-ubuntu-16.04_amd64.deb +$ sudo apt install ./eosio_1.7.0-rc1-ubuntu-16.04_amd64.deb ``` #### Debian Package Uninstall ```sh @@ -53,8 +53,8 @@ $ sudo apt remove eosio ``` #### Centos RPM Package Install ```sh -$ wget https://github.com/eosio/eos/releases/download/v1.6.0/eosio-1.7.0-develop.el7.x86_64.rpm -$ sudo yum install ./eosio-1.7.0-develop.el7.x86_64.rpm +$ wget https://github.com/eosio/eos/releases/download/v1.6.0/eosio-1.7.0-rc1.el7.x86_64.rpm +$ sudo yum install ./eosio-1.7.0-rc1.el7.x86_64.rpm ``` #### Centos RPM Package Uninstall ```sh @@ -62,8 +62,8 @@ $ sudo yum remove eosio.cdt ``` #### Fedora RPM Package Install ```sh -$ wget https://github.com/eosio/eos/releases/download/v1.6.0/eosio-1.7.0-develop.fc27.x86_64.rpm -$ sudo yum install ./eosio-1.7.0-develop.fc27.x86_64.rpm +$ wget https://github.com/eosio/eos/releases/download/v1.6.0/eosio-1.7.0-rc1.fc27.x86_64.rpm +$ sudo yum install ./eosio-1.7.0-rc1.fc27.x86_64.rpm ``` #### Fedora RPM Package Uninstall ```sh diff --git a/eosio_build.sh b/eosio_build.sh deleted file mode 100755 index b37942a78ab..00000000000 --- a/eosio_build.sh +++ /dev/null @@ -1,307 +0,0 @@ -#!/bin/bash -########################################################################## -# This is the EOSIO automated install script for Linux and Mac OS. 
-# This file was downloaded from https://github.com/EOSIO/eos -# -# Copyright (c) 2017, Respective Authors all rights reserved. -# -# After June 1, 2018 this software is available under the following terms: -# -# The MIT License -# -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the "Software"), to deal -# in the Software without restriction, including without limitation the rights -# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -# copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in -# all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -# THE SOFTWARE. -# -# https://github.com/EOSIO/eos/blob/master/LICENSE -########################################################################## - - SOURCE_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" - - function usage() - { - printf "\\tUsage: %s \\n\\t[Build Option -o ] \\n\\t[CodeCoverage -c] \\n\\t[Doxygen -d] \\n\\t[CoreSymbolName -s <1-7 characters>] \\n\\t[Avoid Compiling -a]\\n\\n" "$0" 1>&2 - exit 1 - } - - ARCH=$( uname ) - if [ "${SOURCE_DIR}" == "${PWD}" ]; then - BUILD_DIR="${PWD}/build" - else - BUILD_DIR="${PWD}" - fi - CMAKE_BUILD_TYPE=Release - DISK_MIN=20 - DOXYGEN=false - ENABLE_COVERAGE_TESTING=false - CORE_SYMBOL_NAME="SYS" - # Use current directory's tmp directory if noexec is enabled for /tmp - if (mount | grep "/tmp " | grep --quiet noexec); then - mkdir -p $SOURCE_DIR/tmp - TEMP_DIR="${SOURCE_DIR}/tmp" - rm -rf $SOURCE_DIR/tmp/* - else # noexec wasn't found - TEMP_DIR="/tmp" - fi - START_MAKE=true - TIME_BEGIN=$( date -u +%s ) - VERSION=1.2 - - txtbld=$(tput bold) - bldred=${txtbld}$(tput setaf 1) - txtrst=$(tput sgr0) - - if [ $# -ne 0 ]; then - while getopts ":cdo:s:ah" opt; do - case "${opt}" in - o ) - options=( "Debug" "Release" "RelWithDebInfo" "MinSizeRel" ) - if [[ "${options[*]}" =~ "${OPTARG}" ]]; then - CMAKE_BUILD_TYPE="${OPTARG}" - else - printf "\\n\\tInvalid argument: %s\\n" "${OPTARG}" 1>&2 - usage - exit 1 - fi - ;; - c ) - ENABLE_COVERAGE_TESTING=true - ;; - d ) - DOXYGEN=true - ;; - s) - if [ "${#OPTARG}" -gt 7 ] || [ -z "${#OPTARG}" ]; then - printf "\\n\\tInvalid argument: %s\\n" "${OPTARG}" 1>&2 - usage - exit 1 - else - CORE_SYMBOL_NAME="${OPTARG}" - fi - ;; - a) - START_MAKE=false - ;; - h) - usage - exit 1 - ;; - \? ) - printf "\\n\\tInvalid Option: %s\\n" "-${OPTARG}" 1>&2 - usage - exit 1 - ;; - : ) - printf "\\n\\tInvalid Option: %s requires an argument.\\n" "-${OPTARG}" 1>&2 - usage - exit 1 - ;; - * ) - usage - exit 1 - ;; - esac - done - fi - - if [ ! 
-d "${SOURCE_DIR}/.git" ]; then - printf "\\n\\tThis build script only works with sources cloned from git\\n" - printf "\\tPlease clone a new eos directory with 'git clone https://github.com/EOSIO/eos --recursive'\\n" - printf "\\tSee the wiki for instructions: https://github.com/EOSIO/eos/wiki\\n" - exit 1 - fi - - pushd "${SOURCE_DIR}" &> /dev/null - - STALE_SUBMODS=$(( $(git submodule status --recursive | grep -c "^[+\-]") )) - if [ $STALE_SUBMODS -gt 0 ]; then - printf "\\n\\tgit submodules are not up to date.\\n" - printf "\\tPlease run the command 'git submodule update --init --recursive'.\\n" - exit 1 - fi - - printf "\\n\\tBeginning build version: %s\\n" "${VERSION}" - printf "\\t%s\\n" "$( date -u )" - printf "\\tUser: %s\\n" "$( whoami )" - printf "\\tgit head id: %s\\n" "$( cat .git/refs/heads/master )" - printf "\\tCurrent branch: %s\\n" "$( git rev-parse --abbrev-ref HEAD )" - printf "\\n\\tARCHITECTURE: %s\\n" "${ARCH}" - - popd &> /dev/null - - if [ "$ARCH" == "Linux" ]; then - - if [ ! -e /etc/os-release ]; then - printf "\\n\\tEOSIO currently supports Amazon, Centos, Fedora, Mint & Ubuntu Linux only.\\n" - printf "\\tPlease install on the latest version of one of these Linux distributions.\\n" - printf "\\thttps://aws.amazon.com/amazon-linux-ami/\\n" - printf "\\thttps://www.centos.org/\\n" - printf "\\thttps://start.fedoraproject.org/\\n" - printf "\\thttps://linuxmint.com/\\n" - printf "\\thttps://www.ubuntu.com/\\n" - printf "\\tExiting now.\\n" - exit 1 - fi - - OS_NAME=$( cat /etc/os-release | grep ^NAME | cut -d'=' -f2 | sed 's/\"//gI' ) - - case "$OS_NAME" in - "Amazon Linux AMI"|"Amazon Linux") - FILE="${SOURCE_DIR}/scripts/eosio_build_amazon.sh" - CXX_COMPILER=g++ - C_COMPILER=gcc - MONGOD_CONF=${HOME}/opt/mongodb/mongod.conf - export LLVM_DIR=${HOME}/opt/wasm/lib/cmake/llvm - export CMAKE=${HOME}/opt/cmake/bin/cmake - export PATH=${HOME}/opt/mongodb/bin:$PATH - ;; - "CentOS Linux") - FILE="${SOURCE_DIR}/scripts/eosio_build_centos.sh" - CXX_COMPILER=g++ - C_COMPILER=gcc - MONGOD_CONF=${HOME}/opt/mongodb/mongod.conf - export LLVM_DIR=${HOME}/opt/wasm/lib/cmake/llvm - export CMAKE=${HOME}/opt/cmake/bin/cmake - export PATH=${HOME}/opt/mongodb/bin:$PATH - ;; - "elementary OS") - FILE="${SOURCE_DIR}/scripts/eosio_build_ubuntu.sh" - CXX_COMPILER=clang++-4.0 - C_COMPILER=clang-4.0 - MONGOD_CONF=${HOME}/opt/mongodb/mongod.conf - export PATH=${HOME}/opt/mongodb/bin:$PATH - ;; - "Fedora") - FILE="${SOURCE_DIR}/scripts/eosio_build_fedora.sh" - CXX_COMPILER=g++ - C_COMPILER=gcc - MONGOD_CONF=/etc/mongod.conf - export LLVM_DIR=${HOME}/opt/wasm/lib/cmake/llvm - ;; - "Linux Mint") - FILE="${SOURCE_DIR}/scripts/eosio_build_ubuntu.sh" - CXX_COMPILER=clang++-4.0 - C_COMPILER=clang-4.0 - MONGOD_CONF=${HOME}/opt/mongodb/mongod.conf - export PATH=${HOME}/opt/mongodb/bin:$PATH - ;; - "Ubuntu") - FILE="${SOURCE_DIR}/scripts/eosio_build_ubuntu.sh" - CXX_COMPILER=clang++-4.0 - C_COMPILER=clang-4.0 - MONGOD_CONF=${HOME}/opt/mongodb/mongod.conf - export PATH=${HOME}/opt/mongodb/bin:$PATH - ;; - "Debian GNU/Linux") - FILE=${SOURCE_DIR}/scripts/eosio_build_ubuntu.sh - CXX_COMPILER=clang++-4.0 - C_COMPILER=clang-4.0 - MONGOD_CONF=${HOME}/opt/mongodb/mongod.conf - export PATH=${HOME}/opt/mongodb/bin:$PATH - ;; - *) - printf "\\n\\tUnsupported Linux Distribution. 
Exiting now.\\n\\n" - exit 1 - esac - - export BOOST_ROOT="${HOME}/opt/boost" - OPENSSL_ROOT_DIR=/usr/include/openssl - fi - - if [ "$ARCH" == "Darwin" ]; then - FILE="${SOURCE_DIR}/scripts/eosio_build_darwin.sh" - CXX_COMPILER=clang++ - C_COMPILER=clang - MONGOD_CONF=/usr/local/etc/mongod.conf - OPENSSL_ROOT_DIR=/usr/local/opt/openssl - fi - - ${SOURCE_DIR}/scripts/clean_old_install.sh - if [ $? -ne 0 ]; then - printf "\\n\\tError occurred while trying to remove old installation!\\n\\n" - exit -1 - fi - - . "$FILE" - - printf "\\n\\n>>>>>>>> ALL dependencies successfully found or installed . Installing EOSIO\\n\\n" - printf ">>>>>>>> CMAKE_BUILD_TYPE=%s\\n" "${CMAKE_BUILD_TYPE}" - printf ">>>>>>>> ENABLE_COVERAGE_TESTING=%s\\n" "${ENABLE_COVERAGE_TESTING}" - printf ">>>>>>>> DOXYGEN=%s\\n\\n" "${DOXYGEN}" - - if [ ! -d "${BUILD_DIR}" ]; then - if ! mkdir -p "${BUILD_DIR}" - then - printf "Unable to create build directory %s.\\n Exiting now.\\n" "${BUILD_DIR}" - exit 1; - fi - fi - - if ! cd "${BUILD_DIR}" - then - printf "Unable to enter build directory %s.\\n Exiting now.\\n" "${BUILD_DIR}" - exit 1; - fi - - if [ -z "$CMAKE" ]; then - CMAKE=$( command -v cmake ) - fi - - if ! "${CMAKE}" -DCMAKE_BUILD_TYPE="${CMAKE_BUILD_TYPE}" -DCMAKE_CXX_COMPILER="${CXX_COMPILER}" \ - -DCMAKE_C_COMPILER="${C_COMPILER}" -DCORE_SYMBOL_NAME="${CORE_SYMBOL_NAME}" \ - -DOPENSSL_ROOT_DIR="${OPENSSL_ROOT_DIR}" -DBUILD_MONGO_DB_PLUGIN=true \ - -DENABLE_COVERAGE_TESTING="${ENABLE_COVERAGE_TESTING}" -DBUILD_DOXYGEN="${DOXYGEN}" \ - -DCMAKE_INSTALL_PREFIX="/usr/local/eosio" ${LOCAL_CMAKE_FLAGS} "${SOURCE_DIR}" - then - printf "\\n\\t>>>>>>>>>>>>>>>>>>>> CMAKE building EOSIO has exited with the above error.\\n\\n" - exit -1 - fi - - if [ "${START_MAKE}" == "false" ]; then - printf "\\n\\t>>>>>>>>>>>>>>>>>>>> EOSIO has been successfully configured but not yet built.\\n\\n" - exit 0 - fi - - if [ -z ${JOBS} ]; then JOBS=$CPU_CORE; fi # Future proofing: Ensure $JOBS is set (usually set in scripts/eosio_build_*.sh scripts) - if ! make -j"${JOBS}" - then - printf "\\n\\t>>>>>>>>>>>>>>>>>>>> MAKE building EOSIO has exited with the above error.\\n\\n" - exit -1 - fi - - TIME_END=$(( $(date -u +%s) - ${TIME_BEGIN} )) - - printf "\n\n${bldred}\t _______ _______ _______ _________ _______\n" - printf '\t( ____ \( ___ )( ____ \\\\__ __/( ___ )\n' - printf "\t| ( \/| ( ) || ( \/ ) ( | ( ) |\n" - printf "\t| (__ | | | || (_____ | | | | | |\n" - printf "\t| __) | | | |(_____ ) | | | | | |\n" - printf "\t| ( | | | | ) | | | | | | |\n" - printf "\t| (____/\| (___) |/\____) |___) (___| (___) |\n" - printf "\t(_______/(_______)\_______)\_______/(_______)\n${txtrst}" - - printf "\\n\\tEOSIO has been successfully built. 
%02d:%02d:%02d\\n\\n" $(($TIME_END/3600)) $(($TIME_END%3600/60)) $(($TIME_END%60)) - printf "\\tTo verify your installation run the following commands:\\n" - - print_instructions - - printf "\\tFor more information:\\n" - printf "\\tEOSIO website: https://eos.io\\n" - printf "\\tEOSIO Telegram channel @ https://t.me/EOSProject\\n" - printf "\\tEOSIO resources: https://eos.io/resources/\\n" - printf "\\tEOSIO Stack Exchange: https://eosio.stackexchange.com\\n" - printf "\\tEOSIO wiki: https://github.com/EOSIO/eos/wiki\\n\\n\\n" diff --git a/eosio_install.sh b/eosio_install.sh deleted file mode 100755 index d68a28a2d38..00000000000 --- a/eosio_install.sh +++ /dev/null @@ -1,116 +0,0 @@ -#!/bin/bash -########################################################################## -# This is the EOSIO automated install script for Linux and Mac OS. -# This file was downloaded from https://github.com/EOSIO/eos -# -# Copyright (c) 2017, Respective Authors all rights reserved. -# -# After June 1, 2018 this software is available under the following terms: -# -# The MIT License -# -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the "Software"), to deal -# in the Software without restriction, including without limitation the rights -# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -# copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in -# all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -# THE SOFTWARE. -# -# https://github.com/EOSIO/eos/blob/master/LICENSE.txt -########################################################################## - -if [ "$(id -u)" -ne 0 ]; then - printf "\n\tThis requires sudo. Please run with sudo.\n\n" - exit -1 -fi - - CWD="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" - if [ "${CWD}" != "${PWD}" ]; then - printf "\\n\\tPlease cd into directory %s to run this script.\\n \\tExiting now.\\n\\n" "${CWD}" - exit 1 - fi - - BUILD_DIR="${PWD}/build" - CMAKE_BUILD_TYPE=Release - TIME_BEGIN=$( date -u +%s ) - INSTALL_PREFIX="/usr/local/eosio" - VERSION=1.2 - - txtbld=$(tput bold) - bldred=${txtbld}$(tput setaf 1) - txtrst=$(tput sgr0) - - create_symlink() { - pushd /usr/local/bin &> /dev/null - ln -sf ../eosio/bin/$1 $1 - popd &> /dev/null - } - - create_cmake_symlink() { - mkdir -p /usr/local/lib/cmake/eosio - pushd /usr/local/lib/cmake/eosio &> /dev/null - ln -sf ../../../eosio/lib/cmake/eosio/$1 $1 - popd &> /dev/null - } - - install_symlinks() { - printf "\\n\\tInstalling EOSIO Binary Symlinks\\n\\n" - create_symlink "cleos" - create_symlink "eosio-launcher" - create_symlink "keosd" - create_symlink "nodeos" - } - - if [ ! -d "${BUILD_DIR}" ]; then - printf "\\n\\tError, eosio_build.sh has not ran. Please run ./eosio_build.sh first!\\n\\n" - exit -1 - fi - - ${PWD}/scripts/clean_old_install.sh - if [ $? 
-ne 0 ]; then - printf "\\n\\tError occurred while trying to remove old installation!\\n\\n" - exit -1 - fi - - if ! pushd "${BUILD_DIR}" - then - printf "Unable to enter build directory %s.\\n Exiting now.\\n" "${BUILD_DIR}" - exit 1; - fi - - if ! make install - then - printf "\\n\\t>>>>>>>>>>>>>>>>>>>> MAKE installing EOSIO has exited with the above error.\\n\\n" - exit -1 - fi - popd &> /dev/null - - install_symlinks - create_cmake_symlink "eosio-config.cmake" - - printf "\n\n${bldred}\t _______ _______ _______ _________ _______\n" - printf '\t( ____ \( ___ )( ____ \\\\__ __/( ___ )\n' - printf "\t| ( \/| ( ) || ( \/ ) ( | ( ) |\n" - printf "\t| (__ | | | || (_____ | | | | | |\n" - printf "\t| __) | | | |(_____ ) | | | | | |\n" - printf "\t| ( | | | | ) | | | | | | |\n" - printf "\t| (____/\| (___) |/\____) |___) (___| (___) |\n" - printf "\t(_______/(_______)\_______)\_______/(_______)\n${txtrst}" - - printf "\\tFor more information:\\n" - printf "\\tEOSIO website: https://eos.io\\n" - printf "\\tEOSIO Telegram channel @ https://t.me/EOSProject\\n" - printf "\\tEOSIO resources: https://eos.io/resources/\\n" - printf "\\tEOSIO Stack Exchange: https://eosio.stackexchange.com\\n" - printf "\\tEOSIO wiki: https://github.com/EOSIO/eos/wiki\\n\\n\\n" diff --git a/eosio_uninstall.sh b/eosio_uninstall.sh deleted file mode 100755 index b080176a139..00000000000 --- a/eosio_uninstall.sh +++ /dev/null @@ -1,36 +0,0 @@ -#! /bin/bash - -binaries=(cleos - eosio-launcher - keosd - nodeos) - -if [ -d "/usr/local/eosio" ]; then - printf "\tDo you wish to remove this install? (requires sudo)\n" - select yn in "Yes" "No"; do - case $yn in - [Yy]* ) - if [ "$(id -u)" -ne 0 ]; then - printf "\n\tThis requires sudo, please run ./eosio_uninstall.sh with sudo\n\n" - exit -1 - fi - - pushd /usr/local &> /dev/null - rm -rf eosio - pushd bin &> /dev/null - for binary in ${binaries[@]}; do - rm ${binary} - done - # Handle cleanup of directories created from installation - if [ "$1" == "--full" ]; then - if [ -d ~/Library/Application\ Support/eosio ]; then rm -rf ~/Library/Application\ Support/eosio; fi # Mac OS - if [ -d ~/.local/share/eosio ]; then rm -rf ~/.local/share/eosio; fi # Linux - fi - popd &> /dev/null - break;; - [Nn]* ) - printf "\tAborting uninstall\n\n" - exit -1;; - esac - done -fi diff --git a/libraries/CMakeLists.txt b/libraries/CMakeLists.txt index eeaf5afa771..a40355971a9 100644 --- a/libraries/CMakeLists.txt +++ b/libraries/CMakeLists.txt @@ -13,3 +13,17 @@ set(BUILD_TOOLS OFF CACHE BOOL "Build wabt tools") set(RUN_RE2C OFF CACHE BOOL "Run re2c") set(WITH_EXCEPTIONS ON CACHE BOOL "Build with exceptions enabled" FORCE) add_subdirectory( wabt ) + +set(ENABLE_STATIC ON) +set(CMAKE_MACOSX_RPATH OFF) +set(BUILD_ONLY_LIB ON CACHE BOOL "Library only build") +message(STATUS "Starting yubihsm configuration...") +add_subdirectory( yubihsm EXCLUDE_FROM_ALL ) +set_target_properties(yubihsm_static PROPERTIES COMPILE_OPTIONS "-fno-lto") +message(STATUS "yubihsm configuration complete") + +get_property(_CTEST_CUSTOM_TESTS_IGNORE GLOBAL PROPERTY CTEST_CUSTOM_TESTS_IGNORE) +set_property(GLOBAL PROPERTY CTEST_CUSTOM_TESTS_IGNORE + "change_authkey import_ed decrypt_ec decrypt_rsa ssh logs generate_rsa import_ec echo\ + yubico_otp wrap_data wrap info import_rsa import_authkey generate_hmac generate_ec\ + attest pbkdf2 parsing ${_CTEST_CUSTOM_TESTS_IGNORE}") \ No newline at end of file diff --git a/libraries/appbase b/libraries/appbase index 1dc659f92aa..2208d40578f 160000 --- a/libraries/appbase +++ 
b/libraries/appbase
@@ -1 +1 @@
-Subproject commit 1dc659f92aa1745946211dbb44591ac93a31d7aa
+Subproject commit 2208d40578fb206978418c1df2bb8408ecef3fe7
diff --git a/libraries/chain/block_header_state.cpp b/libraries/chain/block_header_state.cpp
index 3f3fefd15dd..e60df03f7eb 100644
--- a/libraries/chain/block_header_state.cpp
+++ b/libraries/chain/block_header_state.cpp
@@ -92,7 +92,7 @@ namespace eosio { namespace chain {
             new_dpos_proposed_irreversible_blocknum = block_num_for_i;
             //idump((dpos2_lib)(block_num)(dpos_irreversible_blocknum));
 
-            if (i == result.confirm_count.size() - 1) {
+            if (i == static_cast<int32_t>(result.confirm_count.size() - 1)) {
                result.confirm_count.resize(0);
             } else {
                memmove( &result.confirm_count[0], &result.confirm_count[i + 1], result.confirm_count.size() - i - 1);
diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp
index b2a390fe648..ea3ac8f0d05 100644
--- a/libraries/chain/controller.cpp
+++ b/libraries/chain/controller.cpp
@@ -24,7 +24,6 @@
 #include 
 #include 
 
-
 namespace eosio { namespace chain {
 
 using resource_limits::resource_limits_manager;
@@ -193,7 +192,7 @@ struct controller_impl {
     * are removed from this list if they are re-applied in other blocks. Producers
     * can query this list when scheduling new transactions into blocks.
     */
-   map<transaction_id_type, transaction_metadata_ptr>   unapplied_transactions;
+   unapplied_transactions_type   unapplied_transactions;
 
    void pop_block() {
       auto prev = fork_db.get_block( head->header.previous );
@@ -395,8 +394,8 @@ struct controller_impl {
               ("s", start_block_num)("n", blog_head->block_num()) );
          while( auto next = blog.read_block_by_num( head->block_num + 1 ) ) {
             replay_push_block( next, controller::block_status::irreversible );
-            if( next->block_num() % 100 == 0 ) {
-               std::cerr << std::setw(10) << next->block_num() << " of " << blog_head->block_num() <<"\r";
+            if( next->block_num() % 500 == 0 ) {
+               ilog( "${n} of ${head}", ("n", next->block_num())("head", blog_head->block_num()) );
                if( shutdown() ) break;
             }
          }
@@ -1865,10 +1864,23 @@ void controller::add_indices() {
 
 void controller::startup( std::function<bool()> shutdown, const snapshot_reader_ptr& snapshot ) {
    my->head = my->fork_db.head();
-   if( !my->head ) {
+   if( snapshot ) {
+      ilog( "Starting initialization from snapshot, this may take a significant amount of time" );
+   }
+   else if( !my->head ) {
       elog( "No head block in fork db, perhaps we need to replay" );
    }
-   my->init(shutdown, snapshot);
+
+   try {
+      my->init(shutdown, snapshot);
+   } catch (boost::interprocess::bad_alloc& e) {
+      if ( snapshot )
+         elog( "db storage not configured to have enough storage for the provided snapshot, please increase and retry snapshot" );
+      throw e;
+   }
+   if( snapshot ) {
+      ilog( "Finished initialization from snapshot" );
+   }
 }
 
 const chainbase::database& controller::db()const { return my->db; }
@@ -2346,41 +2358,12 @@ const account_object& controller::get_account( account_name name )const
    return my->db.get<account_object, by_name>(name);
 } FC_CAPTURE_AND_RETHROW( (name) ) }
 
-vector<transaction_metadata_ptr> controller::get_unapplied_transactions() const {
-   vector<transaction_metadata_ptr> result;
-   if ( my->read_mode == db_read_mode::SPECULATIVE ) {
-      result.reserve(my->unapplied_transactions.size());
-      for ( const auto& entry: my->unapplied_transactions ) {
-         result.emplace_back(entry.second);
-      }
-   } else {
-      EOS_ASSERT( my->unapplied_transactions.empty(), transaction_exception, "not empty unapplied_transactions in non-speculative mode" ); //should never happen
-   }
-   return result;
-}
-
-void controller::drop_unapplied_transaction(const transaction_metadata_ptr& trx) {
-   my->unapplied_transactions.erase(trx->signed_id);
-}
-
-void controller::drop_all_unapplied_transactions() {
-   my->unapplied_transactions.clear();
-}
-
-vector<transaction_id_type> controller::get_scheduled_transactions() const {
-   const auto& idx = db().get_index<generated_transaction_multi_index,by_delay>();
-
-   vector<transaction_id_type> result;
-
-   static const size_t max_reserve = 64;
-   result.reserve(std::min(idx.size(), max_reserve));
-
-   auto itr = idx.begin();
-   while( itr != idx.end() && itr->delay_until <= pending_block_time() ) {
-      result.emplace_back(itr->trx_id);
-      ++itr;
+unapplied_transactions_type& controller::get_unapplied_transactions() {
+   if ( my->read_mode != db_read_mode::SPECULATIVE ) {
+      EOS_ASSERT( my->unapplied_transactions.empty(), transaction_exception,
+                  "not empty unapplied_transactions in non-speculative mode" ); //should never happen
    }
-   return result;
+   return my->unapplied_transactions;
 }
 
 bool controller::sender_avoids_whitelist_blacklist_enforcement( account_name sender )const {
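For context on the controller.cpp hunk above: the patch collapses the copy-out accessor plus the two drop_* mutators into a single accessor that hands back the underlying map by reference. A minimal sketch of how a speculative-mode caller might now retry and prune unapplied transactions (the drain_unapplied helper and its apply callback are invented for illustration, not part of the patch):

   #include <eosio/chain/controller.hpp>
   #include <functional>

   using namespace eosio::chain;

   // Hypothetical helper: retry each unapplied transaction and erase the ones
   // that were handled. Erasing through the iterator replaces the old
   // drop_unapplied_transaction(); std::map iterators remain valid across erase.
   void drain_unapplied( controller& chain,
                         const std::function<bool(const transaction_metadata_ptr&)>& apply ) {
      auto& unapplied = chain.get_unapplied_transactions(); // SPECULATIVE read mode only
      for( auto itr = unapplied.begin(); itr != unapplied.end(); ) {
         if( apply( itr->second ) )
            itr = unapplied.erase( itr );
         else
            ++itr;
      }
   }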
diff --git a/libraries/chain/include/eosio/chain/abi_serializer.hpp b/libraries/chain/include/eosio/chain/abi_serializer.hpp
index 8f8fca4cdeb..398f219ced8 100644
--- a/libraries/chain/include/eosio/chain/abi_serializer.hpp
+++ b/libraries/chain/include/eosio/chain/abi_serializer.hpp
@@ -124,8 +124,16 @@ namespace impl {
    struct abi_traverse_context {
       abi_traverse_context( fc::microseconds max_serialization_time )
-      : max_serialization_time( max_serialization_time ), deadline( fc::time_point::now() + max_serialization_time ), recursion_depth(0)
-      {}
+      : max_serialization_time( max_serialization_time ),
+        deadline( fc::time_point::now() ), // init to now, updated below
+        recursion_depth(0)
+      {
+         if( max_serialization_time > fc::microseconds::maximum() - deadline.time_since_epoch() ) {
+            deadline = fc::time_point::maximum();
+         } else {
+            deadline += max_serialization_time;
+         }
+      }
 
       abi_traverse_context( fc::microseconds max_serialization_time, fc::time_point deadline )
       : max_serialization_time( max_serialization_time ), deadline( deadline ), recursion_depth(0)
@@ -625,7 +633,7 @@ namespace impl {
     * @tparam Resolver - callable with the signature (const name& code_account) -> optional<abi_serializer>
     */
    template<typename T, typename Resolver>
-   class abi_from_variant_visitor : reflector_init_visitor<T>
+   class abi_from_variant_visitor : public reflector_init_visitor<T>
    {
       public:
          abi_from_variant_visitor( const variant_object& _vo, T& v, Resolver _resolver, abi_traverse_context& _ctx )
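The new abi_traverse_context constructor exists to keep `now + max_serialization_time` from overflowing fc::time_point when the timeout is effectively unbounded. The same saturating-add idiom in plain integer form (a sketch with invented names, assuming non-negative inputs):

   #include <cstdint>
   #include <limits>

   // Clamp to the maximum representable deadline instead of wrapping,
   // exactly as the constructor above does with fc::time_point::maximum().
   int64_t saturating_deadline( int64_t now_us, int64_t timeout_us ) {
      const int64_t max_us = std::numeric_limits<int64_t>::max();
      return timeout_us > max_us - now_us ? max_us : now_us + timeout_us;
   }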
diff --git a/libraries/chain/include/eosio/chain/apply_context.hpp b/libraries/chain/include/eosio/chain/apply_context.hpp
index 1bb8e0e4cf8..03bfd63881e 100644
--- a/libraries/chain/include/eosio/chain/apply_context.hpp
+++ b/libraries/chain/include/eosio/chain/apply_context.hpp
@@ -62,7 +62,7 @@ class apply_context {
          const T& get( int iterator ) {
             EOS_ASSERT( iterator != -1, invalid_table_iterator, "invalid iterator" );
             EOS_ASSERT( iterator >= 0, table_operation_not_permitted, "dereference of end iterator" );
-            EOS_ASSERT( iterator < _iterator_to_object.size(), invalid_table_iterator, "iterator out of range" );
+            EOS_ASSERT( (size_t)iterator < _iterator_to_object.size(), invalid_table_iterator, "iterator out of range" );
             auto result = _iterator_to_object[iterator];
             EOS_ASSERT( result, table_operation_not_permitted, "dereference of deleted object" );
             return *result;
@@ -71,7 +71,8 @@
          void remove( int iterator ) {
             EOS_ASSERT( iterator != -1, invalid_table_iterator, "invalid iterator" );
             EOS_ASSERT( iterator >= 0, table_operation_not_permitted, "cannot call remove on end iterators" );
-            EOS_ASSERT( iterator < _iterator_to_object.size(), invalid_table_iterator, "iterator out of range" );
+            EOS_ASSERT( (size_t)iterator < _iterator_to_object.size(), invalid_table_iterator, "iterator out of range" );
+
             auto obj_ptr = _iterator_to_object[iterator];
             if( !obj_ptr ) return;
             _iterator_to_object[iterator] = nullptr;
diff --git a/libraries/chain/include/eosio/chain/asset.hpp b/libraries/chain/include/eosio/chain/asset.hpp
index 5c9bb9669bc..85222652a02 100644
--- a/libraries/chain/include/eosio/chain/asset.hpp
+++ b/libraries/chain/include/eosio/chain/asset.hpp
@@ -18,7 +18,7 @@
 
 with amount = 10 and symbol(4,"CUR")
 
 */
-struct asset
+struct asset : fc::reflect_init
 {
    static constexpr int64_t max_amount = (1LL << 62) - 1;
 
diff --git a/libraries/chain/include/eosio/chain/controller.hpp b/libraries/chain/include/eosio/chain/controller.hpp
index 641b2d1e42d..09abbace360 100644
--- a/libraries/chain/include/eosio/chain/controller.hpp
+++ b/libraries/chain/include/eosio/chain/controller.hpp
@@ -33,6 +33,7 @@ namespace eosio { namespace chain {
    class account_object;
    using resource_limits::resource_limits_manager;
    using apply_handler = std::function<void(apply_context&)>;
+   using unapplied_transactions_type = map<transaction_id_type, transaction_metadata_ptr>;
 
    class fork_database;
 
@@ -111,22 +112,9 @@
           * The caller is responsible for calling drop_unapplied_transaction on a failing transaction that
           * they never intend to retry
           *
-          * @return vector of transactions which have been unapplied
+          * @return map of transactions which have been unapplied
           */
-         vector<transaction_metadata_ptr> get_unapplied_transactions() const;
-         void drop_unapplied_transaction(const transaction_metadata_ptr& trx);
-         void drop_all_unapplied_transactions();
-
-         /**
-          * These transaction IDs represent transactions available in the head chain state as scheduled
-          * or otherwise generated transactions.
-          *
-          * calling push_scheduled_transaction with these IDs will remove the associated transaction from
-          * the chain state IFF it succeeds or objectively fails
-          *
-          * @return
-          */
-         vector<transaction_id_type> get_scheduled_transactions() const;
+         unapplied_transactions_type& get_unapplied_transactions();
 
          /**
          *
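Note that get_scheduled_transactions does not disappear: the declaration removed above reappears on base_tester in the tester.hpp hunk further down. The tester-side definition is outside this excerpt, but reconstructing it from the controller code deleted earlier gives a plausible sketch (treat the body below as an assumption, not as part of the patch):

   vector<transaction_id_type> base_tester::get_scheduled_transactions() const {
      const auto& idx = control->db().get_index<generated_transaction_multi_index,by_delay>();
      vector<transaction_id_type> result;
      auto itr = idx.begin();
      while( itr != idx.end() && itr->delay_until <= control->pending_block_time() ) {
         result.emplace_back( itr->trx_id );
         ++itr;
      }
      return result;
   }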
diff --git a/libraries/chain/include/eosio/chain/symbol.hpp b/libraries/chain/include/eosio/chain/symbol.hpp
index 437b1d36ab7..de4a37514a6 100644
--- a/libraries/chain/include/eosio/chain/symbol.hpp
+++ b/libraries/chain/include/eosio/chain/symbol.hpp
@@ -58,7 +58,7 @@ namespace eosio {
       operator uint64_t()const { return value; }
    };
 
-   class symbol {
+   class symbol : fc::reflect_init {
      public:
 
        static constexpr uint8_t max_precision = 18;
 
diff --git a/libraries/chain/include/eosio/chain/transaction.hpp b/libraries/chain/include/eosio/chain/transaction.hpp
index 3bd836ec0ca..db61e5b17cb 100644
--- a/libraries/chain/include/eosio/chain/transaction.hpp
+++ b/libraries/chain/include/eosio/chain/transaction.hpp
@@ -102,7 +102,7 @@ namespace eosio { namespace chain {
                              bool allow_duplicate_keys = false )const;
    };
 
-   struct packed_transaction {
+   struct packed_transaction : fc::reflect_init {
       enum compression_type {
          none = 0,
          zlib = 1,
@@ -158,6 +158,7 @@ namespace eosio { namespace chain {
       friend struct fc::reflector<packed_transaction>;
       friend struct fc::reflector_init_visitor<packed_transaction>;
+      friend struct fc::has_reflector_init<packed_transaction>;
       void reflector_init();
    private:
       vector<signature_type>    signatures;
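asset, symbol, and packed_transaction all pick up fc::reflect_init in the hunks above, so that fc::raw::unpack invokes reflector_init() after filling in the reflected fields, giving each type a chance to re-establish its invariants post-deserialization. A minimal sketch of the pattern (the example type, its fields, and its invariant are invented; the friend declarations mirror the ones the patch adds):

   struct example : fc::reflect_init {
      uint64_t numerator   = 0;
      uint64_t denominator = 1;

   private:
      friend struct fc::reflector<example>;
      friend struct fc::reflector_init_visitor<example>;
      friend struct fc::has_reflector_init<example>;

      // called by fc::raw::unpack once the raw fields are set
      void reflector_init() {
         FC_ASSERT( denominator != 0, "example unpacked with zero denominator" );
      }
   };
   FC_REFLECT( example, (numerator)(denominator) )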
diff --git a/libraries/chain/include/eosio/chain/wasm_eosio_injection.hpp b/libraries/chain/include/eosio/chain/wasm_eosio_injection.hpp
index f5ebf01c1f7..d59e81f9ba7 100644
--- a/libraries/chain/include/eosio/chain/wasm_eosio_injection.hpp
+++ b/libraries/chain/include/eosio/chain/wasm_eosio_injection.hpp
@@ -37,7 +37,7 @@ namespace eosio { namespace chain { namespace wasm_injections {
       static void build_type_slots( Module& mod ) {
          // add the module types to the type_slots map
-         for ( int i=0; i < mod.types.size(); i++ ) {
+         for ( size_t i=0; i < mod.types.size(); i++ ) {
            std::vector<uint16_t> type_slot_list = { static_cast<uint16_t>(mod.types[i]->ret) };
            for ( auto param : mod.types[i]->parameters )
               type_slot_list.push_back( static_cast<uint16_t>(param) );
@@ -78,7 +78,7 @@
          injected_index_mapping.emplace( index, actual_index );
 
          // shift all exported functions by 1
-         for ( int i=0; i < module.exports.size(); i++ ) {
+         for ( size_t i=0; i < module.exports.size(); i++ ) {
            if ( module.exports[i].kind == IR::ObjectKind::function ) {
               module.exports[i].index++;
            }
@@ -272,7 +272,7 @@
    };
 
-   struct call_depth_check {
+   struct call_depth_check_and_insert_checktime {
      static constexpr bool kills = true;
      static constexpr bool post = false;
      static int32_t global_idx;
@@ -290,6 +290,7 @@
         injector_utils::add_import<ResultType::none>(*(arg.module), "call_depth_assert", assert_idx);
 
         wasm_ops::op_types<>::call_t call_assert;
+        wasm_ops::op_types<>::call_t call_checktime;
         wasm_ops::op_types<>::get_global_t get_global_inst;
         wasm_ops::op_types<>::set_global_t set_global_inst;
 
@@ -301,6 +302,7 @@
         wasm_ops::op_types<>::else__t else_inst;
 
         call_assert.field = assert_idx;
+        call_checktime.field = checktime_injection::chktm_idx;
         get_global_inst.field = global_idx;
         set_global_inst.field = global_idx;
         const_inst.field = -1;
@@ -334,6 +336,7 @@ namespace eosio { namespace chain { namespace wasm_injections {
         INSERT_INJECTED(const_inst);
         INSERT_INJECTED(add_inst);
         INSERT_INJECTED(set_global_inst);
+        INSERT_INJECTED(call_checktime);
 
 #undef INSERT_INJECTED
       }
@@ -679,8 +682,8 @@
    };
 
    struct pre_op_injectors : wasm_ops::op_types<pass_injector> {
-      using call_t = wasm_ops::call <call_depth_check>;
-      using call_indirect_t = wasm_ops::call_indirect <call_depth_check>;
+      using call_t = wasm_ops::call <call_depth_check_and_insert_checktime>;
+      using call_indirect_t = wasm_ops::call_indirect <call_depth_check_and_insert_checktime>;
 
       // float binops
       using f32_add_t = wasm_ops::f32_add >;
@@ -785,7 +788,7 @@
          // initialize static fields of injectors
          injector_utils::init( mod );
          checktime_injection::init();
-         call_depth_check::init();
+         call_depth_check_and_insert_checktime::init();
       }
 
       void inject() {
diff --git a/libraries/chain/merkle.cpp b/libraries/chain/merkle.cpp
index 16276162c08..9c6ea420981 100644
--- a/libraries/chain/merkle.cpp
+++ b/libraries/chain/merkle.cpp
@@ -39,7 +39,7 @@ digest_type merkle(vector<digest_type> ids) {
       if( ids.size() % 2 )
          ids.push_back(ids.back());
 
-      for (int i = 0; i < ids.size() / 2; i++) {
+      for (size_t i = 0; i < ids.size() / 2; i++) {
         ids[i] = digest_type::hash(make_canonical_pair(ids[2 * i], ids[(2 * i) + 1]));
      }
@@ packed_transaction::packed_transaction( transaction&& t, vector& void packed_transaction::reflector_init() { // called after construction, but always on the same thread and before packed_transaction passed to any other threads - static_assert(&fc::reflector_init_visitor::reflector_init, "FC with reflector_init required"); static_assert(fc::raw::has_feature_reflector_init_on_unpacked_reflected_types, "FC unpack needs to call reflector_init otherwise unpacked_trx will not be initialized"); EOS_ASSERT( unpacked_trx.expiration == time_point_sec(), tx_decompression_error, "packed_transaction already unpacked" ); diff --git a/libraries/chain/wasm_eosio_injection.cpp b/libraries/chain/wasm_eosio_injection.cpp index a4afa44d46d..2c627e13ea7 100644 --- a/libraries/chain/wasm_eosio_injection.cpp +++ b/libraries/chain/wasm_eosio_injection.cpp @@ -35,7 +35,7 @@ void max_memory_injection_visitor::inject( Module& m ) { } void max_memory_injection_visitor::initializer() {} -int32_t call_depth_check::global_idx = -1; +int32_t call_depth_check_and_insert_checktime::global_idx = -1; uint32_t instruction_counter::icnt = 0; uint32_t instruction_counter::tcnt = 0; uint32_t instruction_counter::bcnt = 0; diff --git a/libraries/chain/wasm_interface.cpp b/libraries/chain/wasm_interface.cpp index 28de213c1c2..df0ce578b0e 100644 --- a/libraries/chain/wasm_interface.cpp +++ b/libraries/chain/wasm_interface.cpp @@ -22,6 +22,7 @@ #include #include #include +#include namespace eosio { namespace chain { using namespace webassembly; @@ -211,6 +212,8 @@ class softfloat_api : public context_aware_api { softfloat_api( apply_context& ctx ) :context_aware_api(ctx, true) {} +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wstrict-aliasing" // float binops float _eosio_f32_add( float a, float b ) { float32_t ret = f32_add( to_softfloat32(a), to_softfloat32(b) ); @@ -228,6 +231,7 @@ class softfloat_api : public context_aware_api { float32_t ret = f32_mul( to_softfloat32(a), to_softfloat32(b) ); return *reinterpret_cast(&ret); } +#pragma GCC diagnostic pop float _eosio_f32_min( float af, float bf ) { float32_t a = to_softfloat32(af); float32_t b = to_softfloat32(bf); @@ -901,6 +905,8 @@ class system_api : public context_aware_api { }; +constexpr size_t max_assert_message = 1024; + class context_free_system_api : public context_aware_api { public: explicit context_free_system_api( apply_context& ctx ) @@ -913,14 +919,16 @@ class context_free_system_api : public context_aware_api { // Kept as intrinsic rather than implementing on WASM side (using eosio_assert_message and strlen) because strlen is faster on native side. void eosio_assert( bool condition, null_terminated_ptr msg ) { if( BOOST_UNLIKELY( !condition ) ) { - std::string message( msg ); + const size_t sz = strnlen( msg, max_assert_message ); + std::string message( msg, sz ); EOS_THROW( eosio_assert_message_exception, "assertion failure with message: ${s}", ("s",message) ); } } void eosio_assert_message( bool condition, array_ptr msg, size_t msg_len ) { if( BOOST_UNLIKELY( !condition ) ) { - std::string message( msg, msg_len ); + const size_t sz = msg_len > max_assert_message ? 
max_assert_message : msg_len; + std::string message( msg, sz ); EOS_THROW( eosio_assert_message_exception, "assertion failure with message: ${s}", ("s",message) ); } } @@ -1067,7 +1075,10 @@ class console_api : public context_aware_api { console.precision( std::numeric_limits::digits10 ); extFloat80_t val_approx; f128M_to_extF80M(&val, &val_approx); +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wstrict-aliasing" context.console_append( *(long double*)(&val_approx) ); +#pragma GCC diagnostic pop #else console.precision( std::numeric_limits::digits10 ); double val_approx = from_softfloat64( f128M_to_f64(&val) ); @@ -1266,7 +1277,7 @@ class memory_api : public context_aware_api { :context_aware_api(ctx,true){} char* memcpy( array_ptr dest, array_ptr src, size_t length) { - EOS_ASSERT((std::abs((ptrdiff_t)dest.value - (ptrdiff_t)src.value)) >= length, + EOS_ASSERT((size_t)(std::abs((ptrdiff_t)dest.value - (ptrdiff_t)src.value)) >= length, overlapping_memory_error, "memcpy can only accept non-aliasing pointers"); return (char *)::memcpy(dest, src, length); } diff --git a/libraries/fc b/libraries/fc index d321bf498ba..12956c33041 160000 --- a/libraries/fc +++ b/libraries/fc @@ -1 +1 @@ -Subproject commit d321bf498ba8a10fc9ed6cee5636004413f7ff7b +Subproject commit 12956c330413e69bd998cd0657c8a82ef3e8a106 diff --git a/libraries/testing/include/eosio/testing/tester.hpp b/libraries/testing/include/eosio/testing/tester.hpp index 10e7d4499e2..4328bda7ee8 100644 --- a/libraries/testing/include/eosio/testing/tester.hpp +++ b/libraries/testing/include/eosio/testing/tester.hpp @@ -99,6 +99,17 @@ namespace eosio { namespace testing { void produce_min_num_of_blocks_to_spend_time_wo_inactive_prod(const fc::microseconds target_elapsed_time = fc::microseconds()); signed_block_ptr push_block(signed_block_ptr b); + /** + * These transaction IDs represent transactions available in the head chain state as scheduled + * or otherwise generated transactions. + * + * Calling push_scheduled_transaction with these IDs will remove the associated transaction from + * the chain state IFF it succeeds or objectively fails. + * + * @return + */ + vector get_scheduled_transactions() const; + transaction_trace_ptr push_transaction( packed_transaction& trx, fc::time_point deadline = fc::time_point::maximum(), uint32_t billed_cpu_time_us = DEFAULT_BILLED_CPU_TIME_US ); transaction_trace_ptr push_transaction( signed_transaction& trx, fc::time_point deadline = fc::time_point::maximum(), uint32_t billed_cpu_time_us = DEFAULT_BILLED_CPU_TIME_US ); action_result push_action(action&& cert_act, uint64_t authorizer); // TODO/QUESTION: Is this needed? 
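// A minimal usage sketch of the API declared above (the helper name and the
// standalone framing are illustrative, not part of the patch): because
// push_scheduled_transaction removes a transaction from chain state once it
// succeeds or objectively fails, re-querying get_scheduled_transactions()
// until it comes back empty drains everything due at the pending block time,
// mirroring the loop added to base_tester::produce_block in tester.cpp below.
//
// #include <eosio/testing/tester.hpp>
//
// void drain_scheduled_transactions( eosio::testing::base_tester& chain ) {
//    std::vector<eosio::chain::transaction_id_type> scheduled;
//    while( !(scheduled = chain.get_scheduled_transactions()).empty() ) {
//       for( const auto& id : scheduled ) {
//          // two-argument form used throughout this patch's tester.cpp
//          auto trace = chain.control->push_scheduled_transaction( id, fc::time_point::maximum() );
//          if( trace->except ) trace->except->dynamic_rethrow_exception();
//       }
//    }
// }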
@@ -166,7 +177,7 @@ namespace eosio { namespace testing { transaction_trace_ptr push_dummy(account_name from, const string& v = "blah", uint32_t billed_cpu_time_us = DEFAULT_BILLED_CPU_TIME_US ); transaction_trace_ptr transfer( account_name from, account_name to, asset amount, string memo, account_name currency ); transaction_trace_ptr transfer( account_name from, account_name to, string amount, string memo, account_name currency ); - transaction_trace_ptr issue( account_name to, string amount, account_name currency ); + transaction_trace_ptr issue( account_name to, string amount, account_name currency , string memo); template const auto& get(const chainbase::oid< ObjectType >& key) { diff --git a/libraries/testing/tester.cpp b/libraries/testing/tester.cpp index fe492eb5fc9..b09f03ef38a 100644 --- a/libraries/testing/tester.cpp +++ b/libraries/testing/tester.cpp @@ -3,6 +3,8 @@ #include #include #include +#include + #include #include @@ -162,16 +164,16 @@ namespace eosio { namespace testing { } if( !skip_pending_trxs ) { - auto unapplied_trxs = control->get_unapplied_transactions(); - for (const auto& trx : unapplied_trxs ) { - auto trace = control->push_transaction(trx, fc::time_point::maximum()); + unapplied_transactions_type unapplied_trxs = control->get_unapplied_transactions(); // make copy of map + for (const auto& entry : unapplied_trxs ) { + auto trace = control->push_transaction(entry.second, fc::time_point::maximum()); if(trace->except) { trace->except->dynamic_rethrow_exception(); } } vector scheduled_trxs; - while( (scheduled_trxs = control->get_scheduled_transactions() ).size() > 0 ) { + while( (scheduled_trxs = get_scheduled_transactions() ).size() > 0 ) { for (const auto& trx : scheduled_trxs ) { auto trace = control->push_scheduled_transaction(trx, fc::time_point::maximum()); if(trace->except) { @@ -235,6 +237,18 @@ namespace eosio { namespace testing { } } + vector base_tester::get_scheduled_transactions() const { + const auto& idx = control->db().get_index(); + + vector result; + + auto itr = idx.begin(); + while( itr != idx.end() && itr->delay_until <= control->pending_block_time() ) { + result.emplace_back(itr->trx_id); + ++itr; + } + return result; + } void base_tester::produce_blocks_until_end_of_round() { uint64_t blocks_per_round; @@ -549,7 +563,7 @@ namespace eosio { namespace testing { } - transaction_trace_ptr base_tester::issue( account_name to, string amount, account_name currency ) { + transaction_trace_ptr base_tester::issue( account_name to, string amount, account_name currency, string memo ) { variant pretty_trx = fc::mutable_variant_object() ("actions", fc::variants({ fc::mutable_variant_object() @@ -563,6 +577,7 @@ namespace eosio { namespace testing { ("data", fc::mutable_variant_object() ("to", to) ("quantity", amount) + ("memo", memo) ) }) ); @@ -796,7 +811,8 @@ namespace eosio { namespace testing { return other.sync_with(*this); auto sync_dbs = [](base_tester& a, base_tester& b) { - for( int i = 1; i <= a.control->head_block_num(); ++i ) { + for( uint32_t i = 1; i <= a.control->head_block_num(); ++i ) { + auto block = a.control->fetch_block_by_number(i); if( block ) { //&& !b.control->is_known_block(block->id()) ) { auto bs = b.control->create_block_state_future( block ); diff --git a/libraries/yubihsm b/libraries/yubihsm new file mode 160000 index 00000000000..e1922fffc15 --- /dev/null +++ b/libraries/yubihsm @@ -0,0 +1 @@ +Subproject commit e1922fffc15d0720ba08f110a66b9c752774e107 diff --git a/plugins/bnet_plugin/bnet_plugin.cpp 
b/plugins/bnet_plugin/bnet_plugin.cpp index a4cd0ebb6a7..d7a105b9835 100644 --- a/plugins/bnet_plugin/bnet_plugin.cpp +++ b/plugins/bnet_plugin/bnet_plugin.cpp @@ -1342,8 +1342,7 @@ namespace eosio { } void bnet_plugin::plugin_startup() { - if(fc::get_logger_map().find(logger_name) != fc::get_logger_map().end()) - plugin_logger = fc::get_logger_map()[logger_name]; + handle_sighup(); // Sets logger wlog( "bnet startup " ); @@ -1440,6 +1439,11 @@ namespace eosio { // lifetime of _ioc is guarded by shared_ptr of bnet_plugin_impl } + void bnet_plugin::handle_sighup() { + if(fc::get_logger_map().find(logger_name) != fc::get_logger_map().end()) + plugin_logger = fc::get_logger_map()[logger_name]; + } + session::~session() { wlog( "close session ${n}",("n",_session_num) ); diff --git a/plugins/bnet_plugin/include/eosio/bnet_plugin/bnet_plugin.hpp b/plugins/bnet_plugin/include/eosio/bnet_plugin/bnet_plugin.hpp index 9eb3a54c9a4..5874f2a28ba 100644 --- a/plugins/bnet_plugin/include/eosio/bnet_plugin/bnet_plugin.hpp +++ b/plugins/bnet_plugin/include/eosio/bnet_plugin/bnet_plugin.hpp @@ -44,6 +44,7 @@ class bnet_plugin : public plugin { void plugin_initialize(const variables_map& options); void plugin_startup(); void plugin_shutdown(); + void handle_sighup() override; private: bnet_ptr my; diff --git a/plugins/chain_plugin/chain_plugin.cpp b/plugins/chain_plugin/chain_plugin.cpp index fdfd7cf83ea..4e80263b028 100644 --- a/plugins/chain_plugin/chain_plugin.cpp +++ b/plugins/chain_plugin/chain_plugin.cpp @@ -881,7 +881,7 @@ bool chain_plugin::import_reversible_blocks( const fc::path& reversible_dir, reversible_blocks.open( reversible_blocks_file.generic_string().c_str(), std::ios::in | std::ios::binary ); reversible_blocks.seekg( 0, std::ios::end ); - uint64_t end_pos = reversible_blocks.tellg(); + auto end_pos = reversible_blocks.tellg(); reversible_blocks.seekg( 0 ); uint32_t num = 0; @@ -1149,7 +1149,8 @@ string get_table_type( const abi_def& abi, const name& table_name ) { read_only::get_table_rows_result read_only::get_table_rows( const read_only::get_table_rows_params& p )const { const abi_def abi = eosio::chain_apis::get_abi( db, p.code ); - +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wstrict-aliasing" bool primary = false; auto table_with_index = get_table_index_name( p, primary ); if( primary ) { @@ -1204,6 +1205,7 @@ read_only::get_table_rows_result read_only::get_table_rows( const read_only::get } EOS_ASSERT(false, chain::contract_table_query_exception, "Unsupported secondary index type: ${t}", ("t", p.key_type)); } +#pragma GCC diagnostic pop } read_only::get_table_by_scope_result read_only::get_table_by_scope( const read_only::get_table_by_scope_params& p )const { @@ -1305,12 +1307,6 @@ fc::variant read_only::get_currency_stats( const read_only::get_currency_stats_p return results; } -// TODO: move this and similar functions to a header. Copied from wasm_interface.cpp. 
-// TODO: fix strict aliasing violation -static float64_t to_softfloat64( double d ) { - return *reinterpret_cast(&d); -} - fc::variant get_global_row( const database& db, const abi_def& abi, const abi_serializer& abis, const fc::microseconds& abi_serializer_max_time_ms, bool shorten_abi_errors ) { const auto table_type = get_table_type(abi, N(global)); EOS_ASSERT(table_type == read_only::KEYi64, chain::contract_table_query_exception, "Invalid table type ${type} for table global", ("type",table_type)); @@ -1327,7 +1323,7 @@ fc::variant get_global_row( const database& db, const abi_def& abi, const abi_se return abis.binary_to_variant(abis.get_table_type(N(global)), data, abi_serializer_max_time_ms, shorten_abi_errors ); } -read_only::get_producers_result read_only::get_producers( const read_only::get_producers_params& p ) const { +read_only::get_producers_result read_only::get_producers( const read_only::get_producers_params& p ) const try { const abi_def abi = eosio::chain_apis::get_abi(db, config::system_account_name); const auto table_type = get_table_type(abi, N(producers)); const abi_serializer abis{ abi, abi_serializer_max_time }; @@ -1375,6 +1371,20 @@ read_only::get_producers_result read_only::get_producers( const read_only::get_p } result.total_producer_vote_weight = get_global_row(d, abi, abis, abi_serializer_max_time, shorten_abi_errors)["total_producer_vote_weight"].as_double(); + return result; +} catch (...) { + read_only::get_producers_result result; + + for (auto p : db.active_producers().producers) { + fc::variant row = fc::mutable_variant_object() + ("owner", p.producer_name) + ("producer_key", p.block_signing_key) + ("url", "") + ("total_votes", 0.0f); + + result.rows.push_back(row); + } + return result; } @@ -1588,7 +1598,7 @@ static void push_recurse(read_write* rw, int index, const std::shared_ptremplace_back( r ); } - int next_index = index + 1; + size_t next_index = index + 1; if (next_index < params->size()) { push_recurse(rw, next_index, params, results, next ); } else { diff --git a/plugins/chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp b/plugins/chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp index 9fad2e2c7b6..48b44895b8e 100644 --- a/plugins/chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp +++ b/plugins/chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp @@ -610,8 +610,11 @@ class read_write { static auto function() { return [](const input_type& v) { chain::key256_t k; +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wstrict-aliasing" k[0] = ((uint128_t *)&v._hash)[0]; //0-127 k[1] = ((uint128_t *)&v._hash)[1]; //127-256 +#pragma GCC diagnostic pop return k; }; } diff --git a/plugins/http_plugin/http_plugin.cpp b/plugins/http_plugin/http_plugin.cpp index ffa6d8fb823..3bb5b530d5f 100644 --- a/plugins/http_plugin/http_plugin.cpp +++ b/plugins/http_plugin/http_plugin.cpp @@ -123,6 +123,7 @@ namespace eosio { using websocket_local_server_type = websocketpp::server; using websocket_server_tls_type = websocketpp::server>; using ssl_context_ptr = websocketpp::lib::shared_ptr; + using io_work_t = boost::asio::executor_work_guard; static bool verbose_http_errors = false; @@ -138,6 +139,13 @@ namespace eosio { websocket_server_type server; + uint16_t thread_pool_size = 2; + optional thread_pool; + std::shared_ptr server_ioc; + optional server_ioc_work; + std::atomic bytes_in_flight{0}; + size_t max_bytes_in_flight = 0; + optional https_listen_endpoint; string https_cert_chain; string https_key; @@ -277,20 +285,40 @@ namespace 
eosio { } con->append_header( "Content-type", "application/json" ); - auto body = con->get_request_body(); - auto resource = con->get_uri()->get_resource(); + + if( bytes_in_flight > max_bytes_in_flight ) { + dlog( "503 - too many bytes in flight: ${bytes}", ("bytes", bytes_in_flight.load()) ); + error_results results{websocketpp::http::status_code::too_many_requests, "Busy", error_results::error_info()}; + con->set_body( fc::json::to_string( results )); + con->set_status( websocketpp::http::status_code::too_many_requests ); + return; + } + + std::string body = con->get_request_body(); + std::string resource = con->get_uri()->get_resource(); auto handler_itr = url_handlers.find( resource ); if( handler_itr != url_handlers.end()) { con->defer_http_response(); - app().post( appbase::priority::low, [handler_itr, resource, body, con]() { + bytes_in_flight += body.size(); + app().post( appbase::priority::low, + [ioc = this->server_ioc, &bytes_in_flight = this->bytes_in_flight, handler_itr, + resource{std::move( resource )}, body{std::move( body )}, con]() { try { - handler_itr->second( resource, body, [con]( auto code, auto&& body ) { - con->set_body( std::move( body ) ); - con->set_status( websocketpp::http::status_code::value( code ) ); - con->send_http_response(); - } ); + bytes_in_flight -= body.size(); + handler_itr->second( resource, body, + [ioc{std::move(ioc)}, &bytes_in_flight, con]( int code, std::string response_body ) { + bytes_in_flight += response_body.size(); + boost::asio::post( *ioc, [ioc, response_body{std::move( response_body )}, &bytes_in_flight, con, code]() { + size_t body_size = response_body.size(); + con->set_body( std::move( response_body ) ); + con->set_status( websocketpp::http::status_code::value( code ) ); + con->send_http_response(); + bytes_in_flight -= body_size; + } ); + }); } catch( ... ) { handle_exception( con ); + con->send_http_response(); } } ); @@ -310,10 +338,11 @@ namespace eosio { void create_server_for_endpoint(const tcp::endpoint& ep, websocketpp::server>& ws) { try { ws.clear_access_channels(websocketpp::log::alevel::all); - ws.init_asio(&app().get_io_service()); + ws.init_asio(&(*server_ioc)); ws.set_reuse_addr(true); ws.set_max_http_body_size(max_body_size); - ws.set_http_handler([&](connection_hdl hdl) { + // capture server_ioc shared_ptr in http handler to keep it alive while in use + ws.set_http_handler([&, ioc = this->server_ioc](connection_hdl hdl) { handle_http_request>(ws.get_con_from_hdl(hdl)); }); } catch ( const fc::exception& e ){ @@ -389,10 +418,18 @@ namespace eosio { if (v) ilog("configured http with Access-Control-Allow-Credentials: true"); })->default_value(false), "Specify if Access-Control-Allow-Credentials: true should be returned on each request.") - ("max-body-size", bpo::value()->default_value(1024*1024), "The maximum body size in bytes allowed for incoming RPC requests") - ("verbose-http-errors", bpo::bool_switch()->default_value(false), "Append the error log to HTTP responses") - ("http-validate-host", boost::program_options::value()->default_value(true), "If set to false, then any incoming \"Host\" header is considered valid") - ("http-alias", bpo::value>()->composing(), "Additionaly acceptable values for the \"Host\" header of incoming HTTP requests, can be specified multiple times. 
Includes http/s_server_address by default.") + ("max-body-size", bpo::value()->default_value(1024*1024), + "The maximum body size in bytes allowed for incoming RPC requests") + ("http-max-bytes-in-flight-mb", bpo::value()->default_value(500), + "Maximum size in megabytes http_plugin should use for processing http requests. 503 error response when exceeded." ) + ("verbose-http-errors", bpo::bool_switch()->default_value(false), + "Append the error log to HTTP responses") + ("http-validate-host", boost::program_options::value()->default_value(true), + "If set to false, then any incoming \"Host\" header is considered valid") + ("http-alias", bpo::value>()->composing(), + "Additionally acceptable values for the \"Host\" header of incoming HTTP requests, can be specified multiple times. Includes http/s_server_address by default.") + ("http-threads", bpo::value()->default_value( my->thread_pool_size ), + "Number of worker threads in http thread pool") ; } @@ -467,11 +504,25 @@ namespace eosio { my->max_body_size = options.at( "max-body-size" ).as(); verbose_http_errors = options.at( "verbose-http-errors" ).as(); + my->thread_pool_size = options.at( "http-threads" ).as(); + EOS_ASSERT( my->thread_pool_size > 0, chain::plugin_config_exception, + "http-threads ${num} must be greater than 0", ("num", my->thread_pool_size)); + + my->max_bytes_in_flight = options.at( "http-max-bytes-in-flight-mb" ).as() * 1024 * 1024; + //watch out for the returns above when adding new code here } FC_LOG_AND_RETHROW() } void http_plugin::plugin_startup() { + + my->thread_pool.emplace( my->thread_pool_size ); + my->server_ioc = std::make_shared(); + my->server_ioc_work.emplace( boost::asio::make_work_guard(*my->server_ioc) ); + for( uint16_t i = 0; i < my->thread_pool_size; ++i ) { + boost::asio::post( *my->thread_pool, [ioc = my->server_ioc]() { ioc->run(); } ); + } + if(my->listen_endpoint) { try { my->create_server_for_endpoint(*my->listen_endpoint, my->server); @@ -494,10 +545,10 @@ namespace eosio { if(my->unix_endpoint) { try { my->unix_server.clear_access_channels(websocketpp::log::alevel::all); - my->unix_server.init_asio(&app().get_io_service()); + my->unix_server.init_asio(&(*my->server_ioc)); my->unix_server.set_max_http_body_size(my->max_body_size); my->unix_server.listen(*my->unix_endpoint); - my->unix_server.set_http_handler([&](connection_hdl hdl) { + my->unix_server.set_http_handler([&, ioc = my->server_ioc](connection_hdl hdl) { my->handle_http_request( my->unix_server.get_con_from_hdl(hdl)); }); my->unix_server.start_accept(); @@ -556,13 +607,20 @@ namespace eosio { my->https_server.stop_listening(); if(my->unix_server.is_listening()) my->unix_server.stop_listening(); + + if( my->server_ioc_work ) + my->server_ioc_work->reset(); + if( my->server_ioc ) + my->server_ioc->stop(); + if( my->thread_pool ) { + my->thread_pool->join(); + my->thread_pool->stop(); + } } void http_plugin::add_handler(const string& url, const url_handler& handler) { ilog( "add api url: ${c}", ("c",url) ); - app().post(priority::low, [=](){ - my->url_handlers.insert(std::make_pair(url,handler)); - }); + my->url_handlers.insert(std::make_pair(url,handler)); } void http_plugin::handle_exception( const char *api_name, const char *call_name, const string& body, url_response_callback cb ) { diff --git a/plugins/http_plugin/include/eosio/http_plugin/http_plugin.hpp b/plugins/http_plugin/include/eosio/http_plugin/http_plugin.hpp index f77387e83f0..a522b2b1739 100644 --- a/plugins/http_plugin/include/eosio/http_plugin/http_plugin.hpp +++ 
b/plugins/http_plugin/include/eosio/http_plugin/http_plugin.hpp @@ -129,21 +129,22 @@ namespace eosio { error_info() {}; - error_info(const fc::exception& exc, bool include_log) { + error_info(const fc::exception& exc, bool include_full_log) { code = exc.code(); name = exc.name(); what = exc.what(); - if (include_log) { - for (auto itr = exc.get_log().begin(); itr != exc.get_log().end(); ++itr) { - // Prevent sending trace that are too big - if (details.size() >= details_limit) break; - // Append error - error_detail detail = { - itr->get_message(), itr->get_context().get_file(), - itr->get_context().get_line_number(), itr->get_context().get_method() - }; - details.emplace_back(detail); - } + uint8_t limit = include_full_log ? details_limit : 1; + for( auto itr = exc.get_log().begin(); itr != exc.get_log().end(); ++itr ) { + // Prevent sending traces that are too big + if( details.size() >= limit ) break; + // Append error + error_detail detail = { + include_full_log ? itr->get_message() : itr->get_limited_message(), + itr->get_context().get_file(), + itr->get_context().get_line_number(), + itr->get_context().get_method() + }; + details.emplace_back( detail ); } } }; diff --git a/plugins/mongo_db_plugin/CMakeLists.txt b/plugins/mongo_db_plugin/CMakeLists.txt index 926aca78414..dc76525f3a2 100644 --- a/plugins/mongo_db_plugin/CMakeLists.txt +++ b/plugins/mongo_db_plugin/CMakeLists.txt @@ -1,8 +1,4 @@ if(BUILD_MONGO_DB_PLUGIN) - file(GLOB HEADERS "include/eosio/mongo_db_plugin/*.hpp") - add_library( mongo_db_plugin - mongo_db_plugin.cpp - ${HEADERS} ) find_package(libmongoc-1.0 1.8) @@ -48,24 +44,15 @@ if(BUILD_MONGO_DB_PLUGIN) message(STATUS "Found mongocxx library: ${EOS_LIBMONGOCXX}") else() message("Could NOT find MongoDB. mongo_db_plugin with MongoDB support will not be included.") - # sudo apt-get install pkg-config libssl-dev libsasl2-dev - # wget https://github.com/mongodb/mongo-c-driver/releases/download/1.8.0/mongo-c-driver-1.8.0.tar.gz - # tar xzf mongo-c-driver-1.8.0.tar.gz - # cd mongo-c-driver-1.8.0 - # ./configure --disable-automatic-init-and-cleanup --enable-static - # make - # sudo make install - # - # git clone https://github.com/mongodb/mongo-cxx-driver.git --branch releases/stable --depth 1 - # cd mongo-cxx-driver/build - # cmake -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=/usr/local -DBUILD_SHARED_LIBS=OFF .. 
- # sudo make EP_mnmlstc_core - # make - # sudo make install - # - # sudo apt-get install mongodb + return() endif() + # This must come after the else()/return() above: when libmongoc isn't found we need to skip building mongo_db_plugin, otherwise the build fails with 'bsoncxx/builder/basic/kvp.hpp' file not found + file(GLOB HEADERS "include/eosio/mongo_db_plugin/*.hpp") + add_library( mongo_db_plugin + mongo_db_plugin.cpp + ${HEADERS} ) + target_include_directories(mongo_db_plugin PRIVATE ${LIBMONGOCXX_STATIC_INCLUDE_DIRS} ${LIBBSONCXX_STATIC_INCLUDE_DIRS} PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/include" @@ -79,6 +66,7 @@ if(BUILD_MONGO_DB_PLUGIN) PUBLIC chain_plugin eosio_chain appbase ${EOS_LIBMONGOCXX} ${EOS_LIBBSONCXX} ) + else() message("mongo_db_plugin not selected and will be omitted.") endif() diff --git a/plugins/net_plugin/include/eosio/net_plugin/net_plugin.hpp b/plugins/net_plugin/include/eosio/net_plugin/net_plugin.hpp index d732b18cf0c..01c1383468d 100644 --- a/plugins/net_plugin/include/eosio/net_plugin/net_plugin.hpp +++ b/plugins/net_plugin/include/eosio/net_plugin/net_plugin.hpp @@ -25,6 +25,7 @@ namespace eosio { APPBASE_PLUGIN_REQUIRES((chain_plugin)) virtual void set_program_options(options_description& cli, options_description& cfg) override; + void handle_sighup() override; void plugin_initialize(const variables_map& options); void plugin_startup(); diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 3204793b701..6bc4505b6d6 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -56,6 +56,7 @@ namespace eosio { using connection_wptr = std::weak_ptr; using socket_ptr = std::shared_ptr; + using io_work_t = boost::asio::executor_work_guard; struct node_transaction_state { transaction_id_type id; @@ -96,6 +97,7 @@ namespace eosio { unique_ptr acceptor; tcp::endpoint listen_endpoint; string p2p_address; + string p2p_server_address; uint32_t max_client_count = 0; uint32_t max_nodes_per_host = 1; uint32_t num_clients = 0; @@ -148,12 +150,28 @@ namespace eosio { channels::transaction_ack::channel_type::handle incoming_transaction_ack_subscription; + uint16_t thread_pool_size = 1; // currently used by server_ioc + optional thread_pool; + std::shared_ptr server_ioc; + optional server_ioc_work; + + void connect(const connection_ptr& c); void connect(const connection_ptr& c, tcp::resolver::iterator endpoint_itr); bool start_session(const connection_ptr& c); void start_listen_loop(); void start_read_message(const connection_ptr& c); + /** \brief Process the next message from the pending message buffer + * + * Process the next message from the pending_message_buffer. + * message_length is the already determined length of the data + * part of the message. + * Returns true if successful. Returns false if an error was + * encountered unpacking or processing the message. 
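+ * (Note: as set up in start_read_message below, message_length is the + * payload length peeked from the 4-byte length prefix; the caller has + * already advanced past message_header_size when this is called.) 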
+ */ + bool process_next_message(const connection_ptr& conn, uint32_t message_length); + void close(const connection_ptr& c); size_t count_open_sockets() const; @@ -276,6 +294,10 @@ namespace eosio { */ constexpr auto def_send_buffer_size_mb = 4; constexpr auto def_send_buffer_size = 1024*1024*def_send_buffer_size_mb; + constexpr auto def_max_write_queue_size = def_send_buffer_size*10; + constexpr boost::asio::chrono::milliseconds def_read_delay_for_full_write_queue{100}; + constexpr auto def_max_reads_in_flight = 1000; + constexpr auto def_max_trx_in_progress_size = 100*1024*1024; // 100 MB constexpr auto def_max_clients = 25; // 0 for unlimited clients constexpr auto def_max_nodes_per_host = 1; constexpr auto def_conn_retry_wait = 30; @@ -284,6 +306,8 @@ namespace eosio { constexpr auto def_sync_fetch_span = 100; constexpr auto message_header_size = 4; + constexpr uint32_t signed_block_which = 7; // see protocol net_message + constexpr uint32_t packed_transaction_which = 8; // see protocol net_message /** * For a while, network version was a 16 bit value equal to the second set of 16 bits @@ -374,6 +398,86 @@ namespace eosio { static void populate(handshake_message &hello); }; + class queued_buffer : boost::noncopyable { + public: + void clear_write_queue() { + _write_queue.clear(); + _sync_write_queue.clear(); + _write_queue_size = 0; + } + + void clear_out_queue() { + while ( _out_queue.size() > 0 ) { + _out_queue.pop_front(); + } + } + + uint32_t write_queue_size() const { return _write_queue_size; } + + bool is_out_queue_empty() const { return _out_queue.empty(); } + + bool ready_to_send() const { + // if out_queue is not empty then async_write is in progress + return ((!_sync_write_queue.empty() || !_write_queue.empty()) && _out_queue.empty()); + } + + bool add_write_queue( const std::shared_ptr>& buff, + std::function callback, + bool to_sync_queue ) { + if( to_sync_queue ) { + _sync_write_queue.push_back( {buff, callback} ); + } else { + _write_queue.push_back( {buff, callback} ); + } + _write_queue_size += buff->size(); + if( _write_queue_size > 2 * def_max_write_queue_size ) { + return false; + } + return true; + } + + void fill_out_buffer( std::vector& bufs ) { + if( _sync_write_queue.size() > 0 ) { // always send msgs from sync_write_queue first + fill_out_buffer( bufs, _sync_write_queue ); + } else { // postpone real_time write_queue if sync queue is not empty + fill_out_buffer( bufs, _write_queue ); + EOS_ASSERT( _write_queue_size == 0, plugin_exception, "write queue size expected to be zero" ); + } + } + + void out_callback( boost::system::error_code ec, std::size_t w ) { + for( auto& m : _out_queue ) { + m.callback( ec, w ); + } + } + + private: + struct queued_write; + void fill_out_buffer( std::vector& bufs, + deque& w_queue ) { + while ( w_queue.size() > 0 ) { + auto& m = w_queue.front(); + bufs.push_back( boost::asio::buffer( *m.buff )); + _write_queue_size -= m.buff->size(); + _out_queue.emplace_back( m ); + w_queue.pop_front(); + } + } + + private: + struct queued_write { + std::shared_ptr> buff; + std::function callback; + }; + + uint32_t _write_queue_size = 0; + deque _write_queue; + deque _sync_write_queue; // sync_write_queue will be sent first + deque _out_queue; + + }; // queued_buffer + + class connection : public std::enable_shared_from_this { public: explicit connection( string endpoint ); @@ -385,17 +489,17 @@ namespace eosio { peer_block_state_index blk_state; transaction_state_index trx_state; optional peer_requested; // this peer is requesting info from 
us + std::shared_ptr server_ioc; // keep ioc alive socket_ptr socket; fc::message_buffer<1024*1024> pending_message_buffer; fc::optional outstanding_read_bytes; - struct queued_write { - std::shared_ptr> buff; - std::function callback; - }; - deque write_queue; - deque out_queue; + + queued_buffer buffer_queue; + + uint32_t reads_in_flight = 0; + uint32_t trx_in_progress_size = 0; fc::sha256 node_id; handshake_message last_handshake_recv; handshake_message last_handshake_sent; @@ -405,7 +509,7 @@ namespace eosio { uint16_t protocol_version = 0; string peer_addr; unique_ptr response_expected; - optional pending_fetch; + unique_ptr read_delay_timer; go_away_reason no_retry = no_reason; block_id_type fork_head; uint32_t fork_head_num = 0; @@ -471,13 +575,14 @@ namespace eosio { void txn_send(const vector& txn_lis); void blk_send_branch(); - void blk_send(const vector &txn_lis); + void blk_send(const block_id_type& blkid); void stop_send(); void enqueue( const net_message &msg, bool trigger_send = true ); - void enqueue_block( const signed_block_ptr& sb, bool trigger_send = true ); + void enqueue_block( const signed_block_ptr& sb, bool trigger_send = true, bool to_sync_queue = false); void enqueue_buffer( const std::shared_ptr>& send_buffer, - bool trigger_send, int priority, go_away_reason close_after_send ); + bool trigger_send, int priority, go_away_reason close_after_send, + bool to_sync_queue = false); void cancel_sync(go_away_reason); void flush_queues(); bool enqueue_sync_block(); @@ -492,21 +597,12 @@ namespace eosio { void queue_write(const std::shared_ptr>& buff, bool trigger_send, int priority, - std::function callback); + std::function callback, + bool to_sync_queue = false); void do_queue_write(int priority); - /** \brief Process the next message from the pending message buffer - * - * Process the next message from the pending_message_buffer. - * message_length is the already determined length of the data - * part of the message and impl in the net plugin implementation - * that will handle the message. - * Returns true is successful. Returns false if an error was - * encountered unpacking or processing the message. 
- */ - bool process_next_message(net_plugin_impl& impl, uint32_t message_length); - - void add_peer_block(const peer_block_state& pbs); + bool add_peer_block(const peer_block_state& pbs); + bool peer_has_block(const block_id_type& blkid); fc::optional _logger_variant; const fc::variant_object& get_logger_variant() { @@ -625,7 +721,8 @@ namespace eosio { : blk_state(), trx_state(), peer_requested(), - socket( std::make_shared( std::ref( app().get_io_service() ))), + server_ioc( my_impl->server_ioc ), + socket( std::make_shared( std::ref( *my_impl->server_ioc ))), node_id(), last_handshake_recv(), last_handshake_sent(), @@ -635,13 +732,13 @@ namespace eosio { protocol_version(0), peer_addr(endpoint), response_expected(), - pending_fetch(), + read_delay_timer(), no_retry(no_reason), fork_head(), fork_head_num(0), last_req() { - wlog( "created connection to ${n}", ("n", endpoint) ); + fc_ilog( logger, "created connection to ${n}", ("n", endpoint) ); initialize(); } @@ -649,6 +746,7 @@ namespace eosio { : blk_state(), trx_state(), peer_requested(), + server_ioc( my_impl->server_ioc ), socket( s ), node_id(), last_handshake_recv(), @@ -659,13 +757,13 @@ namespace eosio { protocol_version(0), peer_addr(), response_expected(), - pending_fetch(), + read_delay_timer(), no_retry(no_reason), fork_head(), fork_head_num(0), last_req() { - wlog( "accepted network connection" ); + fc_ilog( logger, "accepted network connection" ); initialize(); } @@ -674,7 +772,8 @@ namespace eosio { void connection::initialize() { auto *rnd = node_id.data(); rnd[0] = 0; - response_expected.reset(new boost::asio::steady_timer(app().get_io_service())); + response_expected.reset(new boost::asio::steady_timer( *my_impl->server_ioc )); + read_delay_timer.reset(new boost::asio::steady_timer( *my_impl->server_ioc )); } bool connection::connected() { @@ -692,7 +791,7 @@ namespace eosio { } void connection::flush_queues() { - write_queue.clear(); + buffer_queue.clear_write_queue(); } void connection::close() { @@ -700,7 +799,7 @@ namespace eosio { socket->close(); } else { - wlog("no socket to close!"); + fc_wlog( logger, "no socket to close!" ); } flush_queues(); connecting = false; @@ -715,6 +814,7 @@ namespace eosio { my_impl->sync_master->reset_lib_num(shared_from_this()); fc_dlog(logger, "canceling wait on ${p}", ("p",peer_name())); cancel_wait(); + if( read_delay_timer ) read_delay_timer->cancel(); pending_message_buffer.reset(); } @@ -760,17 +860,11 @@ namespace eosio { fc_dlog(logger, "maybe truncating branch at = ${h}:${id}",("h",remote_head_num)("id",remote_head_id)); } - // base our branch off of the last handshake we sent the peer instead of our current - // LIB which could have moved forward in time as packets were in flight. - if (last_handshake_sent.generation >= 1) { - lib_id = last_handshake_sent.last_irreversible_block_id; - } else { - lib_id = cc.last_irreversible_block_id(); - } + lib_id = last_handshake_recv.last_irreversible_block_id; head_id = cc.fork_db_pending_head_block_id(); } catch (const assert_exception& ex) { - elog( "unable to retrieve block info: ${n} for ${p}",("n",ex.to_string())("p",peer_name())); + fc_elog( logger, "unable to retrieve block info: ${n} for ${p}",("n",ex.to_string())("p",peer_name()) ); enqueue(note); return; } @@ -779,75 +873,42 @@ namespace eosio { catch (...) 
{ } - vector bstack; - block_id_type null_id; - for (auto bid = head_id; bid != null_id && bid != lib_id; ) { - try { - - // if the last handshake received indicates that we are catching up on a fork - // that the peer is already partially aware of, no need to resend blocks - if (remote_head_id == bid) { - break; - } - - signed_block_ptr b = cc.fetch_block_by_id(bid); - if ( b ) { - bid = b->previous; - bstack.push_back(b); - } - else { - break; - } - } catch (...) { - break; - } - } - size_t count = 0; - if (!bstack.empty()) { - if (bstack.back()->previous == lib_id || bstack.back()->previous == remote_head_id) { - count = bstack.size(); - while (bstack.size()) { - enqueue_block( bstack.back() ); - bstack.pop_back(); - } - } - fc_ilog(logger, "Sent ${n} blocks on my fork",("n",count)); + if( !peer_requested ) { + peer_requested = sync_state( block_header::num_from_id(lib_id)+1, + block_header::num_from_id(head_id), + block_header::num_from_id(lib_id) ); } else { - fc_ilog(logger, "Nothing to send on fork request"); + uint32_t start = std::min( peer_requested->last + 1, block_header::num_from_id(lib_id)+1 ); + uint32_t end = std::max( peer_requested->end_block, block_header::num_from_id(head_id) ); + peer_requested = sync_state( start, end, start - 1 ); } + enqueue_sync_block(); + // still want to send transactions along during blk branch sync syncing = false; } - void connection::blk_send(const vector &ids) { + void connection::blk_send(const block_id_type& blkid) { controller &cc = my_impl->chain_plug->chain(); - int count = 0; - for(auto &blkid : ids) { - ++count; - try { - signed_block_ptr b = cc.fetch_block_by_id(blkid); - if(b) { - fc_dlog(logger,"found block for id at num ${n}",("n",b->block_num())); - enqueue_block( b ); - } - else { - ilog("fetch block by id returned null, id ${id} on block ${c} of ${s} for ${p}", - ("id",blkid)("c",count)("s",ids.size())("p",peer_name())); - break; - } - } - catch (const assert_exception &ex) { - elog( "caught assert on fetch_block_by_id, ${ex}, id ${id} on block ${c} of ${s} for ${p}", - ("ex",ex.to_string())("id",blkid)("c",count)("s",ids.size())("p",peer_name())); - break; - } - catch (...) { - elog( "caught othser exception fetching block id ${id} on block ${c} of ${s} for ${p}", - ("id",blkid)("c",count)("s",ids.size())("p",peer_name())); - break; + try { + signed_block_ptr b = cc.fetch_block_by_id(blkid); + if(b) { + fc_dlog(logger,"found block for id at num ${n}",("n",b->block_num())); + add_peer_block({blkid, block_header::num_from_id(blkid)}); + enqueue_block( b ); + } else { + fc_ilog( logger, "fetch block by id returned null, id ${id} for ${p}", + ("id",blkid)("p",peer_name()) ); } } - + catch (const assert_exception &ex) { + fc_elog( logger, "caught assert on fetch_block_by_id, ${ex}, id ${id} for ${p}", + ("ex",ex.to_string())("id",blkid)("p",peer_name()) ); + } + catch (...) 
{ + fc_elog( logger, "caught other exception fetching block id ${id} for ${p}", + ("id",blkid)("p",peer_name()) ); + } } void connection::stop_send() { @@ -882,14 +943,21 @@ namespace eosio { void connection::queue_write(const std::shared_ptr>& buff, bool trigger_send, int priority, - std::function callback) { - write_queue.push_back({buff, callback}); - if(out_queue.empty() && trigger_send) + std::function callback, + bool to_sync_queue) { + if( !buffer_queue.add_write_queue( buff, callback, to_sync_queue )) { + fc_wlog( logger, "write_queue full ${s} bytes, giving up on connection ${p}", + ("s", buffer_queue.write_queue_size())("p", peer_name()) ); + my_impl->close( shared_from_this() ); + return; + } + if( buffer_queue.is_out_queue_empty() && trigger_send) { do_queue_write( priority ); + } } void connection::do_queue_write(int priority) { - if(write_queue.empty() || !out_queue.empty()) + if( !buffer_queue.ready_to_send() ) return; connection_wptr c(shared_from_this()); if(!socket->is_open()) { @@ -898,61 +966,54 @@ namespace eosio { return; } std::vector bufs; - while (write_queue.size() > 0) { - auto& m = write_queue.front(); - bufs.push_back(boost::asio::buffer(*m.buff)); - out_queue.push_back(m); - write_queue.pop_front(); - } - boost::asio::async_write(*socket, bufs, - app().get_priority_queue().wrap(priority, [c, priority](boost::system::error_code ec, std::size_t w) { + buffer_queue.fill_out_buffer( bufs ); + + boost::asio::async_write(*socket, bufs, [c, priority]( boost::system::error_code ec, std::size_t w ) { + app().post(priority, [c, priority, ec, w]() { try { auto conn = c.lock(); if(!conn) return; - for (auto& m: conn->out_queue) { - m.callback(ec, w); - } + conn->buffer_queue.out_callback( ec, w ); if(ec) { string pname = conn ? conn->peer_name() : "no connection name"; if( ec.value() != boost::asio::error::eof) { - elog("Error sending to peer ${p}: ${i}", ("p",pname)("i", ec.message())); + fc_elog( logger, "Error sending to peer ${p}: ${i}", ("p",pname)("i", ec.message()) ); } else { - ilog("connection closure detected on write to ${p}",("p",pname)); + fc_wlog( logger, "connection closure detected on write to ${p}",("p",pname) ); } my_impl->close(conn); return; } - while (conn->out_queue.size() > 0) { - conn->out_queue.pop_front(); - } + conn->buffer_queue.clear_out_queue(); conn->enqueue_sync_block(); conn->do_queue_write( priority ); } catch(const std::exception &ex) { auto conn = c.lock(); string pname = conn ? conn->peer_name() : "no connection name"; - elog("Exception in do_queue_write to ${p} ${s}", ("p",pname)("s",ex.what())); + fc_elog( logger,"Exception in do_queue_write to ${p} ${s}", ("p",pname)("s",ex.what()) ); } catch(const fc::exception &ex) { auto conn = c.lock(); string pname = conn ? conn->peer_name() : "no connection name"; - elog("Exception in do_queue_write to ${p} ${s}", ("p",pname)("s",ex.to_string())); + fc_elog( logger,"Exception in do_queue_write to ${p} ${s}", ("p",pname)("s",ex.to_string()) ); } catch(...) { auto conn = c.lock(); string pname = conn ? 
conn->peer_name() : "no connection name"; - elog("Exception in do_queue_write to ${p}", ("p",pname) ); + fc_elog( logger,"Exception in do_queue_write to ${p}", ("p",pname) ); } - })); + }); + }); } void connection::cancel_sync(go_away_reason reason) { - fc_dlog(logger,"cancel sync reason = ${m}, write queue size ${o} peer ${p}", - ("m",reason_str(reason)) ("o", write_queue.size())("p", peer_name())); + fc_dlog(logger,"cancel sync reason = ${m}, write queue size ${o} bytes peer ${p}", + ("m",reason_str(reason)) ("o", buffer_queue.write_queue_size())("p", peer_name())); cancel_wait(); flush_queues(); switch (reason) { @@ -980,11 +1041,11 @@ namespace eosio { controller& cc = my_impl->chain_plug->chain(); signed_block_ptr sb = cc.fetch_block_by_number(num); if(sb) { - enqueue_block( sb, trigger_send); + enqueue_block( sb, trigger_send, true); return true; } } catch ( ... ) { - wlog( "write loop exception" ); + fc_wlog( logger, "write loop exception" ); } return false; } @@ -995,11 +1056,12 @@ namespace eosio { close_after_send = m.get().reason; } - uint32_t payload_size = fc::raw::pack_size( m ); + const uint32_t payload_size = fc::raw::pack_size( m ); - char* header = reinterpret_cast(&payload_size); - size_t header_size = sizeof(payload_size); - size_t buffer_size = header_size + payload_size; + const char* const header = reinterpret_cast(&payload_size); // avoid variable size encoding of uint32_t + constexpr size_t header_size = sizeof(payload_size); + static_assert( header_size == message_header_size, "invalid message_header_size" ); + const size_t buffer_size = header_size + payload_size; auto send_buffer = std::make_shared>(buffer_size); fc::datastream ds( send_buffer->data(), buffer_size); @@ -1009,32 +1071,45 @@ namespace eosio { enqueue_buffer( send_buffer, trigger_send, priority::low, close_after_send ); } - static std::shared_ptr> create_send_buffer( const signed_block_ptr& sb ) { - // this implementation is to avoid copy of signed_block to net_message - int which = 7; // matches which of net_message for signed_block - - uint32_t which_size = fc::raw::pack_size( unsigned_int( which )); - uint32_t payload_size = which_size + fc::raw::pack_size( *sb ); + template< typename T> + static std::shared_ptr> create_send_buffer( uint32_t which, const T& v ) { + // match net_message static_variant pack + const uint32_t which_size = fc::raw::pack_size( unsigned_int( which ) ); + const uint32_t payload_size = which_size + fc::raw::pack_size( v ); - char* header = reinterpret_cast(&payload_size); - size_t header_size = sizeof(payload_size); - size_t buffer_size = header_size + payload_size; + const char* const header = reinterpret_cast(&payload_size); // avoid variable size encoding of uint32_t + constexpr size_t header_size = sizeof( payload_size ); + static_assert( header_size == message_header_size, "invalid message_header_size" ); + const size_t buffer_size = header_size + payload_size; - auto send_buffer = std::make_shared>(buffer_size); - fc::datastream ds( send_buffer->data(), buffer_size); + auto send_buffer = std::make_shared>( buffer_size ); + fc::datastream ds( send_buffer->data(), buffer_size ); ds.write( header, header_size ); - fc::raw::pack( ds, unsigned_int( which )); - fc::raw::pack( ds, *sb ); + fc::raw::pack( ds, unsigned_int( which ) ); + fc::raw::pack( ds, v ); return send_buffer; } - void connection::enqueue_block( const signed_block_ptr& sb, bool trigger_send ) { - enqueue_buffer( create_send_buffer( sb ), trigger_send, priority::low, no_reason ); + static std::shared_ptr> 
create_send_buffer( const signed_block_ptr& sb ) { + // this implementation is to avoid copy of signed_block to net_message + // matches which of net_message for signed_block + return create_send_buffer( signed_block_which, *sb ); + } + + static std::shared_ptr> create_send_buffer( const packed_transaction& trx ) { + // this implementation is to avoid copy of packed_transaction to net_message + // matches which of net_message for packed_transaction + return create_send_buffer( packed_transaction_which, trx ); + } + + void connection::enqueue_block( const signed_block_ptr& sb, bool trigger_send, bool to_sync_queue) { + enqueue_buffer( create_send_buffer( sb ), trigger_send, priority::low, no_reason, to_sync_queue); } void connection::enqueue_buffer( const std::shared_ptr>& send_buffer, - bool trigger_send, int priority, go_away_reason close_after_send ) + bool trigger_send, int priority, go_away_reason close_after_send, + bool to_sync_queue) { connection_wptr weak_this = shared_from_this(); queue_write(send_buffer,trigger_send, priority, @@ -1049,7 +1124,8 @@ namespace eosio { } else { fc_wlog(logger, "connection expired before enqueued net_message called callback!"); } - }); + }, + to_sync_queue); } void connection::cancel_wait() { @@ -1060,7 +1136,8 @@ namespace eosio { void connection::sync_wait() { response_expected->expires_from_now( my_impl->resp_expected_period); connection_wptr c(shared_from_this()); - response_expected->async_wait( app().get_priority_queue().wrap(priority::low, [c]( boost::system::error_code ec){ + response_expected->async_wait( [c]( boost::system::error_code ec ) { + app().post(priority::low, [c, ec]() { connection_ptr conn = c.lock(); if (!conn) { // connection was destroyed before this lambda was delivered @@ -1068,13 +1145,15 @@ namespace eosio { } conn->sync_timeout(ec); - } ) ); + }); + } ); } void connection::fetch_wait() { response_expected->expires_from_now( my_impl->resp_expected_period); connection_wptr c(shared_from_this()); - response_expected->async_wait( app().get_priority_queue().wrap(priority::low, [c]( boost::system::error_code ec){ + response_expected->async_wait( [c]( boost::system::error_code ec ) { + app().post(priority::low, [c, ec]() { connection_ptr conn = c.lock(); if (!conn) { // connection was destroyed before this lambda was delivered @@ -1082,7 +1161,8 @@ namespace eosio { } conn->fetch_timeout(ec); - } ) ); + }); + } ); } void connection::sync_timeout( boost::system::error_code ec ) { @@ -1092,7 +1172,7 @@ namespace eosio { else if( ec == boost::asio::error::operation_aborted) { } else { - elog("setting timer for sync request got error ${ec}",("ec", ec.message())); + fc_elog( logger,"setting timer for sync request got error ${ec}",("ec", ec.message()) ); } } @@ -1108,9 +1188,7 @@ namespace eosio { void connection::fetch_timeout( boost::system::error_code ec ) { if( !ec ) { - if( pending_fetch.valid() && !( pending_fetch->req_trx.empty() || pending_fetch->req_blocks.empty() ) ) { - my_impl->dispatcher->retry_fetch(shared_from_this()); - } + my_impl->dispatcher->retry_fetch(shared_from_this()); } else if( ec == boost::asio::error::operation_aborted ) { if( !connected() ) { @@ -1118,7 +1196,7 @@ namespace eosio { } } else { - elog( "setting timer for fetch request got error ${ec}", ("ec", ec.message() ) ); + fc_elog( logger, "setting timer for fetch request got error ${ec}", ("ec", ec.message() ) ); } } @@ -1128,38 +1206,18 @@ namespace eosio { sync_wait(); } - bool connection::process_next_message(net_plugin_impl& impl, uint32_t 
message_length) { - try { - auto ds = pending_message_buffer.create_datastream(); - net_message msg; - fc::raw::unpack(ds, msg); - msg_handler m(impl, shared_from_this() ); - if( msg.contains() ) { - m( std::move( msg.get() ) ); - } else if( msg.contains() ) { - m( std::move( msg.get() ) ); - } else { - msg.visit( m ); - } - } catch( const fc::exception& e ) { - edump((e.to_detail_string() )); - impl.close( shared_from_this() ); - return false; - } - return true; - } - - void connection::add_peer_block(const peer_block_state& entry) { + bool connection::add_peer_block(const peer_block_state& entry) { auto bptr = blk_state.get().find(entry.id); bool added = (bptr == blk_state.end()); if (added){ blk_state.insert(entry); - } else { - blk_state.modify(bptr, [&entry](auto& e){ - e.id = entry.id; - e.block_num = entry.block_num; - }); } + return added; + } + + bool connection::peer_has_block( const block_id_type& blkid ) { + auto blk_itr = blk_state.get().find(blkid); + return blk_itr != blk_state.end(); } //----------------------------------------------------------- @@ -1287,7 +1345,7 @@ namespace eosio { // verify there is an available source if (!source || !source->current() ) { - elog("Unable to continue syncing at this time"); + fc_elog( logger, "Unable to continue syncing at this time"); sync_known_lib_num = chain_plug->chain().last_irreversible_block_num(); sync_last_requested_num = 0; set_state(in_sync); // probably not, but we can't do anything else @@ -1423,7 +1481,7 @@ namespace eosio { c->syncing = true; return; } - elog("sync check failed to resolve status"); + fc_elog( logger, "sync check failed to resolve status" ); } void sync_manager::verify_catchup(const connection_ptr& c, uint32_t num, const block_id_type& id) { @@ -1439,8 +1497,8 @@ namespace eosio { if( req.req_blocks.mode == catch_up ) { c->fork_head = id; c->fork_head_num = num; - ilog("got a catch_up notice while in ${s}, fork head num = ${fhn} target LIB = ${lib} next_expected = ${ne}", - ("s",stage_str(state))("fhn",num)("lib",sync_known_lib_num)("ne", sync_next_expected_num)); + fc_ilog( logger, "got a catch_up notice while in ${s}, fork head num = ${fhn} target LIB = ${lib} next_expected = ${ne}", + ("s",stage_str(state))("fhn",num)("lib",sync_known_lib_num)("ne", sync_next_expected_num) ); if (state == lib_catchup) return; set_state(head_catchup); @@ -1455,11 +1513,15 @@ namespace eosio { void sync_manager::recv_notice(const connection_ptr& c, const notice_message& msg) { fc_ilog(logger, "sync_manager got ${m} block notice",("m",modes_str(msg.known_blocks.mode))); + if( msg.known_blocks.ids.size() > 1 ) { + fc_elog( logger, "Invalid notice_message, known_blocks.ids.size ${s}", ("s", msg.known_blocks.ids.size()) ); + my_impl->close(c); + return; + } if (msg.known_blocks.mode == catch_up) { if (msg.known_blocks.ids.size() == 0) { - elog("got a catch up with ids size = 0"); - } - else { + fc_elog( logger,"got a catch up with ids size = 0" ); + } else { verify_catchup(c, msg.known_blocks.pending, msg.known_blocks.ids.back()); } } @@ -1481,7 +1543,7 @@ namespace eosio { } } void sync_manager::recv_block(const connection_ptr& c, const block_id_type& blk_id, uint32_t blk_num) { - fc_dlog(logger," got block ${bn} from ${p}",("bn",blk_num)("p",c->peer_name())); + fc_dlog(logger, "got block ${bn} from ${p}",("bn",blk_num)("p",c->peer_name())); if (state == lib_catchup) { if (blk_num != sync_next_expected_num) { fc_ilog(logger, "expected block ${ne} but got ${bn}",("ne",sync_next_expected_num)("bn",blk_num)); @@ -1508,6 
+1570,10 @@ namespace eosio { set_state(head_catchup); } } + + if (state == in_sync) { + send_handshakes(); + } } else if (state == lib_catchup) { if( blk_num == sync_known_lib_num ) { @@ -1545,11 +1611,13 @@ namespace eosio { } bool has_block = cp->last_handshake_recv.last_irreversible_block_num >= bnum; if( !has_block ) { - fc_dlog(logger, "bcast block ${b} to ${p}", ("b", bnum)("p", cp->peer_name())); - cp->add_peer_block( pbstate ); + if( !cp->add_peer_block( pbstate ) ) { + continue; + } if( !send_buffer ) { send_buffer = create_send_buffer( bs->block ); } + fc_dlog(logger, "bcast block ${b} to ${p}", ("b", bnum)("p", cp->peer_name())); cp->enqueue_buffer( send_buffer, true, priority::high, no_reason ); } } @@ -1594,21 +1662,7 @@ namespace eosio { time_point_sec trx_expiration = ptrx->packed_trx->expiration(); const packed_transaction& trx = *ptrx->packed_trx; - // this implementation is to avoid copy of packed_transaction to net_message - int which = 8; // matches which of net_message for packed_transaction - - uint32_t which_size = fc::raw::pack_size( unsigned_int( which )); - uint32_t payload_size = which_size + fc::raw::pack_size( trx ); - - char* header = reinterpret_cast(&payload_size); - size_t header_size = sizeof(payload_size); - size_t buffer_size = header_size + payload_size; - - auto buff = std::make_shared>(buffer_size); - fc::datastream ds( buff->data(), buffer_size); - ds.write( header, header_size ); - fc::raw::pack( ds, unsigned_int( which )); - fc::raw::pack( ds, trx ); + auto buff = create_send_buffer( trx ); node_transaction_state nts = {id, trx_expiration, 0, buff}; my_impl->local_txns.insert(std::move(nts)); @@ -1659,24 +1713,26 @@ namespace eosio { send_req = false; } else if (msg.known_trx.mode != none) { - elog("passed a notice_message with something other than a normal on none known_trx"); + fc_elog( logger,"passed a notice_message with something other than a normal on none known_trx" ); return; } if (msg.known_blocks.mode == normal) { req.req_blocks.mode = normal; controller& cc = my_impl->chain_plug->chain(); - for( const auto& blkid : msg.known_blocks.ids) { + // known_blocks.ids is never > 1 + if( !msg.known_blocks.ids.empty() ) { + const block_id_type& blkid = msg.known_blocks.ids.back(); signed_block_ptr b; try { - b = cc.fetch_block_by_id(blkid); + b = cc.fetch_block_by_id(blkid); // if exists if(b) { - c->add_peer_block({blkid, b->block_num()}); + c->add_peer_block({blkid, block_header::num_from_id(blkid)}); } } catch (const assert_exception &ex) { - ilog( "caught assert on fetch_block_by_id, ${ex}",("ex",ex.what())); + fc_ilog( logger, "caught assert on fetch_block_by_id, ${ex}",("ex",ex.what()) ); // keep going, client can ask another peer } catch (...) 
{ - elog( "failed to retrieve block for id"); + fc_elog( logger, "failed to retrieve block for id"); } if (!b) { send_req = true; @@ -1685,7 +1741,7 @@ namespace eosio { } } else if (msg.known_blocks.mode != none) { - elog("passed a notice_message with something other than a normal on none known_blocks"); + fc_elog( logger, "passed a notice_message with something other than a normal on none known_blocks" ); return; } fc_dlog( logger, "send req = ${sr}", ("sr",send_req)); @@ -1726,8 +1782,7 @@ namespace eosio { sendit = trx != conn->trx_state.end(); } else { - auto blk = conn->blk_state.get().find(bid); - sendit = blk != conn->blk_state.end(); + sendit = conn->peer_has_block(bid); } if (sendit) { conn->enqueue(*c->last_req); @@ -1755,7 +1810,7 @@ namespace eosio { auto colon = c->peer_addr.find(':'); if (colon == std::string::npos || colon == 0) { - elog("Invalid peer address. must be \"host:port\": ${p}", ("p",c->peer_addr)); + fc_elog( logger, "Invalid peer address. must be \"host:port\": ${p}", ("p",c->peer_addr) ); for ( auto itr : connections ) { if((*itr).peer_addr == c->peer_addr) { (*itr).reset(); @@ -1775,18 +1830,18 @@ namespace eosio { // Note: need to add support for IPv6 too resolver->async_resolve( query, - app().get_priority_queue().wrap( priority::low, - [weak_conn, this]( const boost::system::error_code& err, - tcp::resolver::iterator endpoint_itr ){ - auto c = weak_conn.lock(); - if (!c) return; - if( !err ) { - connect( c, endpoint_itr ); - } else { - elog( "Unable to resolve ${peer_addr}: ${error}", - ( "peer_addr", c->peer_name() )("error", err.message() ) ); - } - })); + [weak_conn, this]( const boost::system::error_code& err, tcp::resolver::iterator endpoint_itr ) { + app().post( priority::low, [err, endpoint_itr, weak_conn, this]() { + auto c = weak_conn.lock(); + if( !c ) return; + if( !err ) { + connect( c, endpoint_itr ); + } else { + fc_elog( logger, "Unable to resolve ${peer_addr}: ${error}", + ("peer_addr", c->peer_name())( "error", err.message()) ); + } + } ); + } ); } void net_plugin_impl::connect(const connection_ptr& c, tcp::resolver::iterator endpoint_itr) { @@ -1798,28 +1853,26 @@ namespace eosio { ++endpoint_itr; c->connecting = true; connection_wptr weak_conn = c; - c->socket->async_connect( current_endpoint, - app().get_priority_queue().wrap( priority::low, - [weak_conn, endpoint_itr, this] ( const boost::system::error_code& err ) { + c->socket->async_connect( current_endpoint, [weak_conn, endpoint_itr, this]( const boost::system::error_code& err ) { + app().post( priority::low, [weak_conn, endpoint_itr, this, err]() { auto c = weak_conn.lock(); - if (!c) return; - if( !err && c->socket->is_open() ) { - if (start_session( c )) { + if( !c ) return; + if( !err && c->socket->is_open()) { + if( start_session( c )) { c->send_handshake(); } } else { - if( endpoint_itr != tcp::resolver::iterator() ) { - close(c); + if( endpoint_itr != tcp::resolver::iterator()) { + close( c ); connect( c, endpoint_itr ); - } - else { - elog( "connection failed to ${peer}: ${error}", - ( "peer", c->peer_name())("error",err.message())); + } else { + fc_elog( logger, "connection failed to ${peer}: ${error}", ("peer", c->peer_name())( "error", err.message())); c->connecting = false; - my_impl->close(c); + my_impl->close( c ); } } - })); + } ); + } ); } bool net_plugin_impl::start_session(const connection_ptr& con) { @@ -1827,8 +1880,7 @@ namespace eosio { boost::system::error_code ec; con->socket->set_option( nodelay, ec ); if (ec) { - elog( "connection failed to ${peer}: 
${error}", - ( "peer", con->peer_name())("error",ec.message())); + fc_elog( logger, "connection failed to ${peer}: ${error}", ( "peer", con->peer_name())("error",ec.message()) ); con->connecting = false; close(con); return false; @@ -1845,15 +1897,16 @@ namespace eosio { void net_plugin_impl::start_listen_loop() { - auto socket = std::make_shared( std::ref( app().get_io_service() ) ); - acceptor->async_accept( *socket, - app().get_priority_queue().wrap(priority::low, [socket,this]( boost::system::error_code ec ) { + auto socket = std::make_shared( std::ref( *server_ioc ) ); + acceptor->async_accept( *socket, [socket, this, ioc = server_ioc]( boost::system::error_code ec ) { + app().post( priority::low, [socket, this, ec, ioc{std::move(ioc)}]() { if( !ec ) { uint32_t visitors = 0; uint32_t from_addr = 0; - auto paddr = socket->remote_endpoint(ec).address(); - if (ec) { - fc_elog(logger,"Error getting remote endpoint: ${m}",("m", ec.message())); + boost::system::error_code rec; + auto paddr = socket->remote_endpoint(rec).address(); + if (rec) { + fc_elog(logger,"Error getting remote endpoint: ${m}",("m", rec.message())); } else { for (auto &conn : connections) { @@ -1868,7 +1921,7 @@ namespace eosio { } } if (num_clients != visitors) { - ilog("checking max client, visitors = ${v} num clients ${n}",("v",visitors)("n",num_clients)); + fc_ilog( logger,"checking max client, visitors = ${v} num clients ${n}",("v",visitors)("n",num_clients) ); num_clients = visitors; } if( from_addr < max_nodes_per_host && (max_client_count == 0 || num_clients < max_client_count )) { @@ -1891,7 +1944,7 @@ namespace eosio { } } } else { - elog( "Error accepting connection: ${m}",( "m", ec.message() ) ); + fc_elog( logger, "Error accepting connection: ${m}",( "m", ec.message() ) ); // For the listed error codes below, recall start_listen_loop() switch (ec.value()) { case ECONNABORTED: @@ -1906,7 +1959,8 @@ namespace eosio { } } start_listen_loop(); - })); + }); + }); } void net_plugin_impl::start_read_message(const connection_ptr& conn) { @@ -1934,22 +1988,55 @@ namespace eosio { } }; + if( conn->buffer_queue.write_queue_size() > def_max_write_queue_size || + conn->reads_in_flight > def_max_reads_in_flight || + conn->trx_in_progress_size > def_max_trx_in_progress_size ) + { + // too much queued up, reschedule + if( conn->buffer_queue.write_queue_size() > def_max_write_queue_size ) { + peer_wlog( conn, "write_queue full ${s} bytes", ("s", conn->buffer_queue.write_queue_size()) ); + } else if( conn->reads_in_flight > def_max_reads_in_flight ) { + peer_wlog( conn, "max reads in flight ${s}", ("s", conn->reads_in_flight) ); + } else { + peer_wlog( conn, "max trx in progress ${s} bytes", ("s", conn->trx_in_progress_size) ); + } + if( conn->buffer_queue.write_queue_size() > 2*def_max_write_queue_size || + conn->reads_in_flight > 2*def_max_reads_in_flight || + conn->trx_in_progress_size > 2*def_max_trx_in_progress_size ) + { + fc_wlog( logger, "queues over full, giving up on connection ${p}", ("p", conn->peer_name()) ); + my_impl->close( conn ); + return; + } + if( !conn->read_delay_timer ) return; + conn->read_delay_timer->expires_from_now( def_read_delay_for_full_write_queue ); + conn->read_delay_timer->async_wait( + app().get_priority_queue().wrap( priority::low, [this, weak_conn]( boost::system::error_code ) { + auto conn = weak_conn.lock(); + if( !conn ) return; + start_read_message( conn ); + } ) ); + return; + } + + ++conn->reads_in_flight; boost::asio::async_read(*conn->socket, 
conn->pending_message_buffer.get_buffer_sequence_for_boost_async_read(), completion_handler, - app().get_priority_queue().wrap( priority::medium, [this,weak_conn]( boost::system::error_code ec, std::size_t bytes_transferred ) { + app().post( priority::medium, [this,weak_conn, ec, bytes_transferred]() { auto conn = weak_conn.lock(); if (!conn) { return; } + --conn->reads_in_flight; conn->outstanding_read_bytes.reset(); try { if( !ec ) { if (bytes_transferred > conn->pending_message_buffer.bytes_to_write()) { - elog("async_read_some callback: bytes_transfered = ${bt}, buffer.bytes_to_write = ${btw}", - ("bt",bytes_transferred)("btw",conn->pending_message_buffer.bytes_to_write())); + fc_elog( logger,"async_read_some callback: bytes_transferred = ${bt}, buffer.bytes_to_write = ${btw}", + ("bt",bytes_transferred)("btw",conn->pending_message_buffer.bytes_to_write()) ); } EOS_ASSERT(bytes_transferred <= conn->pending_message_buffer.bytes_to_write(), plugin_exception, ""); conn->pending_message_buffer.advance_write_ptr(bytes_transferred); @@ -1965,8 +2052,8 @@ namespace eosio { conn->pending_message_buffer.peek(&message_length, sizeof(message_length), index); if(message_length > def_send_buffer_size*2 || message_length == 0) { boost::system::error_code ec; - elog("incoming message length unexpected (${i}), from ${p}", - ("i", message_length)("p",boost::lexical_cast<std::string>(conn->socket->remote_endpoint(ec)))); + fc_elog( logger,"incoming message length unexpected (${i}), from ${p}", - ("i", message_length)("p",boost::lexical_cast<std::string>(conn->socket->remote_endpoint(ec))) ); close(conn); return; } @@ -1975,7 +2062,7 @@ namespace eosio { if (bytes_in_buffer >= total_message_bytes) { conn->pending_message_buffer.advance_read_ptr(message_header_size); - if (!conn->process_next_message(*this, message_length)) { + if (!process_next_message(conn, message_length)) { return; } } else { @@ -1994,36 +2081,76 @@ namespace eosio { } else { auto pname = conn->peer_name(); if (ec.value() != boost::asio::error::eof) { - elog( "Error reading message from ${p}: ${m}",("p",pname)( "m", ec.message() ) ); + fc_elog( logger, "Error reading message from ${p}: ${m}",("p",pname)( "m", ec.message() ) ); } else { - ilog( "Peer ${p} closed connection",("p",pname) ); + fc_ilog( logger, "Peer ${p} closed connection",("p",pname) ); } close( conn ); } } catch(const std::exception &ex) { string pname = conn ? conn->peer_name() : "no connection name"; - elog("Exception in handling read data from ${p} ${s}",("p",pname)("s",ex.what())); + fc_elog( logger, "Exception in handling read data from ${p} ${s}",("p",pname)("s",ex.what()) ); close( conn ); } catch(const fc::exception &ex) { string pname = conn ? conn->peer_name() : "no connection name"; - elog("Exception in handling read data ${s}", ("p",pname)("s",ex.to_string())); + fc_elog( logger, "Exception in handling read data ${s}", ("p",pname)("s",ex.to_string()) ); close( conn ); } catch (...) { string pname = conn ? conn->peer_name() : "no connection name"; - elog( "Undefined exception hanlding the read data from connection ${p}",( "p",pname)); + fc_elog( logger, "Undefined exception handling the read data from connection ${p}",( "p",pname) ); close( conn ); } - })); + }); + });
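
The handler above pairs each async_read with a reads_in_flight decrement, complementing the back-pressure gate added before the read is issued. A self-contained sketch of that bounded-read idea, with invented limits and plain asio types (the real code parks on read_delay_timer rather than this retry timer):

```cpp
#include <array>
#include <boost/asio.hpp>
#include <chrono>
#include <cstddef>
#include <memory>

// Create with std::make_shared so shared_from_this() is valid.
struct throttled_reader : std::enable_shared_from_this<throttled_reader> {
   boost::asio::ip::tcp::socket socket;
   boost::asio::steady_timer    retry;
   std::size_t reads_in_flight = 0, write_queue_bytes = 0;
   static constexpr std::size_t max_reads = 2, max_write_bytes = 1 << 20; // invented limits
   std::array<char, 4096> buf;

   explicit throttled_reader(boost::asio::io_context& ioc) : socket(ioc), retry(ioc) {}

   void start_read() {
      if (reads_in_flight >= max_reads || write_queue_bytes > max_write_bytes) {
         retry.expires_after(std::chrono::milliseconds(100));   // too much queued: let it drain
         retry.async_wait([self = shared_from_this()](boost::system::error_code) {
            self->start_read();
         });
         return;
      }
      ++reads_in_flight;
      socket.async_read_some(boost::asio::buffer(buf),
         [self = shared_from_this()](boost::system::error_code ec, std::size_t) {
            --self->reads_in_flight;
            if (!ec) self->start_read();                        // consume, then keep reading
         });
   }
};
```
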
} catch (...) { string pname = conn ? conn->peer_name() : "no connection name"; - elog( "Undefined exception handling reading ${p}",("p",pname) ); + fc_elog( logger, "Undefined exception handling reading ${p}",("p",pname) ); close( conn ); } } + bool net_plugin_impl::process_next_message(const connection_ptr& conn, uint32_t message_length) { + try { + // if next message is a block we already have, exit early + auto peek_ds = conn->pending_message_buffer.create_peek_datastream(); + unsigned_int which{}; + fc::raw::unpack( peek_ds, which ); + if( which == signed_block_which ) { + block_header bh; + fc::raw::unpack( peek_ds, bh ); + + controller& cc = chain_plug->chain(); + block_id_type blk_id = bh.id(); + uint32_t blk_num = bh.block_num(); + if( cc.fetch_block_by_id( blk_id ) ) { + sync_master->recv_block( conn, blk_id, blk_num ); + conn->pending_message_buffer.advance_read_ptr( message_length ); + return true; + } + } + + auto ds = conn->pending_message_buffer.create_datastream(); + net_message msg; + fc::raw::unpack( ds, msg ); + msg_handler m( *this, conn ); + if( msg.contains<signed_block>() ) { + m( std::move( msg.get<signed_block>() ) ); + } else if( msg.contains<packed_transaction>() ) { + m( std::move( msg.get<packed_transaction>() ) ); + } else { + msg.visit( m ); + } + } catch( const fc::exception& e ) { + edump( (e.to_detail_string()) ); + close( conn ); + return false; + } + return true; + }
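
process_next_message() decodes only the varint discriminant and the block_header from a peek datastream, so a signed_block the chain already has is skipped without unpacking its (potentially large) body. A toy, runnable version of the same early-exit, with an invented [1-byte tag][4-byte id][body] frame standing in for the fc serialization:

```cpp
#include <cstdint>
#include <cstring>
#include <unordered_set>
#include <vector>

constexpr uint8_t kSignedBlockTag = 7; // stand-in for signed_block_which

// Returns false if the frame is malformed; consumes the frame once handled.
bool handle_frame(std::vector<char>& inbox, std::size_t frame_len,
                  std::unordered_set<uint32_t>& known_blocks) {
   if (frame_len < 5 || inbox.size() < frame_len) return false;
   const uint8_t tag = static_cast<uint8_t>(inbox[0]);       // peek: nothing consumed yet
   if (tag == kSignedBlockTag) {
      uint32_t id;
      std::memcpy(&id, inbox.data() + 1, sizeof(id));        // peek the id only
      if (known_blocks.count(id)) {                          // duplicate: skip body decode
         inbox.erase(inbox.begin(), inbox.begin() + frame_len);
         return true;
      }
      known_blocks.insert(id);
   }
   // ... full decode and dispatch of the frame body would happen here ...
   inbox.erase(inbox.begin(), inbox.begin() + frame_len);
   return true;
}
```
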
size_t net_plugin_impl::count_open_sockets() const { size_t count = 0; @@ -2050,20 +2177,20 @@ namespace eosio { // affecting state. bool valid = true; if (msg.last_irreversible_block_num > msg.head_num) { - wlog("Handshake message validation: last irreversible block (${i}) is greater than head block (${h})", - ("i", msg.last_irreversible_block_num)("h", msg.head_num)); + fc_wlog( logger, "Handshake message validation: last irreversible block (${i}) is greater than head block (${h})", + ("i", msg.last_irreversible_block_num)("h", msg.head_num) ); valid = false; } if (msg.p2p_address.empty()) { - wlog("Handshake message validation: p2p_address is null string"); + fc_wlog( logger, "Handshake message validation: p2p_address is null string" ); valid = false; } if (msg.os.empty()) { - wlog("Handshake message validation: os field is null string"); + fc_wlog( logger, "Handshake message validation: os field is null string" ); valid = false; } if ((msg.sig != chain::signature_type() || msg.token != sha256()) && (msg.token != fc::sha256::hash(msg.time))) { - wlog("Handshake message validation: token field invalid"); + fc_wlog( logger, "Handshake message validation: token field invalid" ); valid = false; } return valid; @@ -2088,7 +2215,7 @@ namespace eosio { } if (msg.generation == 1) { if( msg.node_id == node_id) { - elog( "Self connection detected. Closing connection"); + fc_elog( logger, "Self connection detected. Closing connection" ); c->enqueue( go_away_message( self ) ); return; } @@ -2120,20 +2247,20 @@ namespace eosio { } if( msg.chain_id != chain_id) { - elog( "Peer on a different chain. Closing connection"); + fc_elog( logger, "Peer on a different chain. Closing connection" ); c->enqueue( go_away_message(go_away_reason::wrong_chain) ); return; } c->protocol_version = to_protocol_version(msg.network_version); if(c->protocol_version != net_version) { if (network_version_match) { - elog("Peer network version does not match expected ${nv} but got ${mnv}", - ("nv", net_version)("mnv", c->protocol_version)); + fc_elog( logger, "Peer network version does not match expected ${nv} but got ${mnv}", + ("nv", net_version)("mnv", c->protocol_version) ); c->enqueue(go_away_message(wrong_version)); return; } else { - ilog("Local network version: ${nv} Remote version: ${mnv}", - ("nv", net_version)("mnv", c->protocol_version)); + fc_ilog( logger, "Local network version: ${nv} Remote version: ${mnv}", + ("nv", net_version)("mnv", c->protocol_version)); } } @@ -2142,7 +2269,7 @@ namespace eosio { } if(!authenticate_peer(msg)) { - elog("Peer not authenticated. Closing connection."); + fc_elog( logger, "Peer not authenticated. Closing connection." ); c->enqueue(go_away_message(authentication)); return; } @@ -2156,15 +2283,15 @@ namespace eosio { on_fork =( msg.last_irreversible_block_id != peer_lib_id); } catch( const unknown_block_exception &ex) { - wlog( "peer last irreversible block ${pl} is unknown", ("pl", peer_lib)); + fc_wlog( logger, "peer last irreversible block ${pl} is unknown", ("pl", peer_lib) ); on_fork = true; } catch( ...) { - wlog( "caught an exception getting block id for ${pl}",("pl",peer_lib)); + fc_wlog( logger, "caught an exception getting block id for ${pl}",("pl",peer_lib) ); on_fork = true; } if( on_fork) { - elog( "Peer chain is forked"); + fc_elog( logger, "Peer chain is forked" ); c->enqueue( go_away_message( forked )); return; } @@ -2182,9 +2309,9 @@ namespace eosio { void net_plugin_impl::handle_message(const connection_ptr& c, const go_away_message& msg) { string rsn = reason_str( msg.reason ); - peer_ilog(c, "received go_away_message"); - ilog( "received a go away message from ${p}, reason = ${r}", - ("p", c->peer_name())("r",rsn)); + peer_wlog(c, "received go_away_message"); + fc_wlog( logger, "received a go away message from ${p}, reason = ${r}", + ("p", c->peer_name())("r",rsn) ); c->no_retry = msg.reason; if(msg.reason == duplicate ) { c->node_id = msg.node_id; @@ -2291,6 +2418,12 @@ namespace eosio { } void net_plugin_impl::handle_message(const connection_ptr& c, const request_message& msg) { + if( msg.req_blocks.ids.size() > 1 ) { + fc_elog( logger, "Invalid request_message, req_blocks.ids.size ${s}", ("s", msg.req_blocks.ids.size()) ); + close(c); + return; + } + switch (msg.req_blocks.mode) { case catch_up : peer_ilog(c, "received request_message:catch_up"); @@ -2298,7 +2431,9 @@ break; case normal : peer_ilog(c, "received request_message:normal"); - c->blk_send(msg.req_blocks.ids); + if( !msg.req_blocks.ids.empty() ) { + c->blk_send(msg.req_blocks.ids.back()); + } break; default:; } @@ -2330,6 +2465,13 @@ namespace eosio { } } + size_t calc_trx_size( const packed_transaction_ptr& trx ) { + // transaction is stored packed and unpacked, double packed_size and size of signed as an approximation of use + return (trx->get_packed_transaction().size() * 2 + sizeof(trx->get_signed_transaction())) * 2 + + trx->get_packed_context_free_data().size() * 4 + + trx->get_signatures().size() * sizeof(signature_type); + }
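
calc_trx_size() is an intentional over-estimate, not exact accounting: the packed form is doubled because the transaction is held both packed and unpacked, that sum is doubled again for the in-flight copies, and context-free data is weighted 4x. A worked instance with stand-in sizes (sizeof(signed_transaction) and sizeof(signature_type) here are illustrative numbers, not the real ones):

```cpp
#include <cstddef>
#include <cstdio>

int main() {
   const std::size_t packed_size     = 300; // bytes of the packed transaction
   const std::size_t signed_trx_size = 200; // stand-in for sizeof(signed_transaction)
   const std::size_t cfd_size        = 0;   // packed context-free data
   const std::size_t num_sigs        = 1;
   const std::size_t sig_size        = 66;  // stand-in for sizeof(signature_type)

   const std::size_t estimate = (packed_size * 2 + signed_trx_size) * 2  // (600+200)*2 = 1600
                              + cfd_size * 4
                              + num_sigs * sig_size;                     // + 66 = 1666
   std::printf("bytes charged against trx_in_progress_size: %zu\n", estimate);
}
```
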
void net_plugin_impl::handle_message(const connection_ptr& c, const packed_transaction_ptr& trx) { fc_dlog(logger, "got a packed transaction, cancel wait"); peer_ilog(c, "received packed_transaction"); @@ -2351,7 +2493,9 @@ namespace eosio { return; } dispatcher->recv_transaction(c, tid); + c->trx_in_progress_size += calc_trx_size( ptrx->packed_trx ); chain_plug->accept_transaction(ptrx, [c, this, ptrx](const static_variant<fc::exception_ptr, transaction_trace_ptr>& result) { + c->trx_in_progress_size -= calc_trx_size( ptrx->packed_trx ); if (result.contains<fc::exception_ptr>()) { peer_dlog(c, "bad packed_transaction : ${m}", ("m",result.get<fc::exception_ptr>()->what())); } else { @@ -2383,7 +2527,7 @@ namespace eosio { } } catch( ...) { // should this even be caught? - elog("Caught an unknown exception trying to recall blockID"); + fc_elog( logger,"Caught an unknown exception trying to recall blockID" ); } dispatcher->recv_block(c, blk_id, blk_num); @@ -2400,18 +2544,18 @@ namespace eosio { reason = unlinkable; } catch( const block_validate_exception &ex) { peer_elog(c, "bad signed_block : ${m}", ("m",ex.what())); - elog( "block_validate_exception accept block #${n} syncing from ${p}",("n",blk_num)("p",c->peer_name())); + fc_elog( logger, "block_validate_exception accept block #${n} syncing from ${p}",("n",blk_num)("p",c->peer_name()) ); reason = validation; } catch( const assert_exception &ex) { peer_elog(c, "bad signed_block : ${m}", ("m",ex.what())); - elog( "unable to accept block on assert exception ${n} from ${p}",("n",ex.to_string())("p",c->peer_name())); + fc_elog( logger, "unable to accept block on assert exception ${n} from ${p}",("n",ex.to_string())("p",c->peer_name())); } catch( const fc::exception &ex) { peer_elog(c, "bad signed_block : ${m}", ("m",ex.what())); - elog( "accept_block threw a non-assert exception ${x} from ${p}",( "x",ex.to_string())("p",c->peer_name())); + fc_elog( logger, "accept_block threw a non-assert exception ${x} from ${p}",( "x",ex.to_string())("p",c->peer_name())); reason = no_reason; } catch( ...) 
{ peer_elog(c, "bad signed_block : unknown exception"); - elog( "handle sync block caught something else from ${p}",("num",blk_num)("p",c->peer_name())); + fc_elog( logger, "handle sync block caught something else from ${p}",("num",blk_num)("p",c->peer_name())); } update_block_num ubn(blk_num); @@ -2436,49 +2580,54 @@ namespace eosio { void net_plugin_impl::start_conn_timer(boost::asio::steady_timer::duration du, std::weak_ptr<connection> from_connection) { connector_check->expires_from_now( du); - connector_check->async_wait(app().get_priority_queue().wrap(priority::low, [this, from_connection](boost::system::error_code ec) { + connector_check->async_wait( [this, from_connection](boost::system::error_code ec) { + app().post( priority::low, [this, from_connection, ec]() { if( !ec) { connection_monitor(from_connection); } else { - elog( "Error from connection check monitor: ${m}",( "m", ec.message())); + fc_elog( logger, "Error from connection check monitor: ${m}",( "m", ec.message())); start_conn_timer( connector_period, std::weak_ptr<connection>()); } - })); + }); + }); } void net_plugin_impl::start_txn_timer() { transaction_check->expires_from_now( txn_exp_period); - int lower_than_low = priority::low - 1; - transaction_check->async_wait(app().get_priority_queue().wrap(lower_than_low, [this](boost::system::error_code ec) { - if( !ec) { + transaction_check->async_wait( [this]( boost::system::error_code ec ) { + int lower_than_low = priority::low - 1; + app().post( lower_than_low, [this, ec]() { + if( !ec ) { expire_txns(); - } - else { - elog( "Error from transaction check monitor: ${m}",( "m", ec.message())); + } else { + fc_elog( logger, "Error from transaction check monitor: ${m}", ("m", ec.message())); start_txn_timer(); } - })); + } ); + }); } void net_plugin_impl::ticker() { keepalive_timer->expires_from_now(keepalive_interval); - keepalive_timer->async_wait(app().get_priority_queue().wrap(priority::low, [this](boost::system::error_code ec) { + keepalive_timer->async_wait( [this]( boost::system::error_code ec ) { + app().post( priority::low, [this, ec]() { ticker(); - if (ec) { - wlog("Peer keepalive ticked sooner than expected: ${m}", ("m", ec.message())); + if( ec ) { + fc_wlog( logger, "Peer keepalive ticked sooner than expected: ${m}", ("m", ec.message()) ); } - for (auto &c : connections ) { - if (c->socket->is_open()) { + for( auto& c : connections ) { + if( c->socket->is_open()) { c->send_time(); } } - })); + } ); + } ); } void net_plugin_impl::start_monitors() { - connector_check.reset(new boost::asio::steady_timer( app().get_io_service())); - transaction_check.reset(new boost::asio::steady_timer( app().get_io_service())); + connector_check.reset(new boost::asio::steady_timer( *server_ioc )); + transaction_check.reset(new boost::asio::steady_timer( *server_ioc )); start_conn_timer(connector_period, std::weak_ptr<connection>()); start_txn_timer(); }
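
start_conn_timer(), start_txn_timer() and ticker() above now all follow one shape: the timer completes on the server io_context, the handler is posted to the application queue, and the routine re-arms itself. A generic sketch of that pattern, with a second io_context standing in for app().post and the timer assumed to outlive the loop:

```cpp
#include <boost/asio.hpp>
#include <chrono>
#include <iostream>

// Fires on the net thread's context, does its work on app_ioc, then re-arms,
// mirroring the structure of ticker().
void start_ticker(boost::asio::steady_timer& timer, boost::asio::io_context& app_ioc,
                  std::chrono::milliseconds period) {
   timer.expires_after(period);
   timer.async_wait([&timer, &app_ioc, period](boost::system::error_code ec) {
      boost::asio::post(app_ioc, [&timer, &app_ioc, period, ec]() {
         start_ticker(timer, app_ioc, period);            // re-arm first, as ticker() does
         if (ec) { std::cerr << "timer error: " << ec.message() << "\n"; return; }
         std::cout << "tick\n";                           // stand-in for send_time()
      });
   });
}
```
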
@@ -2584,8 +2733,8 @@ namespace eosio { if(producer_plug != nullptr) found_producer_key = producer_plug->is_producer_key(msg.key); if( allowed_it == allowed_peers.end() && private_it == private_keys.end() && !found_producer_key) { - elog( "Peer ${peer} sent a handshake with an unauthorized key: ${key}.", - ("peer", msg.p2p_address)("key", msg.key)); + fc_elog( logger, "Peer ${peer} sent a handshake with an unauthorized key: ${key}.", + ("peer", msg.p2p_address)("key", msg.key) ); return false; } } @@ -2594,16 +2743,15 @@ namespace eosio { sc::system_clock::duration msg_time(msg.time); auto time = sc::system_clock::now().time_since_epoch(); if(time - msg_time > peer_authentication_interval) { - elog( "Peer ${peer} sent a handshake with a timestamp skewed by more than ${time}.", - ("peer", msg.p2p_address)("time", "1 second")); // TODO Add to_variant for std::chrono::system_clock::duration + fc_elog( logger, "Peer ${peer} sent a handshake with a timestamp skewed by more than ${time}.", + ("peer", msg.p2p_address)("time", "1 second")); // TODO Add to_variant for std::chrono::system_clock::duration return false; } if(msg.sig != chain::signature_type() && msg.token != sha256()) { sha256 hash = fc::sha256::hash(msg.time); if(hash != msg.token) { - elog( "Peer ${peer} sent a handshake with an invalid token.", - ("peer", msg.p2p_address)); + fc_elog( logger, "Peer ${peer} sent a handshake with an invalid token.", ("peer", msg.p2p_address) ); return false; } chain::public_key_type peer_key; try { peer_key = crypto::public_key(msg.sig, msg.token, true); } catch (fc::exception& /*e*/) { - elog( "Peer ${peer} sent a handshake with an unrecoverable key.", - ("peer", msg.p2p_address)); + fc_elog( logger, "Peer ${peer} sent a handshake with an unrecoverable key.", ("peer", msg.p2p_address) ); return false; } if((allowed_connections & (Producers | Specified)) && peer_key != msg.key) { - elog( "Peer ${peer} sent a handshake with an unauthenticated key.", - ("peer", msg.p2p_address)); + fc_elog( logger, "Peer ${peer} sent a handshake with an unauthenticated key.", ("peer", msg.p2p_address) ); return false; } } else if(allowed_connections & (Producers | Specified)) { - dlog( "Peer sent a handshake with blank signature and token, but this node accepts only authenticated connections."); + fc_dlog( logger, "Peer sent a handshake with blank signature and token, but this node accepts only authenticated connections." 
); return false; } return true; } @@ -2682,7 +2828,7 @@ namespace eosio { hello.last_irreversible_block_id = cc.get_block_id_for_num(hello.last_irreversible_block_num); } catch( const unknown_block_exception &ex) { - ilog("caught unkown_block"); + fc_wlog( logger, "caught unknown_block" ); hello.last_irreversible_block_num = 0; } } @@ -2721,6 +2867,8 @@ namespace eosio { ( "max-cleanup-time-msec", bpo::value<int>()->default_value(10), "max connection cleanup time per cleanup call in millisec") ( "network-version-match", bpo::value<bool>()->default_value(false), "True to require exact match of peer network version.") + ( "net-threads", bpo::value<uint16_t>()->default_value(my->thread_pool_size), + "Number of worker threads in net_plugin thread pool" ) ( "sync-fetch-span", bpo::value<uint32_t>()->default_value(def_sync_fetch_span), "number of blocks to retrieve in a chunk from any individual peer during synchronization") ( "use-socket-read-watermark", bpo::value<bool>()->default_value(false), "Enable experimental socket read watermark optimization") ( "peer-log-format", bpo::value<string>()->default_value( "[\"${_name}\" ${_ip}:${_port}]" ), @@ -2742,7 +2890,7 @@ namespace eosio { } void net_plugin::plugin_initialize( const variables_map& options ) { - ilog("Initialize net plugin"); + fc_ilog( logger, "Initialize net plugin" ); try { peer_log_format = options.at( "peer-log-format" ).as<string>(); @@ -2762,36 +2910,16 @@ namespace eosio { my->use_socket_read_watermark = options.at( "use-socket-read-watermark" ).as<bool>(); - my->resolver = std::make_shared<tcp::resolver>( std::ref( app().get_io_service())); if( options.count( "p2p-listen-endpoint" ) && options.at("p2p-listen-endpoint").as<string>().length()) { my->p2p_address = options.at( "p2p-listen-endpoint" ).as<string>(); - auto host = my->p2p_address.substr( 0, my->p2p_address.find( ':' )); - auto port = my->p2p_address.substr( host.size() + 1, my->p2p_address.size()); - idump((host)( port )); - tcp::resolver::query query( tcp::v4(), host.c_str(), port.c_str()); - // Note: need to add support for IPv6 too? - - my->listen_endpoint = *my->resolver->resolve( query ); - - my->acceptor.reset( new tcp::acceptor( app().get_io_service())); - - if( options.count( "p2p-server-address" )) { - my->p2p_address = options.at( "p2p-server-address" ).as<string>(); - } else { - if( my->listen_endpoint.address().to_v4() == address_v4::any()) { - boost::system::error_code ec; - auto host = host_name( ec ); - if( ec.value() != boost::system::errc::success ) { - - FC_THROW_EXCEPTION( fc::invalid_arg_exception, - "Unable to retrieve host_name. 
${msg}", ("msg", ec.message())); - - } - auto port = my->p2p_address.substr( my->p2p_address.find( ':' ), my->p2p_address.size()); - my->p2p_address = host + port; - } - } } + if( options.count( "p2p-server-address" ) ) { + my->p2p_server_address = options.at( "p2p-server-address" ).as(); + } + + my->thread_pool_size = options.at( "net-threads" ).as(); + EOS_ASSERT( my->thread_pool_size > 0, chain::plugin_config_exception, + "net-threads ${num} must be greater than 0", ("num", my->thread_pool_size) ); if( options.count( "p2p-peer-address" )) { my->supplied_peers = options.at( "p2p-peer-address" ).as >(); @@ -2839,27 +2967,66 @@ namespace eosio { EOS_ASSERT( my->chain_plug, chain::missing_chain_plugin_exception, "" ); my->chain_id = my->chain_plug->get_chain_id(); fc::rand_pseudo_bytes( my->node_id.data(), my->node_id.data_size()); - ilog( "my node_id is ${id}", ("id", my->node_id)); + fc_ilog( logger, "my node_id is ${id}", ("id", my->node_id )); - my->keepalive_timer.reset( new boost::asio::steady_timer( app().get_io_service())); - my->ticker(); } FC_LOG_AND_RETHROW() } void net_plugin::plugin_startup() { my->producer_plug = app().find_plugin(); + + my->thread_pool.emplace( my->thread_pool_size ); + my->server_ioc = std::make_shared(); + my->server_ioc_work.emplace( boost::asio::make_work_guard( *my->server_ioc ) ); + // currently thread_pool only used for server_ioc + for( uint16_t i = 0; i < my->thread_pool_size; ++i ) { + boost::asio::post( *my->thread_pool, [ioc = my->server_ioc]() { ioc->run(); } ); + } + + my->resolver = std::make_shared( std::ref( *my->server_ioc )); + if( my->p2p_address.size() > 0 ) { + auto host = my->p2p_address.substr( 0, my->p2p_address.find( ':' )); + auto port = my->p2p_address.substr( host.size() + 1, my->p2p_address.size()); + tcp::resolver::query query( tcp::v4(), host.c_str(), port.c_str()); + // Note: need to add support for IPv6 too? + + my->listen_endpoint = *my->resolver->resolve( query ); + + my->acceptor.reset( new tcp::acceptor( *my->server_ioc ) ); + + if( !my->p2p_server_address.empty() ) { + my->p2p_address = my->p2p_server_address; + } else { + if( my->listen_endpoint.address().to_v4() == address_v4::any()) { + boost::system::error_code ec; + auto host = host_name( ec ); + if( ec.value() != boost::system::errc::success ) { + + FC_THROW_EXCEPTION( fc::invalid_arg_exception, + "Unable to retrieve host_name. 
${msg}", ("msg", ec.message())); + + } + auto port = my->p2p_address.substr( my->p2p_address.find( ':' ), my->p2p_address.size()); + my->p2p_address = host + port; + } + } + } + + my->keepalive_timer.reset( new boost::asio::steady_timer( *my->server_ioc ) ); + my->ticker(); + if( my->acceptor ) { my->acceptor->open(my->listen_endpoint.protocol()); my->acceptor->set_option(tcp::acceptor::reuse_address(true)); try { my->acceptor->bind(my->listen_endpoint); } catch (const std::exception& e) { - ilog("net_plugin::plugin_startup failed to bind to port ${port}", - ("port", my->listen_endpoint.port())); + fc_elog( logger, "net_plugin::plugin_startup failed to bind to port ${port}", + ("port", my->listen_endpoint.port())); throw e; } my->acceptor->listen(); - ilog("starting listener, max clients is ${mc}",("mc",my->max_client_count)); + fc_ilog( logger, "starting listener, max clients is ${mc}",("mc",my->max_client_count) ); my->start_listen_loop(); } chain::controller&cc = my->chain_plug->chain(); @@ -2871,7 +3038,7 @@ namespace eosio { if( cc.get_read_mode() == chain::db_read_mode::READ_ONLY ) { my->max_nodes_per_host = 0; - ilog( "node in read-only mode setting max_nodes_per_host to 0 to prevent connections" ); + fc_ilog( logger, "node in read-only mode setting max_nodes_per_host to 0 to prevent connections" ); } my->start_monitors(); @@ -2879,28 +3046,47 @@ namespace eosio { for( auto seed_node : my->supplied_peers ) { connect( seed_node ); } + handle_sighup(); + } + void net_plugin::handle_sighup() { if(fc::get_logger_map().find(logger_name) != fc::get_logger_map().end()) logger = fc::get_logger_map()[logger_name]; } void net_plugin::plugin_shutdown() { try { - ilog( "shutdown.." ); + fc_ilog( logger, "shutdown.." ); + if( my->server_ioc_work ) + my->server_ioc_work->reset(); + + if( my->connector_check ) + my->connector_check->cancel(); + if( my->transaction_check ) + my->transaction_check->cancel(); + if( my->keepalive_timer ) + my->keepalive_timer->cancel(); + my->done = true; if( my->acceptor ) { - ilog( "close acceptor" ); + fc_ilog( logger, "close acceptor" ); + my->acceptor->cancel(); my->acceptor->close(); - ilog( "close ${s} connections",( "s",my->connections.size()) ); - auto cons = my->connections; - for( auto con : cons ) { - my->close( con); + fc_ilog( logger, "close ${s} connections",( "s",my->connections.size()) ); + for( auto& con : my->connections ) { + my->close( con ); } + my->connections.clear(); + } - my->acceptor.reset(nullptr); + if( my->server_ioc ) + my->server_ioc->stop(); + if( my->thread_pool ) { + my->thread_pool->join(); + my->thread_pool->stop(); } - ilog( "exit shutdown" ); + fc_ilog( logger, "exit shutdown" ); } FC_CAPTURE_AND_RETHROW() } diff --git a/plugins/producer_plugin/include/eosio/producer_plugin/producer_plugin.hpp b/plugins/producer_plugin/include/eosio/producer_plugin/producer_plugin.hpp index 1c61e35974b..66030cc587e 100644 --- a/plugins/producer_plugin/include/eosio/producer_plugin/producer_plugin.hpp +++ b/plugins/producer_plugin/include/eosio/producer_plugin/producer_plugin.hpp @@ -65,6 +65,7 @@ class producer_plugin : public appbase::plugin { virtual void plugin_initialize(const boost::program_options::variables_map& options); virtual void plugin_startup(); virtual void plugin_shutdown(); + void handle_sighup() override; void pause(); void resume(); diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp index e7d0600deac..4b7f004a1d7 100644 --- a/plugins/producer_plugin/producer_plugin.cpp +++ 
b/plugins/producer_plugin/producer_plugin.cpp @@ -6,6 +6,7 @@ #include #include #include +#include #include #include @@ -534,7 +535,7 @@ void producer_plugin::set_program_options( " KEY: \tis a string form of a valid EOSIO private key which maps to the provided public key\n\n" " KEOSD: \tis the URL where keosd is available and the appropriate wallet(s) are unlocked") ("keosd-provider-timeout", boost::program_options::value()->default_value(5), - "Limits the maximum time (in milliseconds) that is allowd for sending blocks to a keosd provider for signing") + "Limits the maximum time (in milliseconds) that is allowed for sending blocks to a keosd provider for signing") ("greylist-account", boost::program_options::value<vector<string>>()->composing()->multitoken(), "account that can not access to extended CPU/NET virtual resources") ("produce-time-offset-us", boost::program_options::value()->default_value(0), @@ -734,14 +735,7 @@ void producer_plugin::plugin_initialize(const boost::program_options::variables_ void producer_plugin::plugin_startup() { try { - auto& logger_map = fc::get_logger_map(); - if(logger_map.find(logger_name) != logger_map.end()) { - _log = logger_map[logger_name]; - } - - if( logger_map.find(trx_trace_logger_name) != logger_map.end()) { - _trx_trace_log = logger_map[trx_trace_logger_name]; - } + handle_sighup(); // Sets loggers ilog("producer plugin: plugin_startup() begin"); @@ -795,6 +789,16 @@ void producer_plugin::plugin_shutdown() { my->_irreversible_block_connection.reset(); } +void producer_plugin::handle_sighup() { + auto& logger_map = fc::get_logger_map(); + if(logger_map.find(logger_name) != logger_map.end()) { + _log = logger_map[logger_name]; + } + if( logger_map.find(trx_trace_logger_name) != logger_map.end()) { + _trx_trace_log = logger_map[trx_trace_logger_name]; + } +} + void producer_plugin::pause() { my->_pause_production = true; } @@ -1143,6 +1147,10 @@ producer_plugin_impl::start_block_result producer_plugin_impl::start_block() { int orig_count = _persistent_transactions.size(); while(!persisted_by_expiry.empty() && persisted_by_expiry.begin()->expiry <= pending_block_time) { + if (preprocess_deadline <= fc::time_point::now()) { + exhausted = true; + break; + } auto const& txid = persisted_by_expiry.begin()->trx_id; if (_pending_block_mode == pending_block_mode::producing) { fc_dlog(_trx_trace_log, "[TRX_TRACE] Block ${block_num} for producer ${prod} is EXPIRING PERSISTED tx: ${txid}", @@ -1158,9 +1166,15 @@ producer_plugin_impl::start_block_result producer_plugin_impl::start_block() { num_expired_persistent++; } - fc_dlog(_log, "Processed ${n} persisted transactions, Expired ${expired}", - ("n", orig_count) - ("expired", num_expired_persistent)); + if( exhausted ) { + fc_wlog( _log, "Unable to process all ${n} persisted transactions before deadline, Expired ${expired}", + ( "n", orig_count ) + ( "expired", num_expired_persistent ) ); + } else { + fc_dlog( _log, "Processed ${n} persisted transactions, Expired ${expired}", + ( "n", orig_count ) + ( "expired", num_expired_persistent ) ); + } }
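
The preprocess_deadline check inserted above turns the expiry sweep into budgeted work: it stops mid-sweep, flags exhausted, and lets the next block pick up the remainder rather than overrunning block production. A minimal sketch of the same budget pattern with a plain queue:

```cpp
#include <chrono>
#include <deque>

// Pops expired items until either the queue is empty or the deadline passes.
// Returns true when work remains (the caller's `exhausted` flag).
bool expire_until_deadline(std::deque<int>& expired,
                           std::chrono::steady_clock::time_point deadline) {
   while (!expired.empty()) {
      if (std::chrono::steady_clock::now() >= deadline) return true; // budget spent
      expired.pop_front(); // stand-in for erasing one expired transaction
   }
   return false;
}
```
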
try { @@ -1171,13 +1185,15 @@ producer_plugin_impl::start_block_result producer_plugin_impl::start_block() { if (_producers.empty() && persisted_by_id.empty()) { // if this node can never produce and has no persisted transactions, // there is no need for unapplied transactions they can be dropped - chain.drop_all_unapplied_transactions(); + chain.get_unapplied_transactions().clear(); } else { - std::vector<transaction_metadata_ptr> apply_trxs; - { // derive appliable transactions from unapplied_transactions and drop droppable transactions - auto unapplied_trxs = chain.get_unapplied_transactions(); - apply_trxs.reserve(unapplied_trxs.size()); - + // derive applicable transactions from unapplied_transactions and drop droppable transactions + unapplied_transactions_type& unapplied_trxs = chain.get_unapplied_transactions(); + if( !unapplied_trxs.empty() ) { + auto unapplied_trxs_size = unapplied_trxs.size(); + int num_applied = 0; + int num_failed = 0; + int num_processed = 0; auto calculate_transaction_category = [&](const transaction_metadata_ptr& trx) { if (trx->packed_trx->expiration() < pending_block_time) { return tx_category::EXPIRED; } else if (persisted_by_id.find(trx->id) != persisted_by_id.end()) { return tx_category::PERSISTED; } else { return tx_category::UNEXPIRED_UNPERSISTED; } }; - for (auto& trx: unapplied_trxs) { + auto itr = unapplied_trxs.begin(); + while( itr != unapplied_trxs.end() ) { + auto itr_next = itr; // save off next since itr may be invalidated by loop + ++itr_next; + + if( preprocess_deadline <= fc::time_point::now() ) exhausted = true; + if( exhausted ) break; + const auto& trx = itr->second; auto category = calculate_transaction_category(trx); - if (category == tx_category::EXPIRED || (category == tx_category::UNEXPIRED_UNPERSISTED && _producers.empty())) { + if (category == tx_category::EXPIRED || + (category == tx_category::UNEXPIRED_UNPERSISTED && _producers.empty())) + { if (!_producers.empty()) { fc_dlog(_trx_trace_log, "[TRX_TRACE] Node with producers configured is dropping an EXPIRED transaction that was PREVIOUSLY ACCEPTED : ${txid}", ("txid", trx->id)); } - chain.drop_unapplied_transaction(trx); - } else if (category == tx_category::PERSISTED || (category == tx_category::UNEXPIRED_UNPERSISTED && _pending_block_mode == pending_block_mode::producing)) { - apply_trxs.emplace_back(std::move(trx)); - } - } - } - - if (!apply_trxs.empty()) { - int num_applied = 0; - int num_failed = 0; - int num_processed = 0; - - for (const auto& trx: apply_trxs) { - if (preprocess_deadline <= fc::time_point::now()) exhausted = true; - if (exhausted) { - break; - } - - num_processed++; - - try { - auto deadline = fc::time_point::now() + fc::milliseconds(_max_transaction_time_ms); - bool deadline_is_subjective = false; - if (_max_transaction_time_ms < 0 || (_pending_block_mode == pending_block_mode::producing && preprocess_deadline < deadline)) { - deadline_is_subjective = true; - deadline = preprocess_deadline; - } + itr = unapplied_trxs.erase( itr ); // unapplied_trxs map has not been modified, so simply erase and continue + continue; + } else if (category == tx_category::PERSISTED || + (category == tx_category::UNEXPIRED_UNPERSISTED && _pending_block_mode == pending_block_mode::producing)) + { + ++num_processed; + + try { + auto deadline = fc::time_point::now() + fc::milliseconds(_max_transaction_time_ms); + bool deadline_is_subjective = false; + if (_max_transaction_time_ms < 0 || (_pending_block_mode == pending_block_mode::producing && preprocess_deadline < deadline)) { + deadline_is_subjective = true; + deadline = preprocess_deadline; + } - auto trace = chain.push_transaction(trx, deadline); - if (trace->except) { - if (failure_is_subjective(*trace->except, deadline_is_subjective)) { - exhausted = true; + auto trace = chain.push_transaction(trx, deadline); + if (trace->except) { + if (failure_is_subjective(*trace->except, deadline_is_subjective)) { + exhausted = true; + break; + } else { + // this failed our configured maximum transaction time, we don't want to replay it
+ // chain.push_transaction can modify unapplied_trxs, so erase by id + unapplied_trxs.erase( trx->signed_id ); + ++num_failed; + } } else { - // this failed our configured maximum transaction time, we don't want to replay it - chain.drop_unapplied_transaction(trx); - num_failed++; + ++num_applied; } - } else { - num_applied++; - } - } catch ( const guard_exception& e ) { - chain_plug->handle_guard_exception(e); - return start_block_result::failed; - } FC_LOG_AND_DROP(); + } catch ( const guard_exception& e ) { + chain_plug->handle_guard_exception(e); + return start_block_result::failed; + } FC_LOG_AND_DROP(); + } + + itr = itr_next; } fc_dlog(_log, "Processed ${m} of ${n} previously applied transactions, Applied ${applied}, Failed/Dropped ${failed}", - ("m", num_processed) - ("n", apply_trxs.size()) - ("applied", num_applied) - ("failed", num_failed)); + ("m", num_processed) + ("n", unapplied_trxs_size) + ("applied", num_applied) + ("failed", num_failed)); } } @@ -1258,6 +1275,7 @@ producer_plugin_impl::start_block_result producer_plugin_impl::start_block() { int orig_count = _blacklisted_transactions.size(); while (!blacklist_by_expiry.empty() && blacklist_by_expiry.begin()->expiry <= now) { + if (preprocess_deadline <= fc::time_point::now()) break; blacklist_by_expiry.erase(blacklist_by_expiry.begin()); num_expired++; } @@ -1267,85 +1285,105 @@ producer_plugin_impl::start_block_result producer_plugin_impl::start_block() { ("expired", num_expired)); } - auto scheduled_trxs = chain.get_scheduled_transactions(); - if (!scheduled_trxs.empty()) { - int num_applied = 0; - int num_failed = 0; - int num_processed = 0; + // scheduled transactions + int num_applied = 0; + int num_failed = 0; + int num_processed = 0; + + auto scheduled_trx_deadline = preprocess_deadline; + if (_max_scheduled_transaction_time_per_block_ms >= 0) { + scheduled_trx_deadline = std::min( + scheduled_trx_deadline, + fc::time_point::now() + fc::milliseconds(_max_scheduled_transaction_time_per_block_ms) + ); + } + time_point pending_block_time = chain.pending_block_time(); + const auto& sch_idx = chain.db().get_index<generated_transaction_multi_index,by_delay>(); + const auto scheduled_trxs_size = sch_idx.size(); + auto sch_itr = sch_idx.begin(); + while( sch_itr != sch_idx.end() ) { + if( sch_itr->delay_until > pending_block_time) break; // not scheduled yet + if( sch_itr->published >= pending_block_time ) { + ++sch_itr; + continue; // do not allow schedule and execute in same block + } + if( scheduled_trx_deadline <= fc::time_point::now() ) { + exhausted = true; + break; + } - auto scheduled_trx_deadline = preprocess_deadline; - if (_max_scheduled_transaction_time_per_block_ms >= 0) { - scheduled_trx_deadline = std::min( - scheduled_trx_deadline, - fc::time_point::now() + fc::milliseconds(_max_scheduled_transaction_time_per_block_ms) - ); + const transaction_id_type trx_id = sch_itr->trx_id; // make copy since reference could be invalidated + if (blacklist_by_id.find(trx_id) != blacklist_by_id.end()) { + ++sch_itr; + continue; } - for (const auto& trx : scheduled_trxs) { - if (scheduled_trx_deadline <= fc::time_point::now()) exhausted = true; - if (exhausted) { - break; - } + auto sch_itr_next = sch_itr; // save off next since sch_itr may be invalidated by loop + ++sch_itr_next; + const auto next_delay_until = sch_itr_next != sch_idx.end() ? sch_itr_next->delay_until : sch_itr->delay_until; 
+ const auto next_id = sch_itr_next != sch_idx.end() ? sch_itr_next->id : sch_itr->id; - num_processed++; + num_processed++; - // configurable ratio of incoming txns vs deferred txns - while (_incoming_trx_weight >= 1.0 && orig_pending_txn_size && _pending_incoming_transactions.size()) { - if (scheduled_trx_deadline <= fc::time_point::now()) break; + // configurable ratio of incoming txns vs deferred txns + while (_incoming_trx_weight >= 1.0 && orig_pending_txn_size && _pending_incoming_transactions.size()) { + if (scheduled_trx_deadline <= fc::time_point::now()) break; - auto e = _pending_incoming_transactions.front(); - _pending_incoming_transactions.pop_front(); - --orig_pending_txn_size; - _incoming_trx_weight -= 1.0; - process_incoming_transaction_async(std::get<0>(e), std::get<1>(e), std::get<2>(e)); - } + auto e = _pending_incoming_transactions.front(); + _pending_incoming_transactions.pop_front(); + --orig_pending_txn_size; + _incoming_trx_weight -= 1.0; + process_incoming_transaction_async(std::get<0>(e), std::get<1>(e), std::get<2>(e)); + } - if (scheduled_trx_deadline <= fc::time_point::now()) { - exhausted = true; - break; - } + if (scheduled_trx_deadline <= fc::time_point::now()) { + exhausted = true; + break; + } - if (blacklist_by_id.find(trx) != blacklist_by_id.end()) { - continue; + try { + auto deadline = fc::time_point::now() + fc::milliseconds(_max_transaction_time_ms); + bool deadline_is_subjective = false; + if (_max_transaction_time_ms < 0 || (_pending_block_mode == pending_block_mode::producing && scheduled_trx_deadline < deadline)) { + deadline_is_subjective = true; + deadline = scheduled_trx_deadline; } - try { - auto deadline = fc::time_point::now() + fc::milliseconds(_max_transaction_time_ms); - bool deadline_is_subjective = false; - if (_max_transaction_time_ms < 0 || (_pending_block_mode == pending_block_mode::producing && scheduled_trx_deadline < deadline)) { - deadline_is_subjective = true; - deadline = scheduled_trx_deadline; - } - - auto trace = chain.push_scheduled_transaction(trx, deadline); - if (trace->except) { - if (failure_is_subjective(*trace->except, deadline_is_subjective)) { - exhausted = true; - } else { - auto expiration = fc::time_point::now() + fc::seconds(chain.get_global_properties().configuration.deferred_trx_expiration_window); - // this failed our configured maximum transaction time, we don't want to replay it add it to a blacklist - _blacklisted_transactions.insert(transaction_id_with_expiry{trx, expiration}); - num_failed++; - } + auto trace = chain.push_scheduled_transaction(trx_id, deadline); + if (trace->except) { + if (failure_is_subjective(*trace->except, deadline_is_subjective)) { + exhausted = true; + break; } else { - num_applied++; + auto expiration = fc::time_point::now() + fc::seconds(chain.get_global_properties().configuration.deferred_trx_expiration_window); + // this failed our configured maximum transaction time, we don't want to replay it; add it to a blacklist + _blacklisted_transactions.insert(transaction_id_with_expiry{trx_id, expiration}); + num_failed++; } - } catch ( const guard_exception& e ) { - chain_plug->handle_guard_exception(e); - return start_block_result::failed; - } FC_LOG_AND_DROP(); + } else { + num_applied++; + } + } catch ( const guard_exception& e ) { + chain_plug->handle_guard_exception(e); + return start_block_result::failed; + } FC_LOG_AND_DROP(); - _incoming_trx_weight += _incoming_defer_ratio; - if (!orig_pending_txn_size) _incoming_trx_weight = 0.0; - } + _incoming_trx_weight += _incoming_defer_ratio; + if (!orig_pending_txn_size) _incoming_trx_weight = 0.0; - fc_dlog(_log, "Processed ${m} of ${n} scheduled transactions, Applied ${applied}, Failed/Dropped ${failed}", - ("m", num_processed) - ("n", scheduled_trxs.size()) - ("applied", num_applied) - ("failed", num_failed)); + if( sch_itr_next == sch_idx.end() ) break; + sch_itr = sch_idx.lower_bound( boost::make_tuple( next_delay_until, next_id ) ); + } + if( scheduled_trxs_size > 0 ) { + fc_dlog( _log, + "Processed ${m} of ${n} scheduled transactions, Applied ${applied}, Failed/Dropped ${failed}", + ( "m", num_processed ) + ( "n", scheduled_trxs_size ) + ( "applied", num_applied ) + ( "failed", num_failed ) ); } + }
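
The loop above never trusts a saved iterator across push_scheduled_transaction(): it snapshots the next entry's key (delay_until, id) before doing work, then recovers its position with lower_bound. A compact sketch of that idiom, with std::map standing in for the by_delay multi-index:

```cpp
#include <cstdint>
#include <iterator>
#include <map>
#include <string>

using schedule = std::map<uint64_t, std::string>; // key ~ (delay_until, id) flattened

void drain(schedule& sched) {
   auto it = sched.begin();
   while (it != sched.end()) {
      auto next = std::next(it);
      const bool at_last = (next == sched.end());
      const uint64_t next_key = at_last ? it->first : next->first; // snapshot before work

      // ... apply *it here; applying may insert or erase arbitrary entries,
      // invalidating any iterator we might have saved ...
      sched.erase(it); // e.g. the applied entry is consumed as a side effect

      if (at_last) break;                 // mirrors `if(sch_itr_next == end) break;`
      it = sched.lower_bound(next_key);   // re-seek by key: immune to invalidation
   }
}
```
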
if (exhausted || preprocess_deadline <= fc::time_point::now()) { @@ -1357,11 +1395,11 @@ producer_plugin_impl::start_block_result producer_plugin_impl::start_block() { if (!_pending_incoming_transactions.empty()) { fc_dlog(_log, "Processing ${n} pending transactions", ("n", _pending_incoming_transactions.size())); while (orig_pending_txn_size && _pending_incoming_transactions.size()) { + if (preprocess_deadline <= fc::time_point::now()) return start_block_result::exhausted; auto e = _pending_incoming_transactions.front(); _pending_incoming_transactions.pop_front(); --orig_pending_txn_size; process_incoming_transaction_async(std::get<0>(e), std::get<1>(e), std::get<2>(e)); - if (preprocess_deadline <= fc::time_point::now()) return start_block_result::exhausted; } } return start_block_result::succeeded; diff --git a/plugins/state_history_plugin/include/eosio/state_history_plugin/state_history_log.hpp b/plugins/state_history_plugin/include/eosio/state_history_plugin/state_history_log.hpp index 1644d29a404..5f97b0280a4 100644 --- a/plugins/state_history_plugin/include/eosio/state_history_plugin/state_history_log.hpp +++ b/plugins/state_history_plugin/include/eosio/state_history_plugin/state_history_log.hpp @@ -208,7 +208,7 @@ class state_history_log { void open_index() { index.open(index_filename, std::ios_base::binary | std::ios_base::in | std::ios_base::out | std::ios_base::app); index.seekg(0, std::ios_base::end); - if (index.tellg() == (_end_block - _begin_block) * sizeof(state_history_summary)) + if (index.tellg() == (static_cast<uint64_t>(_end_block) - _begin_block) * sizeof(state_history_summary)) return; ilog("Regenerate ${name}.index", ("name", name)); index.close(); diff --git a/plugins/state_history_plugin/state_history_plugin.cpp b/plugins/state_history_plugin/state_history_plugin.cpp index 67b61587440..49c47041e3d 100644 --- a/plugins/state_history_plugin/state_history_plugin.cpp +++ b/plugins/state_history_plugin/state_history_plugin.cpp @@ -511,8 +511,9 @@ void state_history_plugin::set_program_options(options_description& cli, options cli.add_options()("delete-state-history", bpo::bool_switch()->default_value(false), "clear state history files"); options("trace-history", bpo::bool_switch()->default_value(false), "enable trace history"); options("chain-state-history", bpo::bool_switch()->default_value(false), "enable chain state history"); - options("state-history-endpoint", bpo::value<string>()->default_value("0.0.0.0:8080"), - "the endpoint upon which to listen for incoming connections"); + options("state-history-endpoint", bpo::value<string>()->default_value("127.0.0.1:8080"), + "the endpoint upon which to listen for incoming connections. 
Caution: only expose this port to " + "your internal network."); } void state_history_plugin::plugin_initialize(const variables_map& options) { diff --git a/plugins/test_control_plugin/test_control_plugin.cpp b/plugins/test_control_plugin/test_control_plugin.cpp index 170079a8016..dbde59853ef 100644 --- a/plugins/test_control_plugin/test_control_plugin.cpp +++ b/plugins/test_control_plugin/test_control_plugin.cpp @@ -64,15 +64,22 @@ void test_control_plugin_impl::accepted_block(const chain::block_state_ptr& bsp) void test_control_plugin_impl::process_next_block_state(const chain::block_state_ptr& bsp) { const auto block_time = _chain.head_block_time() + fc::microseconds(chain::config::block_interval_us); - auto producer_name = bsp->get_scheduled_producer(block_time).producer_name; - // start counting sequences for this producer (once we + const auto producer_name = bsp->get_scheduled_producer(block_time).producer_name; + if (_producer != account_name()) + ilog("producer ${cprod}, looking for ${lprod}", ("cprod", producer_name.to_string())("lprod", _producer.to_string())); + + // start counting sequences for this producer (once we have seen the initial block for that producer) if (producer_name == _producer && _clean_producer_sequence) { + ilog("producer ${prod} seq: ${seq}", ("prod", producer_name.to_string())("seq", _producer_sequence)); _producer_sequence += 1; if (_producer_sequence >= _where_in_sequence) { + ilog("shutting down"); app().quit(); } } else if (producer_name != _producer) { + if (_producer_sequence != -1) + ilog("producer changed, restarting"); _producer_sequence = -1; // can now guarantee we are at the start of the producer _clean_producer_sequence = true; @@ -108,20 +115,24 @@ void test_control_plugin::plugin_initialize(const variables_map& options) { } void test_control_plugin::plugin_startup() { + ilog("test_control_plugin starting up"); my.reset(new test_control_plugin_impl(app().get_plugin<chain_plugin>().chain())); my->connect(); } void test_control_plugin::plugin_shutdown() { my->disconnect(); + ilog("test_control_plugin shutting down"); } namespace test_control_apis { read_write::kill_node_on_producer_results read_write::kill_node_on_producer(const read_write::kill_node_on_producer_params& params) const { if (params.based_on_lib) { + ilog("kill on lib for producer: ${p} at their ${s} slot in sequence", ("p", params.producer.to_string())("s", params.where_in_sequence)); my->kill_on_lib(params.producer, params.where_in_sequence); } else { + ilog("kill on head for producer: ${p} at their ${s} slot in sequence", ("p", params.producer.to_string())("s", params.where_in_sequence)); my->kill_on_head(params.producer, params.where_in_sequence); } return read_write::kill_node_on_producer_results{}; diff --git a/plugins/txn_test_gen_plugin/txn_test_gen_plugin.cpp b/plugins/txn_test_gen_plugin/txn_test_gen_plugin.cpp old mode 100644 new mode 100755 index 712a35a2d0f..d4f197df468 --- a/plugins/txn_test_gen_plugin/txn_test_gen_plugin.cpp +++ b/plugins/txn_test_gen_plugin/txn_test_gen_plugin.cpp @@ -37,6 +37,7 @@ namespace eosio { static appbase::abstract_plugin& _txn_test_gen_plugin = app().register_plugin<txn_test_gen_plugin>(); using namespace eosio::chain; +using io_work_t = boost::asio::executor_work_guard<boost::asio::io_context::executor_type>; #define CALL(api_name, api_handle, call_name, INVOKE, http_response_code) \ {std::string("/v1/" #api_name "/" #call_name), \ @@ -92,15 +93,16 @@ struct txn_test_gen_plugin_impl { uint64_t _total_us = 0; uint64_t _txcount = 0; - int _remain = 0; + std::shared_ptr<boost::asio::io_context> gen_ioc; + optional 
gen_ioc_work; + uint16_t thread_pool_size; + optional thread_pool; + std::shared_ptr timer; - void push_next_transaction(const std::shared_ptr>& trxs, size_t index, const std::function& next ) { + void push_next_transaction(const std::shared_ptr>& trxs, const std::function& next ) { chain_plugin& cp = app().get_plugin(); - const int overlap = 20; - int end = std::min(index + overlap, trxs->size()); - _remain = end - index; - for (int i = index; i < end; ++i) { + for (size_t i = 0; i < trxs->size(); ++i) { cp.accept_transaction( packed_transaction(trxs->at(i)), [=](const fc::static_variant& result){ if (result.contains()) { next(result.get()); @@ -109,14 +111,6 @@ struct txn_test_gen_plugin_impl { _total_us += result.get()->receipt->cpu_usage_us; ++_txcount; } - --_remain; - if (_remain == 0 ) { - if (end < trxs->size()) { - push_next_transaction(trxs, index + overlap, next); - } else { - next(nullptr); - } - } } }); } @@ -124,7 +118,9 @@ struct txn_test_gen_plugin_impl { void push_transactions( std::vector&& trxs, const std::function& next ) { auto trxs_copy = std::make_shared>(std::move(trxs)); - push_next_transaction(trxs_copy, 0, next); + app().post(priority::low, [this, trxs_copy, next]() { + push_next_transaction(trxs_copy, next); + }); } void create_test_accounts(const std::string& init_name, const std::string& init_priv_key, const std::function& next) { @@ -217,7 +213,7 @@ struct txn_test_gen_plugin_impl { act.account = N(txn.test.t); act.name = N(issue); act.authorization = vector{{newaccountC,config::active_name}}; - act.data = eosio_token_serializer.variant_to_binary("issue", fc::json::from_string("{\"to\":\"txn.test.t\",\"quantity\":\"600.0000 CUR\",\"memo\":\"\"}"), abi_serializer_max_time); + act.data = eosio_token_serializer.variant_to_binary("issue", fc::json::from_string("{\"to\":\"txn.test.t\",\"quantity\":\"60000.0000 CUR\",\"memo\":\"\"}"), abi_serializer_max_time); trx.actions.push_back(act); } { @@ -225,7 +221,7 @@ struct txn_test_gen_plugin_impl { act.account = N(txn.test.t); act.name = N(transfer); act.authorization = vector{{newaccountC,config::active_name}}; - act.data = eosio_token_serializer.variant_to_binary("transfer", fc::json::from_string("{\"from\":\"txn.test.t\",\"to\":\"txn.test.a\",\"quantity\":\"200.0000 CUR\",\"memo\":\"\"}"), abi_serializer_max_time); + act.data = eosio_token_serializer.variant_to_binary("transfer", fc::json::from_string("{\"from\":\"txn.test.t\",\"to\":\"txn.test.a\",\"quantity\":\"20000.0000 CUR\",\"memo\":\"\"}"), abi_serializer_max_time); trx.actions.push_back(act); } { @@ -233,7 +229,7 @@ struct txn_test_gen_plugin_impl { act.account = N(txn.test.t); act.name = N(transfer); act.authorization = vector{{newaccountC,config::active_name}}; - act.data = eosio_token_serializer.variant_to_binary("transfer", fc::json::from_string("{\"from\":\"txn.test.t\",\"to\":\"txn.test.b\",\"quantity\":\"200.0000 CUR\",\"memo\":\"\"}"), abi_serializer_max_time); + act.data = eosio_token_serializer.variant_to_binary("transfer", fc::json::from_string("{\"from\":\"txn.test.t\",\"to\":\"txn.test.b\",\"quantity\":\"20000.0000 CUR\",\"memo\":\"\"}"), abi_serializer_max_time); trx.actions.push_back(act); } @@ -285,30 +281,42 @@ struct txn_test_gen_plugin_impl { timer_timeout = period; batch = batch_size/2; + nonce_prefix = 0; - ilog("Started transaction test plugin; performing ${p} transactions every ${m}ms", ("p", batch_size)("m", period)); + gen_ioc = std::make_shared(); + gen_ioc_work.emplace( boost::asio::make_work_guard(*gen_ioc) ); + 
thread_pool.emplace( thread_pool_size ); + for( uint16_t i = 0; i < thread_pool_size; i++ ) + boost::asio::post( *thread_pool, [ioc = gen_ioc]() { ioc->run(); } ); + timer = std::make_shared(*gen_ioc); - arm_timer(boost::asio::high_resolution_timer::clock_type::now()); + ilog("Started transaction test plugin; generating ${p} transactions every ${m} ms by ${t} load generation threads", + ("p", batch_size) ("m", period) ("t", thread_pool_size)); + + boost::asio::post( *gen_ioc, [this]() { + arm_timer(boost::asio::high_resolution_timer::clock_type::now()); + }); } void arm_timer(boost::asio::high_resolution_timer::time_point s) { - timer.expires_at(s + std::chrono::milliseconds(timer_timeout)); - timer.async_wait([this](const boost::system::error_code& ec) { - if(!running || ec) - return; - + timer->expires_at(s + std::chrono::milliseconds(timer_timeout)); + boost::asio::post( *gen_ioc, [this]() { send_transaction([this](const fc::exception_ptr& e){ if (e) { elog("pushing transaction failed: ${e}", ("e", e->to_detail_string())); - stop_generation(); - } else { - arm_timer(timer.expires_at()); + if(running) + stop_generation(); } - }); + }, nonce_prefix++); + }); + timer->async_wait([this](const boost::system::error_code& ec) { + if(!running || ec) + return; + arm_timer(timer->expires_at()); }); } - void send_transaction(std::function next) { + void send_transaction(std::function next, uint64_t nonce_prefix) { std::vector trxs; trxs.reserve(2*batch); @@ -337,7 +345,7 @@ struct txn_test_gen_plugin_impl { { signed_transaction trx; trx.actions.push_back(act_a_to_b); - trx.context_free_actions.emplace_back(action({}, config::null_account_name, "nonce", fc::raw::pack(nonce++))); + trx.context_free_actions.emplace_back(action({}, config::null_account_name, "nonce", fc::raw::pack( std::to_string(nonce_prefix)+std::to_string(nonce++) ))); trx.set_reference_block(reference_block_id); trx.expiration = cc.head_block_time() + fc::seconds(30); trx.max_net_usage_words = 100; @@ -348,7 +356,7 @@ struct txn_test_gen_plugin_impl { { signed_transaction trx; trx.actions.push_back(act_b_to_a); - trx.context_free_actions.emplace_back(action({}, config::null_account_name, "nonce", fc::raw::pack(nonce++))); + trx.context_free_actions.emplace_back(action({}, config::null_account_name, "nonce", fc::raw::pack( std::to_string(nonce_prefix)+std::to_string(nonce++) ))); trx.set_reference_block(reference_block_id); trx.expiration = cc.head_block_time() + fc::seconds(30); trx.max_net_usage_words = 100; @@ -366,8 +374,16 @@ struct txn_test_gen_plugin_impl { void stop_generation() { if(!running) throw fc::exception(fc::invalid_operation_exception_code); - timer.cancel(); + timer->cancel(); running = false; + if( gen_ioc_work ) + gen_ioc_work->reset(); + if( gen_ioc ) + gen_ioc->stop(); + if( thread_pool ) { + thread_pool->join(); + thread_pool->stop(); + } ilog("Stopping transaction generation test"); if (_txcount) { @@ -376,11 +392,11 @@ struct txn_test_gen_plugin_impl { } } - boost::asio::high_resolution_timer timer{app().get_io_service()}; bool running{false}; unsigned timer_timeout; unsigned batch; + uint64_t nonce_prefix; action act_a_to_b; action act_b_to_a; @@ -394,6 +410,7 @@ txn_test_gen_plugin::~txn_test_gen_plugin() {} void txn_test_gen_plugin::set_program_options(options_description&, options_description& cfg) { cfg.add_options() ("txn-reference-block-lag", bpo::value()->default_value(0), "Lag in number of blocks from the head block when selecting the reference block for transactions (-1 means Last Irreversible 
Block)") + ("txn-test-gen-threads", bpo::value()->default_value(2), "Number of worker threads in txn_test_gen thread pool") ; } @@ -401,6 +418,9 @@ void txn_test_gen_plugin::plugin_initialize(const variables_map& options) { try { my.reset( new txn_test_gen_plugin_impl ); my->txn_reference_block_lag = options.at( "txn-reference-block-lag" ).as(); + my->thread_pool_size = options.at( "txn-test-gen-threads" ).as(); + EOS_ASSERT( my->thread_pool_size > 0, chain::plugin_config_exception, + "txn-test-gen-threads ${num} must be greater than 0", ("num", my->thread_pool_size) ); } FC_LOG_AND_RETHROW() } diff --git a/plugins/wallet_plugin/CMakeLists.txt b/plugins/wallet_plugin/CMakeLists.txt index 8b3a6d7d7b1..27f0147c8d7 100644 --- a/plugins/wallet_plugin/CMakeLists.txt +++ b/plugins/wallet_plugin/CMakeLists.txt @@ -22,5 +22,8 @@ add_library( wallet_plugin yubihsm_wallet.cpp ${HEADERS} ) -target_link_libraries( wallet_plugin eosio_chain appbase ${security_framework} ${corefoundation_framework} ${localauthentication_framework} ${cocoa_framework}) +target_link_libraries( wallet_plugin yubihsm_static eosio_chain appbase ${security_framework} ${corefoundation_framework} ${localauthentication_framework} ${cocoa_framework}) target_include_directories( wallet_plugin PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/include" ) + +#sadly old cmake 2.8 support in yubihsm cmake prevents usage of target_include_directories there +target_include_directories( wallet_plugin PRIVATE "${CMAKE_SOURCE_DIR}/libraries/yubihsm/lib" ) \ No newline at end of file diff --git a/plugins/wallet_plugin/include/eosio/wallet_plugin/wallet_manager.hpp b/plugins/wallet_plugin/include/eosio/wallet_plugin/wallet_manager.hpp index e73bc32d971..e61fb7b7617 100644 --- a/plugins/wallet_plugin/include/eosio/wallet_plugin/wallet_manager.hpp +++ b/plugins/wallet_plugin/include/eosio/wallet_plugin/wallet_manager.hpp @@ -5,6 +5,7 @@ #pragma once #include #include +#include #include #include #include @@ -142,6 +143,7 @@ class wallet_manager { boost::filesystem::path lock_path = dir / "wallet.lock"; std::unique_ptr wallet_dir_lock; + void start_lock_watch(std::shared_ptr t); void initialize_lock(); }; diff --git a/plugins/wallet_plugin/include/eosio/wallet_plugin/yubihsm.h b/plugins/wallet_plugin/include/eosio/wallet_plugin/yubihsm.h deleted file mode 100755 index 7851a30bede..00000000000 --- a/plugins/wallet_plugin/include/eosio/wallet_plugin/yubihsm.h +++ /dev/null @@ -1,1933 +0,0 @@ -/* - * Copyright (c) 2016 Yubico AB - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials provided - * with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ - -/** - @mainpage - - @section Introduction - - Libyubihsm is a library for communicating with a YubiHSM device. - - @section Usage - - To use the library include and pass the -lyubihsm flag to the - linker. - Debug output is controlled with the function yh_set_verbosity(). - - First step of using a YubiHSM is to init the library with yh_init(), init a - connector with yh_init_connector() and then connect it with yh_connect_best(). - After this a session must be established with yh_create_session_derived() and - yh_authenticate_session(). - When a session is established commands can be exchanged over it, the functions - in the namespace yh_util are high-level convenience functions that do a - specific task with the device. - - @section api API Reference - - yubihsm.h - All public functions and definitions - - @section example Code example - Here is a small example of establishing a session with a YubiHSM and fetching - some random before shutting down the session. - \code{.c} - int main(void) { - yh_connector *connector = NULL; - yh_session *session = NULL; - uint8_t context[YH_CONTEXT_LEN] = {0}; - uint8_t data[128] = {0}; - size_t data_len = sizeof(data); - - assert(yh_init() == YHR_SUCCESS); - assert(yh_init_connector("http://localhost:12345", &connector) == - YHR_SUCCESS); - assert(yh_connect_best(&connector, 1, NULL) == YHR_SUCCESS); - assert(yh_create_session_derived(connector, 1, YH_DEFAULT_PASSWORD, - strlen(YH_DEFAULT_PASSWORD), false, context, sizeof(context), &session) == - YHR_SUCCESS); - assert(yh_authenticate_session(session, context, sizeof(context)) == - YHR_SUCCESS); - assert(yh_util_get_random(session, sizeof(data), data, &data_len) == - YHR_SUCCESS); - assert(data_len == sizeof(data)); - assert(yh_util_close_session(session) == YHR_SUCCESS); - assert(yh_destroy_session(&session) == YHR_SUCCESS); - assert(yh_disconnect(connector) == YHR_SUCCESS); - } - \endcode - - */ - -/** @file yubihsm.h - * - * Everything you need for yubihsm. - */ - -#ifndef YUBIHSM_H -#define YUBIHSM_H - -#include -#include -#include -#include - -/// Length of context array for authentication -#define YH_CONTEXT_LEN 16 -/// Length of host challenge for authentication -#define YH_HOST_CHAL_LEN 8 -/// Maximum length of message buffer -#define YH_MSG_BUF_SIZE 2048 -/// Length of authentication keys -#define YH_KEY_LEN 16 -/// Device vendor ID -#define YH_VID 0x1050 -/// Device product ID -#define YH_PID 0x0030 -/// Response flag for commands -#define YH_CMD_RESP_FLAG 0x80 -/// Max items the device may hold -#define YH_MAX_ITEMS_COUNT \ - 256 // TODO: should this really be defined in the API? -/// Max sessions the device may hold -#define YH_MAX_SESSIONS 16 // TODO: same here, really part of the API? 
-/// Default encryption key -#define YH_DEFAULT_ENC_KEY \ - "\x09\x0b\x47\xdb\xed\x59\x56\x54\x90\x1d\xee\x1c\xc6\x55\xe4\x20" -/// Default MAC key -#define YH_DEFAULT_MAC_KEY \ - "\x59\x2f\xd4\x83\xf7\x59\xe2\x99\x09\xa0\x4c\x45\x05\xd2\xce\x0a" -/// Default authentication key password -#define YH_DEFAULT_PASSWORD "password" -/// Salt to be used for PBKDF2 key derivation -#define YH_DEFAULT_SALT "Yubico" -/// Number of iterations for PBKDF2 key derivation -#define YH_DEFAULT_ITERS 10000 -/// Length of capabilities array -#define YH_CAPABILITIES_LEN 8 -/// Max log entries the device may hold -#define YH_MAX_LOG_ENTRIES 64 // TODO: really part of the API? -/// Length of object labels -#define YH_OBJ_LABEL_LEN 40 -/// Max number of domains -#define YH_MAX_DOMAINS 16 - -// Debug levels -/// No messages -#define YH_VERB_QUIET 0x00 -/// Intermediate results -#define YH_VERB_INTERMEDIATE 0x01 -/// Crypto results -#define YH_VERB_CRYPTO 0x02 -/// Raw messages -#define YH_VERB_RAW 0x04 -/// General info -#define YH_VERB_INFO 0x08 -/// Error messages -#define YH_VERB_ERR 0x10 -/// All previous options enabled -#define YH_VERB_ALL 0xff - -/// This is the overhead when doing aes-ccm wrapping, 1 byte identifier, 13 -/// bytes nonce and 16 bytes mac -#define YH_CCM_WRAP_OVERHEAD (1 + 13 + 16) - -#ifdef __cplusplus -extern "C" { -#endif - -/// Reference to a connector -typedef struct yh_connector yh_connector; - -/// Reference to a session -typedef struct yh_session yh_session; - -/// Capabilitites representation -typedef struct { - /// Capabilities is represented as an 8 byte uint8_t array - uint8_t capabilities[YH_CAPABILITIES_LEN]; -} yh_capabilities; - -/** - * Return codes. - **/ -typedef enum { - /// Success - YHR_SUCCESS = 0, - /// Memory error - YHR_MEMORY = -1, - /// Init error - YHR_INIT_ERROR = -2, - /// Net error - YHR_NET_ERROR = -3, - /// Connector not found - YHR_CONNECTOR_NOT_FOUND = -4, - /// Invalid parameters - YHR_INVALID_PARAMS = -5, - /// Wrong length - YHR_WRONG_LENGTH = -6, - /// Buffer too small - YHR_BUFFER_TOO_SMALL = -7, - /// Cryptogram error - YHR_CRYPTOGRAM_MISMATCH = -8, - /// Authenticate session error - YHR_AUTH_SESSION_ERROR = -9, - /// MAC not matching - YHR_MAC_MISMATCH = -10, - /// Device success - YHR_DEVICE_OK = -11, - /// Invalid command - YHR_DEVICE_INV_COMMAND = -12, - /// Malformed command / invalid data - YHR_DEVICE_INV_DATA = -13, - /// Invalid session - YHR_DEVICE_INV_SESSION = -14, - /// Message encryption / verification failed - YHR_DEVICE_AUTH_FAIL = -15, - /// All sessions are allocated - YHR_DEVICE_SESSIONS_FULL = -16, - /// Session creation failed - YHR_DEVICE_SESSION_FAILED = -17, - /// Storage failure - YHR_DEVICE_STORAGE_FAILED = -18, - /// Wrong length - YHR_DEVICE_WRONG_LENGTH = -19, - /// Wrong permissions for operation - YHR_DEVICE_INV_PERMISSION = -20, - /// Log buffer is full and forced audit is set - YHR_DEVICE_LOG_FULL = -21, - /// Object not found - YHR_DEVICE_OBJ_NOT_FOUND = -22, - /// Id use is illegal - YHR_DEVICE_ID_ILLEGAL = -23, - /// OTP submitted is invalid - YHR_DEVICE_INVALID_OTP = -24, - /// Device is in demo mode and has to be power cycled - YHR_DEVICE_DEMO_MODE = -25, - /// The command execution has not terminated - YHR_DEVICE_CMD_UNEXECUTED = -26, - /// Unknown error - YHR_GENERIC_ERROR = -27, - /// Object with that ID already exists - YHR_DEVICE_OBJECT_EXISTS = -28, - /// Connector operation failed - YHR_CONNECTOR_ERROR = -29 -} yh_rc; - -/// Macro to define command and response command -#define ADD_COMMAND(c, v) c = v, 
c##_R = v | YH_CMD_RESP_FLAG - -/** - * Command definitions - */ -typedef enum { - /// Echo - ADD_COMMAND(YHC_ECHO, 0x01), - /// Create session - ADD_COMMAND(YHC_CREATE_SES, 0x03), - /// Authenticate session - ADD_COMMAND(YHC_AUTH_SES, 0x04), - /// Session message - ADD_COMMAND(YHC_SES_MSG, 0x05), - /// Get device info - ADD_COMMAND(YHC_GET_DEVICE_INFO, 0x06), - /// BSL - ADD_COMMAND(YHC_BSL, 0x07), - /// Reset - ADD_COMMAND(YHC_RESET, 0x08), - /// Close session - ADD_COMMAND(YHC_CLOSE_SES, 0x40), - /// Storage statistics - ADD_COMMAND(YHC_STATS, 0x041), - /// Put opaque - ADD_COMMAND(YHC_PUT_OPAQUE, 0x42), - /// Get opaque - ADD_COMMAND(YHC_GET_OPAQUE, 0x43), - /// Put authentication key - ADD_COMMAND(YHC_PUT_AUTHKEY, 0x44), - /// Put asymmetric key - ADD_COMMAND(YHC_PUT_ASYMMETRIC_KEY, 0x45), - /// Generate asymmetric key - ADD_COMMAND(YHC_GEN_ASYMMETRIC_KEY, 0x46), - /// Sign data with PKCS1 - ADD_COMMAND(YHC_SIGN_DATA_PKCS1, 0x47), - /// List objects - ADD_COMMAND(YHC_LIST, 0x48), - /// Decrypt data with PKCS1 - ADD_COMMAND(YHC_DECRYPT_PKCS1, 0x49), - /// Export an object wrapped - ADD_COMMAND(YHC_EXPORT_WRAPPED, 0x4a), - /// Import a wrapped object - ADD_COMMAND(YHC_IMPORT_WRAPPED, 0x4b), - /// Put wrap key - ADD_COMMAND(YHC_PUT_WRAP_KEY, 0x4c), - /// Get audit logs - ADD_COMMAND(YHC_GET_LOGS, 0x4d), - /// Get object information - ADD_COMMAND(YHC_GET_OBJECT_INFO, 0x4e), - /// Put a global option - ADD_COMMAND(YHC_PUT_OPTION, 0x4f), - /// Get a global option - ADD_COMMAND(YHC_GET_OPTION, 0x50), - /// Get pseudo random data - ADD_COMMAND(YHC_GET_PSEUDO_RANDOM, 0x51), - /// Put HMAC key - ADD_COMMAND(YHC_PUT_HMAC_KEY, 0x52), - /// HMAC data - ADD_COMMAND(YHC_HMAC_DATA, 0x53), - /// Get a public key - ADD_COMMAND(YHC_GET_PUBKEY, 0x54), - /// Sign data with PSS - ADD_COMMAND(YHC_SIGN_DATA_PSS, 0x55), - /// Sign data with ECDSA - ADD_COMMAND(YHC_SIGN_DATA_ECDSA, 0x56), - /// Perform a ECDH exchange - ADD_COMMAND(YHC_DECRYPT_ECDH, 0x57), - /// Delete an object - ADD_COMMAND(YHC_DELETE_OBJECT, 0x58), - /// Decrypt data with OAEP - ADD_COMMAND(YHC_DECRYPT_OAEP, 0x59), - /// Generate HMAC key - ADD_COMMAND(YHC_GENERATE_HMAC_KEY, 0x5a), - /// Generate wrap key - ADD_COMMAND(YHC_GENERATE_WRAP_KEY, 0x5b), - /// Verify HMAC data - ADD_COMMAND(YHC_VERIFY_HMAC, 0x5c), - /// SSH Certify - ADD_COMMAND(YHC_SSH_CERTIFY, 0x5d), - /// Put template - ADD_COMMAND(YHC_PUT_TEMPLATE, 0x5e), - /// Get template - ADD_COMMAND(YHC_GET_TEMPLATE, 0x5f), - /// Decrypt OTP - ADD_COMMAND(YHC_OTP_DECRYPT, 0x60), - /// Create OTP AEAD - ADD_COMMAND(YHC_OTP_AEAD_CREATE, 0x61), - /// Create OTP AEAD from random - ADD_COMMAND(YHC_OTP_AEAD_RANDOM, 0x62), - /// Rewrap OTP AEAD - ADD_COMMAND(YHC_OTP_AEAD_REWRAP, 0x63), - /// Attest an asymmetric key - ADD_COMMAND(YHC_ATTEST_ASYMMETRIC, 0x64), - /// Put OTP AEAD key - ADD_COMMAND(YHC_PUT_OTP_AEAD_KEY, 0x65), - /// Generate OTP AEAD key - ADD_COMMAND(YHC_GENERATE_OTP_AEAD_KEY, 0x66), - /// Set log index - ADD_COMMAND(YHC_SET_LOG_INDEX, 0x67), - /// Wrap data - ADD_COMMAND(YHC_WRAP_DATA, 0x68), - /// Unwrap data - ADD_COMMAND(YHC_UNWRAP_DATA, 0x69), - /// Sign data with EDDSA - ADD_COMMAND(YHC_SIGN_DATA_EDDSA, 0x6a), - /// Blink the device - ADD_COMMAND(YHC_BLINK, 0x6b), - /// Error - YHC_ERROR = 0x7f, -} yh_cmd; - -#undef ADD_COMMAND - -/** - * Object types - */ -typedef enum { - /// Opaque object - YH_OPAQUE = 0x01, - /// Authentication key - YH_AUTHKEY = 0x02, - /// Asymmetric key - YH_ASYMMETRIC = 0x03, - /// Wrap key - YH_WRAPKEY = 0x04, - /// HMAC key - YH_HMACKEY = 0x05, - 
/// Template - YH_TEMPLATE = 0x06, - /// OTP AEAD key - YH_OTP_AEAD_KEY = 0x07, - /// Public key (virtual..) - YH_PUBLIC = 0x83, -} yh_object_type; - -/// Max number of algorithms defined here -#define YH_MAX_ALGORITHM_COUNT 0xff -/** - * Algorithms - */ -typedef enum { - YH_ALGO_RSA_PKCS1_SHA1 = 1, - YH_ALGO_RSA_PKCS1_SHA256 = 2, - YH_ALGO_RSA_PKCS1_SHA384 = 3, - YH_ALGO_RSA_PKCS1_SHA512 = 4, - YH_ALGO_RSA_PSS_SHA1 = 5, - YH_ALGO_RSA_PSS_SHA256 = 6, - YH_ALGO_RSA_PSS_SHA384 = 7, - YH_ALGO_RSA_PSS_SHA512 = 8, - YH_ALGO_RSA_2048 = 9, - YH_ALGO_RSA_3072 = 10, - YH_ALGO_RSA_4096 = 11, - YH_ALGO_EC_P256 = 12, - YH_ALGO_EC_P384 = 13, - YH_ALGO_EC_P521 = 14, - YH_ALGO_EC_K256 = 15, - YH_ALGO_EC_BP256 = 16, - YH_ALGO_EC_BP384 = 17, - YH_ALGO_EC_BP512 = 18, - YH_ALGO_HMAC_SHA1 = 19, - YH_ALGO_HMAC_SHA256 = 20, - YH_ALGO_HMAC_SHA384 = 21, - YH_ALGO_HMAC_SHA512 = 22, - YH_ALGO_EC_ECDSA_SHA1 = 23, - YH_ALGO_EC_ECDH = 24, - YH_ALGO_RSA_OAEP_SHA1 = 25, - YH_ALGO_RSA_OAEP_SHA256 = 26, - YH_ALGO_RSA_OAEP_SHA384 = 27, - YH_ALGO_RSA_OAEP_SHA512 = 28, - YH_ALGO_AES128_CCM_WRAP = 29, - YH_ALGO_OPAQUE_DATA = 30, - YH_ALGO_OPAQUE_X509_CERT = 31, - YH_ALGO_MGF1_SHA1 = 32, - YH_ALGO_MGF1_SHA256 = 33, - YH_ALGO_MGF1_SHA384 = 34, - YH_ALGO_MGF1_SHA512 = 35, - YH_ALGO_TEMPL_SSH = 36, - YH_ALGO_YUBICO_OTP_AES128 = 37, - YH_ALGO_YUBICO_AES_AUTH = 38, - YH_ALGO_YUBICO_OTP_AES192 = 39, - YH_ALGO_YUBICO_OTP_AES256 = 40, - YH_ALGO_AES192_CCM_WRAP = 41, - YH_ALGO_AES256_CCM_WRAP = 42, - YH_ALGO_EC_ECDSA_SHA256 = 43, - YH_ALGO_EC_ECDSA_SHA384 = 44, - YH_ALGO_EC_ECDSA_SHA512 = 45, - YH_ALGO_EC_ED25519 = 46, - YH_ALGO_EC_P224 = 47, -} yh_algorithm; - -/** - * Global options - */ -typedef enum { - /// Forced audit mode - YH_OPTION_FORCE_AUDIT = 1, - /// Audit logging per command - YH_OPTION_COMMAND_AUDIT = 3, -} yh_option; - -/** - * Options for the connector, set with yh_set_connector_option() - */ -typedef enum { - /// File with CA certificate to validate the connector with (const char *) not - /// implemented on windows - YH_CONNECTOR_HTTPS_CA = 1, - /// Proxy server to use for connecting to the connector (const char *) not - /// implemented on windows - YH_CONNECTOR_PROXY_SERVER = 2, -} yh_connector_option; - -/// Size that the log digest is truncated to -#define YH_LOG_DIGEST_SIZE 16 -#pragma pack(push, 1) -/** - * Logging struct as returned by device - */ -typedef struct { - /// Monotonically increasing index - uint16_t number; - /// What command was executed @see yh_cmd - uint8_t command; - /// Length of in-data - uint16_t length; - /// ID of authentication key used - uint16_t session_key; - /// ID of first object used - uint16_t target_key; - /// ID of second object used - uint16_t second_key; - /// Command result @see yh_cmd - uint8_t result; - /// Systick at time of execution - uint32_t systick; - /// Truncated sha256 digest of this last digest + this entry - uint8_t digest[YH_LOG_DIGEST_SIZE]; -} yh_log_entry; - -/** - * Object descriptor - */ -typedef struct { - /// Object capabilities @see yh_capabilities - yh_capabilities capabilities; - /// Object ID - uint16_t id; - /// Object length - uint16_t len; - /// Object domains - uint16_t domains; - /// Object type - yh_object_type type; - /// Object algorithm - yh_algorithm algorithm; - /// Object sequence - uint8_t sequence; - /// Object origin - uint8_t origin; - /// Object label - char label[YH_OBJ_LABEL_LEN + 1]; - /// Object delegated capabilities - yh_capabilities delegated_capabilities; -} yh_object_descriptor; -#pragma pack(pop) - -static const struct { - 
const char *name; - int bit; -} yh_capability[] = { - {"asymmetric_decrypt_ecdh", 0x0b}, - {"asymmetric_decrypt_oaep", 0x0a}, - {"asymmetric_decrypt_pkcs", 0x09}, - {"asymmetric_gen", 0x04}, - {"asymmetric_sign_ecdsa", 0x07}, - {"asymmetric_sign_eddsa", 0x08}, - {"asymmetric_sign_pkcs", 0x05}, - {"asymmetric_sign_pss", 0x06}, - {"attest", 0x22}, - {"audit", 0x18}, - {"export_under_wrap", 0x10}, - {"export_wrapped", 0x0c}, - {"delete_asymmetric", 0x29}, - {"delete_authkey", 0x28}, - {"delete_hmackey", 0x2b}, - {"delete_opaque", 0x27}, - {"delete_otp_aead_key", 0x2d}, - {"delete_template", 0x2c}, - {"delete_wrapkey", 0x2a}, - {"generate_otp_aead_key", 0x24}, - {"generate_wrapkey", 0x0f}, - {"get_opaque", 0x00}, - {"get_option", 0x12}, - {"get_randomness", 0x13}, - {"get_template", 0x1a}, - {"hmackey_generate", 0x15}, - {"hmac_data", 0x16}, - {"hmac_verify", 0x17}, - {"import_wrapped", 0x0d}, - {"otp_aead_create", 0x1e}, - {"otp_aead_random", 0x1f}, - {"otp_aead_rewrap_from", 0x20}, - {"otp_aead_rewrap_to", 0x21}, - {"otp_decrypt", 0x1d}, - {"put_asymmetric", 0x03}, - {"put_authkey", 0x02}, - {"put_hmackey", 0x14}, - {"put_opaque", 0x01}, - {"put_option", 0x11}, - {"put_otp_aead_key", 0x23}, - {"put_template", 0x1b}, - {"put_wrapkey", 0x0e}, - {"reset", 0x1c}, - {"ssh_certify", 0x19}, - {"unwrap_data", 0x26}, - {"wrap_data", 0x25}, -}; - -static const struct { - const char *name; - yh_algorithm algorithm; -} yh_algorithms[] = { - {"aes128-ccm-wrap", YH_ALGO_AES128_CCM_WRAP}, - {"aes192-ccm-wrap", YH_ALGO_AES192_CCM_WRAP}, - {"aes256-ccm-wrap", YH_ALGO_AES256_CCM_WRAP}, - {"ecbp256", YH_ALGO_EC_BP256}, - {"ecbp384", YH_ALGO_EC_BP384}, - {"ecbp512", YH_ALGO_EC_BP512}, - {"ecdsa-sha1", YH_ALGO_EC_ECDSA_SHA1}, - {"ecdsa-sha256", YH_ALGO_EC_ECDSA_SHA256}, - {"ecdsa-sha384", YH_ALGO_EC_ECDSA_SHA384}, - {"ecdsa-sha512", YH_ALGO_EC_ECDSA_SHA512}, - {"ecdh", YH_ALGO_EC_ECDH}, - {"eck256", YH_ALGO_EC_K256}, - {"ecp224", YH_ALGO_EC_P224}, - {"ecp256", YH_ALGO_EC_P256}, - {"ecp384", YH_ALGO_EC_P384}, - {"ecp521", YH_ALGO_EC_P521}, - {"ed25519", YH_ALGO_EC_ED25519}, - {"hmac-sha1", YH_ALGO_HMAC_SHA1}, - {"hmac-sha256", YH_ALGO_HMAC_SHA256}, - {"hmac-sha384", YH_ALGO_HMAC_SHA384}, - {"hmac-sha512", YH_ALGO_HMAC_SHA512}, - {"mgf1-sha1", YH_ALGO_MGF1_SHA1}, - {"mgf1-sha256", YH_ALGO_MGF1_SHA256}, - {"mgf1-sha384", YH_ALGO_MGF1_SHA384}, - {"mgf1-sha512", YH_ALGO_MGF1_SHA512}, - {"opaque", YH_ALGO_OPAQUE_DATA}, - {"rsa2048", YH_ALGO_RSA_2048}, - {"rsa3072", YH_ALGO_RSA_3072}, - {"rsa4096", YH_ALGO_RSA_4096}, - {"rsa-pkcs1-sha1", YH_ALGO_RSA_PKCS1_SHA1}, - {"rsa-pkcs1-sha256", YH_ALGO_RSA_PKCS1_SHA256}, - {"rsa-pkcs1-sha384", YH_ALGO_RSA_PKCS1_SHA384}, - {"rsa-pkcs1-sha512", YH_ALGO_RSA_PKCS1_SHA512}, - {"rsa-pss-sha1", YH_ALGO_RSA_PSS_SHA1}, - {"rsa-pss-sha256", YH_ALGO_RSA_PSS_SHA256}, - {"rsa-pss-sha384", YH_ALGO_RSA_PSS_SHA384}, - {"rsa-pss-sha512", YH_ALGO_RSA_PSS_SHA512}, - {"rsa-oaep-sha1", YH_ALGO_RSA_OAEP_SHA1}, - {"rsa-oaep-sha256", YH_ALGO_RSA_OAEP_SHA256}, - {"rsa-oaep-sha384", YH_ALGO_RSA_OAEP_SHA384}, - {"rsa-oaep-sha512", YH_ALGO_RSA_OAEP_SHA512}, - {"template-ssh", YH_ALGO_TEMPL_SSH}, - {"x509-cert", YH_ALGO_OPAQUE_X509_CERT}, - {"yubico-aes-auth", YH_ALGO_YUBICO_AES_AUTH}, - {"yubico-otp-aes128", YH_ALGO_YUBICO_OTP_AES128}, - {"yubico-otp-aes192", YH_ALGO_YUBICO_OTP_AES192}, - {"yubico-otp-aes256", YH_ALGO_YUBICO_OTP_AES256}, -}; - -static const struct { - const char *name; - yh_object_type type; -} yh_types[] = { - {"authkey", YH_AUTHKEY}, {"asymmetric", YH_ASYMMETRIC}, - {"hmackey", 
YH_HMACKEY}, {"opaque", YH_OPAQUE}, - {"otpaeadkey", YH_OTP_AEAD_KEY}, {"template", YH_TEMPLATE}, - {"wrapkey", YH_WRAPKEY}, -}; - -static const struct { - const char *name; - yh_option option; -} yh_options[] = { - {"command_audit", YH_OPTION_COMMAND_AUDIT}, - {"force_audit", YH_OPTION_FORCE_AUDIT}, -}; - -/// Origin is generated -#define YH_ORIGIN_GENERATED 0x01 -/// Origin is imported -#define YH_ORIGIN_IMPORTED 0x02 -/// Origin is wrapped (note: this is used in combination with objects original -/// origin) -#define YH_ORIGIN_IMPORTED_WRAPPED 0x10 - -/** - * Return a string describing an error condition - * - * @param err yh_rc error code - * - * @return String with descriptive error - **/ -const char *yh_strerror(yh_rc err); - -/** - * Set verbosity - * This function may be called prior to global library initialization. - * - * @param verbosity - * - * @return yh_rc error code - **/ -yh_rc yh_set_verbosity(uint8_t verbosity); - -/** - * Get verbosity - * - * @param verbosity - * - * @return yh_rc error code - **/ -yh_rc yh_get_verbosity(uint8_t *verbosity); - -/** - * Set file for debug output - * - * @param output - * - * @return void - **/ -void yh_set_debug_output(FILE *output); - -/** - * Global library initialization - * - * @return yh_rc error code - **/ -yh_rc yh_init(void); - -/** - * Global library cleanup - * - * @return yh_rc error code - **/ -yh_rc yh_exit(void); - -/** - * Instantiate a new connector - * - * @param url URL to associate with this connector - * @param connector reference to connector - * - * @return yh_rc error code - */ -yh_rc yh_init_connector(const char *url, yh_connector **connector); - -/** - * Set connector options - * - * @param connector connector to set an option on - * @param opt option to set @see yh_connector_option - * @param val value to set, type is specific for the given option - * - * @return yh_rc error code - **/ -yh_rc yh_set_connector_option(yh_connector *connector, yh_connector_option opt, - const void *val); - -/** - * Connect to all specified connectors - * - * @param connectors pointer of connector array - * @param n_connectors number of connectors in array (will be set to - *successful connectors on return) - * @param timeout timeout in seconds - * - * @return yh_rc error code - **/ -yh_rc yh_connect_all(yh_connector **connectors, size_t *n_connectors, - int timeout); - -/** - * Connect to one connector in array - * - * @param connectors pointer of connector array - * @param n_connectors number of connectors in array - * @param idx index of connected connector, may be NULL - * - * @return yh_rc error code - **/ -yh_rc yh_connect_best(yh_connector **connectors, size_t n_connectors, int *idx); - -/** - * Disconnect from connector - * - * @param connector connector to disconnect from - * - * @return yh_rc error code - **/ -yh_rc yh_disconnect(yh_connector *connector); - -/** - * Send a plain message to a connector - * - * @param connector connector to send to - * @param cmd command to send - * @param data data to send - * @param data_len data length - * @param response_cmd response command - * @param response response data - * @param response_len response length - * - * @return yh_rc error code - **/ -yh_rc yh_send_plain_msg(yh_connector *connector, yh_cmd cmd, - const uint8_t *data, size_t data_len, - yh_cmd *response_cmd, uint8_t *response, - size_t *response_len); - -/** - * Send an encrypted message over a session - * - * @param session session to send over - * @param cmd command to send - * @param data data to send - * @param 
data_len data length - * @param response_cmd response command - * @param response response data - * @param response_len response length - * - * @return yh_rc error code - **/ -yh_rc yh_send_secure_msg(yh_session *session, yh_cmd cmd, const uint8_t *data, - size_t data_len, yh_cmd *response_cmd, - uint8_t *response, size_t *response_len); - -/** - * Create a session with keys derived frm password - * - * @param connector connector to create the session with - * @param auth_keyset_id ID of the authentication key to be used - * @param password password to derive keys from - * @param password_len length of the password in bytes - * @param recreate_session session will be recreated if expired, this caches the - *password in memory - * @param context context data for the authentication - * @param context_len context length - * @param session created session - * - * @return yh_rc error code - **/ -yh_rc yh_create_session_derived(yh_connector *connector, - uint16_t auth_keyset_id, - const uint8_t *password, size_t password_len, - bool recreate_session, uint8_t *context, - size_t context_len, yh_session **session); - -/** - * Create a session - * - * @param connector connector to create the session with - * @param auth_keyset_id ID of the authentication key - * @param key_enc encryption key - * @param key_enc_len length of encryption key - * @param key_mac MAC key - * @param key_mac_len length of MAC key - * @param recreate_session session will be recreated if expired, this caches the - *password in memory - * @param context context data for the authentication - * @param context_len context length - * @param session created session - * - * @return yh_rc error code - **/ -yh_rc yh_create_session(yh_connector *connector, uint16_t auth_keyset_id, - const uint8_t *key_enc, size_t key_enc_len, - const uint8_t *key_mac, size_t key_mac_len, - bool recreate_session, uint8_t *context, - size_t context_len, yh_session **session); - -/** - * Begin create extenal session - * - * @param connector connector to create the session with - * @param auth_keyset_id ID of the authentication key - * @param context context data for the authentication - * @param context_len length of context data - * @param card_cryptogram card cryptogram - * @param card_cryptogram_len catd cryptogram length - * @param session created session - * - * @return yh_rc error code - **/ -yh_rc yh_begin_create_session_ext(yh_connector *connector, - uint16_t auth_keyset_id, uint8_t *context, - size_t context_len, uint8_t *card_cryptogram, - size_t card_cryptogram_len, - yh_session **session); - -/** - * Finish creating external session - * - * @param connector connector to create the session with - * @param session session - * @param key_senc session encryption key - * @param key_senc_len session encrypt key length - * @param key_smac session MAC key - * @param key_smac_len session MAC key length - * @param key_srmac session return MAC key - * @param key_srmac_len session return MAC key length - * @param context context data - * @param context_len context length - * @param card_cryptogram card cryptogram - * @param card_cryptogram_len card cryptogram length - * - * @return yh_rc error code - **/ -yh_rc yh_finish_create_session_ext(yh_connector *connector, yh_session *session, - const uint8_t *key_senc, size_t key_senc_len, - const uint8_t *key_smac, size_t key_smac_len, - const uint8_t *key_srmac, - size_t key_srmac_len, uint8_t *context, - size_t context_len, uint8_t *card_cryptogram, - size_t card_cryptogram_len); - -/** - * Free data 
associated with session - * - * @param session session to destroy - * - * @return yh_rc error code - **/ -yh_rc yh_destroy_session(yh_session **session); - -/** - * Authenticate session - * - * @param session session to authenticate - * @param context context data - * @param context_len context length - * - * @return yh_rc error code - **/ -yh_rc yh_authenticate_session(yh_session *session, uint8_t *context, - size_t context_len); - -// Utility and convenience functions below - -/** - * Get device info - * - * @param connector connector to send over - * @param major version major - * @param minor version minor - * @param patch version path - * @param serial serial number - * @param log_total total number of log entries - * @param log_used log entries used - * @param algorithms algorithms array - * @param n_algorithms number of algorithms - * - * @return yh_rc error code - **/ -yh_rc yh_util_get_device_info(yh_connector *connector, uint8_t *major, - uint8_t *minor, uint8_t *patch, uint32_t *serial, - uint8_t *log_total, uint8_t *log_used, - yh_algorithm *algorithms, size_t *n_algorithms); - -/** - * List objects - * - * @param session session to use - * @param id ID to filter by (0 to not filter by ID) - * @param type Type to filter by (0 to not filter by type) @see yh_object_type - * @param domains Domains to filter by (0 to not filter by domain) - * @param capabilities Capabilities to filter by (0 to not filter by - *capabilities) @see yh_capabilities - * @param algorithm Algorithm to filter by (0 to not filter by algorithm) - * @param label Label to filter by - * @param objects Array of objects returned - * @param n_objects Max length of objects (will be set to number found on - *return) - * - * @return yh_rc error code - **/ -yh_rc yh_util_list_objects(yh_session *session, uint16_t id, - yh_object_type type, uint16_t domains, - const yh_capabilities *capabilities, - yh_algorithm algorithm, const char *label, - yh_object_descriptor *objects, size_t *n_objects); - -/** - * Get object info - * - * @param session session to use - * @param id Object ID - * @param type Object type - * @param object object information - * - * @return yh_rc error code - **/ -yh_rc yh_util_get_object_info(yh_session *session, uint16_t id, - yh_object_type type, - yh_object_descriptor *object); - -/** - * Get Public key - * - * @param session session to use - * @param id Object ID - * @param data Data out - * @param datalen Data length - * @param algorithm Algorithm of object - * - * @return yh_rc error code - **/ -yh_rc yh_util_get_pubkey(yh_session *session, uint16_t id, uint8_t *data, - size_t *datalen, yh_algorithm *algorithm); - -/** - * Close session - * - * @param session session to close - * - * @return yh_rc error code - **/ -yh_rc yh_util_close_session(yh_session *session); - -/** - * Sign data using PKCS1 v1.5 - * - * in is either a raw hashed message (sha1, sha256, sha384 or sha512) - *or that with correct digestinfo pre-pended. - * - * @param session session to use - * @param key_id Object ID - * @param hashed if data is only hashed - * @param in in data to sign - * @param in_len length of in - * @param out signed data - * @param out_len length of signed data - * - * @return yh_rc error code - **/ -yh_rc yh_util_sign_pkcs1v1_5(yh_session *session, uint16_t key_id, bool hashed, - const uint8_t *in, size_t in_len, uint8_t *out, - size_t *out_len); - -/** - * Sign data using PSS - * - * in is a raw hashed message (sha1, sha256, sha384 or sha512). 
- * - * @param session session to use - * @param key_id Object ID - * @param in data to sign - * @param in_len length of in - * @param out signed data - * @param out_len length of signed data - * @param salt_len length of salt - * @param mgf1Algo algorithm for mgf1 - * - * @return yh_rc error code - **/ -yh_rc yh_util_sign_pss(yh_session *session, uint16_t key_id, const uint8_t *in, - size_t in_len, uint8_t *out, size_t *out_len, - size_t salt_len, yh_algorithm mgf1Algo); - -/** - * Sign data using ECDSA - * - * in is a raw hashed message, a truncated hash to the curve length or - *a padded hash to the curve length. - * - * @param session session to use - * @param key_id Object ID - * @param in data to sign - * @param in_len length of in - * @param out signed data - * @param out_len length of signed data - * - * @return yh_rc error code - **/ -yh_rc yh_util_sign_ecdsa(yh_session *session, uint16_t key_id, - const uint8_t *in, size_t in_len, uint8_t *out, - size_t *out_len); - -/** - * Sign data using EDDSA - * - * @param session session to use - * @param key_id Object ID - * @param in data to sign - * @param in_len length of in - * @param out signed data - * @param out_len length of signed data - * - * @return yh_rc error code - **/ -yh_rc yh_util_sign_eddsa(yh_session *session, uint16_t key_id, - const uint8_t *in, size_t in_len, uint8_t *out, - size_t *out_len); - -/** - * HMAC data - * - * @param session session to use - * @param key_id Object ID - * @param in data to hmac - * @param in_len length of in - * @param out HMAC - * @param out_len length of HMAC - * - * @return yh_rc error code - **/ -yh_rc yh_util_hmac(yh_session *session, uint16_t key_id, const uint8_t *in, - size_t in_len, uint8_t *out, size_t *out_len); - -/** - * Get pseudo random data - * - * @param session session to use - * @param len length of data to get - * @param out random data out - * @param out_len length of random data - * - * @return yh_rc error code - **/ -yh_rc yh_util_get_random(yh_session *session, size_t len, uint8_t *out, - size_t *out_len); - -/** - * Import RSA key - * - * @param session session to use - * @param key_id Object ID - * @param label Label - * @param domains domains - * @param capabilities capabilities - * @param algorithm algorithm - * @param p P - * @param q Q - * - * @return yh_rc error code - **/ -yh_rc yh_util_import_key_rsa(yh_session *session, uint16_t *key_id, - const char *label, uint16_t domains, - const yh_capabilities *capabilities, - yh_algorithm algorithm, const uint8_t *p, - const uint8_t *q); - -/** - * Import EC key - * - * @param session session to use - * @param key_id Object ID - * @param label Label - * @param domains domains - * @param capabilities capabilities - * @param algorithm algorithm - * @param s S - * - * @return yh_rc error code - **/ -yh_rc yh_util_import_key_ec(yh_session *session, uint16_t *key_id, - const char *label, uint16_t domains, - const yh_capabilities *capabilities, - yh_algorithm algorithm, const uint8_t *s); - -/** - * Import ED key - * - * @param session session to use - * @param key_id Object ID - * @param label Label - * @param domains domains - * @param capabilities capabilities - * @param algorithm algorithm - * @param k k - * - * @return yh_rc error code - **/ -yh_rc yh_util_import_key_ed(yh_session *session, uint16_t *key_id, - const char *label, uint16_t domains, - const yh_capabilities *capabilities, - yh_algorithm algorithm, const uint8_t *k); - -/** - * Import HMAC key - * - * @param session session to use - * @param key_id Object 
ID - * @param label Label - * @param domains domains - * @param capabilities capabilities - * @param algorithm algorithm - * @param key key data - * @param key_len length of key - * - * @return yh_rc error code - **/ -yh_rc yh_util_import_key_hmac(yh_session *session, uint16_t *key_id, - const char *label, uint16_t domains, - const yh_capabilities *capabilities, - yh_algorithm algorithm, const uint8_t *key, - size_t key_len); - -/** - * Generate RSA key - * - * @param session session to use - * @param key_id Object ID - * @param label Label - * @param domains domains - * @param capabilities capabilities - * @param algorithm algorithm - * - * @return yh_rc error code - **/ -yh_rc yh_util_generate_key_rsa(yh_session *session, uint16_t *key_id, - const char *label, uint16_t domains, - const yh_capabilities *capabilities, - yh_algorithm algorithm); - -/** - * Generate EC key - * - * @param session session to use - * @param key_id Object ID - * @param label Label - * @param domains domains - * @param capabilities capabilities - * @param algorithm algorithm - * - * @return yh_rc error code - **/ -yh_rc yh_util_generate_key_ec(yh_session *session, uint16_t *key_id, - const char *label, uint16_t domains, - const yh_capabilities *capabilities, - yh_algorithm algorithm); - -/** - * Generate ED key - * - * @param session session to use - * @param key_id Object ID - * @param label Label - * @param domains domains - * @param capabilities capabilities - * @param algorithm algorithm - * - * @return yh_rc error code - **/ -yh_rc yh_util_generate_key_ed(yh_session *session, uint16_t *key_id, - const char *label, uint16_t domains, - const yh_capabilities *capabilities, - yh_algorithm algorithm); - -/** - * Verify HMAC data - * - * @param session session to use - * @param key_id Object ID - * @param signature HMAC - * @param signature_len HMAC length - * @param data data to verify - * @param data_len data length - * @param verified if verification succeeded - * - * @return yh_rc error code - **/ -yh_rc yh_util_hmac_verify(yh_session *session, uint16_t key_id, - const uint8_t *signature, size_t signature_len, - const uint8_t *data, size_t data_len, bool *verified); - -/** - * Generate HMAC key - * - * @param session session to use - * @param key_id Object ID - * @param label Label - * @param domains domains - * @param capabilities capabilities - * @param algorithm algorithm - * - * @return yh_rc error code - **/ -yh_rc yh_util_generate_key_hmac(yh_session *session, uint16_t *key_id, - const char *label, uint16_t domains, - const yh_capabilities *capabilities, - yh_algorithm algorithm); - -/** - * Decrypt PKCS1 v1.5 data - * - * @param session session to use - * @param key_id Object ID - * @param in Encrypted data - * @param in_len length of encrypted data - * @param out Decrypted data - * @param out_len length of decrypted data - * - * @return yh_rc error code - **/ -yh_rc yh_util_decrypt_pkcs1v1_5(yh_session *session, uint16_t key_id, - const uint8_t *in, size_t in_len, uint8_t *out, - size_t *out_len); - -/** - * Decrypt OAEP data - * - * @param session session to use - * @param key_id Object ID - * @param in Encrypted data - * @param in_len length of encrypted data - * @param out Decrypted data - * @param out_len length of decrypted data - * @param label OAEP label - * @param label_len label length - * @param mgf1Algo MGF1 algorithm - * - * @return yh_rc error code - **/ -yh_rc yh_util_decrypt_oaep(yh_session *session, uint16_t key_id, - const uint8_t *in, size_t in_len, uint8_t *out, - size_t *out_len, 
const uint8_t *label, - size_t label_len, yh_algorithm mgf1Algo); - -/** - * Perform ECDH key exchange - * - * @param session session to use - * @param key_id Object ID - * @param in public key - * @param in_len length of public key - * @param out Agreed key - * @param out_len length of agreed key - * - * @return yh_rc error code - **/ -yh_rc yh_util_decrypt_ecdh(yh_session *session, uint16_t key_id, - const uint8_t *in, size_t in_len, uint8_t *out, - size_t *out_len); - -/** - * Delete an object - * - * @param session session to use - * @param id Object ID - * @param type Object type - * - * @return yh_rc error code - **/ -yh_rc yh_util_delete_object(yh_session *session, uint16_t id, - yh_object_type type); - -/** - * Export an object under wrap - * - * @param session session to use - * @param wrapping_key_id ID of wrapping key - * @param target_type Type of object - * @param target_id ID of object - * @param out wrapped data - * @param out_len length of wrapped data - * - * @return yh_rc error code - **/ -yh_rc yh_util_export_wrapped(yh_session *session, uint16_t wrapping_key_id, - yh_object_type target_type, uint16_t target_id, - uint8_t *out, size_t *out_len); - -/** - * Import a wrapped object - * - * @param session session to use - * @param wrapping_key_id ID of wrapping key - * @param in wrapped data - * @param in_len length of wrapped data - * @param target_type what type the imported object has - * @param target_id ID of imported object - * - * @return yh_rc error code - **/ -yh_rc yh_util_import_wrapped(yh_session *session, uint16_t wrapping_key_id, - const uint8_t *in, size_t in_len, - yh_object_type *target_type, uint16_t *target_id); - -/** - * Import a wrap key - * - * @param session session to use - * @param key_id Object ID - * @param label label - * @param domains domains - * @param capabilities capabilities - * @param algorithm algorithm - * @param delegated_capabilities delegated capabilities - * @param in key - * @param in_len key length - * - * @return yh_rc error code - **/ -yh_rc yh_util_import_key_wrap(yh_session *session, uint16_t *key_id, - const char *label, uint16_t domains, - const yh_capabilities *capabilities, - yh_algorithm algorithm, - const yh_capabilities *delegated_capabilities, - const uint8_t *in, size_t in_len); - -/** - * Generate a wrap key - * - * @param session session to use - * @param key_id Object ID - * @param label label - * @param domains domains - * @param capabilities capabilities - * @param algorithm algorithm - * @param delegated_capabilities delegated capabilitites - * - * @return yh_rc error code - **/ -yh_rc yh_util_generate_key_wrap(yh_session *session, uint16_t *key_id, - const char *label, uint16_t domains, - const yh_capabilities *capabilities, - yh_algorithm algorithm, - const yh_capabilities *delegated_capabilities); - -/** - * Get logs - * - * @param session session to use - * @param unlogged_boot number of unlogged boots - * @param unlogged_auth number of unlogged authentications - * @param out array of log entries - * @param n_items number of items in out - * - * @return yh_rc error code - **/ -yh_rc yh_util_get_logs(yh_session *session, uint16_t *unlogged_boot, - uint16_t *unlogged_auth, yh_log_entry *out, - size_t *n_items); - -/** - * Set the log index - * - * @param session session to use - * @param index index to set - * - * @return yh_rc error code - **/ -yh_rc yh_util_set_log_index(yh_session *session, uint16_t index); - -/** - * Get opaque object - * - * @param session session to use - * @param object_id Object ID - * 
@param out data - * @param out_len length of out - * - * @return yh_rc error code - **/ -yh_rc yh_util_get_opaque(yh_session *session, uint16_t object_id, uint8_t *out, - size_t *out_len); - -/** - * Import opaque object - * - * @param session session to use - * @param object_id Object ID - * @param label label - * @param domains domains - * @param capabilities - * @param algorithm algorithm - * @param in object data - * @param in_len length of in - * - * @return - **/ -yh_rc yh_util_import_opaque(yh_session *session, uint16_t *object_id, - const char *label, uint16_t domains, - const yh_capabilities *capabilities, - yh_algorithm algorithm, const uint8_t *in, - size_t in_len); - -/** - * SSH certify - * - * @param session session to use - * @param key_id Key ID - * @param template_id Template ID - * @param sig_algo signature algorithm - * @param in Certificate request - * @param in_len length of in - * @param out Signature - * @param out_len length of out - * - * @return yh_rc error code - **/ -yh_rc yh_util_ssh_certify(yh_session *session, uint16_t key_id, - uint16_t template_id, yh_algorithm sig_algo, - const uint8_t *in, size_t in_len, uint8_t *out, - size_t *out_len); - -/** - * Import authentication key - * - * @param session session to use - * @param key_id Object ID - * @param label label - * @param domains domains - * @param capabilities capabilities - * @param delegated_capabilities delegated capabilities - * @param password password to derive key from - * @param password_len password length in bytes - * - * @return yh_rc error code - **/ -yh_rc yh_util_import_authkey(yh_session *session, uint16_t *key_id, - const char *label, uint16_t domains, - const yh_capabilities *capabilities, - const yh_capabilities *delegated_capabilities, - const uint8_t *password, size_t password_len); - -/** - * Get template - * - * @param session session to use - * @param object_id Object ID - * @param out data - * @param out_len length of out - * - * @return yh_rc error code - **/ -yh_rc yh_util_get_template(yh_session *session, uint16_t object_id, - uint8_t *out, size_t *out_len); - -/** - * Import template - * - * @param session session to use - * @param object_id Object ID - * @param label label - * @param domains domains - * @param capabilities capabilities - * @param algorithm algorithm - * @param in data - * @param in_len length of in - * - * @return yh_rc error code - **/ -yh_rc yh_util_import_template(yh_session *session, uint16_t *object_id, - const char *label, uint16_t domains, - const yh_capabilities *capabilities, - yh_algorithm algorithm, const uint8_t *in, - size_t in_len); - -/** - * Create OTP AEAD - * - * @param session session to use - * @param key_id Object ID - * @param key OTP key - * @param private_id OTP private id - * @param out AEAD - * @param out_len length of out - * - * @return yh_rc error code - **/ -yh_rc yh_util_otp_aead_create(yh_session *session, uint16_t key_id, - const uint8_t *key, const uint8_t *private_id, - uint8_t *out, size_t *out_len); - -/** - * Create OTP AEAD from random - * - * @param session session to use - * @param key_id Object ID - * @param out AEAD - * @param out_len length of out - * - * @return yh_rc error code - **/ -yh_rc yh_util_otp_aead_random(yh_session *session, uint16_t key_id, - uint8_t *out, size_t *out_len); - -/** - * Decrypt OTP - * - * @param session session to use - * @param key_id Object ID - * @param aead AEAD - * @param aead_len length of AEAD - * @param otp OTP - * @param useCtr OTP use counter - * @param sessionCtr OTP session 
counter - * @param tstph OTP timestamp high - * @param tstpl OTP timestamp low - * - * @return yh_rc error code - **/ -yh_rc yh_util_otp_decrypt(yh_session *session, uint16_t key_id, - const uint8_t *aead, size_t aead_len, - const uint8_t *otp, uint16_t *useCtr, - uint8_t *sessionCtr, uint8_t *tstph, uint16_t *tstpl); - -/** - * Import OTP AEAD Key - * - * @param session session to use - * @param key_id Object ID - * @param label label - * @param domains domains - * @param capabilities capabilities - * @param nonce_id nonce ID - * @param in key - * @param in_len length of in - * - * @return - **/ -yh_rc yh_util_put_otp_aead_key(yh_session *session, uint16_t *key_id, - const char *label, uint16_t domains, - const yh_capabilities *capabilities, - uint32_t nonce_id, const uint8_t *in, - size_t in_len); - -/** - * Generate OTP AEAD Key - * - * @param session session to use - * @param key_id Object ID - * @param label label - * @param domains domains - * @param capabilities capabilities - * @param algorithm algorithm - * @param nonce_id nonce ID - * - * @return yh_rc error code - **/ -yh_rc yh_util_generate_otp_aead_key(yh_session *session, uint16_t *key_id, - const char *label, uint16_t domains, - const yh_capabilities *capabilities, - yh_algorithm algorithm, uint32_t nonce_id); - -/** - * Attest asymmetric key - * - * @param session session to use - * @param key_id Object ID - * @param attest_id Attestation key ID - * @param out Certificate - * @param out_len length of out - * - * @return yh_rc error code - **/ -yh_rc yh_util_attest_asymmetric(yh_session *session, uint16_t key_id, - uint16_t attest_id, uint8_t *out, - size_t *out_len); - -/** - * Put global option - * - * @param session session to use - * @param option option - * @param len length of option data - * @param val option data - * - * @return yh_rc error code - **/ -yh_rc yh_util_put_option(yh_session *session, yh_option option, size_t len, - uint8_t *val); - -/** - * Get global option - * - * @param session session to use - * @param option option - * @param out option data - * @param out_len length of out - * - * @return yh_rc error code - **/ -yh_rc yh_util_get_option(yh_session *session, yh_option option, uint8_t *out, - size_t *out_len); - -/** - * Get storage statistics - * - * @param session session to use - * @param total_records total records available - * @param free_records number of free records - * @param total_pages total pages available - * @param free_pages number of free pages - * @param page_size page size in bytes - * - * @return yh_rc error code - **/ -yh_rc yh_util_get_storage_stats(yh_session *session, uint16_t *total_records, - uint16_t *free_records, uint16_t *total_pages, - uint16_t *free_pages, uint16_t *page_size); - -/** - * Wrap data - * - * @param session session to use - * @param key_id Object ID - * @param in data to wrap - * @param in_len length of in - * @param out wrapped data - * @param out_len length of out - * - * @return yh_rc error code - **/ -yh_rc yh_util_wrap_data(yh_session *session, uint16_t key_id, const uint8_t *in, - size_t in_len, uint8_t *out, size_t *out_len); - -/** - * Unwrap data - * - * @param session session to use - * @param key_id Object ID - * @param in wrapped data - * @param in_len length of in - * @param out unwrapped data - * @param out_len length of out - * - * @return yh_rc error code - **/ -yh_rc yh_util_unwrap_data(yh_session *session, uint16_t key_id, - const uint8_t *in, size_t in_len, uint8_t *out, - size_t *out_len); - -/** - * Blink the device - * - * @param 
session session to use - * @param seconds seconds to blink - * - * @return yh_rc error code - **/ -yh_rc yh_util_blink(yh_session *session, uint8_t seconds); - -/** - * Reset the device - * - * @param session session to use - * - * @return yh_rc error code. This function will normally return a network error - **/ -yh_rc yh_util_reset(yh_session *session); - -/** - * Get session ID - * - * @param session session to use - * @param sid session ID - * - * @return yh_rc error code - **/ -yh_rc yh_get_session_id(yh_session *session, uint8_t *sid); - -/** - * Check if the connector has a device connected - * - * @param connector connector - * - * @return true or false - **/ -bool yh_connector_has_device(yh_connector *connector); - -/** - * Get the connector version - * - * @param connector connector - * @param major major version - * @param minor minor version - * @param patch patch version - * - * @return yh_rc error code - **/ -yh_rc yh_get_connector_version(yh_connector *connector, uint8_t *major, - uint8_t *minor, uint8_t *patch); - -/** - * Get connector address - * - * @param connector connector - * @param address pointer to string address - * - * @return yh_rc error code - **/ -yh_rc yh_get_connector_address(yh_connector *connector, char **const address); - -/** - * Convert capability string to byte array - * - * @param capability string of capabilities - * @param result capabilities - * - * @return yh_rc error code - **/ -yh_rc yh_capabilities_to_num(const char *capability, yh_capabilities *result); - -/** - * Convert capability byte array to strings - * - * @param num capabilities - * @param result array of string pointers - * @param n_result number of elements of result - * - * @return yh_rc error code - **/ -yh_rc yh_num_to_capabilities(const yh_capabilities *num, const char *result[], - size_t *n_result); - -/** - * Check if capability is set - * - * @param capabilities capabilities - * @param capability capability string - * - * @return true or false - **/ -bool yh_check_capability(const yh_capabilities *capabilities, - const char *capability); - -/** - * Merge two sets of capabilities - * - * @param a a set of capabilities - * @param b a set of capabilities - * @param result resulting set of capabilities - * - * @return yh_rc error code - **/ -yh_rc yh_merge_capabilities(const yh_capabilities *a, const yh_capabilities *b, - yh_capabilities *result); - -/** - * Filter one set of capabilities with another - * - * @param capabilities set of capabilities - * @param filter capabilities to filter with - * @param result resulting set of capabilities - * - * @return yh_rc error code - **/ -yh_rc yh_filter_capabilities(const yh_capabilities *capabilities, - const yh_capabilities *filter, - yh_capabilities *result); - -/** - * Check if algorithm is an RSA algorithm - * - * @param algorithm algorithm - * - * @return true or false - **/ -bool yh_is_rsa(yh_algorithm algorithm); - -/** - * Check if algorithm is an EC algorithm - * - * @param algorithm algorithm - * - * @return true or false - **/ -bool yh_is_ec(yh_algorithm algorithm); - -/** - * Check if algorithm is an ED algorithm - * - * @param algorithm algorithm - * - * @return true or false - **/ -bool yh_is_ed(yh_algorithm algorithm); - -/** - * Check if algorithm is a HMAC algorithm - * - * @param algorithm algorithm - * - * @return true or false - **/ -bool yh_is_hmac(yh_algorithm algorithm); - -/** - * Get algorithm bitlength - * - * @param algorithm algorithm - * @param result bitlength - * - * @return yh_rc error code - **/ -yh_rc 
yh_get_key_bitlength(yh_algorithm algorithm, size_t *result); - -/** - * Convert algorithm to string - * - * @param algo algorithm - * @param result string - * - * @return yh_rc error code - **/ -yh_rc yh_algo_to_string(yh_algorithm algo, char const **result); - -/** - * Convert string to algorithm - * - * @param string algorithm as string - * @param algo algorithm - * - * @return yh_rc error code - **/ -yh_rc yh_string_to_algo(const char *string, yh_algorithm *algo); - -/** - * Convert type to string - * - * @param type type - * @param result string - * - * @return yh_rc error code - **/ -yh_rc yh_type_to_string(yh_object_type type, char const **result); - -/** - * Convert string to type - * - * @param string type as string - * @param type type - * - * @return yh_rc error code - **/ -yh_rc yh_string_to_type(const char *string, yh_object_type *type); - -/** - * Convert string to option - * - * @param string option as string - * @param option option - * - * @return yh_rc error code - **/ -yh_rc yh_string_to_option(const char *string, yh_option *option); - -/** - * Verify an array of log entries - * - * @param logs pointer to an array of log entries - * @param n_items number of items logs - * @param last_previous_log optional pointer to the entry before the first entry - *in logs - * - * @return true or false - **/ -bool yh_verify_logs(yh_log_entry *logs, size_t n_items, - yh_log_entry *last_previous_log); - -/** - * Parse a string to a domains parameter. - * - * @param domains string of the format 1,2,3 - * @param result resulting parsed domain parameter - * - * @return yh_rc error code - **/ -yh_rc yh_parse_domains(const char *domains, uint16_t *result); - -/** - * Write out domains to a string. - * - * @param domains encoded domains - * @param string string to hold the result - * @param max_len maximum length of string - * - * @return yh_rc error code - **/ -yh_rc yh_domains_to_string(uint16_t domains, char *string, size_t max_len); -#ifdef __cplusplus -} -#endif - -#endif diff --git a/plugins/wallet_plugin/wallet_manager.cpp b/plugins/wallet_plugin/wallet_manager.cpp index 15a39c9d9bd..b5287173670 100644 --- a/plugins/wallet_plugin/wallet_manager.cpp +++ b/plugins/wallet_plugin/wallet_manager.cpp @@ -2,6 +2,7 @@ * @file * @copyright defined in eos/LICENSE */ +#include #include #include #include @@ -271,10 +272,28 @@ wallet_manager::sign_digest(const chain::digest_type& digest, const public_key_t void wallet_manager::own_and_use_wallet(const string& name, std::unique_ptr&& wallet) { if(wallets.find(name) != wallets.end()) - FC_THROW("tried to use wallet name the already existed"); + EOS_THROW(wallet_exception, "Tried to use wallet name that already exists."); wallets.emplace(name, std::move(wallet)); } +void wallet_manager::start_lock_watch(std::shared_ptr t) +{ + t->async_wait([t, this](const boost::system::error_code& /*ec*/) + { + namespace bfs = boost::filesystem; + boost::system::error_code ec; + auto rc = bfs::status(lock_path, ec); + if(ec != boost::system::error_code()) { + if(rc.type() == bfs::file_not_found) { + appbase::app().quit(); + EOS_THROW(wallet_exception, "Lock file removed while keosd still running. Terminating."); + } + } + t->expires_from_now(boost::posix_time::seconds(1)); + start_lock_watch(t); + }); +} + void wallet_manager::initialize_lock() { //This is technically somewhat racy in here -- if multiple keosd are in this function at once. 
//I've considered that an acceptable tradeoff to maintain cross-platform boost constructs here @@ -288,6 +307,8 @@ void wallet_manager::initialize_lock() { wallet_dir_lock.reset(); EOS_THROW(wallet_exception, "Failed to lock access to wallet directory; is another keosd running?"); } + auto timer = std::make_shared(appbase::app().get_io_service(), boost::posix_time::seconds(1)); + start_lock_watch(timer); } } // namespace wallet diff --git a/plugins/wallet_plugin/yubihsm_wallet.cpp b/plugins/wallet_plugin/yubihsm_wallet.cpp index 682fc02bbb0..5676089c0e1 100644 --- a/plugins/wallet_plugin/yubihsm_wallet.cpp +++ b/plugins/wallet_plugin/yubihsm_wallet.cpp @@ -6,7 +6,7 @@ #include #include -#include +#include #include @@ -23,78 +23,18 @@ using namespace fc::crypto::r1; namespace detail { -//For now, load the shared library on the fly -struct yubihsm_api { - struct func_ptr { - explicit func_ptr(void* ptr) : _ptr(ptr) {} - template operator T*() const { - return reinterpret_cast(_ptr); - } - void* _ptr; - }; - - struct yubihsm_shlib { - yubihsm_shlib() { - const char* lib_name; -#if defined( __APPLE__ ) - lib_name = "libyubihsm.dylib"; -#elif defined( __linux__ ) - lib_name = "libyubihsm.so.1"; -#endif - _handle = dlopen(lib_name, RTLD_NOW); - if(!_handle) - FC_THROW("Failed to load libyubihsm: ${m}", ("m", dlerror())); - } - ~yubihsm_shlib() { - dlclose(_handle); - } - - func_ptr operator[](const char* import_name) const { - dlerror(); - void* ret = dlsym(_handle, import_name); - char* error; - if((error = dlerror())) - FC_THROW("Failed to import ${i} from libyubihsm: ${m}", ("i", import_name)("m", error)); - return func_ptr(ret); - } - - void* _handle; - }; - yubihsm_shlib _shlib; - -#define LOAD_IMPORT(n) decltype(yh_ ## n)* n = _shlib["yh_" #n]; - LOAD_IMPORT(init) - LOAD_IMPORT(init_connector) - LOAD_IMPORT(strerror) - LOAD_IMPORT(connect_best) - LOAD_IMPORT(create_session_derived) - LOAD_IMPORT(authenticate_session) - LOAD_IMPORT(capabilities_to_num) - LOAD_IMPORT(util_list_objects) - LOAD_IMPORT(util_get_pubkey) - LOAD_IMPORT(util_sign_ecdsa) - LOAD_IMPORT(util_get_object_info) - LOAD_IMPORT(check_capability) - LOAD_IMPORT(send_secure_msg) - LOAD_IMPORT(exit) - LOAD_IMPORT(util_close_session) - LOAD_IMPORT(destroy_session) - LOAD_IMPORT(disconnect) - LOAD_IMPORT(util_generate_key_ec) -}; - struct yubihsm_wallet_impl { using key_map_type = map; yubihsm_wallet_impl(const string& ep, const uint16_t ak) : endpoint(ep), authkey(ak) { yh_rc rc; - if((rc = api.init())) - FC_THROW("yubihsm init failure: ${c}", ("c", api.strerror(rc))); + if((rc = yh_init())) + FC_THROW("yubihsm init failure: ${c}", ("c", yh_strerror(rc))); } ~yubihsm_wallet_impl() { lock(); - api.exit(); + yh_exit(); //bizarre, is there no way to destroy a yh_connector?? 
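+      // yh_init()/yh_exit() and the yh_* calls below now bind directly to the statically
+      // linked libyubihsm (see the wallet_plugin CMakeLists.txt change above), replacing
+      // the dlopen()/dlsym() shim struct deleted here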
///XXX Probably a race condition on timer shutdown and appbase destruction
@@ -108,10 +48,10 @@ struct yubihsm_wallet_impl {
       yh_rc rc;
       size_t blob_sz = 128;
       uint8_t blob[blob_sz];
-      if((rc = api.util_get_pubkey(session, key_id, blob, &blob_sz, nullptr)))
-         FC_THROW_EXCEPTION(chain::wallet_exception, "yh_util_get_pubkey failed: ${m}", ("m", api.strerror(rc)));
+      if((rc = yh_util_get_public_key(session, key_id, blob, &blob_sz, nullptr)))
+         FC_THROW_EXCEPTION(chain::wallet_exception, "yh_util_get_public_key failed: ${m}", ("m", yh_strerror(rc)));
       if(blob_sz != 64)
-         FC_THROW_EXCEPTION(chain::wallet_exception, "unexpected pubkey size from yh_util_get_pubkey");
+         FC_THROW_EXCEPTION(chain::wallet_exception, "unexpected pubkey size from yh_util_get_public_key");

       ///XXX This is junky and common with SE wallet; commonize it
       char serialized_pub_key[sizeof(public_key_data) + 1];
@@ -128,34 +68,33 @@ struct yubihsm_wallet_impl {

    void unlock(const string& password) {
       yh_rc rc;
-      uint8_t context[YH_CONTEXT_LEN] = {0};

       try {
-         if((rc = api.init_connector(endpoint.c_str(), &connector)))
-            FC_THROW_EXCEPTION(chain::wallet_exception, "Failled to initialize yubihsm connector URL: ${c}", ("c", api.strerror(rc)));
-         if((rc = api.connect_best(&connector, 1, NULL)))
-            FC_THROW_EXCEPTION(chain::wallet_exception, "Failed to connect to YubiHSM connector: ${m}", ("m", api.strerror(rc)));
-         if((rc = api.create_session_derived(connector, authkey, (const uint8_t *)password.data(), password.size(), false, context, sizeof(context), &session)))
-            FC_THROW_EXCEPTION(chain::wallet_exception, "Failed to create YubiHSM session: ${m}", ("m", api.strerror(rc)));
-         if((rc = api.authenticate_session(session, context, sizeof(context))))
-            FC_THROW_EXCEPTION(chain::wallet_exception, "Failed to authenticate YubiHSM session: ${m}", ("m", api.strerror(rc)));
+         if((rc = yh_init_connector(endpoint.c_str(), &connector)))
+            FC_THROW_EXCEPTION(chain::wallet_exception, "Failed to initialize yubihsm connector URL: ${c}", ("c", yh_strerror(rc)));
+         if((rc = yh_connect(connector, 0)))
+            FC_THROW_EXCEPTION(chain::wallet_exception, "Failed to connect to YubiHSM connector: ${m}", ("m", yh_strerror(rc)));
+         if((rc = yh_create_session_derived(connector, authkey, (const uint8_t *)password.data(), password.size(), false, &session)))
+            FC_THROW_EXCEPTION(chain::wallet_exception, "Failed to create YubiHSM session: ${m}", ("m", yh_strerror(rc)));
+         if((rc = yh_authenticate_session(session)))
+            FC_THROW_EXCEPTION(chain::wallet_exception, "Failed to authenticate YubiHSM session: ${m}", ("m", yh_strerror(rc)));

         yh_object_descriptor authkey_desc;
-         if((rc = api.util_get_object_info(session, authkey, YH_AUTHKEY, &authkey_desc)))
-            FC_THROW_EXCEPTION(chain::wallet_exception, "Failed to get authkey info: ${m}", ("m", api.strerror(rc)));
+         if((rc = yh_util_get_object_info(session, authkey, YH_AUTHENTICATION_KEY, &authkey_desc)))
+            FC_THROW_EXCEPTION(chain::wallet_exception, "Failed to get authkey info: ${m}", ("m", yh_strerror(rc)));

         authkey_caps = authkey_desc.capabilities;
         authkey_domains = authkey_desc.domains;

-         if(!api.check_capability(&authkey_caps, "asymmetric_sign_ecdsa"))
+         if(!yh_check_capability(&authkey_caps, "sign-ecdsa"))
            FC_THROW_EXCEPTION(chain::wallet_exception, "Given authkey cannot perform signing");

         size_t found_objects_n = 64*1024;
         yh_object_descriptor found_objs[found_objects_n];
         yh_capabilities find_caps;
-        api.capabilities_to_num("asymmetric_sign_ecdsa", &find_caps);
-        if((rc = api.util_list_objects(session, 0, YH_ASYMMETRIC, 0,
&find_caps, YH_ALGO_EC_P256, nullptr, found_objs, &found_objects_n))) - FC_THROW_EXCEPTION(chain::wallet_exception, "yh_util_list_objects failed: ${m}", ("m", api.strerror(rc))); + yh_string_to_capabilities("sign-ecdsa", &find_caps); + if((rc = yh_util_list_objects(session, 0, YH_ASYMMETRIC_KEY, 0, &find_caps, YH_ALGO_EC_P256, nullptr, found_objs, &found_objects_n))) + FC_THROW_EXCEPTION(chain::wallet_exception, "yh_util_list_objects failed: ${m}", ("m", yh_strerror(rc))); for(size_t i = 0; i < found_objects_n; ++i) populate_key_map_with_keyid(found_objs[i].id); @@ -170,12 +109,12 @@ struct yubihsm_wallet_impl { void lock() { if(session) { - api.util_close_session(session); - api.destroy_session(&session); + yh_util_close_session(session); + yh_destroy_session(&session); } session = nullptr; if(connector) - api.disconnect(connector); + yh_disconnect(connector); //it would seem like this would leak-- there is no destroy() call for it. But I clearly can't reuse connectors // as that fails with a "Unable to find a suitable connector" connector = nullptr; @@ -193,7 +132,7 @@ struct yubihsm_wallet_impl { uint8_t data, resp; yh_cmd resp_cmd; size_t resp_sz = 1; - if(api.send_secure_msg(session, YHC_ECHO, &data, 1, &resp_cmd, &resp, &resp_sz)) + if(yh_send_secure_msg(session, YHC_ECHO, &data, 1, &resp_cmd, &resp, &resp_sz)) lock(); else prime_keepalive_timer(); @@ -208,9 +147,9 @@ struct yubihsm_wallet_impl { size_t der_sig_sz = 128; uint8_t der_sig[der_sig_sz]; yh_rc rc; - if((rc = api.util_sign_ecdsa(session, it->second, (uint8_t*)d.data(), d.data_size(), der_sig, &der_sig_sz))) { + if((rc = yh_util_sign_ecdsa(session, it->second, (uint8_t*)d.data(), d.data_size(), der_sig, &der_sig_sz))) { lock(); - FC_THROW_EXCEPTION(chain::wallet_exception, "yh_util_sign_ecdsa failed: ${m}", ("m", api.strerror(rc))); + FC_THROW_EXCEPTION(chain::wallet_exception, "yh_util_sign_ecdsa failed: ${m}", ("m", yh_strerror(rc))); } ///XXX a lot of this below is similar to SE wallet; commonize it in non-junky way @@ -239,18 +178,18 @@ struct yubihsm_wallet_impl { } public_key_type create() { - if(!api.check_capability(&authkey_caps, "asymmetric_gen")) + if(!yh_check_capability(&authkey_caps, "generate-asymmetric-key")) FC_THROW_EXCEPTION(chain::wallet_exception, "Given authkey cannot create keys"); yh_rc rc; uint16_t new_key_id = 0; yh_capabilities creation_caps = {}; - if(api.capabilities_to_num("asymmetric_sign_ecdsa:export_under_wrap", &creation_caps)) + if(yh_string_to_capabilities("sign-ecdsa:export-wrapped", &creation_caps)) FC_THROW_EXCEPTION(chain::wallet_exception, "Cannot create caps mask"); try { - if((rc = api.util_generate_key_ec(session, &new_key_id, "keosd created key", authkey_domains, &creation_caps, YH_ALGO_EC_P256))) - FC_THROW_EXCEPTION(chain::wallet_exception, "yh_util_generate_key_ec failed: ${m}", ("m", api.strerror(rc))); + if((rc = yh_util_generate_ec_key(session, &new_key_id, "keosd created key", authkey_domains, &creation_caps, YH_ALGO_EC_P256))) + FC_THROW_EXCEPTION(chain::wallet_exception, "yh_util_generate_ec_key failed: ${m}", ("m", yh_strerror(rc))); return populate_key_map_with_keyid(new_key_id)->first; } catch(chain::wallet_exception& e) { @@ -271,8 +210,6 @@ struct yubihsm_wallet_impl { boost::asio::steady_timer keepalive_timer{appbase::app().get_io_service()}; fc::ec_key key = EC_KEY_new_by_curve_name(NID_X9_62_prime256v1); - - yubihsm_api api; }; diff --git a/programs/cleos/main.cpp b/programs/cleos/main.cpp index 9150a7fcafb..feef29cbfd9 100644 --- a/programs/cleos/main.cpp +++ 
b/programs/cleos/main.cpp
@@ -28,7 +28,7 @@ Usage: programs/cleos/cleos [OPTIONS] SUBCOMMAND
                               the http/https URL where keosd is running
   -r,--header                 pass specific HTTP header, repeat this option to pass multiple headers
   -n,--no-verify              don't verify peer certificate when using HTTPS
-  -v,--verbose                output verbose actions on error
+  -v,--verbose                output verbose errors and action output

 Subcommands:
   version                     Retrieve version information
@@ -182,6 +182,7 @@ bool tx_print_json = false;
 bool print_request = false;
 bool print_response = false;
 bool no_auto_keosd = false;
+bool verbose = false;

 uint8_t tx_max_cpu_usage = 0;
 uint32_t tx_max_net_usage = 0;
@@ -373,8 +374,10 @@ void print_action( const fc::variant& at ) {
    if( console.size() ) {
       std::stringstream ss(console);
       string line;
-      std::getline( ss, line );
-      cout << ">> " << line << "\n";
+      while( std::getline( ss, line ) ) {
+         cout << ">> " << line << "\n";
+         if( !verbose ) break;
+      }
    }
 }

@@ -682,7 +685,6 @@ asset to_asset( account_name code, const string& s ) {
       auto expected_symbol = it->second;
       if ( a.decimals() < expected_symbol.decimals() ) {
          auto factor = expected_symbol.precision() / a.precision();
-         auto a_old = a;
          a = asset( a.get_amount() * factor, expected_symbol );
       } else if ( a.decimals() > expected_symbol.decimals() ) {
          EOS_THROW(symbol_type_exception, "Too many decimal digits in ${a}, only ${d} supported", ("a", a)("d", expected_symbol.decimals()));
@@ -1605,6 +1607,405 @@ struct canceldelay_subcommand {
    }
 };

+struct deposit_subcommand {
+   string owner_str;
+   string amount_str;
+   const name act_name{ N(deposit) };
+
+   deposit_subcommand(CLI::App* actionRoot) {
+      auto deposit = actionRoot->add_subcommand("deposit", localized("Deposit into owner's REX fund by transferring from owner's liquid token balance"));
+      deposit->add_option("owner", owner_str, localized("Account which owns the REX fund"))->required();
+      deposit->add_option("amount", amount_str, localized("Amount to be deposited into REX fund"))->required();
+      add_standard_transaction_options(deposit, "owner@active");
+      deposit->set_callback([this] {
+         fc::variant act_payload = fc::mutable_variant_object()
+            ("owner", owner_str)
+            ("amount", amount_str);
+         auto accountPermissions = get_account_permissions(tx_permission, {owner_str, config::active_name});
+         send_actions({create_action(accountPermissions, config::system_account_name, act_name, act_payload)});
+      });
+   }
+};
+
+struct withdraw_subcommand {
+   string owner_str;
+   string amount_str;
+   const name act_name{ N(withdraw) };
+
+   withdraw_subcommand(CLI::App* actionRoot) {
+      auto withdraw = actionRoot->add_subcommand("withdraw", localized("Withdraw from owner's REX fund by transferring to owner's liquid token balance"));
+      withdraw->add_option("owner", owner_str, localized("Account which owns the REX fund"))->required();
+      withdraw->add_option("amount", amount_str, localized("Amount to be withdrawn from REX fund"))->required();
+      add_standard_transaction_options(withdraw, "owner@active");
+      withdraw->set_callback([this] {
+         fc::variant act_payload = fc::mutable_variant_object()
+            ("owner", owner_str)
+            ("amount", amount_str);
+         auto accountPermissions = get_account_permissions(tx_permission, {owner_str, config::active_name});
+         send_actions({create_action(accountPermissions, config::system_account_name, act_name, act_payload)});
+      });
+   }
+};
+
+struct buyrex_subcommand {
+   string from_str;
+   string amount_str;
+   const name act_name{ N(buyrex) };
+
+   buyrex_subcommand(CLI::App* actionRoot) {
+      auto buyrex = actionRoot->add_subcommand("buyrex", localized("Buy REX using tokens in owner's REX fund"));
+      buyrex->add_option("from", from_str, localized("Account buying REX tokens"))->required();
+      buyrex->add_option("amount", amount_str, localized("Amount to be taken from REX fund and used in buying REX"))->required();
+      add_standard_transaction_options(buyrex, "from@active");
+      buyrex->set_callback([this] {
+         fc::variant act_payload = fc::mutable_variant_object()
+            ("from", from_str)
+            ("amount", amount_str);
+         auto accountPermissions = get_account_permissions(tx_permission, {from_str, config::active_name});
+         send_actions({create_action(accountPermissions, config::system_account_name, act_name, act_payload)});
+      });
+   }
+};
+
+struct lendrex_subcommand {
+   string from_str;
+   string amount_str;
+   const name act_name1{ N(deposit) };
+   const name act_name2{ N(buyrex) };
+
+   lendrex_subcommand(CLI::App* actionRoot) {
+      auto lendrex = actionRoot->add_subcommand("lendrex", localized("Deposit tokens to REX fund and use the tokens to buy REX"));
+      lendrex->add_option("from", from_str, localized("Account buying REX tokens"))->required();
+      lendrex->add_option("amount", amount_str, localized("Amount of liquid tokens to be used in buying REX"))->required();
+      add_standard_transaction_options(lendrex, "from@active");
+      lendrex->set_callback([this] {
+         fc::variant act_payload1 = fc::mutable_variant_object()
+            ("owner", from_str)
+            ("amount", amount_str);
+         fc::variant act_payload2 = fc::mutable_variant_object()
+            ("from", from_str)
+            ("amount", amount_str);
+         auto accountPermissions = get_account_permissions(tx_permission, {from_str, config::active_name});
+         send_actions({create_action(accountPermissions, config::system_account_name, act_name1, act_payload1),
+                       create_action(accountPermissions, config::system_account_name, act_name2, act_payload2)});
+      });
+   }
+};
+
+struct unstaketorex_subcommand {
+   string owner_str;
+   string receiver_str;
+   string from_net_str;
+   string from_cpu_str;
+   const name act_name{ N(unstaketorex) };
+
+   unstaketorex_subcommand(CLI::App* actionRoot) {
+      auto unstaketorex = actionRoot->add_subcommand("unstaketorex", localized("Buy REX using staked tokens"));
+      unstaketorex->add_option("owner", owner_str, localized("Account buying REX tokens"))->required();
+      unstaketorex->add_option("receiver", receiver_str, localized("Account that tokens have been staked to"))->required();
+      unstaketorex->add_option("from_net", from_net_str, localized("Amount to be unstaked from NET resources and used in REX purchase"))->required();
+      unstaketorex->add_option("from_cpu", from_cpu_str, localized("Amount to be unstaked from CPU resources and used in REX purchase"))->required();
+      add_standard_transaction_options(unstaketorex, "owner@active");
+      unstaketorex->set_callback([this] {
+         fc::variant act_payload = fc::mutable_variant_object()
+            ("owner", owner_str)
+            ("receiver", receiver_str)
+            ("from_net", from_net_str)
+            ("from_cpu", from_cpu_str);
+         auto accountPermissions = get_account_permissions(tx_permission, {owner_str, config::active_name});
+         send_actions({create_action(accountPermissions, config::system_account_name, act_name, act_payload)});
+      });
+   }
+};
+
+struct sellrex_subcommand {
+   string from_str;
+   string rex_str;
+   const name act_name{ N(sellrex) };
+
+   sellrex_subcommand(CLI::App* actionRoot) {
+      auto sellrex = actionRoot->add_subcommand("sellrex", localized("Sell REX tokens"));
+      sellrex->add_option("from", from_str, localized("Account selling REX tokens"))->required();
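+      // Illustrative note: the "rex" argument below is an asset amount denominated in REX,
+      // e.g. "100.0000 REX" (example value only).
+      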
sellrex->add_option("rex", rex_str, localized("Amount of REX tokens to be sold"))->required(); + add_standard_transaction_options(sellrex, "from@active"); + sellrex->set_callback([this] { + fc::variant act_payload = fc::mutable_variant_object() + ("from", from_str) + ("rex", rex_str); + auto accountPermissions = get_account_permissions(tx_permission, {from_str, config::active_name}); + send_actions({create_action(accountPermissions, config::system_account_name, act_name, act_payload)}); + }); + } +}; + +struct cancelrexorder_subcommand { + string owner_str; + const name act_name{ N(cnclrexorder) }; + + cancelrexorder_subcommand(CLI::App* actionRoot) { + auto cancelrexorder = actionRoot->add_subcommand("cancelrexorder", localized("Cancel queued REX sell order if one exists")); + cancelrexorder->add_option("owner", owner_str, localized("Owner account of sell order"))->required(); + add_standard_transaction_options(cancelrexorder, "owner@active"); + cancelrexorder->set_callback([this] { + fc::variant act_payload = fc::mutable_variant_object()("owner", owner_str); + auto accountPermissions = get_account_permissions(tx_permission, {owner_str, config::active_name}); + send_actions({create_action(accountPermissions, config::system_account_name, act_name, act_payload)}); + }); + } +}; + +struct rentcpu_subcommand { + string from_str; + string receiver_str; + string loan_payment_str; + string loan_fund_str; + const name act_name{ N(rentcpu) }; + + rentcpu_subcommand(CLI::App* actionRoot) { + auto rentcpu = actionRoot->add_subcommand("rentcpu", localized("Rent CPU bandwidth for 30 days")); + rentcpu->add_option("from", from_str, localized("Account paying rent fees"))->required(); + rentcpu->add_option("receiver", receiver_str, localized("Account to whom rented CPU bandwidth is staked"))->required(); + rentcpu->add_option("loan_payment", loan_payment_str, localized("Loan fee to be paid, used to calculate amount of rented bandwidth"))->required(); + rentcpu->add_option("loan_fund", loan_fund_str, localized("Loan fund to be used in automatic renewal, can be 0 tokens"))->required(); + add_standard_transaction_options(rentcpu, "from@active"); + rentcpu->set_callback([this] { + fc::variant act_payload = fc::mutable_variant_object() + ("from", from_str) + ("receiver", receiver_str) + ("loan_payment", loan_payment_str) + ("loan_fund", loan_fund_str); + auto accountPermissions = get_account_permissions(tx_permission, {from_str, config::active_name}); + send_actions({create_action(accountPermissions, config::system_account_name, act_name, act_payload)}); + }); + } +}; + +struct rentnet_subcommand { + string from_str; + string receiver_str; + string loan_payment_str; + string loan_fund_str; + const name act_name{ N(rentnet) }; + + rentnet_subcommand(CLI::App* actionRoot) { + auto rentnet = actionRoot->add_subcommand("rentnet", localized("Rent Network bandwidth for 30 days")); + rentnet->add_option("from", from_str, localized("Account paying rent fees"))->required(); + rentnet->add_option("receiver", receiver_str, localized("Account to whom rented Network bandwidth is staked"))->required(); + rentnet->add_option("loan_payment", loan_payment_str, localized("Loan fee to be paid, used to calculate amount of rented bandwidth"))->required(); + rentnet->add_option("loan_fund", loan_fund_str, localized("Loan fund to be used in automatic renewal, can be 0 tokens"))->required(); + add_standard_transaction_options(rentnet, "from@active"); + rentnet->set_callback([this] { + fc::variant act_payload = 
fc::mutable_variant_object() + ("from", from_str) + ("receiver", receiver_str) + ("loan_payment", loan_payment_str) + ("loan_fund", loan_fund_str); + auto accountPermissions = get_account_permissions(tx_permission, {from_str, config::active_name}); + send_actions({create_action(accountPermissions, config::system_account_name, act_name, act_payload)}); + }); + } +}; + +struct fundcpuloan_subcommand { + string from_str; + string loan_num_str; + string payment_str; + const name act_name{ N(fundcpuloan) }; + + fundcpuloan_subcommand(CLI::App* actionRoot) { + auto fundcpuloan = actionRoot->add_subcommand("fundcpuloan", localized("Deposit into a CPU loan fund")); + fundcpuloan->add_option("from", from_str, localized("Loan owner"))->required(); + fundcpuloan->add_option("loan_num", loan_num_str, localized("Loan ID"))->required(); + fundcpuloan->add_option("payment", payment_str, localized("Amount to be deposited"))->required(); + add_standard_transaction_options(fundcpuloan, "from@active"); + fundcpuloan->set_callback([this] { + fc::variant act_payload = fc::mutable_variant_object() + ("from", from_str) + ("loan_num", loan_num_str) + ("payment", payment_str); + auto accountPermissions = get_account_permissions(tx_permission, {from_str, config::active_name}); + send_actions({create_action(accountPermissions, config::system_account_name, act_name, act_payload)}); + }); + } +}; + +struct fundnetloan_subcommand { + string from_str; + string loan_num_str; + string payment_str; + const name act_name{ N(fundnetloan) }; + + fundnetloan_subcommand(CLI::App* actionRoot) { + auto fundnetloan = actionRoot->add_subcommand("fundnetloan", localized("Deposit into a Network loan fund")); + fundnetloan->add_option("from", from_str, localized("Loan owner"))->required(); + fundnetloan->add_option("loan_num", loan_num_str, localized("Loan ID"))->required(); + fundnetloan->add_option("payment", payment_str, localized("Amount to be deposited"))->required(); + add_standard_transaction_options(fundnetloan, "from@active"); + fundnetloan->set_callback([this] { + fc::variant act_payload = fc::mutable_variant_object() + ("from", from_str) + ("loan_num", loan_num_str) + ("payment", payment_str); + auto accountPermissions = get_account_permissions(tx_permission, {from_str, config::active_name}); + send_actions({create_action(accountPermissions, config::system_account_name, act_name, act_payload)}); + }); + } +}; + +struct defcpuloan_subcommand { + string from_str; + string loan_num_str; + string amount_str; + const name act_name{ N(defcpuloan) }; + + defcpuloan_subcommand(CLI::App* actionRoot) { + auto defcpuloan = actionRoot->add_subcommand("defundcpuloan", localized("Withdraw from a CPU loan fund")); + defcpuloan->add_option("from", from_str, localized("Loan owner"))->required(); + defcpuloan->add_option("loan_num", loan_num_str, localized("Loan ID"))->required(); + defcpuloan->add_option("amount", amount_str, localized("Amount to be withdrawn"))->required(); + add_standard_transaction_options(defcpuloan, "from@active"); + defcpuloan->set_callback([this] { + fc::variant act_payload = fc::mutable_variant_object() + ("from", from_str) + ("loan_num", loan_num_str) + ("amount", amount_str); + auto accountPermissions = get_account_permissions(tx_permission, {from_str, config::active_name}); + send_actions({create_action(accountPermissions, config::system_account_name, act_name, act_payload)}); + }); + } +}; + +struct defnetloan_subcommand { + string from_str; + string loan_num_str; + string amount_str; + const name act_name{ 
N(defnetloan) }; + + defnetloan_subcommand(CLI::App* actionRoot) { + auto defnetloan = actionRoot->add_subcommand("defundnetloan", localized("Withdraw from a Network loan fund")); + defnetloan->add_option("from", from_str, localized("Loan owner"))->required(); + defnetloan->add_option("loan_num", loan_num_str, localized("Loan ID"))->required(); + defnetloan->add_option("amount", amount_str, localized("Amount to be withdrawn"))->required(); + add_standard_transaction_options(defnetloan, "from@active"); + defnetloan->set_callback([this] { + fc::variant act_payload = fc::mutable_variant_object() + ("from", from_str) + ("loan_num", loan_num_str) + ("amount", amount_str); + auto accountPermissions = get_account_permissions(tx_permission, {from_str, config::active_name}); + send_actions({create_action(accountPermissions, config::system_account_name, act_name, act_payload)}); + }); + } +}; + +struct mvtosavings_subcommand { + string owner_str; + string rex_str; + const name act_name{ N(mvtosavings) }; + + mvtosavings_subcommand(CLI::App* actionRoot) { + auto mvtosavings = actionRoot->add_subcommand("mvtosavings", localized("Move REX tokens to savings bucket")); + mvtosavings->add_option("owner", owner_str, localized("REX owner"))->required(); + mvtosavings->add_option("rex", rex_str, localized("Amount of REX to be moved to savings bucket"))->required(); + add_standard_transaction_options(mvtosavings, "owner@active"); + mvtosavings->set_callback([this] { + fc::variant act_payload = fc::mutable_variant_object() + ("owner", owner_str) + ("rex", rex_str); + auto accountPermissions = get_account_permissions(tx_permission, {owner_str, config::active_name}); + send_actions({create_action(accountPermissions, config::system_account_name, act_name, act_payload)}); + }); + } +}; + +struct mvfrsavings_subcommand { + string owner_str; + string rex_str; + const name act_name{ N(mvfrsavings) }; + + mvfrsavings_subcommand(CLI::App* actionRoot) { + auto mvfrsavings = actionRoot->add_subcommand("mvfromsavings", localized("Move REX tokens out of savings bucket")); + mvfrsavings->add_option("owner", owner_str, localized("REX owner"))->required(); + mvfrsavings->add_option("rex", rex_str, localized("Amount of REX to be moved out of savings bucket"))->required(); + add_standard_transaction_options(mvfrsavings, "owner@active"); + mvfrsavings->set_callback([this] { + fc::variant act_payload = fc::mutable_variant_object() + ("owner", owner_str) + ("rex", rex_str); + auto accountPermissions = get_account_permissions(tx_permission, {owner_str, config::active_name}); + send_actions({create_action(accountPermissions, config::system_account_name, act_name, act_payload)}); + }); + } +}; + +struct updaterex_subcommand { + string owner_str; + const name act_name{ N(updaterex) }; + + updaterex_subcommand(CLI::App* actionRoot) { + auto updaterex = actionRoot->add_subcommand("updaterex", localized("Update REX owner vote stake and vote weight")); + updaterex->add_option("owner", owner_str, localized("REX owner"))->required(); + add_standard_transaction_options(updaterex, "owner@active"); + updaterex->set_callback([this] { + fc::variant act_payload = fc::mutable_variant_object()("owner", owner_str); + auto accountPermissions = get_account_permissions(tx_permission, {owner_str, config::active_name}); + send_actions({create_action(accountPermissions, config::system_account_name, act_name, act_payload)}); + }); + } +}; + +struct consolidate_subcommand { + string owner_str; + const name act_name{ N(consolidate) }; + + 
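+   // The REX subcommands in this block all share one pattern: collect string arguments,
+   // pack them into an fc::mutable_variant_object payload, and send a single action to
+   // the eosio system contract, authorized by the owner's active permission.
+   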
consolidate_subcommand(CLI::App* actionRoot) { + auto consolidate = actionRoot->add_subcommand("consolidate", localized("Consolidate REX maturity buckets into one that matures in 4 days")); + consolidate->add_option("owner", owner_str, localized("REX owner"))->required(); + add_standard_transaction_options(consolidate, "owner@active"); + consolidate->set_callback([this] { + fc::variant act_payload = fc::mutable_variant_object()("owner", owner_str); + auto accountPermissions = get_account_permissions(tx_permission, {owner_str, config::active_name}); + send_actions({create_action(accountPermissions, config::system_account_name, act_name, act_payload)}); + }); + } +}; + +struct rexexec_subcommand { + string user_str; + string max_str; + const name act_name{ N(rexexec) }; + + rexexec_subcommand(CLI::App* actionRoot) { + auto rexexec = actionRoot->add_subcommand("rexexec", localized("Perform REX maintenance by processing expired loans and unfilled sell orders")); + rexexec->add_option("user", user_str, localized("User executing the action"))->required(); + rexexec->add_option("max", max_str, localized("Maximum number of CPU loans, Network loans, and sell orders to be processed"))->required(); + add_standard_transaction_options(rexexec, "user@active"); + rexexec->set_callback([this] { + fc::variant act_payload = fc::mutable_variant_object() + ("user", user_str) + ("max", max_str); + auto accountPermissions = get_account_permissions(tx_permission, {user_str, config::active_name}); + send_actions({create_action(accountPermissions, config::system_account_name, act_name, act_payload)}); + }); + } +}; + +struct closerex_subcommand { + string owner_str; + const name act_name{ N(closerex) }; + + closerex_subcommand(CLI::App* actionRoot) { + auto closerex = actionRoot->add_subcommand("closerex", localized("Delete unused REX-related user table entries")); + closerex->add_option("owner", owner_str, localized("REX owner"))->required(); + add_standard_transaction_options(closerex, "owner@active"); + closerex->set_callback([this] { + fc::variant act_payload = fc::mutable_variant_object()("owner", owner_str); + auto accountPermissions = get_account_permissions(tx_permission, {owner_str, config::active_name}); + send_actions({create_action(accountPermissions, config::system_account_name, act_name, act_payload)}); + }); + } +}; + void get_account( const string& accountName, const string& coresym, bool json_format ) { fc::variant json; if (coresym.empty()) { @@ -1850,7 +2251,7 @@ void get_account( const string& accountName, const string& coresym, bool json_fo auto& prods = obj["producers"].get_array(); std::cout << "producers:"; if ( !prods.empty() ) { - for ( int i = 0; i < prods.size(); ++i ) { + for ( size_t i = 0; i < prods.size(); ++i ) { if ( i%3 == 0 ) { std::cout << std::endl << indent; } @@ -1902,8 +2303,7 @@ int main( int argc, char** argv ) { app.add_flag( "--no-auto-keosd", no_auto_keosd, localized("don't automatically launch a keosd if one is not currently running")); app.set_callback([&app]{ ensure_keosd_running(&app);}); - bool verbose_errors = false; - app.add_flag( "-v,--verbose", verbose_errors, localized("output verbose actions on error")); + app.add_flag( "-v,--verbose", verbose, localized("output verbose errors and action console output")); app.add_flag("--print-request", print_request, localized("print HTTP request to STDERR")); app.add_flag("--print-response", print_response, localized("print HTTP response to STDERR")); @@ -2307,7 +2707,7 @@ int main( int argc, char** argv ) { 
getActions->add_option("pos", pos_seq, localized("sequence number of action for this account, -1 for last")); getActions->add_option("offset", offset, localized("get actions [pos,pos+offset] for positive offset or [pos-offset,pos) for negative offset")); getActions->add_flag("--json,-j", printjson, localized("print full json")); - getActions->add_flag("--full", fullact, localized("don't truncate action json")); + getActions->add_flag("--full", fullact, localized("don't truncate action output")); getActions->add_flag("--pretty", prettyact, localized("pretty print full action json ")); getActions->add_flag("--console", printconsole, localized("print console output generated by action ")); getActions->set_callback([&] { @@ -2376,8 +2776,10 @@ int main( int argc, char** argv ) { stringstream out; std::stringstream ss(console); string line; - std::getline( ss, line ); - out << ">> " << line << "\n"; + while( std::getline( ss, line ) ) { + out << ">> " << line << "\n"; + if( !fullact ) break; + } cerr << out.str(); //ilog( "\r${m} ", ("m",out.str()) ); } } @@ -2827,7 +3229,7 @@ int main( int argc, char** argv ) { std::cout << fc::json::to_pretty_string(v) << std::endl; }); - auto stopKeosd = wallet->add_subcommand("stop", localized("Stop keosd (doesn't work with nodeos)."), false); + auto stopKeosd = wallet->add_subcommand("stop", localized("Stop keosd."), false); stopKeosd->set_callback([] { const auto& v = call(wallet_url, keosd_stop); if ( !v.is_object() || v.get_object().size() != 0 ) { //on success keosd responds with empty object @@ -3147,7 +3549,7 @@ int main( int argc, char** argv ) { for( const auto& ra : approvals_object["requested_approvals"].get_array() ) { const auto& ra_obj = ra.get_object(); auto pl = ra["level"].as(); - auto res = all_approvals.emplace( pl, std::make_pair(ra["time"].as(), approval_status::unapproved) ); + all_approvals.emplace( pl, std::make_pair(ra["time"].as(), approval_status::unapproved) ); } for( const auto& pa : approvals_object["provided_approvals"].get_array() ) { @@ -3178,7 +3580,7 @@ int main( int argc, char** argv ) { for( const auto& ra : approvals_object["requested_approvals"].get_array() ) { auto pl = ra.as(); - auto res = all_approvals.emplace( pl, std::make_pair(fc::time_point{}, approval_status::unapproved) ); + all_approvals.emplace( pl, std::make_pair(fc::time_point{}, approval_status::unapproved) ); } for( const auto& pa : approvals_object["provided_approvals"].get_array() ) { @@ -3442,7 +3844,7 @@ int main( int argc, char** argv ) { auto bidname = bidname_subcommand(system); auto bidnameinfo = bidname_info_subcommand(system); - auto biyram = buyram_subcommand(system); + auto buyram = buyram_subcommand(system); auto sellram = sellram_subcommand(system); auto claimRewards = claimrewards_subcommand(system); @@ -3452,6 +3854,29 @@ int main( int argc, char** argv ) { auto cancelDelay = canceldelay_subcommand(system); + auto rex = system->add_subcommand("rex", localized("Actions related to REX (the resource exchange)")); + rex->require_subcommand(); + auto deposit = deposit_subcommand(rex); + auto withdraw = withdraw_subcommand(rex); + auto buyrex = buyrex_subcommand(rex); + auto lendrex = lendrex_subcommand(rex); + auto unstaketorex = unstaketorex_subcommand(rex); + auto sellrex = sellrex_subcommand(rex); + auto cancelrexorder = cancelrexorder_subcommand(rex); + auto mvtosavings = mvtosavings_subcommand(rex); + auto mvfromsavings = mvfrsavings_subcommand(rex); + auto rentcpu = rentcpu_subcommand(rex); + auto rentnet = rentnet_subcommand(rex); + 
auto fundcpuloan = fundcpuloan_subcommand(rex); + auto fundnetloan = fundnetloan_subcommand(rex); + auto defcpuloan = defcpuloan_subcommand(rex); + auto defnetloan = defnetloan_subcommand(rex); + auto consolidate = consolidate_subcommand(rex); + auto updaterex = updaterex_subcommand(rex); + auto rexexec = rexexec_subcommand(rex); + auto closerex = closerex_subcommand(rex); + + try { app.parse(argc, argv); } catch (const CLI::ParseError &e) { @@ -3459,16 +3884,16 @@ int main( int argc, char** argv ) { } catch (const explained_exception& e) { return 1; } catch (connection_exception& e) { - if (verbose_errors) { + if (verbose) { elog("connect error: ${e}", ("e", e.to_detail_string())); } return 1; } catch (const fc::exception& e) { // attempt to extract the error code if one is present - if (!print_recognized_errors(e, verbose_errors)) { + if (!print_recognized_errors(e, verbose)) { // Error is not recognized - if (!print_help_text(e) || verbose_errors) { - elog("Failed with error: ${e}", ("e", verbose_errors ? e.to_detail_string() : e.to_string())); + if (!print_help_text(e) || verbose) { + elog("Failed with error: ${e}", ("e", verbose ? e.to_detail_string() : e.to_string())); } } return 1; diff --git a/programs/eosio-launcher/main.cpp b/programs/eosio-launcher/main.cpp index aca800eb604..66b40819b9d 100644 --- a/programs/eosio-launcher/main.cpp +++ b/programs/eosio-launcher/main.cpp @@ -850,7 +850,7 @@ launcher_def::bind_nodes () { cerr << "Unable to allocate producers due to insufficient prod_nodes = " << prod_nodes << "\n"; exit (10); } - int non_bios = prod_nodes - 1; + size_t non_bios = prod_nodes - 1; int per_node = producers / non_bios; int extra = producers % non_bios; unsigned int i = 0; diff --git a/programs/nodeos/main.cpp b/programs/nodeos/main.cpp index b858db748dd..7034a03858a 100644 --- a/programs/nodeos/main.cpp +++ b/programs/nodeos/main.cpp @@ -49,21 +49,14 @@ void configure_logging(const bfs::path& config_path) } // namespace detail -void logging_conf_loop() +void logging_conf_handler() { - std::shared_ptr sighup_set(new boost::asio::signal_set(app().get_io_service(), SIGHUP)); - sighup_set->async_wait([sighup_set](const boost::system::error_code& err, int /*num*/) { - if(!err) - { - ilog("Received HUP. Reloading logging configuration."); - auto config_path = app().get_logging_conf(); - if(fc::exists(config_path)) - ::detail::configure_logging(config_path); - for(auto iter : fc::get_appender_map()) - iter.second->initialize(app().get_io_service()); - logging_conf_loop(); - } - }); + ilog("Received HUP. 
Reloading logging configuration."); + auto config_path = app().get_logging_conf(); + if(fc::exists(config_path)) + ::detail::configure_logging(config_path); + for(auto iter : fc::get_appender_map()) + iter.second->initialize(app().get_io_service()); } void initialize_logging() @@ -74,7 +67,7 @@ void initialize_logging() for(auto iter : fc::get_appender_map()) iter.second->initialize(app().get_io_service()); - logging_conf_loop(); + app().set_sighup_callback(logging_conf_handler); } enum return_codes { @@ -100,7 +93,7 @@ int main(int argc, char** argv) .default_unix_socket_path = "", .default_http_port = 8888 }); - if(!app().initialize(argc, argv)) + if(!app().initialize(argc, argv)) return INITIALIZE_FAIL; initialize_logging(); ilog("nodeos version ${ver}", ("ver", app().version_string())); @@ -152,5 +145,6 @@ int main(int argc, char** argv) return OTHER_FAIL; } + ilog("nodeos successfully exiting"); return SUCCESS; } diff --git a/scripts/clean_old_install.sh b/scripts/clean_old_install.sh deleted file mode 100755 index 6e04edf92d5..00000000000 --- a/scripts/clean_old_install.sh +++ /dev/null @@ -1,70 +0,0 @@ -#! /bin/bash - -if [ -d "/usr/local/include/eosio" ]; then - printf "\n\tOld eosio install needs to be removed.\n\n" - printf "\tDo you wish to remove this install? (requires sudo)\n" - select yn in "Yes" "No"; do - case $yn in - [Yy]* ) - if [ "$(id -u)" -ne 0 ]; then - printf "\n\tThis requires sudo, please run ./scripts/clean_old_install.sh with sudo\n\n" - exit -1 - fi - pushd /usr/local &> /dev/null - - pushd include &> /dev/null - rm -rf appbase chainbase eosio eosio.system eosiolib fc libc++ musl &> /dev/null - popd &> /dev/null - - pushd bin &> /dev/null - rm cleos eosio-abigen eosio-applesedemo eosio-launcher eosio-s2wasm eosio-wast2wasm eosiocpp keosd nodeos &> /dev/null - popd &> /dev/null - - libraries=(libeosio_testing - libeosio_chain - libfc - libbinaryen - libWAST - libWASM - libRuntime - libPlatform - libIR - libLogging - libsoftfloat - libchainbase - libappbase - libbuiltins) - pushd lib &> /dev/null - for lib in ${libraries[@]}; do - rm ${lib}.a ${lib}.dylib ${lib}.so &> /dev/null - done - popd &> /dev/null - - pushd etc &> /dev/null - rm eosio &> /dev/null - popd &> /dev/null - - pushd share &> /dev/null - rm eosio &> /dev/null - popd &> /dev/null - - pushd usr/share &> /dev/null - rm eosio &> /dev/null - popd &> /dev/null - - pushd var/lib &> /dev/null - rm eosio &> /dev/null - popd &> /dev/null - - pushd var/log &> /dev/null - rm eosio &> /dev/null - popd &> /dev/null - - popd &> /dev/null - break;; - [Nn]* ) - printf "\tAborting uninstall\n\n" - exit -1;; - esac - done -fi diff --git a/scripts/eosio_build.sh b/scripts/eosio_build.sh new file mode 100755 index 00000000000..d3128903097 --- /dev/null +++ b/scripts/eosio_build.sh @@ -0,0 +1,317 @@ +#!/bin/bash +########################################################################## +# This is the EOSIO automated install script for Linux and Mac OS. +# This file was downloaded from https://github.com/EOSIO/eos +# +# Copyright (c) 2017, Respective Authors all rights reserved. 
+# +# After June 1, 2018 this software is available under the following terms: +# +# The MIT License +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +# THE SOFTWARE. +# +# https://github.com/EOSIO/eos/blob/master/LICENSE +########################################################################## + +VERSION=2.1 # Build script version +CMAKE_BUILD_TYPE=Release +export DISK_MIN=20 +DOXYGEN=false +ENABLE_COVERAGE_TESTING=false +CORE_SYMBOL_NAME="SYS" +START_MAKE=true + +TIME_BEGIN=$( date -u +%s ) +txtbld=$(tput bold) +bldred=${txtbld}$(tput setaf 1) +txtrst=$(tput sgr0) + +export SRC_LOCATION=${HOME}/src +export OPT_LOCATION=${HOME}/opt +export VAR_LOCATION=${HOME}/var +export ETC_LOCATION=${HOME}/etc +export BIN_LOCATION=${HOME}/bin +export DATA_LOCATION=${HOME}/data +export CMAKE_VERSION_MAJOR=3 +export CMAKE_VERSION_MINOR=13 +export CMAKE_VERSION_PATCH=2 +export CMAKE_VERSION=${CMAKE_VERSION_MAJOR}.${CMAKE_VERSION_MINOR}.${CMAKE_VERSION_PATCH} +export MONGODB_VERSION=3.6.3 +export MONGODB_ROOT=${OPT_LOCATION}/mongodb-${MONGODB_VERSION} +export MONGODB_CONF=${ETC_LOCATION}/mongod.conf +export MONGODB_LOG_LOCATION=${VAR_LOCATION}/log/mongodb +export MONGODB_LINK_LOCATION=${OPT_LOCATION}/mongodb +export MONGODB_DATA_LOCATION=${DATA_LOCATION}/mongodb +export MONGO_C_DRIVER_VERSION=1.13.0 +export MONGO_C_DRIVER_ROOT=${SRC_LOCATION}/mongo-c-driver-${MONGO_C_DRIVER_VERSION} +export MONGO_CXX_DRIVER_VERSION=3.4.0 +export MONGO_CXX_DRIVER_ROOT=${SRC_LOCATION}/mongo-cxx-driver-r${MONGO_CXX_DRIVER_VERSION} +export BOOST_VERSION_MAJOR=1 +export BOOST_VERSION_MINOR=67 +export BOOST_VERSION_PATCH=0 +export BOOST_VERSION=${BOOST_VERSION_MAJOR}_${BOOST_VERSION_MINOR}_${BOOST_VERSION_PATCH} +export BOOST_ROOT=${SRC_LOCATION}/boost_${BOOST_VERSION} +export BOOST_LINK_LOCATION=${OPT_LOCATION}/boost +export LLVM_VERSION=release_40 +export LLVM_ROOT=${OPT_LOCATION}/llvm +export LLVM_DIR=${LLVM_ROOT}/lib/cmake/llvm +export DOXYGEN_VERSION=1_8_14 +export DOXYGEN_ROOT=${SRC_LOCATION}/doxygen-${DOXYGEN_VERSION} +export TINI_VERSION=0.18.0 + +# Setup directories +mkdir -p $SRC_LOCATION +mkdir -p $OPT_LOCATION +mkdir -p $VAR_LOCATION +mkdir -p $BIN_LOCATION +mkdir -p $VAR_LOCATION/log +mkdir -p $ETC_LOCATION +mkdir -p $MONGODB_LOG_LOCATION +mkdir -p $MONGODB_DATA_LOCATION + +SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +REPO_ROOT="${SCRIPT_DIR}/.." 
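+# For illustration (example user, not part of the configuration): with HOME=/home/alice,
+# the exports above resolve to /home/alice/src, /home/alice/opt/boost (a symlink to the
+# versioned Boost tree), and /home/alice/opt/mongodb-3.6.3; the build itself lands in
+# the build/ directory under the repository root, set just below.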
+BUILD_DIR="${REPO_ROOT}/build"
+
+# Use current directory's tmp directory if noexec is enabled for /tmp
+if (mount | grep "/tmp " | grep --quiet noexec); then
+   mkdir -p $REPO_ROOT/tmp
+   TEMP_DIR="${REPO_ROOT}/tmp"
+   rm -rf $REPO_ROOT/tmp/*
+else # noexec wasn't found
+   TEMP_DIR="/tmp"
+fi
+
+function usage()
+{
+   printf "Usage: %s \\n[Build Option -o ] \\n[CodeCoverage -c] \\n[Doxygen -d] \\n[CoreSymbolName -s <1-7 characters>] \\n[Avoid Compiling -a]\\n[Noninteractive -y]\\n\\n" "$0" 1>&2
+   exit 1
+}
+
+NONINTERACTIVE=0
+
+if [ $# -ne 0 ]; then
+   while getopts ":cdo:s:ahy" opt; do
+      case "${opt}" in
+         o )
+            options=( "Debug" "Release" "RelWithDebInfo" "MinSizeRel" )
+            if [[ "${options[*]}" =~ "${OPTARG}" ]]; then
+               CMAKE_BUILD_TYPE="${OPTARG}"
+            else
+               printf "\\nInvalid argument: %s\\n" "${OPTARG}" 1>&2
+               usage
+               exit 1
+            fi
+            ;;
+         c )
+            ENABLE_COVERAGE_TESTING=true
+            ;;
+         d )
+            DOXYGEN=true
+            ;;
+         s)
+            if [ "${#OPTARG}" -gt 7 ] || [ -z "${OPTARG}" ]; then
+               printf "\\nInvalid argument: %s\\n" "${OPTARG}" 1>&2
+               usage
+               exit 1
+            else
+               CORE_SYMBOL_NAME="${OPTARG}"
+            fi
+            ;;
+         a)
+            START_MAKE=false
+            ;;
+         h)
+            usage
+            exit 1
+            ;;
+         y)
+            NONINTERACTIVE=1
+            ;;
+         \? )
+            printf "\\nInvalid Option: %s\\n" "-${OPTARG}" 1>&2
+            usage
+            exit 1
+            ;;
+         : )
+            printf "\\nInvalid Option: %s requires an argument.\\n" "-${OPTARG}" 1>&2
+            usage
+            exit 1
+            ;;
+         * )
+            usage
+            exit 1
+            ;;
+      esac
+   done
+fi
+
+if [ ! -d "${REPO_ROOT}/.git" ]; then
+   printf "\\nThis build script only works with sources cloned from git\\n"
+   printf "Please clone a new eos directory with 'git clone https://github.com/EOSIO/eos --recursive'\\n"
+   printf "See the wiki for instructions: https://github.com/EOSIO/eos/wiki\\n"
+   exit 1
+fi
+
+cd $REPO_ROOT
+
+STALE_SUBMODS=$(( $(git submodule status --recursive | grep -c "^[+\-]") ))
+if [ $STALE_SUBMODS -gt 0 ]; then
+   printf "\\ngit submodules are not up to date.\\n"
+   printf "Please run the command 'git submodule update --init --recursive'.\\n"
+   exit 1
+fi
+
+printf "\\nBeginning build version: %s\\n" "${VERSION}"
+printf "%s\\n" "$( date -u )"
+printf "User: %s\\n" "$( whoami )"
+# printf "git head id: %s\\n" "$( cat .git/refs/heads/master )"
+printf "Current branch: %s\\n" "$( git rev-parse --abbrev-ref HEAD )"
+
+ARCH=$( uname )
+printf "\\nARCHITECTURE: %s\\n" "${ARCH}"
+
+# Find and use existing CMAKE
+export CMAKE=$(command -v cmake 2>/dev/null)
+
+if [ "$ARCH" == "Linux" ]; then
+   # Check if cmake is already installed or not and use source install location
+   if [ -z $CMAKE ]; then export CMAKE=$HOME/bin/cmake; fi
+   export OS_NAME=$( cat /etc/os-release | grep ^NAME | cut -d'=' -f2 | sed 's/\"//gI' )
+   OPENSSL_ROOT_DIR=/usr/include/openssl
+   if [ !
-e /etc/os-release ]; then + printf "\\nEOSIO currently supports Amazon, Centos, Fedora, Mint & Ubuntu Linux only.\\n" + printf "Please install on the latest version of one of these Linux distributions.\\n" + printf "https://aws.amazon.com/amazon-linux-ami/\\n" + printf "https://www.centos.org/\\n" + printf "https://start.fedoraproject.org/\\n" + printf "https://linuxmint.com/\\n" + printf "https://www.ubuntu.com/\\n" + printf "Exiting now.\\n" + exit 1 + fi + case "$OS_NAME" in + "Amazon Linux AMI"|"Amazon Linux") + FILE="${REPO_ROOT}/scripts/eosio_build_amazon.sh" + CXX_COMPILER=g++ + C_COMPILER=gcc + ;; + "CentOS Linux") + FILE="${REPO_ROOT}/scripts/eosio_build_centos.sh" + CXX_COMPILER=g++ + C_COMPILER=gcc + ;; + "elementary OS") + FILE="${REPO_ROOT}/scripts/eosio_build_ubuntu.sh" + CXX_COMPILER=clang++-4.0 + C_COMPILER=clang-4.0 + ;; + "Fedora") + export CPATH=/usr/include/llvm4.0:$CPATH # llvm4.0 for fedora package path inclusion + FILE="${REPO_ROOT}/scripts/eosio_build_fedora.sh" + CXX_COMPILER=g++ + C_COMPILER=gcc + ;; + "Linux Mint") + FILE="${REPO_ROOT}/scripts/eosio_build_ubuntu.sh" + CXX_COMPILER=clang++-4.0 + C_COMPILER=clang-4.0 + ;; + "Ubuntu") + FILE="${REPO_ROOT}/scripts/eosio_build_ubuntu.sh" + CXX_COMPILER=clang++-4.0 + C_COMPILER=clang-4.0 + ;; + "Debian GNU/Linux") + FILE="${REPO_ROOT}/scripts/eosio_build_ubuntu.sh" + CXX_COMPILER=clang++-4.0 + C_COMPILER=clang-4.0 + ;; + *) + printf "\\nUnsupported Linux Distribution. Exiting now.\\n\\n" + exit 1 + esac +fi + +if [ "$ARCH" == "Darwin" ]; then + # Check if cmake is already installed or not and use source install location + if [ -z $CMAKE ]; then export CMAKE=/usr/local/bin/cmake; fi + export OS_NAME=MacOSX + # opt/gettext: cleos requires Intl, which requires gettext; it's keg only though and we don't want to force linking: https://github.com/EOSIO/eos/issues/2240#issuecomment-396309884 + # HOME/lib/cmake: mongo_db_plugin.cpp:25:10: fatal error: 'bsoncxx/builder/basic/kvp.hpp' file not found + LOCAL_CMAKE_FLAGS="-DCMAKE_PREFIX_PATH=/usr/local/opt/gettext;$HOME/lib/cmake ${LOCAL_CMAKE_FLAGS}" + FILE="${REPO_ROOT}/scripts/eosio_build_darwin.sh" + CXX_COMPILER=clang++ + C_COMPILER=clang + OPENSSL_ROOT_DIR=/usr/local/opt/openssl +fi + +# Cleanup old installation +. ./scripts/full_uninstaller.sh $NONINTERACTIVE +if [ $? -ne 0 ]; then exit -1; fi # Stop if exit from script is not 0 + +pushd $SRC_LOCATION &> /dev/null +. "$FILE" $NONINTERACTIVE # Execute OS specific build file +popd &> /dev/null + +printf "\\n========================================================================\\n" +printf "======================= Starting EOSIO Build =======================\\n" +printf "## CMAKE_BUILD_TYPE=%s\\n" "${CMAKE_BUILD_TYPE}" +printf "## ENABLE_COVERAGE_TESTING=%s\\n" "${ENABLE_COVERAGE_TESTING}" + +mkdir -p $BUILD_DIR +cd $BUILD_DIR + +$CMAKE -DCMAKE_BUILD_TYPE="${CMAKE_BUILD_TYPE}" -DCMAKE_CXX_COMPILER="${CXX_COMPILER}" \ + -DCMAKE_C_COMPILER="${C_COMPILER}" -DCORE_SYMBOL_NAME="${CORE_SYMBOL_NAME}" \ + -DOPENSSL_ROOT_DIR="${OPENSSL_ROOT_DIR}" -DBUILD_MONGO_DB_PLUGIN=true \ + -DENABLE_COVERAGE_TESTING="${ENABLE_COVERAGE_TESTING}" -DBUILD_DOXYGEN="${DOXYGEN}" \ + -DCMAKE_INSTALL_PREFIX=$OPT_LOCATION/eosio $LOCAL_CMAKE_FLAGS "${REPO_ROOT}" +if [ $? -ne 0 ]; then exit -1; fi +make -j"${JOBS}" +if [ $? 
-ne 0 ]; then exit -1; fi + +cd $REPO_ROOT + +TIME_END=$(( $(date -u +%s) - $TIME_BEGIN )) + +printf "${bldred}\n\n _______ _______ _______ _________ _______\n" +printf '( ____ \( ___ )( ____ \\\\__ __/( ___ )\n' +printf "| ( \/| ( ) || ( \/ ) ( | ( ) |\n" +printf "| (__ | | | || (_____ | | | | | |\n" +printf "| __) | | | |(_____ ) | | | | | |\n" +printf "| ( | | | | ) | | | | | | |\n" +printf "| (____/\| (___) |/\____) |___) (___| (___) |\n" +printf "(_______/(_______)\_______)\_______/(_______)\n\n${txtrst}" + +printf "\\nEOSIO has been successfully built. %02d:%02d:%02d\\n" $(($TIME_END/3600)) $(($TIME_END%3600/60)) $(($TIME_END%60)) +printf "==============================================================================================\\n${bldred}" +printf "(Optional) Testing Instructions:\\n" +print_instructions +printf "${BIN_LOCATION}/mongod --dbpath ${MONGODB_DATA_LOCATION} -f ${MONGODB_CONF} --logpath ${MONGODB_LOG_LOCATION}/mongod.log &\\n" +printf "cd ./build && PATH=\$PATH:$HOME/opt/mongodb/bin make test\\n" # PATH is set as currently 'mongo' binary is required for the mongodb test +printf "${txtrst}==============================================================================================\\n" +printf "For more information:\\n" +printf "EOSIO website: https://eos.io\\n" +printf "EOSIO Telegram channel @ https://t.me/EOSProject\\n" +printf "EOSIO resources: https://eos.io/resources/\\n" +printf "EOSIO Stack Exchange: https://eosio.stackexchange.com\\n" +printf "EOSIO wiki: https://github.com/EOSIO/eos/wiki\\n\\n\\n" + diff --git a/scripts/eosio_build_amazon.sh b/scripts/eosio_build_amazon.sh old mode 100644 new mode 100755 index 1c96024b847..7a16e4486e9 --- a/scripts/eosio_build_amazon.sh +++ b/scripts/eosio_build_amazon.sh @@ -1,631 +1,253 @@ - OS_VER=$( grep VERSION_ID /etc/os-release | cut -d'=' -f2 | sed 's/[^0-9\.]//gI' | cut -d'.' -f1 ) - - MEM_MEG=$( free -m | sed -n 2p | tr -s ' ' | cut -d\ -f2 ) - CPU_SPEED=$( lscpu | grep "MHz" | tr -s ' ' | cut -d\ -f3 | cut -d'.' -f1 ) - CPU_CORE=$( lscpu -pCPU | grep -v "#" | wc -l ) - MEM_GIG=$(( ((MEM_MEG / 1000) / 2) )) - JOBS=$(( MEM_GIG > CPU_CORE ? CPU_CORE : MEM_GIG )) - - DISK_TOTAL=$( df -h . | grep /dev | tr -s ' ' | cut -d\ -f2 | sed 's/[^0-9]//' ) - DISK_AVAIL=$( df -h . | grep /dev | tr -s ' ' | cut -d\ -f4 | sed 's/[^0-9]//' ) - - printf "\\n\\tOS name: %s\\n" "${OS_NAME}" - printf "\\tOS Version: %s\\n" "${OS_VER}" - printf "\\tCPU speed: %sMhz\\n" "${CPU_SPEED}" - printf "\\tCPU cores: %s\\n" "${CPU_CORE}" - printf "\\tPhysical Memory: %sMgb\\n" "${MEM_MEG}" - printf "\\tDisk space total: %sGb\\n" "${DISK_TOTAL}" - printf "\\tDisk space available: %sG\\n" "${DISK_AVAIL}" - - if [ "${MEM_MEG}" -lt 7000 ]; then - printf "\\tYour system must have 7 or more Gigabytes of physical memory installed.\\n" - printf "\\texiting now.\\n" - exit 1 - fi - - if [[ "${OS_NAME}" == "Amazon Linux AMI" && "${OS_VER}" -lt 2017 ]]; then - printf "\\tYou must be running Amazon Linux 2017.09 or higher to install EOSIO.\\n" - printf "\\texiting now.\\n" - exit 1 - fi - - if [ "${DISK_AVAIL}" -lt "${DISK_MIN}" ]; then - printf "\\tYou must have at least %sGB of available storage to install EOSIO.\\n" "${DISK_MIN}" - printf "\\texiting now.\\n" - exit 1 - fi - - printf "\\n\\tChecking Yum installation.\\n" - if ! 
YUM=$( command -v yum 2>/dev/null ) - then - printf "\\n\\tYum must be installed to compile EOS.IO.\\n" - printf "\\n\\tExiting now.\\n" - exit 1 - fi - - printf "\\tYum installation found at %s.\\n" "${YUM}" - printf "\\tUpdating YUM.\\n" - if ! UPDATE=$( sudo "$YUM" -y update ) - then - printf "\\n\\tYUM update failed.\\n" - printf "\\n\\tExiting now.\\n" - exit 1 - fi - printf "\\t%s\\n" "${UPDATE}" - - if [[ "${OS_NAME}" == "Amazon Linux AMI" ]]; then - DEP_ARRAY=( git gcc72.x86_64 gcc72-c++.x86_64 autoconf automake libtool make bzip2 \ - bzip2-devel.x86_64 openssl-devel.x86_64 gmp-devel.x86_64 libstdc++72.x86_64 \ - python27.x86_64 python36-devel.x86_64 libedit-devel.x86_64 doxygen.x86_64 graphviz.x86_64) - else - DEP_ARRAY=( git gcc gcc-c++ autoconf automake libtool make bzip2 \ - bzip2-devel openssl-devel gmp-devel libstdc++ \ - python3 python3-devel libedit-devel doxygen graphviz) - fi - COUNT=1 - DISPLAY="" - DEP="" - - printf "\\n\\tChecking YUM for installed dependencies.\\n\\n" +if [ $1 == 1 ]; then ANSWER=1; else ANSWER=0; fi + +OS_VER=$( grep VERSION_ID /etc/os-release | cut -d'=' -f2 | sed 's/[^0-9\.]//gI' | cut -d'.' -f1 ) + +DISK_INSTALL=$( df -h . | tail -1 | tr -s ' ' | cut -d\ -f1 ) +DISK_TOTAL_KB=$( df . | tail -1 | awk '{print $2}' ) +DISK_AVAIL_KB=$( df . | tail -1 | awk '{print $4}' ) +DISK_TOTAL=$(( DISK_TOTAL_KB / 1048576 )) +DISK_AVAIL=$(( DISK_AVAIL_KB / 1048576 )) + +if [[ "${OS_NAME}" == "Amazon Linux AMI" ]]; then + DEP_ARRAY=( + sudo procps util-linux which gcc72 gcc72-c++ autoconf automake libtool make doxygen graphviz \ + bzip2 bzip2-devel openssl-devel gmp gmp-devel libstdc++72 python27 python27-devel python34 python34-devel \ + libedit-devel ncurses-devel swig wget file libcurl-devel libusb1-devel + ) +else + DEP_ARRAY=( + git procps-ng util-linux gcc gcc-c++ autoconf automake libtool make bzip2 \ + bzip2-devel openssl-devel gmp-devel libstdc++ libcurl-devel libusbx-devel \ + python3 python3-devel python-devel libedit-devel doxygen graphviz + ) +fi - for (( i=0; i<${#DEP_ARRAY[@]}; i++ )); - do - pkg=$("$YUM" info "${DEP_ARRAY[$i]}" 2>/dev/null | grep Repo | tr -s ' ' | cut -d: -f2 | sed 's/ //g' ) +COUNT=1 +DISPLAY="" +DEP="" - if [ "$pkg" != "installed" ]; then - DEP=$DEP" ${DEP_ARRAY[$i]} " - DISPLAY="${DISPLAY}${COUNT}. ${DEP_ARRAY[$i]}\\n\\t" - printf "\\tPackage %s ${bldred} NOT ${txtrst} found.\\n" "${DEP_ARRAY[$i]}" - (( COUNT++ )) - else - printf "\\tPackage %s found.\\n" "${DEP_ARRAY[$i]}" - continue - fi - done +if [[ "${OS_NAME}" == "Amazon Linux AMI" && "${OS_VER}" -lt 2017 ]]; then + printf "You must be running Amazon Linux 2017.09 or higher to install EOSIO.\\n" + printf "exiting now.\\n" + exit 1 +fi - if [ "${COUNT}" -gt 1 ]; then - printf "\\n\\tThe following dependencies are required to install EOSIO.\\n" - printf "\\n\\t${DISPLAY}\\n\\n" - printf "\\tDo you wish to install these dependencies?\\n" - select yn in "Yes" "No"; do - case $yn in - [Yy]* ) - printf "\\n\\n\\tInstalling dependencies.\\n\\n" - if ! 
sudo "${YUM}" -y install ${DEP} - then - printf "\\n\\tYUM dependency installation failed.\\n" - printf "\\n\\tExiting now.\\n" - exit 1 - else - printf "\\n\\tYUM dependencies installed successfully.\\n" - fi - break;; - [Nn]* ) printf "\\nUser aborting installation of required dependencies,\\n Exiting now.\\n"; exit;; - * ) echo "Please type 1 for yes or 2 for no.";; - esac - done - else - printf "\\n\\tNo required YUM dependencies to install.\\n" - fi +if [ "${DISK_AVAIL}" -lt "${DISK_MIN}" ]; then + printf "You must have at least %sGB of available storage to install EOSIO.\\n" "${DISK_MIN}" + printf "exiting now.\\n" + exit 1 +fi - if [ "${ENABLE_COVERAGE_TESTING}" = true ]; then - printf "\\n\\tChecking perl installation.\\n" - perl_bin=$( command -v perl 2>/dev/null ) - if [ -z "${perl_bin}" ]; then - printf "\\n\\tInstalling perl.\\n" - if ! sudo "${YUM}" -y install perl - then - printf "\\n\\tUnable to install perl at this time.\\n" - printf "\\n\\tExiting now.\\n\\n" - exit 1; - fi - else - printf "\\tPerl installation found at %s.\\n" "${perl_bin}" - fi - printf "\\n\\tChecking LCOV installation.\\n" - if [ ! -e "/usr/local/bin/lcov" ]; then - printf "\\n\\tLCOV installation not found.\\n" - printf "\\tInstalling LCOV.\\n" - if ! cd "${TEMP_DIR}" - then - printf "\\n\\tUnable to enter %s. Exiting now.\\n" "${TEMP_DIR}" - exit 1; - fi - if ! git clone "https://github.com/linux-test-project/lcov.git" - then - printf "\\n\\tUnable to clone LCOV at this time.\\n" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! cd "${TEMP_DIR}/lcov" - then - printf "\\n\\tUnable to enter %s/lcov. Exiting now.\\n" "${TEMP_DIR}" - exit 1; - fi - if ! sudo make install - then - printf "\\n\\tUnable to install LCOV at this time.\\n" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - rm -rf "${TEMP_DIR}/lcov" - printf "\\n\\tSuccessfully installed LCOV.\\n\\n" - else - printf "\\n\\tLCOV installation found @ /usr/local/bin.\\n" - fi - fi +printf "\\nChecking Yum installation.\\n" +if ! YUM=$( command -v yum 2>/dev/null ) +then + printf "\\nYum must be installed to compile EOS.IO.\\n" + printf "\\nExiting now.\\n" + exit 1 +fi +printf "Yum installation found at ${YUM}.\\n" - printf "\\n\\tChecking CMAKE installation.\\n" - if [ ! -e "${CMAKE}" ]; then - printf "\\tInstalling CMAKE.\\n" - if ! mkdir -p "${HOME}/opt/" 2>/dev/null - then - printf "\\n\\tUnable to create directory %s/opt.\\n" "${HOME}" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! cd "${HOME}/opt" - then - printf "\\n\\tUnable to enter directory %s/opt.\\n" "${HOME}" - printf "\\tExiting now.\\n\\n" +if [ $ANSWER != 1 ]; then read -p "Do you wish to update YUM repositories? (y/n) " ANSWER; fi +case $ANSWER in + 1 | [Yy]* ) + if ! sudo $YUM -y update; then + printf " - YUM update failed.\\n" exit 1; - fi - STATUS=$( curl -LO -w '%{http_code}' --connect-timeout 30 "https://cmake.org/files/v3.10/cmake-3.10.2.tar.gz" ) - if [ "${STATUS}" -ne 200 ]; then - printf "\\tUnable to clone CMAKE repo.\\n" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! tar xf "${HOME}/opt/cmake-3.10.2.tar.gz" - then - printf "\\tUnable to unarchive file %s/opt/cmake-3.10.2.tar.gz at this time.\\n" "${HOME}" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! rm -f "${HOME}/opt/cmake-3.10.2.tar.gz" - then - printf "\\tUnable to remove file %s/opt/cmake-3.10.2.tar.gz.\\n" "${HOME}" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! 
ln -s "${HOME}/opt/cmake-3.10.2/" "${HOME}/opt/cmake" - then - printf "\\tUnable to symlink directory %s/opt/cmake-3.10.2/ to %s/opt/cmake at this time.\\n" "${HOME}" "${HOME}" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! cd "${HOME}/opt/cmake/" - then - printf "\\n\\tUnable to change directory into %s/opt/cmake.\\n" "${HOME}" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! ./bootstrap - then - printf "\\tRunning bootstrap for CMAKE exited with the above error.\\n" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! make -j"${JOBS}" - then - printf "\\tError compiling CMAKE.\\n" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - printf "\\tCMAKE successfully installed @ %s.\\n" "${CMAKE}" + else + printf " - YUM update complete.\\n" + fi + ;; + [Nn]* ) echo " - Proceeding without update!";; + * ) echo "Please type 'y' for yes or 'n' for no."; exit;; +esac + +printf "Checking RPM for installed dependencies...\\n" +for (( i=0; i<${#DEP_ARRAY[@]}; i++ )); do + pkg=$( rpm -qi "${DEP_ARRAY[$i]}" 2>/dev/null | grep Name ) + if [[ -z $pkg ]]; then + DEP=$DEP" ${DEP_ARRAY[$i]} " + DISPLAY="${DISPLAY}${COUNT}. ${DEP_ARRAY[$i]}\\n" + printf " - Package %s ${bldred} NOT ${txtrst} found!\\n" "${DEP_ARRAY[$i]}" + (( COUNT++ )) else - printf "\\tCMAKE found @ %s.\\n" "${CMAKE}" + printf " - Package %s found.\\n" "${DEP_ARRAY[$i]}" + continue fi - - if [ -d "${HOME}/opt/boost_1_67_0" ]; then - if ! mv "${HOME}/opt/boost_1_67_0" "$BOOST_ROOT" - then - printf "\\n\\tUnable to move directory %s/opt/boost_1_67_0 to %s.\\n" "${HOME}" "${BOOST_ROOT}" - printf "\\n\\tExiting now.\\n" - exit 1 - fi - if [ -d "$BUILD_DIR" ]; then - if ! rm -rf "$BUILD_DIR" - then - printf "\\tUnable to remove directory %s. Please remove this directory and run this script %s again. 0\\n" "$BUILD_DIR" "${BASH_SOURCE[0]}" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - fi - fi - - printf "\\n\\tChecking boost library installation.\\n" - BVERSION=$( grep "BOOST_LIB_VERSION" "${BOOST_ROOT}/include/boost/version.hpp" 2>/dev/null \ - | tail -1 | tr -s ' ' | cut -d\ -f3 | sed 's/[^0-9\._]//gI' ) - if [ "${BVERSION}" != "1_67" ]; then - printf "\\tRemoving existing boost libraries in %s/opt/boost*.\\n" "${HOME}" - if ! rm -rf "${HOME}"/opt/boost* - then - printf "\\n\\tUnable to remove deprecated boost libraries at this time.\\n" - printf "\\n\\tExiting now.\\n" - exit 1 - fi - printf "\\tInstalling boost libraries.\\n" - if ! cd "${TEMP_DIR}" - then - printf "\\n\\tUnable to cd into directory %s at this time.\\n" "${TEMP_DIR}" - printf "\\n\\tExiting now.\\n" - exit 1 - fi - STATUS=$(curl -LO -w '%{http_code}' --connect-timeout 30 \ - "https://dl.bintray.com/boostorg/release/1.67.0/source/boost_1_67_0.tar.bz2" ) - if [ "${STATUS}" -ne 200 ]; then - printf "\\tUnable to download Boost libraries at this time.\\n" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! tar xf "${TEMP_DIR}/boost_1_67_0.tar.bz2" - then - printf "\\tUnable to decompress Boost libraries @ %s/boost_1_67_0.tar.bz2 at this time.\\n" "${TEMP_DIR}" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! rm -f "${TEMP_DIR}/boost_1_67_0.tar.bz2" - then - printf "\\tUnable to remove Boost libraries @ %s/boost_1_67_0.tar.bz2 at this time.\\n" "${TEMP_DIR}" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! cd "${TEMP_DIR}/boost_1_67_0/" - then - printf "\\tUnable to change directory into %s/boost_1_67_0/ at this time.\\n" "${TEMP_DIR}" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! 
./bootstrap.sh "--prefix=${BOOST_ROOT}" - then - printf "\\n\\tInstallation of boost libraries failed. 0\\n" - printf "\\n\\tExiting now.\\n" - exit 1 - fi - if ! "${TEMP_DIR}"/boost_1_67_0/b2 -j"${CPU_CORE}" install - then - printf "\\n\\tInstallation of boost libraries failed. 1\\n" - printf "\\n\\tExiting now.\\n" - exit 1 - fi - if ! rm -rf "${TEMP_DIR}/boost_1_67_0/" - then - printf "\\n\\tUnable to remove boost libraries directory @ %s/boost_1_67_0/.\\n" "${TEMP_DIR}" - printf "\\n\\tExiting now.\\n" - exit 1 - fi - if [ -d "$BUILD_DIR" ]; then - if ! rm -rf "$BUILD_DIR" - then - printf "\\tUnable to remove directory %s. Please remove this directory and run this script %s again. 0\\n" "$BUILD_DIR" "${BASH_SOURCE[0]}" - printf "\\tExiting now.\\n\\n" - exit 1; +done +if [ "${COUNT}" -gt 1 ]; then + printf "\\nThe following dependencies are required to install EOSIO:\\n" + printf "${DISPLAY}\\n\\n" + if [ $ANSWER != 1 ]; then read -p "Do you wish to install these dependencies? (y/n) " ANSWER; fi + case $ANSWER in + 1 | [Yy]* ) + if ! sudo $YUM -y install ${DEP}; then + printf " - YUM dependency installation failed!\\n" + exit 1; + else + printf " - YUM dependencies installed successfully.\\n" fi - fi - printf "\\tBoost successfully installed @ %s.\\n" "${BOOST_ROOT}" - else - printf "\\tBoost found at %s.\\n" "${BOOST_ROOT}" - fi - - printf "\\n\\tChecking MongoDB installation.\\n" - if [ ! -e "${MONGOD_CONF}" ]; then - printf "\\tInstalling MongoDB 3.6.3.\\n" - if ! cd "${HOME}/opt" - then - printf "\\n\\tUnable to cd into directory %s/opt.\\n" "${HOME}" - printf "\\n\\tExiting now.\\n" - exit 1; - fi - STATUS=$( curl -LO -w '%{http_code}' --connect-timeout 30 \ - "https://fastdl.mongodb.org/linux/mongodb-linux-x86_64-amazon-3.6.3.tgz" ) - if [ "${STATUS}" -ne 200 ]; then - printf "\\tUnable to download MongoDB at this time.\\n" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! tar xf "${HOME}/opt/mongodb-linux-x86_64-amazon-3.6.3.tgz" - then - printf "\\tUnable to decompress file %s at this time.\\n" \ - "${HOME}/opt/mongodb-linux-x86_64-amazon-3.6.3.tgz" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! rm -f "${HOME}/opt/mongodb-linux-x86_64-amazon-3.6.3.tgz" - then - printf "\\tUnable to remove file %s/opt/mongodb-linux-x86_64-amazon-3.6.3.tgz at this time.\\n" "${HOME}" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! ln -s "${HOME}/opt/mongodb-linux-x86_64-amazon-3.6.3/" "${HOME}/opt/mongodb" - then - printf "\\tUnable to symlink directory %s/opt/mongodb-linux-x86_64-amazon-3.6.3/ to directory %s/opt/mongodb at this time.\\n" \ - "${HOME}" "${HOME}" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! mkdir "${HOME}/opt/mongodb/data" - then - printf "\\tUnable to create directory %s/opt/mongodb/data at this time.\\n" "${HOME}" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! mkdir "${HOME}/opt/mongodb/log" - then - printf "\\tUnable to make directory %s/opt/mongodb/log at this time.\\n" "${HOME}" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! touch "${HOME}/opt/mongodb/log/mongodb.log" - then - printf "\\tUnable to create log file @ %s/opt/mongodb/log/mongodb.log at this time.\\n" "${HOME}" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - -if ! tee > "/dev/null" "${MONGOD_CONF}" < CPU_CORE ? 
CPU_CORE : MEM_GIG )) + +printf "\\nOS name: %s\\n" "${OS_NAME}" +printf "OS Version: %s\\n" "${OS_VER}" +printf "CPU speed: %sMhz\\n" "${CPU_SPEED}" +printf "CPU cores: %s\\n" "${CPU_CORE}" +printf "Physical Memory: %sMgb\\n" "${MEM_MEG}" +printf "Disk space total: %sGb\\n" "${DISK_TOTAL}" +printf "Disk space available: %sG\\n" "${DISK_AVAIL}" + +if [ "${MEM_MEG}" -lt 7000 ]; then + printf "Your system must have 7 or more Gigabytes of physical memory installed.\\n" + printf "exiting now.\\n" + exit 1 +fi - printf "\\n\\tChecking MongoDB C++ driver installation.\\n" - MONGO_INSTALL=true - if [ -e "/usr/local/lib64/libmongocxx-static.a" ]; then - MONGO_INSTALL=false - if [ ! -f /usr/local/lib64/pkgconfig/libmongocxx-static.pc ]; then - MONGO_INSTALL=true - else - if ! version=$( grep "Version:" /usr/local/lib64/pkgconfig/libmongocxx-static.pc | tr -s ' ' | awk '{print $2}' ) - then - printf "\\tUnable to determine mongodb-cxx-driver version.\\n" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - maj=$( echo "${version}" | cut -d'.' -f1 ) - min=$( echo "${version}" | cut -d'.' -f2 ) - if [ "${maj}" -gt 3 ]; then - MONGO_INSTALL=true - elif [ "${maj}" -eq 3 ] && [ "${min}" -lt 3 ]; then - MONGO_INSTALL=true - fi - fi - fi +printf "\\n" + + +printf "Checking CMAKE installation...\\n" +if [ ! -e $CMAKE ]; then + printf "Installing CMAKE...\\n" + curl -LO https://cmake.org/files/v$CMAKE_VERSION_MAJOR.$CMAKE_VERSION_MINOR/cmake-$CMAKE_VERSION.tar.gz \ + && tar -xzf cmake-$CMAKE_VERSION.tar.gz \ + && cd cmake-$CMAKE_VERSION \ + && ./bootstrap --prefix=$HOME \ + && make -j"${JOBS}" \ + && make install \ + && cd .. \ + && rm -f cmake-$CMAKE_VERSION.tar.gz \ + || exit 1 + printf " - CMAKE successfully installed @ ${CMAKE} \\n" +else + printf " - CMAKE found @ ${CMAKE}.\\n" +fi +if [ $? -ne 0 ]; then exit -1; fi + + +printf "\\n" + + +printf "Checking Boost library (${BOOST_VERSION}) installation...\\n" +BOOSTVERSION=$( grep "#define BOOST_VERSION" "$HOME/opt/boost/include/boost/version.hpp" 2>/dev/null | tail -1 | tr -s ' ' | cut -d\ -f3 ) +if [ "${BOOSTVERSION}" != "${BOOST_VERSION_MAJOR}0${BOOST_VERSION_MINOR}0${BOOST_VERSION_PATCH}" ]; then + printf "Installing Boost library...\\n" + curl -LO https://dl.bintray.com/boostorg/release/${BOOST_VERSION_MAJOR}.${BOOST_VERSION_MINOR}.${BOOST_VERSION_PATCH}/source/boost_$BOOST_VERSION.tar.bz2 \ + && tar -xjf boost_$BOOST_VERSION.tar.bz2 \ + && cd $BOOST_ROOT \ + && ./bootstrap.sh --prefix=$BOOST_ROOT \ + && ./b2 -q -j"${JOBS}" install \ + && cd .. \ + && rm -f boost_$BOOST_VERSION.tar.bz2 \ + && rm -rf $BOOST_LINK_LOCATION \ + && ln -s $BOOST_ROOT $BOOST_LINK_LOCATION \ + || exit 1 + printf " - Boost library successfully installed @ ${BOOST_ROOT} (Symlinked to ${BOOST_LINK_LOCATION}).\\n" +else + printf " - Boost library found with correct version @ ${BOOST_ROOT} (Symlinked to ${BOOST_LINK_LOCATION}).\\n" +fi +if [ $? -ne 0 ]; then exit -1; fi + + +printf "\\n" + + +printf "Checking MongoDB installation...\\n" +if [ ! 
-d $MONGODB_ROOT ]; then + printf "Installing MongoDB into ${MONGODB_ROOT}...\\n" + curl -OL https://fastdl.mongodb.org/linux/mongodb-linux-x86_64-amazon-$MONGODB_VERSION.tgz \ + && tar -xzf mongodb-linux-x86_64-amazon-$MONGODB_VERSION.tgz \ + && mv $SRC_LOCATION/mongodb-linux-x86_64-amazon-$MONGODB_VERSION $MONGODB_ROOT \ + && touch $MONGODB_LOG_LOCATION/mongod.log \ + && rm -f mongodb-linux-x86_64-amazon-$MONGODB_VERSION.tgz \ + && cp -f $REPO_ROOT/scripts/mongod.conf $MONGODB_CONF \ + && mkdir -p $MONGODB_DATA_LOCATION \ + && rm -rf $MONGODB_LINK_LOCATION \ + && rm -rf $BIN_LOCATION/mongod \ + && ln -s $MONGODB_ROOT $MONGODB_LINK_LOCATION \ + && ln -s $MONGODB_LINK_LOCATION/bin/mongod $BIN_LOCATION/mongod \ + || exit 1 + printf " - MongoDB successfully installed @ ${MONGODB_ROOT} (Symlinked to ${MONGODB_LINK_LOCATION}).\\n" +else + printf " - MongoDB found with correct version @ ${MONGODB_ROOT} (Symlinked to ${MONGODB_LINK_LOCATION}).\\n" +fi +if [ $? -ne 0 ]; then exit -1; fi +printf "Checking MongoDB C driver installation...\\n" +if [ ! -d $MONGO_C_DRIVER_ROOT ]; then + printf "Installing MongoDB C driver...\\n" + curl -LO https://github.com/mongodb/mongo-c-driver/releases/download/$MONGO_C_DRIVER_VERSION/mongo-c-driver-$MONGO_C_DRIVER_VERSION.tar.gz \ + && tar -xzf mongo-c-driver-$MONGO_C_DRIVER_VERSION.tar.gz \ + && cd mongo-c-driver-$MONGO_C_DRIVER_VERSION \ + && mkdir -p cmake-build \ + && cd cmake-build \ + && $CMAKE -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$HOME -DENABLE_BSON=ON -DENABLE_SSL=OPENSSL -DENABLE_AUTOMATIC_INIT_AND_CLEANUP=OFF -DENABLE_STATIC=ON .. \ + && make -j"${JOBS}" \ + && make install \ + && cd ../.. \ + && rm mongo-c-driver-$MONGO_C_DRIVER_VERSION.tar.gz \ + || exit 1 + printf " - MongoDB C driver successfully installed @ ${MONGO_C_DRIVER_ROOT}.\\n" +else + printf " - MongoDB C driver found with correct version @ ${MONGO_C_DRIVER_ROOT}.\\n" +fi +if [ $? -ne 0 ]; then exit -1; fi +printf "Checking MongoDB C++ driver installation...\\n" +if [ ! -d $MONGO_CXX_DRIVER_ROOT ]; then + printf "Installing MongoDB C++ driver...\\n" + curl -L https://github.com/mongodb/mongo-cxx-driver/archive/r$MONGO_CXX_DRIVER_VERSION.tar.gz -o mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION.tar.gz \ + && tar -xzf mongo-cxx-driver-r${MONGO_CXX_DRIVER_VERSION}.tar.gz \ + && cd mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION/build \ + && $CMAKE -DBUILD_SHARED_LIBS=OFF -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$HOME .. \ + && make -j"${JOBS}" VERBOSE=1 \ + && make install \ + && cd ../.. \ + && rm -f mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION.tar.gz \ + || exit 1 + printf " - MongoDB C++ driver successfully installed @ ${MONGO_CXX_DRIVER_ROOT}.\\n" +else + printf " - MongoDB C++ driver found with correct version @ ${MONGO_CXX_DRIVER_ROOT}.\\n" +fi +if [ $? -ne 0 ]; then exit -1; fi + + +printf "\\n" + + +printf "Checking LLVM 4 support...\\n" +if [ ! -d $LLVM_ROOT ]; then + printf "Installing LLVM 4...\\n" + cd ../opt \ + && git clone --depth 1 --single-branch --branch $LLVM_VERSION https://github.com/llvm-mirror/llvm.git llvm && cd llvm \ + && mkdir build \ + && cd build \ + && $CMAKE -G "Unix Makefiles" -DCMAKE_INSTALL_PREFIX="${LLVM_ROOT}" -DLLVM_TARGETS_TO_BUILD="host" -DLLVM_BUILD_TOOLS=false -DLLVM_ENABLE_RTTI=1 -DCMAKE_BUILD_TYPE="Release" .. \ + && make -j"${JOBS}" \ + && make install \ + && cd ../.. \ + || exit 1 + printf " - LLVM successfully installed @ ${LLVM_ROOT}\\n" +else + printf " - LLVM found @ ${LLVM_ROOT}.\\n" +fi +if [ $? 
-ne 0 ]; then exit -1; fi - if [ $MONGO_INSTALL == "true" ]; then - if ! cd "${TEMP_DIR}" - then - printf "\\tUnable to enter directory %s.\\n" "${TEMP_DIR}" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - STATUS=$( curl -LO -w '%{http_code}' --connect-timeout 30 https://github.com/mongodb/mongo-c-driver/releases/download/1.10.2/mongo-c-driver-1.10.2.tar.gz ) - if [ "${STATUS}" -ne 200 ]; then - if ! rm -f "${TEMP_DIR}/mongo-c-driver-1.10.2.tar.gz" - then - printf "\\tUnable to remove file %s/mongo-c-driver-1.10.2.tar.gz.\\n" "${TEMP_DIR}" - fi - printf "\\tUnable to download MongoDB C driver at this time.\\n" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! tar xf mongo-c-driver-1.10.2.tar.gz - then - printf "\\tUnable to unarchive file %s/mongo-c-driver-1.10.2.tar.gz.\\n" "${TEMP_DIR}" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! rm -f "${TEMP_DIR}/mongo-c-driver-1.10.2.tar.gz" - then - printf "\\tUnable to remove file mongo-c-driver-1.10.2.tar.gz.\\n" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! cd "${TEMP_DIR}"/mongo-c-driver-1.10.2 - then - printf "\\tUnable to cd into directory %s/mongo-c-driver-1.10.2.\\n" "${TEMP_DIR}" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! mkdir cmake-build - then - printf "\\tUnable to create directory %s/mongo-c-driver-1.10.2/cmake-build.\\n" "${TEMP_DIR}" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! cd cmake-build - then - printf "\\tUnable to enter directory %s/mongo-c-driver-1.10.2/cmake-build.\\n" "${TEMP_DIR}" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! "${CMAKE}" -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=/usr/local -DENABLE_BSON=ON \ - -DENABLE_SSL=OPENSSL -DENABLE_AUTOMATIC_INIT_AND_CLEANUP=ON -DENABLE_STATIC=ON .. - then - printf "\\tConfiguring MongoDB C driver has encountered the errors above.\\n" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! make -j"${CPU_CORE}" - then - printf "\\tError compiling MongoDB C driver.\\n" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! sudo make install - then - printf "\\tError installing MongoDB C driver.\\nMake sure you have sudo privileges.\\n" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! cd "${TEMP_DIR}" - then - printf "\\tUnable to enter directory %s.\\n" "${TEMP_DIR}" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! rm -rf "${TEMP_DIR}/mongo-c-driver-1.10.2" - then - printf "\\tUnable to remove directory %s/mongo-c-driver-1.10.2.\\n" "${TEMP_DIR}" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! git clone https://github.com/mongodb/mongo-cxx-driver.git --branch releases/v3.3 --depth 1 - then - printf "\\tUnable to clone MongoDB C++ driver at this time.\\n" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! cd "${TEMP_DIR}/mongo-cxx-driver/build" - then - printf "\\tUnable to enter directory %s/mongo-cxx-driver/build.\\n" "${TEMP_DIR}" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! "${CMAKE}" -DBUILD_SHARED_LIBS=OFF -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=/usr/local .. - then - printf "\\tCmake has encountered the above errors building the MongoDB C++ driver.\\n" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! sudo make -j"${CPU_CORE}" - then - printf "\\tError compiling MongoDB C++ driver.\\n" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! sudo make install - then - printf "\\tError installing MongoDB C++ driver.\\nMake sure you have sudo privileges.\\n" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! 
cd "${TEMP_DIR}" - then - printf "\\tUnable to enter directory %s.\\n" "${TEMP_DIR}" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! sudo rm -rf "${TEMP_DIR}/mongo-cxx-driver" - then - printf "\\tUnable to remove directory %s/mongo-cxx-driver.\\n" "${TEMP_DIR}" "${TEMP_DIR}" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - printf "\\tMongo C++ driver installed at /usr/local/lib64/libmongocxx-static.a.\\n" - else - printf "\\tMongo C++ driver found at /usr/local/lib64/libmongocxx-static.a.\\n" - fi - printf "\\n\\tChecking LLVM with WASM support.\\n" - if [ ! -d "${HOME}/opt/wasm/bin" ]; then - printf "\\tInstalling LLVM & WASM.\\n" - if ! cd "${TEMP_DIR}" - then - printf "\\n\\tUnable to cd into directory %s.\\n" "${TEMP_DIR}" - printf "\\n\\tExiting now.\\n" - exit 1; - fi - if ! mkdir "${TEMP_DIR}/llvm-compiler" 2>/dev/null - then - printf "\\n\\tUnable to make directory %s/llvm-compiler.\\n" "${TEMP_DIR}" - printf "\\n\\tExiting now.\\n" - exit 1; - fi - if ! cd "${TEMP_DIR}/llvm-compiler" - then - printf "\\n\\tUnable to change directory into %s/llvm-compiler.\\n" "${TEMP_DIR}" - printf "\\n\\tExiting now.\\n" - exit 1; - fi - if ! git clone --depth 1 --single-branch --branch release_40 https://github.com/llvm-mirror/llvm.git - then - printf "\\tUnable to clone llvm repo @ https://github.com/llvm-mirror/llvm.git.\\n" - printf "\\tExiting now.\\n\\n" - exit; - fi - if ! cd "${TEMP_DIR}/llvm-compiler/llvm/tools" - then - printf "\\n\\tUnable to change directory into %s/llvm-compiler/llvm/tools.\\n" "${TEMP_DIR}" - printf "\\n\\tExiting now.\\n" - exit 1; - fi - if ! git clone --depth 1 --single-branch --branch release_40 https://github.com/llvm-mirror/clang.git - then - printf "\\tUnable to clone clang repo @ https://github.com/llvm-mirror/clang.git.\\n" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! cd "${TEMP_DIR}/llvm-compiler/llvm" - then - printf "\\n\\tUnable to change directory into %s/llvm-compiler/llvm.\\n" "${TEMP_DIR}" - printf "\\n\\tExiting now.\\n" - exit 1; - fi - if ! mkdir "${TEMP_DIR}/llvm-compiler/llvm/build" 2>/dev/null - then - printf "\\n\\tUnable to create directory %s/llvm-compiler/llvm/build.\\n" "${TEMP_DIR}" - printf "\\n\\tExiting now.\\n" - exit 1; - fi - if ! cd "${TEMP_DIR}/llvm-compiler/llvm/build" - then - printf "\\n\\tUnable to change directory into %s/llvm-compiler/llvm/build.\\n" "${TEMP_DIR}" - printf "\\n\\tExiting now.\\n" - exit 1; - fi - if ! "$CMAKE" -G "Unix Makefiles" -DCMAKE_INSTALL_PREFIX="${HOME}/opt/wasm" \ - -DLLVM_ENABLE_RTTI=1 -DLLVM_EXPERIMENTAL_TARGETS_TO_BUILD="WebAssembly" \ - -DCMAKE_BUILD_TYPE="Release" .. - then - printf "\\tError compiling LLVM and clang with EXPERIMENTAL WASM support.\\n" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! make -j"${JOBS}" - then - printf "\\tError compiling LLVM and clang with EXPERIMENTAL WASM support.\\n" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! make install - then - printf "\\tError installing LLVM and clang with EXPERIMENTAL WASM support.\\n" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! rm -rf "${TEMP_DIR}/llvm-compiler" 2>/dev/null - then - printf "\\tError removing directory %s/llvm-compiler.\\n" "${TEMP_DIR}" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - printf "\\tWASM successfully installed at %s/opt/wasm.\\n" "${HOME}" - else - printf "\\tWASM found at %s/opt/wasm.\\n" "${HOME}" - fi +cd .. 
+printf "\\n" - function print_instructions() - { - printf "\\n\\t%s -f %s &\\n" "$( command -v mongod )" "${MONGOD_CONF}" - printf '\texport PATH=${HOME}/opt/mongodb/bin:$PATH \n' - printf "\\tcd %s; make test\\n\\n" "${BUILD_DIR}" +function print_instructions() { return 0 - } +} diff --git a/scripts/eosio_build_centos.sh b/scripts/eosio_build_centos.sh old mode 100644 new mode 100755 index fa5e3c61378..3d0056f0b36 --- a/scripts/eosio_build_centos.sh +++ b/scripts/eosio_build_centos.sh @@ -1,698 +1,305 @@ - OS_VER=$( grep VERSION_ID /etc/os-release | cut -d'=' -f2 | sed 's/[^0-9\.]//gI' \ - | cut -d'.' -f1 ) - - MEM_MEG=$( free -m | sed -n 2p | tr -s ' ' | cut -d\ -f2 ) - CPU_SPEED=$( lscpu | grep "MHz" | tr -s ' ' | cut -d\ -f3 | cut -d'.' -f1 ) - CPU_CORE=$( lscpu -pCPU | grep -v "#" | wc -l ) - MEM_GIG=$(( ((MEM_MEG / 1000) / 2) )) - JOBS=$(( MEM_GIG > CPU_CORE ? CPU_CORE : MEM_GIG )) - - DISK_INSTALL=$( df -h . | tail -1 | tr -s ' ' | cut -d\ -f1 ) - DISK_TOTAL_KB=$( df . | tail -1 | awk '{print $2}' ) - DISK_AVAIL_KB=$( df . | tail -1 | awk '{print $4}' ) - DISK_TOTAL=$(( DISK_TOTAL_KB / 1048576 )) - DISK_AVAIL=$(( DISK_AVAIL_KB / 1048576 )) - - printf "\\n\\tOS name: %s\\n" "${OS_NAME}" - printf "\\tOS Version: %s\\n" "${OS_VER}" - printf "\\tCPU speed: %sMhz\\n" "${CPU_SPEED}" - printf "\\tCPU cores: %s\\n" "${CPU_CORE}" - printf "\\tPhysical Memory: %s Mgb\\n" "${MEM_MEG}" - printf "\\tDisk install: %s\\n" "${DISK_INSTALL}" - printf "\\tDisk space total: %sG\\n" "${DISK_TOTAL%.*}" - printf "\\tDisk space available: %sG\\n" "${DISK_AVAIL%.*}" - printf "\\tConcurrent Jobs (make -j): ${JOBS}\\n" - - if [ "${MEM_MEG}" -lt 7000 ]; then - printf "\\n\\tYour system must have 7 or more Gigabytes of physical memory installed.\\n" - printf "\\tExiting now.\\n\\n" +if [ $1 == 1 ]; then ANSWER=1; else ANSWER=0; fi + +OS_VER=$( grep VERSION_ID /etc/os-release | cut -d'=' -f2 | sed 's/[^0-9\.]//gI' \ +| cut -d'.' -f1 ) + +MEM_MEG=$( free -m | sed -n 2p | tr -s ' ' | cut -d\ -f2 ) +CPU_SPEED=$( lscpu | grep "MHz" | tr -s ' ' | cut -d\ -f3 | cut -d'.' -f1 ) +CPU_CORE=$( nproc ) +MEM_GIG=$(( ((MEM_MEG / 1000) / 2) )) +export JOBS=$(( MEM_GIG > CPU_CORE ? CPU_CORE : MEM_GIG )) + +DISK_INSTALL=$( df -h . | tail -1 | tr -s ' ' | cut -d\ -f1 ) +DISK_TOTAL_KB=$( df . | tail -1 | awk '{print $2}' ) +DISK_AVAIL_KB=$( df . | tail -1 | awk '{print $4}' ) +DISK_TOTAL=$(( DISK_TOTAL_KB / 1048576 )) +DISK_AVAIL=$(( DISK_AVAIL_KB / 1048576 )) + +printf "\\nOS name: ${OS_NAME}\\n" +printf "OS Version: ${OS_VER}\\n" +printf "CPU speed: ${CPU_SPEED}Mhz\\n" +printf "CPU cores: ${CPU_CORE}\\n" +printf "Physical Memory: ${MEM_MEG}Mgb\\n" +printf "Disk install: ${DISK_INSTALL}\\n" +printf "Disk space total: ${DISK_TOTAL%.*}G\\n" +printf "Disk space available: ${DISK_AVAIL%.*}G\\n" +printf "Concurrent Jobs (make -j): ${JOBS}\\n" + +if [ "${MEM_MEG}" -lt 7000 ]; then + printf "\\nYour system must have 7 or more Gigabytes of physical memory installed.\\n" + printf "Exiting now.\\n\\n" + exit 1; +fi + +if [ "${OS_VER}" -lt 7 ]; then + printf "\\nYou must be running Centos 7 or higher to install EOSIO.\\n" + printf "Exiting now.\\n\\n" + exit 1; +fi + +if [ "${DISK_AVAIL%.*}" -lt "${DISK_MIN}" ]; then + printf "\\nYou must have at least %sGB of available storage to install EOSIO.\\n" "${DISK_MIN}" + printf "Exiting now.\\n\\n" + exit 1; +fi + +printf "\\n" + +printf "Checking Yum installation...\\n" +if ! YUM=$( command -v yum 2>/dev/null ); then + printf "!! 
Yum must be installed to compile EOS.IO !!\\n" + printf "Exiting now.\\n" exit 1; - fi - - if [ "${OS_VER}" -lt 7 ]; then - printf "\\n\\tYou must be running Centos 7 or higher to install EOSIO.\\n" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - - if [ "${DISK_AVAIL%.*}" -lt "${DISK_MIN}" ]; then - printf "\\n\\tYou must have at least %sGB of available storage to install EOSIO.\\n" "${DISK_MIN}" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - - printf "\\n" - - printf "\\tChecking Yum installation...\\n" - if ! YUM=$( command -v yum 2>/dev/null ); then - printf "\\t!! Yum must be installed to compile EOS.IO !!\\n" - printf "\\tExiting now.\\n" - exit 1; - fi - printf "\\t- Yum installation found at %s.\\n" "${YUM}" - - printf "\\tUpdating YUM repository...\\n" - if ! sudo "${YUM}" -y update > /dev/null 2>&1; then - printf "\\t!! YUM update failed !!\\n" - printf "\\tExiting now.\\n" - exit 1; - fi - printf "\\t - YUM repository successfully updated.\\n" - - printf "\\tChecking installation of Centos Software Collections Repository...\\n" - SCL=$( rpm -qa | grep -E 'centos-release-scl-[0-9].*' ) - if [ -z "${SCL}" ]; then - printf "\\t - Do you wish to install and enable this repository?\\n" - select yn in "Yes" "No"; do - case $yn in - [Yy]* ) - printf "\\tInstalling SCL...\\n" - if ! sudo "${YUM}" -y --enablerepo=extras install centos-release-scl 2>/dev/null; then - printf "\\t!! Centos Software Collections Repository installation failed !!\\n" - printf "\\tExiting now.\\n\\n" - exit 1; - else - printf "\\tCentos Software Collections Repository installed successfully.\\n" - fi - break;; - [Nn]* ) echo "\\tUser aborting installation of required Centos Software Collections Repository, Exiting now."; exit;; - * ) echo "Please type 1 for yes or 2 for no.";; - esac - done - else - printf "\\t - ${SCL} found.\\n" - fi - - printf "\\tChecking installation of devtoolset-7...\\n" - DEVTOOLSET=$( rpm -qa | grep -E 'devtoolset-7-[0-9].*' ) - if [ -z "${DEVTOOLSET}" ]; then - printf "\\tDo you wish to install devtoolset-7?\\n" - select yn in "Yes" "No"; do - case $yn in - [Yy]* ) - printf "\\tInstalling devtoolset-7...\\n" - if ! sudo "${YUM}" install -y devtoolset-7 2>/dev/null; then - printf "\\t!! Centos devtoolset-7 installation failed !!\\n" - printf "\\tExiting now.\\n" - exit 1; - else - printf "\\tCentos devtoolset installed successfully.\\n" - fi - break;; - [Nn]* ) echo "User aborting installation of devtoolset-7. Exiting now."; exit;; - * ) echo "Please type 1 for yes or 2 for no.";; - esac - done - else - printf "\\t - ${DEVTOOLSET} found.\\n" - fi - printf "\\tEnabling Centos devtoolset-7...\\n" - if ! source "/opt/rh/devtoolset-7/enable" 2>/dev/null; then - printf "\\t!! Unable to enable Centos devtoolset-7 at this time !!\\n" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - printf "\\tCentos devtoolset-7 successfully enabled.\\n" - - printf "\\tChecking installation of python33...\\n" - PYTHON33=$( rpm -qa | grep -E 'python33-[0-9].*' ) - if [ -z "${PYTHON33}" ]; then - printf "\\tDo you wish to install python33?\\n" - select yn in "Yes" "No"; do - case $yn in - [Yy]* ) - printf "\\tInstalling Python33...\\n" - if ! sudo "${YUM}" install -y python33.x86_64 2>/dev/null; then - printf "\\t!! Centos Python33 installation failed !!\\n" - printf "\\tExiting now.\\n" - exit 1; - else - printf "\\n\\tCentos Python33 installed successfully.\\n" - fi - - break;; - [Nn]* ) echo "User aborting installation of python33. 
Exiting now."; exit;; - * ) echo "Please type 1 for yes or 2 for no.";; - esac - done - else - printf "\\t - ${PYTHON33} found.\\n" - fi - - printf "\\n" - - DEP_ARRAY=( git autoconf automake bzip2 libtool ocaml.x86_64 doxygen graphviz-devel.x86_64 \ - libicu-devel.x86_64 bzip2.x86_64 bzip2-devel.x86_64 openssl-devel.x86_64 gmp-devel.x86_64 \ - python-devel.x86_64 gettext-devel.x86_64) - COUNT=1 - DISPLAY="" - DEP="" - - printf "\\tChecking YUM for installed dependencies.\\n" - - for (( i=0; i<${#DEP_ARRAY[@]}; i++ )); - do - pkg=$( "${YUM}" info "${DEP_ARRAY[$i]}" 2>/dev/null | grep Repo | tr -s ' ' | cut -d: -f2 | sed 's/ //g' ) - if [ "$pkg" != "installed" ]; then - DEP=$DEP" ${DEP_ARRAY[$i]} " - DISPLAY="${DISPLAY}${COUNT}. ${DEP_ARRAY[$i]}\\n\\t" - printf "\\t!! Package %s ${bldred} NOT ${txtrst} found !!\\n" "${DEP_ARRAY[$i]}" - (( COUNT++ )) - else - printf "\\t - Package %s found.\\n" "${DEP_ARRAY[$i]}" - continue - fi - done - - printf "\\n" - - if [ "${COUNT}" -gt 1 ]; then - printf "\\tThe following dependencies are required to install EOSIO.\\n" - printf "\\t${DISPLAY}\\n\\n" - printf "\\tDo you wish to install these dependencies?\\n" - select yn in "Yes" "No"; do - case $yn in - [Yy]* ) - printf "\\tInstalling dependencies\\n\\n" - if ! sudo "${YUM}" -y install ${DEP}; then - printf "\\t!! YUM dependency installation failed !!\\n" - printf "\\tExiting now.\\n" - exit 1; - else - printf "\\tYUM dependencies installed successfully.\\n" - fi - break;; - [Nn]* ) echo "User aborting installation of required dependencies, Exiting now."; exit;; - * ) echo "Please type 1 for yes or 2 for no.";; - esac - done - else - printf "\\t - No required YUM dependencies to install.\\n" - fi - - printf "\\n" - - if [ "${ENABLE_COVERAGE_TESTING}" = true ]; then - printf "\\tChecking perl installation...\\n" - perl_bin=$( command -v perl 2>/dev/null ) - if [ -z "${perl_bin}" ]; then - printf "\\tInstalling perl...\\n" - if ! sudo "${YUM}" -y install perl; then - printf "\\t!! Unable to install perl at this time !!\\n" - printf "\\tExiting now.\\n" - exit 1; - fi - else - printf "\\t - Perl installation found at %s.\\n" "${perl_bin}" - fi - - printf "\\n" - - printf "\\tChecking LCOV installation...\\n" - lcov=$( command -v lcov 2>/dev/null ) - if [ -z "${lcov}" ]; then - printf "\\tInstalling LCOV...\\n" - if ! cd "${TEMP_DIR}"; then - printf "\\t!! Unable to enter directory %s !!\\n" "${TEMP_DIR}" - printf "\\tExiting now.\\n" - exit 1; - fi - LCOVURL="https://github.com/linux-test-project/lcov.git" - if ! git clone "${LCOVURL}"; then - printf "\\t!! Unable to clone LCOV from ${LCOVURL} !!\\n" - printf "\\tExiting now.\\n" - exit 1; - fi - if ! cd "${TEMP_DIR}/lcov"; then - printf "\\t!! Unable to enter directory %s/lcov !!\\n" "${TEMP_DIR}" - printf "\\tExiting now.\\n" - exit 1; - fi - if ! sudo make install; then - printf "\\t!! Unable to install LCOV at this time !!\\n" - printf "\\tExiting now.\\n" - exit 1; - fi - if ! cd "${CWD}"; then - printf "\\t!! Unable to enter directory %s !!\\n" "${CWD}" - printf "\\tExiting now.\\n" - exit 1; - fi - if ! rm -rf "${TEMP_DIR}/lcov"; then - printf "\\t!! Unable to remove directory %s/lcov !!\\n" "${TEMP_DIR}" - printf "\\tExiting now.\\n" - exit 1; - fi - printf "\\tSuccessfully installed LCOV.\\n" - else - printf "\\t - LCOV installation found @ %s.\\n" "${lcov}" - fi - fi - - printf "\\n" - - printf "\\tChecking CMAKE installation...\\n" - if [ ! -e "${CMAKE}" ]; then - printf "\\tInstalling CMAKE...\\n" - if [ ! -d "${HOME}/opt" ]; then - if ! 
mkdir "${HOME}/opt"; then - printf "\\t!! Unable to create directory %s/opt !!\\n" "${HOME}" - printf "\\tExiting now.\\n" - exit 1; - fi - fi - if ! cd "${HOME}/opt"; then - printf "\\t!! Unable to enter directory %s/opt !!\\n" "${HOME}" - printf "\\tExiting now.\\n" - exit 1; - fi - CMAKETGZ="cmake-3.10.2.tar.gz" - CMAKEURL="https://cmake.org/files/v3.10/${CMAKETGZ}" - STATUS=$(curl -LO -w '%{http_code}' --connect-timeout 30 "${CMAKEURL}") - if [ "${STATUS}" -ne 200 ]; then - printf "\\t!! Unable to download CMAKE from ${CMAKEURL} !!\\n" - printf "\\tExiting now.\\n" - exit 1; - fi - if ! tar xf "${HOME}/opt/${CMAKETGZ}"; then - printf "\\t!! Unable to unarchive %s/opt/CMAKETGZ} !!\\n" "${HOME}" - printf "\\tExiting now.\\n" - exit 1; - fi - if ! rm -f "${HOME}/opt/${CMAKETGZ}"; then - printf "\\t!! Unable to remove %s/opt/${CMAKETGZ} !!\\n" "${HOME}" - printf "\\tExiting now.\\n" - exit 1; - fi - CMAKEFOLDER=$(echo $CMAKETGZ | sed 's/.tar.gz//g') - if ! ln -s "${HOME}/opt/${CMAKEFOLDER}/" "${HOME}/opt/cmake"; then - printf "\\t!! Unable to symlink %s/opt/${CMAKEFOLDER} to %s/opt/${CMAKEFOLDER}/cmake !!\\n" "${HOME}" "${HOME}" - printf "\\tExiting now.\\n" - exit 1; - fi - if ! cd "${HOME}/opt/cmake"; then - printf "\\t!! Unable to enter directory %s/opt/cmake !!\\n" "${HOME}" - printf "\\tExiting now.\\n" - exit 1; - fi - if ! ./bootstrap; then - printf "\\t!! Error running bootstrap for CMAKE from $(pwd) !!\\n" - printf "\\tExiting now.\\n" - exit 1; - fi - if ! make -j"${JOBS}"; then - printf "\\t!! Compiling CMAKE has exited with the above error !!\\n" - printf "\\tExiting now.\\n" - exit 1; - fi - printf "\\tCMAKE successfully installed @ %s.\\n\\n" "${CMAKE}" - else - printf "\\t - CMAKE found @ %s.\\n" "${CMAKE}" - fi - - BOOSTTGZ="boost_1_67_0.tar.bz2" - BOOSTFOLDER=$(echo "${BOOSTTGZ}" | sed 's/.tar.bz2//g') - if [ -d "${HOME}/opt/${BOOSTFOLDER}" ]; then - if ! mv "${HOME}/opt/${BOOSTFOLDER}" "${BOOST_ROOT}"; then - printf "\\t!! Unable to move directory %s/opt/${BOOSTFOLDER} to %s !!\\n" "${HOME}" "${BOOST_ROOT}" - printf "\\tExiting now.\\n" - exit 1 - fi - if [ -d "$BUILD_DIR" ]; then - if ! rm -rf "$BUILD_DIR"; then - printf "\\t!! Unable to remove directory %s: Please delete it and try again !! 0\\n" "$BUILD_DIR" "${BASH_SOURCE[0]}" - printf "\\tExiting now.\\n" - exit 1; - fi - fi - fi +fi +printf " - Yum installation found at %s.\\n" "${YUM}" - printf "\\tChecking boost library installation...\\n" - BOOSTVERSION=$( grep "#define BOOST_VERSION" "${BOOST_ROOT}/include/boost/version.hpp" 2>/dev/null \ - | tail -1 | tr -s ' ' | cut -d\ -f3) - if [ "${BOOSTVERSION}" != "106700" ]; then - printf "\\tRemoving existing boost libraries in %s/opt/boost*...\\n" "${HOME}" - if ! rm -rf "${HOME}"/opt/boost*; then - printf "\\t!! Unable to remove deprecated boost libraries at %s/opt/boost* !!\\n" "${HOME}" - printf "\\tExiting now.\\n" - exit 1; - fi - printf "\\tInstalling boost libraries...\\n" - if ! cd "${TEMP_DIR}"; then - printf "\\t!! Unable to enter directory %s !!\\n" "${TEMP_DIR}" - printf "\\tExiting now.\\n" - exit 1; - fi - BOOSTURL="https://dl.bintray.com/boostorg/release/1.67.0/source/${BOOSTTGZ}" - STATUS=$(curl -LO -w '%{http_code}' --connect-timeout 30 "${BOOSTURL}") - if [ "${STATUS}" -ne 200 ]; then - printf "\\t!! Unable to download Boost libraries from ${BOOSTURL} !!\\n" - printf "\\tExiting now.\\n" - exit 1; - fi - if ! tar xf "${TEMP_DIR}/${BOOSTTGZ}"; then - printf "\\t!! 
Unable to unarchive file %s/${BOOSTTGZ} !!\\n" "${TEMP_DIR}" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! rm -f "${TEMP_DIR}/${BOOSTTGZ}"; then - printf "\\t!! Unable to remove file %s/${BOOSTTGZ} !!\\n" "${TEMP_DIR}" - printf "\\tExiting now.\\n" - exit 1; - fi - if ! cd "${TEMP_DIR}/${BOOSTFOLDER}/"; then - printf "\\t!! Unable to enter directory %s/${BOOSTFOLDER} !!\\n" "${TEMP_DIR}" - printf "\\tExiting now.\\n" - exit 1; - fi - if ! ./bootstrap.sh --prefix=$BOOST_ROOT; then - printf "\\t!! Installation of boost libraries failed with the above error !! 0\\n" - printf "\\tExiting now.\\n" - exit 1; - fi - if ! $TEMP_DIR/$BOOSTFOLDER/b2 -j"${JOBS}" install; then - printf "\\t!! Installation of boost libraries in ${BOOST_ROOT} failed with the above error !! 1\\n" - printf "\\tExiting now.\\n" - exit 1; - fi - if ! rm -rf "${TEMP_DIR}/${BOOSTFOLDER}/"; then - printf "\\t!! Unable to remove directory %s/boost_1_67_0 !!\\n" "${TEMP_DIR}" - printf "\\tExiting now.\\n" +if [ $ANSWER != 1 ]; then read -p "Do you wish to update YUM repositories? (y/n) " ANSWER; fi +case $ANSWER in + 1 | [Yy]* ) + if ! "${YUM}" -y update; then + printf " - YUM update failed.\\n" exit 1; - fi - if [ -d "$BUILD_DIR" ]; then - if ! rm -rf "$BUILD_DIR"; then - printf "\\t!!Unable to remove directory %s: Please manually remove and try again !! 0\\n" "$BUILD_DIR" "${BASH_SOURCE[0]}" - printf "\\tExiting now.\\n" + else + printf " - YUM update complete.\\n" + fi + ;; + [Nn]* ) echo " - Proceeding without update!";; + * ) echo "Please type 'y' for yes or 'n' for no."; exit;; +esac + +printf "Checking installation of Centos Software Collections Repository...\\n" +SCL=$( rpm -qa | grep -E 'centos-release-scl-[0-9].*' ) +if [ -z "${SCL}" ]; then + if [ $ANSWER != 1 ]; then read -p "Do you wish to install and enable this repository? (y/n)? " ANSWER; fi + case $ANSWER in + 1 | [Yy]* ) + printf "Installing SCL...\\n" + if ! "${YUM}" -y --enablerepo=extras install centos-release-scl 2>/dev/null; then + printf "!! Centos Software Collections Repository installation failed !!\\n" + printf "Exiting now.\\n\\n" exit 1; + else + printf "Centos Software Collections Repository installed successfully.\\n" fi - fi - printf "\\tBoost successfully installed @ %s.\\n" "${BOOST_ROOT}" - else - printf "\\t - Boost ${BOOSTVERSION} found at %s.\\n" "${BOOST_ROOT}" - fi - - printf "\\n" - - printf "\\tChecking MongoDB installation.\\n" - if [ ! -e "${MONGOD_CONF}" ]; then - printf "\\tInstalling MongoDB 3.6.3...\\n" - if [ ! -d "${HOME}/opt" ]; then - if ! mkdir "${HOME}/opt"; then - printf "\\t!! Unable to create directory %s/opt !!\\n" "${HOME}" - printf "\\tExiting now.\\n" - exit 1; + ;; + [Nn]* ) echo "User aborting installation of required Centos Software Collections Repository, Exiting now."; exit;; + * ) echo "Please type 'y' for yes or 'n' for no."; exit;; + esac +else + printf " - ${SCL} found.\\n" +fi + +printf "Checking installation of devtoolset-7...\\n" +DEVTOOLSET=$( rpm -qa | grep -E 'devtoolset-7-[0-9].*' ) +if [ -z "${DEVTOOLSET}" ]; then + if [ $ANSWER != 1 ]; then read -p "Do you wish to install devtoolset-7? (y/n)? " ANSWER; fi + case $ANSWER in + 1 | [Yy]* ) + printf "Installing devtoolset-7...\\n" + if ! "${YUM}" install -y devtoolset-7; then + printf "!! Centos devtoolset-7 installation failed !!\\n" + printf "Exiting now.\\n" + exit 1; + else + printf " - Centos devtoolset installed successfully!\\n" fi - fi - if ! cd "${HOME}/opt"; then - printf "\\t!! 
Unable to enter directory %s/opt !!\\n" "${HOME}" - printf "\\tExiting now.\\n" - exit 1; - fi - MONGOTGZ="mongodb-linux-x86_64-3.6.3.tgz" - MONGOURL="https://fastdl.mongodb.org/linux/${MONGOTGZ}" - STATUS=$(curl -LO -w '%{http_code}' --connect-timeout 30 "${MONGOURL}") - if [ "${STATUS}" -ne 200 ]; then - printf "\\t!! Unable to download MongoDB from ${MONGOURL} !!\\n" - printf "\\tExiting now.\\n" - exit 1; - fi - if ! tar xf "${HOME}/opt/${MONGOTGZ}"; then - printf "\\t!! Unable to unarchive file %s/opt/${MONGOTGZ} !!\\n" "${HOME}" - printf "\\tExiting now.\\n" - exit 1; - fi - if ! rm -f "${HOME}/opt/${MONGOTGZ}"; then - printf "\\t!! Unable to remove file %s/opt/${MONGOTGZ} !!\\n" "${HOME}" - printf "\\tExiting now.\\n" - exit 1; - fi - MONGOFOLDER=$(echo "${MONGOTGZ}" | sed 's/.tgz//g') - if ! ln -s "${HOME}/opt/${MONGOFOLDER}/" "${HOME}/opt/mongodb"; then - printf "\\t!! Unable to symlink file %s/opt/${MONGOFOLDER} to %s/opt/mongodb !!\\n" "${HOME}" "${HOME}" - printf "\\tExiting now.\\n" - exit 1; - fi - if ! mkdir "${HOME}/opt/mongodb/data"; then - printf "\\t!! Unable to create directory %s/opt/mongodb/data !!\\n" "${HOME}" - printf "\\tExiting now.\\n" - exit 1; - fi - if ! mkdir "${HOME}/opt/mongodb/log"; then - printf "\\t!! Unable to create directory %s/opt/mongodb/log !!\\n" "${HOME}" - printf "\\tExiting now.\\n" - exit 1; - fi - if ! touch "${HOME}/opt/mongodb/log/mongodb.log"; then - printf "\\t!! Unable to create file %s/opt/mongodb/log/mongodb.log !!\\n" "${HOME}" - printf "\\tExiting now.\\n" - exit 1; - fi - - printf "\\n" - - if ! tee > /dev/null "${MONGOD_CONF}" </dev/null | grep Name ) + if [[ -z $pkg ]]; then + DEP=$DEP" ${DEP_ARRAY[$i]} " + DISPLAY="${DISPLAY}${COUNT}. ${DEP_ARRAY[$i]}\\n" + printf " - Package %s ${bldred} NOT ${txtrst} found!\\n" "${DEP_ARRAY[$i]}" + (( COUNT++ )) else - printf "\\t - MongoDB config found at %s.\\n" "${MONGOD_CONF}" + printf " - Package %s found.\\n" "${DEP_ARRAY[$i]}" + continue fi - - printf "\\tChecking MongoDB C++ driver installation...\\n" - MONGO_INSTALL=true - if [ -e "/usr/local/lib64/libmongocxx-static.a" ]; then - MONGO_INSTALL=false - if [ ! -f /usr/local/lib64/pkgconfig/libmongocxx-static.pc ]; then - MONGO_INSTALL=true - else - if ! version=$( grep "Version:" /usr/local/lib64/pkgconfig/libmongocxx-static.pc | tr -s ' ' | awk '{print $2}' ); then - printf "\\t!! Unable to determine mongodb-cxx-driver version !!\\n" - printf "\\tExiting now.\\n" +done +if [ "${COUNT}" -gt 1 ]; then + printf "\\nThe following dependencies are required to install EOSIO:\\n" + printf "${DISPLAY}\\n\\n" + if [ $ANSWER != 1 ]; then read -p "Do you wish to install these dependencies? (y/n) " ANSWER; fi + case $ANSWER in + 1 | [Yy]* ) + if ! "${YUM}" -y install ${DEP}; then + printf " - YUM dependency installation failed!\\n" exit 1; + else + printf " - YUM dependencies installed successfully.\\n" fi - maj=$( echo "${version}" | cut -d'.' -f1 ) - min=$( echo "${version}" | cut -d'.' -f2 ) - if [ "${maj}" -gt 3 ]; then - MONGO_INSTALL=true - elif [ "${maj}" -eq 3 ] && [ "${min}" -lt 3 ]; then - MONGO_INSTALL=true - fi - fi - fi - - if [ $MONGO_INSTALL == "true" ]; then - if ! cd "${TEMP_DIR}"; then - printf "\\t!! 
Unable to enter directory %s !!\\n" "${TEMP_DIR}" - printf "\\tExiting now.\\n" - exit 1; - fi - MONGODRIVERTGZ="mongo-c-driver-1.10.2.tar.gz" - MONGODRIVERURL="https://github.com/mongodb/mongo-c-driver/releases/download/1.10.2/${MONGODRIVERTGZ}" - STATUS=$( curl -LO -w '%{http_code}' --connect-timeout 30 "${MONGODRIVERURL}" ) - if [ "${STATUS}" -ne 200 ]; then - if ! rm -f "${TEMP_DIR}/${MONGODRIVERTGZ}"; then - printf "\\t!! Unable to remove file %s/${MONGODRIVERTGZ} !!\\n" "${TEMP_DIR}" - fi - printf "\\t!! Unable to download MongoDB C driver at this time !!\\n" - printf "\\tExiting now.\\n" - exit 1; - fi - if ! tar xf "${MONGODRIVERTGZ}"; then - printf "\\t!! Unable to unarchive file %s/${MONGODRIVERTGZ} !!\\n" "${TEMP_DIR}" - printf "\\tExiting now.\\n" - exit 1; - fi - if ! rm -f "${TEMP_DIR}/${MONGODRIVERTGZ}"; then - printf "\\t!! Unable to remove file ${MONGODRIVERTGZ} !!\\n" - printf "\\tExiting now.\\n" - exit 1; - fi - MONGODRIVERFOLDER=$(echo $MONGODRIVERTGZ | sed 's/.tar.gz//g') - if ! cd "${TEMP_DIR}/${MONGODRIVERFOLDER}"; then - printf "\\t!! Unable to cd into directory %s/${MONGODRIVERFOLDER} !!\\n" "${TEMP_DIR}" - printf "\\tExiting now.\\n" - exit 1; - fi - if ! mkdir cmake-build; then - printf "\\t!! Unable to create directory %s/${MONGODRIVERFOLDER}/cmake-build !!\\n" "${TEMP_DIR}" - printf "\\tExiting now.\\n" - exit 1; - fi - if ! cd cmake-build; then - printf "\\t!! Unable to enter directory %s/${MONGODRIVERFOLDER}/cmake-build !!\\n" "${TEMP_DIR}" - printf "\\tExiting now.\\n" - exit 1; - fi - if ! "${CMAKE}" -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=/usr/local -DENABLE_BSON=ON \ - -DENABLE_SSL=OPENSSL -DENABLE_AUTOMATIC_INIT_AND_CLEANUP=OFF -DENABLE_STATIC=ON ..;then - printf "\\t!! Configuring MongoDB C driver has encountered the errors above !!\\n" - printf "\\tExiting now.\\n" - exit 1; - fi - if ! make -j"${JOBS}"; then - printf "\\t!! Error compiling MongoDB C driver !!\\n" - printf "\\tExiting now.\\n" - exit 1; - fi - if ! sudo make install; then - printf "\\t!! Error installing MongoDB C driver: Make sure you have sudo privileges !!\\n" - printf "\\tExiting now.\\n" - exit 1; - fi - if ! cd "${TEMP_DIR}"; then - printf "\\t!! Unable to enter directory %s !!\\n" "${TEMP_DIR}" - printf "\\tExiting now.\\n" - exit 1; - fi - if ! rm -rf "${TEMP_DIR}/${MONGODRIVERFOLDER}"; then - printf "\\t!! Unable to remove directory %s/${MONGODRIVERFOLDER} !!\\n" "${TEMP_DIR}" - printf "\\tExiting now.\\n" - exit 1; - fi - if ! git clone https://github.com/mongodb/mongo-cxx-driver.git --branch releases/v3.3 --depth 1; then - printf "\\t!! Unable to clone MongoDB C++ driver at this time. !!\\n" - printf "\\tExiting now.\\n" - exit 1; - fi - if ! cd "${TEMP_DIR}/mongo-cxx-driver/build"; then - printf "\\t!! Unable to enter directory %s/mongo-cxx-driver/build !!\\n" "${TEMP_DIR}" - printf "\\tExiting now.\\n" - exit 1; - fi - if ! "${CMAKE}" -DBUILD_SHARED_LIBS=OFF -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=/usr/local ..;then - printf "\\t!! Cmake has encountered the above errors building the MongoDB C++ driver !!\\n" - printf "\\tExiting now.\\n" - exit 1; - fi - if ! sudo make -j"${JOBS}"; then - printf "\\t!! Error compiling MongoDB C++ driver !!\\n" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! sudo make install; then - printf "\\t!! Error installing MongoDB C++ driver.\\nMake sure you have sudo privileges !!\\n" - printf "\\tExiting now.\\n" - exit 1; - fi - if ! cd "${TEMP_DIR}"; then - printf "\\t!! 
Unable to enter directory %s !!\\n" "${TEMP_DIR}" - printf "\\tExiting now.\\n" - exit 1; - fi - if ! sudo rm -rf "${TEMP_DIR}/mongo-cxx-driver"; then - printf "\\t!! Unable to remove directory %s/mongo-cxx-driver !!\\n" "${TEMP_DIR}" "${TEMP_DIR}" - printf "\\tExiting now.\\n" - exit 1; - fi - printf "\\tMongo C++ driver installed at /usr/local/lib64/libmongocxx-static.a.\\n" - else - printf "\\t - Mongo C++ driver found at /usr/local/lib64/libmongocxx-static.a.\\n" - fi - - printf "\\n" - - printf "\\tChecking LLVM with WASM support installation...\\n" - if [ ! -d "${HOME}/opt/wasm/bin" ]; then - printf "\\tInstalling LLVM with WASM...\\n" - if ! cd "${TEMP_DIR}"; then - printf "\\t!! Unable to enter directory %s !!\\n" "${TEMP_DIR}" - printf "\\tExiting now.\\n" - exit 1; - fi - if ! mkdir "${TEMP_DIR}/llvm-compiler" 2>/dev/null; then - printf "\\t!! Unable to create directory %s/llvm-compiler !!\\n" "${TEMP_DIR}" - printf "\\tExiting now.\\n" - exit 1; - fi - if ! cd "${TEMP_DIR}/llvm-compiler"; then - printf "\\t!! Unable to enter directory %s/llvm-compiler !!\\n" "${TEMP_DIR}" - printf "\\tExiting now.\\n" - exit 1; - fi - LLVMURL="https://github.com/llvm-mirror/llvm.git" - if ! git clone --depth 1 --single-branch --branch release_40 "${LLVMURL}"; then - printf "\\t!! Unable to clone llvm repo from ${LLVMURL} !!\\n" - printf "\\tExiting now.\\n" - exit 1; - fi - LLVMLOCATION="llvm-compiler/llvm/tools" - if ! cd "${TEMP_DIR}/${LLVMLOCATION}"; then - printf "\\t!! Unable to enter directory %s/${LLVMLOCATION} !!\\n" "${TEMP_DIR}" - printf "\\tExiting now.\\n" - exit 1; - fi - CLANGURL="https://github.com/llvm-mirror/clang.git" - if ! git clone --depth 1 --single-branch --branch release_40 "${CLANGURL}"; then - printf "\\t!! Unable to clone clang repo from ${CLANGURL} !!\\n" - printf "\\tExiting now.\\n" - exit 1; - fi - LLVMMIDLOCATION=$(echo $LLVMLOCATION | sed 's/\/tools//g') - if ! cd "${TEMP_DIR}/${LLVMMIDLOCATION}"; then - printf "\\t!! Unable to enter directory %s/${LLVMMIDLOCATION} !!\\n" "${TEMP_DIR}" - printf "\\n\\tExiting now.\\n" - exit 1; - fi - if ! mkdir "${TEMP_DIR}/${LLVMMIDLOCATION}/build" 2>/dev/null; then - printf "\\t!! Unable to create directory %s/${LLVMMIDLOCATION}/build !!\\n" "${TEMP_DIR}" - printf "\\tExiting now.\\n" - exit 1; - fi - if ! cd "${TEMP_DIR}/${LLVMMIDLOCATION}/build"; then - printf "\\t!! Unable to enter directory %s/${LLVMMIDLOCATION}/build !!\\n" "${TEMP_DIR}" - printf "\\tExiting now.\\n" - exit 1; - fi - if ! "${CMAKE}" -G "Unix Makefiles" -DCMAKE_INSTALL_PREFIX="${HOME}/opt/wasm" \ - -DLLVM_TARGETS_TO_BUILD="host" -DLLVM_EXPERIMENTAL_TARGETS_TO_BUILD="WebAssembly" \ - -DLLVM_ENABLE_RTTI=1 -DCMAKE_BUILD_TYPE="Release" ..; then - printf "\\t!! CMake has exited with the above error !!\\n" - printf "\\tExiting now.\\n" - exit 1; - fi - if ! make -j"${JOBS}"; then - printf "\\t!! Compiling LLVM with EXPERIMENTAL WASM support has exited with the above errors !!\\n" - printf "\\tExiting now.\\n" - exit 1; - fi - if ! make install; then - printf "\\t!! Installing LLVM with EXPERIMENTAL WASM support has exited with the above errors !!\\n" - printf "\\tExiting now.\\n" - exit 1; - fi - LLVMFOLDER=$(echo $LLVMMIDLOCATION | | sed 's/\/llvm//g') - if ! rm -rf "${TEMP_DIR}/${LLVMFOLDER}" 2>/dev/null; then - printf "\\t!! 
Unable to remove directory %s/${LLVMFOLDER} !!\\n" "${TEMP_DIR}" - printf "\\tExiting now.\\n" - exit 1; - fi - printf "\\tWASM compiler successfully installed at %s/opt/wasm\\n" "${HOME}" - else - printf "\\t - WASM found at %s/opt/wasm\\n" "${HOME}" - fi - - printf "\\n" - - function print_instructions() - { - printf "\\t%s -f %s &\\n" "$( command -v mongod )" "${MONGOD_CONF}" - printf "\\tsource /opt/rh/python33/enable\\n" - printf '\texport PATH=${HOME}/opt/mongodb/bin:$PATH\n' - printf "\\tcd %s; make test\\n\\n" "${BUILD_DIR}" - return 0 - } + ;; + [Nn]* ) echo "User aborting installation of required dependencies, Exiting now."; exit;; + * ) echo "Please type 'y' for yes or 'n' for no."; exit;; + esac +else + printf " - No required YUM dependencies to install.\\n" +fi + +if [ -d /opt/rh/python33 ]; then + printf "Enabling python33...\\n" + source /opt/rh/python33/enable || exit 1 + printf " - Python33 successfully enabled!\\n" +fi + +printf "\\n" + +printf "Checking CMAKE installation...\\n" +if [ ! -e $CMAKE ]; then + printf "Installing CMAKE...\\n" + curl -LO https://cmake.org/files/v$CMAKE_VERSION_MAJOR.$CMAKE_VERSION_MINOR/cmake-$CMAKE_VERSION.tar.gz \ + && tar -xzf cmake-$CMAKE_VERSION.tar.gz \ + && cd cmake-$CMAKE_VERSION \ + && ./bootstrap --prefix=$HOME \ + && make -j"${JOBS}" \ + && make install \ + && cd .. \ + && rm -f cmake-$CMAKE_VERSION.tar.gz \ + || exit 1 + printf " - CMAKE successfully installed @ ${CMAKE} \\n" +else + printf " - CMAKE found @ ${CMAKE}.\\n" +fi +if [ $? -ne 0 ]; then exit -1; fi + + +printf "\\n" + + +export CPATH="$CPATH:/opt/rh/python33/root/usr/include/python3.3m" # m on the end causes problems with boost finding python3 +printf "Checking Boost library (${BOOST_VERSION}) installation...\\n" +BOOSTVERSION=$( grep "#define BOOST_VERSION" "$HOME/opt/boost/include/boost/version.hpp" 2>/dev/null | tail -1 | tr -s ' ' | cut -d\ -f3 ) +if [ "${BOOSTVERSION}" != "${BOOST_VERSION_MAJOR}0${BOOST_VERSION_MINOR}0${BOOST_VERSION_PATCH}" ]; then + printf "Installing Boost library...\\n" + curl -LO https://dl.bintray.com/boostorg/release/${BOOST_VERSION_MAJOR}.${BOOST_VERSION_MINOR}.${BOOST_VERSION_PATCH}/source/boost_$BOOST_VERSION.tar.bz2 \ + && tar -xjf boost_$BOOST_VERSION.tar.bz2 \ + && cd $BOOST_ROOT \ + && ./bootstrap.sh --prefix=$BOOST_ROOT \ + && ./b2 -q -j"${JOBS}" install \ + && cd .. \ + && rm -f boost_$BOOST_VERSION.tar.bz2 \ + && rm -rf $BOOST_LINK_LOCATION \ + && ln -s $BOOST_ROOT $BOOST_LINK_LOCATION \ + || exit 1 + printf " - Boost library successfully installed @ ${BOOST_ROOT} (Symlinked to ${BOOST_LINK_LOCATION}).\\n" +else + printf " - Boost library found with correct version @ ${BOOST_ROOT} (Symlinked to ${BOOST_LINK_LOCATION}).\\n" +fi +if [ $? -ne 0 ]; then exit -1; fi + + +printf "\\n" + + +printf "Checking MongoDB installation...\\n" +if [ ! 
-d $MONGODB_ROOT ]; then + printf "Installing MongoDB into ${MONGODB_ROOT}...\\n" + curl -OL https://fastdl.mongodb.org/linux/mongodb-linux-x86_64-amazon-$MONGODB_VERSION.tgz \ + && tar -xzf mongodb-linux-x86_64-amazon-$MONGODB_VERSION.tgz \ + && mv $SRC_LOCATION/mongodb-linux-x86_64-amazon-$MONGODB_VERSION $MONGODB_ROOT \ + && touch $MONGODB_LOG_LOCATION/mongod.log \ + && rm -f mongodb-linux-x86_64-amazon-$MONGODB_VERSION.tgz \ + && cp -f $REPO_ROOT/scripts/mongod.conf $MONGODB_CONF \ + && mkdir -p $MONGODB_DATA_LOCATION \ + && rm -rf $MONGODB_LINK_LOCATION \ + && rm -rf $BIN_LOCATION/mongod \ + && ln -s $MONGODB_ROOT $MONGODB_LINK_LOCATION \ + && ln -s $MONGODB_LINK_LOCATION/bin/mongod $BIN_LOCATION/mongod \ + || exit 1 + printf " - MongoDB successfully installed @ ${MONGODB_ROOT} (Symlinked to ${MONGODB_LINK_LOCATION}).\\n" +else + printf " - MongoDB found with correct version @ ${MONGODB_ROOT} (Symlinked to ${MONGODB_LINK_LOCATION}).\\n" +fi +if [ $? -ne 0 ]; then exit -1; fi +printf "Checking MongoDB C driver installation...\\n" +if [ ! -d $MONGO_C_DRIVER_ROOT ]; then + printf "Installing MongoDB C driver...\\n" + curl -LO https://github.com/mongodb/mongo-c-driver/releases/download/$MONGO_C_DRIVER_VERSION/mongo-c-driver-$MONGO_C_DRIVER_VERSION.tar.gz \ + && tar -xzf mongo-c-driver-$MONGO_C_DRIVER_VERSION.tar.gz \ + && cd mongo-c-driver-$MONGO_C_DRIVER_VERSION \ + && mkdir -p cmake-build \ + && cd cmake-build \ + && $CMAKE -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$HOME -DENABLE_BSON=ON -DENABLE_SSL=OPENSSL -DENABLE_AUTOMATIC_INIT_AND_CLEANUP=OFF -DENABLE_STATIC=ON .. \ + && make -j"${JOBS}" \ + && make install \ + && cd ../.. \ + && rm mongo-c-driver-$MONGO_C_DRIVER_VERSION.tar.gz \ + || exit 1 + printf " - MongoDB C driver successfully installed @ ${MONGO_C_DRIVER_ROOT}.\\n" +else + printf " - MongoDB C driver found with correct version @ ${MONGO_C_DRIVER_ROOT}.\\n" +fi +if [ $? -ne 0 ]; then exit -1; fi +printf "Checking MongoDB C++ driver installation...\\n" +if [ ! -d $MONGO_CXX_DRIVER_ROOT ]; then + printf "Installing MongoDB C++ driver...\\n" + curl -L https://github.com/mongodb/mongo-cxx-driver/archive/r$MONGO_CXX_DRIVER_VERSION.tar.gz -o mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION.tar.gz \ + && tar -xzf mongo-cxx-driver-r${MONGO_CXX_DRIVER_VERSION}.tar.gz \ + && cd mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION/build \ + && $CMAKE -DBUILD_SHARED_LIBS=OFF -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$HOME .. \ + && make -j"${JOBS}" VERBOSE=1 \ + && make install \ + && cd ../.. \ + && rm -f mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION.tar.gz \ + || exit 1 + printf " - MongoDB C++ driver successfully installed @ ${MONGO_CXX_DRIVER_ROOT}.\\n" +else + printf " - MongoDB C++ driver found with correct version @ ${MONGO_CXX_DRIVER_ROOT}.\\n" +fi +if [ $? -ne 0 ]; then exit -1; fi + + +printf "\\n" + + +printf "Checking LLVM 4 support...\\n" +if [ ! -d $LLVM_ROOT ]; then + printf "Installing LLVM 4...\\n" + cd ../opt \ + && git clone --depth 1 --single-branch --branch $LLVM_VERSION https://github.com/llvm-mirror/llvm.git llvm && cd llvm \ + && mkdir build \ + && cd build \ + && $CMAKE -G "Unix Makefiles" -DCMAKE_INSTALL_PREFIX="${LLVM_ROOT}" -DLLVM_TARGETS_TO_BUILD="host" -DLLVM_BUILD_TOOLS=false -DLLVM_ENABLE_RTTI=1 -DCMAKE_BUILD_TYPE="Release" .. \ + && make -j"${JOBS}" \ + && make install \ + && cd ../.. \ + || exit 1 + printf " - LLVM successfully installed @ ${LLVM_ROOT}\\n" +else + printf " - LLVM found @ ${LLVM_ROOT}.\\n" +fi +if [ $? 
-ne 0 ]; then exit -1; fi + + +cd .. +printf "\\n" + +function print_instructions() { + printf "source /opt/rh/python33/enable\\n" + printf "source /opt/rh/devtoolset-7/enable\\n" + return 0 +} diff --git a/scripts/eosio_build_darwin.sh b/scripts/eosio_build_darwin.sh old mode 100644 new mode 100755 index d1f5894b60a..a7ec32ff7de --- a/scripts/eosio_build_darwin.sh +++ b/scripts/eosio_build_darwin.sh @@ -1,406 +1,269 @@ - OS_VER=$(sw_vers -productVersion) - OS_MAJ=$(echo "${OS_VER}" | cut -d'.' -f1) - OS_MIN=$(echo "${OS_VER}" | cut -d'.' -f2) - OS_PATCH=$(echo "${OS_VER}" | cut -d'.' -f3) +if [ $1 == 1 ]; then ANSWER=1; else ANSWER=0; fi - MEM_GIG=$(bc <<< "($(sysctl -in hw.memsize) / 1024000000)") +OS_VER=$(sw_vers -productVersion) +OS_MAJ=$(echo "${OS_VER}" | cut -d'.' -f1) +OS_MIN=$(echo "${OS_VER}" | cut -d'.' -f2) +OS_PATCH=$(echo "${OS_VER}" | cut -d'.' -f3) +MEM_GIG=$(bc <<< "($(sysctl -in hw.memsize) / 1024000000)") +CPU_SPEED=$(bc <<< "scale=2; ($(sysctl -in hw.cpufrequency) / 10^8) / 10") +CPU_CORE=$( sysctl -in machdep.cpu.core_count ) +export JOBS=$(( MEM_GIG > CPU_CORE ? CPU_CORE : MEM_GIG )) - CPU_SPEED=$(bc <<< "scale=2; ($(sysctl -in hw.cpufrequency) / 10^8) / 10") - CPU_CORE=$( sysctl -in machdep.cpu.core_count ) +DISK_INSTALL=$(df -h . | tail -1 | tr -s ' ' | cut -d\ -f1 || cut -d' ' -f1) +blksize=$(df . | head -1 | awk '{print $2}' | cut -d- -f1) +gbfactor=$(( 1073741824 / blksize )) +total_blks=$(df . | tail -1 | awk '{print $2}') +avail_blks=$(df . | tail -1 | awk '{print $4}') +DISK_TOTAL=$((total_blks / gbfactor )) +DISK_AVAIL=$((avail_blks / gbfactor )) - DISK_INSTALL=$(df -h . | tail -1 | tr -s ' ' | cut -d\ -f1 || cut -d' ' -f1) - blksize=$(df . | head -1 | awk '{print $2}' | cut -d- -f1) - gbfactor=$(( 1073741824 / blksize )) - total_blks=$(df . | tail -1 | awk '{print $2}') - avail_blks=$(df . | tail -1 | awk '{print $4}') - DISK_TOTAL=$((total_blks / gbfactor )) - DISK_AVAIL=$((avail_blks / gbfactor )) +export HOMEBREW_NO_AUTO_UPDATE=1 - printf "\\n\\tOS name: %s\\n" "${ARCH}" - printf "\\tOS Version: %s\\n" "${OS_VER}" - printf "\\tCPU speed: %sGhz\\n" "${CPU_SPEED}" - printf "\\tCPU cores: %s\\n" "${CPU_CORE}" - printf "\\tPhysical Memory: %s Gbytes\\n" "${MEM_GIG}" - printf "\\tDisk install: %s\\n" "${DISK_INSTALL}" - printf "\\tDisk space total: %sG\\n" "${DISK_TOTAL}" - printf "\\tDisk space available: %sG\\n\\n" "${DISK_AVAIL}" +COUNT=1 +DISPLAY="" +DEPS="" - if [ "${MEM_GIG}" -lt 7 ]; then - echo "Your system must have 7 or more Gigabytes of physical memory installed." - echo "Exiting now." - exit 1 - fi +printf "\\nOS name: ${OS_NAME}\\n" +printf "OS Version: ${OS_VER}\\n" +printf "CPU speed: ${CPU_SPEED}Mhz\\n" +printf "CPU cores: %s\\n" "${CPU_CORE}" +printf "Physical Memory: ${MEM_GIG} Gbytes\\n" +printf "Disk install: ${DISK_INSTALL}\\n" +printf "Disk space total: ${DISK_TOTAL}G\\n" +printf "Disk space available: ${DISK_AVAIL}G\\n" - if [ "${OS_MIN}" -lt 12 ]; then - echo "You must be running Mac OS 10.12.x or higher to install EOSIO." - echo "Exiting now." - exit 1 - fi +if [ "${MEM_GIG}" -lt 7 ]; then + echo "Your system must have 7 or more Gigabytes of physical memory installed." + echo "Exiting now." + exit 1 +fi - if [ "${DISK_AVAIL}" -lt "$DISK_MIN" ]; then - echo "You must have at least ${DISK_MIN}GB of available storage to install EOSIO." - echo "Exiting now." - exit 1 - fi +if [ "${OS_MIN}" -lt 12 ]; then + echo "You must be running Mac OS 10.12.x or higher to install EOSIO." + echo "Exiting now." 
+ exit 1 +fi - printf "\\tChecking xcode-select installation\\n" - if ! XCODESELECT=$( command -v xcode-select) - then - printf "\\n\\tXCode must be installed in order to proceed.\\n\\n" - printf "\\tExiting now.\\n" - exit 1 - fi +if [ "${DISK_AVAIL}" -lt "$DISK_MIN" ]; then + echo "You must have at least ${DISK_MIN}GB of available storage to install EOSIO." + echo "Exiting now." + exit 1 +fi - printf "\\txcode-select installation found @ \\n" - printf "\\t%s \\n\\n" "${XCODESELECT}" +printf "\\n" - printf "\\tChecking Ruby installation.\\n" - if ! RUBY=$( command -v ruby) - then - printf "\\nRuby must be installed in order to proceed.\\n\\n" - printf "\\tExiting now.\\n" - exit 1 - fi +printf "Checking xcode-select installation...\\n" +if ! XCODESELECT=$( command -v xcode-select) +then + printf " - XCode must be installed in order to proceed!\\n\\n" + exit 1 +fi +printf " - XCode installation found @ ${XCODESELECT}\\n" - printf "\\tRuby installation found @ \\n" - printf "\\t%s \\n\\n" "${RUBY}" - - printf "\\tChecking Home Brew installation\\n" - if ! BREW=$( command -v brew ) - then - printf "\\tHomebrew must be installed to compile EOS.IO\\n\\n" - printf "\\tDo you wish to install Home Brew?\\n" - select yn in "Yes" "No"; do - case "${yn}" in - [Yy]* ) - "${XCODESELECT}" --install 2>/dev/null; - if ! "${RUBY}" -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)" - then - echo "Unable to install homebrew at this time. Exiting now." - exit 1; - else - BREW=$( command -v brew ) - fi - break;; - [Nn]* ) echo "User aborted homebrew installation. Exiting now."; - exit 1;; - * ) echo "Please enter 1 for yes or 2 for no.";; - esac - done - fi +printf "Checking Ruby installation...\\n" +if ! RUBY=$( command -v ruby) +then + printf " - Ruby must be installed in order to proceed!\\n" + exit 1 +fi +printf " - Ruby installation found @ ${RUBY}\\n" - printf "\\tHome Brew installation found @\\n" - printf "\\t%s\\n\\n" "${BREW}" +printf "Checking Home Brew installation...\\n" +if ! BREW=$( command -v brew ) +then + printf "Homebrew must be installed to compile EOS.IO!\\n" + if [ $ANSWER != 1 ]; then read -p "Do you wish to install HomeBrew? (y/n)? " ANSWER; fi + case $ANSWER in + 1 | [Yy]* ) + "${XCODESELECT}" --install 2>/dev/null; + if ! "${RUBY}" -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"; then + echo " - Unable to install homebrew at this time." + exit 1; + else + BREW=$( command -v brew ) + fi + ;; + [Nn]* ) echo "User aborted homebrew installation. Exiting now."; exit 1;; + * ) echo "Please type 'y' for yes or 'n' for no."; exit;; + esac - COUNT=1 - PERMISSION_GETTEXT=0 - DISPLAY="" - DEP="" +fi +printf " - Home Brew installation found @ ${BREW}\\n" - printf "\\tChecking dependencies.\\n" - var_ifs="${IFS}" - IFS="," - while read -r name tester testee brewname uri - do - printf "\\tChecking %s ... 
" "${name}" - if [ "${tester}" "${testee}" ]; then - printf "\\t\\t %s found\\n" "${name}" +printf "\\nChecking dependencies...\\n" +var_ifs="${IFS}" +IFS="," +while read -r name tester testee brewname uri; do + if [ "${tester}" "${testee}" ]; then + printf " - %s found\\n" "${name}" + continue + fi + # resolve conflict with homebrew glibtool and apple/gnu installs of libtool + if [ "${testee}" == "/usr/local/bin/glibtool" ]; then + if [ "${tester}" "/usr/local/bin/libtool" ]; then + printf " - %s found\\n" "${name}" continue fi - # resolve conflict with homebrew glibtool and apple/gnu installs of libtool - if [ "${testee}" == "/usr/local/bin/glibtool" ]; then - if [ "${tester}" "/usr/local/bin/libtool" ]; then - printf "\\t\\t %s found\\n" "${name}" - continue - fi - fi - if [ "${brewname}" = "gettext" ]; then - PERMISSION_GETTEXT=1 - fi - DEP=$DEP"${brewname} " - DISPLAY="${DISPLAY}${COUNT}. ${name}\\n\\t" - printf "\\t\\t %s ${bldred}NOT${txtrst} found.\\n" "${name}" - (( COUNT++ )) - done < "${SOURCE_DIR}/scripts/eosio_build_dep" - IFS="${var_ifs}" - - printf "\\tChecking Python3 ... " - if [ -z "$( python3 -c 'import sys; print(sys.version_info.major)' 2>/dev/null )" ]; then - DEP=$DEP"python@3 " - DISPLAY="${DISPLAY}${COUNT}. Python 3\\n\\t" - printf "\\t\\t python3 ${bldred}NOT${txtrst} found.\\n" - (( COUNT++ )) - else - printf "\\t\\t Python3 found\\n" fi + DEPS=$DEPS"${brewname}," + DISPLAY="${DISPLAY}${COUNT}. ${name}\\n" + printf " - %s ${bldred}NOT${txtrst} found.\\n" "${name}" + (( COUNT++ )) +done < "${REPO_ROOT}/scripts/eosio_build_darwin_deps" +IFS="${var_ifs}" - if [ $COUNT -gt 1 ]; then - printf "\\n\\tThe following dependencies are required to install EOSIO.\\n" - printf "\\n\\t${DISPLAY}\\n\\n" - echo "Do you wish to install these packages?" - select yn in "Yes" "No"; do - case $yn in - [Yy]* ) - if [ $PERMISSION_GETTEXT -eq 1 ]; then - sudo chown -R "$(whoami)" /usr/local/share - fi - "${XCODESELECT}" --install 2>/dev/null; - printf "\\tUpdating Home Brew.\\n" - if ! brew update - then - printf "\\tUnable to update Home Brew at this time.\\n" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - printf "\\tInstalling Dependencies.\\n" - if ! "${BREW}" install --force ${DEP} - then - printf "\\tHomebrew exited with the above errors.\\n" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if [[ "$DEP" == "llvm@4" ]]; then - "${BREW}" unlink ${DEP} - elif ! "${BREW}" unlink ${DEP} && "${BREW}" link --force ${DEP} - then - printf "\\tHomebrew exited with the above errors.\\n" - printf "\\tExiting now.\\n\\n" +if [ ! -d /usr/local/Frameworks ]; then + printf "\\n${bldred}/usr/local/Frameworks is necessary to brew install python@3. Run the following commands as sudo and try again:${txtrst}\\n" + printf "sudo mkdir /usr/local/Frameworks && sudo chown $(whoami):admin /usr/local/Frameworks\\n\\n" + exit 1; +fi + +if [ $COUNT -gt 1 ]; then + printf "\\nThe following dependencies are required to install EOSIO:\\n" + printf "${DISPLAY}\\n\\n" + if [ $ANSWER != 1 ]; then read -p "Do you wish to install these packages? (y/n) " ANSWER; fi + case $ANSWER in + 1 | [Yy]* ) + "${XCODESELECT}" --install 2>/dev/null; + if [ $1 == 0 ]; then read -p "Do you wish to update homebrew packages first? (y/n) " ANSWER; fi + case $ANSWER in + 1 | [Yy]* ) + if ! 
brew update; then + printf " - Brew update failed.\\n" exit 1; + else + printf " - Brew update complete.\\n" fi - break;; - [Nn]* ) echo "User aborting installation of required dependencies, Exiting now."; exit;; - * ) echo "Please type 1 for yes or 2 for no.";; + ;; + [Nn]* ) echo "Proceeding without update!";; + * ) echo "Please type 'y' for yes or 'n' for no."; exit;; esac - done - else - printf "\\n\\tNo required Home Brew dependencies to install.\\n" - fi + brew tap eosio/eosio # Required to install mongo-cxx-driver with static library + printf "\\nInstalling Dependencies...\\n" + # Ignore cmake so we don't install a newer version. + # Build from source to use local cmake; see homebrew-eosio repo for examples + # DON'T INSTALL llvm@4 WITH --force! + OIFS="$IFS" + IFS=$',' + for DEP in $DEPS; do + # Eval to support string/arguments with $DEP + if ! eval $BREW install $DEP; then + printf " - Homebrew exited with the above errors!\\n" + exit 1; + fi + done + IFS="$OIFS" + ;; + [Nn]* ) echo "User aborting installation of required dependencies, Exiting now."; exit;; + * ) echo "Please type 'y' for yes or 'n' for no."; exit;; + esac +else + printf "\\n - No required Home Brew dependencies to install.\\n" +fi - printf "\\n\\tChecking boost library installation.\\n" - BVERSION=$( grep "#define BOOST_VERSION" "/usr/local/include/boost/version.hpp" 2>/dev/null | tail -1 | tr -s ' ' | cut -d\ -f3 ) - if [ "${BVERSION}" != "106700" ]; then - if [ ! -z "${BVERSION}" ]; then - printf "\\tFound Boost Version %s.\\n" "${BVERSION}" - printf "\\tEOS.IO requires Boost version 1.67.\\n" - printf "\\tWould you like to uninstall version %s and install Boost version 1.67.\\n" "${BVERSION}" - select yn in "Yes" "No"; do - case $yn in - [Yy]* ) - if "${BREW}" list | grep "boost" - then - printf "\\tUninstalling Boost Version %s.\\n" "${BVERSION}" - if ! "${BREW}" uninstall --force boost - then - printf "\\tUnable to remove boost libraries at this time. 0\\n" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - else - printf "\\tRemoving Boost Version %s.\\n" "${BVERSION}" - if ! sudo rm -rf "/usr/local/include/boost" - then - printf "\\tUnable to remove boost libraries at this time. 1\\n" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! sudo rm -rf /usr/local/lib/libboost* - then - printf "\\tUnable to remove boost libraries at this time. 2\\n" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - fi - break;; - [Nn]* ) echo "User cancelled installation of Boost libraries, Exiting now."; exit;; - * ) echo "Please type 1 for yes or 2 for no.";; - esac - done - fi - printf "\\tInstalling boost libraries.\\n" - if ! "${BREW}" install https://raw.githubusercontent.com/Homebrew/homebrew-core/f946d12e295c8a27519b73cc810d06593270a07f/Formula/boost.rb - then - printf "\\tUnable to install boost 1.67 libraries at this time. 0\\n" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if [ -d "$BUILD_DIR" ]; then - if ! rm -rf "$BUILD_DIR" - then - printf "\\tUnable to remove directory %s. Please remove this directory and run this script %s again. 0\\n" "$BUILD_DIR" "${BASH_SOURCE[0]}" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - fi - printf "\\tBoost 1.67.0 successfully installed @ /usr/local.\\n" - else - printf "\\tBoost 1.67.0 found at /usr/local.\\n" - fi +printf "\\n" - printf "\\n\\tChecking MongoDB C++ driver installation.\\n" - MONGO_INSTALL=true - if [ -e "/usr/local/lib/libmongocxx-static.a" ]; then - MONGO_INSTALL=false - if ! 
version=$( grep "Version:" /usr/local/lib/pkgconfig/libmongocxx-static.pc | tr -s ' ' | awk '{print $2}' ) - then - printf "\\tUnable to determine mongodb-cxx-driver version.\\n" - printf "\\tExiting now.\\n\\n" - exit 1; - fi +export CPATH="$(python-config --includes | awk '{print $1}' | cut -dI -f2):$CPATH" # Boost has trouble finding pyconfig.h +printf "Checking Boost library (${BOOST_VERSION}) installation...\\n" +BOOSTVERSION=$( grep "#define BOOST_VERSION" "$HOME/opt/boost/include/boost/version.hpp" 2>/dev/null | tail -1 | tr -s ' ' | cut -d\ -f3 ) +if [ "${BOOSTVERSION}" != "${BOOST_VERSION_MAJOR}0${BOOST_VERSION_MINOR}0${BOOST_VERSION_PATCH}" ]; then + printf "Installing Boost library...\\n" + curl -LO https://dl.bintray.com/boostorg/release/$BOOST_VERSION_MAJOR.$BOOST_VERSION_MINOR.$BOOST_VERSION_PATCH/source/boost_$BOOST_VERSION.tar.bz2 \ + && tar -xjf boost_$BOOST_VERSION.tar.bz2 \ + && cd $BOOST_ROOT \ + && ./bootstrap.sh --prefix=$BOOST_ROOT \ + && ./b2 -q -j$(sysctl -in machdep.cpu.core_count) install \ + && cd .. \ + && rm -f boost_$BOOST_VERSION.tar.bz2 \ + && rm -rf $BOOST_LINK_LOCATION \ + && ln -s $BOOST_ROOT $BOOST_LINK_LOCATION \ + || exit 1 + printf " - Boost library successfully installed @ ${BOOST_ROOT}.\\n" +else + printf " - Boost library found with correct version @ ${BOOST_ROOT}.\\n" +fi +if [ $? -ne 0 ]; then exit -1; fi - maj=$( echo "${version}" | cut -d'.' -f1 ) - min=$( echo "${version}" | cut -d'.' -f2 ) - if [ "${maj}" -gt 3 ]; then - MONGO_INSTALL=true - elif [ "${maj}" -eq 3 ] && [ "${min}" -lt 3 ]; then - MONGO_INSTALL=true - fi - fi - if [ $MONGO_INSTALL == "true" ]; then - if ! cd "${TEMP_DIR}" - then - printf "\\tUnable to enter directory %s.\\n" "${TEMP_DIR}" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! pkgconfig=$( "${BREW}" list | grep pkg-config ) - then - if ! "${BREW}" install --force pkg-config - then - printf "\\tHomebrew returned an error installing pkg-config.\\n" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! "${BREW}" unlink pkg-config && "${BREW}" link --force pkg-config - then - printf "\\tHomebrew returned an error linking pkgconfig.\\n" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - fi - STATUS=$( curl -LO -w '%{http_code}' --connect-timeout 30 https://github.com/mongodb/mongo-c-driver/releases/download/1.10.2/mongo-c-driver-1.10.2.tar.gz ) - if [ "${STATUS}" -ne 200 ]; then - if ! rm -f "${TEMP_DIR}/mongo-c-driver-1.10.2.tar.gz" - then - printf "\\tUnable to remove file %s/mongo-c-driver-1.10.2.tar.gz.\\n" "${TEMP_DIR}" - fi - printf "\\tUnable to download MongoDB C driver at this time.\\n" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! tar xf mongo-c-driver-1.10.2.tar.gz - then - printf "\\tUnable to unarchive file %s/mongo-c-driver-1.10.2.tar.gz.\\n" "${TEMP_DIR}" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! rm -f "${TEMP_DIR}/mongo-c-driver-1.10.2.tar.gz" - then - printf "\\tUnable to remove file mongo-c-driver-1.10.2.tar.gz.\\n" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! cd "${TEMP_DIR}"/mongo-c-driver-1.10.2 - then - printf "\\tUnable to cd into directory %s/mongo-c-driver-1.10.2.\\n" "${TEMP_DIR}" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! mkdir cmake-build - then - printf "\\tUnable to create directory %s/mongo-c-driver-1.10.2/cmake-build.\\n" "${TEMP_DIR}" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! 
cd cmake-build - then - printf "\\tUnable to enter directory %s/mongo-c-driver-1.10.2/cmake-build.\\n" "${TEMP_DIR}" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! cmake -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=/usr/local -DENABLE_BSON=ON \ - -DENABLE_SSL=DARWIN -DENABLE_AUTOMATIC_INIT_AND_CLEANUP=OFF -DENABLE_STATIC=ON .. - then - printf "\\tConfiguring MongoDB C driver has encountered the errors above.\\n" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! make -j"${CPU_CORE}" - then - printf "\\tError compiling MongoDB C driver.\\n" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! sudo make install - then - printf "\\tError installing MongoDB C driver.\\nMake sure you have sudo privileges.\\n" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! cd "${TEMP_DIR}" - then - printf "\\tUnable to enter directory %s.\\n" "${TEMP_DIR}" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! rm -rf "${TEMP_DIR}/mongo-c-driver-1.10.2" - then - printf "\\tUnable to remove directory %s/mongo-c-driver-1.10.2.\\n" "${TEMP_DIR}" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! git clone https://github.com/mongodb/mongo-cxx-driver.git --branch releases/v3.3 --depth 1 - then - printf "\\tUnable to clone MongoDB C++ driver at this time.\\n" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! cd "${TEMP_DIR}/mongo-cxx-driver/build" - then - printf "\\tUnable to enter directory %s/mongo-cxx-driver/build.\\n" "${TEMP_DIR}" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! cmake -DBUILD_SHARED_LIBS=OFF -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=/usr/local .. - then - printf "\\tCmake has encountered the above errors building the MongoDB C++ driver.\\n" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! make -j"${CPU_CORE}" - then - printf "\\tError compiling MongoDB C++ driver.\\n" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! sudo make install - then - printf "\\tError installing MongoDB C++ driver.\\nMake sure you have sudo privileges.\\n" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! cd "${TEMP_DIR}" - then - printf "\\tUnable to enter directory %s.\\n" "${TEMP_DIR}" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! rm -rf "${TEMP_DIR}/mongo-cxx-driver" - then - printf "\\tUnable to remove directory %s/mongo-cxx-driver.\\n" "${TEMP_DIR}" "${TEMP_DIR}" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - printf "\\tMongo C++ driver installed at /usr/local/lib/libmongocxx-static.a.\\n" - else - printf "\\tMongo C++ driver found at /usr/local/lib/libmongocxx-static.a.\\n" - fi +printf "\\n" + + +printf "Checking MongoDB installation...\\n" +if [ ! -d $MONGODB_ROOT ]; then + printf "Installing MongoDB into ${MONGODB_ROOT}...\\n" + curl -OL https://fastdl.mongodb.org/osx/mongodb-osx-ssl-x86_64-$MONGODB_VERSION.tgz \ + && tar -xzf mongodb-osx-ssl-x86_64-$MONGODB_VERSION.tgz \ + && mv $SRC_LOCATION/mongodb-osx-x86_64-$MONGODB_VERSION $MONGODB_ROOT \ + && touch $MONGODB_LOG_LOCATION/mongod.log \ + && rm -f mongodb-osx-ssl-x86_64-$MONGODB_VERSION.tgz \ + && cp -f $REPO_ROOT/scripts/mongod.conf $MONGODB_CONF \ + && mkdir -p $MONGODB_DATA_LOCATION \ + && rm -rf $MONGODB_LINK_LOCATION \ + && rm -rf $BIN_LOCATION/mongod \ + && ln -s $MONGODB_ROOT $MONGODB_LINK_LOCATION \ + && ln -s $MONGODB_LINK_LOCATION/bin/mongod $BIN_LOCATION/mongod \ + || exit 1 + printf " - MongoDB successfully installed @ ${MONGODB_ROOT}\\n" +else + printf " - MongoDB found with correct version @ ${MONGODB_ROOT}.\\n" +fi +if [ $? 
-ne 0 ]; then exit -1; fi +printf "Checking MongoDB C driver installation...\\n" +if [ ! -d $MONGO_C_DRIVER_ROOT ]; then + printf "Installing MongoDB C driver...\\n" + curl -LO https://github.com/mongodb/mongo-c-driver/releases/download/$MONGO_C_DRIVER_VERSION/mongo-c-driver-$MONGO_C_DRIVER_VERSION.tar.gz \ + && tar -xzf mongo-c-driver-$MONGO_C_DRIVER_VERSION.tar.gz \ + && cd mongo-c-driver-$MONGO_C_DRIVER_VERSION \ + && mkdir -p cmake-build \ + && cd cmake-build \ + && $CMAKE -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$HOME -DENABLE_BSON=ON -DENABLE_SSL=DARWIN -DENABLE_AUTOMATIC_INIT_AND_CLEANUP=OFF -DENABLE_STATIC=ON .. \ + && make -j"${JOBS}" \ + && make install \ + && cd ../.. \ + && rm mongo-c-driver-$MONGO_C_DRIVER_VERSION.tar.gz \ + || exit 1 + printf " - MongoDB C driver successfully installed @ ${MONGO_C_DRIVER_ROOT}.\\n" +else + printf " - MongoDB C driver found with correct version @ ${MONGO_C_DRIVER_ROOT}.\\n" +fi +if [ $? -ne 0 ]; then exit -1; fi +printf "Checking MongoDB C++ driver installation...\\n" +if [ "$(grep "Version:" $HOME/lib/pkgconfig/libmongocxx-static.pc 2>/dev/null | tr -s ' ' | awk '{print $2}')" != $MONGO_CXX_DRIVER_VERSION ]; then + printf "Installing MongoDB C++ driver...\\n" + curl -L https://github.com/mongodb/mongo-cxx-driver/archive/r$MONGO_CXX_DRIVER_VERSION.tar.gz -o mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION.tar.gz \ + && tar -xzf mongo-cxx-driver-r${MONGO_CXX_DRIVER_VERSION}.tar.gz \ + && cd mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION/build \ + && $CMAKE -DBUILD_SHARED_LIBS=OFF -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$HOME .. \ + && make -j"${JOBS}" VERBOSE=1 \ + && make install \ + && cd ../.. \ + && rm -f mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION.tar.gz \ + || exit 1 + printf " - MongoDB C++ driver successfully installed @ ${MONGO_CXX_DRIVER_ROOT}.\\n" +else + printf " - MongoDB C++ driver found with correct version @ ${MONGO_CXX_DRIVER_ROOT}.\\n" +fi +if [ $? -ne 0 ]; then exit -1; fi + +printf "\\n" + + +# We install llvm into /usr/local/opt using brew install llvm@4 +printf "Checking LLVM 4 support...\\n" +if [ ! -d $LLVM_ROOT ]; then + ln -s /usr/local/opt/llvm@4 $LLVM_ROOT \ + || exit 1 + printf " - LLVM successfully linked from /usr/local/opt/llvm@4 to ${LLVM_ROOT}\\n" +else + printf " - LLVM found @ ${LLVM_ROOT}.\\n" +fi + + +cd .. 
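+# Optional sanity check (a minimal sketch, not part of the upstream patch;
+# assumes LLVM_ROOT is exported by the calling eosio_build.sh): the symlink
+# created above can exist yet point at a missing brew keg, so warn early if
+# it does not resolve to a usable LLVM toolchain.
+if [ ! -e "${LLVM_ROOT}/bin/llvm-config" ]; then
+ printf " - WARNING: ${LLVM_ROOT}/bin/llvm-config not found; the LLVM 4 symlink may be dangling.\\n"
+fi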
+printf "\\n" - function print_instructions() - { - printf "\\n\\t%s -f %s &\\n" "$( command -v mongod )" "${MONGOD_CONF}" - printf "\\tcd %s; make test\\n\\n" "${BUILD_DIR}" +function print_instructions() { return 0 - } +} diff --git a/scripts/eosio_build_darwin_deps b/scripts/eosio_build_darwin_deps new file mode 100755 index 00000000000..44192f04309 --- /dev/null +++ b/scripts/eosio_build_darwin_deps @@ -0,0 +1,13 @@ +cmake,-f,/usr/local/bin/cmake,cmake,https://github.com/Kitware/CMake/releases/download/v3.13.4/cmake-3.13.4.tar.gz +automake,-x,/usr/local/bin/automake,automake,http://ftp.gnu.org/gnu/automake/automake-1.15.tar.gz +Libtool,-x,/usr/local/bin/glibtool,libtool,http://gnu.askapache.com/libtool/libtool-2.4.6.tar.gz +OpenSSL,-f,/usr/local/opt/openssl/lib/libssl.a,openssl,https://www.openssl.org/source/openssl-1.0.2n.tar.gz +wget,-x,/usr/local/bin/wget,wget,https://ftp.gnu.org/gnu/wget/wget-1.19.2.tar.gz +GMP,-f,/usr/local/opt/gmp/include/gmpxx.h,gmp,https://ftp.gnu.org/gnu/gmp/gmp-6.1.2.tar.bz2 +llvm,-x,/usr/local/opt/llvm@4/bin/clang-4.0,llvm@4,http://releases.llvm.org/4.0.1/llvm-4.0.1.src.tar.xz +python,-d,/usr/local/Cellar/python/3.7.2_1,python,https://www.python.org/ftp/python/3.7.2/Python-3.7.2.tgz +python@2,-d,/usr/local/Cellar/python@2/2.7.15_2,python@2,https://www.python.org/ftp/python/2.7.13/Python-2.7.13.tgz +doxygen,-f,/usr/local/bin/doxygen,doxygen,http://ftp.stack.nl/pub/users/dimitri/doxygen-1.8.14.src.tar.gz +graphviz,-d,/usr/local/opt/graphviz,graphviz,https://fossies.org/linux/misc/graphviz-2.40.1.tar.gz +libusb,-f,/usr/local/lib/libusb-1.0.0.dylib,libusb,https://github.com/libusb/libusb/releases/download/v1.0.22/libusb-1.0.22.tar.bz2 +pkgconfig,-x,/usr/local/bin/pkg-config,pkgconfig,https://pkg-config.freedesktop.org/releases/pkg-config-0.29.2.tar.gz diff --git a/scripts/eosio_build_dep b/scripts/eosio_build_dep deleted file mode 100644 index 7123baf02f5..00000000000 --- a/scripts/eosio_build_dep +++ /dev/null @@ -1,12 +0,0 @@ -automake,-x,/usr/local/bin/automake,automake,http://ftp.gnu.org/gnu/automake/automake-1.15.tar.gz -Libtool,-x,/usr/local/bin/glibtool,libtool,http://gnu.askapache.com/libtool/libtool-2.4.6.tar.gz -OpenSSL,-f,/usr/local/opt/openssl/lib/libssl.a,openssl,https://www.openssl.org/source/openssl-1.0.2n.tar.gz -llvm,-x,/usr/local/opt/llvm@4/bin/clang-4.0,llvm@4,http://releases.llvm.org/5.0.1/llvm-5.0.1.src.tar.xz -wget,-x,/usr/local/bin/wget,wget,https://ftp.gnu.org/gnu/wget/wget-1.19.2.tar.gz -CMake,-x,/usr/local/bin/cmake,cmake,https://cmake.org/files/v3.10/cmake-3.10.1-Darwin-x86_64.tar.gz -GMP,-f,/usr/local/opt/gmp/include/gmpxx.h,gmp,https://ftp.gnu.org/gnu/gmp/gmp-6.1.2.tar.bz2 -gettext,-x,/usr/local/opt/gettext/bin/gettext,gettext,https://ftp.gnu.org/pub/gnu/gettext/gettext-latest.tar.gz -MongoDB,-x,/usr/local/opt/mongodb/bin/mongod,mongodb,https://fastdl.mongodb.org/osx/mongodb-osx-ssl-x86_64-3.6.3.tgz -Doxygen,-x,/usr/local/bin/doxygen,doxygen,http://ftp.stack.nl/pub/users/dimitri/doxygen-1.8.14.src.tar.gz -Graphviz,-x,/usr/local/bin/dot,graphviz,https://graphviz.gitlab.io/pub/graphviz/stable/SOURCES/graphviz.tar.gz -LCOV,-f,/usr/local/bin/lcov,lcov,http://downloads.sourceforge.net/ltp/lcov-1.13.tar.gz diff --git a/scripts/eosio_build_fedora.sh b/scripts/eosio_build_fedora.sh old mode 100644 new mode 100755 index 661efea9fc0..c27f47658d3 --- a/scripts/eosio_build_fedora.sh +++ b/scripts/eosio_build_fedora.sh @@ -1,500 +1,236 @@ - OS_VER=$( grep VERSION_ID /etc/os-release | cut -d'=' -f2 | sed 's/[^0-9\.]//gI' ) - - MEM_MEG=$( free -m | 
sed -n 2p | tr -s ' ' | cut -d\ -f2 )
- CPU_SPEED=$( lscpu | grep "MHz" | tr -s ' ' | cut -d\ -f3 | cut -d'.' -f1 )
- CPU_CORE=$( lscpu -pCPU | grep -v "#" | wc -l )
- MEM_GIG=$(( ((MEM_MEG / 1000) / 2) ))
- JOBS=$(( MEM_GIG > CPU_CORE ? CPU_CORE : MEM_GIG ))
-
- DISK_INSTALL=$( df -h . | tail -1 | tr -s ' ' | cut -d\ -f1 )
- DISK_TOTAL_KB=$( df . | tail -1 | awk '{print $2}' )
- DISK_AVAIL_KB=$( df . | tail -1 | awk '{print $4}' )
- DISK_TOTAL=$(( DISK_TOTAL_KB / 1048576 ))
- DISK_AVAIL=$(( DISK_AVAIL_KB / 1048576 ))
-
- printf "\\n\\tOS name: %s\\n" "${OS_NAME}"
- printf "\\tOS Version: %s\\n" "${OS_VER}"
- printf "\\tCPU speed: %sMhz\\n" "${CPU_SPEED}"
- printf "\\tCPU cores: %s\\n" "${CPU_CORE}"
- printf "\\tPhysical Memory: %s Mgb\\n" "${MEM_MEG}"
- printf "\\tDisk install: %s\\n" "${DISK_INSTALL}"
- printf "\\tDisk space total: %sG\\n" "${DISK_TOTAL%.*}"
- printf "\\tDisk space available: %sG\\n" "${DISK_AVAIL%.*}"
-
- if [ "${MEM_MEG}" -lt 7000 ]; then
- printf "\\tYour system must have 7 or more Gigabytes of physical memory installed.\\n"
- printf "\\tExiting now.\\n"
- exit 1;
- fi
-
- if [ "${OS_VER}" -lt 25 ]; then
- printf "\\tYou must be running Fedora 25 or higher to install EOSIO.\\n"
- printf "\\tExiting now.\\n"
+if [ $1 == 1 ]; then ANSWER=1; else ANSWER=0; fi
+
+CPU_SPEED=$( lscpu | grep "MHz" | tr -s ' ' | cut -d\ -f3 | cut -d'.' -f1 )
+CPU_CORE=$( nproc )
+
+OS_VER=$( grep VERSION_ID /etc/os-release | cut -d'=' -f2 | sed 's/[^0-9\.]//gI' )
+if [ "${OS_VER}" -lt 25 ]; then
+ printf "You must be running Fedora 25 or higher to install EOSIO.\\n"
+ printf "Exiting now.\\n"
+ exit 1;
+fi
+
+# procps-ng includes free command
+if [[ -z "$( rpm -qi "procps-ng" 2>/dev/null | grep Name )" ]]; then yum install -y procps-ng; fi
+MEM_MEG=$( free -m | sed -n 2p | tr -s ' ' | cut -d\ -f2 )
+if [ "${MEM_MEG}" -lt 7000 ]; then
+ printf "Your system must have 7 or more Gigabytes of physical memory installed.\\n"
+ printf "Exiting now.\\n"
+ exit 1;
+fi
+MEM_GIG=$(( ((MEM_MEG / 1000) / 2) ))
+export JOBS=$(( MEM_GIG > CPU_CORE ? CPU_CORE : MEM_GIG ))
+
+DISK_INSTALL=$( df -h . | tail -1 | tr -s ' ' | cut -d\ -f1 )
+DISK_TOTAL_KB=$( df . | tail -1 | awk '{print $2}' )
+DISK_AVAIL_KB=$( df . | tail -1 | awk '{print $4}' )
+DISK_TOTAL=$(( DISK_TOTAL_KB / 1048576 ))
+DISK_AVAIL=$(( DISK_AVAIL_KB / 1048576 ))
+if [ "${DISK_AVAIL%.*}" -lt "${DISK_MIN}" ]; then
+ printf "You must have at least %sGB of available storage to install EOSIO.\\n" "${DISK_MIN}"
+ printf "Exiting now.\\n"
+ exit 1;
+fi
+
+printf "\\nOS name: ${OS_NAME}\\n"
+printf "OS Version: ${OS_VER}\\n"
+printf "CPU speed: ${CPU_SPEED}Mhz\\n"
+printf "CPU cores: ${CPU_CORE}\\n"
+printf "Physical Memory: ${MEM_MEG} MB\\n"
+printf "Disk space total: ${DISK_TOTAL%.*}G\\n"
+printf "Disk space available: ${DISK_AVAIL%.*}G\\n"
+
+# llvm is symlinked from /usr/lib64/llvm4.0 into user's home
+DEP_ARRAY=(
+ git sudo procps-ng which gcc gcc-c++ autoconf automake libtool make \
+ bzip2-devel wget bzip2 compat-openssl10 graphviz doxygen \
+ openssl-devel gmp-devel libstdc++-devel python2 python2-devel python3 python3-devel \
+ libedit ncurses-devel swig llvm4.0 llvm4.0-devel llvm4.0-libs llvm4.0-static libcurl-devel libusb-devel
+)
+COUNT=1
+DISPLAY=""
+DEP=""
+
+printf "\\nChecking Yum installation...\\n"
+if ! YUM=$( command -v yum 2>/dev/null ); then
+ printf "!! 
Yum must be installed to compile EOS.IO !!\\n" + printf "Exiting now.\\n" exit 1; - fi +fi +printf " - Yum installation found at %s.\\n" "${YUM}" - if [ "${DISK_AVAIL%.*}" -lt "${DISK_MIN}" ]; then - printf "\\tYou must have at least %sGB of available storage to install EOSIO.\\n" "${DISK_MIN}" - printf "\\tExiting now.\\n" - exit 1; - fi - - printf "\\n\\tChecking Yum installation\\n" - - YUM=$( command -v yum 2>/dev/null ) - if [ -z "${YUM}" ]; then - printf "\\n\\tYum must be installed to compile EOS.IO.\\n" - printf "\\n\\tExiting now.\\n" - exit 1; - fi - - printf "\\tYum installation found at %s.\\n" "${YUM}" - printf "\\tUpdating YUM.\\n" - if ! sudo yum -y update - then - printf "\\n\\tYUM update failed with the above errors.\\n" - printf "\\n\\tExiting now.\\n" - exit 1; - fi - - DEP_ARRAY=( git gcc.x86_64 gcc-c++.x86_64 autoconf automake libtool make cmake.x86_64 \ - bzip2.x86_64 bzip2-devel.x86_64 openssl-devel.x86_64 gmp-devel.x86_64 libstdc++-devel.x86_64 \ - python2-devel.x86_64 python3-devel.x86_64 mongodb.x86_64 mongodb-server.x86_64 libedit.x86_64 \ - graphviz.x86_64 doxygen.x86_64 ) - COUNT=1 - DISPLAY="" - DEP="" - - printf "\\n\\tChecking YUM for installed dependencies.\\n\\n" - - for (( i=0; i<${#DEP_ARRAY[@]}; i++ )); - do - pkg=$( "${YUM}" info "${DEP_ARRAY[$i]}" 2>/dev/null | grep Repo | tr -s ' ' | cut -d: -f2 | sed 's/ //g' ) - - if [ "$pkg" != "@System" ]; then - DEP=$DEP" ${DEP_ARRAY[$i]} " - DISPLAY="${DISPLAY}${COUNT}. ${DEP_ARRAY[$i]}\\n\\t" - printf "\\tPackage %s ${bldred} NOT ${txtrst} found.\\n" "${DEP_ARRAY[$i]}" - (( COUNT++ )) - else - printf "\\tPackage %s found.\\n" "${DEP_ARRAY[$i]}" - continue - fi - done - - if [ ${COUNT} -gt 1 ]; then - printf "\\n\\tThe following dependencies are required to install EOSIO.\\n" - printf "\\n\\t${DISPLAY}\\n\\n" - printf "\\tDo you wish to install these dependencies?\\n" - select yn in "Yes" "No"; do - case $yn in - [Yy]* ) - printf "\\n\\n\\tInstalling dependencies\\n\\n" - if ! sudo yum -y install ${DEP} - then - printf "\\n\\tYUM dependency installation failed.\\n" - printf "\\n\\tExiting now.\\n" - exit 1; - else - printf "\\n\\tYUM dependencies installed successfully.\\n" - fi - break;; - [Nn]* ) echo "User aborting installation of required dependencies, Exiting now."; exit;; - * ) echo "Please type 1 for yes or 2 for no.";; - esac - done - else - printf "\\n\\tNo required YUM dependencies to install.\\n" - fi - - if [ "${ENABLE_COVERAGE_TESTING}" = true ]; then - printf "\\n\\tCode coverage build requested." - printf "\\n\\tChecking perl installation.\\n" - perl_bin=$( command -v perl 2>/dev/null ) - if [ -z "${perl_bin}" ]; then - printf "\\n\\tInstalling perl.\\n" - if ! sudo "${YUM}" -y install perl - then - printf "\\n\\tUnable to install perl at this time.\\n" - printf "\\n\\tExiting now.\\n\\n" - exit 1; - fi - else - printf "\\tPerl installation found at %s.\\n" "${perl_bin}" - fi - printf "\\n\\tChecking LCOV installation." - if [ ! -e "/usr/local/bin/lcov" ]; then - printf "\\n\\tLCOV installation not found.\\n" - printf "\\tInstalling LCOV.\\n" - if ! cd "${TEMP_DIR}" - then - printf "\\n\\tUnable to enter %s. Exiting now.\\n" "${TEMP_DIR}" - exit 1; - fi - if ! git clone "https://github.com/linux-test-project/lcov.git" - then - printf "\\n\\tUnable to clone LCOV at this time.\\n" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! cd "${TEMP_DIR}/lcov" - then - printf "\\n\\tUnable to enter %s/lcov. Exiting now.\\n" "${TEMP_DIR}" - exit 1; - fi - if ! 
sudo make install - then - printf "\\n\\tUnable to install LCOV at this time.\\n" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - rm -rf "${TEMP_DIR}/lcov" - printf "\\n\\tSuccessfully installed LCOV.\\n\\n" - else - printf "\\n\\tLCOV installation found @ /usr/local/bin.\\n" - fi - fi - if [ -d "${HOME}/opt/boost_1_67_0" ]; then - if ! mv "${HOME}/opt/boost_1_67_0" "$BOOST_ROOT" - then - printf "\\n\\tUnable to move directory %s/opt/boost_1_67_0 to %s.\\n" "${HOME}" "${BOOST_ROOT}" - printf "\\n\\tExiting now.\\n" - exit 1 - fi - if [ -d "$BUILD_DIR" ]; then - if ! rm -rf "$BUILD_DIR" - then - printf "\\tUnable to remove directory %s. Please remove this directory and run this script %s again. 0\\n" "$BUILD_DIR" "${BASH_SOURCE[0]}" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - fi - fi - - printf "\\n\\tChecking boost library installation.\\n" - BVERSION=$( grep "BOOST_LIB_VERSION" "${BOOST_ROOT}/include/boost/version.hpp" 2>/dev/null \ - | tail -1 | tr -s ' ' | cut -d\ -f3 | sed 's/[^0-9\._]//gI' ) - if [ "${BVERSION}" != "1_67" ]; then - printf "\\tRemoving existing boost libraries in %s/opt/boost* .\\n" "${HOME}" - if ! rm -rf "${HOME}"/opt/boost* - then - printf "\\n\\tUnable to remove deprecated boost libraries at this time.\\n" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - printf "\\tInstalling boost libraries.\\n" - if ! cd "${TEMP_DIR}" - then - printf "\\n\\tUnable to enter directory %s.\\n" "${TEMP_DIR}" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - STATUS=$( curl -LO -w '%{http_code}' --connect-timeout 30 https://dl.bintray.com/boostorg/release/1.67.0/source/boost_1_67_0.tar.bz2 ) - if [ "${STATUS}" -ne 200 ]; then - printf "\\tUnable to download Boost libraries at this time.\\n" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! tar xf "${TEMP_DIR}/boost_1_67_0.tar.bz2" - then - printf "\\n\\tUnable to unarchive file %s/boost_1_67_0.tar.bz2.\\n" "${TEMP_DIR}" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! rm -f "${TEMP_DIR}/boost_1_67_0.tar.bz2" - then - printf "\\n\\tUnable to remove file %s/boost_1_67_0.tar.bz2.\\n" "${TEMP_DIR}" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! cd "${TEMP_DIR}/boost_1_67_0/" - then - printf "\\n\\tUnable to enter directory %s/boost_1_67_0.\\n" "${TEMP_DIR}" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! "${TEMP_DIR}"/boost_1_67_0/bootstrap.sh "--prefix=${BOOST_ROOT}" - then - printf "\\n\\tInstallation of boost libraries failed. 0\\n" - printf "\\tExiting now.\\n\\n" +if [ $ANSWER != 1 ]; then read -p "Do you wish to update YUM repositories? (y/n) " ANSWER; fi +case $ANSWER in + 1 | [Yy]* ) + if ! sudo $YUM -y update; then + printf " - YUM update failed.\\n" exit 1; - fi - if ! "${TEMP_DIR}"/boost_1_67_0/b2 -j"${CPU_CORE}" install - then - printf "\\n\\tInstallation of boost libraries failed. 1\\n" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! rm -rf "${TEMP_DIR}/boost_1_67_0" - then - printf "\\n\\tUnable to remove directory %s/boost_1_67_0. 1\\n" "${TEMP_DIR}" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if [ -d "$BUILD_DIR" ]; then - if ! rm -rf "$BUILD_DIR" - then - printf "\\tUnable to remove directory %s. Please remove this directory and run this script %s again. 
0\\n" "$BUILD_DIR" "${BASH_SOURCE[0]}" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - fi - printf "\\n\\tBoost 1.67.0 successfully installed at %s/opt/boost_1_67_0.\\n\\n" "${HOME}" + else + printf " - YUM update complete.\\n" + fi + ;; + [Nn]* ) echo " - Proceeding without update!";; + * ) echo "Please type 'y' for yes or 'n' for no."; exit;; +esac + +printf "Checking RPM for installed dependencies...\\n" +for (( i=0; i<${#DEP_ARRAY[@]}; i++ )); do + pkg=$( rpm -qi "${DEP_ARRAY[$i]}" 2>/dev/null | grep Name ) + if [[ -z $pkg ]]; then + DEP=$DEP" ${DEP_ARRAY[$i]} " + DISPLAY="${DISPLAY}${COUNT}. ${DEP_ARRAY[$i]}\\n" + printf " - Package %s ${bldred} NOT ${txtrst} found!\\n" "${DEP_ARRAY[$i]}" + (( COUNT++ )) else - printf "\\tBoost 1.67.0 found at %s/opt/boost_1_67_0.\\n" "${HOME}" + printf " - Package %s found.\\n" "${DEP_ARRAY[$i]}" + continue fi - - printf "\\n\\tChecking MongoDB C++ driver installation.\\n" - MONGO_INSTALL=true - if [ -e "/usr/local/lib64/libmongocxx-static.a" ]; then - MONGO_INSTALL=false - if [ ! -f /usr/local/lib64/pkgconfig/libmongocxx-static.pc ]; then - MONGO_INSTALL=true - else - if ! version=$( grep "Version:" /usr/local/lib64/pkgconfig/libmongocxx-static.pc | tr -s ' ' | awk '{print $2}' ) - then - printf "\\tUnable to determine mongodb-cxx-driver version.\\n" - printf "\\tExiting now.\\n\\n" +done +if [ "${COUNT}" -gt 1 ]; then + printf "\\nThe following dependencies are required to install EOSIO:\\n" + printf "${DISPLAY}\\n\\n" + if [ $ANSWER != 1 ]; then read -p "Do you wish to install these dependencies? (y/n) " ANSWER; fi + case $ANSWER in + 1 | [Yy]* ) + if ! sudo $YUM -y install ${DEP}; then + printf " - YUM dependency installation failed!\\n" exit 1; + else + printf " - YUM dependencies installed successfully.\\n" fi - maj=$( echo "${version}" | cut -d'.' -f1 ) - min=$( echo "${version}" | cut -d'.' -f2 ) - if [ "${maj}" -gt 3 ]; then - MONGO_INSTALL=true - elif [ "${maj}" -eq 3 ] && [ "${min}" -lt 3 ]; then - MONGO_INSTALL=true - fi - fi - fi - - if [ $MONGO_INSTALL == "true" ]; then - if ! cd "${TEMP_DIR}" - then - printf "\\tUnable to enter directory %s.\\n" "${TEMP_DIR}" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - STATUS=$( curl -LO -w '%{http_code}' --connect-timeout 30 https://github.com/mongodb/mongo-c-driver/releases/download/1.10.2/mongo-c-driver-1.10.2.tar.gz ) - if [ "${STATUS}" -ne 200 ]; then - if ! rm -f "${TEMP_DIR}/mongo-c-driver-1.10.2.tar.gz" - then - printf "\\tUnable to remove file %s/mongo-c-driver-1.10.2.tar.gz.\\n" "${TEMP_DIR}" - fi - printf "\\tUnable to download MongoDB C driver at this time.\\n" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! tar xf mongo-c-driver-1.10.2.tar.gz - then - printf "\\tUnable to unarchive file %s/mongo-c-driver-1.10.2.tar.gz.\\n" "${TEMP_DIR}" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! rm -f "${TEMP_DIR}/mongo-c-driver-1.10.2.tar.gz" - then - printf "\\tUnable to remove file mongo-c-driver-1.10.2.tar.gz.\\n" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! cd "${TEMP_DIR}"/mongo-c-driver-1.10.2 - then - printf "\\tUnable to cd into directory %s/mongo-c-driver-1.10.2.\\n" "${TEMP_DIR}" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! mkdir cmake-build - then - printf "\\tUnable to create directory %s/mongo-c-driver-1.10.2/cmake-build.\\n" "${TEMP_DIR}" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! 
cd cmake-build - then - printf "\\tUnable to enter directory %s/mongo-c-driver-1.10.2/cmake-build.\\n" "${TEMP_DIR}" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! cmake -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=/usr/local -DENABLE_BSON=ON \ - -DENABLE_SSL=OPENSSL -DENABLE_AUTOMATIC_INIT_AND_CLEANUP=OFF -DENABLE_STATIC=ON .. - then - printf "\\tConfiguring MongoDB C driver has encountered the errors above.\\n" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! make -j"${CPU_CORE}" - then - printf "\\tError compiling MongoDB C driver.\\n" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! sudo make install - then - printf "\\tError installing MongoDB C driver.\\nMake sure you have sudo privileges.\\n" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! cd "${TEMP_DIR}" - then - printf "\\tUnable to enter directory %s.\\n" "${TEMP_DIR}" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! rm -rf "${TEMP_DIR}/mongo-c-driver-1.10.2" - then - printf "\\tUnable to remove directory %s/mongo-c-driver-1.10.2.\\n" "${TEMP_DIR}" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! git clone https://github.com/mongodb/mongo-cxx-driver.git --branch releases/v3.3 --depth 1 - then - printf "\\tUnable to clone MongoDB C++ driver at this time.\\n" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! cd "${TEMP_DIR}/mongo-cxx-driver/build" - then - printf "\\tUnable to enter directory %s/mongo-cxx-driver/build.\\n" "${TEMP_DIR}" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! cmake -DBUILD_SHARED_LIBS=OFF -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=/usr/local .. - then - printf "\\tCmake has encountered the above errors building the MongoDB C++ driver.\\n" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! sudo make -j"${CPU_CORE}" - then - printf "\\tError compiling MongoDB C++ driver.\\n" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! sudo make install - then - printf "\\tError installing MongoDB C++ driver.\\nMake sure you have sudo privileges.\\n" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! cd "${TEMP_DIR}" - then - printf "\\tUnable to enter directory %s.\\n" "${TEMP_DIR}" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! sudo rm -rf "${TEMP_DIR}/mongo-cxx-driver" - then - printf "\\tUnable to remove directory %s/mongo-cxx-driver.\\n" "${TEMP_DIR}" "${TEMP_DIR}" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - printf "\\tMongo C++ driver installed at /usr/local/lib64/libmongocxx-static.a.\\n" - else - printf "\\tMongo C++ driver found at /usr/local/lib64/libmongocxx-static.a.\\n" - fi - - printf "\\n\\tChecking LLVM with WASM support installation.\\n" - if [ ! -d "${HOME}/opt/wasm/bin" ]; then - printf "\\tInstalling LLVM & WASM\\n" - if ! cd "${TEMP_DIR}" - then - printf "\\n\\tUnable to cd into directory %s.\\n" "${TEMP_DIR}" - printf "\\n\\tExiting now.\\n" - exit 1; - fi - if ! mkdir "${TEMP_DIR}/llvm-compiler" 2>/dev/null - then - printf "\\n\\tUnable to create directory %s/llvm-compiler.\\n" "${TEMP_DIR}" - printf "\\n\\tExiting now.\\n" - exit 1; - fi - if ! cd "${TEMP_DIR}/llvm-compiler" - then - printf "\\n\\tUnable to enter directory %s/llvm-compiler.\\n" "${TEMP_DIR}" - printf "\\n\\tExiting now.\\n" - exit 1; - fi - if ! git clone --depth 1 --single-branch --branch release_40 https://github.com/llvm-mirror/llvm.git - then - printf "\\tUnable to clone llvm repo @ https://github.com/llvm-mirror/llvm.git.\\n" - printf "\\n\\tExiting now.\\n" - exit 1; - fi - if ! 
cd "${TEMP_DIR}/llvm-compiler/llvm" - then - printf "\\n\\tUnable to enter directory %s/llvm-compiler/llvm.\\n" "${TEMP_DIR}" - printf "\\n\\tExiting now.\\n" - exit 1; - fi - if ! $(curl https://bugzilla.redhat.com/attachment.cgi?id=1389687 | git apply) - then - printf "\\n\\tUnable to apply patch https://bugzilla.redhat.com/attachment.cgi?id=1389687.\\n" - printf "\\n\\tExiting now.\\n" - exit 1; - fi - if ! cd "${TEMP_DIR}/llvm-compiler/llvm/tools" - then - printf "\\n\\tUnable to enter directory %s/llvm-compiler/llvm/tools.\\n" "${TEMP_DIR}" - printf "\\n\\tExiting now.\\n" - exit 1; - fi - if ! git clone --depth 1 --single-branch --branch release_40 https://github.com/llvm-mirror/clang.git - then - printf "\\tUnable to clone clang repo @ https://github.com/llvm-mirror/clang.git.\\n" - printf "\\n\\tExiting now.\\n" - exit 1; - fi - if ! cd "${TEMP_DIR}/llvm-compiler/llvm" - then - printf "\\n\\tUnable to enter directory %s/llvm-compiler/llvm.\\n" "${TEMP_DIR}" - printf "\\n\\tExiting now.\\n" - exit 1; - fi - if ! mkdir "${TEMP_DIR}/llvm-compiler/llvm/build" - then - printf "\\n\\tUnable to create directory %s/llvm-compiler/llvm/build.\\n" "${TEMP_DIR}" - printf "\\n\\tExiting now.\\n" - exit 1; - fi - if ! cd "${TEMP_DIR}/llvm-compiler/llvm/build" - then - printf "\\n\\tUnable to enter directory %s/llvm-compiler/llvm/build.\\n" "${TEMP_DIR}" - printf "\\n\\tExiting now.\\n" - exit 1; - fi - if ! cmake -G "Unix Makefiles" -DCMAKE_INSTALL_PREFIX="${HOME}/opt/wasm" -DLLVM_ENABLE_RTTI=1 \ - -DLLVM_EXPERIMENTAL_TARGETS_TO_BUILD=WebAssembly -DCMAKE_BUILD_TYPE=Release ../ - then - printf "\\tCmake compiling LLVM/Clang with WASM support has exited with the above errors.\\n" - printf "\\n\\tExiting now.\\n" - exit 1; - fi - if ! make -j"${JOBS}" - then - printf "\\tMake compiling LLVM/Clang with WASM support has exited with the above errors.\\n" - printf "\\n\\tExiting now.\\n" - exit 1; - fi - if ! make install - then - printf "\\tMake installing LLVM/Clang with WASM support has exited with the above errors.\\n" - printf "\\n\\tExiting now.\\n" - exit 1; - fi - if ! rm -rf "${TEMP_DIR}/llvm-compiler" 2>/dev/null - then - printf "\\n\\tUnable to remove directory %s/llvm-compiler.\\n" "${TEMP_DIR}" - printf "\\n\\tExiting now.\\n" - exit 1; - fi - printf "\\n\\tWASM successfully installed at %s/opt/wasm\\n\\n" "${HOME}" - else - printf "\\n\\tWASM found @ %s/opt/wasm\\n\\n" "${HOME}" - fi - - function print_instructions() - { - printf "\\n\\t%s -f %s &\\n" "$( command -v mongod )" "${MONGOD_CONF}" - printf "\\tcd %s; make test\\n\\n" "${BUILD_DIR}" - return 0; - } + ;; + [Nn]* ) echo "User aborting installation of required dependencies, Exiting now."; exit;; + * ) echo "Please type 'y' for yes or 'n' for no."; exit;; + esac +else + printf " - No required YUM dependencies to install.\\n" +fi + +printf "\\n" + + +printf "Checking CMAKE installation...\\n" +if [ ! -e $CMAKE ]; then + printf "Installing CMAKE...\\n" + curl -LO https://cmake.org/files/v$CMAKE_VERSION_MAJOR.$CMAKE_VERSION_MINOR/cmake-$CMAKE_VERSION.tar.gz \ + && tar -xzf cmake-$CMAKE_VERSION.tar.gz \ + && cd cmake-$CMAKE_VERSION \ + && ./bootstrap --prefix=$HOME \ + && make -j"${JOBS}" \ + && make install \ + && cd .. \ + && rm -f cmake-$CMAKE_VERSION.tar.gz \ + || exit 1 + printf " - CMAKE successfully installed @ ${CMAKE} \\n" +else + printf " - CMAKE found @ ${CMAKE}.\\n" +fi +if [ $? 
-ne 0 ]; then exit -1; fi + + +printf "\\n" + + +printf "Checking Boost library (${BOOST_VERSION}) installation...\\n" +BOOSTVERSION=$( grep "#define BOOST_VERSION" "$HOME/opt/boost/include/boost/version.hpp" 2>/dev/null | tail -1 | tr -s ' ' | cut -d\ -f3 ) +if [ "${BOOSTVERSION}" != "${BOOST_VERSION_MAJOR}0${BOOST_VERSION_MINOR}0${BOOST_VERSION_PATCH}" ]; then + printf "Installing Boost library...\\n" + curl -LO https://dl.bintray.com/boostorg/release/${BOOST_VERSION_MAJOR}.${BOOST_VERSION_MINOR}.${BOOST_VERSION_PATCH}/source/boost_$BOOST_VERSION.tar.bz2 \ + && tar -xjf boost_$BOOST_VERSION.tar.bz2 \ + && cd $BOOST_ROOT \ + && ./bootstrap.sh --prefix=$BOOST_ROOT \ + && ./b2 -q -j"${JOBS}" install \ + && cd .. \ + && rm -f boost_$BOOST_VERSION.tar.bz2 \ + && rm -rf $BOOST_LINK_LOCATION \ + && ln -s $BOOST_ROOT $BOOST_LINK_LOCATION \ + || exit 1 + printf " - Boost library successfully installed @ ${BOOST_ROOT} (Symlinked to ${BOOST_LINK_LOCATION}).\\n" +else + printf " - Boost library found with correct version @ ${BOOST_ROOT} (Symlinked to ${BOOST_LINK_LOCATION}).\\n" +fi +if [ $? -ne 0 ]; then exit -1; fi + + +printf "\\n" + + +printf "Checking MongoDB installation...\\n" +if [ ! -d $MONGODB_ROOT ]; then + printf "Installing MongoDB into ${MONGODB_ROOT}...\\n" + curl -OL https://fastdl.mongodb.org/linux/mongodb-linux-x86_64-amazon-$MONGODB_VERSION.tgz \ + && tar -xzf mongodb-linux-x86_64-amazon-$MONGODB_VERSION.tgz \ + && mv $SRC_LOCATION/mongodb-linux-x86_64-amazon-$MONGODB_VERSION $MONGODB_ROOT \ + && touch $MONGODB_LOG_LOCATION/mongod.log \ + && rm -f mongodb-linux-x86_64-amazon-$MONGODB_VERSION.tgz \ + && cp -f $REPO_ROOT/scripts/mongod.conf $MONGODB_CONF \ + && mkdir -p $MONGODB_DATA_LOCATION \ + && rm -rf $MONGODB_LINK_LOCATION \ + && rm -rf $BIN_LOCATION/mongod \ + && ln -s $MONGODB_ROOT $MONGODB_LINK_LOCATION \ + && ln -s $MONGODB_LINK_LOCATION/bin/mongod $BIN_LOCATION/mongod \ + || exit 1 + printf " - MongoDB successfully installed @ ${MONGODB_ROOT}\\n" +else + printf " - MongoDB found with correct version @ ${MONGODB_ROOT}.\\n" +fi +if [ $? -ne 0 ]; then exit -1; fi +printf "Checking MongoDB C driver installation...\\n" +if [ ! -d $MONGO_C_DRIVER_ROOT ]; then + printf "Installing MongoDB C driver...\\n" + curl -LO https://github.com/mongodb/mongo-c-driver/releases/download/$MONGO_C_DRIVER_VERSION/mongo-c-driver-$MONGO_C_DRIVER_VERSION.tar.gz \ + && tar -xzf mongo-c-driver-$MONGO_C_DRIVER_VERSION.tar.gz \ + && cd mongo-c-driver-$MONGO_C_DRIVER_VERSION \ + && mkdir -p cmake-build \ + && cd cmake-build \ + && $CMAKE -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$HOME -DENABLE_BSON=ON -DENABLE_SSL=OPENSSL -DENABLE_AUTOMATIC_INIT_AND_CLEANUP=OFF -DENABLE_STATIC=ON .. \ + && make -j"${JOBS}" \ + && make install \ + && cd ../.. \ + && rm mongo-c-driver-$MONGO_C_DRIVER_VERSION.tar.gz \ + || exit 1 + printf " - MongoDB C driver successfully installed @ ${MONGO_C_DRIVER_ROOT}.\\n" +else + printf " - MongoDB C driver found with correct version @ ${MONGO_C_DRIVER_ROOT}.\\n" +fi +if [ $? -ne 0 ]; then exit -1; fi +printf "Checking MongoDB C++ driver installation...\\n" +if [ ! 
-d $MONGO_CXX_DRIVER_ROOT ]; then + printf "Installing MongoDB C++ driver...\\n" + curl -L https://github.com/mongodb/mongo-cxx-driver/archive/r$MONGO_CXX_DRIVER_VERSION.tar.gz -o mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION.tar.gz \ + && tar -xzf mongo-cxx-driver-r${MONGO_CXX_DRIVER_VERSION}.tar.gz \ + && cd mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION/build \ + && $CMAKE -DBUILD_SHARED_LIBS=OFF -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$HOME .. \ + && make -j"${JOBS}" VERBOSE=1 \ + && make install \ + && cd ../.. \ + && rm -f mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION.tar.gz \ + || exit 1 + printf " - MongoDB C++ driver successfully installed @ ${MONGO_CXX_DRIVER_ROOT}.\\n" +else + printf " - MongoDB C++ driver found with correct version @ ${MONGO_CXX_DRIVER_ROOT}.\\n" +fi +if [ $? -ne 0 ]; then exit -1; fi + + +printf "\\n" + + +printf "Checking LLVM 4 support...\\n" +if [ ! -d $LLVM_ROOT ]; then + ln -s /usr/lib64/llvm4.0 $LLVM_ROOT \ + || exit 1 + printf " - LLVM successfully linked from /usr/lib64/llvm4.0 to ${LLVM_ROOT}\\n" +else + printf " - LLVM found @ ${LLVM_ROOT}.\\n" +fi +if [ $? -ne 0 ]; then exit -1; fi + + +cd .. +printf "\\n" + +function print_instructions() { + return 0 +} \ No newline at end of file diff --git a/scripts/eosio_build_ubuntu.sh b/scripts/eosio_build_ubuntu.sh old mode 100644 new mode 100755 index 84b28a2701b..65fbfeeec07 --- a/scripts/eosio_build_ubuntu.sh +++ b/scripts/eosio_build_ubuntu.sh @@ -1,430 +1,259 @@ - OS_VER=$( grep VERSION_ID /etc/os-release | cut -d'=' -f2 | sed 's/[^0-9\.]//gI' ) - OS_MAJ=$(echo "${OS_VER}" | cut -d'.' -f1) - OS_MIN=$(echo "${OS_VER}" | cut -d'.' -f2) +if [ $1 == 1 ]; then ANSWER=1; else ANSWER=0; fi - MEM_MEG=$( free -m | sed -n 2p | tr -s ' ' | cut -d\ -f2 || cut -d' ' -f2 ) - CPU_SPEED=$( lscpu | grep -m1 "MHz" | tr -s ' ' | cut -d\ -f3 || cut -d' ' -f3 | cut -d'.' -f1 ) - CPU_CORE=$( lscpu -pCPU | grep -v "#" | wc -l ) +OS_VER=$( grep VERSION_ID /etc/os-release | cut -d'=' -f2 | sed 's/[^0-9\.]//gI' ) +OS_MAJ=$(echo "${OS_VER}" | cut -d'.' -f1) +OS_MIN=$(echo "${OS_VER}" | cut -d'.' -f2) - MEM_GIG=$(( ((MEM_MEG / 1000) / 2) )) - JOBS=$(( MEM_GIG > CPU_CORE ? CPU_CORE : MEM_GIG )) +MEM_MEG=$( free -m | sed -n 2p | tr -s ' ' | cut -d\ -f2 || cut -d' ' -f2 ) +CPU_SPEED=$( lscpu | grep -m1 "MHz" | tr -s ' ' | cut -d\ -f3 || cut -d' ' -f3 | cut -d'.' -f1 ) +CPU_CORE=$( nproc ) +MEM_GIG=$(( ((MEM_MEG / 1000) / 2) )) +export JOBS=$(( MEM_GIG > CPU_CORE ? CPU_CORE : MEM_GIG )) - DISK_INSTALL=$(df -h . | tail -1 | tr -s ' ' | cut -d\ -f1 || cut -d' ' -f1) - DISK_TOTAL_KB=$(df . | tail -1 | awk '{print $2}') - DISK_AVAIL_KB=$(df . | tail -1 | awk '{print $4}') - DISK_TOTAL=$(( DISK_TOTAL_KB / 1048576 )) - DISK_AVAIL=$(( DISK_AVAIL_KB / 1048576 )) +DISK_INSTALL=$(df -h . | tail -1 | tr -s ' ' | cut -d\ -f1 || cut -d' ' -f1) +DISK_TOTAL_KB=$(df . | tail -1 | awk '{print $2}') +DISK_AVAIL_KB=$(df . 
| tail -1 | awk '{print $4}') +DISK_TOTAL=$(( DISK_TOTAL_KB / 1048576 )) +DISK_AVAIL=$(( DISK_AVAIL_KB / 1048576 )) - printf "\\n\\tOS name: %s\\n" "${OS_NAME}" - printf "\\tOS Version: %s\\n" "${OS_VER}" - printf "\\tCPU speed: %sMhz\\n" "${CPU_SPEED}" - printf "\\tCPU cores: %s\\n" "${CPU_CORE}" - printf "\\tPhysical Memory: %s Mgb\\n" "${MEM_MEG}" - printf "\\tDisk install: %s\\n" "${DISK_INSTALL}" - printf "\\tDisk space total: %sG\\n" "${DISK_TOTAL%.*}" - printf "\\tDisk space available: %sG\\n" "${DISK_AVAIL%.*}" +printf "\\nOS name: ${OS_NAME}\\n" +printf "OS Version: ${OS_VER}\\n" +printf "CPU speed: ${CPU_SPEED}Mhz\\n" +printf "CPU cores: %s\\n" "${CPU_CORE}" +printf "Physical Memory: ${MEM_MEG} Mgb\\n" +printf "Disk install: ${DISK_INSTALL}\\n" +printf "Disk space total: ${DISK_TOTAL%.*}G\\n" +printf "Disk space available: ${DISK_AVAIL%.*}G\\n" - if [ "${MEM_MEG}" -lt 7000 ]; then - printf "\\tYour system must have 7 or more Gigabytes of physical memory installed.\\n" - printf "\\tExiting now.\\n" - exit 1 +if [ "${MEM_MEG}" -lt 7000 ]; then + printf "Your system must have 7 or more Gigabytes of physical memory installed.\\n" + printf "Exiting now.\\n" + exit 1 +fi + +case "${OS_NAME}" in + "Linux Mint") + if [ "${OS_MAJ}" -lt 18 ]; then + printf "You must be running Linux Mint 18.x or higher to install EOSIO.\\n" + printf "Exiting now.\\n" + exit 1 + fi + ;; + "Ubuntu") + if [ "${OS_MAJ}" -lt 16 ]; then + printf "You must be running Ubuntu 16.04.x or higher to install EOSIO.\\n" + printf "Exiting now.\\n" + exit 1 + fi + # UBUNTU 18 doesn't have MONGODB 3.6.3 + if [ $OS_MAJ -gt 16 ]; then + export MONGODB_VERSION=4.1.1 + fi + # We have to re-set this with the new version + export MONGODB_ROOT=${OPT_LOCATION}/mongodb-${MONGODB_VERSION} + ;; + "Debian") + if [ $OS_MAJ -lt 10 ]; then + printf "You must be running Debian 10 to install EOSIO, and resolve missing dependencies from unstable (sid).\n" + printf "Exiting now.\n" + exit 1 fi + ;; +esac - case "${OS_NAME}" in - "Linux Mint") - if [ "${OS_MAJ}" -lt 18 ]; then - printf "\\tYou must be running Linux Mint 18.x or higher to install EOSIO.\\n" - printf "\\tExiting now.\\n" - exit 1 - fi - ;; - "Ubuntu") - if [ "${OS_MAJ}" -lt 16 ]; then - printf "\\tYou must be running Ubuntu 16.04.x or higher to install EOSIO.\\n" - printf "\\tExiting now.\\n" +if [ "${DISK_AVAIL%.*}" -lt "${DISK_MIN}" ]; then + printf "You must have at least %sGB of available storage to install EOSIO.\\n" "${DISK_MIN}" + printf "Exiting now.\\n" + exit 1 +fi + +# llvm-4.0 is installed into /usr/lib/llvm-4.0 +# clang is necessary for building on ubuntu +DEP_ARRAY=( + git llvm-4.0 clang-4.0 libclang-4.0-dev make automake libbz2-dev libssl-dev doxygen graphviz \ + libgmp3-dev autotools-dev build-essential libicu-dev python2.7 python2.7-dev python3 python3-dev \ + autoconf libtool curl zlib1g-dev sudo ruby libusb-1.0-0-dev libcurl4-gnutls-dev pkg-config +) +COUNT=1 +DISPLAY="" +DEP="" + +if [[ "${ENABLE_CODE_COVERAGE}" == true ]]; then + DEP_ARRAY+=(lcov) +fi + +if [ $ANSWER != 1 ]; then read -p "Do you wish to update repositories with apt-get update? (y/n) " ANSWER; fi +case $ANSWER in + 1 | [Yy]* ) + if ! 
sudo apt-get update; then + printf " - APT update failed.\\n" + exit 1; + else + printf " - APT update complete.\\n" + fi + ;; + [Nn]* ) echo "Proceeding without update!";; + * ) echo "Please type 'y' for yes or 'n' for no."; exit;; +esac + +printf "\\nChecking for installed dependencies...\\n" +for (( i=0; i<${#DEP_ARRAY[@]}; i++ )); do + pkg=$( dpkg -s "${DEP_ARRAY[$i]}" 2>/dev/null | grep Status | tr -s ' ' | cut -d\ -f4 ) + if [ -z "$pkg" ]; then + DEP=$DEP" ${DEP_ARRAY[$i]} " + DISPLAY="${DISPLAY}${COUNT}. ${DEP_ARRAY[$i]}\\n" + printf " - Package %s${bldred} NOT${txtrst} found!\\n" "${DEP_ARRAY[$i]}" + (( COUNT++ )) + else + printf " - Package %s found.\\n" "${DEP_ARRAY[$i]}" + continue + fi +done +if [ "${COUNT}" -gt 1 ]; then + printf "\\nThe following dependencies are required to install EOSIO:\\n" + printf "${DISPLAY}\\n\\n" + if [ $ANSWER != 1 ]; then read -p "Do you wish to install these packages? (y/n) " ANSWER; fi + case $ANSWER in + 1 | [Yy]* ) + if ! sudo apt-get -y install ${DEP}; then + printf " - APT dependency failed.\\n" exit 1 + else + printf " - APT dependencies installed successfully.\\n" fi ;; - "Debian") - if [ $OS_MAJ -lt 10 ]; then - printf "\tYou must be running Debian 10 to install EOSIO, and resolve missing dependencies from unstable (sid).\n" - printf "\tExiting now.\n" - exit 1 - fi - ;; + [Nn]* ) echo "User aborting installation of required dependencies, Exiting now."; exit;; + * ) echo "Please type 'y' for yes or 'n' for no."; exit;; esac +else + printf " - No required APT dependencies to install." +fi - if [ "${DISK_AVAIL%.*}" -lt "${DISK_MIN}" ]; then - printf "\\tYou must have at least %sGB of available storage to install EOSIO.\\n" "${DISK_MIN}" - printf "\\tExiting now.\\n" - exit 1 - fi - DEP_ARRAY=(clang-4.0 lldb-4.0 libclang-4.0-dev cmake make automake libbz2-dev libssl-dev \ - libgmp3-dev autotools-dev build-essential libicu-dev python2.7-dev python3-dev \ - autoconf libtool curl zlib1g-dev doxygen graphviz) - COUNT=1 - DISPLAY="" - DEP="" +printf "\\n" - if [[ "${ENABLE_CODE_COVERAGE}" == true ]]; then - DEP_ARRAY+=(lcov) - fi - printf "\\n\\tChecking for installed dependencies.\\n\\n" +printf "Checking CMAKE installation...\\n" +if [ ! -e $CMAKE ]; then + printf "Installing CMAKE...\\n" + curl -LO https://cmake.org/files/v$CMAKE_VERSION_MAJOR.$CMAKE_VERSION_MINOR/cmake-$CMAKE_VERSION.tar.gz \ + && tar -xzf cmake-$CMAKE_VERSION.tar.gz \ + && cd cmake-$CMAKE_VERSION \ + && ./bootstrap --prefix=$HOME \ + && make -j"${JOBS}" \ + && make install \ + && cd .. \ + && rm -f cmake-$CMAKE_VERSION.tar.gz \ + || exit 1 + printf " - CMAKE successfully installed @ ${CMAKE} \\n" +else + printf " - CMAKE found @ ${CMAKE}.\\n" +fi +if [ $? -ne 0 ]; then exit -1; fi - for (( i=0; i<${#DEP_ARRAY[@]}; i++ )); - do - pkg=$( dpkg -s "${DEP_ARRAY[$i]}" 2>/dev/null | grep Status | tr -s ' ' | cut -d\ -f4 ) - if [ -z "$pkg" ]; then - DEP=$DEP" ${DEP_ARRAY[$i]} " - DISPLAY="${DISPLAY}${COUNT}. ${DEP_ARRAY[$i]}\\n\\t" - printf "\\tPackage %s ${bldred} NOT ${txtrst} found.\\n" "${DEP_ARRAY[$i]}" - (( COUNT++ )) - else - printf "\\tPackage %s found.\\n" "${DEP_ARRAY[$i]}" - continue - fi - done - if [ "${COUNT}" -gt 1 ]; then - printf "\\n\\tThe following dependencies are required to install EOSIO.\\n" - printf "\\n\\t${DISPLAY}\\n\\n" - printf "\\tDo you wish to install these packages?\\n" - select yn in "Yes" "No"; do - case $yn in - [Yy]* ) - printf "\\n\\n\\tInstalling dependencies\\n\\n" - sudo apt-get update - if ! 
sudo apt-get -y install ${DEP} - then - printf "\\n\\tDPKG dependency failed.\\n" - printf "\\n\\tExiting now.\\n" - exit 1 - else - printf "\\n\\tDPKG dependencies installed successfully.\\n" - fi - break;; - [Nn]* ) echo "User aborting installation of required dependencies, Exiting now."; exit;; - * ) echo "Please type 1 for yes or 2 for no.";; - esac - done - else - printf "\\n\\tNo required dpkg dependencies to install.\\n" - fi +printf "\\n" - if [ -d "${HOME}/opt/boost_1_67_0" ]; then - if ! mv "${HOME}/opt/boost_1_67_0" "$BOOST_ROOT" - then - printf "\\n\\tUnable to move directory %s/opt/boost_1_67_0 to %s.\\n" "${HOME}" "${BOOST_ROOT}" - printf "\\n\\tExiting now.\\n" - exit 1 - fi - if [ -d "$BUILD_DIR" ]; then - if ! rm -rf "$BUILD_DIR" - then - printf "\\tUnable to remove directory %s. Please remove this directory and run this script %s again. 0\\n" "$BUILD_DIR" "${BASH_SOURCE[0]}" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - fi - fi - printf "\\n\\tChecking boost library installation.\\n" - BVERSION=$( grep BOOST_LIB_VERSION "${BOOST_ROOT}/include/boost/version.hpp" 2>/dev/null \ - | tail -1 | tr -s ' ' | cut -d\ -f3 | sed 's/[^0-9\._]//gI') - if [ "${BVERSION}" != "1_67" ]; then - printf "\\tRemoving existing boost libraries in %s/opt/boost* .\\n" "${HOME}" - if ! rm -rf "${HOME}"/opt/boost* - then - printf "\\n\\tUnable to remove deprecated boost libraries at this time.\\n" - printf "\\n\\tExiting now.\\n\\n" - exit 1; - fi - printf "\\tInstalling boost libraries.\\n" - if ! cd "${TEMP_DIR}" - then - printf "\\n\\tUnable to enter directory %s.\\n" "${TEMP_DIR}" - printf "\\n\\tExiting now.\\n\\n" - exit 1; - fi - STATUS=$(curl -LO -w '%{http_code}' --connect-timeout 30 https://dl.bintray.com/boostorg/release/1.67.0/source/boost_1_67_0.tar.bz2) - if [ "${STATUS}" -ne 200 ]; then - printf "\\tUnable to download Boost libraries at this time.\\n" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - if ! tar xf "${TEMP_DIR}/boost_1_67_0.tar.bz2" - then - printf "\\n\\tUnable to unarchive file %s/boost_1_67_0.tar.bz2.\\n" "${TEMP_DIR}" - printf "\\n\\tExiting now.\\n\\n" - exit 1; - fi - if ! rm -f "${TEMP_DIR}/boost_1_67_0.tar.bz2" - then - printf "\\n\\tUnable to remove file %s/boost_1_67_0.tar.bz2.\\n" "${TEMP_DIR}" - printf "\\n\\tExiting now.\\n\\n" - exit 1; - fi - if ! cd "${TEMP_DIR}/boost_1_67_0/" - then - printf "\\n\\tUnable to enter directory %s/boost_1_67_0.\\n" "${TEMP_DIR}" - printf "\\n\\tExiting now.\\n\\n" - exit 1; - fi - if ! ./bootstrap.sh "--prefix=$BOOST_ROOT" - then - printf "\\n\\tInstallation of boost libraries failed. 0\\n" - printf "\\n\\tExiting now.\\n\\n" - exit 1 - fi - if ! ./b2 -j"${CPU_CORE}" install - then - printf "\\n\\tInstallation of boost libraries failed. 1\\n" - printf "\\n\\tExiting now.\\n\\n" - exit 1 - fi - if ! rm -rf "${TEMP_DIR}"/boost_1_67_0 - then - printf "\\n\\tUnable to remove %s/boost_1_67_0.\\n" "${TEMP_DIR}" - printf "\\n\\tExiting now.\\n\\n" - exit 1 - fi - if [ -d "$BUILD_DIR" ]; then - if ! rm -rf "$BUILD_DIR" - then - printf "\\tUnable to remove directory %s. Please remove this directory and run this script %s again. 
0\\n" "$BUILD_DIR" "${BASH_SOURCE[0]}" - printf "\\tExiting now.\\n\\n" - exit 1; - fi - fi - printf "\\tBoost successfully installed @ %s.\\n" "${BOOST_ROOT}" - else - printf "\\tBoost found at %s.\\n" "${BOOST_ROOT}" - fi +printf "Checking Boost library (${BOOST_VERSION}) installation...\\n" +BOOSTVERSION=$( grep "#define BOOST_VERSION" "$HOME/opt/boost/include/boost/version.hpp" 2>/dev/null | tail -1 | tr -s ' ' | cut -d\ -f3 ) +if [ "${BOOSTVERSION}" != "${BOOST_VERSION_MAJOR}0${BOOST_VERSION_MINOR}0${BOOST_VERSION_PATCH}" ]; then + printf "Installing Boost library...\\n" + curl -LO https://dl.bintray.com/boostorg/release/${BOOST_VERSION_MAJOR}.${BOOST_VERSION_MINOR}.${BOOST_VERSION_PATCH}/source/boost_$BOOST_VERSION.tar.bz2 \ + && tar -xjf boost_$BOOST_VERSION.tar.bz2 \ + && cd $BOOST_ROOT \ + && ./bootstrap.sh --prefix=$BOOST_ROOT \ + && ./b2 -q -j"${JOBS}" install \ + && cd .. \ + && rm -f boost_$BOOST_VERSION.tar.bz2 \ + && rm -rf $BOOST_LINK_LOCATION \ + && ln -s $BOOST_ROOT $BOOST_LINK_LOCATION \ + || exit 1 + printf " - Boost library successfully installed @ ${BOOST_ROOT} (Symlinked to ${BOOST_LINK_LOCATION}).\\n" +else + printf " - Boost library found with correct version @ ${BOOST_ROOT} (Symlinked to ${BOOST_LINK_LOCATION}).\\n" +fi +if [ $? -ne 0 ]; then exit -1; fi - printf "\\n\\tChecking MongoDB installation.\\n" - if [ ! -e "${MONGOD_CONF}" ]; then - printf "\\n\\tInstalling MongoDB 3.6.3.\\n" - if ! cd "${HOME}/opt" - then - printf "\\n\\tUnable to enter directory %s/opt.\\n" "${HOME}" - printf "\\n\\tExiting now.\\n\\n" - exit 1; - fi - STATUS=$(curl -LO -w '%{http_code}' --connect-timeout 30 https://fastdl.mongodb.org/linux/mongodb-linux-x86_64-3.6.3.tgz) - if [ "${STATUS}" -ne 200 ]; then - printf "\\tUnable to download MongoDB at this time.\\n" - printf "\\n\\tExiting now.\\n\\n" - exit 1; - fi - if ! tar xf "${HOME}/opt/mongodb-linux-x86_64-3.6.3.tgz" - then - printf "\\tUnable to unarchive file %s/opt/mongodb-linux-x86_64-3.6.3.tgz.\\n" "${HOME}" - printf "\\n\\tExiting now.\\n\\n" - exit 1; - fi - if ! rm -f "${HOME}/opt/mongodb-linux-x86_64-3.6.3.tgz" - then - printf "\\tUnable to remove file %s/opt/mongodb-linux-x86_64-3.6.3.tgz.\\n" "${HOME}" - printf "\\n\\tExiting now.\\n\\n" - exit 1; - fi - if ! ln -s "${HOME}/opt/mongodb-linux-x86_64-3.6.3/" "${HOME}/opt/mongodb" - then - printf "\\tUnable to symbolic link %s/opt/mongodb-linux-x86_64-3.6.3/ to %s/opt/mongodb.\\n" "${HOME}" "${HOME}" - printf "\\n\\tExiting now.\\n\\n" - exit 1; - fi - if ! mkdir "${HOME}/opt/mongodb/data" - then - printf "\\tUnable to create directory %s/opt/mongodb/data.\\n" "${HOME}" - printf "\\n\\tExiting now.\\n\\n" - exit 1; - fi - if ! mkdir "${HOME}/opt/mongodb/log" - then - printf "\\tUnable to create directory %s/opt/mongodb/log.\\n" "${HOME}" - printf "\\n\\tExiting now.\\n\\n" - exit 1; - fi - if ! touch "${HOME}/opt/mongodb/log/mongodb.log" - then - printf "\\tUnable to create file %s/opt/mongodb/log/mongodb.log.\\n" "${HOME}" - printf "\\n\\tExiting now.\\n\\n" - exit 1; - fi -if ! tee > /dev/null "${MONGOD_CONF}" < /dev/null;then + printf "Unable to enter build directory %s.\\n Exiting now.\\n" "${BUILD_DIR}" + exit 1; +fi + +if ! 
make install; then + printf "\\nMAKE installing EOSIO has exited with the above error.\\n\\n" + exit -1 +fi +popd &> /dev/null + +printf "\n${bldred} ___ ___ ___ ___\n" +printf " / /\\ / /\\ / /\\ ___ / /\\ \n" +printf " / /:/_ / /::\\ / /:/_ / /\\ / /::\\ \n" +printf " / /:/ /\\ / /:/\\:\\ / /:/ /\\ / /:/ / /:/\\:\\ \n" +printf " / /:/ /:/_ / /:/ \\:\\ / /:/ /::\\ /__/::\\ / /:/ \\:\\ \n" +printf " /__/:/ /:/ /\\ /__/:/ \\__\\:\\ /__/:/ /:/\\:\\ \\__\\/\\:\\__ /__/:/ \\__\\:\\ \n" +printf " \\ \\:\\/:/ /:/ \\ \\:\\ / /:/ \\ \\:\\/:/~/:/ \\ \\:\\/\\ \\ \\:\\ / /:/ \n" +printf " \\ \\::/ /:/ \\ \\:\\ /:/ \\ \\::/ /:/ \\__\\::/ \\ \\:\\ /:/ \n" +printf " \\ \\:\\/:/ \\ \\:\\/:/ \\__\\/ /:/ /__/:/ \\ \\:\\/:/ \n" +printf " \\ \\::/ \\ \\::/ /__/:/ \\__\\/ \\ \\::/ \n" +printf " \\__\\/ \\__\\/ \\__\\/ \\__\\/ \n\n${txtrst}" + +printf "==============================================================================================\\n" +printf "EOSIO has been installed into ${OPT_LOCATION}/eosio/bin!\\n" +printf "If you need to, you can fully uninstall using eosio_uninstall.sh && scripts/clean_old_install.sh.\\n" +printf "==============================================================================================\\n\\n" + +printf "EOSIO website: https://eos.io\\n" +printf "EOSIO resources: https://eos.io/resources/\\n" +printf "EOSIO Stack Exchange: https://eosio.stackexchange.com\\n" diff --git a/scripts/eosio_uninstall.sh b/scripts/eosio_uninstall.sh new file mode 100755 index 00000000000..facb5f935f4 --- /dev/null +++ b/scripts/eosio_uninstall.sh @@ -0,0 +1,87 @@ +#! /bin/bash + +OPT_LOCATION=$HOME/opt + +binaries=( + cleos + eosio-abigen + eosio-launcher + eosio-s2wasm + eosio-wast2wasm + eosiocpp + keosd + nodeos + eosio-applesdemo +) + +if [ -d $OPT_LOCATION/eosio ]; then + printf "Do you wish to remove this install? (requires sudo)\n" + select yn in "Yes" "No"; do + case $yn in + [Yy]* ) + if [ "$(id -u)" -ne 0 ]; then + printf "\nThis requires sudo, please run ./eosio_uninstall.sh with sudo\n\n" + exit -1 + fi + + pushd $HOME &> /dev/null + pushd opt &> /dev/null + rm -rf eosio + # Handle cleanup of directories created from installation + if [ "$1" == "--full" ]; then + if [ -d ~/Library/Application\ Support/eosio ]; then rm -rf ~/Library/Application\ Support/eosio; fi # Mac OS + if [ -d ~/.local/share/eosio ]; then rm -rf ~/.local/share/eosio; fi # Linux + fi + popd &> /dev/null + pushd bin &> /dev/null + for binary in ${binaries[@]}; do + rm ${binary} + done + popd &> /dev/null + pushd lib/cmake &> /dev/null + rm -rf eosio + popd &> /dev/null + + break;; + [Nn]* ) + printf "Aborting uninstall\n\n" + exit -1;; + esac + done +fi + +if [ -d "/usr/local/eosio" ]; then + printf "Do you wish to remove this install? 
(requires sudo)\n" + select yn in "Yes" "No"; do + case $yn in + [Yy]* ) + if [ "$(id -u)" -ne 0 ]; then + printf "\nThis requires sudo, please run ./eosio_uninstall.sh with sudo\n\n" + exit -1 + fi + + pushd /usr/local &> /dev/null + pushd opt &> /dev/null + rm -rf eosio + # Handle cleanup of directories created from installation + if [ "$1" == "--full" ]; then + if [ -d ~/Library/Application\ Support/eosio ]; then rm -rf ~/Library/Application\ Support/eosio; fi # Mac OS + if [ -d ~/.local/share/eosio ]; then rm -rf ~/.local/share/eosio; fi # Linux + fi + popd &> /dev/null + pushd bin &> /dev/null + for binary in ${binaries[@]}; do + rm ${binary} + done + popd &> /dev/null + pushd lib/cmake &> /dev/null + rm -rf eosio + popd &> /dev/null + + break;; + [Nn]* ) + printf "Aborting uninstall\n\n" + exit -1;; + esac + done +fi diff --git a/scripts/full_uninstaller.sh b/scripts/full_uninstaller.sh new file mode 100755 index 00000000000..94401c4a12e --- /dev/null +++ b/scripts/full_uninstaller.sh @@ -0,0 +1,134 @@ +#! /bin/bash +ANSWER=0 +if [[ $1 =~ force-* ]]; then FORCED=1; else FORCED=0; fi +if [ -d "/usr/local/include/eosio" ] || [ -d "$HOME/opt/eosio" ] || [ $FORCED == 1 ]; then # use force for running the script directly + printf "\nEOSIO installation (AND DEPENDENCIES) already found...\n" + if [ $1 == 0 ]; then + read -p "Do you wish to remove them? (this includes dependencies)? (y/n) " ANSWER + elif [ $1 == 1 ] || [ $FORCED == 1 ]; then + ANSWER=1 + fi + echo "Uninstalling..." + case $ANSWER in + 1 | [Yy]* ) + if [ -d "$HOME/opt/eosio" ] || [[ $1 == "force-new" ]]; then + if [ $( uname ) == "Darwin" ]; then + # gettext and other brew packages are not modified as they can be dependencies for things other than eosio + if [ $ANSWER != 1 ]; then read -p "Do you wish to uninstall and unlink all brew installed llvm@4 versions? (y/n) " ANSWER; fi + case $ANSWER in + 1 | [Yy]* ) + brew uninstall llvm@4 --force + brew cleanup -s llvm@4 + ;; + [Nn]* ) ;; + * ) echo "Please type 'y' for yes or 'n' for no."; exit;; + esac + if [ $ANSWER != 1 ]; then read -p "Do you wish to uninstall and unlink all brew installed doxygen versions? (y/n) " ANSWER; fi + case $ANSWER in + 1 | [Yy]* ) + brew uninstall doxygen --force + brew cleanup -s doxygen + ;; + [Nn]* ) ;; + * ) echo "Please type 'y' for yes or 'n' for no."; exit;; + esac + if [ $ANSWER != 1 ]; then read -p "Do you wish to uninstall and unlink all brew installed graphviz versions? (y/n) " ANSWER; fi + case $ANSWER in + 1 | [Yy]* ) + brew uninstall graphviz --force + brew cleanup -s graphviz + ;; + [Nn]* ) ;; + * ) echo "Please type 'y' for yes or 'n' for no."; exit;; + esac + if [ $ANSWER != 1 ]; then read -p "Do you wish to uninstall and unlink all brew installed libusb versions? (y/n) " ANSWER; fi + case $ANSWER in + 1 | [Yy]* ) + brew uninstall libusb --force + brew cleanup -s libusb + ;; + [Nn]* ) ;; + * ) echo "Please type 'y' for yes or 'n' for no."; exit;; + esac + fi + rm -rf $HOME/opt/eosio + rm -f $HOME/bin/eosio-launcher + rm -rf $HOME/lib/cmake/eosios + rm -rf $HOME/opt/llvm + rm -f $HOME/opt/boost + rm -rf $HOME/src/boost_* + rm -rf $HOME/src/cmake-* + rm -rf $HOME/share/cmake-* + rm -rf $HOME/share/aclocal/cmake* + rm -rf $HOME/doc/cmake* + rm -f $HOME/bin/nodeos $HOME/bin/keosd $HOME/bin/cleos $HOME/bin/ctest $HOME/bin/*cmake* $HOME/bin/cpack + rm -rf $HOME/src/mongo* + fi + + if [ -d "/usr/local/include/eosio" ] || [[ $1 == "force-old" ]]; then + if [ "$(id -u)" -ne 0 ]; then + printf "\nCleanup requires sudo... 
Please manually run ./scripts/clean_old_install.sh with sudo.\n" + exit -1 + fi + pushd /usr/local &> /dev/null + rm -rf wasm + pushd include &> /dev/null + rm -rf libbson-1.0 libmongoc-1.0 mongocxx bsoncxx appbase chainbase eosio.system eosiolib fc libc++ musl secp256k* 2>/dev/null + rm -rf eosio 2>/dev/null + popd &> /dev/null + pushd bin &> /dev/null + rm cleos eosio-abigen eosio-applesedemo eosio-launcher eosio-s2wasm eosio-wast2wasm eosiocpp keosd nodeos 2>/dev/null + popd &> /dev/null + libraries=( + libeosio_testing + libeosio_chain + libfc + libbinaryen + libWAST + libWASM + libRuntime + libPlatform + libIR + libLogging + libsoftfloat + libchainbase + libappbase + libbuiltins + libbson-1.0 + libbson-static-1.0.a + libbsoncxx-static + libmongoc-1.0 + libmongoc-static-1.0.a + libmongocxx-static + libsecp256k* + ) + pushd lib &> /dev/null + for lib in ${libraries[@]}; do + rm ${lib}.a ${lib}.dylib ${lib}.so 2>/dev/null + rm pkgconfig/${lib}.pc 2>/dev/null + rm cmake/${lib} 2>/dev/null + done + popd &> /dev/null + pushd etc &> /dev/null + rm eosio 2>/dev/null + popd &> /dev/null + pushd share &> /dev/null + rm eosio 2>/dev/null + popd &> /dev/null + pushd usr/share &> /dev/null + rm eosio 2>/dev/null + popd &> /dev/null + pushd var/lib &> /dev/null + rm eosio 2>/dev/null + popd &> /dev/null + pushd var/log &> /dev/null + rm eosio 2>/dev/null + popd &> /dev/null + fi + ;; + [Nn]* ) + printf "Skipping\n\n" + exit 0 + ;; + esac +fi diff --git a/scripts/generate_bottle.sh b/scripts/generate_bottle.sh index 13cd62fd71d..075fb9a7bb7 100644 --- a/scripts/generate_bottle.sh +++ b/scripts/generate_bottle.sh @@ -14,7 +14,7 @@ else MAC_VERSION="high_sierra" fi -NAME="${PROJECT}-${VERSION}.${MAC_VERSION}.bottle.tar.gz" +NAME="${PROJECT}-${VERSION}.${MAC_VERSION}.bottle" mkdir -p ${PROJECT}/${VERSION}/opt/eosio/lib/cmake @@ -28,9 +28,9 @@ export SPREFIX export SUBPREFIX export SSUBPREFIX -bash generate_tarball.sh ${NAME} +. ./generate_tarball.sh ${NAME} -hash=`openssl dgst -sha256 ${NAME} | awk 'NF>1{print $NF}'` +hash=`openssl dgst -sha256 ${NAME}.tar.gz | awk 'NF>1{print $NF}'` echo "class Eosio < Formula @@ -38,17 +38,16 @@ echo "class Eosio < Formula revision 0 url \"https://github.com/eosio/eos/archive/v${VERSION}.tar.gz\" version \"${VERSION}\" - + option :universal - depends_on \"gmp\" + depends_on \"gmp\" depends_on \"gettext\" depends_on \"openssl\" - depends_on \"gmp\" - depends_on :xcode + depends_on \"libusb\" depends_on :macos => :high_sierra depends_on :arch => :intel - + bottle do root_url \"https://github.com/eosio/eos/releases/download/v${VERSION}\" sha256 \"${hash}\" => :${MAC_VERSION} @@ -59,4 +58,4 @@ echo "class Eosio < Formula end __END__" &> eosio.rb -rm -r ${PROJECT} +rm -r ${PROJECT} || exit 1 diff --git a/scripts/generate_deb.sh b/scripts/generate_deb.sh old mode 100644 new mode 100755 index 9af16c069f3..e8c22d154fb --- a/scripts/generate_deb.sh +++ b/scripts/generate_deb.sh @@ -13,29 +13,45 @@ fi NAME="${PROJECT}_${VERSION_NO_SUFFIX}-${RELEASE}_amd64" +if [[ -f /etc/upstream-release/lsb-release ]]; then + source /etc/upstream-release/lsb-release +elif [[ -f /etc/lsb-release ]]; then + source /etc/lsb-release +else + echo "Unrecognized Debian derivative. Not generating .deb file." + exit 1 +fi + +if [ ${DISTRIB_RELEASE} = "16.04" ]; then + LIBSSL="libssl1.0.0" +elif [ ${DISTRIB_RELEASE} = "18.04" ]; then + LIBSSL="libssl1.1" +else + echo "Unrecognized Ubuntu version. Update generate_deb.sh. Not generating .deb file." 
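+	# Editor's note, a hedged sketch rather than upstream code: each supported release
+	# maps DISTRIB_RELEASE to the libssl runtime package that release ships, so
+	# supporting a newer release is one more branch, e.g. (hypothetical; verify the
+	# package name against the target release before using):
+	#   elif [ ${DISTRIB_RELEASE} = "20.04" ]; then
+	#       LIBSSL="libssl1.1"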
+ exit 1 +fi + mkdir -p ${PROJECT}/DEBIAN -chmod 0755 ${PROJECT}/DEBIAN +chmod 0755 ${PROJECT}/DEBIAN || exit 1 echo "Package: ${PROJECT} Version: ${VERSION_NO_SUFFIX}-${RELEASE} Section: devel Priority: optional -Depends: libbz2-dev (>= 1.0), libssl-dev (>= 1.0), libgmp3-dev, build-essential, libicu-dev, zlib1g-dev, libtinfo5 +Depends: libc6, libgcc1, ${LIBSSL}, libstdc++6, libtinfo5, zlib1g, libusb-1.0-0, libcurl3-gnutls Architecture: amd64 Homepage: ${URL} Maintainer: ${EMAIL} Description: ${DESC}" &> ${PROJECT}/DEBIAN/control +cat ${PROJECT}/DEBIAN/control export PREFIX export SUBPREFIX export SPREFIX export SSUBPREFIX -bash generate_tarball.sh ${NAME}.tar.gz - -tar -xvzf ${NAME}.tar.gz -C ${PROJECT} -dpkg-deb --build ${PROJECT} -BUILDSTATUS=$? -mv ${PROJECT}.deb ${NAME}.deb -rm -r ${PROJECT} - -exit $BUILDSTATUS +. ./generate_tarball.sh ${NAME} +echo "Unpacking tarball: ${NAME}.tar.gz..." +tar -xzvf ${NAME}.tar.gz -C ${PROJECT} || exit 1 +dpkg-deb --build ${PROJECT} || exit 1 +mv ${PROJECT}.deb ${NAME}.deb || exit 1 +rm -r ${PROJECT} || exit 1 \ No newline at end of file diff --git a/scripts/generate_package.sh.in b/scripts/generate_package.sh.in index 9c190ed7d87..4874a1b4246 100644 --- a/scripts/generate_package.sh.in +++ b/scripts/generate_package.sh.in @@ -5,7 +5,9 @@ VARIANT=$1 VERSION_NO_SUFFIX="@VERSION_MAJOR@.@VERSION_MINOR@.@VERSION_PATCH@" VERSION_SUFFIX="@VERSION_SUFFIX@" VERSION="@VERSION_FULL@" -BUILD_DIR="@CMAKE_BINARY_DIR@" + +# Using CMAKE_BINARY_DIR uses an absolute path and will break cross-vm building/download/make functionality +BUILD_DIR="../../build" VENDOR="@VENDOR@" PROJECT="@PROJECT_NAME@" @@ -23,21 +25,17 @@ export DESC export URL export EMAIL -mkdir tmp +mkdir -p tmp if [[ ${VARIANT} == "brew" ]]; then - bash generate_bottle.sh + . ./generate_bottle.sh elif [[ ${VARIANT} == "deb" ]]; then - bash generate_deb.sh + . ./generate_deb.sh elif [[ ${VARIANT} == "rpm" ]]; then - bash generate_rpm.sh + . ./generate_rpm.sh else echo "Error, unknown package type. Use either ['brew', 'deb', 'rpm']." exit -1 fi -BUILDSTATUS=$? - -rm -r tmp - -exit $BUILDSTATUS +rm -r tmp || exit 1 diff --git a/scripts/generate_rpm.sh b/scripts/generate_rpm.sh index 8d7ebf03caa..625eff29d7b 100644 --- a/scripts/generate_rpm.sh +++ b/scripts/generate_rpm.sh @@ -18,10 +18,10 @@ export SUBPREFIX export SPREFIX export SSUBPREFIX -bash generate_tarball.sh ${NAME}.tar.gz +. ./generate_tarball.sh ${NAME} RPMBUILD=`realpath ~/rpmbuild/BUILDROOT/${NAME}.x86_64` -mkdir -p ${RPMBUILD} +mkdir -p ${RPMBUILD} FILES=$(tar -xvzf ${NAME}.tar.gz -C ${RPMBUILD}) PFILES="" for f in ${FILES[@]}; do @@ -31,13 +31,13 @@ for f in ${FILES[@]}; do done echo -e ${PFILES} &> ~/rpmbuild/BUILD/filenames.txt -mkdir -p ${PROJECT} +mkdir -p ${PROJECT} echo -e "Name: ${PROJECT} Version: ${VERSION_NO_SUFFIX} License: MIT Vendor: ${VENDOR} Source: ${URL} -Requires: openssl-devel, gmp-devel, libstdc++-devel, bzip2, bzip2-devel, mongodb, mongodb-server +Requires: openssl, gmp, libstdc++, bzip2, libcurl, libusbx URL: ${URL} Packager: ${VENDOR} <${EMAIL}> Summary: ${DESC} @@ -45,10 +45,8 @@ Release: ${RELEASE} %description ${DESC} %files -f filenames.txt" &> ${PROJECT}.spec +cat ${PROJECT}.spec -rpmbuild -bb ${PROJECT}.spec -BUILDSTATUS=$? 
-mv ~/rpmbuild/RPMS/x86_64 ./ -rm -r ${PROJECT} ~/rpmbuild/BUILD/filenames.txt ${PROJECT}.spec - -exit $BUILDSTATUS +rpmbuild -bb ${PROJECT}.spec || exit 1 +mv ~/rpmbuild/RPMS/x86_64/*.rpm ./ || exit 1 +rm -r ${PROJECT} ~/rpmbuild/BUILD/filenames.txt ${PROJECT}.spec || exit 1 diff --git a/scripts/generate_tarball.sh b/scripts/generate_tarball.sh index 675f30b4af7..02f5e009ce5 100644 --- a/scripts/generate_tarball.sh +++ b/scripts/generate_tarball.sh @@ -4,7 +4,7 @@ NAME=$1 EOS_PREFIX=${PREFIX}/${SUBPREFIX} mkdir -p ${PREFIX}/bin/ #mkdir -p ${PREFIX}/lib/cmake/${PROJECT} -mkdir -p ${EOS_PREFIX}/bin +mkdir -p ${EOS_PREFIX}/bin mkdir -p ${EOS_PREFIX}/licenses/eosio #mkdir -p ${EOS_PREFIX}/include #mkdir -p ${EOS_PREFIX}/lib/cmake/${PROJECT} @@ -12,10 +12,10 @@ mkdir -p ${EOS_PREFIX}/licenses/eosio #mkdir -p ${EOS_PREFIX}/scripts # install binaries -cp -R ${BUILD_DIR}/bin/* ${EOS_PREFIX}/bin +cp -R ${BUILD_DIR}/bin/* ${EOS_PREFIX}/bin || exit 1 # install licenses -cp -R ${BUILD_DIR}/licenses/eosio/* ${EOS_PREFIX}/licenses +cp -R ${BUILD_DIR}/licenses/eosio/* ${EOS_PREFIX}/licenses || exit 1 # install libraries #cp -R ${BUILD_DIR}/lib/* ${EOS_PREFIX}/lib @@ -33,12 +33,10 @@ cp -R ${BUILD_DIR}/licenses/eosio/* ${EOS_PREFIX}/licenses #ln -sf ../../../${SUBPREFIX}/lib/cmake/${PROJECT}/EosioTester.cmake EosioTester.cmake #popd &> /dev/null -pushd ${PREFIX}/bin &> /dev/null -for f in `ls ${BUILD_DIR}/bin/`; do +for f in $(ls "${BUILD_DIR}/bin/"); do bn=$(basename $f) - ln -sf ../${SUBPREFIX}/bin/$bn $bn + ln -sf ../${SUBPREFIX}/bin/$bn ${PREFIX}/bin/$bn || exit 1 done -popd &> /dev/null - -tar -cvzf $NAME ./${PREFIX}/* -rm -r ${PREFIX} +echo "Generating Tarball $NAME.tar.gz..." +tar -cvzf $NAME.tar.gz ./${PREFIX}/* || exit 1 +rm -r ${PREFIX} || exit 1 diff --git a/scripts/mongod.conf b/scripts/mongod.conf new file mode 100644 index 00000000000..9599b86fe2f --- /dev/null +++ b/scripts/mongod.conf @@ -0,0 +1,3 @@ +systemLog: + logAppend: true + logRotate: reopen \ No newline at end of file diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index 9f545c378b2..66a826fa0e6 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -15,7 +15,7 @@ include_directories("${CMAKE_SOURCE_DIR}/plugins/wallet_plugin/include") file(GLOB UNIT_TESTS "*.cpp") -add_executable( plugin_test ${UNIT_TESTS} ${WASM_UNIT_TESTS} ) +add_executable( plugin_test ${UNIT_TESTS} ) target_link_libraries( plugin_test eosio_testing eosio_chain chainbase chain_plugin wallet_plugin fc ${PLATFORM_SPECIFIC_LIBS} ) target_include_directories( plugin_test PUBLIC diff --git a/tests/Cluster.py b/tests/Cluster.py index ac513475d8b..3bc0f215566 100644 --- a/tests/Cluster.py +++ b/tests/Cluster.py @@ -32,7 +32,6 @@ class Cluster(object): __bootlog="eosio-ignition-wd/bootlog.txt" __configDir="etc/eosio/" __dataDir="var/lib/" - __fileDivider="=================================================================" # pylint: disable=too-many-arguments # walletd [True|False] Is keosd running. 
If not load the wallet plugin @@ -1258,21 +1257,21 @@ def killSomeEosInstances(self, killCount, killSignalStr=Utils.SigKillTag): time.sleep(1) # Give processes time to stand down return True - def relaunchEosInstances(self): + def relaunchEosInstances(self, cachePopen=False): chainArg=self.__chainSyncStrategy.arg newChain= False if self.__chainSyncStrategy.name in [Utils.SyncHardReplayTag, Utils.SyncNoneTag] else True for i in range(0, len(self.nodes)): node=self.nodes[i] - if node.killed and not node.relaunch(i, chainArg, newChain=newChain): + if node.killed and not node.relaunch(i, chainArg, newChain=newChain, cachePopen=cachePopen): return False return True @staticmethod def dumpErrorDetailImpl(fileName): - Utils.Print(Cluster.__fileDivider) + Utils.Print(Utils.FileDivider) Utils.Print("Contents of %s:" % (fileName)) if os.path.exists(fileName): with open(fileName, "r") as f: @@ -1280,20 +1279,36 @@ def dumpErrorDetailImpl(fileName): else: Utils.Print("File %s not found." % (fileName)) + @staticmethod + def __findFiles(path): + files=[] + it=os.scandir(path) + for entry in it: + if entry.is_file(follow_symlinks=False): + match=re.match("stderr\..+\.txt", entry.name) + if match: + files.append(os.path.join(path, entry.name)) + files.sort() + return files + def dumpErrorDetails(self): - fileName=Cluster.__configDir + Cluster.nodeExtensionToName("bios") + "/config.ini" - Cluster.dumpErrorDetailImpl(fileName) - fileName=Cluster.__dataDir + Cluster.nodeExtensionToName("bios") + "/stderr.txt" + fileName=os.path.join(Cluster.__configDir + Cluster.nodeExtensionToName("bios"), "config.ini") Cluster.dumpErrorDetailImpl(fileName) + path=Cluster.__dataDir + Cluster.nodeExtensionToName("bios") + fileNames=Cluster.__findFiles(path) + for fileName in fileNames: + Cluster.dumpErrorDetailImpl(fileName) for i in range(0, len(self.nodes)): - configLocation=Cluster.__configDir + Cluster.nodeExtensionToName(i) + "/" - fileName=configLocation + "config.ini" - Cluster.dumpErrorDetailImpl(fileName) - fileName=configLocation + "genesis.json" + configLocation=Cluster.__configDir + Cluster.nodeExtensionToName(i) + fileName=os.path.join(configLocation, "config.ini") Cluster.dumpErrorDetailImpl(fileName) - fileName=Cluster.__dataDir + Cluster.nodeExtensionToName(i) + "/stderr.txt" + fileName=os.path.join(configLocation, "genesis.json") Cluster.dumpErrorDetailImpl(fileName) + path=Cluster.__dataDir + Cluster.nodeExtensionToName(i) + fileNames=Cluster.__findFiles(path) + for fileName in fileNames: + Cluster.dumpErrorDetailImpl(fileName) if self.useBiosBootFile: Cluster.dumpErrorDetailImpl(Cluster.__bootlog) @@ -1444,8 +1459,8 @@ def getBlockLog(self, nodeExtension): def printBlockLog(self): blockLogBios=self.getBlockLog("bios") - Utils.Print(Cluster.__fileDivider) - Utils.Print("Block log from %s:\n%s" % (blockLogDir, json.dumps(blockLogBios, indent=1))) + Utils.Print(Utils.FileDivider) + Utils.Print("Block log from %s:\n%s" % ("bios", json.dumps(blockLogBios, indent=1))) if not hasattr(self, "nodes"): return @@ -1454,8 +1469,8 @@ def printBlockLog(self): for i in range(numNodes): node=self.nodes[i] blockLog=self.getBlockLog(i) - Utils.Print(Cluster.__fileDivider) - Utils.Print("Block log from %s:\n%s" % (blockLogDir, json.dumps(blockLog, indent=1))) + Utils.Print(Utils.FileDivider) + Utils.Print("Block log from node %s:\n%s" % (i, json.dumps(blockLog, indent=1))) def compareBlockLogs(self): @@ -1531,11 +1546,11 @@ def compareCommon(blockLogs, blockNameExtensions, first, last): if ret is not None: 
blockLogDir1=Cluster.__dataDir + Cluster.nodeExtensionToName(commonBlockNameExtensions[0]) + "/blocks/" blockLogDir2=Cluster.__dataDir + Cluster.nodeExtensionToName(commonBlockNameExtensions[i]) + "/blocks/" - Utils.Print(Cluster.__fileDivider) + Utils.Print(Utils.FileDivider) Utils.Print("Block log from %s:\n%s" % (blockLogDir1, json.dumps(commonBlockLogs[0], indent=1))) - Utils.Print(Cluster.__fileDivider) + Utils.Print(Utils.FileDivider) Utils.Print("Block log from %s:\n%s" % (blockLogDir2, json.dumps(commonBlockLogs[i], indent=1))) - Utils.Print(Cluster.__fileDivider) + Utils.Print(Utils.FileDivider) Utils.errorExit("Block logs do not match, difference description -> %s" % (ret)) return True diff --git a/tests/Node.py b/tests/Node.py index 55ecbb0be22..1c01893ceca 100644 --- a/tests/Node.py +++ b/tests/Node.py @@ -997,6 +997,19 @@ def delegatebw(self, fromAccount, netQuantity, cpuQuantity, toAccount=None, tran return self.waitForTransBlockIfNeeded(trans, waitForTransBlock, exitOnError=exitOnError) + def undelegatebw(self, fromAccount, netQuantity, cpuQuantity, toAccount=None, waitForTransBlock=False, exitOnError=False): + if toAccount is None: + toAccount=fromAccount + + cmdDesc="system undelegatebw" + cmd="%s -j %s %s \"%s %s\" \"%s %s\"" % ( + cmdDesc, fromAccount.name, toAccount.name, netQuantity, CORE_SYMBOL, cpuQuantity, CORE_SYMBOL) + msg="fromAccount=%s, toAccount=%s" % (fromAccount.name, toAccount.name); + trans=self.processCleosCmd(cmd, cmdDesc, exitOnError=exitOnError, exitMsg=msg) + self.trackCmdTransaction(trans) + + return self.waitForTransBlockIfNeeded(trans, waitForTransBlock, exitOnError=exitOnError) + def regproducer(self, producer, url, location, waitForTransBlock=False, exitOnError=False): cmdDesc="system regproducer" cmd="%s -j %s %s %s %s" % ( @@ -1212,7 +1225,7 @@ def interruptAndVerifyExitStatus(self): assert self.popenProc is not None, "node: \"%s\" does not have a popenProc, this may be because it is only set after a relaunch." 
% (self.cmd) self.popenProc.send_signal(signal.SIGINT) try: - outs, _ = self.popenProc.communicate(timeout=1) + outs, _ = self.popenProc.communicate(timeout=15) assert self.popenProc.returncode == 0, "Expected terminating \"%s\" to have an exit status of 0, but got %d" % (self.cmd, self.popenProc.returncode) except subprocess.TimeoutExpired: Utils.errorExit("Terminate call failed on node: %s" % (self.cmd)) @@ -1220,6 +1233,8 @@ def interruptAndVerifyExitStatus(self): def verifyAlive(self, silent=False): if not silent and Utils.Debug: Utils.Print("Checking if node(pid=%s) is alive(killed=%s): %s" % (self.pid, self.killed, self.cmd)) if self.killed or self.pid is None: + self.killed=True + self.pid=None return False try: @@ -1231,8 +1246,8 @@ def verifyAlive(self, silent=False): return False except PermissionError as ex: return True - else: - return True + + return True def getBlockProducerByNum(self, blockNum, timeout=None, waitForBlock=True, exitOnError=True): if waitForBlock: @@ -1280,7 +1295,7 @@ def getNextCleanProductionCycle(self, trans): # TBD: make nodeId an internal property # pylint: disable=too-many-locals - def relaunch(self, nodeId, chainArg, newChain=False, timeout=Utils.systemWaitTimeout, addOrSwapFlags=None): + def relaunch(self, nodeId, chainArg, newChain=False, timeout=Utils.systemWaitTimeout, addOrSwapFlags=None, cachePopen=False): assert(self.pid is None) assert(self.killed) @@ -1326,8 +1341,10 @@ def relaunch(self, nodeId, chainArg, newChain=False, timeout=Utils.systemWaitTim with open(stdoutFile, 'w') as sout, open(stderrFile, 'w') as serr: cmd=myCmd + ("" if chainArg is None else (" " + chainArg)) Utils.Print("cmd: %s" % (cmd)) - self.popenProc=subprocess.Popen(cmd.split(), stdout=sout, stderr=serr) - self.pid=self.popenProc.pid + popen=subprocess.Popen(cmd.split(), stdout=sout, stderr=serr) + if cachePopen: + self.popenProc=popen + self.pid=popen.pid if Utils.Debug: Utils.Print("restart Node host=%s, port=%s, pid=%s, cmd=%s" % (self.host, self.port, self.pid, self.cmd)) def isNodeAlive(): diff --git a/tests/TestHelper.py b/tests/TestHelper.py index 1650597dee5..10b69fa334d 100644 --- a/tests/TestHelper.py +++ b/tests/TestHelper.py @@ -145,6 +145,9 @@ def shutdown(cluster, walletMgr, testSuccessful=True, killEosInstances=True, kil Utils.Print("Test failed.") if not testSuccessful and dumpErrorDetails: cluster.reportStatus() + Utils.Print(Utils.FileDivider) + psOut=Cluster.pgrepEosServers(timeout=60) + Utils.Print("pgrep output:\n%s" % (psOut)) cluster.dumpErrorDetails() if walletMgr: walletMgr.dumpErrorDetails() diff --git a/tests/chain_plugin_tests.cpp b/tests/chain_plugin_tests.cpp index 4b4caa45311..a6e119a5b06 100644 --- a/tests/chain_plugin_tests.cpp +++ b/tests/chain_plugin_tests.cpp @@ -93,7 +93,7 @@ BOOST_FIXTURE_TEST_CASE( get_block_with_invalid_abi, TESTER ) try { char headnumstr[20]; sprintf(headnumstr, "%d", headnum); chain_apis::read_only::get_block_params param{headnumstr}; - chain_apis::read_only plugin(*(this->control), fc::microseconds(INT_MAX)); + chain_apis::read_only plugin(*(this->control), fc::microseconds::maximum()); // block should be decoded successfully std::string block_str = json::to_pretty_string(plugin.get_block(param)); diff --git a/tests/get_table_tests.cpp b/tests/get_table_tests.cpp index ca5ff408e43..6abeb325913 100644 --- a/tests/get_table_tests.cpp +++ b/tests/get_table_tests.cpp @@ -69,18 +69,18 @@ BOOST_FIXTURE_TEST_CASE( get_scope_test, TESTER ) try { produce_blocks(1); // iterate over scope - eosio::chain_apis::read_only 
plugin(*(this->control), fc::microseconds(INT_MAX)); + eosio::chain_apis::read_only plugin(*(this->control), fc::microseconds::maximum()); eosio::chain_apis::read_only::get_table_by_scope_params param{N(eosio.token), N(accounts), "inita", "", 10}; eosio::chain_apis::read_only::get_table_by_scope_result result = plugin.read_only::get_table_by_scope(param); - BOOST_REQUIRE_EQUAL(4, result.rows.size()); + BOOST_REQUIRE_EQUAL(4u, result.rows.size()); BOOST_REQUIRE_EQUAL("", result.more); if (result.rows.size() >= 4) { BOOST_REQUIRE_EQUAL(name(N(eosio.token)), result.rows[0].code); BOOST_REQUIRE_EQUAL(name(N(inita)), result.rows[0].scope); BOOST_REQUIRE_EQUAL(name(N(accounts)), result.rows[0].table); BOOST_REQUIRE_EQUAL(name(N(eosio)), result.rows[0].payer); - BOOST_REQUIRE_EQUAL(1, result.rows[0].count); + BOOST_REQUIRE_EQUAL(1u, result.rows[0].count); BOOST_REQUIRE_EQUAL(name(N(initb)), result.rows[1].scope); BOOST_REQUIRE_EQUAL(name(N(initc)), result.rows[2].scope); @@ -90,7 +90,7 @@ BOOST_FIXTURE_TEST_CASE( get_scope_test, TESTER ) try { param.lower_bound = "initb"; param.upper_bound = "initc"; result = plugin.read_only::get_table_by_scope(param); - BOOST_REQUIRE_EQUAL(2, result.rows.size()); + BOOST_REQUIRE_EQUAL(2u, result.rows.size()); BOOST_REQUIRE_EQUAL("", result.more); if (result.rows.size() >= 2) { BOOST_REQUIRE_EQUAL(name(N(initb)), result.rows[0].scope); @@ -99,17 +99,17 @@ BOOST_FIXTURE_TEST_CASE( get_scope_test, TESTER ) try { param.limit = 1; result = plugin.read_only::get_table_by_scope(param); - BOOST_REQUIRE_EQUAL(1, result.rows.size()); + BOOST_REQUIRE_EQUAL(1u, result.rows.size()); BOOST_REQUIRE_EQUAL("initc", result.more); param.table = name(0); result = plugin.read_only::get_table_by_scope(param); - BOOST_REQUIRE_EQUAL(1, result.rows.size()); + BOOST_REQUIRE_EQUAL(1u, result.rows.size()); BOOST_REQUIRE_EQUAL("initc", result.more); param.table = N(invalid); result = plugin.read_only::get_table_by_scope(param); - BOOST_REQUIRE_EQUAL(0, result.rows.size()); + BOOST_REQUIRE_EQUAL(0u, result.rows.size()); BOOST_REQUIRE_EQUAL("", result.more); } FC_LOG_AND_RETHROW() /// get_scope_test @@ -190,7 +190,7 @@ BOOST_FIXTURE_TEST_CASE( get_table_test, TESTER ) try { produce_blocks(1); // get table: normal case - eosio::chain_apis::read_only plugin(*(this->control), fc::microseconds(INT_MAX)); + eosio::chain_apis::read_only plugin(*(this->control), fc::microseconds::maximum()); eosio::chain_apis::read_only::get_table_rows_params p; p.code = N(eosio.token); p.scope = "inita"; @@ -198,7 +198,7 @@ BOOST_FIXTURE_TEST_CASE( get_table_test, TESTER ) try { p.json = true; p.index_position = "primary"; eosio::chain_apis::read_only::get_table_rows_result result = plugin.read_only::get_table_rows(p); - BOOST_REQUIRE_EQUAL(4, result.rows.size()); + BOOST_REQUIRE_EQUAL(4u, result.rows.size()); BOOST_REQUIRE_EQUAL(false, result.more); if (result.rows.size() >= 4) { BOOST_REQUIRE_EQUAL("9999.0000 AAA", result.rows[0]["balance"].as_string()); @@ -210,7 +210,7 @@ BOOST_FIXTURE_TEST_CASE( get_table_test, TESTER ) try { // get table: reverse ordered p.reverse = true; result = plugin.read_only::get_table_rows(p); - BOOST_REQUIRE_EQUAL(4, result.rows.size()); + BOOST_REQUIRE_EQUAL(4u, result.rows.size()); BOOST_REQUIRE_EQUAL(false, result.more); if (result.rows.size() >= 4) { BOOST_REQUIRE_EQUAL("9999.0000 AAA", result.rows[3]["balance"].as_string()); @@ -223,7 +223,7 @@ BOOST_FIXTURE_TEST_CASE( get_table_test, TESTER ) try { p.reverse = true; p.show_payer = true; result = 
plugin.read_only::get_table_rows(p); - BOOST_REQUIRE_EQUAL(4, result.rows.size()); + BOOST_REQUIRE_EQUAL(4u, result.rows.size()); BOOST_REQUIRE_EQUAL(false, result.more); if (result.rows.size() >= 4) { BOOST_REQUIRE_EQUAL("9999.0000 AAA", result.rows[3]["data"]["balance"].as_string()); @@ -242,7 +242,7 @@ BOOST_FIXTURE_TEST_CASE( get_table_test, TESTER ) try { p.upper_bound = "CCC"; p.reverse = false; result = plugin.read_only::get_table_rows(p); - BOOST_REQUIRE_EQUAL(2, result.rows.size()); + BOOST_REQUIRE_EQUAL(2u, result.rows.size()); BOOST_REQUIRE_EQUAL(false, result.more); if (result.rows.size() >= 2) { BOOST_REQUIRE_EQUAL("8888.0000 BBB", result.rows[0]["balance"].as_string()); @@ -254,7 +254,7 @@ BOOST_FIXTURE_TEST_CASE( get_table_test, TESTER ) try { p.upper_bound = "CCC"; p.reverse = true; result = plugin.read_only::get_table_rows(p); - BOOST_REQUIRE_EQUAL(2, result.rows.size()); + BOOST_REQUIRE_EQUAL(2u, result.rows.size()); BOOST_REQUIRE_EQUAL(false, result.more); if (result.rows.size() >= 2) { BOOST_REQUIRE_EQUAL("8888.0000 BBB", result.rows[1]["balance"].as_string()); @@ -266,7 +266,7 @@ BOOST_FIXTURE_TEST_CASE( get_table_test, TESTER ) try { p.limit = 1; p.reverse = false; result = plugin.read_only::get_table_rows(p); - BOOST_REQUIRE_EQUAL(1, result.rows.size()); + BOOST_REQUIRE_EQUAL(1u, result.rows.size()); BOOST_REQUIRE_EQUAL(true, result.more); if (result.rows.size() >= 1) { BOOST_REQUIRE_EQUAL("9999.0000 AAA", result.rows[0]["balance"].as_string()); @@ -277,7 +277,7 @@ BOOST_FIXTURE_TEST_CASE( get_table_test, TESTER ) try { p.limit = 1; p.reverse = true; result = plugin.read_only::get_table_rows(p); - BOOST_REQUIRE_EQUAL(1, result.rows.size()); + BOOST_REQUIRE_EQUAL(1u, result.rows.size()); BOOST_REQUIRE_EQUAL(true, result.more); if (result.rows.size() >= 1) { BOOST_REQUIRE_EQUAL("10000.0000 SYS", result.rows[0]["balance"].as_string()); @@ -289,7 +289,7 @@ BOOST_FIXTURE_TEST_CASE( get_table_test, TESTER ) try { p.limit = 1; p.reverse = false; result = plugin.read_only::get_table_rows(p); - BOOST_REQUIRE_EQUAL(1, result.rows.size()); + BOOST_REQUIRE_EQUAL(1u, result.rows.size()); BOOST_REQUIRE_EQUAL(true, result.more); if (result.rows.size() >= 1) { BOOST_REQUIRE_EQUAL("8888.0000 BBB", result.rows[0]["balance"].as_string()); @@ -301,7 +301,7 @@ BOOST_FIXTURE_TEST_CASE( get_table_test, TESTER ) try { p.limit = 1; p.reverse = true; result = plugin.read_only::get_table_rows(p); - BOOST_REQUIRE_EQUAL(1, result.rows.size()); + BOOST_REQUIRE_EQUAL(1u, result.rows.size()); BOOST_REQUIRE_EQUAL(true, result.more); if (result.rows.size() >= 1) { BOOST_REQUIRE_EQUAL("7777.0000 CCC", result.rows[0]["balance"].as_string()); @@ -363,7 +363,7 @@ BOOST_FIXTURE_TEST_CASE( get_table_by_seckey_test, TESTER ) try { produce_blocks(1); // get table: normal case - eosio::chain_apis::read_only plugin(*(this->control), fc::microseconds(INT_MAX)); + eosio::chain_apis::read_only plugin(*(this->control), fc::microseconds::maximum()); eosio::chain_apis::read_only::get_table_rows_params p; p.code = N(eosio); p.scope = "eosio"; @@ -372,7 +372,7 @@ BOOST_FIXTURE_TEST_CASE( get_table_by_seckey_test, TESTER ) try { p.index_position = "secondary"; // ordered by high_bid p.key_type = "i64"; eosio::chain_apis::read_only::get_table_rows_result result = plugin.read_only::get_table_rows(p); - BOOST_REQUIRE_EQUAL(4, result.rows.size()); + BOOST_REQUIRE_EQUAL(4u, result.rows.size()); BOOST_REQUIRE_EQUAL(false, result.more); if (result.rows.size() >= 4) { BOOST_REQUIRE_EQUAL("html", 
result.rows[0]["newname"].as_string()); @@ -396,7 +396,7 @@ BOOST_FIXTURE_TEST_CASE( get_table_by_seckey_test, TESTER ) try { p.reverse = true; p.show_payer = true; result = plugin.read_only::get_table_rows(p); - BOOST_REQUIRE_EQUAL(4, result.rows.size()); + BOOST_REQUIRE_EQUAL(4u, result.rows.size()); BOOST_REQUIRE_EQUAL(false, result.more); if (result.rows.size() >= 4) { BOOST_REQUIRE_EQUAL("html", result.rows[3]["data"]["newname"].as_string()); @@ -425,7 +425,7 @@ BOOST_FIXTURE_TEST_CASE( get_table_by_seckey_test, TESTER ) try { p.show_payer = false; p.limit = 1; result = plugin.read_only::get_table_rows(p); - BOOST_REQUIRE_EQUAL(1, result.rows.size()); + BOOST_REQUIRE_EQUAL(1u, result.rows.size()); BOOST_REQUIRE_EQUAL(true, result.more); if (result.rows.size() >= 1) { BOOST_REQUIRE_EQUAL("html", result.rows[0]["newname"].as_string()); @@ -438,7 +438,7 @@ BOOST_FIXTURE_TEST_CASE( get_table_by_seckey_test, TESTER ) try { p.show_payer = false; p.limit = 1; result = plugin.read_only::get_table_rows(p); - BOOST_REQUIRE_EQUAL(1, result.rows.size()); + BOOST_REQUIRE_EQUAL(1u, result.rows.size()); BOOST_REQUIRE_EQUAL(true, result.more); if (result.rows.size() >= 1) { BOOST_REQUIRE_EQUAL("com", result.rows[0]["newname"].as_string()); diff --git a/tests/nodeos_forked_chain_test.py b/tests/nodeos_forked_chain_test.py index 33914240157..4ef22ab082f 100755 --- a/tests/nodeos_forked_chain_test.py +++ b/tests/nodeos_forked_chain_test.py @@ -320,6 +320,9 @@ def getMinHeadAndLib(prodNodes): nextProdChange=False #identify the earliest LIB to start identify the earliest block to check if divergent branches eventually reach concensus (headBlockNum, libNumAroundDivergence)=getMinHeadAndLib(prodNodes) + Print("Tracking block producers from %d till divergence or %d. Head block is %d and lowest LIB is %d" % (preKillBlockNum, lastBlockNum, headBlockNum, libNumAroundDivergence)) + transitionCount=0 + missedTransitionBlock=None for blockNum in range(preKillBlockNum,lastBlockNum): #avoiding getting LIB until my current block passes the head from the last time I checked if blockNum>headBlockNum: @@ -341,19 +344,31 @@ def getMinHeadAndLib(prodNodes): if not nextProdChange and prodChanged and blockProducer1==killAtProducer: nextProdChange=True elif nextProdChange and blockProducer1!=killAtProducer: - actualLastBlockNum=blockNum - break + nextProdChange=False + if blockProducer0!=blockProducer1: + Print("Divergence identified at block %s, node_00 producer: %s, node_01 producer: %s" % (blockNum, blockProducer0, blockProducer1)) + actualLastBlockNum=blockNum + break + else: + missedTransitionBlock=blockNum + transitionCount+=1 + # allow this to transition twice, in case the script was identifying an earlier transition than the bridge node received the kill command + if transitionCount>1: + Print("At block %d and have passed producer: %s %d times and we have not diverged, stopping looking and letting errors report" % (blockNum, killAtProducer, transitionCount)) + actualLastBlockNum=blockNum + break #if we diverge before identifying the actualLastBlockNum, then there is an ERROR if blockProducer0!=blockProducer1: - Utils.errorExit("Groups reported different block producers for block number %d. %s != %s." % (blockNum,blockProducer0,blockProducer1)) + extra="" if transitionCount==0 else " Diverged after expected killAtProducer transition at block %d." % (missedTransitionBlock) + Utils.errorExit("Groups reported different block producers for block number %d.%s %s != %s." 
% (blockNum,extra,blockProducer0,blockProducer1)) #verify that the non producing node is not alive (and populate the producer nodes with current getInfo data to report if #an error occurs) if nonProdNode.verifyAlive(): Utils.errorExit("Expected the non-producing node to have shutdown.") - Print("Analyzing the producers leading up to the block after killing the non-producing node") + Print("Analyzing the producers leading up to the block after killing the non-producing node, expecting divergence at %d" % (blockNum)) firstDivergence=analyzeBPs(blockProducers0, blockProducers1, expectDivergence=True) # Nodes should not have diverged till the last block @@ -363,13 +378,14 @@ def getMinHeadAndLib(prodNodes): blockProducers1=[] for prodNode in prodNodes: - prodNode.getInfo() - - - Print("Tracking the blocks from the divergence till there are 10*12 blocks on one chain and 10*12+1 on the other") + info=prodNode.getInfo() + Print("node info: %s" % (info)) killBlockNum=blockNum lastBlockNum=killBlockNum+(maxActiveProducers - 1)*inRowCountPerProducer+1 # allow 1st testnet group to produce just 1 more block than the 2nd + + Print("Tracking the blocks from the divergence till there are 10*12 blocks on one chain and 10*12+1 on the other, from block %d to %d" % (killBlockNum, lastBlockNum)) + for blockNum in range(killBlockNum,lastBlockNum): blockProducer0=prodNodes[0].getBlockProducerByNum(blockNum) blockProducer1=prodNodes[1].getBlockProducerByNum(blockNum) @@ -377,7 +393,7 @@ def getMinHeadAndLib(prodNodes): blockProducers1.append({"blockNum":blockNum, "prod":blockProducer1}) - Print("Analyzing the producers from the divergence to the lastBlockNum and verify they stay diverged") + Print("Analyzing the producers from the divergence to the lastBlockNum and verify they stay diverged, expecting divergence at block %d" % (killBlockNum)) firstDivergence=analyzeBPs(blockProducers0, blockProducers1, expectDivergence=True) if firstDivergence!=killBlockNum: @@ -385,6 +401,9 @@ def getMinHeadAndLib(prodNodes): blockProducers0=[] blockProducers1=[] + for prodNode in prodNodes: + info=prodNode.getInfo() + Print("node info: %s" % (info)) Print("Relaunching the non-producing bridge node to connect the producing nodes again") @@ -392,14 +411,41 @@ def getMinHeadAndLib(prodNodes): errorExit("Failure - (non-production) node %d should have restarted" % (nonProdNode.nodeNum)) - Print("Identifying the producers from the saved LIB to the current highest head") + Print("Waiting to allow forks to resolve") + + for prodNode in prodNodes: + info=prodNode.getInfo() + Print("node info: %s" % (info)) #ensure that the nodes have enough time to get in concensus, so wait for 3 producers to produce their complete round time.sleep(inRowCountPerProducer * 3 / 2) + remainingChecks=20 + match=False + checkHead=False + while remainingChecks>0: + checkMatchBlock=killBlockNum if not checkHead else prodNodes[0].getBlockNum() + blockProducer0=prodNodes[0].getBlockProducerByNum(checkMatchBlock) + blockProducer1=prodNodes[1].getBlockProducerByNum(checkMatchBlock) + match=blockProducer0==blockProducer1 + if match: + if checkHead: + break + else: + checkHead=True + continue + Print("Fork has not resolved yet, wait a little more. Block %s has producer %s for node_00 and %s for node_01. Original divergence was at block %s. 
Wait time remaining: %d" % (checkMatchBlock, blockProducer0, blockProducer1, killBlockNum, remainingChecks))
+            time.sleep(1)
+            remainingChecks-=1
+
+    for prodNode in prodNodes:
+        info=prodNode.getInfo()
+        Print("node info: %s" % (info))

     # ensure all blocks from the lib before divergence till the current head are now in consensus
     endBlockNum=max(prodNodes[0].getBlockNum(), prodNodes[1].getBlockNum())

+    Print("Identifying the producers from the saved LIB to the current highest head, from block %d to %d" % (libNumAroundDivergence, endBlockNum))
+
     for blockNum in range(libNumAroundDivergence,endBlockNum):
         blockProducer0=prodNodes[0].getBlockProducerByNum(blockNum)
         blockProducer1=prodNodes[1].getBlockProducerByNum(blockNum)
@@ -411,11 +457,28 @@ def getMinHeadAndLib(prodNodes):

     analyzeBPs(blockProducers0, blockProducers1, expectDivergence=False)

+    resolvedKillBlockProducer=None
+    for prod in blockProducers0:
+        if prod["blockNum"]==killBlockNum:
+            resolvedKillBlockProducer = prod["prod"]
+    if resolvedKillBlockProducer is None:
+        Utils.errorExit("Did not find block %s (the original divergent block) in blockProducers0, test setup is wrong. blockProducers0: %s" % (killBlockNum, blockProducers0))
+    Print("Fork resolved and determined producer %s for block %s" % (resolvedKillBlockProducer, killBlockNum))
+
     blockProducers0=[]
     blockProducers1=[]

     testSuccessful=True
 finally:
-    TestHelper.shutdown(cluster, walletMgr, testSuccessful, killEosInstances, killWallet, keepLogs, killAll, dumpErrorDetails)
+    TestHelper.shutdown(cluster, walletMgr, testSuccessful=testSuccessful, killEosInstances=killEosInstances, killWallet=killWallet, keepLogs=keepLogs, cleanRun=killAll, dumpErrorDetails=dumpErrorDetails)
+
+    if not testSuccessful:
+        Print(Utils.FileDivider)
+        Print("Compare Blocklog")
+        cluster.compareBlockLogs()
+        Print(Utils.FileDivider)
+        Print("Print Blocklog")
+        cluster.printBlockLog()
+        Print(Utils.FileDivider)

 exit(0)
diff --git a/tests/nodeos_run_test.py b/tests/nodeos_run_test.py
index 1563450d81a..1db83f7f692 100755
--- a/tests/nodeos_run_test.py
+++ b/tests/nodeos_run_test.py
@@ -669,6 +669,7 @@
         errorExit("mongo get block by id %s" % blockId)

     Print("Request invalid block numbered %d. This will generate an expected error message." % (currentBlockNum+1000))
+    currentBlockNum=node.getHeadBlockNum() # If the tests take too long, we could be far beyond currentBlockNum+1000 and that'll cause a block to be found.
     block=node.getBlock(currentBlockNum+1000, silentErrors=True)
     if block is not None:
         errorExit("ERROR: Received block where not expected")
diff --git a/tests/nodeos_under_min_avail_ram.py b/tests/nodeos_under_min_avail_ram.py
index d4b4a859c14..57f51beafe4 100755
--- a/tests/nodeos_under_min_avail_ram.py
+++ b/tests/nodeos_under_min_avail_ram.py
@@ -328,6 +328,6 @@ def setName(self, num):

     testSuccessful=True
 finally:
-    TestHelper.shutdown(cluster, walletMgr, testSuccessful, killEosInstances, killWallet, keepLogs, killAll, dumpErrorDetails)
+    TestHelper.shutdown(cluster, walletMgr, testSuccessful=testSuccessful, killEosInstances=killEosInstances, killWallet=killWallet, keepLogs=keepLogs, cleanRun=killAll, dumpErrorDetails=dumpErrorDetails)

 exit(0)
diff --git a/tests/nodeos_voting_test.py b/tests/nodeos_voting_test.py
index d4781d0eefe..ee728962b9f 100755
--- a/tests/nodeos_voting_test.py
+++ b/tests/nodeos_voting_test.py
@@ -97,6 +97,7 @@ def verifyProductionRounds(trans, node, prodsActive, rounds):
             Utils.Print("ADJUSTED %s blocks" % (invalidCount-1))

     prodsSeen=None
+    reportFirstMissedBlock=False
     Utils.Print("Verify %s complete rounds of all producers producing" % (rounds))
     for i in range(0, rounds):
         prodsSeen={}
@@ -113,17 +114,19 @@ def verifyProductionRounds(trans, node, prodsActive, rounds):
                 validBlockProducer(prodsActive, prodsSeen, blockNum, node1)
                 blockProducer=node.getBlockProducerByNum(blockNum)
                 if lastBlockProducer!=blockProducer:
-                    printStr=""
-                    newBlockNum=blockNum-18
-                    for l in range(0,36):
-                        printStr+="%s" % (newBlockNum)
-                        printStr+=":"
-                        newBlockProducer=node.getBlockProducerByNum(newBlockNum)
-                        printStr+="%s" % (newBlockProducer)
-                        printStr+=" "
-                        newBlockNum+=1
-                    Utils.cmdError("expected blockNum %s (started from %s) to be produced by %s, but produded by %s: round=%s, prod slot=%s, prod num=%s - %s" % (blockNum, startingFrom, lastBlockProducer, blockProducer, i, j, k, printStr))
-                    Utils.errorExit("Failed because of incorrect block producer order")
+                    if not reportFirstMissedBlock:
+                        printStr=""
+                        newBlockNum=blockNum-18
+                        for l in range(0,36):
+                            printStr+="%s" % (newBlockNum)
+                            printStr+=":"
+                            newBlockProducer=node.getBlockProducerByNum(newBlockNum)
+                            printStr+="%s" % (newBlockProducer)
+                            printStr+=" "
+                            newBlockNum+=1
+                        Utils.Print("NOTE: expected blockNum %s (started from %s) to be produced by %s, but produced by %s: round=%s, prod slot=%s, prod num=%s - %s" % (blockNum, startingFrom, lastBlockProducer, blockProducer, i, j, k, printStr))
+                        reportFirstMissedBlock=True
+                    break
                 blockNum+=1

     # make sure that we have seen all 21 producers
@@ -246,6 +249,6 @@ def verifyProductionRounds(trans, node, prodsActive, rounds):
     testSuccessful=True
 finally:
-    TestHelper.shutdown(cluster, walletMgr, testSuccessful, killEosInstances, killWallet, keepLogs, killAll, dumpErrorDetails)
+    TestHelper.shutdown(cluster, walletMgr, testSuccessful=testSuccessful, killEosInstances=killEosInstances, killWallet=killWallet, keepLogs=keepLogs, cleanRun=killAll, dumpErrorDetails=dumpErrorDetails)

 exit(0)
diff --git a/tests/restart-scenarios-test.py b/tests/restart-scenarios-test.py
index afb7a12ccdc..6b3c217d75d 100755
--- a/tests/restart-scenarios-test.py
+++ b/tests/restart-scenarios-test.py
@@ -113,7 +113,7 @@
         errorExit("Cluster sync wait failed.")

     Print ("Relaunch dead cluster nodes instances.")
-    if cluster.relaunchEosInstances() is False:
+    if cluster.relaunchEosInstances(cachePopen=True) is False:
         errorExit("Failed to relaunch Eos instances")
     Print("nodeos instances relaunched.")
@@ -140,6 +140,6 @@
testSuccessful=True finally: - TestHelper.shutdown(cluster, walletMgr, testSuccessful, killEosInstances, killEosInstances, keepLogs, killAll, dumpErrorDetails) + TestHelper.shutdown(cluster, walletMgr, testSuccessful=testSuccessful, killEosInstances=killEosInstances, killWallet=killEosInstances, keepLogs=keepLogs, cleanRun=killAll, dumpErrorDetails=dumpErrorDetails) exit(0) diff --git a/tests/testUtils.py b/tests/testUtils.py index a8dbe0fd4d2..dc09eb34ae8 100755 --- a/tests/testUtils.py +++ b/tests/testUtils.py @@ -34,6 +34,8 @@ class Utils: EosBlockLogPath="programs/eosio-blocklog/eosio-blocklog" + FileDivider="=================================================================" + @staticmethod def Print(*args, **kwargs): stackDepth=len(inspect.stack())-2 diff --git a/tests/wallet_tests.cpp b/tests/wallet_tests.cpp index 28290fef211..2f5956cf126 100644 --- a/tests/wallet_tests.cpp +++ b/tests/wallet_tests.cpp @@ -32,13 +32,13 @@ BOOST_AUTO_TEST_CASE(wallet_test) wallet.set_wallet_filename("test"); BOOST_CHECK_EQUAL("test", wallet.get_wallet_filename()); - BOOST_CHECK_EQUAL(0, wallet.list_keys().size()); + BOOST_CHECK_EQUAL(0u, wallet.list_keys().size()); auto priv = fc::crypto::private_key::generate(); auto pub = priv.get_public_key(); auto wif = (std::string)priv; wallet.import_key(wif); - BOOST_CHECK_EQUAL(1, wallet.list_keys().size()); + BOOST_CHECK_EQUAL(1u, wallet.list_keys().size()); auto privCopy = wallet.get_private_key(pub); BOOST_CHECK_EQUAL(wif, (std::string)privCopy); @@ -46,7 +46,7 @@ BOOST_AUTO_TEST_CASE(wallet_test) wallet.lock(); BOOST_CHECK(wallet.is_locked()); wallet.unlock("pass"); - BOOST_CHECK_EQUAL(1, wallet.list_keys().size()); + BOOST_CHECK_EQUAL(1u, wallet.list_keys().size()); wallet.save_wallet_file("wallet_test.json"); BOOST_CHECK(fc::exists("wallet_test.json")); @@ -58,7 +58,7 @@ BOOST_AUTO_TEST_CASE(wallet_test) BOOST_CHECK(wallet2.is_locked()); wallet2.unlock("pass"); - BOOST_CHECK_EQUAL(1, wallet2.list_keys().size()); + BOOST_CHECK_EQUAL(1u, wallet2.list_keys().size()); auto privCopy2 = wallet2.get_private_key(pub); BOOST_CHECK_EQUAL(wif, (std::string)privCopy2); @@ -80,7 +80,7 @@ BOOST_AUTO_TEST_CASE(wallet_manager_test) constexpr auto key3 = "5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3"; wallet_manager wm; - BOOST_CHECK_EQUAL(0, wm.list_wallets().size()); + BOOST_CHECK_EQUAL(0u, wm.list_wallets().size()); BOOST_CHECK_THROW(wm.get_public_keys(), wallet_not_available_exception); BOOST_CHECK_NO_THROW(wm.lock_all()); @@ -90,11 +90,11 @@ BOOST_AUTO_TEST_CASE(wallet_manager_test) auto pw = wm.create("test"); BOOST_CHECK(!pw.empty()); - BOOST_CHECK_EQUAL(0, pw.find("PW")); // starts with PW - BOOST_CHECK_EQUAL(1, wm.list_wallets().size()); + BOOST_CHECK_EQUAL(0u, pw.find("PW")); // starts with PW + BOOST_CHECK_EQUAL(1u, wm.list_wallets().size()); // wallet has no keys when it is created - BOOST_CHECK_EQUAL(0, wm.get_public_keys().size()); - BOOST_CHECK_EQUAL(0, wm.list_keys("test", pw).size()); + BOOST_CHECK_EQUAL(0u, wm.get_public_keys().size()); + BOOST_CHECK_EQUAL(0u, wm.list_keys("test", pw).size()); BOOST_CHECK(wm.list_wallets().at(0).find("*") != std::string::npos); wm.lock("test"); BOOST_CHECK(wm.list_wallets().at(0).find("*") == std::string::npos); @@ -102,7 +102,7 @@ BOOST_AUTO_TEST_CASE(wallet_manager_test) BOOST_CHECK_THROW(wm.unlock("test", pw), chain::wallet_unlocked_exception); BOOST_CHECK(wm.list_wallets().at(0).find("*") != std::string::npos); wm.import_key("test", key1); - BOOST_CHECK_EQUAL(1, wm.get_public_keys().size()); + 
BOOST_CHECK_EQUAL(1u, wm.get_public_keys().size()); auto keys = wm.list_keys("test", pw); auto pub_pri_pair = [](const char *key) -> auto { @@ -120,34 +120,34 @@ BOOST_AUTO_TEST_CASE(wallet_manager_test) BOOST_CHECK(std::find(keys.cbegin(), keys.cend(), pub_pri_pair(key3)) == keys.cend()); wm.remove_key("test", pw, string(pub_pri_pair(key2).first)); - BOOST_CHECK_EQUAL(1, wm.get_public_keys().size()); + BOOST_CHECK_EQUAL(1u, wm.get_public_keys().size()); keys = wm.list_keys("test", pw); BOOST_CHECK(std::find(keys.cbegin(), keys.cend(), pub_pri_pair(key2)) == keys.cend()); wm.import_key("test", key2); - BOOST_CHECK_EQUAL(2, wm.get_public_keys().size()); + BOOST_CHECK_EQUAL(2u, wm.get_public_keys().size()); keys = wm.list_keys("test", pw); BOOST_CHECK(std::find(keys.cbegin(), keys.cend(), pub_pri_pair(key2)) != keys.cend()); BOOST_CHECK_THROW(wm.remove_key("test", pw, string(pub_pri_pair(key3).first)), fc::exception); - BOOST_CHECK_EQUAL(2, wm.get_public_keys().size()); + BOOST_CHECK_EQUAL(2u, wm.get_public_keys().size()); BOOST_CHECK_THROW(wm.remove_key("test", "PWnogood", string(pub_pri_pair(key2).first)), wallet_invalid_password_exception); - BOOST_CHECK_EQUAL(2, wm.get_public_keys().size()); + BOOST_CHECK_EQUAL(2u, wm.get_public_keys().size()); wm.lock("test"); BOOST_CHECK_THROW(wm.list_keys("test", pw), wallet_locked_exception); BOOST_CHECK_THROW(wm.get_public_keys(), wallet_locked_exception); wm.unlock("test", pw); - BOOST_CHECK_EQUAL(2, wm.get_public_keys().size()); - BOOST_CHECK_EQUAL(2, wm.list_keys("test", pw).size()); + BOOST_CHECK_EQUAL(2u, wm.get_public_keys().size()); + BOOST_CHECK_EQUAL(2u, wm.list_keys("test", pw).size()); wm.lock_all(); BOOST_CHECK_THROW(wm.get_public_keys(), wallet_locked_exception); BOOST_CHECK(wm.list_wallets().at(0).find("*") == std::string::npos); auto pw2 = wm.create("test2"); - BOOST_CHECK_EQUAL(2, wm.list_wallets().size()); + BOOST_CHECK_EQUAL(2u, wm.list_wallets().size()); // wallet has no keys when it is created - BOOST_CHECK_EQUAL(0, wm.get_public_keys().size()); + BOOST_CHECK_EQUAL(0u, wm.get_public_keys().size()); wm.import_key("test2", key3); - BOOST_CHECK_EQUAL(1, wm.get_public_keys().size()); + BOOST_CHECK_EQUAL(1u, wm.get_public_keys().size()); BOOST_CHECK_THROW(wm.import_key("test2", key3), fc::exception); keys = wm.list_keys("test2", pw2); BOOST_CHECK(std::find(keys.cbegin(), keys.cend(), pub_pri_pair(key1)) == keys.cend()); @@ -160,7 +160,7 @@ BOOST_AUTO_TEST_CASE(wallet_manager_test) BOOST_CHECK(std::find(keys.cbegin(), keys.cend(), pub_pri_pair(key1)) != keys.cend()); BOOST_CHECK(std::find(keys.cbegin(), keys.cend(), pub_pri_pair(key2)) != keys.cend()); BOOST_CHECK(std::find(keys.cbegin(), keys.cend(), pub_pri_pair(key3)) != keys.cend()); - BOOST_CHECK_EQUAL(3, keys.size()); + BOOST_CHECK_EQUAL(3u, keys.size()); BOOST_CHECK_THROW(wm.list_keys("test2", "PWnogood"), wallet_invalid_password_exception); @@ -175,11 +175,11 @@ BOOST_AUTO_TEST_CASE(wallet_manager_test) trx = wm.sign_transaction(trx, pubkeys, chain_id ); flat_set pks; trx.get_signature_keys(chain_id, fc::time_point::maximum(), pks); - BOOST_CHECK_EQUAL(2, pks.size()); + BOOST_CHECK_EQUAL(2u, pks.size()); BOOST_CHECK(find(pks.cbegin(), pks.cend(), pkey1.get_public_key()) != pks.cend()); BOOST_CHECK(find(pks.cbegin(), pks.cend(), pkey2.get_public_key()) != pks.cend()); - BOOST_CHECK_EQUAL(3, wm.get_public_keys().size()); + BOOST_CHECK_EQUAL(3u, wm.get_public_keys().size()); wm.set_timeout(chrono::seconds(0)); BOOST_CHECK_THROW(wm.get_public_keys(), wallet_locked_exception); 
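   // Editor's note (descriptive comment, not part of the patch): set_timeout(0)
   // lets the idle timeout lapse immediately, so both the manager-wide key query
   // above and the per-wallet listing below are expected to observe the locked
   // state and throw wallet_locked_exception.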
BOOST_CHECK_THROW(wm.list_keys("test", pw), wallet_locked_exception); diff --git a/tutorials/exchange-tutorial-python/README.md b/tutorials/exchange-tutorial-python/README.md deleted file mode 100644 index 95415935d60..00000000000 --- a/tutorials/exchange-tutorial-python/README.md +++ /dev/null @@ -1,36 +0,0 @@ -The following steps must be taken for the example script to work. - -0. Create wallet -0. Create account for eosio.token -0. Create account for scott -0. Create account for exchange -0. Set token contract on eosio.token -0. Create EOS token -0. Issue initial tokens to scott - -**Note**: -Deleting the `transactions.txt` file will prevent replay from working. - - -### Create wallet -`cleos wallet create` - -### Create account steps -`cleos create key` - -`cleos create key` - -`cleos wallet import --private-key ` - -`cleos wallet import --private-key ` - -`cleos create account eosio ` - -### Set contract steps -`cleos set contract eosio.token /contracts/eosio.token -p eosio.token@active` - -### Create EOS token steps -`cleos push action eosio.token create '{"issuer": "eosio.token", "maximum_supply": "100000.0000 EOS", "can_freeze": 1, "can_recall": 1, "can_whitelist": 1}' -p eosio.token@active` - -### Issue token steps -`cleos push action eosio.token issue '{"to": "scott", "quantity": "900.0000 EOS", "memo": "testing"}' -p eosio.token@active` diff --git a/tutorials/exchange-tutorial-python/exchange_tutorial.py b/tutorials/exchange-tutorial-python/exchange_tutorial.py deleted file mode 100644 index e260fc132e5..00000000000 --- a/tutorials/exchange-tutorial-python/exchange_tutorial.py +++ /dev/null @@ -1,187 +0,0 @@ -import json -import pprint -import os -import sys -import subprocess -import time - -from subprocess import PIPE - -# This key would be different for each user. 
-KEY_TO_INTERNAL_ACCOUNT='12345' -DEMO_USER='scott' - -def main(): - try: - command = sys.argv[1] - if command == 'monitor': - setup() - while True: - monitor_exchange() - time.sleep(.1) - elif command == 'transfer': - if len(sys.argv) == 4: - transfer(sys.argv[2], sys.argv[3]) - else: - print('Transfer must be called by `python exchange_tutorial.py transfer {} 1.0000`'.format(DEMO_USER)) - except subprocess.CalledProcessError as e: - print(e) - print(str(e.stderr, 'utf-8')) - -def monitor_exchange(): - action_num = get_last_action() + 1 - results = cleos('get actions tokenxchange {} 0 -j'.format(action_num)) - - results = json.loads(results.stdout) - action_list = results['actions'] - if len(action_list) == 0: - return - - action = action_list[0] - last_irreversible_block = results['last_irreversible_block'] - to = action['action_trace']['act']['data']['to'] - block_num = action['block_num'] - - if is_irreversible(block_num, last_irreversible_block): - update_balance(action, to) - set_last_action(action_num) - -def update_balance(action, to): - current_balance = get_balance() - new_balance = current_balance - transfer_quantity = action['action_trace']['act']['data']['quantity'].split()[0] - transfer_quantity = float(transfer_quantity) - - if to == 'tokenxchange': - if is_valid_deposit(action): - new_balance = current_balance + transfer_quantity - set_balance(new_balance) - elif is_valid_withdrawal(action): - new_balance = current_balance - transfer_quantity - set_balance(new_balance) - - -def transfer(to, quantity): - if quantity[:-4] != ' SYS': - quantity += ' SYS' - results = cleos('transfer tokenxchange {} "{}" {} -j'.format(to, quantity, KEY_TO_INTERNAL_ACCOUNT)) - transaction_info = json.loads(str(results.stdout, 'utf-8')) - transaction_id = transaction_info['transaction_id'] - - transaction_status = transaction_info['processed']['receipt']['status'] - if transaction_status == 'hard_fail': - print('Transaction failed.') - return - - add_transactions(transaction_id) - print('Initiated transfer of {} to {}. Transaction id is {}.'.format(quantity, to, transaction_id)) - - -def is_irreversible(block_num, last_irreversible_block): - return block_num <= last_irreversible_block - -def is_valid_deposit(action): - account = action['action_trace']['act']['account'] - action_name = action['action_trace']['act']['name'] - memo = action['action_trace']['act']['data']['memo'] - receiver = action['action_trace']['receipt']['receiver'] - token = action['action_trace']['act']['data']['quantity'].split()[1] - - valid_user = action['action_trace']['act']['data']['to'] == 'tokenxchange' - from_user = action['action_trace']['act']['data']['from'] - - # Filter only to actions that notify the tokenxchange account. 
- if receiver != 'tokenxchange': - return False - - if (account == 'eosio.token' and - action_name == 'transfer' and - memo == KEY_TO_INTERNAL_ACCOUNT and - valid_user and - from_user == DEMO_USER and - token == 'SYS'): - return True - - print('Invalid deposit') - return False - -def is_valid_withdrawal(action): - account = action['action_trace']['act']['account'] - action_name = action['action_trace']['act']['name'] - memo = action['action_trace']['act']['data']['memo'] - receiver = action['action_trace']['receipt']['receiver'] - token = action['action_trace']['act']['data']['quantity'].split()[1] - - transaction_id = action['action_trace']['trx_id'] - - valid_user = action['action_trace']['act']['data']['from'] == 'tokenxchange' - to_user = action['action_trace']['act']['data']['to'] - - # Filter only to actions that notify the exchange account. - if receiver != 'tokenxchange': - return False - - if (account == 'eosio.token' and - action_name == 'transfer' and - memo == KEY_TO_INTERNAL_ACCOUNT and - valid_user and - to_user == DEMO_USER and - transaction_id in get_transactions() and - token == 'SYS'): - return True - - print('Invalid withdrawal') - return False - -def cleos(args): - if isinstance(args, list): - command = ['cleos'] - command.extend(args) - else: - command = 'cleos ' + args - - results = subprocess.run(command, stdin=PIPE, stdout=PIPE, stderr=PIPE, shell=True, check=True) - return results - -def setup(): - if not os.path.exists('last_action.txt'): - set_last_action(-1) - if not os.path.exists('balance.txt'): - set_balance(0) - if not os.path.exists('transactions.txt'): - with open('transactions.txt', 'w') as f: - f.write(json.dumps({"transactions": []})) - -def get_transactions(): - with open('transactions.txt', 'r') as f: - transactions = json.load(f) - return set(transactions['transactions']) - -def add_transactions(transaction_id): - transactions = get_transactions() - transactions.add(transaction_id) - with open('transactions.txt', 'w') as f: - transactions = json.dumps({'transactions': list(transactions)}) - f.write(transactions) - -def get_last_action(): - with open('last_action.txt', 'r') as f: - last_action = int(f.read()) - return last_action - -def set_last_action(action): - with open('last_action.txt', 'w') as f: - f.write(str(action)) - -def get_balance(): - with open('balance.txt', 'r') as f: - balance = float(f.read()) - return balance - -def set_balance(balance): - with open('balance.txt', 'w') as f: - f.write(str(balance)) - print("{}'s balance is: {}".format(DEMO_USER, balance)) - -if __name__ == '__main__': - main() diff --git a/unittests/CMakeLists.txt b/unittests/CMakeLists.txt index 0d789225f6b..3b288f2d2a3 100644 --- a/unittests/CMakeLists.txt +++ b/unittests/CMakeLists.txt @@ -1,19 +1,18 @@ find_package( Gperftools QUIET ) -### Build contracts with cdt if available +### Build contracts with cdt if available ### include(ExternalProject) -# if no cdt root is given use default path -if(EOSIO_CDT_ROOT STREQUAL "" OR NOT EOSIO_CDT_ROOT) + +if( EOSIO_COMPILE_TEST_CONTRACTS ) set(EOSIO_WASM_OLD_BEHAVIOR "Off") - find_package(eosio.cdt) -endif() + find_package(eosio.cdt REQUIRED) -if (eosio.cdt_FOUND) + message( STATUS "Building contracts in directory `eos/unittests/test-contracts/`" ) ExternalProject_Add( test_contracts_project SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/test-contracts BINARY_DIR ${CMAKE_CURRENT_BINARY_DIR}/test-contracts - CMAKE_ARGS -DCMAKE_TOOLCHAIN_FILE=${EOSIO_CDT_ROOT}/lib/cmake/eosio.cdt/EosioWasmToolchain.cmake 
-DBoost_INCLUDE_DIRS=${Boost_INCLUDE_DIRS}
+      CMAKE_ARGS -DCMAKE_TOOLCHAIN_FILE=${EOSIO_CDT_ROOT}/lib/cmake/eosio.cdt/EosioWasmToolchain.cmake -DEOSIO_COMPILE_TEST_CONTRACTS=${EOSIO_COMPILE_TEST_CONTRACTS}
       UPDATE_COMMAND ""
       PATCH_COMMAND ""
       TEST_COMMAND ""
@@ -21,12 +20,13 @@ if (eosio.cdt_FOUND)
       BUILD_ALWAYS 1
    )
 else()
-   add_subdirectory(test-contracts)
+   message( STATUS "Not building contracts in directory `eos/unittests/test-contracts/`" )
+   add_subdirectory(test-contracts)
 endif()

 if( GPERFTOOLS_FOUND )
-  message( STATUS "Found gperftools; compiling tests with TCMalloc")
-  list( APPEND PLATFORM_SPECIFIC_LIBS tcmalloc )
+   message( STATUS "Found gperftools; compiling tests with TCMalloc" )
+   list( APPEND PLATFORM_SPECIFIC_LIBS tcmalloc )
 endif()

 find_package(LLVM 4.0 REQUIRED CONFIG)
@@ -37,16 +37,13 @@ set( CMAKE_CXX_STANDARD 14 )

 add_subdirectory(contracts)

-configure_file(${CMAKE_CURRENT_SOURCE_DIR}/include/config.hpp.in ${CMAKE_CURRENT_BINARY_DIR}/include/config.hpp ESCAPE_QUOTES)
 configure_file(${CMAKE_CURRENT_SOURCE_DIR}/contracts.hpp.in ${CMAKE_CURRENT_BINARY_DIR}/include/contracts.hpp ESCAPE_QUOTES)

-file(GLOB UNIT_TESTS "*.cpp")
-
-add_executable( unit_test ${UNIT_TESTS} ${WASM_UNIT_TESTS} )
+### BUILD UNIT TEST EXECUTABLE ###
+file(GLOB UNIT_TESTS "*.cpp") # find all unit test suites
+add_executable( unit_test ${UNIT_TESTS}) # build unit tests as one executable
 target_link_libraries( unit_test eosio_chain chainbase eosio_testing fc ${PLATFORM_SPECIFIC_LIBS} )
-
 target_compile_options(unit_test PUBLIC -DDISABLE_EOSLIB_SERIALIZE)
-
 target_include_directories( unit_test PUBLIC
                             ${CMAKE_SOURCE_DIR}/libraries/testing/include
                             ${CMAKE_SOURCE_DIR}/test-contracts
@@ -55,59 +52,55 @@ target_include_directories( unit_test PUBLIC
                             ${CMAKE_CURRENT_BINARY_DIR}/contracts
                             ${CMAKE_CURRENT_BINARY_DIR}/include )

-#Manually run unit_test for all supported runtimes
-#To run unit_test with all log from blockchain displayed, put --verbose after --, i.e. unit_test -- --verbose
-add_test(NAME unit_test_wavm COMMAND unit_test
-  -t \!wasm_tests/weighted_cpu_limit_tests
-  --report_level=detailed --color_output --catch_system_errors=no -- --wavm)
-add_test(NAME unit_test_wabt COMMAND unit_test
-  -t \!wasm_tests/weighted_cpu_limit_tests
-  --report_level=detailed --color_output -- --wabt)
-
+### MARK TEST SUITES FOR EXECUTION ###
+foreach(TEST_SUITE ${UNIT_TESTS}) # create independent tests for each test suite
+   execute_process(COMMAND bash -c "grep -E 'BOOST_AUTO_TEST_SUITE\\s*[(]' ${TEST_SUITE} | grep -vE '//.*BOOST_AUTO_TEST_SUITE\\s*[(]' | cut -d ')' -f 1 | cut -d '(' -f 2" OUTPUT_VARIABLE SUITE_NAME OUTPUT_STRIP_TRAILING_WHITESPACE) # get the test suite name from the *.cpp file
+   if (NOT "" STREQUAL "${SUITE_NAME}") # ignore empty lines
+      execute_process(COMMAND bash -c "echo ${SUITE_NAME} | sed -e 's/s$//' | sed -e 's/_test$//'" OUTPUT_VARIABLE TRIMMED_SUITE_NAME OUTPUT_STRIP_TRAILING_WHITESPACE) # trim "_test" or "_tests" from the end of ${SUITE_NAME}
+      # to run unit_test with all log from blockchain displayed, put "--verbose" after "--", i.e. "unit_test -- --verbose"
+      add_test(NAME ${TRIMMED_SUITE_NAME}_unit_test_wavm COMMAND unit_test --run_test=${SUITE_NAME} --report_level=detailed --color_output --catch_system_errors=no -- --wavm)
+      add_test(NAME ${TRIMMED_SUITE_NAME}_unit_test_wabt COMMAND unit_test --run_test=${SUITE_NAME} --report_level=detailed --color_output -- --wabt)
+      # build list of tests to run during coverage testing
+      if(NOT "" STREQUAL "${ctest_tests}")
+         set(ctest_tests "${ctest_tests}|${TRIMMED_SUITE_NAME}_unit_test_wavm|${TRIMMED_SUITE_NAME}_unit_test_wabt")
+      else()
+         set(ctest_tests "${TRIMMED_SUITE_NAME}_unit_test_wavm|${TRIMMED_SUITE_NAME}_unit_test_wabt")
+      endif()
+   endif()
+endforeach(TEST_SUITE)
+set(ctest_tests "'${ctest_tests}' -j8") # surround test list string in apostrophes
+
+### COVERAGE TESTING ###
 if(ENABLE_COVERAGE_TESTING)
-  set(Coverage_NAME ${PROJECT_NAME}_ut_coverage)
-
+   # check for dependencies
   if(NOT LCOV_PATH)
     message(FATAL_ERROR "lcov not found! Aborting...")
-  endif() # NOT LCOV_PATH
-
+   endif()
   if(NOT LLVMCOV_PATH)
     message(FATAL_ERROR "llvm-cov not found! Aborting...")
-  endif() # NOT LCOV_PATH
-
+   endif()
   if(NOT GENHTML_PATH)
     message(FATAL_ERROR "genhtml not found! Aborting...")
-  endif() # NOT GENHTML_PATH
-
-  # no spaces allowed within tests list
-  set(ctest_tests 'unit_test_wabt|unit_test_wavm')
-  set(ctest_exclude_tests '')
-
-  # Setup target
+   endif()
+   # tests to skip during coverage testing
+   set(ctest_exclude_tests '') # no spaces allowed within tests list
+   # setup target
   add_custom_target(${Coverage_NAME}
-
-    # Cleanup lcov
+      # cleanup lcov
     COMMAND ${LCOV_PATH} --directory . --zerocounters
-
-    # Run tests
+      # run tests
    COMMAND ./tools/ctestwrapper.sh -R ${ctest_tests} -E ${ctest_exclude_tests}
-
    COMMAND ${LCOV_PATH} --directory . --capture --gcov-tool ${CMAKE_SOURCE_DIR}/tools/llvm-gcov.sh --output-file ${Coverage_NAME}.info
-
    COMMAND ${LCOV_PATH} -remove ${Coverage_NAME}.info '*/boost/*' '/usr/lib/*' '/usr/include/*' '*/externals/*' '*/fc/*' '*/wasm-jit/*' --output-file ${Coverage_NAME}_filtered.info
-
    COMMAND ${GENHTML_PATH} -o ${Coverage_NAME} ${PROJECT_BINARY_DIR}/${Coverage_NAME}_filtered.info
-
    COMMAND if [ "$CI" != "true" ]\; then ${CMAKE_COMMAND} -E remove ${Coverage_NAME}.base ${Coverage_NAME}.info ${Coverage_NAME}_filtered.info ${Coverage_NAME}.total ${PROJECT_BINARY_DIR}/${Coverage_NAME}.info.cleaned ${PROJECT_BINARY_DIR}/${Coverage_NAME}_filtered.info.cleaned\; fi
-
    WORKING_DIRECTORY ${PROJECT_BINARY_DIR}
    COMMENT "Resetting code coverage counters to zero. Processing code coverage counters and generating report. Report published in ./${Coverage_NAME}"
-  )
-
-  # Show info where to find the report
+   )
+   # show info where to find the report
   add_custom_command(TARGET ${Coverage_NAME} POST_BUILD
    COMMAND ;
    COMMENT "Open ./${Coverage_NAME}/index.html in your browser to view the coverage report."
- ) + ) endif() diff --git a/unittests/abi_tests.cpp b/unittests/abi_tests.cpp index 440ec5acbfc..7e6c84fb6f2 100644 --- a/unittests/abi_tests.cpp +++ b/unittests/abi_tests.cpp @@ -22,8 +22,6 @@ #include -#include - #include #include @@ -897,13 +895,13 @@ BOOST_AUTO_TEST_CASE(updateauth_test) BOOST_TEST("updauth.prnt" == updauth.parent); BOOST_TEST(2147483145u == updauth.auth.threshold); - BOOST_TEST_REQUIRE(2 == updauth.auth.keys.size()); + BOOST_TEST_REQUIRE(2u == updauth.auth.keys.size()); BOOST_TEST("EOS65rXebLhtk2aTTzP4e9x1AQZs7c5NNXJp89W8R3HyaA6Zyd4im" == (std::string)updauth.auth.keys[0].key); BOOST_TEST(57005u == updauth.auth.keys[0].weight); BOOST_TEST("EOS5eVr9TVnqwnUBNwf9kwMTbrHvX5aPyyEG97dz2b2TNeqWRzbJf" == (std::string)updauth.auth.keys[1].key); BOOST_TEST(57605u == updauth.auth.keys[1].weight); - BOOST_TEST_REQUIRE(2 == updauth.auth.accounts.size()); + BOOST_TEST_REQUIRE(2u == updauth.auth.accounts.size()); BOOST_TEST("prm.acct1" == updauth.auth.accounts[0].permission.actor); BOOST_TEST("prm.prm1" == updauth.auth.accounts[0].permission.permission); BOOST_TEST(53005u == updauth.auth.accounts[0].weight); @@ -1001,13 +999,13 @@ BOOST_AUTO_TEST_CASE(newaccount_test) BOOST_TEST(2147483145u == newacct.owner.threshold); - BOOST_TEST_REQUIRE(2 == newacct.owner.keys.size()); + BOOST_TEST_REQUIRE(2u == newacct.owner.keys.size()); BOOST_TEST("EOS65rXebLhtk2aTTzP4e9x1AQZs7c5NNXJp89W8R3HyaA6Zyd4im" == (std::string)newacct.owner.keys[0].key); BOOST_TEST(57005u == newacct.owner.keys[0].weight); BOOST_TEST("EOS5eVr9TVnqwnUBNwf9kwMTbrHvX5aPyyEG97dz2b2TNeqWRzbJf" == (std::string)newacct.owner.keys[1].key); BOOST_TEST(57605u == newacct.owner.keys[1].weight); - BOOST_TEST_REQUIRE(2 == newacct.owner.accounts.size()); + BOOST_TEST_REQUIRE(2u == newacct.owner.accounts.size()); BOOST_TEST("prm.acct1" == newacct.owner.accounts[0].permission.actor); BOOST_TEST("prm.prm1" == newacct.owner.accounts[0].permission.permission); BOOST_TEST(53005u == newacct.owner.accounts[0].weight); @@ -1017,13 +1015,13 @@ BOOST_AUTO_TEST_CASE(newaccount_test) BOOST_TEST(2146483145u == newacct.active.threshold); - BOOST_TEST_REQUIRE(2 == newacct.active.keys.size()); + BOOST_TEST_REQUIRE(2u == newacct.active.keys.size()); BOOST_TEST("EOS65rXebLhtk2aTTzP4e9x1AQZs7c5NNXJp89W8R3HyaA6Zyd4im" == (std::string)newacct.active.keys[0].key); BOOST_TEST(57005u == newacct.active.keys[0].weight); BOOST_TEST("EOS5eVr9TVnqwnUBNwf9kwMTbrHvX5aPyyEG97dz2b2TNeqWRzbJf" == (std::string)newacct.active.keys[1].key); BOOST_TEST(57605u == newacct.active.keys[1].weight); - BOOST_TEST_REQUIRE(2 == newacct.active.accounts.size()); + BOOST_TEST_REQUIRE(2u == newacct.active.accounts.size()); BOOST_TEST("prm.acct1" == newacct.active.accounts[0].permission.actor); BOOST_TEST("prm.prm1" == newacct.active.accounts[0].permission.permission); BOOST_TEST(53005u == newacct.active.accounts[0].weight); @@ -1305,22 +1303,22 @@ BOOST_AUTO_TEST_CASE(setabi_test) auto var = fc::json::from_string(abi_string); auto abi = var.as(); - BOOST_TEST_REQUIRE(1 == abi.types.size()); + BOOST_TEST_REQUIRE(1u == abi.types.size()); BOOST_TEST("account_name" == abi.types[0].new_type_name); BOOST_TEST("name" == abi.types[0].type); - BOOST_TEST_REQUIRE(3 == abi.structs.size()); + BOOST_TEST_REQUIRE(3u == abi.structs.size()); BOOST_TEST("transfer_base" == abi.structs[0].name); BOOST_TEST("" == abi.structs[0].base); - BOOST_TEST_REQUIRE(1 == abi.structs[0].fields.size()); + BOOST_TEST_REQUIRE(1u == abi.structs[0].fields.size()); BOOST_TEST("memo" == abi.structs[0].fields[0].name); 
BOOST_TEST("string" == abi.structs[0].fields[0].type); BOOST_TEST("transfer" == abi.structs[1].name); BOOST_TEST("transfer_base" == abi.structs[1].base); - BOOST_TEST_REQUIRE(3 == abi.structs[1].fields.size()); + BOOST_TEST_REQUIRE(3u == abi.structs[1].fields.size()); BOOST_TEST("from" == abi.structs[1].fields[0].name); BOOST_TEST("account_name" == abi.structs[1].fields[0].type); BOOST_TEST("to" == abi.structs[1].fields[1].name); @@ -1330,23 +1328,23 @@ BOOST_AUTO_TEST_CASE(setabi_test) BOOST_TEST("account" == abi.structs[2].name); BOOST_TEST("" == abi.structs[2].base); - BOOST_TEST_REQUIRE(2 == abi.structs[2].fields.size()); + BOOST_TEST_REQUIRE(2u == abi.structs[2].fields.size()); BOOST_TEST("account" == abi.structs[2].fields[0].name); BOOST_TEST("name" == abi.structs[2].fields[0].type); BOOST_TEST("balance" == abi.structs[2].fields[1].name); BOOST_TEST("uint64" == abi.structs[2].fields[1].type); - BOOST_TEST_REQUIRE(1 == abi.actions.size()); + BOOST_TEST_REQUIRE(1u == abi.actions.size()); BOOST_TEST("transfer" == abi.actions[0].name); BOOST_TEST("transfer" == abi.actions[0].type); - BOOST_TEST_REQUIRE(1 == abi.tables.size()); + BOOST_TEST_REQUIRE(1u == abi.tables.size()); BOOST_TEST("account" == abi.tables[0].name); BOOST_TEST("account" == abi.tables[0].type); BOOST_TEST("i64" == abi.tables[0].index_type); - BOOST_TEST_REQUIRE(1 == abi.tables[0].key_names.size()); + BOOST_TEST_REQUIRE(1u == abi.tables[0].key_names.size()); BOOST_TEST("account" == abi.tables[0].key_names[0]); - BOOST_TEST_REQUIRE(1 == abi.tables[0].key_types.size()); + BOOST_TEST_REQUIRE(1u == abi.tables[0].key_types.size()); BOOST_TEST("name" == abi.tables[0].key_types[0]); auto var2 = verify_byte_round_trip_conversion( abis, "abi_def", var ); diff --git a/unittests/api_tests.cpp b/unittests/api_tests.cpp index 909dd061160..1d4fedc0e7c 100644 --- a/unittests/api_tests.cpp +++ b/unittests/api_tests.cpp @@ -201,7 +201,7 @@ transaction_trace_ptr CallAction(TESTER& test, T ac, const vector& auto pl = vector{{scope[0], config::active_name}}; if (scope.size() > 1) - for (int i = 1; i < scope.size(); i++) + for (size_t i = 1; i < scope.size(); i++) pl.push_back({scope[i], config::active_name}); action act(pl, ac); @@ -234,10 +234,10 @@ transaction_trace_ptr CallFunction(TESTER& test, T ac, const vector& data, test.set_transaction_headers(trx, test.DEFAULT_EXPIRATION_DELTA); auto sigs = trx.sign(test.get_private_key(scope[0], "active"), test.control->get_chain_id()); - + flat_set keys; trx.get_signature_keys(test.control->get_chain_id(), fc::time_point::maximum(), keys); - + auto res = test.push_transaction(trx); BOOST_CHECK_EQUAL(res->receipt->status, transaction_receipt::executed); test.produce_block(); @@ -305,47 +305,105 @@ struct MySink : public bio::sink uint32_t last_fnc_err = 0; BOOST_FIXTURE_TEST_CASE(action_receipt_tests, TESTER) { try { - produce_blocks(2); - create_account( N(testapi) ); - create_account( N(testapi2) ); - produce_blocks(10); - set_code( N(testapi), contracts::test_api_wasm() ); - produce_blocks(1); - - auto res = CALL_TEST_FUNCTION( *this, "test_action", "assert_true", {}); - BOOST_REQUIRE_EQUAL(uint32_t(res->action_traces[0].receipt.code_sequence), 1); - BOOST_REQUIRE_EQUAL(uint32_t(res->action_traces[0].receipt.abi_sequence), 0); + produce_blocks(2); + create_account( N(test) ); + set_code( N(test), contracts::payloadless_wasm() ); + produce_blocks(1); - set_code( N(testapi), contracts::test_api_db_wasm() ); - set_code( config::system_account_name, contracts::test_api_db_wasm() ); - res = 
CALL_TEST_FUNCTION( *this, "test_db", "primary_i64_general", {}); - BOOST_REQUIRE_EQUAL(uint32_t(res->action_traces[0].receipt.code_sequence), 2); - BOOST_REQUIRE_EQUAL(uint32_t(res->action_traces[0].receipt.abi_sequence), 0); + auto call_doit_and_check = [&]( account_name contract, account_name signer, auto&& checker ) { + signed_transaction trx; + trx.actions.emplace_back( vector{{signer, config::active_name}}, contract, N(doit), bytes{} ); + this->set_transaction_headers( trx, this->DEFAULT_EXPIRATION_DELTA ); + trx.sign( this->get_private_key(signer, "active"), control->get_chain_id() ); + auto res = this->push_transaction(trx); + checker( res ); + }; - { + auto call_provereset_and_check = [&]( account_name contract, account_name signer, auto&& checker ) { signed_transaction trx; - auto pl = vector{{config::system_account_name, config::active_name}}; - action act(pl, test_chain_action{}); - act.authorization = {{config::system_account_name, config::active_name}}; - trx.actions.push_back(act); - this->set_transaction_headers(trx, this->DEFAULT_EXPIRATION_DELTA); - trx.sign(this->get_private_key(config::system_account_name, "active"), control->get_chain_id()); - flat_set keys; - trx.get_signature_keys(control->get_chain_id(), fc::time_point::maximum(), keys); + trx.actions.emplace_back( vector{{signer, config::active_name}}, contract, N(provereset), bytes{} ); + this->set_transaction_headers( trx, this->DEFAULT_EXPIRATION_DELTA ); + trx.sign( this->get_private_key(signer, "active"), control->get_chain_id() ); auto res = this->push_transaction(trx); - BOOST_CHECK_EQUAL(res->receipt->status, transaction_receipt::executed); - this->produce_block(); - BOOST_REQUIRE_EQUAL(uint32_t(res->action_traces[0].receipt.code_sequence), 2); - BOOST_REQUIRE_EQUAL(uint32_t(res->action_traces[0].receipt.abi_sequence), 1); - } + checker( res ); + }; + + auto result = push_reqauth( config::system_account_name, "active" ); + BOOST_REQUIRE_EQUAL( result->receipt->status, transaction_receipt::executed ); + BOOST_REQUIRE( result->action_traces[0].receipt.auth_sequence.find( config::system_account_name ) + != result->action_traces[0].receipt.auth_sequence.end() ); + auto base_global_sequence_num = result->action_traces[0].receipt.global_sequence; + auto base_system_recv_seq_num = result->action_traces[0].receipt.recv_sequence; + auto base_system_auth_seq_num = result->action_traces[0].receipt.auth_sequence[config::system_account_name]; + auto base_system_code_seq_num = result->action_traces[0].receipt.code_sequence.value; + auto base_system_abi_seq_num = result->action_traces[0].receipt.abi_sequence.value; + + uint64_t base_test_recv_seq_num = 0; + uint64_t base_test_auth_seq_num = 0; + call_doit_and_check( N(test), N(test), [&]( const transaction_trace_ptr& res ) { + BOOST_CHECK_EQUAL( res->receipt->status, transaction_receipt::executed ); + BOOST_CHECK_EQUAL( res->action_traces[0].receipt.global_sequence, base_global_sequence_num + 1 ); + BOOST_CHECK_EQUAL( res->action_traces[0].receipt.code_sequence.value, 1 ); + BOOST_CHECK_EQUAL( res->action_traces[0].receipt.abi_sequence.value, 0 ); + base_test_recv_seq_num = res->action_traces[0].receipt.recv_sequence; + BOOST_CHECK( base_test_recv_seq_num > 0 ); + base_test_recv_seq_num--; + const auto& m = res->action_traces[0].receipt.auth_sequence; + BOOST_CHECK_EQUAL( m.size(), 1 ); + BOOST_CHECK_EQUAL( m.begin()->first.to_string(), "test" ); + base_test_auth_seq_num = m.begin()->second; + BOOST_CHECK( base_test_auth_seq_num > 0 ); + --base_test_auth_seq_num; + } ); + 
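+      // The arithmetic in the checks below follows how action receipt sequence
+      // numbers advance: code_sequence increments with each set_code on the
+      // receiving account, abi_sequence with each set_abi, and global_sequence
+      // with every action the chain executes, including each produced block's
+      // implicit onblock action.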
+      set_code( N(test), contracts::asserter_wasm() );
+      set_code( config::system_account_name, contracts::payloadless_wasm() );
+
+      call_provereset_and_check( N(test), N(test), [&]( const transaction_trace_ptr& res ) {
+         BOOST_CHECK_EQUAL( res->receipt->status, transaction_receipt::executed );
+         BOOST_CHECK_EQUAL( res->action_traces[0].receipt.global_sequence, base_global_sequence_num + 4 );
+         BOOST_CHECK_EQUAL( res->action_traces[0].receipt.recv_sequence, base_test_recv_seq_num + 2 );
+         BOOST_CHECK_EQUAL( res->action_traces[0].receipt.code_sequence.value, 2 );
+         BOOST_CHECK_EQUAL( res->action_traces[0].receipt.abi_sequence.value, 0 );
+         const auto& m = res->action_traces[0].receipt.auth_sequence;
+         BOOST_CHECK_EQUAL( m.size(), 1 );
+         BOOST_CHECK_EQUAL( m.begin()->first.to_string(), "test" );
+         BOOST_CHECK_EQUAL( m.begin()->second, base_test_auth_seq_num + 3 );
+      } );
+
+      produce_blocks(1); // Added to prevent the last doit transaction from being considered a duplicate.
+                         // Adding a block also retires an onblock action which increments both the global sequence number
+                         // and the recv and auth sequence numbers for the system account.
+
+      call_doit_and_check( config::system_account_name, N(test), [&]( const transaction_trace_ptr& res ) {
+         BOOST_CHECK_EQUAL( res->receipt->status, transaction_receipt::executed );
+         BOOST_CHECK_EQUAL( res->action_traces[0].receipt.global_sequence, base_global_sequence_num + 6 );
+         BOOST_CHECK_EQUAL( res->action_traces[0].receipt.recv_sequence, base_system_recv_seq_num + 4 );
+         BOOST_CHECK_EQUAL( res->action_traces[0].receipt.code_sequence.value, base_system_code_seq_num + 1 );
+         BOOST_CHECK_EQUAL( res->action_traces[0].receipt.abi_sequence.value, base_system_abi_seq_num );
+         const auto& m = res->action_traces[0].receipt.auth_sequence;
+         BOOST_CHECK_EQUAL( m.size(), 1 );
+         BOOST_CHECK_EQUAL( m.begin()->first.to_string(), "test" );
+         BOOST_CHECK_EQUAL( m.begin()->second, base_test_auth_seq_num + 4 );
+      } );
+
       set_code( config::system_account_name, contracts::eosio_bios_wasm() );
-      set_code( N(testapi), contracts::eosio_bios_wasm() );
-      set_abi(N(testapi), contracts::eosio_bios_abi().data() );
-      set_code( N(testapi), contracts::test_api_wasm() );
-      res = CALL_TEST_FUNCTION( *this, "test_action", "assert_true", {});
-      BOOST_REQUIRE_EQUAL(uint32_t(res->action_traces[0].receipt.code_sequence), 4);
-      BOOST_REQUIRE_EQUAL(uint32_t(res->action_traces[0].receipt.abi_sequence), 1);
+      set_code( N(test), contracts::eosio_bios_wasm() );
+      set_abi( N(test), contracts::eosio_bios_abi().data() );
+      set_code( N(test), contracts::payloadless_wasm() );
+
+      call_doit_and_check( N(test), N(test), [&]( const transaction_trace_ptr& res ) {
+         BOOST_CHECK_EQUAL( res->receipt->status, transaction_receipt::executed );
+         BOOST_CHECK_EQUAL( res->action_traces[0].receipt.global_sequence, base_global_sequence_num + 11 );
+         BOOST_CHECK_EQUAL( res->action_traces[0].receipt.recv_sequence, base_test_recv_seq_num + 3 );
+         BOOST_CHECK_EQUAL( res->action_traces[0].receipt.code_sequence.value, 4 );
+         BOOST_CHECK_EQUAL( res->action_traces[0].receipt.abi_sequence.value, 1 );
+         const auto& m = res->action_traces[0].receipt.auth_sequence;
+         BOOST_CHECK_EQUAL( m.size(), 1 );
+         BOOST_CHECK_EQUAL( m.begin()->first.to_string(), "test" );
+         BOOST_CHECK_EQUAL( m.begin()->second, base_test_auth_seq_num + 8 );
+      } );
 } FC_LOG_AND_RETHROW() }

@@ -414,7 +472,7 @@ BOOST_FIXTURE_TEST_CASE(action_tests, TESTER) { try {
       auto res = test.push_transaction(trx);
       BOOST_CHECK_EQUAL(res->receipt->status, 
transaction_receipt::executed); }; - + BOOST_CHECK_EXCEPTION(test_require_notice(*this, raw_bytes, scope), unsatisfied_authorization, [](const unsatisfied_authorization& e) { return expect_assert_message(e, "transaction declares authority"); @@ -478,15 +536,15 @@ BOOST_FIXTURE_TEST_CASE(action_tests, TESTER) { try { produce_block(); BOOST_CHECK_EXCEPTION( CALL_TEST_FUNCTION( *this, "test_action", "test_current_time", fc::raw::pack(now) ), eosio_assert_message_exception, eosio_assert_message_is("tmp == current_time()") ); - + // test test_current_receiver CALL_TEST_FUNCTION( *this, "test_action", "test_current_receiver", fc::raw::pack(N(testapi))); - + // test send_action_sender CALL_TEST_FUNCTION( *this, "test_transaction", "send_action_sender", fc::raw::pack(N(testapi))); - + produce_block(); - + // test_publication_time uint64_t pub_time = static_cast( control->head_block_time().time_since_epoch().count() ); pub_time += config::block_interval_us; @@ -568,13 +626,13 @@ BOOST_FIXTURE_TEST_CASE(cf_action_tests, TESTER) { try { return expect_assert_message(e, "transaction must have at least one authorization"); } ); - + action act({}, cfa); trx.context_free_actions.push_back(act); trx.context_free_data.emplace_back(fc::raw::pack(100)); // verify payload matches context free data trx.context_free_data.emplace_back(fc::raw::pack(200)); set_transaction_headers(trx); - + BOOST_CHECK_EXCEPTION(push_transaction(trx), tx_no_auths, [](const fc::exception& e) { return expect_assert_message(e, "transaction must have at least one authorization"); @@ -924,106 +982,47 @@ BOOST_FIXTURE_TEST_CASE(checktime_hashing_fail, TESTER) { try { //hit deadline exception, but cache the contract BOOST_CHECK_EXCEPTION( call_test( *this, test_api_action{}, - 5000, 10 ), + 5000, 3 ), deadline_exception, is_deadline_exception ); #warning TODO validate that the contract was successfully cached //the contract should be cached, now we should get deadline_exception because of calls to checktime() from hashing function BOOST_CHECK_EXCEPTION( call_test( *this, test_api_action{}, - 5000, 10 ), + 5000, 3 ), deadline_exception, is_deadline_exception ); BOOST_CHECK_EXCEPTION( call_test( *this, test_api_action{}, - 5000, 10 ), + 5000, 3 ), deadline_exception, is_deadline_exception ); BOOST_CHECK_EXCEPTION( call_test( *this, test_api_action{}, - 5000, 10 ), + 5000, 3 ), deadline_exception, is_deadline_exception ); BOOST_CHECK_EXCEPTION( call_test( *this, test_api_action{}, - 5000, 10 ), + 5000, 3 ), deadline_exception, is_deadline_exception ); BOOST_CHECK_EXCEPTION( call_test( *this, test_api_action{}, - 5000, 10 ), + 5000, 3 ), deadline_exception, is_deadline_exception ); BOOST_CHECK_EXCEPTION( call_test( *this, test_api_action{}, - 5000, 10 ), + 5000, 3 ), deadline_exception, is_deadline_exception ); BOOST_CHECK_EXCEPTION( call_test( *this, test_api_action{}, - 5000, 10 ), + 5000, 3 ), deadline_exception, is_deadline_exception ); BOOST_CHECK_EXCEPTION( call_test( *this, test_api_action{}, - 5000, 10 ), + 5000, 3 ), deadline_exception, is_deadline_exception ); BOOST_REQUIRE_EQUAL( validate(), true ); } FC_LOG_AND_RETHROW() } -/************************************************************************************* - * compiler_builtins_tests test case - *************************************************************************************/ -BOOST_FIXTURE_TEST_CASE(compiler_builtins_tests, TESTER) { try { - produce_blocks(2); - create_account( N(testapi) ); - produce_blocks(10); - set_code( N(testapi), contracts::test_api_wasm() ); - 
produce_blocks(1); - - // test test_multi3 - CALL_TEST_FUNCTION( *this, "test_compiler_builtins", "test_multi3", {}); - - // test test_divti3 - CALL_TEST_FUNCTION( *this, "test_compiler_builtins", "test_divti3", {}); - - // test test_divti3_by_0 - BOOST_CHECK_EXCEPTION(CALL_TEST_FUNCTION( *this, "test_compiler_builtins", "test_divti3_by_0", {}), arithmetic_exception, - [](const fc::exception& e) { - return expect_assert_message(e, "divide by zero"); - } - ); - - // test test_udivti3 - CALL_TEST_FUNCTION( *this, "test_compiler_builtins", "test_udivti3", {}); - - // test test_udivti3_by_0 - BOOST_CHECK_EXCEPTION(CALL_TEST_FUNCTION( *this, "test_compiler_builtins", "test_udivti3_by_0", {}), arithmetic_exception, - [](const fc::exception& e) { - return expect_assert_message(e, "divide by zero"); - } - ); - - // test test_modti3 - CALL_TEST_FUNCTION( *this, "test_compiler_builtins", "test_modti3", {}); - - // test test_modti3_by_0 - BOOST_CHECK_EXCEPTION(CALL_TEST_FUNCTION( *this, "test_compiler_builtins", "test_modti3_by_0", {}), arithmetic_exception, - [](const fc::exception& e) { - return expect_assert_message(e, "divide by zero"); - } - ); - - // test test_lshlti3 - CALL_TEST_FUNCTION( *this, "test_compiler_builtins", "test_lshlti3", {}); - - // test test_lshrti3 - CALL_TEST_FUNCTION( *this, "test_compiler_builtins", "test_lshrti3", {}); - - // test test_ashlti3 - CALL_TEST_FUNCTION( *this, "test_compiler_builtins", "test_ashlti3", {}); - - // test test_ashrti3 - CALL_TEST_FUNCTION( *this, "test_compiler_builtins", "test_ashrti3", {}); - - BOOST_REQUIRE_EQUAL( validate(), true ); -} FC_LOG_AND_RETHROW() } - - /************************************************************************************* * transaction_tests test case *************************************************************************************/ @@ -1152,7 +1151,7 @@ BOOST_FIXTURE_TEST_CASE(deferred_transaction_tests, TESTER) { try { produce_blocks( 3 ); //check that only one deferred transaction executed - auto dtrxs = control->get_scheduled_transactions(); + auto dtrxs = get_scheduled_transactions(); BOOST_CHECK_EQUAL(dtrxs.size(), 1); for (const auto& trx: dtrxs) { control->push_scheduled_transaction(trx, fc::time_point::maximum()); @@ -1177,7 +1176,7 @@ BOOST_FIXTURE_TEST_CASE(deferred_transaction_tests, TESTER) { try { produce_blocks( 3 ); //check that only one deferred transaction executed - auto dtrxs = control->get_scheduled_transactions(); + auto dtrxs = get_scheduled_transactions(); BOOST_CHECK_EQUAL(dtrxs.size(), 1); for (const auto& trx: dtrxs) { control->push_scheduled_transaction(trx, fc::time_point::maximum()); @@ -1362,82 +1361,103 @@ BOOST_FIXTURE_TEST_CASE(db_tests, TESTER) { try { create_account( N(testapi2) ); produce_blocks(10); set_code( N(testapi), contracts::test_api_db_wasm() ); + set_abi( N(testapi), contracts::test_api_db_abi().data() ); set_code( N(testapi2), contracts::test_api_db_wasm() ); + set_abi( N(testapi2), contracts::test_api_db_abi().data() ); produce_blocks(1); - CALL_TEST_FUNCTION( *this, "test_db", "primary_i64_general", {}); - CALL_TEST_FUNCTION( *this, "test_db", "primary_i64_lowerbound", {}); - CALL_TEST_FUNCTION( *this, "test_db", "primary_i64_upperbound", {}); - CALL_TEST_FUNCTION( *this, "test_db", "idx64_general", {}); - CALL_TEST_FUNCTION( *this, "test_db", "idx64_lowerbound", {}); - CALL_TEST_FUNCTION( *this, "test_db", "idx64_upperbound", {}); + push_action( N(testapi), N(pg), N(testapi), mutable_variant_object() ); // primary_i64_general + push_action( N(testapi), N(pl), 
N(testapi), mutable_variant_object() ); // primary_i64_lowerbound + push_action( N(testapi), N(pu), N(testapi), mutable_variant_object() ); // primary_i64_upperbound + push_action( N(testapi), N(s1g), N(testapi), mutable_variant_object() ); // idx64_general + push_action( N(testapi), N(s1l), N(testapi), mutable_variant_object() ); // idx64_lowerbound + push_action( N(testapi), N(s1u), N(testapi), mutable_variant_object() ); // idx64_upperbound // Store value in primary table - invalid_access_action ia1{.code = N(testapi), .val = 10, .index = 0, .store = true}; - auto res = push_action( action({{N(testapi), config::active_name}}, - N(testapi), WASM_TEST_ACTION("test_db", "test_invalid_access"), - fc::raw::pack(ia1)), - N(testapi) ); - BOOST_CHECK_EQUAL( res, success() ); + push_action( N(testapi), N(tia), N(testapi), mutable_variant_object() // test_invalid_access + ("code", "testapi") + ("val", 10) + ("index", 0) + ("store", true) + ); // Attempt to change the value stored in the primary table under the code of N(testapi) - invalid_access_action ia2{.code = ia1.code, .val = 20, .index = 0, .store = true}; - res = push_action( action({{N(testapi2), config::active_name}}, - N(testapi2), WASM_TEST_ACTION("test_db", "test_invalid_access"), - fc::raw::pack(ia2)), - N(testapi2) ); - wdump((res)); - BOOST_CHECK_EQUAL( boost::algorithm::ends_with(res, "db access violation"), true ); - + BOOST_CHECK_EXCEPTION( push_action( N(testapi2), N(tia), N(testapi2), mutable_variant_object() + ("code", "testapi") + ("val", "20") + ("index", 0) + ("store", true) + ), table_access_violation, + fc_exception_message_is("db access violation") + ); // Verify that the value has not changed. - ia1.store = false; - res = push_action( action({{N(testapi), config::active_name}}, - N(testapi), WASM_TEST_ACTION("test_db", "test_invalid_access"), - fc::raw::pack(ia1)), - N(testapi) ); - BOOST_CHECK_EQUAL( res, success() ); + push_action( N(testapi), N(tia), N(testapi), mutable_variant_object() + ("code", "testapi") + ("val", 10) + ("index", 0) + ("store", false) + ); // Store value in secondary table - ia1.store = true; ia1.index = 1; - res = push_action( action({{N(testapi), config::active_name}}, - N(testapi), WASM_TEST_ACTION("test_db", "test_invalid_access"), - fc::raw::pack(ia1)), - N(testapi) ); - BOOST_CHECK_EQUAL( res, success() ); + push_action( N(testapi), N(tia), N(testapi), mutable_variant_object() // test_invalid_access + ("code", "testapi") + ("val", 10) + ("index", 1) + ("store", true) + ); // Attempt to change the value stored in the secondary table under the code of N(testapi) - ia2.index = 1; - res = push_action( action({{N(testapi2), config::active_name}}, - N(testapi2), WASM_TEST_ACTION("test_db", "test_invalid_access"), - fc::raw::pack(ia2)), - N(testapi2) ); - BOOST_CHECK_EQUAL( boost::algorithm::ends_with(res, "db access violation"), true ); + BOOST_CHECK_EXCEPTION( push_action( N(testapi2), N(tia), N(testapi2), mutable_variant_object() + ("code", "testapi") + ("val", "20") + ("index", 1) + ("store", true) + ), table_access_violation, + fc_exception_message_is("db access violation") + ); // Verify that the value has not changed. 
- ia1.store = false; - res = push_action( action({{N(testapi), config::active_name}}, - N(testapi), WASM_TEST_ACTION("test_db", "test_invalid_access"), - fc::raw::pack(ia1)), - N(testapi) ); - BOOST_CHECK_EQUAL( res, success() ); - - CALL_TEST_FUNCTION_AND_CHECK_EXCEPTION( *this, "test_db", "idx_double_nan_create_fail", {}, - transaction_exception, "NaN is not an allowed value for a secondary key"); - CALL_TEST_FUNCTION_AND_CHECK_EXCEPTION( *this, "test_db", "idx_double_nan_modify_fail", {}, - transaction_exception, "NaN is not an allowed value for a secondary key"); - - uint32_t lookup_type = 0; // 0 for find, 1 for lower bound, and 2 for upper bound; - CALL_TEST_FUNCTION_AND_CHECK_EXCEPTION( *this, "test_db", "idx_double_nan_lookup_fail", fc::raw::pack(lookup_type), - transaction_exception, "NaN is not an allowed value for a secondary key"); - lookup_type = 1; - CALL_TEST_FUNCTION_AND_CHECK_EXCEPTION( *this, "test_db", "idx_double_nan_lookup_fail", fc::raw::pack(lookup_type), - transaction_exception, "NaN is not an allowed value for a secondary key"); - lookup_type = 2; - CALL_TEST_FUNCTION_AND_CHECK_EXCEPTION( *this, "test_db", "idx_double_nan_lookup_fail", fc::raw::pack(lookup_type), - transaction_exception, "NaN is not an allowed value for a secondary key"); - - CALL_TEST_FUNCTION( *this, "test_db", "misaligned_secondary_key256_tests", {}); + push_action( N(testapi), N(tia), N(testapi), mutable_variant_object() + ("code", "testapi") + ("val", 10) + ("index", 1) + ("store", false) + ); + + // idx_double_nan_create_fail + BOOST_CHECK_EXCEPTION( push_action( N(testapi), N(sdnancreate), N(testapi), mutable_variant_object() ), + transaction_exception, + fc_exception_message_is("NaN is not an allowed value for a secondary key") + ); + + // idx_double_nan_modify_fail + BOOST_CHECK_EXCEPTION( push_action( N(testapi), N(sdnanmodify), N(testapi), mutable_variant_object() ), + transaction_exception, + fc_exception_message_is("NaN is not an allowed value for a secondary key") + ); + + // idx_double_nan_lookup_fail + BOOST_CHECK_EXCEPTION( push_action( N(testapi), N(sdnanlookup), N(testapi), mutable_variant_object() + ("lookup_type", 0) // 0 for find + ), transaction_exception, + fc_exception_message_is("NaN is not an allowed value for a secondary key") + ); + + BOOST_CHECK_EXCEPTION( push_action( N(testapi), N(sdnanlookup), N(testapi), mutable_variant_object() + ("lookup_type", 1) // 1 for lower bound + ), transaction_exception, + fc_exception_message_is("NaN is not an allowed value for a secondary key") + ); + + BOOST_CHECK_EXCEPTION( push_action( N(testapi), N(sdnanlookup), N(testapi), mutable_variant_object() + ("lookup_type", 2) // 2 for upper bound + ), transaction_exception, + fc_exception_message_is("NaN is not an allowed value for a secondary key") + ); + + push_action( N(testapi), N(sk32align), N(testapi), mutable_variant_object() ); // misaligned_secondary_key256_tests + BOOST_REQUIRE_EQUAL( validate(), true ); } FC_LOG_AND_RETHROW() } @@ -1449,79 +1469,50 @@ BOOST_FIXTURE_TEST_CASE(multi_index_tests, TESTER) { try { create_account( N(testapi) ); produce_blocks(1); set_code( N(testapi), contracts::test_api_multi_index_wasm() ); + set_abi( N(testapi), contracts::test_api_multi_index_abi().data() ); produce_blocks(1); - CALL_TEST_FUNCTION( *this, "test_multi_index", "idx64_general", {}); - CALL_TEST_FUNCTION( *this, "test_multi_index", "idx64_store_only", {}); - CALL_TEST_FUNCTION( *this, "test_multi_index", "idx64_check_without_storing", {}); - CALL_TEST_FUNCTION( *this, 
"test_multi_index", "idx128_general", {}); - CALL_TEST_FUNCTION( *this, "test_multi_index", "idx128_store_only", {}); - CALL_TEST_FUNCTION( *this, "test_multi_index", "idx128_check_without_storing", {}); - CALL_TEST_FUNCTION( *this, "test_multi_index", "idx128_autoincrement_test", {}); - CALL_TEST_FUNCTION( *this, "test_multi_index", "idx128_autoincrement_test_part1", {}); - CALL_TEST_FUNCTION( *this, "test_multi_index", "idx128_autoincrement_test_part2", {}); - CALL_TEST_FUNCTION( *this, "test_multi_index", "idx256_general", {}); - CALL_TEST_FUNCTION( *this, "test_multi_index", "idx_double_general", {}); - CALL_TEST_FUNCTION( *this, "test_multi_index", "idx_long_double_general", {}); - CALL_TEST_FUNCTION_AND_CHECK_EXCEPTION( *this, "test_multi_index", "idx64_pk_iterator_exceed_end", {}, - eosio_assert_message_exception, "cannot increment end iterator"); - CALL_TEST_FUNCTION_AND_CHECK_EXCEPTION( *this, "test_multi_index", "idx64_sk_iterator_exceed_end", {}, - eosio_assert_message_exception, "cannot increment end iterator"); - CALL_TEST_FUNCTION_AND_CHECK_EXCEPTION( *this, "test_multi_index", "idx64_pk_iterator_exceed_begin", {}, - eosio_assert_message_exception, "cannot decrement iterator at beginning of table"); - CALL_TEST_FUNCTION_AND_CHECK_EXCEPTION( *this, "test_multi_index", "idx64_sk_iterator_exceed_begin", {}, - eosio_assert_message_exception, "cannot decrement iterator at beginning of index"); - CALL_TEST_FUNCTION_AND_CHECK_EXCEPTION( *this, "test_multi_index", "idx64_pass_pk_ref_to_other_table", {}, - eosio_assert_message_exception, "object passed to iterator_to is not in multi_index"); - CALL_TEST_FUNCTION_AND_CHECK_EXCEPTION( *this, "test_multi_index", "idx64_pass_sk_ref_to_other_table", {}, - eosio_assert_message_exception, "object passed to iterator_to is not in multi_index"); - CALL_TEST_FUNCTION_AND_CHECK_EXCEPTION( *this, "test_multi_index", "idx64_pass_pk_end_itr_to_iterator_to", {}, - eosio_assert_message_exception, "object passed to iterator_to is not in multi_index"); - CALL_TEST_FUNCTION_AND_CHECK_EXCEPTION( *this, "test_multi_index", "idx64_pass_pk_end_itr_to_modify", {}, - eosio_assert_message_exception, "cannot pass end iterator to modify"); - CALL_TEST_FUNCTION_AND_CHECK_EXCEPTION( *this, "test_multi_index", "idx64_pass_pk_end_itr_to_erase", {}, - eosio_assert_message_exception, "cannot pass end iterator to erase"); - CALL_TEST_FUNCTION_AND_CHECK_EXCEPTION( *this, "test_multi_index", "idx64_pass_sk_end_itr_to_iterator_to", {}, - eosio_assert_message_exception, "object passed to iterator_to is not in multi_index"); - CALL_TEST_FUNCTION_AND_CHECK_EXCEPTION( *this, "test_multi_index", "idx64_pass_sk_end_itr_to_modify", {}, - eosio_assert_message_exception, "cannot pass end iterator to modify"); - CALL_TEST_FUNCTION_AND_CHECK_EXCEPTION( *this, "test_multi_index", "idx64_pass_sk_end_itr_to_erase", {}, - eosio_assert_message_exception, "cannot pass end iterator to erase"); - CALL_TEST_FUNCTION_AND_CHECK_EXCEPTION( *this, "test_multi_index", "idx64_modify_primary_key", {}, - eosio_assert_message_exception, "updater cannot change primary key when modifying an object"); - CALL_TEST_FUNCTION_AND_CHECK_EXCEPTION( *this, "test_multi_index", "idx64_run_out_of_avl_pk", {}, - eosio_assert_message_exception, "next primary key in table is at autoincrement limit"); - CALL_TEST_FUNCTION_AND_CHECK_EXCEPTION( *this, "test_multi_index", "idx64_require_find_fail", {}, - eosio_assert_message_exception, "unable to find key"); - CALL_TEST_FUNCTION_AND_CHECK_EXCEPTION( *this, 
"test_multi_index", "idx64_require_find_fail_with_msg", {}, - eosio_assert_message_exception, "unable to find primary key in require_find"); - CALL_TEST_FUNCTION_AND_CHECK_EXCEPTION( *this, "test_multi_index", "idx64_require_find_sk_fail", {}, - eosio_assert_message_exception, "unable to find secondary key"); - CALL_TEST_FUNCTION_AND_CHECK_EXCEPTION( *this, "test_multi_index", "idx64_require_find_sk_fail_with_msg", {}, - eosio_assert_message_exception, "unable to find sec key"); - CALL_TEST_FUNCTION( *this, "test_multi_index", "idx64_sk_cache_pk_lookup", {}); - CALL_TEST_FUNCTION( *this, "test_multi_index", "idx64_pk_cache_sk_lookup", {}); - - BOOST_REQUIRE_EQUAL( validate(), true ); -} FC_LOG_AND_RETHROW() } - -/************************************************************************************* - * fixedpoint_tests test case - *************************************************************************************/ -BOOST_FIXTURE_TEST_CASE(fixedpoint_tests, TESTER) { try { - produce_blocks(2); - create_account( N(testapi) ); - produce_blocks(10); - set_code( N(testapi), contracts::test_api_wasm() ); - produce_blocks(10); + auto check_failure = [this]( action_name a, const char* expected_error_msg ) { + BOOST_CHECK_EXCEPTION( push_action( N(testapi), a, N(testapi), {} ), + eosio_assert_message_exception, + eosio_assert_message_is( expected_error_msg ) + ); + }; - CALL_TEST_FUNCTION( *this, "test_fixedpoint", "create_instances", {}); - CALL_TEST_FUNCTION( *this, "test_fixedpoint", "test_addition", {}); - CALL_TEST_FUNCTION( *this, "test_fixedpoint", "test_subtraction", {}); - CALL_TEST_FUNCTION( *this, "test_fixedpoint", "test_multiplication", {}); - CALL_TEST_FUNCTION( *this, "test_fixedpoint", "test_division", {}); - CALL_TEST_FUNCTION_AND_CHECK_EXCEPTION( *this, "test_fixedpoint", "test_division_by_0", {}, - eosio_assert_message_exception, "divide by zero" ); + push_action( N(testapi), N(s1g), N(testapi), {} ); // idx64_general + push_action( N(testapi), N(s1store), N(testapi), {} ); // idx64_store_only + push_action( N(testapi), N(s1check), N(testapi), {} ); // idx64_check_without_storing + push_action( N(testapi), N(s2g), N(testapi), {} ); // idx128_general + push_action( N(testapi), N(s2store), N(testapi), {} ); // idx128_store_only + push_action( N(testapi), N(s2check), N(testapi), {} ); // idx128_check_without_storing + push_action( N(testapi), N(s2autoinc), N(testapi), {} ); // idx128_autoincrement_test + push_action( N(testapi), N(s2autoinc1), N(testapi), {} ); // idx128_autoincrement_test_part1 + push_action( N(testapi), N(s2autoinc2), N(testapi), {} ); // idx128_autoincrement_test_part2 + push_action( N(testapi), N(s3g), N(testapi), {} ); // idx256_general + push_action( N(testapi), N(sdg), N(testapi), {} ); // idx_double_general + push_action( N(testapi), N(sldg), N(testapi), {} ); // idx_long_double_general + + check_failure( N(s1pkend), "cannot increment end iterator" ); // idx64_pk_iterator_exceed_end + check_failure( N(s1skend), "cannot increment end iterator" ); // idx64_sk_iterator_exceed_end + check_failure( N(s1pkbegin), "cannot decrement iterator at beginning of table" ); // idx64_pk_iterator_exceed_begin + check_failure( N(s1skbegin), "cannot decrement iterator at beginning of index" ); // idx64_sk_iterator_exceed_begin + check_failure( N(s1pkref), "object passed to iterator_to is not in multi_index" ); // idx64_pass_pk_ref_to_other_table + check_failure( N(s1skref), "object passed to iterator_to is not in multi_index" ); // idx64_pass_sk_ref_to_other_table + 
check_failure( N(s1pkitrto), "object passed to iterator_to is not in multi_index" ); // idx64_pass_pk_end_itr_to_iterator_to + check_failure( N(s1pkmodify), "cannot pass end iterator to modify" ); // idx64_pass_pk_end_itr_to_modify + check_failure( N(s1pkerase), "cannot pass end iterator to erase" ); // idx64_pass_pk_end_itr_to_erase + check_failure( N(s1skitrto), "object passed to iterator_to is not in multi_index" ); // idx64_pass_sk_end_itr_to_iterator_to + check_failure( N(s1skmodify), "cannot pass end iterator to modify" ); // idx64_pass_sk_end_itr_to_modify + check_failure( N(s1skerase), "cannot pass end iterator to erase" ); // idx64_pass_sk_end_itr_to_erase + check_failure( N(s1modpk), "updater cannot change primary key when modifying an object" ); // idx64_modify_primary_key + check_failure( N(s1exhaustpk), "next primary key in table is at autoincrement limit" ); // idx64_run_out_of_avl_pk + check_failure( N(s1findfail1), "unable to find key" ); // idx64_require_find_fail + check_failure( N(s1findfail2), "unable to find primary key in require_find" );// idx64_require_find_fail_with_msg + check_failure( N(s1findfail3), "unable to find secondary key" ); // idx64_require_find_sk_fail + check_failure( N(s1findfail4), "unable to find sec key" ); // idx64_require_find_sk_fail_with_msg + + push_action( N(testapi), N(s1skcache), N(testapi), {} ); // idx64_sk_cache_pk_lookup + push_action( N(testapi), N(s1pkcache), N(testapi), {} ); // idx64_pk_cache_sk_lookup BOOST_REQUIRE_EQUAL( validate(), true ); } FC_LOG_AND_RETHROW() } @@ -1561,7 +1552,7 @@ BOOST_FIXTURE_TEST_CASE(crypto_tests, TESTER) { try { crypto_api_exception, fc_exception_message_is("Error expected key different than recovered key") ); } - + CALL_TEST_FUNCTION( *this, "test_crypto", "test_sha1", {} ); CALL_TEST_FUNCTION( *this, "test_crypto", "test_sha256", {} ); @@ -1574,7 +1565,7 @@ BOOST_FIXTURE_TEST_CASE(crypto_tests, TESTER) { try { CALL_TEST_FUNCTION_AND_CHECK_EXCEPTION( *this, "test_crypto", "assert_sha256_false", {}, crypto_api_exception, "hash mismatch" ); - + CALL_TEST_FUNCTION( *this, "test_crypto", "assert_sha256_true", {} ); CALL_TEST_FUNCTION_AND_CHECK_EXCEPTION( *this, "test_crypto", "assert_sha1_false", {}, @@ -1595,118 +1586,6 @@ BOOST_FIXTURE_TEST_CASE(crypto_tests, TESTER) { try { BOOST_REQUIRE_EQUAL( validate(), true ); } FC_LOG_AND_RETHROW() } - -/************************************************************************************* - * memory_tests test cases - *************************************************************************************/ -BOOST_FIXTURE_TEST_CASE(memory_tests, TESTER) { try { - produce_blocks(1000); - create_account(N(testapi) ); - produce_blocks(1000); - set_code(N(testapi), contracts::test_api_mem_wasm() ); - produce_blocks(1000); - - CALL_TEST_FUNCTION( *this, "test_memory", "test_memory_allocs", {} ); - produce_blocks(1000); - CALL_TEST_FUNCTION( *this, "test_memory", "test_memory_hunk", {} ); - produce_blocks(1000); - CALL_TEST_FUNCTION( *this, "test_memory", "test_memory_hunks", {} ); - produce_blocks(1000); - //Disabling this for now as it fails due to malloc changes for variable wasm max memory sizes -#if 0 - CALL_TEST_FUNCTION( *this, "test_memory", "test_memory_hunks_disjoint", {} ); - produce_blocks(1000); -#endif - CALL_TEST_FUNCTION( *this, "test_memory", "test_memset_memcpy", {} ); - produce_blocks(1000); - BOOST_CHECK_THROW( CALL_TEST_FUNCTION( *this, "test_memory", "test_memcpy_overlap_start", {} ), overlapping_memory_error ); - produce_blocks(1000); - 
BOOST_CHECK_THROW( CALL_TEST_FUNCTION( *this, "test_memory", "test_memcpy_overlap_end", {} ), overlapping_memory_error ); - produce_blocks(1000); - CALL_TEST_FUNCTION( *this, "test_memory", "test_memcmp", {} ); - produce_blocks(1000); - -#define test_memory_oob(func) \ - try { \ - CALL_TEST_FUNCTION( *this, "test_memory", func, {} ); \ - BOOST_FAIL("assert failed in test out of bound memory in " func); \ - } catch (...) { \ - BOOST_REQUIRE_EQUAL(true, true); \ - } - -#define test_memory_oob2(func) \ - try { \ - CALL_TEST_FUNCTION( *this, "test_memory", func, {} );\ - } catch (const fc::exception& e) {\ - if (!expect_assert_message(e, "access violation")) throw; \ - } - - test_memory_oob("test_outofbound_0"); - test_memory_oob("test_outofbound_1"); - test_memory_oob("test_outofbound_2"); - test_memory_oob("test_outofbound_3"); - test_memory_oob("test_outofbound_4"); - test_memory_oob("test_outofbound_5"); - test_memory_oob("test_outofbound_6"); - test_memory_oob("test_outofbound_7"); - test_memory_oob("test_outofbound_8"); - test_memory_oob("test_outofbound_9"); - test_memory_oob("test_outofbound_10"); - test_memory_oob("test_outofbound_11"); - test_memory_oob("test_outofbound_12"); - test_memory_oob("test_outofbound_13"); - - BOOST_REQUIRE_EQUAL( validate(), true ); -} FC_LOG_AND_RETHROW() } - - -/************************************************************************************* - * extended_memory_tests test cases - *************************************************************************************/ -BOOST_FIXTURE_TEST_CASE(extended_memory_test_initial_memory, TESTER) { try { - produce_blocks(1000); - create_account(N(testapi) ); - produce_blocks(1000); - set_code(N(testapi), contracts::test_api_mem_wasm() ); - produce_blocks(1000); - CALL_TEST_FUNCTION( *this, "test_extended_memory", "test_initial_buffer", {} ); - - BOOST_REQUIRE_EQUAL( validate(), true ); -} FC_LOG_AND_RETHROW() } - -BOOST_FIXTURE_TEST_CASE(extended_memory_test_page_memory, TESTER) { try { - produce_blocks(1000); - create_account(N(testapi) ); - produce_blocks(1000); - set_code(N(testapi), contracts::test_api_mem_wasm() ); - produce_blocks(1000); - CALL_TEST_FUNCTION( *this, "test_extended_memory", "test_page_memory", {} ); - - BOOST_REQUIRE_EQUAL( validate(), true ); -} FC_LOG_AND_RETHROW() } - -BOOST_FIXTURE_TEST_CASE(extended_memory_test_page_memory_exceeded, TESTER) { try { - produce_blocks(1000); - create_account(N(testapi) ); - produce_blocks(1000); - set_code(N(testapi), contracts::test_api_mem_wasm() ); - produce_blocks(1000); - CALL_TEST_FUNCTION( *this, "test_extended_memory", "test_page_memory_exceeded", {} ); - - BOOST_REQUIRE_EQUAL( validate(), true ); -} FC_LOG_AND_RETHROW() } - -BOOST_FIXTURE_TEST_CASE(extended_memory_test_page_memory_negative_bytes, TESTER) { try { - produce_blocks(1000); - create_account(N(testapi) ); - produce_blocks(1000); - set_code(N(testapi), contracts::test_api_mem_wasm() ); - produce_blocks(1000); - CALL_TEST_FUNCTION( *this, "test_extended_memory", "test_page_memory_negative_bytes", {} ); - - BOOST_REQUIRE_EQUAL( validate(), true ); -} FC_LOG_AND_RETHROW() } - /************************************************************************************* * print_tests test case *************************************************************************************/ @@ -1747,22 +1626,22 @@ BOOST_FIXTURE_TEST_CASE(print_tests, TESTER) { try { // test printn auto tx5_trace = CALL_TEST_FUNCTION( *this, "test_print", "test_printn", {} ); auto tx5_act_cnsl = 
tx5_trace->action_traces.front().console; - + BOOST_CHECK_EQUAL( tx5_act_cnsl.substr(0,1), "1" ); BOOST_CHECK_EQUAL( tx5_act_cnsl.substr(1,1), "5" ); BOOST_CHECK_EQUAL( tx5_act_cnsl.substr(2,1), "a" ); BOOST_CHECK_EQUAL( tx5_act_cnsl.substr(3,1), "z" ); - + BOOST_CHECK_EQUAL( tx5_act_cnsl.substr(4,3), "abc" ); BOOST_CHECK_EQUAL( tx5_act_cnsl.substr(7,3), "123" ); - + BOOST_CHECK_EQUAL( tx5_act_cnsl.substr(10,7), "abc.123" ); BOOST_CHECK_EQUAL( tx5_act_cnsl.substr(17,7), "123.abc" ); - + BOOST_CHECK_EQUAL( tx5_act_cnsl.substr(24,13), "12345abcdefgj" ); BOOST_CHECK_EQUAL( tx5_act_cnsl.substr(37,13), "ijklmnopqrstj" ); BOOST_CHECK_EQUAL( tx5_act_cnsl.substr(50,13), "vwxyz.12345aj" ); - + BOOST_CHECK_EQUAL( tx5_act_cnsl.substr(63, 13), "111111111111j" ); BOOST_CHECK_EQUAL( tx5_act_cnsl.substr(76, 13), "555555555555j" ); BOOST_CHECK_EQUAL( tx5_act_cnsl.substr(89, 13), "aaaaaaaaaaaaj" ); @@ -1941,7 +1820,7 @@ BOOST_FIXTURE_TEST_CASE(permission_tests, TESTER) { try { }) ); BOOST_CHECK_EQUAL( int64_t(0), get_result_int64() ); - + } FC_LOG_AND_RETHROW() } #if 0 @@ -2023,45 +1902,6 @@ BOOST_FIXTURE_TEST_CASE(datastream_tests, TESTER) { try { BOOST_REQUIRE_EQUAL( validate(), true ); } FC_LOG_AND_RETHROW() } -/************************************************************************************* - * new api feature test - *************************************************************************************/ -BOOST_FIXTURE_TEST_CASE(new_api_feature_tests, TESTER) { try { - - produce_blocks(1); - create_account(N(testapi) ); - produce_blocks(1); - set_code(N(testapi), contracts::test_api_wasm() ); - produce_blocks(1); - - BOOST_CHECK_EXCEPTION( CALL_TEST_FUNCTION( *this, "test_transaction", "new_feature", {} ), - unaccessible_api, - [](const fc::exception& e) { - return expect_assert_message(e, "testapi does not have permission to call this API"); - }); - - BOOST_CHECK_EXCEPTION( CALL_TEST_FUNCTION( *this, "test_transaction", "active_new_feature", {} ), - unaccessible_api, - [](const fc::exception& e) { - return expect_assert_message(e, "testapi does not have permission to call this API"); - }); - - // change privilege - push_action(config::system_account_name, N(setpriv), config::system_account_name, mutable_variant_object() - ("account", "testapi") - ("is_priv", 1)); - - CALL_TEST_FUNCTION( *this, "test_transaction", "new_feature", {} ); - - BOOST_CHECK_EXCEPTION( CALL_TEST_FUNCTION( *this, "test_transaction", "active_new_feature", {} ), - unsupported_feature, - [](const fc::exception& e) { - return expect_assert_message(e, "Unsupported Hardfork Detected"); - }); - - BOOST_REQUIRE_EQUAL( validate(), true ); -} FC_LOG_AND_RETHROW() } - /************************************************************************************* * permission_usage_tests test cases *************************************************************************************/ diff --git a/unittests/auth_tests.cpp b/unittests/auth_tests.cpp index c0566654824..a238c7246a3 100644 --- a/unittests/auth_tests.cpp +++ b/unittests/auth_tests.cpp @@ -119,9 +119,9 @@ try { BOOST_TEST(obj->parent == 0); owner_id = obj->id; auto auth = obj->auth.to_authority(); - BOOST_TEST(auth.threshold == 1); - BOOST_TEST(auth.keys.size() == 1); - BOOST_TEST(auth.accounts.size() == 0); + BOOST_TEST(auth.threshold == 1u); + BOOST_TEST(auth.keys.size() == 1u); + BOOST_TEST(auth.accounts.size() == 0u); BOOST_TEST(auth.keys[0].key == new_owner_pub_key); BOOST_TEST(auth.keys[0].weight == 1); } @@ -140,11 +140,11 @@ try { BOOST_TEST(obj->name == "active"); 
BOOST_TEST(obj->parent == owner_id); auto auth = obj->auth.to_authority(); - BOOST_TEST(auth.threshold == 1); - BOOST_TEST(auth.keys.size() == 1); - BOOST_TEST(auth.accounts.size() == 0); + BOOST_TEST(auth.threshold == 1u); + BOOST_TEST(auth.keys.size() == 1u); + BOOST_TEST(auth.accounts.size() == 0u); BOOST_TEST(auth.keys[0].key == new_active_pub_key); - BOOST_TEST(auth.keys[0].weight == 1); + BOOST_TEST(auth.keys[0].weight == 1u); } auto spending_priv_key = chain.get_private_key("alice", "spending"); @@ -302,18 +302,18 @@ try { // Verify account created properly const auto& joe_owner_authority = chain.get(boost::make_tuple("joe", "owner")); - BOOST_TEST(joe_owner_authority.auth.threshold == 1); - BOOST_TEST(joe_owner_authority.auth.accounts.size() == 1); - BOOST_TEST(joe_owner_authority.auth.keys.size() == 1); + BOOST_TEST(joe_owner_authority.auth.threshold == 1u); + BOOST_TEST(joe_owner_authority.auth.accounts.size() == 1u); + BOOST_TEST(joe_owner_authority.auth.keys.size() == 1u); BOOST_TEST(string(joe_owner_authority.auth.keys[0].key) == string(chain.get_public_key("joe", "owner"))); - BOOST_TEST(joe_owner_authority.auth.keys[0].weight == 1); + BOOST_TEST(joe_owner_authority.auth.keys[0].weight == 1u); const auto& joe_active_authority = chain.get(boost::make_tuple("joe", "active")); - BOOST_TEST(joe_active_authority.auth.threshold == 1); - BOOST_TEST(joe_active_authority.auth.accounts.size() == 1); - BOOST_TEST(joe_active_authority.auth.keys.size() == 1); + BOOST_TEST(joe_active_authority.auth.threshold == 1u); + BOOST_TEST(joe_active_authority.auth.accounts.size() == 1u); + BOOST_TEST(joe_active_authority.auth.keys.size() == 1u); BOOST_TEST(string(joe_active_authority.auth.keys[0].key) == string(chain.get_public_key("joe", "active"))); - BOOST_TEST(joe_active_authority.auth.keys[0].weight == 1); + BOOST_TEST(joe_active_authority.auth.keys[0].weight == 1u); // Create duplicate name BOOST_CHECK_EXCEPTION(chain.create_account("joe"), action_validate_exception, @@ -419,8 +419,8 @@ try { const auto &usage2 = db.get(acc1a); - BOOST_TEST(usage.cpu_usage.average() > 0); - BOOST_TEST(usage.net_usage.average() > 0); + BOOST_TEST(usage.cpu_usage.average() > 0U); + BOOST_TEST(usage.net_usage.average() > 0U); BOOST_REQUIRE_EQUAL(usage.cpu_usage.average(), usage2.cpu_usage.average()); BOOST_REQUIRE_EQUAL(usage.net_usage.average(), usage2.net_usage.average()); chain.produce_block(); diff --git a/unittests/block_timestamp_tests.cpp b/unittests/block_timestamp_tests.cpp index d7f9b1289c4..81e513189f9 100644 --- a/unittests/block_timestamp_tests.cpp +++ b/unittests/block_timestamp_tests.cpp @@ -17,11 +17,11 @@ BOOST_AUTO_TEST_SUITE(block_timestamp_tests) BOOST_AUTO_TEST_CASE(constructor_test) { block_timestamp_type bt; - BOOST_TEST( bt.slot == 0, "Default constructor gives wrong value"); + BOOST_TEST( bt.slot == 0u, "Default constructor gives wrong value"); fc::time_point t(fc::seconds(978307200)); block_timestamp_type bt2(t); - BOOST_TEST( bt2.slot == (978307200 - 946684800)*2, "Time point constructor gives wrong value"); + BOOST_TEST( bt2.slot == (978307200u - 946684800u)*2, "Time point constructor gives wrong value"); } BOOST_AUTO_TEST_CASE(conversion_test) { diff --git a/unittests/bootseq_tests.cpp b/unittests/bootseq_tests.cpp index b12978961a2..a3df02b652c 100644 --- a/unittests/bootseq_tests.cpp +++ b/unittests/bootseq_tests.cpp @@ -285,7 +285,7 @@ BOOST_FIXTURE_TEST_CASE( bootseq_test, bootseq_tester ) { // No producers will be set, since the total activated stake is less than 150,000,000 
produce_blocks_for_n_rounds(2); // 2 rounds since new producer schedule is set when the first block of next round is irreversible auto active_schedule = control->head_block_state()->active_schedule; - BOOST_TEST(active_schedule.producers.size() == 1); + BOOST_TEST(active_schedule.producers.size() == 1u); BOOST_TEST(active_schedule.producers.front().producer_name == "eosio"); // Spend some time so the producer pay pool is filled by the inflation rate diff --git a/unittests/contracts.hpp.in b/unittests/contracts.hpp.in index e04730ec98f..8dd1f2b4dcf 100644 --- a/unittests/contracts.hpp.in +++ b/unittests/contracts.hpp.in @@ -37,19 +37,16 @@ namespace eosio { MAKE_READ_WASM_ABI(eosio_wrap, eosio.wrap, contracts) // Contracts in `eos/unittests/unittests/test-contracts' directory - MAKE_READ_WASM_ABI(deferred_test, deferred_test, test-contracts) MAKE_READ_WASM_ABI(asserter, asserter, test-contracts) - MAKE_READ_WASM_ABI(integration_test, integration_test, test-contracts) - MAKE_READ_WASM_ABI(multi_index_test, multi_index_test, test-contracts) + MAKE_READ_WASM_ABI(deferred_test, deferred_test, test-contracts) MAKE_READ_WASM_ABI(noop, noop, test-contracts) MAKE_READ_WASM_ABI(payloadless, payloadless, test-contracts) MAKE_READ_WASM_ABI(proxy, proxy, test-contracts) MAKE_READ_WASM_ABI(snapshot_test, snapshot_test, test-contracts) - MAKE_READ_WASM_ABI(test_ram_limit, test_ram_limit, test-contracts) MAKE_READ_WASM_ABI(test_api, test_api, test-contracts) MAKE_READ_WASM_ABI(test_api_db, test_api_db, test-contracts) MAKE_READ_WASM_ABI(test_api_multi_index, test_api_multi_index, test-contracts) - MAKE_READ_WASM_ABI(test_api_mem, test_api_mem, test-contracts) + MAKE_READ_WASM_ABI(test_ram_limit, test_ram_limit, test-contracts) }; } /// eosio::testing } /// eosio diff --git a/unittests/currency_tests.cpp b/unittests/currency_tests.cpp index 2d94d19f30f..0485d0a4681 100644 --- a/unittests/currency_tests.cpp +++ b/unittests/currency_tests.cpp @@ -417,16 +417,16 @@ BOOST_FIXTURE_TEST_CASE( test_proxy, currency_tester ) try { action setowner_act; setowner_act.account = N(proxy); setowner_act.name = N(setowner); - setowner_act.authorization = vector{{N(alice), config::active_name}}; + setowner_act.authorization = vector{{N(proxy), config::active_name}}; setowner_act.data = proxy_abi_ser.variant_to_binary("setowner", mutable_variant_object() ("owner", "alice") ("delay", 10), abi_serializer_max_time ); trx.actions.emplace_back(std::move(setowner_act)); - + set_transaction_headers(trx); - trx.sign(get_private_key(N(alice), "active"), control->get_chain_id()); + trx.sign(get_private_key(N(proxy), "active"), control->get_chain_id()); push_transaction(trx); produce_block(); BOOST_REQUIRE_EQUAL(true, chain_has_transaction(trx.id())); @@ -473,7 +473,7 @@ BOOST_FIXTURE_TEST_CASE( test_deferred_failure, currency_tester ) try { action setowner_act; setowner_act.account = N(proxy); setowner_act.name = N(setowner); - setowner_act.authorization = vector{{N(bob), config::active_name}}; + setowner_act.authorization = vector{{N(proxy), config::active_name}}; setowner_act.data = proxy_abi_ser.variant_to_binary("setowner", mutable_variant_object() ("owner", "bob") ("delay", 10), @@ -482,7 +482,7 @@ BOOST_FIXTURE_TEST_CASE( test_deferred_failure, currency_tester ) try { trx.actions.emplace_back(std::move(setowner_act)); set_transaction_headers(trx); - trx.sign(get_private_key(N(bob), "active"), control->get_chain_id()); + trx.sign(get_private_key(N(proxy), "active"), control->get_chain_id()); push_transaction(trx); 
produce_block(); BOOST_REQUIRE_EQUAL(true, chain_has_transaction(trx.id())); @@ -506,7 +506,6 @@ BOOST_FIXTURE_TEST_CASE( test_deferred_failure, currency_tester ) try { produce_block(); BOOST_REQUIRE_EQUAL(get_balance( N(proxy)), asset::from_string("5.0000 CUR")); BOOST_REQUIRE_EQUAL(get_balance( N(bob)), asset::from_string("0.0000 CUR")); - BOOST_REQUIRE_EQUAL(get_balance( N(bob)), asset::from_string("0.0000 CUR")); BOOST_REQUIRE_EQUAL(1, index.size()); BOOST_REQUIRE_EQUAL(false, chain_has_transaction(deferred_id)); } @@ -525,7 +524,7 @@ BOOST_FIXTURE_TEST_CASE( test_deferred_failure, currency_tester ) try { action setowner_act; setowner_act.account = N(bob); setowner_act.name = N(setowner); - setowner_act.authorization = vector{{N(alice), config::active_name}}; + setowner_act.authorization = vector{{N(bob), config::active_name}}; setowner_act.data = proxy_abi_ser.variant_to_binary("setowner", mutable_variant_object() ("owner", "alice") ("delay", 0), @@ -534,7 +533,7 @@ BOOST_FIXTURE_TEST_CASE( test_deferred_failure, currency_tester ) try { trx.actions.emplace_back(std::move(setowner_act)); set_transaction_headers(trx); - trx.sign(get_private_key(N(alice), "active"), control->get_chain_id()); + trx.sign(get_private_key(N(bob), "active"), control->get_chain_id()); push_transaction(trx); produce_block(); BOOST_REQUIRE_EQUAL(true, chain_has_transaction(trx.id())); diff --git a/unittests/delay_tests.cpp b/unittests/delay_tests.cpp index 523555bb936..913f3395e8e 100644 --- a/unittests/delay_tests.cpp +++ b/unittests/delay_tests.cpp @@ -77,8 +77,9 @@ BOOST_FIXTURE_TEST_CASE( delay_error_create_account, validating_tester) { try { produce_blocks(6); - auto scheduled_trxs = control->get_scheduled_transactions(); - BOOST_REQUIRE_EQUAL(scheduled_trxs.size(), 1); + auto scheduled_trxs = get_scheduled_transactions(); + BOOST_REQUIRE_EQUAL(scheduled_trxs.size(), 1u); + auto dtrace = control->push_scheduled_transaction(scheduled_trxs.front(), fc::time_point::maximum()); BOOST_REQUIRE_EQUAL(dtrace->except.valid(), true); BOOST_REQUIRE_EQUAL(dtrace->except->code(), missing_auth_exception::code_value); @@ -142,7 +143,7 @@ BOOST_AUTO_TEST_CASE( link_delay_direct_test ) { try { ); BOOST_REQUIRE_EQUAL(transaction_receipt::executed, trace->receipt->status); auto gen_size = chain.control->db().get_index().size(); - BOOST_REQUIRE_EQUAL(0, gen_size); + BOOST_REQUIRE_EQUAL(0u, gen_size); chain.produce_blocks(); diff --git a/unittests/forked_tests.cpp b/unittests/forked_tests.cpp index d2981e3249c..6e412bfbccc 100644 --- a/unittests/forked_tests.cpp +++ b/unittests/forked_tests.cpp @@ -145,7 +145,7 @@ BOOST_AUTO_TEST_CASE( fork_with_bad_block ) try { BOOST_TEST_CONTEXT("Testing Fork: " << i) { const auto& fork = forks.at(i); // push the fork to the original node - for (int fidx = 0; fidx < fork.blocks.size() - 1; fidx++) { + for (size_t fidx = 0; fidx < fork.blocks.size() - 1; fidx++) { const auto& b = fork.blocks.at(fidx); // push the block only if its not known already if (!bios.control->fetch_block_by_id(b->id())) { @@ -331,10 +331,10 @@ BOOST_AUTO_TEST_CASE( prune_remove_branch ) try { push_blocks(c, c2); // fork happen after block 61 - BOOST_REQUIRE_EQUAL(61, c.control->head_block_num()); - BOOST_REQUIRE_EQUAL(61, c2.control->head_block_num()); + BOOST_REQUIRE_EQUAL(61u, c.control->head_block_num()); + BOOST_REQUIRE_EQUAL(61u, c2.control->head_block_num()); - int fork_num = c.control->head_block_num(); + uint32_t fork_num = c.control->head_block_num(); auto nextproducer = [](tester &c, int skip_interval) 
->account_name { auto head_time = c.control->head_block_time(); @@ -358,17 +358,18 @@ BOOST_AUTO_TEST_CASE( prune_remove_branch ) try { else ++skip2; } - BOOST_REQUIRE_EQUAL(87, c.control->head_block_num()); - BOOST_REQUIRE_EQUAL(73, c2.control->head_block_num()); + BOOST_REQUIRE_EQUAL(87u, c.control->head_block_num()); + BOOST_REQUIRE_EQUAL(73u, c2.control->head_block_num()); // push fork from c2 => c - int p = fork_num; + size_t p = fork_num; + while ( p < c2.control->head_block_num()) { auto fb = c2.control->fetch_block_by_number(++p); c.push_block(fb); } - BOOST_REQUIRE_EQUAL(73, c.control->head_block_num()); + BOOST_REQUIRE_EQUAL(73u, c.control->head_block_num()); } FC_LOG_AND_RETHROW() diff --git a/unittests/include/config.hpp.in b/unittests/include/config.hpp.in deleted file mode 100644 index 8483e4dd73c..00000000000 --- a/unittests/include/config.hpp.in +++ /dev/null @@ -1,11 +0,0 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ - -namespace eosio { namespace unittests { namespace config { - constexpr char eosiolib_path[] = "${CMAKE_CURRENT_SOURCE_DIR}/../contracts"; - constexpr char core_symbol_path[] = "${CMAKE_BINARY_DIR}/contracts"; - constexpr char pfr_include_path[] = "${CMAKE_CURRENT_SOURCE_DIR}/../externals/magic_get/include"; - constexpr char boost_include_path[] = "${Boost_INCLUDE_DIR}"; -}}} diff --git a/unittests/message_buffer_tests.cpp b/unittests/message_buffer_tests.cpp index 7209bda8ce4..f810a80d2be 100644 --- a/unittests/message_buffer_tests.cpp +++ b/unittests/message_buffer_tests.cpp @@ -29,8 +29,8 @@ void* mb_data(boost::asio::mutable_buffer& mb) { BOOST_AUTO_TEST_SUITE(message_buffer_tests) -constexpr auto def_buffer_size_mb = 4; -constexpr auto def_buffer_size = 1024*1024*def_buffer_size_mb; +constexpr size_t def_buffer_size_mb = 4; +constexpr size_t def_buffer_size = 1024*1024*def_buffer_size_mb; /// Test default construction and buffer sequence generation BOOST_AUTO_TEST_CASE(message_buffer_construction) @@ -39,7 +39,7 @@ BOOST_AUTO_TEST_CASE(message_buffer_construction) fc::message_buffer mb; BOOST_CHECK_EQUAL(mb.total_bytes(), def_buffer_size); BOOST_CHECK_EQUAL(mb.bytes_to_write(), def_buffer_size); - BOOST_CHECK_EQUAL(mb.bytes_to_read(), 0); + BOOST_CHECK_EQUAL(mb.bytes_to_read(), 0u); BOOST_CHECK_EQUAL(mb.read_ptr(), mb.write_ptr()); auto mbs = mb.get_buffer_sequence_for_boost_async_read(); @@ -60,7 +60,7 @@ BOOST_AUTO_TEST_CASE(message_buffer_growth) mb.add_buffer_to_chain(); BOOST_CHECK_EQUAL(mb.total_bytes(), 2 * def_buffer_size); BOOST_CHECK_EQUAL(mb.bytes_to_write(), 2 * def_buffer_size); - BOOST_CHECK_EQUAL(mb.bytes_to_read(), 0); + BOOST_CHECK_EQUAL(mb.bytes_to_read(), 0u); BOOST_CHECK_EQUAL(mb.read_ptr(), mb.write_ptr()); { @@ -79,7 +79,7 @@ BOOST_AUTO_TEST_CASE(message_buffer_growth) mb.advance_write_ptr(100); BOOST_CHECK_EQUAL(mb.total_bytes(), 2 * def_buffer_size); BOOST_CHECK_EQUAL(mb.bytes_to_write(), 2 * def_buffer_size - 100); - BOOST_CHECK_EQUAL(mb.bytes_to_read(), 100); + BOOST_CHECK_EQUAL(mb.bytes_to_read(), 100u); BOOST_CHECK_NE(mb.read_ptr(), nullptr); BOOST_CHECK_NE(mb.write_ptr(), nullptr); BOOST_CHECK_EQUAL((mb.read_ptr() + 100), mb.write_ptr()); @@ -100,7 +100,7 @@ BOOST_AUTO_TEST_CASE(message_buffer_growth) mb.advance_read_ptr(50); BOOST_CHECK_EQUAL(mb.total_bytes(), 2 * def_buffer_size); BOOST_CHECK_EQUAL(mb.bytes_to_write(), 2 * def_buffer_size - 100); - BOOST_CHECK_EQUAL(mb.bytes_to_read(), 50); + BOOST_CHECK_EQUAL(mb.bytes_to_read(), 50u); mb.advance_write_ptr(def_buffer_size); 
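The message_buffer assertions in these hunks all reduce to one contract: advance_write_ptr publishes staged bytes (bytes_to_read grows), advance_read_ptr consumes them, and once the read cursor catches the write cursor the chain shrinks and both pointers reset. A compact usage sketch using only calls that appear in the test (the fc header path is an assumption):

#include <fc/network/message_buffer.hpp> // assumed location of fc::message_buffer
#include <cstring>

void message_buffer_usage_sketch() {
   fc::message_buffer<1024> mb;                            // chain starts with one 1 KiB buffer
   const char payload[] = "ping";
   std::memcpy(mb.write_ptr(), payload, sizeof(payload));  // stage bytes at the write cursor
   mb.advance_write_ptr(sizeof(payload));                  // publish: bytes_to_read() == sizeof(payload)

   char out[sizeof(payload)] = {};
   std::memcpy(out, mb.read_ptr(), mb.bytes_to_read());    // read everything currently published
   mb.advance_read_ptr(sizeof(payload));                   // consume: read catches write, chain resets
}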
BOOST_CHECK_EQUAL(mb.total_bytes(), 2 * def_buffer_size); @@ -111,29 +111,29 @@ BOOST_AUTO_TEST_CASE(message_buffer_growth) mb.advance_read_ptr(def_buffer_size); BOOST_CHECK_EQUAL(mb.total_bytes(), def_buffer_size); BOOST_CHECK_EQUAL(mb.bytes_to_write(), def_buffer_size - 100); - BOOST_CHECK_EQUAL(mb.bytes_to_read(), 50); + BOOST_CHECK_EQUAL(mb.bytes_to_read(), 50u); // Moving read_ptr to write_ptr should shrink chain and reset ptrs mb.advance_read_ptr(50); BOOST_CHECK_EQUAL(mb.total_bytes(), def_buffer_size); BOOST_CHECK_EQUAL(mb.bytes_to_write(), def_buffer_size); - BOOST_CHECK_EQUAL(mb.bytes_to_read(), 0); + BOOST_CHECK_EQUAL(mb.bytes_to_read(), 0u); mb.add_buffer_to_chain(); BOOST_CHECK_EQUAL(mb.total_bytes(), 2 * def_buffer_size); BOOST_CHECK_EQUAL(mb.bytes_to_write(), 2 * def_buffer_size); - BOOST_CHECK_EQUAL(mb.bytes_to_read(), 0); + BOOST_CHECK_EQUAL(mb.bytes_to_read(), 0u); mb.advance_write_ptr(50); BOOST_CHECK_EQUAL(mb.total_bytes(), 2 * def_buffer_size); BOOST_CHECK_EQUAL(mb.bytes_to_write(), 2 * def_buffer_size - 50); - BOOST_CHECK_EQUAL(mb.bytes_to_read(), 50); + BOOST_CHECK_EQUAL(mb.bytes_to_read(), 50u); // Moving read_ptr to write_ptr should shrink chain and reset ptrs mb.advance_read_ptr(50); BOOST_CHECK_EQUAL(mb.total_bytes(), def_buffer_size); BOOST_CHECK_EQUAL(mb.bytes_to_write(), def_buffer_size); - BOOST_CHECK_EQUAL(mb.bytes_to_read(), 0); + BOOST_CHECK_EQUAL(mb.bytes_to_read(), 0u); } FC_LOG_AND_RETHROW() } @@ -227,7 +227,7 @@ BOOST_AUTO_TEST_CASE(message_buffer_write_ptr_to_end) BOOST_CHECK_EQUAL(mb.write_index().second, 0); char* write_ptr = mb.write_ptr(); - for (char ind = 0; ind < small; ind++) { + for (uint32_t ind = 0; ind < small; ind++) { *write_ptr = ind; write_ptr++; } @@ -318,6 +318,45 @@ BOOST_AUTO_TEST_CASE(message_buffer_read_peek_bounds_multi) { BOOST_CHECK_THROW(mbuff.read(&throw_away_buffer, 1), fc::out_of_range_exception); } +BOOST_AUTO_TEST_CASE(message_buffer_datastream) { + using my_message_buffer_t = fc::message_buffer<1024>; + my_message_buffer_t mbuff; + + char buf[1024]; + fc::datastream ds( buf, 1024 ); + + int v = 13; + fc::raw::pack( ds, v ); + v = 42; + fc::raw::pack( ds, 42 ); + fc::raw::pack( ds, std::string( "hello" ) ); + + memcpy(mbuff.write_ptr(), buf, 1024); + mbuff.advance_write_ptr(1024); + + for( int i = 0; i < 3; ++i ) { + auto ds2 = mbuff.create_peek_datastream(); + fc::raw::unpack( ds2, v ); + BOOST_CHECK_EQUAL( 13, v ); + fc::raw::unpack( ds2, v ); + BOOST_CHECK_EQUAL( 42, v ); + std::string s; + fc::raw::unpack( ds2, s ); + BOOST_CHECK_EQUAL( s, std::string( "hello" ) ); + } + + { + auto ds2 = mbuff.create_datastream(); + fc::raw::unpack( ds2, v ); + BOOST_CHECK_EQUAL( 13, v ); + fc::raw::unpack( ds2, v ); + BOOST_CHECK_EQUAL( 42, v ); + std::string s; + fc::raw::unpack( ds2, s ); + BOOST_CHECK_EQUAL( s, std::string( "hello" ) ); + } +} + BOOST_AUTO_TEST_SUITE_END() } // namespace eosio diff --git a/unittests/misc_tests.cpp b/unittests/misc_tests.cpp index dcc4560b0f0..607c78859fd 100644 --- a/unittests/misc_tests.cpp +++ b/unittests/misc_tests.cpp @@ -26,6 +26,60 @@ using namespace eosio::testing; #include #include +struct base_reflect : fc::reflect_init { + int bv = 0; + bool base_reflect_initialized = false; + int base_reflect_called = 0; +protected: + friend struct fc::reflector; + friend struct fc::reflector_init_visitor; + friend struct fc::has_reflector_init; + void reflector_init() { + BOOST_CHECK_EQUAL( bv, 42 ); // should be deserialized before called, set by test + ++base_reflect_called; + 
base_reflect_initialized = true; + } +}; + +struct derived_reflect : public base_reflect { + int dv = 0; + bool derived_reflect_initialized = false; + int derived_reflect_called = 0; +protected: + friend struct fc::reflector; + friend struct fc::reflector_init_visitor; + friend struct fc::has_reflector_init; + void reflector_init() { + BOOST_CHECK_EQUAL( bv, 42 ); // should be deserialized before called, set by test + BOOST_CHECK_EQUAL( dv, 52 ); // should be deserialized before called, set by test + ++derived_reflect_called; + base_reflect::reflector_init(); + derived_reflect_initialized = true; + } +}; + +struct final_reflect : public derived_reflect { + int fv = 0; + bool final_reflect_initialized = false; + int final_reflect_called = 0; +private: + friend struct fc::reflector; + friend struct fc::reflector_init_visitor; + friend struct fc::has_reflector_init; + void reflector_init() { + BOOST_CHECK_EQUAL( bv, 42 ); // should be deserialized before called, set by test + BOOST_CHECK_EQUAL( dv, 52 ); // should be deserialized before called, set by test + BOOST_CHECK_EQUAL( fv, 62 ); // should be deserialized before called, set by test + ++final_reflect_called; + derived_reflect::reflector_init(); + final_reflect_initialized = true; + } +}; + +FC_REFLECT( base_reflect, (bv) ) +FC_REFLECT_DERIVED( derived_reflect, (base_reflect), (dv) ) +FC_REFLECT_DERIVED( final_reflect, (derived_reflect), (fv) ) + namespace eosio { using namespace chain; @@ -102,6 +156,33 @@ BOOST_AUTO_TEST_CASE(json_from_string_test) BOOST_CHECK_EQUAL(exc_found, true); } +BOOST_AUTO_TEST_CASE(variant_format_string_limited) +{ + const string format = "${a} ${b} ${c}"; + { + fc::mutable_variant_object mu; + mu( "a", string( 1024, 'a' ) ); + mu( "b", string( 1024, 'b' ) ); + mu( "c", string( 1024, 'c' ) ); + string result = fc::format_string( format, mu, true ); + BOOST_CHECK_EQUAL( result, string( 256, 'a' ) + "... " + string( 256, 'b' ) + "... " + string( 256, 'c' ) + "..." 
); + } + { + fc::mutable_variant_object mu; + signed_block a; + blob b; + for( int i = 0; i < 1024; ++i) + b.data.push_back('b'); + variants c; + c.push_back(variant(a)); + mu( "a", a ); + mu( "b", b ); + mu( "c", c ); + string result = fc::format_string( format, mu, true ); + BOOST_CHECK_EQUAL( result, "${a} ${b} ${c}"); + } +} + // Test overflow handling in asset::from_string BOOST_AUTO_TEST_CASE(asset_from_string_overflow) { @@ -245,31 +326,31 @@ BOOST_AUTO_TEST_CASE(authority_checker) auto checker = make_auth_checker(GetNullAuthority, 2, {a, b}); BOOST_TEST(checker.satisfied(A)); BOOST_TEST(checker.all_keys_used()); - BOOST_TEST(checker.used_keys().size() == 2); - BOOST_TEST(checker.unused_keys().size() == 0); + BOOST_TEST(checker.used_keys().size() == 2u); + BOOST_TEST(checker.unused_keys().size() == 0u); } { auto checker = make_auth_checker(GetNullAuthority, 2, {a, c}); BOOST_TEST(!checker.satisfied(A)); BOOST_TEST(!checker.all_keys_used()); - BOOST_TEST(checker.used_keys().size() == 0); - BOOST_TEST(checker.unused_keys().size() == 2); + BOOST_TEST(checker.used_keys().size() == 0u); + BOOST_TEST(checker.unused_keys().size() == 2u); } { auto checker = make_auth_checker(GetNullAuthority, 2, {a, b, c}); BOOST_TEST(checker.satisfied(A)); BOOST_TEST(!checker.all_keys_used()); - BOOST_TEST(checker.used_keys().size() == 2); - BOOST_TEST(checker.used_keys().count(a) == 1); - BOOST_TEST(checker.used_keys().count(b) == 1); - BOOST_TEST(checker.unused_keys().size() == 1); - BOOST_TEST(checker.unused_keys().count(c) == 1); + BOOST_TEST(checker.used_keys().size() == 2u); + BOOST_TEST(checker.used_keys().count(a) == 1u); + BOOST_TEST(checker.used_keys().count(b) == 1u); + BOOST_TEST(checker.unused_keys().size() == 1u); + BOOST_TEST(checker.unused_keys().count(c) == 1u); } { auto checker = make_auth_checker(GetNullAuthority, 2, {b, c}); BOOST_TEST(!checker.satisfied(A)); BOOST_TEST(!checker.all_keys_used()); - BOOST_TEST(checker.used_keys().size() == 0); + BOOST_TEST(checker.used_keys().size() == 0u); } A = authority(3, {key_weight{a, 1}, key_weight{b, 1}, key_weight{c, 1}}); @@ -301,35 +382,35 @@ BOOST_AUTO_TEST_CASE(authority_checker) { auto checker = make_auth_checker(GetCAuthority, 2, {b}); BOOST_TEST(!checker.satisfied(A)); - BOOST_TEST(checker.used_keys().size() == 0); - BOOST_TEST(checker.unused_keys().size() == 1); - BOOST_TEST(checker.unused_keys().count(b) == 1); + BOOST_TEST(checker.used_keys().size() == 0u); + BOOST_TEST(checker.unused_keys().size() == 1u); + BOOST_TEST(checker.unused_keys().count(b) == 1u); } { auto checker = make_auth_checker(GetCAuthority, 2, {c}); BOOST_TEST(!checker.satisfied(A)); - BOOST_TEST(checker.used_keys().size() == 0); - BOOST_TEST(checker.unused_keys().size() == 1); - BOOST_TEST(checker.unused_keys().count(c) == 1); + BOOST_TEST(checker.used_keys().size() == 0u); + BOOST_TEST(checker.unused_keys().size() == 1u); + BOOST_TEST(checker.unused_keys().count(c) == 1u); } { auto checker = make_auth_checker(GetCAuthority, 2, {b, c}); BOOST_TEST(checker.satisfied(A)); BOOST_TEST(checker.all_keys_used()); - BOOST_TEST(checker.used_keys().size() == 2); - BOOST_TEST(checker.unused_keys().size() == 0); - BOOST_TEST(checker.used_keys().count(b) == 1); - BOOST_TEST(checker.used_keys().count(c) == 1); + BOOST_TEST(checker.used_keys().size() == 2u); + BOOST_TEST(checker.unused_keys().size() == 0u); + BOOST_TEST(checker.used_keys().count(b) == 1u); + BOOST_TEST(checker.used_keys().count(c) == 1u); } { auto checker = make_auth_checker(GetCAuthority, 2, {b, c, a}); 
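The authority_checker expectations these hunks touch encode a greedy rule: candidate keys are consumed in authority order only until the threshold is met, surplus keys stay "unused", and a failed check marks nothing as used at all. A toy model of that bookkeeping (illustrative stand-ins only, not the real eosio checker):

#include <set>
#include <string>
#include <utility>
#include <vector>

using toy_key = std::string; // stand-in for a public key

bool toy_satisfied(const std::vector<std::pair<toy_key,int>>& key_weights, int threshold,
                   const std::set<toy_key>& provided, std::set<toy_key>& used) {
   int total = 0;
   for (const auto& kw : key_weights) {
      if (total >= threshold) break;       // once satisfied, later keys are never marked used
      if (provided.count(kw.first)) {
         total += kw.second;
         used.insert(kw.first);
      }
   }
   if (total < threshold) used.clear();    // a failed check reports zero used keys
   return total >= threshold;
}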
BOOST_TEST(checker.satisfied(A)); BOOST_TEST(!checker.all_keys_used()); - BOOST_TEST(checker.used_keys().size() == 1); - BOOST_TEST(checker.used_keys().count(a) == 1); - BOOST_TEST(checker.unused_keys().size() == 2); - BOOST_TEST(checker.unused_keys().count(b) == 1); - BOOST_TEST(checker.unused_keys().count(c) == 1); + BOOST_TEST(checker.used_keys().size() == 1u); + BOOST_TEST(checker.used_keys().count(a) == 1u); + BOOST_TEST(checker.unused_keys().size() == 2u); + BOOST_TEST(checker.unused_keys().count(b) == 1u); + BOOST_TEST(checker.unused_keys().count(c) == 1u); } A = authority(3, {key_weight{a, 2}, key_weight{b, 1}}, {permission_level_weight{{"hello", "world"}, 3}}); @@ -342,11 +423,11 @@ BOOST_AUTO_TEST_CASE(authority_checker) auto checker = make_auth_checker(GetCAuthority, 2, {a, b, c}); BOOST_TEST(checker.satisfied(A)); BOOST_TEST(!checker.all_keys_used()); - BOOST_TEST(checker.used_keys().size() == 1); - BOOST_TEST(checker.used_keys().count(c) == 1); - BOOST_TEST(checker.unused_keys().size() == 2); - BOOST_TEST(checker.unused_keys().count(a) == 1); - BOOST_TEST(checker.unused_keys().count(b) == 1); + BOOST_TEST(checker.used_keys().size() == 1u); + BOOST_TEST(checker.used_keys().count(c) == 1u); + BOOST_TEST(checker.unused_keys().size() == 2u); + BOOST_TEST(checker.unused_keys().count(a) == 1u); + BOOST_TEST(checker.unused_keys().count(b) == 1u); } A = authority(2, {key_weight{a, 1}, key_weight{b, 1}}, {permission_level_weight{{"hello", "world"}, 1}}); @@ -360,9 +441,9 @@ BOOST_AUTO_TEST_CASE(authority_checker) auto checker = make_auth_checker(GetCAuthority, 2, {a, b, c}); BOOST_TEST(checker.satisfied(A)); BOOST_TEST(!checker.all_keys_used()); - BOOST_TEST(checker.used_keys().size() == 2); - BOOST_TEST(checker.unused_keys().size() == 1); - BOOST_TEST(checker.unused_keys().count(c) == 1); + BOOST_TEST(checker.used_keys().size() == 2u); + BOOST_TEST(checker.unused_keys().size() == 1u); + BOOST_TEST(checker.unused_keys().count(c) == 1u); } A = authority(2, {key_weight{a, 1}, key_weight{b, 1}}, {permission_level_weight{{"hello", "world"}, 2}}); @@ -374,9 +455,9 @@ BOOST_AUTO_TEST_CASE(authority_checker) auto checker = make_auth_checker(GetCAuthority, 2, {a, b, c}); BOOST_TEST(checker.satisfied(A)); BOOST_TEST(!checker.all_keys_used()); - BOOST_TEST(checker.used_keys().size() == 1); - BOOST_TEST(checker.unused_keys().size() == 2); - BOOST_TEST(checker.used_keys().count(c) == 1); + BOOST_TEST(checker.used_keys().size() == 1u); + BOOST_TEST(checker.unused_keys().size() == 2u); + BOOST_TEST(checker.used_keys().count(c) == 1u); } auto d = test.get_public_key("d", "active"); @@ -398,20 +479,20 @@ BOOST_AUTO_TEST_CASE(authority_checker) auto checker = make_auth_checker(GetAuthority, 2, {a, b, c, d, e}); BOOST_TEST(checker.satisfied(A)); BOOST_TEST(!checker.all_keys_used()); - BOOST_TEST(checker.used_keys().size() == 2); - BOOST_TEST(checker.unused_keys().size() == 3); - BOOST_TEST(checker.used_keys().count(d) == 1); - BOOST_TEST(checker.used_keys().count(e) == 1); + BOOST_TEST(checker.used_keys().size() == 2u); + BOOST_TEST(checker.unused_keys().size() == 3u); + BOOST_TEST(checker.used_keys().count(d) == 1u); + BOOST_TEST(checker.used_keys().count(e) == 1u); } { auto checker = make_auth_checker(GetAuthority, 2, {a, b, c, e}); BOOST_TEST(checker.satisfied(A)); BOOST_TEST(!checker.all_keys_used()); - BOOST_TEST(checker.used_keys().size() == 3); - BOOST_TEST(checker.unused_keys().size() == 1); - BOOST_TEST(checker.used_keys().count(a) == 1); - BOOST_TEST(checker.used_keys().count(b) == 1); - 
BOOST_TEST(checker.used_keys().count(c) == 1); + BOOST_TEST(checker.used_keys().size() == 3u); + BOOST_TEST(checker.unused_keys().size() == 1u); + BOOST_TEST(checker.used_keys().count(a) == 1u); + BOOST_TEST(checker.used_keys().count(b) == 1u); + BOOST_TEST(checker.used_keys().count(c) == 1u); } BOOST_TEST(make_auth_checker(GetAuthority, 1, {a, b, c}).satisfied(A)); // Fails due to short recursion depth limit @@ -443,15 +524,15 @@ BOOST_AUTO_TEST_CASE(authority_checker) BOOST_TEST(!validate(F)); BOOST_TEST(!checker.all_keys_used()); - BOOST_TEST(checker.unused_keys().count(b) == 1); - BOOST_TEST(checker.unused_keys().count(a) == 1); - BOOST_TEST(checker.unused_keys().count(c) == 1); + BOOST_TEST(checker.unused_keys().count(b) == 1u); + BOOST_TEST(checker.unused_keys().count(a) == 1u); + BOOST_TEST(checker.unused_keys().count(c) == 1u); BOOST_TEST(checker.satisfied(A)); BOOST_TEST(checker.satisfied(B)); BOOST_TEST(!checker.all_keys_used()); - BOOST_TEST(checker.unused_keys().count(b) == 0); - BOOST_TEST(checker.unused_keys().count(a) == 0); - BOOST_TEST(checker.unused_keys().count(c) == 1); + BOOST_TEST(checker.unused_keys().count(b) == 0u); + BOOST_TEST(checker.unused_keys().count(a) == 0u); + BOOST_TEST(checker.unused_keys().count(c) == 1u); } { auto A2 = authority(4, {key_weight{b, 1}, key_weight{a, 1}, key_weight{c, 1}}, @@ -542,7 +623,7 @@ BOOST_AUTO_TEST_CASE(alphabetic_sort) tmp.push_back(str); } - for(int i = 0; i < words.size(); ++i ) { + for(size_t i = 0; i < words.size(); ++i ) { BOOST_TEST(tmp[i] == words[i]); } @@ -584,13 +665,13 @@ BOOST_AUTO_TEST_CASE(transaction_test) { try { trx.expiration = fc::time_point::now(); trx.validate(); - BOOST_CHECK_EQUAL(0, trx.signatures.size()); + BOOST_CHECK_EQUAL(0u, trx.signatures.size()); ((const signed_transaction &)trx).sign( test.get_private_key( config::system_account_name, "active" ), test.control->get_chain_id()); - BOOST_CHECK_EQUAL(0, trx.signatures.size()); + BOOST_CHECK_EQUAL(0u, trx.signatures.size()); auto private_key = test.get_private_key( config::system_account_name, "active" ); auto public_key = private_key.get_public_key(); trx.sign( private_key, test.control->get_chain_id() ); - BOOST_CHECK_EQUAL(1, trx.signatures.size()); + BOOST_CHECK_EQUAL(1u, trx.signatures.size()); trx.validate(); packed_transaction pkt(trx, packed_transaction::none); @@ -613,11 +694,11 @@ BOOST_AUTO_TEST_CASE(transaction_test) { try { flat_set keys; auto cpu_time1 = pkt.get_signed_transaction().get_signature_keys(test.control->get_chain_id(), fc::time_point::maximum(), keys); - BOOST_CHECK_EQUAL(1, keys.size()); + BOOST_CHECK_EQUAL(1u, keys.size()); BOOST_CHECK_EQUAL(public_key, *keys.begin()); keys.clear(); auto cpu_time2 = pkt.get_signed_transaction().get_signature_keys(test.control->get_chain_id(), fc::time_point::maximum(), keys); - BOOST_CHECK_EQUAL(1, keys.size()); + BOOST_CHECK_EQUAL(1u, keys.size()); BOOST_CHECK_EQUAL(public_key, *keys.begin()); BOOST_CHECK(cpu_time1 > fc::microseconds(0)); @@ -631,6 +712,7 @@ BOOST_AUTO_TEST_CASE(transaction_test) { try { uint32_t pack_size = fc::raw::pack_size( pkt ); vector buf(pack_size); fc::datastream ds(buf.data(), pack_size); + fc::raw::pack( ds, pkt ); // unpack ds.seekp(0); @@ -644,6 +726,10 @@ BOOST_AUTO_TEST_CASE(transaction_test) { try { ds2.seekp(0); packed_transaction pkt4; fc::raw::unpack(ds2, pkt4); + // to/from variant + fc::variant pkt_v( pkt3 ); + packed_transaction pkt5; + fc::from_variant(pkt_v, pkt5); bytes raw3 = pkt3.get_raw_transaction(); bytes raw4 = pkt4.get_raw_transaction(); @@ 
-653,17 +739,46 @@ BOOST_AUTO_TEST_CASE(transaction_test) { try { BOOST_CHECK_EQUAL(true, std::equal(raw.begin(), raw.end(), raw4.begin())); BOOST_CHECK_EQUAL(pkt.get_signed_transaction().id(), pkt3.get_signed_transaction().id()); BOOST_CHECK_EQUAL(pkt.get_signed_transaction().id(), pkt4.get_signed_transaction().id()); + BOOST_CHECK_EQUAL(pkt.get_signed_transaction().id(), pkt5.get_signed_transaction().id()); // failure indicates reflector_init not working BOOST_CHECK_EQUAL(pkt.id(), pkt4.get_signed_transaction().id()); BOOST_CHECK_EQUAL(true, trx.expiration == pkt4.expiration()); BOOST_CHECK_EQUAL(true, trx.expiration == pkt4.get_signed_transaction().expiration); keys.clear(); - auto cpu_time3 = pkt4.get_signed_transaction().get_signature_keys(test.control->get_chain_id(), fc::time_point::maximum(), keys); - BOOST_CHECK_EQUAL(1, keys.size()); + pkt4.get_signed_transaction().get_signature_keys(test.control->get_chain_id(), fc::time_point::maximum(), keys); + BOOST_CHECK_EQUAL(1u, keys.size()); BOOST_CHECK_EQUAL(public_key, *keys.begin()); } FC_LOG_AND_RETHROW() } +BOOST_AUTO_TEST_CASE(signed_int_test) { try { + char buf[32]; + fc::datastream ds(buf,32); + signed_int a(47), b((1<<30)+2), c(-47), d(-(1<<30)-2); //small +, big +, small -, big - + signed_int ee; + fc::raw::pack(ds,a); + ds.seekp(0); + fc::raw::unpack(ds,ee); + ds.seekp(0); + BOOST_CHECK_EQUAL(a,ee); + fc::raw::pack(ds,b); + ds.seekp(0); + fc::raw::unpack(ds,ee); + ds.seekp(0); + BOOST_CHECK_EQUAL(b,ee); + fc::raw::pack(ds,c); + ds.seekp(0); + fc::raw::unpack(ds,ee); + ds.seekp(0); + BOOST_CHECK_EQUAL(c,ee); + fc::raw::pack(ds,d); + ds.seekp(0); + fc::raw::unpack(ds,ee); + ds.seekp(0); + BOOST_CHECK_EQUAL(d,ee); + +} FC_LOG_AND_RETHROW() } + BOOST_AUTO_TEST_CASE(transaction_metadata_test) { try { testing::TESTER test; @@ -700,7 +815,7 @@ BOOST_AUTO_TEST_CASE(transaction_metadata_test) { try { auto private_key = test.get_private_key( config::system_account_name, "active" ); auto public_key = private_key.get_public_key(); trx.sign( private_key, test.control->get_chain_id() ); - BOOST_CHECK_EQUAL(1, trx.signatures.size()); + BOOST_CHECK_EQUAL(1u, trx.signatures.size()); packed_transaction pkt(trx, packed_transaction::none); packed_transaction pkt2(trx, packed_transaction::zlib); @@ -729,21 +844,218 @@ BOOST_AUTO_TEST_CASE(transaction_metadata_test) { try { transaction_metadata::create_signing_keys_future( mtrx2, thread_pool, test.control->get_chain_id(), fc::microseconds::maximum() ); auto keys = mtrx->recover_keys( test.control->get_chain_id() ); - BOOST_CHECK_EQUAL(1, keys.size()); + BOOST_CHECK_EQUAL(1u, keys.size()); BOOST_CHECK_EQUAL(public_key, *keys.begin()); // again keys = mtrx->recover_keys( test.control->get_chain_id() ); - BOOST_CHECK_EQUAL(1, keys.size()); + BOOST_CHECK_EQUAL(1u, keys.size()); BOOST_CHECK_EQUAL(public_key, *keys.begin()); auto keys2 = mtrx2->recover_keys( test.control->get_chain_id() ); - BOOST_CHECK_EQUAL(1, keys.size()); + BOOST_CHECK_EQUAL(1u, keys.size()); BOOST_CHECK_EQUAL(public_key, *keys.begin()); } FC_LOG_AND_RETHROW() } +BOOST_AUTO_TEST_CASE(reflector_init_test) { + try { + + base_reflect br; + br.bv = 42; + derived_reflect dr; + dr.bv = 42; + dr.dv = 52; + final_reflect fr; + fr.bv = 42; + fr.dv = 52; + fr.fv = 62; + BOOST_CHECK_EQUAL( br.base_reflect_initialized, false ); + BOOST_CHECK_EQUAL( dr.derived_reflect_initialized, false ); + + { // base + // pack + uint32_t pack_size = fc::raw::pack_size( br ); + vector buf( pack_size ); + fc::datastream ds( buf.data(), pack_size ); + + 
fc::raw::pack( ds, br ); + // unpack + ds.seekp( 0 ); + base_reflect br2; + fc::raw::unpack( ds, br2 ); + // pack again + pack_size = fc::raw::pack_size( br2 ); + fc::datastream ds2( buf.data(), pack_size ); + fc::raw::pack( ds2, br2 ); + // unpack + ds2.seekp( 0 ); + base_reflect br3; + fc::raw::unpack( ds2, br3 ); + // to/from variant + fc::variant v( br3 ); + base_reflect br4; + fc::from_variant( v, br4 ); + + BOOST_CHECK_EQUAL( br2.bv, 42 ); + BOOST_CHECK_EQUAL( br2.base_reflect_initialized, true ); + BOOST_CHECK_EQUAL( br2.base_reflect_called, 1 ); + BOOST_CHECK_EQUAL( br3.bv, 42 ); + BOOST_CHECK_EQUAL( br3.base_reflect_initialized, true ); + BOOST_CHECK_EQUAL( br3.base_reflect_called, 1 ); + BOOST_CHECK_EQUAL( br4.bv, 42 ); + BOOST_CHECK_EQUAL( br4.base_reflect_initialized, true ); + BOOST_CHECK_EQUAL( br4.base_reflect_called, 1 ); + } + { // derived + // pack + uint32_t pack_size = fc::raw::pack_size( dr ); + vector buf( pack_size ); + fc::datastream ds( buf.data(), pack_size ); + + fc::raw::pack( ds, dr ); + // unpack + ds.seekp( 0 ); + derived_reflect dr2; + fc::raw::unpack( ds, dr2 ); + // pack again + pack_size = fc::raw::pack_size( dr2 ); + fc::datastream ds2( buf.data(), pack_size ); + fc::raw::pack( ds2, dr2 ); + // unpack + ds2.seekp( 0 ); + derived_reflect dr3; + fc::raw::unpack( ds2, dr3 ); + // to/from variant + fc::variant v( dr3 ); + derived_reflect dr4; + fc::from_variant( v, dr4 ); + + BOOST_CHECK_EQUAL( dr2.bv, 42 ); + BOOST_CHECK_EQUAL( dr2.base_reflect_initialized, true ); + BOOST_CHECK_EQUAL( dr2.base_reflect_called, 1 ); + BOOST_CHECK_EQUAL( dr3.bv, 42 ); + BOOST_CHECK_EQUAL( dr3.base_reflect_initialized, true ); + BOOST_CHECK_EQUAL( dr3.base_reflect_called, 1 ); + BOOST_CHECK_EQUAL( dr4.bv, 42 ); + BOOST_CHECK_EQUAL( dr4.base_reflect_initialized, true ); + BOOST_CHECK_EQUAL( dr4.base_reflect_called, 1 ); + + BOOST_CHECK_EQUAL( dr2.dv, 52 ); + BOOST_CHECK_EQUAL( dr2.derived_reflect_initialized, true ); + BOOST_CHECK_EQUAL( dr2.derived_reflect_called, 1 ); + BOOST_CHECK_EQUAL( dr3.dv, 52 ); + BOOST_CHECK_EQUAL( dr3.derived_reflect_initialized, true ); + BOOST_CHECK_EQUAL( dr3.derived_reflect_called, 1 ); + BOOST_CHECK_EQUAL( dr4.dv, 52 ); + BOOST_CHECK_EQUAL( dr4.derived_reflect_initialized, true ); + BOOST_CHECK_EQUAL( dr4.derived_reflect_called, 1 ); + + base_reflect br5; + ds2.seekp( 0 ); + fc::raw::unpack( ds2, br5 ); + base_reflect br6; + fc::from_variant( v, br6 ); + + BOOST_CHECK_EQUAL( br5.bv, 42 ); + BOOST_CHECK_EQUAL( br5.base_reflect_initialized, true ); + BOOST_CHECK_EQUAL( br5.base_reflect_called, 1 ); + BOOST_CHECK_EQUAL( br6.bv, 42 ); + BOOST_CHECK_EQUAL( br6.base_reflect_initialized, true ); + BOOST_CHECK_EQUAL( br6.base_reflect_called, 1 ); + } + { // final + // pack + uint32_t pack_size = fc::raw::pack_size( fr ); + vector buf( pack_size ); + fc::datastream ds( buf.data(), pack_size ); + + fc::raw::pack( ds, fr ); + // unpack + ds.seekp( 0 ); + final_reflect fr2; + fc::raw::unpack( ds, fr2 ); + // pack again + pack_size = fc::raw::pack_size( fr2 ); + fc::datastream ds2( buf.data(), pack_size ); + fc::raw::pack( ds2, fr2 ); + // unpack + ds2.seekp( 0 ); + final_reflect fr3; + fc::raw::unpack( ds2, fr3 ); + // to/from variant + fc::variant v( fr3 ); + final_reflect fr4; + fc::from_variant( v, fr4 ); + + BOOST_CHECK_EQUAL( fr2.bv, 42 ); + BOOST_CHECK_EQUAL( fr2.base_reflect_initialized, true ); + BOOST_CHECK_EQUAL( fr2.base_reflect_called, 1 ); + BOOST_CHECK_EQUAL( fr3.bv, 42 ); + BOOST_CHECK_EQUAL( fr3.base_reflect_initialized, true ); + 
BOOST_CHECK_EQUAL( fr3.base_reflect_called, 1 ); + BOOST_CHECK_EQUAL( fr4.bv, 42 ); + BOOST_CHECK_EQUAL( fr4.base_reflect_initialized, true ); + BOOST_CHECK_EQUAL( fr4.base_reflect_called, 1 ); + + BOOST_CHECK_EQUAL( fr2.dv, 52 ); + BOOST_CHECK_EQUAL( fr2.derived_reflect_initialized, true ); + BOOST_CHECK_EQUAL( fr2.derived_reflect_called, 1 ); + BOOST_CHECK_EQUAL( fr3.dv, 52 ); + BOOST_CHECK_EQUAL( fr3.derived_reflect_initialized, true ); + BOOST_CHECK_EQUAL( fr3.derived_reflect_called, 1 ); + BOOST_CHECK_EQUAL( fr4.dv, 52 ); + BOOST_CHECK_EQUAL( fr4.derived_reflect_initialized, true ); + BOOST_CHECK_EQUAL( fr4.derived_reflect_called, 1 ); + + BOOST_CHECK_EQUAL( fr2.fv, 62 ); + BOOST_CHECK_EQUAL( fr2.final_reflect_initialized, true ); + BOOST_CHECK_EQUAL( fr2.final_reflect_called, 1 ); + BOOST_CHECK_EQUAL( fr3.fv, 62 ); + BOOST_CHECK_EQUAL( fr3.final_reflect_initialized, true ); + BOOST_CHECK_EQUAL( fr3.final_reflect_called, 1 ); + BOOST_CHECK_EQUAL( fr4.fv, 62 ); + BOOST_CHECK_EQUAL( fr4.final_reflect_initialized, true ); + BOOST_CHECK_EQUAL( fr4.final_reflect_called, 1 ); + + base_reflect br5; + ds2.seekp( 0 ); + fc::raw::unpack( ds2, br5 ); + base_reflect br6; + fc::from_variant( v, br6 ); + + BOOST_CHECK_EQUAL( br5.bv, 42 ); + BOOST_CHECK_EQUAL( br5.base_reflect_initialized, true ); + BOOST_CHECK_EQUAL( br5.base_reflect_called, 1 ); + BOOST_CHECK_EQUAL( br6.bv, 42 ); + BOOST_CHECK_EQUAL( br6.base_reflect_initialized, true ); + BOOST_CHECK_EQUAL( br6.base_reflect_called, 1 ); + + derived_reflect dr7; + ds2.seekp( 0 ); + fc::raw::unpack( ds2, dr7 ); + derived_reflect dr8; + fc::from_variant( v, dr8 ); + + BOOST_CHECK_EQUAL( dr7.bv, 42 ); + BOOST_CHECK_EQUAL( dr7.base_reflect_initialized, true ); + BOOST_CHECK_EQUAL( dr7.base_reflect_called, 1 ); + BOOST_CHECK_EQUAL( dr8.bv, 42 ); + BOOST_CHECK_EQUAL( dr8.base_reflect_initialized, true ); + BOOST_CHECK_EQUAL( dr8.base_reflect_called, 1 ); + + BOOST_CHECK_EQUAL( dr7.dv, 52 ); + BOOST_CHECK_EQUAL( dr7.derived_reflect_initialized, true ); + BOOST_CHECK_EQUAL( dr7.derived_reflect_called, 1 ); + BOOST_CHECK_EQUAL( dr8.dv, 52 ); + BOOST_CHECK_EQUAL( dr8.derived_reflect_initialized, true ); + BOOST_CHECK_EQUAL( dr8.derived_reflect_called, 1 ); + } + + } FC_LOG_AND_RETHROW() +} + BOOST_AUTO_TEST_SUITE_END() diff --git a/unittests/multi_index_tests.cpp b/unittests/multi_index_tests.cpp deleted file mode 100644 index 821e187ffd1..00000000000 --- a/unittests/multi_index_tests.cpp +++ /dev/null @@ -1,82 +0,0 @@ -/** - * @file - * @copyright defined in eos/LICENSE.txt - */ -#include -#include - -#include -#include - -#include - -#include - -#ifdef NON_VALIDATING_TEST -#define TESTER tester -#else -#define TESTER validating_tester -#endif - -using namespace eosio::testing; - -BOOST_AUTO_TEST_SUITE(multi_index_tests) - -BOOST_FIXTURE_TEST_CASE( multi_index_load, TESTER ) try { - - produce_blocks(2); - create_accounts( {N(multitest)} ); - produce_blocks(2); - - set_code( N(multitest), contracts::multi_index_test_wasm() ); - set_abi( N(multitest), contracts::multi_index_test_abi().data() ); - - produce_blocks(1); - - auto abi_string = std::string(contracts::multi_index_test_abi().data()); - - abi_serializer abi_ser(json::from_string(abi_string).as(), abi_serializer_max_time); - - signed_transaction trx1; - { - auto& trx = trx1; - action trigger_act; - trigger_act.account = N(multitest); - trigger_act.name = N(multitest); - trigger_act.authorization = vector{{N(multitest), config::active_name}}; - trigger_act.data = abi_ser.variant_to_binary("multitest", 
mutable_variant_object() - ("what", 0), - abi_serializer_max_time - ); - - trx.actions.emplace_back(std::move(trigger_act)); - set_transaction_headers(trx); - trx.sign(get_private_key(N(multitest), "active"), control->get_chain_id()); - push_transaction(trx); - } - - signed_transaction trx2; - { - auto& trx = trx2; - - action trigger_act; - trigger_act.account = N(multitest); - trigger_act.name = N(multitest); - trigger_act.authorization = vector{{N(multitest), config::active_name}}; - trigger_act.data = abi_ser.variant_to_binary("multitest", mutable_variant_object() - ("what", 1), - abi_serializer_max_time - ); - trx.actions.emplace_back(std::move(trigger_act)); - set_transaction_headers(trx); - trx.sign(get_private_key(N(multitest), "active"), control->get_chain_id()); - push_transaction(trx); - } - - produce_block(); - BOOST_REQUIRE_EQUAL(true, chain_has_transaction(trx1.id())); - BOOST_REQUIRE_EQUAL(true, chain_has_transaction(trx2.id())); - -} FC_LOG_AND_RETHROW() - -BOOST_AUTO_TEST_SUITE_END() diff --git a/unittests/multisig_tests.cpp b/unittests/multisig_tests.cpp deleted file mode 100644 index 246f6aae745..00000000000 --- a/unittests/multisig_tests.cpp +++ /dev/null @@ -1,632 +0,0 @@ -/** - * @file - * @copyright defined in eos/LICENSE.txt - */ -#include -#include -#include -#include - -#include - -#include - -#include - -using namespace eosio::testing; -using namespace eosio; -using namespace eosio::chain; -using namespace eosio::testing; -using namespace fc; - -using mvo = fc::mutable_variant_object; - -class eosio_msig_tester : public tester { -public: - - eosio_msig_tester() { - create_accounts( { N(eosio.msig), N(eosio.stake), N(eosio.ram), N(eosio.ramfee), N(alice), N(bob), N(carol) } ); - produce_block(); - - auto trace = base_tester::push_action(config::system_account_name, N(setpriv), - config::system_account_name, mutable_variant_object() - ("account", "eosio.msig") - ("is_priv", 1) - ); - - set_code( N(eosio.msig), contracts::eosio_msig_wasm() ); - set_abi( N(eosio.msig), contracts::eosio_msig_abi().data() ); - - produce_blocks(); - const auto& accnt = control->db().get( N(eosio.msig) ); - abi_def abi; - BOOST_REQUIRE_EQUAL(abi_serializer::to_abi(accnt.abi, abi), true); - abi_ser.set_abi(abi, abi_serializer_max_time); - } - - transaction_trace_ptr create_account_with_resources( account_name a, account_name creator, asset ramfunds, bool multisig, - asset net = core_from_string("10.0000"), asset cpu = core_from_string("10.0000") ) { - signed_transaction trx; - set_transaction_headers(trx); - - authority owner_auth; - if (multisig) { - // multisig between account's owner key and creators active permission - owner_auth = authority(2, {key_weight{get_public_key( a, "owner" ), 1}}, {permission_level_weight{{creator, config::active_name}, 1}}); - } else { - owner_auth = authority( get_public_key( a, "owner" ) ); - } - - trx.actions.emplace_back( vector{{creator,config::active_name}}, - newaccount{ - .creator = creator, - .name = a, - .owner = owner_auth, - .active = authority( get_public_key( a, "active" ) ) - }); - - trx.actions.emplace_back( get_action( config::system_account_name, N(buyram), vector{{creator,config::active_name}}, - mvo() - ("payer", creator) - ("receiver", a) - ("quant", ramfunds) ) - ); - - trx.actions.emplace_back( get_action( config::system_account_name, N(delegatebw), vector{{creator,config::active_name}}, - mvo() - ("from", creator) - ("receiver", a) - ("stake_net_quantity", net ) - ("stake_cpu_quantity", cpu ) - ("transfer", 0 ) - ) - ); - - 
set_transaction_headers(trx); - trx.sign( get_private_key( creator, "active" ), control->get_chain_id() ); - return push_transaction( trx ); - } - void create_currency( name contract, name manager, asset maxsupply ) { - auto act = mutable_variant_object() - ("issuer", manager ) - ("maximum_supply", maxsupply ); - - base_tester::push_action(contract, N(create), contract, act ); - } - void issue( name to, const asset& amount, name manager = config::system_account_name ) { - base_tester::push_action( N(eosio.token), N(issue), manager, mutable_variant_object() - ("to", to ) - ("quantity", amount ) - ("memo", "") - ); - } - void transfer( name from, name to, const string& amount, name manager = config::system_account_name ) { - base_tester::push_action( N(eosio.token), N(transfer), manager, mutable_variant_object() - ("from", from) - ("to", to ) - ("quantity", asset::from_string(amount) ) - ("memo", "") - ); - } - asset get_balance( const account_name& act ) { - //return get_currency_balance( config::system_account_name, symbol(CORE_SYMBOL), act ); - //temporary code. current get_currency_balancy uses table name N(accounts) from currency.h - //generic_currency table name is N(account). - const auto& db = control->db(); - const auto* tbl = db.find(boost::make_tuple(N(eosio.token), act, N(accounts))); - share_type result = 0; - - // the balance is implied to be 0 if either the table or row does not exist - if (tbl) { - const auto *obj = db.find(boost::make_tuple(tbl->id, symbol(CORE_SYMBOL).to_symbol_code())); - if (obj) { - // balance is the first field in the serialization - fc::datastream ds(obj->value.data(), obj->value.size()); - fc::raw::unpack(ds, result); - } - } - return asset( result, symbol(CORE_SYMBOL) ); - } - - transaction_trace_ptr push_action( const account_name& signer, const action_name& name, const variant_object& data, bool auth = true ) { - vector accounts; - if( auth ) - accounts.push_back( signer ); - auto trace = base_tester::push_action( N(eosio.msig), name, accounts, data ); - produce_block(); - BOOST_REQUIRE_EQUAL( true, chain_has_transaction(trace->id) ); - return trace; - } - - transaction reqauth( account_name from, const vector& auths, const fc::microseconds& max_serialization_time ); - - abi_serializer abi_ser; -}; - -transaction eosio_msig_tester::reqauth( account_name from, const vector& auths, const fc::microseconds& max_serialization_time ) { - fc::variants v; - for ( auto& level : auths ) { - v.push_back(fc::mutable_variant_object() - ("actor", level.actor) - ("permission", level.permission) - ); - } - variant pretty_trx = fc::mutable_variant_object() - ("expiration", "2020-01-01T00:30") - ("ref_block_num", 2) - ("ref_block_prefix", 3) - ("max_net_usage_words", 0) - ("max_cpu_usage_ms", 0) - ("delay_sec", 0) - ("actions", fc::variants({ - fc::mutable_variant_object() - ("account", name(config::system_account_name)) - ("name", "reqauth") - ("authorization", v) - ("data", fc::mutable_variant_object() ("from", from) ) - }) - ); - transaction trx; - abi_serializer::from_variant(pretty_trx, trx, get_resolver(), max_serialization_time); - return trx; -} - -BOOST_AUTO_TEST_SUITE(eosio_msig_tests) - -BOOST_FIXTURE_TEST_CASE( propose_approve_execute, eosio_msig_tester ) try { - auto trx = reqauth("alice", {permission_level{N(alice), config::active_name}}, abi_serializer_max_time ); - - push_action( N(alice), N(propose), mvo() - ("proposer", "alice") - ("proposal_name", "first") - ("trx", trx) - ("requested", vector{{ N(alice), config::active_name }}) - ); - - //fail to 
execute before approval - BOOST_REQUIRE_EXCEPTION( push_action( N(alice), N(exec), mvo() - ("proposer", "alice") - ("proposal_name", "first") - ("executer", "alice") - ), - eosio_assert_message_exception, - eosio_assert_message_is("transaction authorization failed") - ); - - //approve and execute - push_action( N(alice), N(approve), mvo() - ("proposer", "alice") - ("proposal_name", "first") - ("level", permission_level{ N(alice), config::active_name }) - ); - - transaction_trace_ptr trace; - control->applied_transaction.connect([&]( const transaction_trace_ptr& t) { if (t->scheduled) { trace = t; } } ); - push_action( N(alice), N(exec), mvo() - ("proposer", "alice") - ("proposal_name", "first") - ("executer", "alice") - ); - - BOOST_REQUIRE( bool(trace) ); - BOOST_REQUIRE_EQUAL( 1, trace->action_traces.size() ); - BOOST_REQUIRE_EQUAL( transaction_receipt::executed, trace->receipt->status ); -} FC_LOG_AND_RETHROW() - - -BOOST_FIXTURE_TEST_CASE( propose_approve_unapprove, eosio_msig_tester ) try { - auto trx = reqauth("alice", {permission_level{N(alice), config::active_name}}, abi_serializer_max_time ); - - push_action( N(alice), N(propose), mvo() - ("proposer", "alice") - ("proposal_name", "first") - ("trx", trx) - ("requested", vector{{ N(alice), config::active_name }}) - ); - - push_action( N(alice), N(approve), mvo() - ("proposer", "alice") - ("proposal_name", "first") - ("level", permission_level{ N(alice), config::active_name }) - ); - - push_action( N(alice), N(unapprove), mvo() - ("proposer", "alice") - ("proposal_name", "first") - ("level", permission_level{ N(alice), config::active_name }) - ); - - BOOST_REQUIRE_EXCEPTION( push_action( N(alice), N(exec), mvo() - ("proposer", "alice") - ("proposal_name", "first") - ("executer", "alice") - ), - eosio_assert_message_exception, - eosio_assert_message_is("transaction authorization failed") - ); - -} FC_LOG_AND_RETHROW() - - -BOOST_FIXTURE_TEST_CASE( propose_approve_by_two, eosio_msig_tester ) try { - auto trx = reqauth("alice", vector{ { N(alice), config::active_name }, { N(bob), config::active_name } }, abi_serializer_max_time ); - push_action( N(alice), N(propose), mvo() - ("proposer", "alice") - ("proposal_name", "first") - ("trx", trx) - ("requested", vector{ { N(alice), config::active_name }, { N(bob), config::active_name } }) - ); - - //approve by alice - push_action( N(alice), N(approve), mvo() - ("proposer", "alice") - ("proposal_name", "first") - ("level", permission_level{ N(alice), config::active_name }) - ); - - //fail because approval by bob is missing - BOOST_REQUIRE_EXCEPTION( push_action( N(alice), N(exec), mvo() - ("proposer", "alice") - ("proposal_name", "first") - ("executer", "alice") - ), - eosio_assert_message_exception, - eosio_assert_message_is("transaction authorization failed") - ); - - //approve by bob and execute - push_action( N(bob), N(approve), mvo() - ("proposer", "alice") - ("proposal_name", "first") - ("level", permission_level{ N(bob), config::active_name }) - ); - - transaction_trace_ptr trace; - control->applied_transaction.connect([&]( const transaction_trace_ptr& t) { if (t->scheduled) { trace = t; } } ); - - push_action( N(alice), N(exec), mvo() - ("proposer", "alice") - ("proposal_name", "first") - ("executer", "alice") - ); - - BOOST_REQUIRE( bool(trace) ); - BOOST_REQUIRE_EQUAL( 1, trace->action_traces.size() ); - BOOST_REQUIRE_EQUAL( transaction_receipt::executed, trace->receipt->status ); -} FC_LOG_AND_RETHROW() - - -BOOST_FIXTURE_TEST_CASE( propose_with_wrong_requested_auth, 
eosio_msig_tester ) try { - auto trx = reqauth("alice", vector{ { N(alice), config::active_name }, { N(bob), config::active_name } }, abi_serializer_max_time ); - //try with not enough requested auth - BOOST_REQUIRE_EXCEPTION( push_action( N(alice), N(propose), mvo() - ("proposer", "alice") - ("proposal_name", "third") - ("trx", trx) - ("requested", vector{ { N(alice), config::active_name } } ) - ), - eosio_assert_message_exception, - eosio_assert_message_is("transaction authorization failed") - ); - -} FC_LOG_AND_RETHROW() - - -BOOST_FIXTURE_TEST_CASE( big_transaction, eosio_msig_tester ) try { - vector perm = { { N(alice), config::active_name }, { N(bob), config::active_name } }; - // auto wasm = wast_to_wasm( contracts::eosio_token_wasm() ); - auto wasm = contracts::eosio_token_wasm(); - - variant pretty_trx = fc::mutable_variant_object() - ("expiration", "2020-01-01T00:30") - ("ref_block_num", 2) - ("ref_block_prefix", 3) - ("max_net_usage_words", 0) - ("max_cpu_usage_ms", 0) - ("delay_sec", 0) - ("actions", fc::variants({ - fc::mutable_variant_object() - ("account", name(config::system_account_name)) - ("name", "setcode") - ("authorization", perm) - ("data", fc::mutable_variant_object() - ("account", "alice") - ("vmtype", 0) - ("vmversion", 0) - ("code", bytes( wasm.begin(), wasm.end() )) - ) - }) - ); - - transaction trx; - abi_serializer::from_variant(pretty_trx, trx, get_resolver(), abi_serializer_max_time); - - push_action( N(alice), N(propose), mvo() - ("proposer", "alice") - ("proposal_name", "first") - ("trx", trx) - ("requested", perm) - ); - - //approve by alice - push_action( N(alice), N(approve), mvo() - ("proposer", "alice") - ("proposal_name", "first") - ("level", permission_level{ N(alice), config::active_name }) - ); - //approve by bob and execute - push_action( N(bob), N(approve), mvo() - ("proposer", "alice") - ("proposal_name", "first") - ("level", permission_level{ N(bob), config::active_name }) - ); - - transaction_trace_ptr trace; - control->applied_transaction.connect([&]( const transaction_trace_ptr& t) { if (t->scheduled) { trace = t; } } ); - - push_action( N(alice), N(exec), mvo() - ("proposer", "alice") - ("proposal_name", "first") - ("executer", "alice") - ); - - BOOST_REQUIRE( bool(trace) ); - BOOST_REQUIRE_EQUAL( 1, trace->action_traces.size() ); - BOOST_REQUIRE_EQUAL( transaction_receipt::executed, trace->receipt->status ); -} FC_LOG_AND_RETHROW() - - - -BOOST_FIXTURE_TEST_CASE( update_system_contract_all_approve, eosio_msig_tester ) try { - - // required to set up the link between (eosio active) and (eosio.prods active) - // - // eosio active - // | - // eosio.prods active (2/3 threshold) - // / | \ <--- implicitly updated in onblock action - // alice active bob active carol active - - set_authority(config::system_account_name, "active", authority(1, - vector{{get_private_key("eosio", "active").get_public_key(), 1}}, - vector{{{config::producers_account_name, config::active_name}, 1}}), "owner", - { { config::system_account_name, "active" } }, { get_private_key( config::system_account_name, "active" ) }); - - set_producers( {N(alice),N(bob),N(carol)} ); - produce_blocks(50); - - create_accounts( { N(eosio.token) } ); - set_code( N(eosio.token), contracts::eosio_token_wasm() ); - set_abi( N(eosio.token), contracts::eosio_token_abi().data() ); - - create_currency( N(eosio.token), config::system_account_name, core_from_string("10000000000.0000") ); - issue(config::system_account_name, core_from_string("1000000000.0000")); - BOOST_REQUIRE_EQUAL( 
core_from_string("1000000000.0000"), - get_balance("eosio") + get_balance("eosio.ramfee") + get_balance("eosio.stake") + get_balance("eosio.ram") ); - - set_code( config::system_account_name, contracts::eosio_system_wasm() ); - set_abi( config::system_account_name, contracts::eosio_system_abi().data() ); - - produce_blocks(); - - base_tester::push_action(config::system_account_name, N(init), - config::system_account_name, mutable_variant_object() - ("version", 0) - ("core", CORE_SYM_STR)); - - create_account_with_resources( N(alice1111111), config::system_account_name, core_from_string("1.0000"), false ); - create_account_with_resources( N(bob111111111), config::system_account_name, core_from_string("0.4500"), false ); - create_account_with_resources( N(carol1111111), config::system_account_name, core_from_string("1.0000"), false ); - - BOOST_REQUIRE_EQUAL( core_from_string("1000000000.0000"), - get_balance("eosio") + get_balance("eosio.ramfee") + get_balance("eosio.stake") + get_balance("eosio.ram") ); - - vector perm = { { N(alice), config::active_name }, { N(bob), config::active_name }, - {N(carol), config::active_name} }; - - vector action_perm = {{config::system_account_name, config::active_name}}; - - auto wasm = contracts::test_api_wasm(); - - variant pretty_trx = fc::mutable_variant_object() - ("expiration", "2020-01-01T00:30") - ("ref_block_num", 2) - ("ref_block_prefix", 3) - ("max_net_usage_words", 0) - ("max_cpu_usage_ms", 0) - ("delay_sec", 0) - ("actions", fc::variants({ - fc::mutable_variant_object() - ("account", name(config::system_account_name)) - ("name", "setcode") - ("authorization", action_perm) - ("data", fc::mutable_variant_object() - ("account", name(config::system_account_name)) - ("vmtype", 0) - ("vmversion", 0) - ("code", bytes( wasm.begin(), wasm.end() )) - ) - }) - ); - - transaction trx; - abi_serializer::from_variant(pretty_trx, trx, get_resolver(), abi_serializer_max_time); - - // propose action - push_action( N(alice), N(propose), mvo() - ("proposer", "alice") - ("proposal_name", "first") - ("trx", trx) - ("requested", perm) - ); - - //approve by alice - push_action( N(alice), N(approve), mvo() - ("proposer", "alice") - ("proposal_name", "first") - ("level", permission_level{ N(alice), config::active_name }) - ); - //approve by bob - push_action( N(bob), N(approve), mvo() - ("proposer", "alice") - ("proposal_name", "first") - ("level", permission_level{ N(bob), config::active_name }) - ); - //approve by carol - push_action( N(carol), N(approve), mvo() - ("proposer", "alice") - ("proposal_name", "first") - ("level", permission_level{ N(carol), config::active_name }) - ); - // execute by alice to replace the eosio system contract - transaction_trace_ptr trace; - control->applied_transaction.connect([&]( const transaction_trace_ptr& t) { if (t->scheduled) { trace = t; } } ); - - push_action( N(alice), N(exec), mvo() - ("proposer", "alice") - ("proposal_name", "first") - ("executer", "alice") - ); - - BOOST_REQUIRE( bool(trace) ); - BOOST_REQUIRE_EQUAL( 1, trace->action_traces.size() ); - BOOST_REQUIRE_EQUAL( transaction_receipt::executed, trace->receipt->status ); - - // can't create account because system contract was replace by the test_api contract - - BOOST_REQUIRE_EXCEPTION( create_account_with_resources( N(alice1111112), config::system_account_name, core_from_string("1.0000"), false ), - eosio_assert_message_exception, eosio_assert_message_is("Unknown Test") - - ); -} FC_LOG_AND_RETHROW() - -BOOST_FIXTURE_TEST_CASE( update_system_contract_major_approve, 
eosio_msig_tester ) try { - - // set up the link between (eosio active) and (eosio.prods active) - set_authority(config::system_account_name, "active", authority(1, - vector{{get_private_key("eosio", "active").get_public_key(), 1}}, - vector{{{config::producers_account_name, config::active_name}, 1}}), "owner", - { { config::system_account_name, "active" } }, { get_private_key( config::system_account_name, "active" ) }); - - create_accounts( { N(apple) } ); - set_producers( {N(alice),N(bob),N(carol), N(apple)} ); - produce_blocks(50); - - create_accounts( { N(eosio.token) } ); - set_code( N(eosio.token), contracts::eosio_token_wasm() ); - set_abi( N(eosio.token), contracts::eosio_token_abi().data() ); - - create_currency( N(eosio.token), config::system_account_name, core_from_string("10000000000.0000") ); - issue(config::system_account_name, core_from_string("1000000000.0000")); - BOOST_REQUIRE_EQUAL( core_from_string("1000000000.0000"), get_balance( "eosio" ) ); - - set_code( config::system_account_name, contracts::eosio_system_wasm() ); - set_abi( config::system_account_name, contracts::eosio_system_abi().data() ); - - produce_blocks(); - - base_tester::push_action(config::system_account_name, N(init), - config::system_account_name, mutable_variant_object() - ("version", 0) - ("core", CORE_SYM_STR)); - - create_account_with_resources( N(alice1111111), config::system_account_name, core_from_string("1.0000"), false ); - create_account_with_resources( N(bob111111111), config::system_account_name, core_from_string("0.4500"), false ); - create_account_with_resources( N(carol1111111), config::system_account_name, core_from_string("1.0000"), false ); - - BOOST_REQUIRE_EQUAL( core_from_string("1000000000.0000"), - get_balance("eosio") + get_balance("eosio.ramfee") + get_balance("eosio.stake") + get_balance("eosio.ram") ); - - vector perm = { { N(alice), config::active_name }, { N(bob), config::active_name }, - {N(carol), config::active_name}, {N(apple), config::active_name}}; - - vector action_perm = {{config::system_account_name, config::active_name}}; - - auto wasm = contracts::test_api_wasm(); - - variant pretty_trx = fc::mutable_variant_object() - ("expiration", "2020-01-01T00:30") - ("ref_block_num", 2) - ("ref_block_prefix", 3) - ("max_net_usage_words", 0) - ("max_cpu_usage_ms", 0) - ("delay_sec", 0) - ("actions", fc::variants({ - fc::mutable_variant_object() - ("account", name(config::system_account_name)) - ("name", "setcode") - ("authorization", action_perm) - ("data", fc::mutable_variant_object() - ("account", name(config::system_account_name)) - ("vmtype", 0) - ("vmversion", 0) - ("code", bytes( wasm.begin(), wasm.end() )) - ) - }) - ); - - transaction trx; - abi_serializer::from_variant(pretty_trx, trx, get_resolver(), abi_serializer_max_time); - - // propose action - push_action( N(alice), N(propose), mvo() - ("proposer", "alice") - ("proposal_name", "first") - ("trx", trx) - ("requested", perm) - ); - - //approve by alice - push_action( N(alice), N(approve), mvo() - ("proposer", "alice") - ("proposal_name", "first") - ("level", permission_level{ N(alice), config::active_name }) - ); - //approve by bob - push_action( N(bob), N(approve), mvo() - ("proposer", "alice") - ("proposal_name", "first") - ("level", permission_level{ N(bob), config::active_name }) - ); - - // not enough approvers - BOOST_REQUIRE_EXCEPTION( - push_action( N(alice), N(exec), mvo() - ("proposer", "alice") - ("proposal_name", "first") - ("executer", "alice") - ), - eosio_assert_message_exception, 
eosio_assert_message_is("transaction authorization failed") - ); - - //approve by apple - push_action( N(apple), N(approve), mvo() - ("proposer", "alice") - ("proposal_name", "first") - ("level", permission_level{ N(apple), config::active_name }) - ); - // execute by alice to replace the eosio system contract - transaction_trace_ptr trace; - control->applied_transaction.connect([&]( const transaction_trace_ptr& t) { if (t->scheduled) { trace = t; } } ); - - // execute by another producer different from proposer - push_action( N(apple), N(exec), mvo() - ("proposer", "alice") - ("proposal_name", "first") - ("executer", "apple") - ); - - BOOST_REQUIRE( bool(trace) ); - BOOST_REQUIRE_EQUAL( 1, trace->action_traces.size() ); - BOOST_REQUIRE_EQUAL( transaction_receipt::executed, trace->receipt->status ); - - // can't create account because system contract was replace by the test_api contract - - BOOST_REQUIRE_EXCEPTION( create_account_with_resources( N(alice1111112), config::system_account_name, core_from_string("1.0000"), false ), - eosio_assert_message_exception, eosio_assert_message_is("Unknown Test") - - ); -} FC_LOG_AND_RETHROW() - - -BOOST_AUTO_TEST_SUITE_END() diff --git a/unittests/producer_schedule_tests.cpp b/unittests/producer_schedule_tests.cpp index 877645cbcde..de103654380 100644 --- a/unittests/producer_schedule_tests.cpp +++ b/unittests/producer_schedule_tests.cpp @@ -219,14 +219,14 @@ BOOST_FIXTURE_TEST_CASE( producer_schedule_promotion_test, TESTER ) try { wlog("set producer schedule to [alice,bob]"); BOOST_REQUIRE_EQUAL( true, control->proposed_producers().valid() ); BOOST_CHECK_EQUAL( true, compare_schedules( sch1, *control->proposed_producers() ) ); - BOOST_CHECK_EQUAL( control->pending_producers().version, 0 ); + BOOST_CHECK_EQUAL( control->pending_producers().version, 0u ); produce_block(); // Starts new block which promotes the proposed schedule to pending - BOOST_CHECK_EQUAL( control->pending_producers().version, 1 ); + BOOST_CHECK_EQUAL( control->pending_producers().version, 1u ); BOOST_CHECK_EQUAL( true, compare_schedules( sch1, control->pending_producers() ) ); - BOOST_CHECK_EQUAL( control->active_producers().version, 0 ); + BOOST_CHECK_EQUAL( control->active_producers().version, 0u ); produce_block(); produce_block(); // Starts new block which promotes the pending schedule to active - BOOST_CHECK_EQUAL( control->active_producers().version, 1 ); + BOOST_CHECK_EQUAL( control->active_producers().version, 1u ); BOOST_CHECK_EQUAL( true, compare_schedules( sch1, control->active_producers() ) ); produce_blocks(7); @@ -245,15 +245,15 @@ BOOST_FIXTURE_TEST_CASE( producer_schedule_promotion_test, TESTER ) try { // Bob's first block (which advances LIB to Alice's last block) is started but not finalized. BOOST_REQUIRE_EQUAL( control->head_block_producer(), N(alice) ); BOOST_REQUIRE_EQUAL( control->pending_block_producer(), N(bob) ); - BOOST_CHECK_EQUAL( control->pending_producers().version, 2 ); + BOOST_CHECK_EQUAL( control->pending_producers().version, 2u ); produce_blocks(12); // Bob produces his first 11 blocks - BOOST_CHECK_EQUAL( control->active_producers().version, 1 ); + BOOST_CHECK_EQUAL( control->active_producers().version, 1u ); produce_blocks(12); // Bob produces his 12th block. // Alice's first block of the second round is started but not finalized (which advances LIB to Bob's last block). 
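The version checks adjusted above and below all trace the same schedule lifecycle: set_producers proposes a schedule, starting the next block promotes it from proposed to pending, and pending becomes active only once the block that carried it is irreversible. A sketch in the fixtures' own vocabulary (block counts depend on the schedule, so this mirrors the pattern rather than exact timings):

#include <boost/test/unit_test.hpp>
#include <eosio/testing/tester.hpp>
using namespace eosio::testing;

void schedule_promotion_sketch() {
   tester chain;
   chain.create_accounts( {N(alice), N(bob)} );
   chain.produce_block();
   chain.set_producers( {N(alice), N(bob)} );   // proposed schedule, version 1
   chain.produce_block();                       // new block: proposed -> pending
   BOOST_CHECK_EQUAL( chain.control->pending_producers().version, 1u );
   chain.produce_blocks(24);                    // enough blocks for LIB to pass the promoting block
   BOOST_CHECK_EQUAL( chain.control->active_producers().version, 1u );
}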
BOOST_REQUIRE_EQUAL( control->head_block_producer(), N(alice) ); BOOST_REQUIRE_EQUAL( control->pending_block_producer(), N(bob) ); - BOOST_CHECK_EQUAL( control->active_producers().version, 2 ); + BOOST_CHECK_EQUAL( control->active_producers().version, 2u ); BOOST_CHECK_EQUAL( true, compare_schedules( sch2, control->active_producers() ) ); produce_block(); // Alice produces the first block of her second round which has changed the active schedule. @@ -282,14 +282,14 @@ BOOST_FIXTURE_TEST_CASE( producer_schedule_reduction, tester ) try { wlog("set producer schedule to [alice,bob,carol]"); BOOST_REQUIRE_EQUAL( true, control->proposed_producers().valid() ); BOOST_CHECK_EQUAL( true, compare_schedules( sch1, *control->proposed_producers() ) ); - BOOST_CHECK_EQUAL( control->pending_producers().version, 0 ); + BOOST_CHECK_EQUAL( control->pending_producers().version, 0u ); produce_block(); // Starts new block which promotes the proposed schedule to pending - BOOST_CHECK_EQUAL( control->pending_producers().version, 1 ); + BOOST_CHECK_EQUAL( control->pending_producers().version, 1u ); BOOST_CHECK_EQUAL( true, compare_schedules( sch1, control->pending_producers() ) ); - BOOST_CHECK_EQUAL( control->active_producers().version, 0 ); + BOOST_CHECK_EQUAL( control->active_producers().version, 0u ); produce_block(); produce_block(); // Starts new block which promotes the pending schedule to active - BOOST_CHECK_EQUAL( control->active_producers().version, 1 ); + BOOST_CHECK_EQUAL( control->active_producers().version, 1u ); BOOST_CHECK_EQUAL( true, compare_schedules( sch1, control->active_producers() ) ); produce_blocks(7); @@ -305,15 +305,15 @@ BOOST_FIXTURE_TEST_CASE( producer_schedule_reduction, tester ) try { produce_blocks(48); BOOST_REQUIRE_EQUAL( control->head_block_producer(), N(bob) ); BOOST_REQUIRE_EQUAL( control->pending_block_producer(), N(carol) ); - BOOST_CHECK_EQUAL( control->pending_producers().version, 2 ); + BOOST_CHECK_EQUAL( control->pending_producers().version, 2u ); produce_blocks(47); - BOOST_CHECK_EQUAL( control->active_producers().version, 1 ); + BOOST_CHECK_EQUAL( control->active_producers().version, 1u ); produce_blocks(1); BOOST_REQUIRE_EQUAL( control->head_block_producer(), N(carol) ); BOOST_REQUIRE_EQUAL( control->pending_block_producer(), N(alice) ); - BOOST_CHECK_EQUAL( control->active_producers().version, 2 ); + BOOST_CHECK_EQUAL( control->active_producers().version, 2u ); BOOST_CHECK_EQUAL( true, compare_schedules( sch2, control->active_producers() ) ); produce_blocks(2); @@ -338,38 +338,38 @@ BOOST_FIXTURE_TEST_CASE( empty_producer_schedule_has_no_effect, tester ) try { wlog("set producer schedule to [alice,bob]"); BOOST_REQUIRE_EQUAL( true, control->proposed_producers().valid() ); BOOST_CHECK_EQUAL( true, compare_schedules( sch1, *control->proposed_producers() ) ); - BOOST_CHECK_EQUAL( control->pending_producers().producers.size(), 0 ); + BOOST_CHECK_EQUAL( control->pending_producers().producers.size(), 0u ); // Start a new block which promotes the proposed schedule to pending produce_block(); - BOOST_CHECK_EQUAL( control->pending_producers().version, 1 ); + BOOST_CHECK_EQUAL( control->pending_producers().version, 1u ); BOOST_CHECK_EQUAL( true, compare_schedules( sch1, control->pending_producers() ) ); - BOOST_CHECK_EQUAL( control->active_producers().version, 0 ); + BOOST_CHECK_EQUAL( control->active_producers().version, 0u ); // Start a new block which promotes the pending schedule to active produce_block(); - BOOST_CHECK_EQUAL( control->active_producers().version, 1 ); + 
BOOST_CHECK_EQUAL( control->active_producers().version, 1u ); BOOST_CHECK_EQUAL( true, compare_schedules( sch1, control->active_producers() ) ); produce_blocks(7); res = set_producers( {} ); wlog("set producer schedule to []"); BOOST_REQUIRE_EQUAL( true, control->proposed_producers().valid() ); - BOOST_CHECK_EQUAL( control->proposed_producers()->producers.size(), 0 ); - BOOST_CHECK_EQUAL( control->proposed_producers()->version, 2 ); + BOOST_CHECK_EQUAL( control->proposed_producers()->producers.size(), 0u ); + BOOST_CHECK_EQUAL( control->proposed_producers()->version, 2u ); produce_blocks(12); - BOOST_CHECK_EQUAL( control->pending_producers().version, 1 ); + BOOST_CHECK_EQUAL( control->pending_producers().version, 1u ); // Empty producer schedule does get promoted from proposed to pending produce_block(); - BOOST_CHECK_EQUAL( control->pending_producers().version, 2 ); + BOOST_CHECK_EQUAL( control->pending_producers().version, 2u ); BOOST_CHECK_EQUAL( false, control->proposed_producers().valid() ); // However it should not get promoted from pending to active produce_blocks(24); - BOOST_CHECK_EQUAL( control->active_producers().version, 1 ); - BOOST_CHECK_EQUAL( control->pending_producers().version, 2 ); + BOOST_CHECK_EQUAL( control->active_producers().version, 1u ); + BOOST_CHECK_EQUAL( control->pending_producers().version, 2u ); // Setting a new producer schedule should still use version 2 res = set_producers( {N(alice),N(bob),N(carol)} ); @@ -381,17 +381,17 @@ BOOST_FIXTURE_TEST_CASE( empty_producer_schedule_has_no_effect, tester ) try { wlog("set producer schedule to [alice,bob,carol]"); BOOST_REQUIRE_EQUAL( true, control->proposed_producers().valid() ); BOOST_CHECK_EQUAL( true, compare_schedules( sch2, *control->proposed_producers() ) ); - BOOST_CHECK_EQUAL( control->proposed_producers()->version, 2 ); + BOOST_CHECK_EQUAL( control->proposed_producers()->version, 2u ); // Produce enough blocks to promote the proposed schedule to pending, which it can do because the existing pending has zero producers produce_blocks(24); - BOOST_CHECK_EQUAL( control->active_producers().version, 1 ); - BOOST_CHECK_EQUAL( control->pending_producers().version, 2 ); + BOOST_CHECK_EQUAL( control->active_producers().version, 1u ); + BOOST_CHECK_EQUAL( control->pending_producers().version, 2u ); BOOST_CHECK_EQUAL( true, compare_schedules( sch2, control->pending_producers() ) ); // Produce enough blocks to promote the pending schedule to active produce_blocks(24); - BOOST_CHECK_EQUAL( control->active_producers().version, 2 ); + BOOST_CHECK_EQUAL( control->active_producers().version, 2u ); BOOST_CHECK_EQUAL( true, compare_schedules( sch2, control->active_producers() ) ); BOOST_REQUIRE_EQUAL( validate(), true ); diff --git a/unittests/resource_limits_test.cpp b/unittests/resource_limits_test.cpp index 69e62915bfb..3bcd8582e4d 100644 --- a/unittests/resource_limits_test.cpp +++ b/unittests/resource_limits_test.cpp @@ -216,7 +216,7 @@ BOOST_AUTO_TEST_SUITE(resource_limits_test) const uint64_t increment = 1000; const uint64_t expected_iterations = config::default_max_block_cpu_usage / increment; - for (int idx = 0; idx < expected_iterations; idx++) { + for (uint64_t idx = 0; idx < expected_iterations; idx++) { add_transaction_usage({account}, increment, 0, 0); } @@ -233,7 +233,7 @@ BOOST_AUTO_TEST_SUITE(resource_limits_test) const uint64_t increment = 1000; const uint64_t expected_iterations = config::default_max_block_net_usage / increment; - for (int idx = 0; idx < expected_iterations; idx++) { + for (uint64_t idx = 
0; idx < expected_iterations; idx++) { add_transaction_usage({account}, 0, increment, 0); } @@ -252,7 +252,7 @@ BOOST_AUTO_TEST_SUITE(resource_limits_test) set_account_limits(account, limit, -1, -1 ); process_account_limit_updates(); - for (int idx = 0; idx < expected_iterations - 1; idx++) { + for (uint64_t idx = 0; idx < expected_iterations - 1; idx++) { add_pending_ram_usage(account, increment); verify_account_ram_usage(account); } diff --git a/unittests/special_accounts_tests.cpp b/unittests/special_accounts_tests.cpp index 58e9444b58a..aa769bcacaa 100644 --- a/unittests/special_accounts_tests.cpp +++ b/unittests/special_accounts_tests.cpp @@ -36,14 +36,14 @@ BOOST_FIXTURE_TEST_CASE(accounts_exists, tester) auto nobody = chain1_db.find(config::null_account_name); BOOST_CHECK(nobody != nullptr); const auto& nobody_active_authority = chain1_db.get(boost::make_tuple(config::null_account_name, config::active_name)); - BOOST_CHECK_EQUAL(nobody_active_authority.auth.threshold, 1); - BOOST_CHECK_EQUAL(nobody_active_authority.auth.accounts.size(), 0); - BOOST_CHECK_EQUAL(nobody_active_authority.auth.keys.size(), 0); + BOOST_CHECK_EQUAL(nobody_active_authority.auth.threshold, 1u); + BOOST_CHECK_EQUAL(nobody_active_authority.auth.accounts.size(), 0u); + BOOST_CHECK_EQUAL(nobody_active_authority.auth.keys.size(), 0u); const auto& nobody_owner_authority = chain1_db.get(boost::make_tuple(config::null_account_name, config::owner_name)); - BOOST_CHECK_EQUAL(nobody_owner_authority.auth.threshold, 1); - BOOST_CHECK_EQUAL(nobody_owner_authority.auth.accounts.size(), 0); - BOOST_CHECK_EQUAL(nobody_owner_authority.auth.keys.size(), 0); + BOOST_CHECK_EQUAL(nobody_owner_authority.auth.threshold, 1u); + BOOST_CHECK_EQUAL(nobody_owner_authority.auth.accounts.size(), 0u); + BOOST_CHECK_EQUAL(nobody_owner_authority.auth.keys.size(), 0u); auto producers = chain1_db.find(config::producers_account_name); BOOST_CHECK(producers != nullptr); @@ -54,7 +54,7 @@ BOOST_FIXTURE_TEST_CASE(accounts_exists, tester) auto expected_threshold = (active_producers.producers.size() * 2)/3 + 1; BOOST_CHECK_EQUAL(producers_active_authority.auth.threshold, expected_threshold); BOOST_CHECK_EQUAL(producers_active_authority.auth.accounts.size(), active_producers.producers.size()); - BOOST_CHECK_EQUAL(producers_active_authority.auth.keys.size(), 0); + BOOST_CHECK_EQUAL(producers_active_authority.auth.keys.size(), 0u); std::vector active_auth; for(auto& apw : producers_active_authority.auth.accounts) { @@ -62,7 +62,7 @@ BOOST_FIXTURE_TEST_CASE(accounts_exists, tester) } std::vector diff; - for (int i = 0; i < std::max(active_auth.size(), active_producers.producers.size()); ++i) { + for (size_t i = 0; i < std::max(active_auth.size(), active_producers.producers.size()); ++i) { account_name n1 = i < active_auth.size() ? active_auth[i] : (account_name)0; account_name n2 = i < active_producers.producers.size() ? 
active_producers.producers[i].producer_name : (account_name)0; if (n1 != n2) diff.push_back((uint64_t)n2 - (uint64_t)n1); diff --git a/unittests/test-contracts/CMakeLists.txt b/unittests/test-contracts/CMakeLists.txt index 70a3fc83e66..4a458969514 100644 --- a/unittests/test-contracts/CMakeLists.txt +++ b/unittests/test-contracts/CMakeLists.txt @@ -1,17 +1,19 @@ -set(EOSIO_WASM_OLD_BEHAVIOR "Off") -find_package( eosio.cdt ) +if( EOSIO_COMPILE_TEST_CONTRACTS ) + set(EOSIO_WASM_OLD_BEHAVIOR "Off") + find_package( eosio.cdt REQUIRED ) +endif() + +if ("${CMAKE_GENERATOR}" STREQUAL "Ninja") + add_compile_options(-fcolor-diagnostics) +endif() add_subdirectory( asserter ) add_subdirectory( deferred_test ) -add_subdirectory( integration_test ) -add_subdirectory( multi_index_test ) add_subdirectory( noop ) add_subdirectory( payloadless ) add_subdirectory( proxy ) add_subdirectory( snapshot_test ) -add_subdirectory( test.inline ) add_subdirectory( test_api ) -add_subdirectory( test_api_mem ) add_subdirectory( test_api_db ) add_subdirectory( test_api_multi_index ) add_subdirectory( test_ram_limit ) diff --git a/unittests/test-contracts/README.md b/unittests/test-contracts/README.md new file mode 100644 index 00000000000..8b03cc131cd --- /dev/null +++ b/unittests/test-contracts/README.md @@ -0,0 +1,5 @@ +test_ram_limit contract was compiled with eosio.cdt v1.4.1 + +That contract was ported to compile with eosio.cdt v1.5.0, but the test that uses it is very sensitive to stdlib/eosiolib changes, compilation flags and linker flags. + +The remaining contracts have been ported to compile with eosio.cdt v1.6.x. They were compiled with a patched version of eosio.cdt v1.6.0-rc1 (commit 1c9180ff5a1e431385180ce459e11e6a1255c1a4). diff --git a/unittests/test-contracts/Readme.txt b/unittests/test-contracts/Readme.txt deleted file mode 100644 index 7453ed0c6a9..00000000000 --- a/unittests/test-contracts/Readme.txt +++ /dev/null @@ -1,3 +0,0 @@ -test_ram_limit contract was compiled with eosio.cdt v1.4.1 - -This contract is already ported to compile with eosio.cdt v1.5.0, but the test that uses it is very sensitive to stdlib/eosiolib changes, compilation flags and linker flags. 
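Every contract port in the diffs that follow repeats the same eosio.cdt v1.6 conversion: the CONTRACT/ACTION macros and the hand-written extern "C" apply()/EOSIO_DISPATCH dispatcher give way to an attributed class that the ABI generator reads directly. A minimal sketch of the target shape, using a hypothetical greeter contract that is not part of the patch (the include path assumes the cdt v1.6 header layout):

#include <eosio/eosio.hpp>   // assumed cdt v1.6 header layout

using namespace eosio;

// Hypothetical contract, not from this patch: [[eosio::contract]] replaces the
// CONTRACT macro, [[eosio::action]] marks each action for the ABI generator,
// and check() supersedes eosio_assert(); no dispatcher boilerplate is needed.
class [[eosio::contract]] greeter : public contract {
public:
   using contract::contract;

   [[eosio::action]]
   void hi( name user ) {
      require_auth( user );
      check( user != get_self(), "greeting yourself is pointless" );
      print( "hi, ", user );
   }
};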
diff --git a/unittests/test-contracts/asserter/CMakeLists.txt b/unittests/test-contracts/asserter/CMakeLists.txt index f8bf4b8643b..c9c992df101 100644 --- a/unittests/test-contracts/asserter/CMakeLists.txt +++ b/unittests/test-contracts/asserter/CMakeLists.txt @@ -1,8 +1,6 @@ -if( ${eosio.cdt_FOUND} ) - add_executable( asserter asserter asserter.cpp ) - target_include_directories(asserter PUBLIC ${CMAKE_CURRENT_SOURCE}/../) +if( EOSIO_COMPILE_TEST_CONTRACTS ) + add_contract( asserter asserter asserter.cpp ) else() - configure_file( ${CMAKE_CURRENT_SOURCE_DIR}/asserter.wasm ${CMAKE_CURRENT_BINARY_DIR}/asserter.wasm COPYONLY ) + configure_file( ${CMAKE_CURRENT_SOURCE_DIR}/asserter.wasm ${CMAKE_CURRENT_BINARY_DIR}/asserter.wasm COPYONLY ) + configure_file( ${CMAKE_CURRENT_SOURCE_DIR}/asserter.abi ${CMAKE_CURRENT_BINARY_DIR}/asserter.abi COPYONLY ) endif() - -configure_file( ${CMAKE_CURRENT_SOURCE_DIR}/asserter.abi ${CMAKE_CURRENT_BINARY_DIR}/asserter.abi COPYONLY ) diff --git a/unittests/test-contracts/asserter/asserter.abi b/unittests/test-contracts/asserter/asserter.abi index b5bc251a5c6..3b78641f955 100644 --- a/unittests/test-contracts/asserter/asserter.abi +++ b/unittests/test-contracts/asserter/asserter.abi @@ -1,37 +1,41 @@ { - "version": "eosio::abi/1.0", - "types": [], - "structs": [ - { - "name": "assertdef", - "base": "", - "fields": [ - { - "name": "condition", - "type": "int8" - },{ - "name": "message", - "type": "string" - } - ] - }, { - "name": "nothing", - "base": "", - "fields": [] - } - ], - "actions": [ - { - "name": "procassert", - "type": "assertdef", - "ricardian_contract": "" - }, { - "name": "provereset", - "type": "nothing", - "ricardian_contract": "" - } - ], - "tables": [], - "ricardian_clauses": [], - "abi_extensions": [] -} + "____comment": "This file was generated with eosio-abigen. DO NOT EDIT ", + "version": "eosio::abi/1.1", + "types": [], + "structs": [ + { + "name": "procassert", + "base": "", + "fields": [ + { + "name": "condition", + "type": "int8" + }, + { + "name": "message", + "type": "string" + } + ] + }, + { + "name": "provereset", + "base": "", + "fields": [] + } + ], + "actions": [ + { + "name": "procassert", + "type": "procassert", + "ricardian_contract": "" + }, + { + "name": "provereset", + "type": "provereset", + "ricardian_contract": "" + } + ], + "tables": [], + "ricardian_clauses": [], + "variants": [] +} \ No newline at end of file diff --git a/unittests/test-contracts/asserter/asserter.cpp b/unittests/test-contracts/asserter/asserter.cpp index 5c5e95a51e0..3402e9d9940 100644 --- a/unittests/test-contracts/asserter/asserter.cpp +++ b/unittests/test-contracts/asserter/asserter.cpp @@ -1,28 +1,18 @@ /** * @file - * @copyright defined in eos/LICENSE.txt + * @copyright defined in eos/LICENSE */ -#include "asserter.hpp" /// defines assert_def struct (abi) +#include "asserter.hpp" using namespace eosio; -using namespace asserter; static int global_variable = 45; -extern "C" { - /// The apply method implements the dispatch of events to this contract - void apply( uint64_t /* receiver */, uint64_t code, uint64_t action ) { - require_auth(code); - if( code == "asserter"_n.value ) { - if( action == "procassert"_n.value ) { - assertdef def = eosio::unpack_action_data(); +void asserter::procassert( int8_t condition, std::string message ) { + check( condition != 0, message ); +} - // maybe assert? 
- eosio_assert( (uint32_t)def.condition, def.message.c_str() ); - } else if( action == "provereset"_n.value ) { - eosio_assert( global_variable == 45, "Global Variable Initialized poorly" ); - global_variable = 100; - } - } - } +void asserter::provereset() { + check( global_variable == 45, "Global Variable Initialized poorly" ); + global_variable = 100; } diff --git a/unittests/test-contracts/asserter/asserter.hpp b/unittests/test-contracts/asserter/asserter.hpp index 123e29a862f..625dc2e4cd7 100644 --- a/unittests/test-contracts/asserter/asserter.hpp +++ b/unittests/test-contracts/asserter/asserter.hpp @@ -2,13 +2,17 @@ * @file * @copyright defined in eos/LICENSE */ -#include +#pragma once -namespace asserter { - TABLE assertdef { - int8_t condition; - std::string message; +#include - EOSLIB_SERIALIZE( assertdef, (condition)(message) ) - }; -} /// asserter +class [[eosio::contract]] asserter : public eosio::contract { +public: + using eosio::contract::contract; + + [[eosio::action]] + void procassert( int8_t condition, std::string message ); + + [[eosio::action]] + void provereset(); +}; diff --git a/unittests/test-contracts/asserter/asserter.wasm b/unittests/test-contracts/asserter/asserter.wasm index c639a6db70c3969350189b0e99498a1e91ad2ca4..050b3caf56ef387fc90a0689fe847fe952fcaf0a 100755 GIT binary patch delta 1773 zcmZuy&u<$=6n-)m$Zcp{-txuuz18{r_e3N3QgsuG?^rRt$VY)-sAi)Juap3|7%J;@iNfEKKv-5tx?|skS zs^2(JrmVjzh)A@j&eP`9zCacmwAnaX$c+s;FA7oMLGo)=Rh^5MCOX~zQn#J;`jZ4;1;o^p`>dL$W6I!Wb39CLo`KaNnn?l}d%^RGkF2Lh@4m}3ty~7>evUij}-fn{?Y5HvOwW5Fg1>A;0Dn|oeLXZ*+99uyiOH<*_SsY zv9X&Qje>?aIQk}dTO_07;m^ga2SGLEQ&yQ8;-cz0S<+ilVG}mO!cs#9iJT`uO)k#S z2Du;cT%LsEXgl=9o872yN*s zTSJ91OL-ch288yVz__sSvW+Z4M~M1((A=O8$_$BZ#Div8Dj9gP5eCj6o%cj!7Cxq) zFMCWs(9qM5$MyfblLuRl4$-g$&La7B1`k|y;sj(?)ki0zRSfF0=o6tv=i*sW8@&~~ zrxQI%+ygV6%n_|ieAY&H<3aK4e|0f*Sk&iqNG2qGN1$fxCLQArFwaynZ8@HJ#6j;c zE4_sTb}?h&8?G>PekRiO{1`w(ZXROkG$ffN9`@h_adeRmdOGkw>&?qD*Cil8azWe{5@rsQzUQ73Amz$381+<6-%4)1Jv>o z&TB2L7X!})eR8xpRXhHAwzSe&w1ckAR#v(T*`Q;4*@aG@{MV8W`Bw+;*gPFvz^?{! J>EI1Z^bbCKMqvN| literal 4641 zcmaJ_O>7)V6@FFKJw0*H*p0MAtR0}bV-_!LSh7J`v>f7!l!Pdtz#<{Bmr2`hZ|27{ zo*#RYRW{>95g>8NVGrSe!~qG3RV0pkLI&lq7mktOh=e$S!~rCbRvW&rYWzclh-G(I z)qAhL_r0I0j+EmqB_iqv*$t5!>aN_-)2ZALHRTKbnyG(8ckvIyc*++@`H`O9p~8D% ze8+Slu{DV}UE8X**0*m<%t2hP4PZtN7RJ>iEF_TH8thhKRNo%8 z`;&327dD)0wRZZ0(Z*!+HK|cGX|<~1;i%qfZSC|Xo986qB8KG#WRHg9owdUuRQuJT zYNOTKY~R}Hk0+bsw$!QK#8hukPB7V?Yz6=)yx5UUI+Hn)fL3*DQuQ}BHfj9C%u<*e z`?#}VQZ~7SnsaiEZhCob~!4y zs!25(<8{4TZ3GW2to(nm$Sl4n&G{(O3k&mn=y^SF=HqA~T9}VwwXKwjjS^KmX{O5U z?Mpfp_m}(X;E_C54F-EKx~&Ba-K@&()>=8Pj6wi?vVb3><8phucUwMNYrVD6`_8T` z$E~;e?{uW#v+T%eE|ggK!V7-kdy}(g%U=8KervbvwI=csr_0Xs(_j2_u=o7@3g=<& z`T3XUJ~;Ivfc`=$Jv}Wh>lS8NvQt)NqF1fDxVN|W;nRXdr zPR_{Qs+*1>nuz9P$@G*sd8=AG9c;IOc%!8i>?BW$3mvyT>=uynpHS9Vom3~uJeua#>o};r~5!)#VV(=Xpu`! 
zL5SkwqE7aC%mqY0Gpa^Jwp-|B&_Uy$0W0#l$zXs|nO!8x<3Cof=ok1WD@?bDE|ExA zpCR+gVk5VGy&2;%cMVKwyr{_=AXYmy%HxKHPiF)q4V#1~-%x~G@8qUoQV3^szNlqI z9z(eXC3>fA9DI%74aD-q#(B-G+8VDIrW)4SnyU57_ZE@9cVTcdC=LH zfKOX<9;BLV8Sl;_U96-()J9L$T{E?^qN7|p$<0N{BZLuD#Xg$ee-Hz7SKW_%P6f;c(4Oztz<48U7`Cf{-Hxv7`G06XqJ^ zJuwqlL3&+XDw0c0#3Dh$X-1QEk_$2mabsHH2iD0XU4duFE7g=nl%m4(1izY&OZJM7 zB`8gRNOiC&I7WCnMVy->d$asJu*3Lc!wAfA=YkOo{wQ>`Zm!_~RK5w^U>sQjjtD5{ zp4JDqW>FMS0>?J-7+IGpTV6=hucJG{0`OZ|DpYp1kfxYbpcV%+4^|7!ex^Sq)W9lK zQ&>)GFqjqTyil9=B>P$#W|!0MVb4YHfPhAKV$At_4ci>-XR=qJs#m$8-wc+p$jyQZ z{CO2zYHX=lCJj7SbR*|F8z+F&@dwaedLGJbekF}1izWTLR>@(a00(*yHr4=-YC!av z;xMXqbK~an$cKzsqut#hLW%$YVU(K`t1;9NLjKYlm$E~v(3vP3`&xkz%Hy&>U1B$P zr3sSJf0!ycoJ6AL5qM58izZC~rV*n;u#Q|)RAz%zO_eUd;BlZ(VPU`!EugW@EL?&K z#y!}^ZWKJWiae$lOjd-|fLBt3%|B5?Ti>0{!van)bf0LE`_>rU2;B$os0FdYh6Fx% zuGH}$;?RZ{m~$rd^BM0oaX;Qi^N{qMQd4&_H=cDQf+r2Te6>(M>i(F5JE$?UkY;DV ze)>;^^2QvpTq8%UAm=Umlz>eTka>c#vlMqHk=jm3RfsDK5gFH+Wi5~abH|!tp)WJb z+#QrGLmateNDOEjIA3lJq2_L(Dp^H1FF*!Xx5XRv`8#c(G%~<25 zSrk^27W`@o3jV-wFe$!^X`1w1O}Z>koz97Sv=0Zf*V5lds=*2cKEVUw+joUVE@kMj z;`V9BY5dchwd`#b!7ahMttB6LeMjYYkX~3K}d^D!q>M;SCRY0ulHI zKK*er?65^c9=VN{)u1YWuP)iskbS#FbOG0U`h#PtyDE!-itcp*Rf;Q) zH;cZBIT%Chc%Q1|7FpKl<&iH}Z){k^Ztq|I5Y~W>42zKfY-t=Ky(yIpF~>$TI~KJ+ z1cM(<0d{i?JMPyL9?ZlZg@i|@hOn8u=DkQdDEb1)#wY}bVmMT%L$R&XA&ZGCxHJT3 zYH$X(gap|PUyPgje&W4h9v-9`&Rr%+jN-oB)YuAv_DNbWYK6kl04uzagph`^ad)ih z;#S~;mUt5^nwiY8G!gd%)qs5DF3g8;%7@x65PFs~?#R>@4~pcYlw*IK1Z*}00i7@) zW0|r)vW?IDU}1+_*DQsBP#+=;mB2k_GRF~OUysq(I}l?3eaEDSJXIiH4NEzV=T6+3QZ?IPO?Dxx2yIkv4_G-U9Y2yp^-D<;b4+bNA+zPML>09{d zmaVdfuk+UCXt0Gp6=dEi$9AI{Dku1;7hZqUnE&M>{{d99P9y*T diff --git a/unittests/test-contracts/deferred_test/CMakeLists.txt b/unittests/test-contracts/deferred_test/CMakeLists.txt index 053cbb8683b..9ff36a6f57f 100644 --- a/unittests/test-contracts/deferred_test/CMakeLists.txt +++ b/unittests/test-contracts/deferred_test/CMakeLists.txt @@ -1,6 +1,6 @@ -if( ${eosio.cdt_FOUND} ) - add_contract( deferred_test deferred_test deferred_test.cpp ) +if( EOSIO_COMPILE_TEST_CONTRACTS ) + add_contract( deferred_test deferred_test deferred_test.cpp ) else() - configure_file( ${CMAKE_CURRENT_SOURCE_DIR}/deferred_test.wasm ${CMAKE_CURRENT_BINARY_DIR}/deferred_test.wasm COPYONLY ) - configure_file( ${CMAKE_CURRENT_SOURCE_DIR}/deferred_test.abi ${CMAKE_CURRENT_BINARY_DIR}/deferred_test.abi COPYONLY ) + configure_file( ${CMAKE_CURRENT_SOURCE_DIR}/deferred_test.wasm ${CMAKE_CURRENT_BINARY_DIR}/deferred_test.wasm COPYONLY ) + configure_file( ${CMAKE_CURRENT_SOURCE_DIR}/deferred_test.abi ${CMAKE_CURRENT_BINARY_DIR}/deferred_test.abi COPYONLY ) endif() diff --git a/unittests/test-contracts/deferred_test/deferred_test.abi b/unittests/test-contracts/deferred_test/deferred_test.abi index 27c6c50cd0c..37c9eff9492 100644 --- a/unittests/test-contracts/deferred_test/deferred_test.abi +++ b/unittests/test-contracts/deferred_test/deferred_test.abi @@ -1,6 +1,7 @@ { - "____comment": "This file was generated with eosio-abigen. DO NOT EDIT Fri Dec 7 11:56:43 2018", + "____comment": "This file was generated with eosio-abigen. 
DO NOT EDIT ", "version": "eosio::abi/1.1", + "types": [], "structs": [ { "name": "defercall", @@ -53,7 +54,6 @@ ] } ], - "types": [], "actions": [ { "name": "defercall", @@ -73,6 +73,5 @@ ], "tables": [], "ricardian_clauses": [], - "variants": [], - "abi_extensions": [] + "variants": [] } \ No newline at end of file diff --git a/unittests/test-contracts/deferred_test/deferred_test.cpp b/unittests/test-contracts/deferred_test/deferred_test.cpp index 3d7392e34b6..54c02ed4a95 100644 --- a/unittests/test-contracts/deferred_test/deferred_test.cpp +++ b/unittests/test-contracts/deferred_test/deferred_test.cpp @@ -2,62 +2,32 @@ * @file * @copyright defined in eos/LICENSE */ -#include -#include -#include +#include "deferred_test.hpp" +#include using namespace eosio; -CONTRACT deferred_test : public contract { - public: - using contract::contract; +void deferred_test::defercall( name payer, uint64_t sender_id, name contract, uint64_t payload ) { + print( "defercall called on ", get_self(), "\n" ); + require_auth( payer ); - struct deferfunc_args { - uint64_t payload; - }; - - ACTION defercall( name payer, uint64_t sender_id, name contract, uint64_t payload ) { - print( "defercall called on ", name{_self}, "\n" ); - require_auth( payer ); - - print( "deferred send of deferfunc action to ", name{contract}, " by ", name{payer}, " with sender id ", sender_id ); - transaction trx; - deferfunc_args a = {.payload = payload}; - trx.actions.emplace_back(permission_level{_self, name{"active"}}, contract, name{"deferfunc"}, a); - trx.send( (static_cast(payer.value) << 64) | sender_id, payer); - } - - ACTION deferfunc( uint64_t payload ) { - print("deferfunc called on ", name{_self}, " with payload = ", payload, "\n"); - eosio_assert( payload != 13, "value 13 not allowed in payload" ); - } - - ACTION inlinecall( name contract, name authorizer, uint64_t payload ) { - action a( {permission_level{authorizer, "active"_n}}, contract, "deferfunc"_n, payload ); - a.send(); - } + print( "deferred send of deferfunc action to ", contract, " by ", payer, " with sender id ", sender_id ); + transaction trx; + deferfunc_action a( contract, {get_self(), "active"_n} ); + trx.actions.emplace_back( a.to_action( payload ) ); + trx.send( (static_cast(payer.value) << 64) | sender_id, payer ); +} - private: -}; +void deferred_test::deferfunc( uint64_t payload ) { + print( "deferfunc called on ", get_self(), " with payload = ", payload, "\n" ); + check( payload != 13, "value 13 not allowed in payload" ); +} -void apply_onerror(uint64_t receiver, const onerror& error ) { - print("onerror called on ", name{receiver}, "\n"); +void deferred_test::inlinecall( name contract, name authorizer, uint64_t payload ) { + deferfunc_action a( contract, {authorizer, "active"_n} ); + a.send( payload ); } -extern "C" { - /// The apply method implements the dispatch of events to this contract - void apply( uint64_t receiver, uint64_t code, uint64_t action ) { - if( code == name{"eosio"}.value && action == name{"onerror"}.value ) { - apply_onerror( receiver, onerror::from_current_action() ); - } else if( code == receiver ) { - deferred_test thiscontract( name{receiver}, name{code}, eosio::datastream{nullptr, 0} ); - if( action == name{"defercall"}.value ) { - execute_action( name{receiver}, name{code}, &deferred_test::defercall ); - } else if( action == name{"deferfunc"}.value ) { - execute_action( name{receiver}, name{code}, &deferred_test::deferfunc ); - } else if(action == name{"inlinecall"}.value) { - execute_action( name{receiver}, name{code}, 
&deferred_test::inlinecall ); - } - } - } +void deferred_test::on_error( uint128_t sender_id, ignore> sent_trx ) { + print( "onerror called on ", get_self(), "\n" ); } diff --git a/unittests/test-contracts/deferred_test/deferred_test.hpp b/unittests/test-contracts/deferred_test/deferred_test.hpp new file mode 100644 index 00000000000..1e5aa22681b --- /dev/null +++ b/unittests/test-contracts/deferred_test/deferred_test.hpp @@ -0,0 +1,26 @@ +/** + * @file + * @copyright defined in eos/LICENSE + */ +#pragma once + +#include +#include + +class [[eosio::contract]] deferred_test : public eosio::contract { +public: + using eosio::contract::contract; + + [[eosio::action]] + void defercall( eosio::name payer, uint64_t sender_id, eosio::name contract, uint64_t payload ); + + [[eosio::action]] + void deferfunc( uint64_t payload ); + using deferfunc_action = eosio::action_wrapper<"deferfunc"_n, &deferred_test::deferfunc>; + + [[eosio::action]] + void inlinecall( eosio::name contract, eosio::name authorizer, uint64_t payload ); + + [[eosio::on_notify("eosio::onerror")]] + void on_error( uint128_t sender_id, eosio::ignore> sent_trx ); +}; diff --git a/unittests/test-contracts/deferred_test/deferred_test.wasm b/unittests/test-contracts/deferred_test/deferred_test.wasm index 50ee1c13b4b379b53aeceecf1642f259bd6e47ac..eea70b8dc6ef0738a6e760c5705d1b7c5590f8af 100755 GIT binary patch literal 8204 zcmds6TZmm(8D5utIg>q;*=f@_O{1>8I}NAO)@hSYVrg+!us5p~iw}7aXHI8Mn%QSE zGnqL_Tah_qQK+I&1hMr+r9wckYPByFBn69N`%q8|Vi6ytN{LreC`GK__pg0s&cs~W z2SJ>WbM{{AU;p~o_uto1t<_^%DWyNPV@9c&xVP5p_0){!|1~vZ@uqt-hGQx`$S0;S zrq?jIHe=V;@Q$VO%_V9k>VXsakQ~$IO>>!TFPk(UYTF) zt}3(nvZyRyhF99hPt32hr&}kwN5fDm#3$y%TXL*@YSy0yC6Ug@fs zLz!$hzql~J*bXCkYqmSTv^YK2>b9m==Re(62?_I+c57~WU{0kR-Z98>dUk29tr{GU zTlX(v55;G<+U`!@zpymhnLXN?Uz}cUt+bA{yX}=#JkEA-N)g39a49m`aTmEQuPiMu zt+wa*Fn3}Wtke*<8lF7?*cQ9f-T7l}wT(kN$(C}TY!6TiD}n=V0B7$tDjOb&w~xkY z8mBq_Qv8jM8iV)Pm?TQKjT#ljdRc27Tdj1m%l5QiKdrK!@^3u1@s$qgxV5~z@UXgk z+mj!-CQ|ui_ARaQ%barhC0!o^TIEbGkE84{+<}a>tBEU^{vD2oMbhD^Dhm@_j|*V*dwK9a0Aj`1!M&kN@0!RFmctm{hl@!!9Q2C&SkQB{+2YbGP+Q z`CF?=a~Iz20Xq={vOdrEmWm^)kHs@Te&VJBwgF}nmCq}u@KGj{viGf(3r`OF9Rw>9 z;p0(Vs6Hg<;M#Qx{x<}`qP zS6FtizYKD0a14a`*ls*LrmJz4{mtl@I&huRLIu`?cFb9zVslR|r~*h8iJ1ah?T4!- zgwsva=#(iXPJMKugs$+X?;X)#kXOD@xfEC>F7i>gDkC1q5{*23?HuF@;H&Kx#}b!@1hHeW)4h1@`S;DjZB(2rFv>>Y&S*{C{b zd=F;Ft1u7_i`|TH1}DD4^^6uZyqbH*8t4_@x>?h-HWYW#di_+ zDH|>j8UOUFFJ5^Mu!@}Vx?94yTy*zUWqkVxfQo%{A>Uh=VFO1K^%>%)xI5r8d676x z6mn>E4BK66RFT5;#cMPK4nvLpkb&^cg{mCd0Ds6OTs}lqYkbr?86xB=)%%+hJuMkmCM2tklcnRfCDcA9(Ib4 zkdMXz#qx9y)utUII^gwiMccr`Q6VycXrXe3EDecb>q3d4kdrQ{*kj7voyK8y1fB5p6snlgK6cm3)u5lBY{>I`DyCV2<`LU*JG@9P2Z> zYB$bjWJL)sV*VY7qyl#p!WMjDl8(V6MB+i&;}($$L_l())DWRIr2W905Lrhng6UZ3 zxrjh%2f?#j!HWveyGWF!5SX{B$2d&{2~^l|d~nC+IXez)6$!rFN&-(r`7QMnJQu2%q%CqOcF3;G7<#8(eiP0gNQ6mAbMro+1sI(o2yZ|VJqeqDH z4iPT#y^o9-{}%qzBE=e5M5i?VUl_0qY)J@g1``Pa8*+`wm{bte=sxhPLW!=C#g)T9 zD3GDkQg7P#;K?IuIAW<$q>L$>5zx4_Lb!wep`{K=21F=0bzwL8S+Jsk)O<3E2OT2H za_2I^X5l=jTx_Q>H~^L~dBJjEHb`FL@Qk{_8IcPqnPR|9MMheR&YPoI_6-zwsN3US zeGcHT4dn+``m8Fz@n$nD(iw_{V0I15;1{~l`#{}cQ3IB zCxd$j$1(^7*gi&vB^xAXMgt`728w_SG6ILxB@FNlzkHl=5t<|y$*g4pFJN*_0V7j? 
zqK2qU28L=_9;;?W8!BniS{pSzXyaP+?0G+WCP(Sn{ejand2M>AmR?RgSujQ!|4Bpb z$_zWJ(uLBi9*dSxWz;V6+Gg1J^mr29;jh(cAsd3i0xtG*22xdSSk%s+NlSK<($LJF{F-gJP9!o54{hU#A5{YGN#qv*QQ2 zdMDk2mjX%#Qp1r!fROycFjs`Mh_O=EztBlwfGtPliC9&t14{&Tgi~3Y_%VYpXSO(L zE{P*f=qofdiPnZR+WH^-+Rdaz;Bs+7l(>BtL=Jw&%(^dPh znh|pArbP@AioO;%55chlCPZu4%ZwR9v&@mk{CfE6N5%p{2Df}XEP)`m2CD- zIAJ@9upU+?L}I1_Gc!>$1PY++-tZE|F9szMCNx8g-$Z6(I--FQ-f_rD&4MZ&T)Y6N zG+-?%&d&Q)6DdMf5}zQG8I%w*ltrv$otB$K9Boy}8QOzru9!_buqZ zyXtOp&6+yS#S!YfsH3-O)J>G{0v;oV`bt-$iW5?~qWX*I=sQUrL}-N6I#Qgou%&_o zw=$JnCA_9D5=lqG3jPdT7EF=agazNrn6?8n*`%v!A$e0;ZHu#YcpV1NN}2CgL|3e| zbX#yYdjVQ$H(H|PsrjD%3q-KY}dyB3$B035sEE6DAD~e8M zqHMf|yX8tUVA?2TDbbl|-tmOrM_DwPmk=luRZ;?t%$WsO3gP?yJuK*564=t^2Vg!U zjyjY=ZBGkT^LOhGlgZQg=6sRYM!q?a#rLLT{L%kG-BSA2x>eD$B}#LPp@M%Ax47oo zE8QY`k=z15uu^u!{14L3;rMXDmu^?(i~W^TnMyw90zv(UcU6gA940&HFxxEoqh1N+ zO=vIn=GQsxwxp6)ZXBJ;7vkH|7>roJD=#%2?1;DzG(&u8al4g+LTJc<#H5hLEo9`k z*KeT`)vjZZK1IYIQF0qXAnoQWNvX#_nm1gLlTo|;qNinAx{e=KJ0Z1RHE*jTUgcOG zKl3)>OV5OoFvYjDgA^2YQ%Vb7I7}%2cx<>HH&l{H`nItS>F@maf1T~GhVxH+?!u!& zA%T;b!knPr;{3azMv(Agkhvhs@UO4B{0{Fo!i}BO(syL2mspTe+@V4eVOhZ&v-Wf% zFK&a3{7p|?#~9^Jf@pz8(o25q$e)mXaUUDerj~~rahc#IC^-y+Q{{xB`Z5L6E6&9` zn{-tj9q_4JqSL9$LTgntP%qLKdUTa-lmpg{Av}^b6Cy#}T8lpU7&RLSva;`bgtDGP z8W?3NYKXsk5a^#2t*c= z(NZed2y$yxEkT*xxT@-6MpZL4^UqY5lWtiJ^*UTy$4cyS(8JB^p`v|+6-s_EQ~B^T z^b3p8-mkWCe|LEHlIkrV#2f?Fc7m4e<3$e7{f_j4C5&luJhlT?O(^5RQBI5BGDT{j z0**{7zaR5a(8f+!0&mFE`UtD!c4Ag-{Eb z!O8C&UWlU>PmCMc;n37*xX4HvIQQM-38+xC8L6e< zl^vVbO0E;X=r-Z8(l5{sGNrW)1Xck$Q+qTsGZVfA(5BN1Ezsg}0uJC(YQRZs~@kQd~?eb$Y;$ zvXL@IBa1@zpbij6Hm(v~Qju8RUI(fzTJ!(NZZMJMA6j1Go3-V&@8OpyF1O;@`>4#Ys;tk=`nq_yI#)q*7;V8hru$-7tfUs^@h851jj zD~o){#v%jN9rWe^|6{;6?K?dl_+%}{G2Tq<@I>7*+2<3VF zl&)UFw>tUW^#Pv*K3vYPL)UV-xXe8SLR+`wv$FZQa*1#1*v`i&`CWrk^J{}1AMu?k z`Q&bmj{u>2e#zFr%+k{GAZ~?vO$|O;CQp2OVVe0 - -using namespace eosio; - -CONTRACT integration_test : public contract { - using contract::contract; - - TABLE payload { - uint64_t key; - std::vector data; - - uint64_t primary_key()const { return key; } - - EOSLIB_SERIALIZE( payload, (key)(data) ) - }; - typedef eosio::multi_index<"payloads"_n, payload> payloads; - -public: - ACTION store( name from, name to, uint64_t num ) { - require_auth(from); - - eosio_assert( is_account( to ), "to account does not exist" ); - eosio_assert( num < std::numeric_limits::max(), "num to large" ); - - payloads data ( _self, from.value ); - uint64_t key = 0; - const uint64_t num_keys = 5; - - while ( data.find( key ) != data.end() ) { - key += num_keys; - } - for (size_t i = 0; i < num_keys; ++i) { - data.emplace(from, [&]( auto& g ) { - g.key = key + i; - g.data = std::vector( static_cast(num), 5); }); - } - } -}; - -EOSIO_DISPATCH( integration_test, (store) ) diff --git a/unittests/test-contracts/integration_test/integration_test.wasm b/unittests/test-contracts/integration_test/integration_test.wasm deleted file mode 100755 index 39caaaec3d653565dede23e34ac354c2d78478d1..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 6889 zcmb`M&5s>dTE@?*Q+2Dl>t1)+v7NTtA?H@i&_){MQNXrFBgLnsbYdbj%n-52=K8w( zChptcx33)s8Q%<<#VkNVVgo3PY*0iX5)1wSkC~51Bcy=Hk`Y@ZR!B%Jfbe_Hx!vP2 z5^TtfyQTV;|k_=sr$6Zjj|kBD2|}nbBp&#cIgTO!jeh%8mB!H^YtT*8bjLb2uFij<$Yh zWU@Fs84WiFPt6&p;qGYvXls8kJUSXprY6@=XLq!_aqylg;{0TEcyDVm8Vv7E@0yN= z7PpSjv$20~Z)!>n)SGt(Z*T2w4z{lSs;M;6Y}}hnMtjr2WVA8bx<8tjg}4&IcSh4F z7qu6|JNs;FHQ8nIXu8iiBbHF;qVY{r8%wsYRiY4-R(TGfSPpTbnz-dEcxQ zgSYnH9h(cf4#(!ACWcI0IvXc`Yx=dX4R^NQ*&Ey+?hK~p1q~0!&kp~>xuoye@fS6Y 
zx@X7zLTOE~wQUyv;8!kT@tfApUNWKCZrx?moXT!`>&>@&#%F8q>%Wiw@vqnQ=z_VH zb-CDUS>>H~S6p@78KkXu&FPDEFip#JGrnxE-NZb~Bh@UhA2V^%uJq6Z?y`WBC8;e}3`QIhz$7Q&tO$b+>nF zsX6`J*)w3M_MO!Xdwi)2Ci(n|TV5+3wnSptSAJo=cUJavFV}t{+#R@2SP@p|~r7{a>uT=z_^oxCs5qzpd(qrdsl zPyc*Ijyf&Dc)fQ%IF`SN7%RPL&OD=~dtOuILAsl9e<5^`e*r6b{>+S1g7R>dyRQ5S z%iLA*4uzJ1YTP`hXEXFO^khR9w>x?fj z4yYEnTp$d`5h5ITAsZI7x@LMA(^j*ptu)YOfIuyT?$}ontD2S2V-T+vxIM+~IYOt9 zHerdUv)cT|SZOkfUfrh}80WU*YpF1-JI%;xNq%ks_);AaHNTvyL&0>f4mp=@9iHP- zCy~Vny6!>}sgg)p$C!5NsJ5uX85ZQpSyx9JvpQN0W@U@1>z~3yGy905dQ4HdJ|#tA zWvf|E8E`TE&M~SR)0VGNvSs*AeyeAF*?gs+bN6zlMVwQS zT}>BNUo5#Yly8Ked|w5tn)6wx8g~&54-Q)ws@I;HBk!6XCkd$F=O!CXHuK3mT+*YE zL%D0sGS9x8ql@xy01yTUSJ|~}ISTnA*n4Bq!4-F-&%!J2Y9GK|ajX3b=3@F#jyuBP zJ29o58h0yD$-rr25K2)Ybj=mz6VkbCAjH3i`H62P+PX?$*oELAPhBpz*n-7^XR+R6 z)M>K^8t`_V5a&En!v$PJpwa6k7YCJ^0%E+%KE7D0y%i}nRhx$w?+1Gn{z}1B?yyD) z!>^bytIXs-mno}}rn-nv+z_l)-CfIk@Xdv`P+>H_AaT@V;pa7cl0R@VPfbkIwd^ux zQL=>!_WVUw!0pRuEan#|r0pmbPBYzvuNa9|wAMmQtmp2V7+rZpE5%wy#C9Q`Ll;Q& z)k&_ulYg^k2@-3`xl4ZlhofYhbMqL;cjbpnE|cM;jR;xNtBdMASE{WFJ&!1Apb@J- z$FZ(I%4|N*XX-+k+KR=kruc}GTXeQcT$f`&(&l+W9Vi8$u`K9UtRX~^r$xowDMUn0 zqCiqMgO=6It0tlvg5y$*u%v1guNc`Rn!=+P;a1RLuFk&*9+F`nJDtw1VxATBb#@a4$` z54_t}&+I#Qldu=s7LrlPSL!5H54tKmD`~J+6AQ6$A|B$F!UMRBp>)j(Ld_VSiXFqo zJf^tP*vKcs{Yq^$+Om*rap{RfY`0Z#_de|}(xswB*ub*IweoxI;+jrLHSsQeN3~wT zh~6>Jn6fahWk)Sn+LHs%aV=M3-6HiTF06`Ys!;xV_~3~^NfD(=Wfm`$?1J{Se3AUX zcGc63owQU+m0;a2N?WrCfKQB2Z7yn6^A>gI@V2=!A9%Az846izk8sL95DI<753pXNy@>ERaM%wE1>e-i3O|PAB}u7oU6~G69!#{lT z#V@~sMLM(DDL^24w^r1z2ocENX)g5h)T?x!%N02jZ6L=&oK3kD;V$pAz{sX_S)UOA zl@)Y>XsoQLQRhSzQfiRUP9BLoOZJz_T@D`M@4f|gnTVX4ipS15a1x$`C8i_o3$y|t z&j;hUv7V*E=Y1NP&uImzfEk+a*)QisB(KUo7-o?)X+F7Oq*I(3ft6z%CY3TN9wcez z(IjS{ro!xm3TXyvZO2rY`%-oYrjaglX6ir7Tt?BmnXJKD;_TK`+S;68$(nvgq3{K9 zxD|dJnKGWvvacHQO8PQoJnqIG;Y$Pd!TnWDl}Z`jw}-JzB5cTE-jYMK zw&gap)xujJw=e{OP=#za&(CLxX3A6Nwb(B3fL8Y#5>&9DP{<^a2(eTN3Movr@i3%v zl{@T&)}NhWSjSK*u55k}K`3fsUhtFx=1u+2ne9Zh%XGFEj@yVD_)*4tE4y975de?w z5EWi<676L{Z#Y)KQ@@5`kqQ6Bm{AR=A7}T72Rup*bYAUczH_`*ZxDD$Ib<9Sy%Y5N z8D4cF`!Y$cl!h4|3u+NV;xxj7JDg1^7yjwwmrAHvKulX8AtEP93gQ2)dahN+m=t1R zF}dBrsqMa%gTcNY(}+2#HjliIO=5*P6ox_hO*0PLgeJ@W+@_A>|>6(RWQa_4!~As5`6{)=6H%w zhrzUnP?1f-U?q}>Wl=tbE3}?4c8mx)iD%>Z(kCn>l`&T>IHnNUSU)F%$6h_t#pgnxZ&ufxKpk7VAyjVRdt%K4r98AvBzn(&$RPjaE*ushcB^ z>3oD6`u7Ig9Pj6v%PiGFcD@;!nAB!z32o%^{4kq|a=Mtlnsxh{pLBO#L$78p8R$i{*qtI_u(L^NTvCuzMs!u@QJ*s+*TcO#N?`Din;h zD6k_hKOVNxYdy|1Z?KT?NIaYe_W|TfG`qzi$heI-lwBVS;8)Hi+a(7kDl<@{8Ih5x$O!mbHWE3kAq5{H$zl}$KU6(NyDmi zB8r%mDQ;UftfXiw8mrk40^x{cD>u74V||ICWigw_XnvQ+$e&7#CZjQWkwCkn82Y)p zDN_;^!ow;QaT!-svp8|82vEnN7y|W4kSEdj(n1KIlA1>l;;U%d^?xsHaS!k=sh)N00v^IPX`?X87ecbX{z^sRlAL~eEgPdcaU6dc0yU8I) z0^`2&0us8K>_y5h4`Wbz`d(t>uXj*AOVA@o)jHDZi~vp$!8hV5t}hSAYC*_@ZK}1a zrTTYk#rGn$KRA@bNvFCQ)J4Nb?R=lNN@~TN z923h}z}EHhC~ca8te#N+Nx`?dga$d8-3;cd=8xB412MseYcvd7ze2(Bnrgq!4Es^? 
zQ|<8K8a5px3Ln(q9O%2>=CAN-EPptdZQnE(F_o`#uTZ5N*rHlKY%qp0n(c>cELCht zOLwQA$8wFz4%I#X-3d||rh~8o0L1e%kMX>vC^M$cB0#E*Zi^n0A3cNZ?kU^(4lz}O zm+g51JV;GxqxD5E%CSCY05)YIIaJ_KC5JeI%OTIeYdzLQXZUnO7UK5`d~snG`6PNH z93DfBQ?5W#z&YzZOjM#HNYnc>pAMu2NX@%YDx2`o+ihQrW70TDqC#~zmq{>-Ffri= zlm&Q{$Vm$$EU_F&y~>&JXtjw(#e*#8d2!_2HlLE9wp+)g~tv zW+xV=O3)s2nWrse-)Lhzb>S$9cpL81Bj04aTM;pEf`8Y%m?(*mu!Z?=$!BjO diff --git a/unittests/test-contracts/multi_index_test/CMakeLists.txt b/unittests/test-contracts/multi_index_test/CMakeLists.txt deleted file mode 100644 index f94955764eb..00000000000 --- a/unittests/test-contracts/multi_index_test/CMakeLists.txt +++ /dev/null @@ -1,6 +0,0 @@ -if( ${eosio.cdt_FOUND} ) - add_contract( snapshot_test multi_index_test multi_index_test.cpp ) -else() - configure_file( ${CMAKE_CURRENT_SOURCE_DIR}/multi_index_test.wasm ${CMAKE_CURRENT_BINARY_DIR}/multi_index_test.wasm COPYONLY ) - configure_file( ${CMAKE_CURRENT_SOURCE_DIR}/multi_index_test.abi ${CMAKE_CURRENT_BINARY_DIR}/multi_index_test.abi COPYONLY ) -endif() diff --git a/unittests/test-contracts/multi_index_test/multi_index_test.abi b/unittests/test-contracts/multi_index_test/multi_index_test.abi deleted file mode 100644 index 57b181e2ec7..00000000000 --- a/unittests/test-contracts/multi_index_test/multi_index_test.abi +++ /dev/null @@ -1,28 +0,0 @@ -{ - "____comment": "This file was generated with eosio-abigen. DO NOT EDIT Fri Dec 7 11:56:44 2018", - "version": "eosio::abi/1.1", - "structs": [ - { - "name": "multitest", - "base": "", - "fields": [ - { - "name": "what", - "type": "uint32" - } - ] - } - ], - "types": [], - "actions": [ - { - "name": "multitest", - "type": "multitest", - "ricardian_contract": "" - } - ], - "tables": [], - "ricardian_clauses": [], - "variants": [], - "abi_extensions": [] -} \ No newline at end of file diff --git a/unittests/test-contracts/multi_index_test/multi_index_test.cpp b/unittests/test-contracts/multi_index_test/multi_index_test.cpp deleted file mode 100644 index e214d324294..00000000000 --- a/unittests/test-contracts/multi_index_test/multi_index_test.cpp +++ /dev/null @@ -1,194 +0,0 @@ -/** - * @file - * @copyright defined in eos/LICENSE.txt - */ -#include - -using namespace eosio; - -namespace multi_index_test { - - CONTRACT snapshot_test : public contract { - public: - using contract::contract; - - struct limit_order { - uint64_t id; - uint64_t padding = 0; - uint128_t price; - uint64_t expiration; - name owner; - - auto primary_key()const { return id; } - uint64_t get_expiration()const { return expiration; } - uint128_t get_price()const { return price; } - - EOSLIB_SERIALIZE( limit_order, (id)(price)(expiration)(owner) ) - }; - - struct test_k256 { - uint64_t id; - checksum256 val; - - auto primary_key()const { return id; } - checksum256 get_val()const { return val; } - - EOSLIB_SERIALIZE( test_k256, (id)(val) ) - }; - - struct trigger { - trigger( uint32_t w = 0 ) - : what(w) - {} - - uint32_t what; - }; - - ACTION multitest( uint32_t what ) { - trigger t( what ); - - on( t, _self ); - } - - static void on( const trigger& act, name _payer ) - { - name payer = _payer; - print("Acting on trigger action.\n"); - switch(act.what) - { - case 0: - { - print("Testing uint128_t secondary index.\n"); - eosio::multi_index<"orders"_n, limit_order, - indexed_by< "byexp"_n, const_mem_fun >, - indexed_by< "byprice"_n, const_mem_fun > - > orders( name{"multitest"}, name{"multitest"}.value ); - - orders.emplace( payer, [&]( auto& o ) { - o.id = 1; - o.expiration = 300; - o.owner = "dan"_n; }); - - auto order2 = 
orders.emplace( payer, [&]( auto& o ) { - o.id = 2; - o.expiration = 200; - o.owner = "alice"_n; }); - - print("Items sorted by primary key:\n"); - for( const auto& item : orders ) { - print(" ID=", item.id, ", expiration=", item.expiration, ", owner=", name{item.owner}, "\n"); - } - - auto expidx = orders.get_index<"byexp"_n>(); - - print("Items sorted by expiration:\n"); - for( const auto& item : expidx ) { - print(" ID=", item.id, ", expiration=", item.expiration, ", owner=", name{item.owner}, "\n"); - } - - print("Modifying expiration of order with ID=2 to 400.\n"); - orders.modify( order2, payer, [&]( auto& o ) { - o.expiration = 400; }); - - print("Items sorted by expiration:\n"); - for( const auto& item : expidx ) { - print(" ID=", item.id, ", expiration=", item.expiration, ", owner=", name{item.owner}, "\n"); - } - - auto lower = expidx.lower_bound( 100 ); - - print("First order with an expiration of at least 100 has ID=", lower->id, " and expiration=", lower->get_expiration(), "\n"); - - } - break; - case 1: // Test key265 secondary index - { - print("Testing key256 secondary index.\n"); - eosio::multi_index<"test1"_n, test_k256, - indexed_by< "byval"_n, const_mem_fun > - > testtable( "multitest"_n, "exchange"_n.value ); // Code must be same as the receiver? Scope doesn't have to be. - - testtable.emplace( payer, [&]( auto& o ) { - o.id = 1; - o.val = checksum256::make_from_word_sequence(0ULL, 0ULL, 0ULL, 42ULL); }); - - testtable.emplace( payer, [&]( auto& o ) { - o.id = 2; - o.val = checksum256::make_from_word_sequence(1ULL, 2ULL, 3ULL, 4ULL); }); - - testtable.emplace( payer, [&]( auto& o ) { - o.id = 3; - o.val = checksum256::make_from_word_sequence(0ULL, 0ULL, 0ULL, 42ULL); }); - - auto itr = testtable.find( 2 ); - - print("Items sorted by primary key:\n"); - for( const auto& item : testtable ) { - print(" ID=", item.primary_key(), ", val=", item.val, "\n"); - } - - auto validx = testtable.get_index<"byval"_n>(); - - auto lower1 = validx.lower_bound( checksum256::make_from_word_sequence(0ULL, 0ULL, 0ULL, 40ULL) ); - print("First entry with a val of at least 40 has ID=", lower1->id, ".\n"); - - auto lower2 = validx.lower_bound( checksum256::make_from_word_sequence(0ULL, 0ULL, 0ULL, 50ULL) ); - print("First entry with a val of at least 50 has ID=", lower2->id, ".\n"); - - if( testtable.iterator_to(*lower2) == itr ) { - print("Previously found entry is the same as the one found earlier with a primary key value of 2.\n"); - } - - print("Items sorted by val (secondary key):\n"); - for( const auto& item : validx ) { - print(" ID=", item.primary_key(), ", val=", item.val, "\n"); - } - - auto upper = validx.upper_bound( checksum256::make_from_word_sequence(0ULL, 0ULL, 0ULL, 42ULL) ); - - print("First entry with a val greater than 42 has ID=", upper->id, ".\n"); - - print("Removed entry with ID=", lower1->id, ".\n"); - validx.erase( lower1 ); - - print("Items sorted by primary key:\n"); - for( const auto& item : testtable ) { - print(" ID=", item.primary_key(), ", val=", item.val, "\n"); - } - - } - break; - default: - eosio_assert( 0, "Given what code is not supported." 
); - break; - } - } - }; -} /// multi_index_test - -namespace multi_index_test { - extern "C" { - /// The apply method implements the dispatch of events to this contract - void apply( uint64_t receiver, uint64_t code, uint64_t action ) { - require_auth( code ); - if( code == receiver ) { - if( action == "multitest"_n.value ) { - size_t size = action_data_size(); - void* buffer = nullptr; - buffer = alloca( 512 ); - read_action_data( buffer, size ); - datastream ds( (char*)buffer, size ); - - uint32_t w; - ds >> w; - - snapshot_test test( name{receiver}, name{code}, ds ); - test.multitest( w ); - } - else { - eosio_assert( false, "Could not dispatch" ); - } - } - } - } -} diff --git a/unittests/test-contracts/multi_index_test/multi_index_test.wasm b/unittests/test-contracts/multi_index_test/multi_index_test.wasm deleted file mode 100755 index 529a41d9caf1e5e282766bf9ff0c49a98be2619b..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 16825 zcmeI3e~ew#amUZS_x-Z(u6+U9ESMkXy|~FjfvkL&jqN?g2{ewSDLsC*yi7G{n+N44a6+}W&)G8`b zQ@Nk-ocrFsU7KHN8cGFD_MLm~oH;XdX6DSyJ;ybdjzrG6==0IY0p|`xr`!P-Egy)N zm&0FoAX&C{wCoO)PVpc8E;@CfOmkTe6*c-Tn)K=ADK$#VbO^^O{@ReDSI=RjIugJg zlk%qoE*qh7QGkX57k{R*Eosdi8|t(ck4|@56V0RDDHo|LYaZ%O&(BRvHoMJl zx!Afpt>)y!z&n>1g4X=f^!!9~X{puex{|soM_NY?Eu3^^dsyg9&vlnvMGqHE9-Nq+ zKi=vboIg4@IWfI!r>m;JYCuP)0|6C#nhOspL*`m1x&~}Bs^{?Z;PC9w(N3o|*PZCJ z4z;F_wK}dQteXLxo;x!{dYnhSlJT#cHFdU^?>ynuwmup%7`I4 zm%8(aPpqT^a7=11+>kmqhQZr+>>2D`Pw#`Rz?sQPL>*l~?g8KMz)hUgCUHFw-C@CU z`OeAMr4tiVt>(hS!RAsciPld9!)nbP3?mcWkm3pX_em-khC&aBkvQb9SQZ_NlwsUfX@==74W)|6S?_-P-=U*ME9; zJA^(OMal99T%OG|l6uJ}+g!HIN12QF`pCOa466~3pFaJCZ@&22zy96tFVmAn^|DX$ z(+{;9fcwOkv-omf&XfK5zx*|N<4bv|4L~U?UG7W1G#Zz7#Wi|sZ7_duC`xlD0cHt} zY(Fou_BNOPL6+dGPL)K>}Xk;JX72oP(A77K~*`3trJnP#0t+lfc)m;yGYz25{03O?&=Gd&9c1`~0 zqF=JUMx(Du?qeeU`{|QEcw%|nWfgdJcfo5I=dx?-F4?Q;M(aubf-$eTvugfg@4@r% zeS1ia>DI^=9fhM7Bm?5}J&N8{_n_?`J*J}1R0C^S)O=Iejqgt4$M zv$8(0&R>&)M+l<6+h7em3-Yv)Uo6PK+e5-?*cu4%K%;cWaFyXNyLO{&s_aYqhRa&g ze5&1$cGoMkrF4k5f@FOqyFJC8+?_~>p)a|%pxjU;&Ch&Yi|gt_`Rbsqo_^swKlqEU z`~xIQS69q9=xEU0o~pW=9V#5uzOS8V>Oa%r5H$w?4?(%PcBdqb`5$<54SY@>HhzGnPXkY$2rUvDD6(>@dO8$at3#XaO z<1c^bp7A(azt&o|H>zu~q-#v>d^{Jhy6ruybA7ARx;`QCvszg@+wl$z4Q#6oZ3O%L zk8hb2v?wQ7GJY0Tdh|8ujLH&6dggee7U8;jCI6~XRS90WS)5Npb9jmlaXqq$XIL0|L7o<%}w{)&ElZyu={GV8H3z%l%o zVXe#^e9Wc+AM;jAU^ud!=2jSIZspDSk0st7HP;q0SWU-G;&fBp<)1}a;bcI#LF~i? 
zgyfwhBLXT~D5M^+N&V;>J#Ip~inbQI#7OAFZd8fs_3=<_QFryd5DHE0ssFVb2hl?q zXO|h@`AhFRA!){Y$O&D@6wC>|nJG{l6`PoXK4At^C^mS;zHbV+fWj0mBwmx=n~B%V z@_kS9HayMQD2n(vkGIqxBA9CLBp^GwKCLJyWD5^%z5&@s8G*+$`Nlr14AwvtT=u}T zHQ+0#QT(^YJ^;=snF|nOq@-Vs!`Ez*A(b?MBVbb;fupwrj2!F2qzD}7`3X#IejP01 z^N6bXMd@gFqECDxM{+ZYvO$UnRfUDKeabLazW59TuPa1tsgo!Lq7>pPa#GB-imprb z8PPI}FD9$XZ%Q9SCelcX@*72)qWq>-D8H!{%5RogXp?J{it-yFc2Ry)E0o{V3gtIO zez{Rr`Hh&qD8H!{%5Q3g@*5)zjt#MZBc&+d*uD!mjw+b1)clFcZ$^7o(6W`sKnn+l z3R*6+Rw%!T3zZBnp)c@wv9&_^t;eIBFEo=tQB2#y1G^Ee&DIL#HzPJu`OU;(<+mQ( zm3_D?qzgT>y852ttC#c`=jI3O#1r#apVeH_FC}H2nbH5PO8P<(VGUqgF^vn^MI(HS zF>Rm8ukIPCg-n|M{K?13Lh`Jhy!deqF5j4tK+ptNb76c zo#QwRD}9E@h|jHUQqmIdNaQV9D(Ak)y@?3-702TUdX&LnIROFtpQg4H-=-~T>H!4G z#1#?L%SJ&KN*4twECoSSk|$kqmQ3e;L-d9op(LtDFp;*{Fs;rn@d9(i&H>fKNO-Uk zD88W|)}f@K3d%3I1QqM^r<_c>CyL1uF=*W6)<%@wuTyHtUIgfJWc=Oz4bl|ywTm3P zyWZk&R8dGPjLyc>4e~zfkT;!VblSLjvd zg+_343G?%jenJ4o_S$Yc==>9M?JoG&lI-MRnz&*SaI{Pe@0K664- zsHVc{C?*m1>s@p{)_~F#nE7Ej@|T`>>5c4-`IdTOBr}tAA5yaW8YHIhYl$sO&Y7=d zW19UMWTZciqRKLTu?FQrim2S89sPyvY(E202!ZEY@YOjo5~v9U!LiouLSLAG0t6ex zVuRBF%$MufzU?o0bx}0L0?AJ-G7R;%(jHqBOM??);wq_4bl$dp3M=Za2~uMS79^EQ zIfmVck$-aYW_}yFma3K{j9B_Ud!Ld2A9e}nN9~M&c;cGRW z@WnnAKFmN>W1KHj%aW0!SB+=iRt1tmRtuHZ2xS}c;wVfSguD?9=^vJ(nnkG3=D~gB z_@7tzqg?WRukE)=(`^_WXUo`=U)|4qIJvH7cx^v4^)w0|a0TZQwC`A>bQ`Bq#1C85 z%wBgnq;j(LHV)`V1zO#3(RowV#a`RbP&+Ucv2v=W_mC}87CilWB9)3E9Stj^0Cs?l zM{O`nSzV;hf-)3RJJ5{WL*QfBL3=VlJK^b{81 zrN>YihH7G{>}z7k(}8a;nBks*ZGl)Xyx>R8pP*zx4|}R+;Vgo|=Wu=r9#vJ8l7?e& zc4LQ92xZW)q`_?RhkP_ch+vCGRghKG;ubJLKTSnP6A~zQ~5`Q-~}HtY)-oMwP_Iw<35GK5;R8vGe{xx8ja=^ zppb<)n@j0|m#db(fL^0Z=|gO4!E+bd_-CJuurwteQkdiPXXKbg<~qgB;|=^?S%_kp*8)<@HHl~^pcOxADx}_bMsw4IlZ*d>>iqO zcK<3}%()vn_n@Dj^WDz$gAcYkKHLbsVZA%joaKVv#Np2T5&pDVlm2*f$xpVHxUa@T zcb-wRC;h>eKLjK_&BI*a^L9DTKo89yS(xQgA3%nRdytdNd*&IJZhN4$WD_6dGO2Do zcDWIFXnt;z3!R?3psf=S;B?_DU4P^#cNV+A)4@%u^s2kNT=(-!Tqb1FgC~7)S<=t6 zPTs6h+?Yzs{_fj9LYoUy>4xik>%_uzr>SeO>ij;P>#p{e}{y$b8O6b;o4BTd*5_tsXGYO zoLhsT+4ZxnCP3TA#{3il4}5UXD80xx=OzchMf#bqyR_aDCIaBnY6=n^V% zSXYL9n0|W6cc)r@i94Vkmero0YxO{yo!RN0RGWjVEqadvdf4wUx?W+553XH4@ie-k zT*&AeTSqQ=q#u3*89d0{Xz6lyiuKvKV_+S)0Fqw%3A^yxBfhuV$dy}8Y2!|AEYJDl zQ%Ld9{AA0+oGfpNtFxxEHvq_ehV+9=m>e1;=I7`A>^yDs1$(lEn8sj{2f!TIPe0n7 znnvFd*OceVYzHfazUeuG!2stpg$>2x3eq+Ik=9aSmu)a{XCd7=Fg#EO7TM=Q&4aVH5KZWqf--EmD-o@2@*Fps`WdNB0{aAz#NG0@=` zbZ6cCQV(DPaTd~tM)M;R=3GF}rNi_C1$vEKg<0!x#7rDWj=82coHs{!{f6i{4cix2 z0s1z@#TnT2mT=D-F@}X@S>_&Wty2BtQz%)$EFq$&0Y3!uWhK4tD7V@dSRN!2#Ek?F zPoHQ_P8>XmGt~m2e@71m!E;+FZM>N@koc0;k0S)>_2&u0-oBwMIB1aWD3Axpw +#include "noop.hpp" -namespace eosio { +using namespace eosio; - CONTRACT noop: public contract { - public: - using contract::contract; - - // Ignore type in action - ACTION anyaction( name from, - const eosio::ignore& type, - const eosio::ignore& data ) - { - require_auth( from ); - } - }; - - EOSIO_DISPATCH( noop, ( anyaction ) ) - -} /// namespace eosio +void noop::anyaction( name from, + const ignore& type, + const ignore& data ) +{ + require_auth( from ); +} diff --git a/unittests/test-contracts/noop/noop.hpp b/unittests/test-contracts/noop/noop.hpp new file mode 100644 index 00000000000..f6966c1ef1a --- /dev/null +++ b/unittests/test-contracts/noop/noop.hpp @@ -0,0 +1,17 @@ +/** + * @file + * @copyright defined in eos/LICENSE + */ +#pragma once + +#include + +class [[eosio::contract]] noop : public eosio::contract { +public: + using eosio::contract::contract; + + [[eosio::action]] + void anyaction( eosio::name from, + const eosio::ignore& type, + const eosio::ignore& data ); +}; diff --git 
a/unittests/test-contracts/noop/noop.wasm b/unittests/test-contracts/noop/noop.wasm index aa2acfcf85439807dd7d626276c2cc9f0d3cea10..1dc548e7e63d4b545fd292254e7133e16e3a3524 100755 GIT binary patch literal 778 zcmYjPJ&)5s5S`hzV<+nf&%5r^PU6VNxb>F?wbIm+;gi z!+0wOY+1l$#VCuDqA=4ExP=-MGa2MFaIAA-J01_Rp#gV8DojZvuhH=QW~<@2m?NUg z5wLNQSE%bV5EbZ;{|~Q;Zj$HY8ThSp-f%z$Vg}MvpzyvE9n|%Y#kbe*{@zfGT#L%4 ztAKEN&OX2W0uP)>Fa59QpMJgg4bm0D-HLDO)b)oy-+wNuWg*IVUe{Y0LW69HH;7Jk z1+;t;vMyI_2UxUuC5Fz;HYi8e&YdquXJG3ZZ1U5UuW>uie?fgM<)rE;UUK$stk>$U$((b|f642t`7O>w2=DB%a;%ekR1o z^4N$&4nE|XJHLQCl96)A1&JdEkoXxK5aw0SI{1cHp6QS3s#mXG*HF2fC?ZmyraM7% z)HJk1s@c&^(>`g(G=8j(u!Gsr5e9h5zJj&FK>wr?Q|&*?%i(Hox2hy6D3{A>u_7Z&(WIITX9pDc{Y5o@y0@qX<TO8 zzG;*@KBKfD_v3f!;uYTr;o>b**@UUD~Ms){m^40#X(P258jSr~Pi4{4O zs&4nUtb-F@Db;*PE+1KQmGVUQSuuT?5$ik0$AA9t$LXtoK9Z?%w55A^sV$wbVRPLi zw+z8rvB}fT)KMfVz*Jd*?eR*+_Q#-+DtjPL$?fLGrlj(04+zg$D9IM=3e&PB=UzJt*bjhBu=K{YH^Ymhqdj=X66v}Jf3?}GDn|#&Oszt!x8wY!D0FUdJDup5EJAEKHGz} z&xFlA$$@)P1Npyqg|js)lCW^d<_i#~+0-1u?fSf@+Q$n-vRDT*np5!yWh;&xRE5wE zNWde<7;j&(P@_0Y?)tcDLt;xtZP1qF4R}ajg(_)UHAs@W@8|}-=tEo%q@MP1c~p9v z3hfZ$6YLFC&uDfDVO)5C9PQDda67<2lwK6{raf0)1gr=!-lu0(Nr= zJG#rt`j~RXyx^hKwroba2_|vvp3g1BCRJz?3XxDE35A?ULX3sG8P0i*A|%6+Zw$!A zxMmw7XtNO=$~_WwM6F1|n$30wh*D@TQY%%nY8(C|kyO-{w7oX&kkyF8+7~VME~L0? zn^tCCCgfh@T0oB?MbSebcA&*y2b5l}GwAWF9+Z%eMiFkL0OJLW%jJtuThk(wYihM1 zGZ%Tt#YXd%Aa4=sw+P#tATic9ml5K|dW_Ok1R>&|IT1bV9YLkOer{tBtnwh_H_&T z4{KUWdiYX?^06n9zL9=CDaZI&4<0S16FjPFm%k~Oyjv~z9`9qan(}mie83MY9>NkU bJ>2Nxkjq!npk#|+D@Qwc$Rot|e diff --git a/unittests/test-contracts/payloadless/CMakeLists.txt b/unittests/test-contracts/payloadless/CMakeLists.txt index 2704b230d98..6e3cd8176d0 100644 --- a/unittests/test-contracts/payloadless/CMakeLists.txt +++ b/unittests/test-contracts/payloadless/CMakeLists.txt @@ -1,6 +1,6 @@ -if( ${eosio.cdt_FOUND} ) - add_contract( payloadless payloadless payloadless.cpp ) +if( EOSIO_COMPILE_TEST_CONTRACTS ) + add_contract( payloadless payloadless payloadless.cpp ) else() - configure_file( ${CMAKE_CURRENT_SOURCE_DIR}/payloadless.wasm ${CMAKE_CURRENT_BINARY_DIR}/payloadless.wasm COPYONLY ) - configure_file( ${CMAKE_CURRENT_SOURCE_DIR}/payloadless.abi ${CMAKE_CURRENT_BINARY_DIR}/payloadless.abi COPYONLY ) + configure_file( ${CMAKE_CURRENT_SOURCE_DIR}/payloadless.wasm ${CMAKE_CURRENT_BINARY_DIR}/payloadless.wasm COPYONLY ) + configure_file( ${CMAKE_CURRENT_SOURCE_DIR}/payloadless.abi ${CMAKE_CURRENT_BINARY_DIR}/payloadless.abi COPYONLY ) endif() diff --git a/unittests/test-contracts/payloadless/payloadless.abi b/unittests/test-contracts/payloadless/payloadless.abi index 1b31e14ce1b..89d5af4430c 100644 --- a/unittests/test-contracts/payloadless/payloadless.abi +++ b/unittests/test-contracts/payloadless/payloadless.abi @@ -1,6 +1,7 @@ { - "____comment": "This file was generated with eosio-abigen. DO NOT EDIT Fri Dec 7 11:56:42 2018", + "____comment": "This file was generated with eosio-abigen. 
DO NOT EDIT ", "version": "eosio::abi/1.1", + "types": [], "structs": [ { "name": "doit", @@ -8,7 +9,6 @@ "fields": [] } ], - "types": [], "actions": [ { "name": "doit", @@ -18,6 +18,5 @@ ], "tables": [], "ricardian_clauses": [], - "variants": [], - "abi_extensions": [] + "variants": [] } \ No newline at end of file diff --git a/unittests/test-contracts/payloadless/payloadless.cpp b/unittests/test-contracts/payloadless/payloadless.cpp index 9304da8e6cd..70939590e50 100644 --- a/unittests/test-contracts/payloadless/payloadless.cpp +++ b/unittests/test-contracts/payloadless/payloadless.cpp @@ -1,19 +1,12 @@ /** * @file - * @copyright defined in eos/LICENSE.txt + * @copyright defined in eos/LICENSE */ -#include +#include "payloadless.hpp" using namespace eosio; -CONTRACT payloadless : public contract { -public: - using contract::contract; - - ACTION doit() { - print("Im a payloadless action"); - } -}; - -EOSIO_DISPATCH( payloadless, (doit) ) +void payloadless::doit() { + print("Im a payloadless action"); +} diff --git a/unittests/test-contracts/payloadless/payloadless.doit_rc.md b/unittests/test-contracts/payloadless/payloadless.doit_rc.md deleted file mode 100644 index 4b365afbd03..00000000000 --- a/unittests/test-contracts/payloadless/payloadless.doit_rc.md +++ /dev/null @@ -1,14 +0,0 @@ -# CONTRACT FOR payloadless::doit - -## ACTION NAME: doit - -### Parameters -Input paramters: - -Implied parameters: - -### Intent -INTENT. The intention of the author and the invoker of this contract is to print output. It shall have no other effect. - -### Term -TERM. This Contract expires at the conclusion of code execution. diff --git a/unittests/test-contracts/payloadless/payloadless.hpp b/unittests/test-contracts/payloadless/payloadless.hpp new file mode 100644 index 00000000000..0a4e88affa1 --- /dev/null +++ b/unittests/test-contracts/payloadless/payloadless.hpp @@ -0,0 +1,15 @@ +/** + * @file + * @copyright defined in eos/LICENSE + */ +#pragma once + +#include + +class [[eosio::contract]] payloadless : public eosio::contract { +public: + using eosio::contract::contract; + + [[eosio::action]] + void doit(); +}; diff --git a/unittests/test-contracts/payloadless/payloadless.wasm b/unittests/test-contracts/payloadless/payloadless.wasm index 2b6708ebd4291759682664cfcb9a095e159ef9d1..330682f609649b4b2001c7735078b0420ee0f1fb 100755 GIT binary patch literal 698 zcmYk4zi-n(6vw~s&UTtyq8~#Dy9sqOo)Mni3tg@Gx5$DnzQciz0dnCA6&U00CcZ6rW#{VJr;G{o@;m!V_CNV zEUnib`h(auie?<1_>d{PHGwlCa~n_ERlE{i8CjJEFnR_2M{0~uX^^(W!&m!`lGt))$3O^R5nvZMCwlGKyB!N z)aF2MHtmxR%*Kxiqmz>p|CBw2hyFz+rkXv?%i;QPJ{ye6bvbAbzphC69nRJbkxaO$)`N$W`EWdZQXb9*i*i*?t97+%FdUAn5g9S-RrBU>J}8@}TCLlq zXj)B&iz5oWo5%m>rVWiV2#7P?P8d;4iB2d8)k0|<8bvC4*KCwKJ*RX-?zeB%)^!aN^RaFmyfNv zNqM6Cte8H_i1ml3{_oEpUi-Omw5xk;oZ8h18#eEoiw0J$01Jc3$74G$BEn z>-2Y}Lh;Y=Uj&R8tZ(U{2<1gZr2FkJCmVl+sIfZ>_A|wCo)x;M;ncqnFLpb%TP(*` z!#`_SsOu>Q?wy+Tt|pAGVZ?FmC#>$8gaaml7>GrVSLoE2bzzVsMoWojfP$miV@VMu zdYcH_Eev4IbBV#o38aCpF1Z)y#FW34z7AAxBYeRRUUW;JJF7ErS!gDA{wvik2bh$V zOKLoV+Qmz2#zNQ)K_Ndm_tIHFd;mC-MsHFJd3B+%8_1&j4pS?HVdga;IUU1SI#DdHpq;%Bx9BP9{44S$6nhG#(VCsGnEHelH{;p zyOM{T>#`Ws1#a0L*S05{nM2fzcMM%9HtK-oj~jZF%fA7+k>^w zgw4L>z&-33`QN|6*_IVaSU6#N;a(;BuU+Oben$cLtGA|KJTLrRC>!?Y`C1}8?*r4NqL70IXan^TpeEEhN_<@C%RDjyc5?+g zI>p-hm{MV0@K9=dZAKacCUNbaEiJ?*x6mdOBB4YQ3OSL47z_6^obwz-NQNWd1dxky zn>~o2%|>*1DYd8-Nm#Sl%K%X>+Kbf6ty#4P|B*;4YOk~{8+XiV%wg>>E%q*?jJ7td zKD|uHy+m0+k0M3ULm+mb#g_xFUdkEt_*D-|$Va0HcT#}y0!DH9OQ@}Bkx7|aEy&De z9#Yt7#BbEJ1Va4=VOt3jV{3C2Au85ml%^sG5&ztY=wa^&D)sl*_R{oN88kW|qpceM 
zI8$I#K`8FUsY1IGxyTUB)yTyKzM|0?p=4}AE^RmRUC2++QqUcwRN9h6NRn#bwxIv8 zrr$`h;Insaf665<%A?7=98IdG;r9DYB&{Uk{u^v2H&8K)&)rh|;8y;27;o})5 n>p9P7lOuju@er0+DIeiu&i;$(O^5U8VuBAmjJ&IIjc@$F_^GbN diff --git a/unittests/test-contracts/payloadless/payloadless_rc.md b/unittests/test-contracts/payloadless/payloadless_rc.md deleted file mode 100644 index 4ca04401a20..00000000000 --- a/unittests/test-contracts/payloadless/payloadless_rc.md +++ /dev/null @@ -1,40 +0,0 @@ -### CLAUSE NAME: Warranty -WARRANTY. The invoker of the contract action shall uphold its Obligations under this Contract in a timely and workmanlike manner, using knowledge and recommendations for performing the services which meet generally acceptable standards set forth by EOS.IO Blockchain Block Producers. - -### CLAUSE NAME: Default -DEFAULT. The occurrence of any of the following shall constitute a material default under this Contract: - -### CLAUSE NAME: Remedies -REMEDIES. In addition to any and all other rights a party may have available according to law, if a party defaults by failing to substantially perform any provision, term or condition of this Contract, the other party may terminate the Contract by providing written notice to the defaulting party. This notice shall describe with sufficient detail the nature of the default. The party receiving such notice shall promptly be removed from being a Block Producer and this Contract shall be automatically terminated. - -### CLAUSE NAME: Force Majeure -FORCE MAJEURE. If performance of this Contract or any obligation under this Contract is prevented, restricted, or interfered with by causes beyond either party's reasonable control ("Force Majeure"), and if the party unable to carry out its obligations gives the other party prompt written notice of such event, then the obligations of the party invoking this provision shall be suspended to the extent necessary by such event. The term Force Majeure shall include, without limitation, acts of God, fire, explosion, vandalism, storm or other similar occurrence, orders or acts of military or civil authority, or by national emergencies, insurrections, riots, or wars, or strikes, lock-outs, work stoppages, or supplier failures. The excused party shall use reasonable efforts under the circumstances to avoid or remove such causes of non-performance and shall proceed to perform with reasonable dispatch whenever such causes are removed or ceased. An act or omission shall be deemed within the reasonable control of a party if committed, omitted, or caused by such party, or its employees, officers, agents, or affiliates. - -### CLAUSE NAME: Dispute Resolution -DISPUTE RESOLUTION. Any controversies or disputes arising out of or relating to this Contract will be resolved by binding arbitration under the default rules set forth by the EOS.IO Blockchain. The arbitrator's award will be final, and judgment may be entered upon it by any court having proper jurisdiction. - -### CLAUSE NAME: Entire Agreement -ENTIRE AGREEMENT. This Contract contains the entire agreement of the parties, and there are no other promises or conditions in any other agreement whether oral or written concerning the subject matter of this Contract. This Contract supersedes any prior written or oral agreements between the parties. - -### CLAUSE NAME: Severability -SEVERABILITY. If any provision of this Contract will be held to be invalid or unenforceable for any reason, the remaining provisions will continue to be valid and enforceable. 
If a court finds that any provision of this Contract is invalid or unenforceable, but that by limiting such provision it would become valid and enforceable, then such provision will be deemed to be written, construed, and enforced as so limited. - -### CLAUSE NAME: Amendment -AMENDMENT. This Contract may be modified or amended in writing by mutual agreement between the parties, if the writing is signed by the party obligated under the amendment. - -### CLAUSE NAME: Governing Law -GOVERNING LAW. This Contract shall be construed in accordance with the Maxims of Equity. - -### CLAUSE NAME: Notice -NOTICE. Any notice or communication required or permitted under this Contract shall be sufficiently given if delivered to a verifiable email address or to such other email address as one party may have publicly furnished in writing, or published on a broadcast contract provided by this blockchain for purposes of providing notices of this type. -### CLAUSE NAME: Waiver of Contractual Right -WAIVER OF CONTRACTUAL RIGHT. The failure of either party to enforce any provision of this Contract shall not be construed as a waiver or limitation of that party's right to subsequently enforce and compel strict compliance with every provision of this Contract. - -### CLAUSE NAME: Arbitrator's Fees to Prevailing Party -ARBITRATOR'S FEES TO PREVAILING PARTY. In any action arising hereunder or any separate action pertaining to the validity of this Agreement, both sides shall pay half the initial cost of arbitration, and the prevailing party shall be awarded reasonable arbitrator's fees and costs. - -### CLAUSE NAME: Construction and Interpretation -CONSTRUCTION AND INTERPRETATION. The rule requiring construction or interpretation against the drafter is waived. The document shall be deemed as if it were drafted by both parties in a mutual effort. - -### CLAUSE NAME: In Witness Whereof -IN WITNESS WHEREOF, the parties hereto have caused this Agreement to be executed by themselves or their duly authorized representatives as of the date of execution, and authorized as proven by the cryptographic signature on the transaction that invokes this contract. 
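
The payloadless conversion above is the pattern this patch applies to each test contract: the old single-file CONTRACT/ACTION macro style is split into a header that declares the contract class via C++11 attributes plus a .cpp that defines the action bodies, and the CMake rules now rebuild from source only when EOSIO_COMPILE_TEST_CONTRACTS is enabled, otherwise copying the checked-in .wasm/.abi artifacts. A rough single-file sketch of the attribute style follows (the include path assumes the eosiolib header layout of the eosio.cdt toolchain contemporary with this patch; with these attributes the toolchain derives the ABI and the dispatcher itself, which is why the EOSIO_DISPATCH macro could be dropped from payloadless.cpp):

#include <eosiolib/eosio.hpp>

// The [[eosio::contract]] attribute marks the class for ABI and dispatcher
// generation; each [[eosio::action]] method becomes an action of the same name.
class [[eosio::contract]] payloadless : public eosio::contract {
public:
   using eosio::contract::contract;  // inherit the (self, first_receiver, datastream) constructor

   [[eosio::action]]
   void doit() {
      eosio::print( "Im a payloadless action" );  // defined inline here; the patch keeps bodies in payloadless.cpp
   }
};
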
diff --git a/unittests/test-contracts/proxy/CMakeLists.txt b/unittests/test-contracts/proxy/CMakeLists.txt index 45d519049ed..12a9d309d4c 100644 --- a/unittests/test-contracts/proxy/CMakeLists.txt +++ b/unittests/test-contracts/proxy/CMakeLists.txt @@ -1,7 +1,6 @@ -if( ${eosio.cdt_FOUND} ) - add_executable( proxy proxy proxy.cpp ) +if( EOSIO_COMPILE_TEST_CONTRACTS ) + add_contract( proxy proxy proxy.cpp ) else() - configure_file( ${CMAKE_CURRENT_SOURCE_DIR}/proxy.wasm ${CMAKE_CURRENT_BINARY_DIR}/proxy.wasm COPYONLY ) + configure_file( ${CMAKE_CURRENT_SOURCE_DIR}/proxy.wasm ${CMAKE_CURRENT_BINARY_DIR}/proxy.wasm COPYONLY ) + configure_file( ${CMAKE_CURRENT_SOURCE_DIR}/proxy.abi ${CMAKE_CURRENT_BINARY_DIR}/proxy.abi COPYONLY ) endif() - -configure_file( ${CMAKE_CURRENT_SOURCE_DIR}/proxy.abi ${CMAKE_CURRENT_BINARY_DIR}/proxy.abi COPYONLY ) diff --git a/unittests/test-contracts/proxy/proxy.abi b/unittests/test-contracts/proxy/proxy.abi index cb4ccc2fe74..de04aae26a5 100644 --- a/unittests/test-contracts/proxy/proxy.abi +++ b/unittests/test-contracts/proxy/proxy.abi @@ -1,41 +1,57 @@ { - "version": "eosio::abi/1.0", - "types": [{ - "new_type_name": "account_name", - "type": "name" - } - ], - "structs": [{ - "name": "config", - "base": "", - "fields": [ - { "name": "key", "type": "name" }, - { "name": "owner", "type": "name" }, - { "name": "next_id", "type": "uint32" } - ] - },{ - "name": "setowner", - "base": "", - "fields": [ - { "name": "owner", "type": "name" }, - { "name": "delay", "type": "uint32" } - ] - } - ], - "actions": [{ - "name": "setowner", - "type": "setowner", - "ricardian_contract": "" - } - ], - "tables": [{ - "name": "configs", - "type": "config", - "index_type": "i64", - "key_names" : ["key"], - "key_types" : ["name"] - } - ], - "ricardian_clauses": [], - "abi_extensions": [] -} + "____comment": "This file was generated with eosio-abigen. 
DO NOT EDIT ", + "version": "eosio::abi/1.1", + "types": [], + "structs": [ + { + "name": "config", + "base": "", + "fields": [ + { + "name": "owner", + "type": "name" + }, + { + "name": "delay", + "type": "uint32" + }, + { + "name": "next_id", + "type": "uint32" + } + ] + }, + { + "name": "setowner", + "base": "", + "fields": [ + { + "name": "owner", + "type": "name" + }, + { + "name": "delay", + "type": "uint32" + } + ] + } + ], + "actions": [ + { + "name": "setowner", + "type": "setowner", + "ricardian_contract": "" + } + ], + "tables": [ + { + "name": "config", + "type": "config", + "index_type": "i64", + "key_names": [], + "key_types": [] + } + ], + "ricardian_clauses": [], + "variants": [] +} \ No newline at end of file diff --git a/unittests/test-contracts/proxy/proxy.cpp b/unittests/test-contracts/proxy/proxy.cpp index c1a1ca0e633..1a199c4a5ba 100644 --- a/unittests/test-contracts/proxy/proxy.cpp +++ b/unittests/test-contracts/proxy/proxy.cpp @@ -2,100 +2,64 @@ * @file * @copyright defined in eos/LICENSE */ -#include - #include "proxy.hpp" +#include using namespace eosio; -namespace proxy { - namespace configs { - bool get( config &out, const name &self ) { - auto it = db_find_i64( self.value, self.value, name{"config"}.value, out.key.value ); - if (it != -1) { - auto size = db_get_i64( it, (char*)&out, sizeof(config) ); - eosio_assert( size == sizeof(config), "Wrong record size" ); - return true; - } else { - return false; - } - } +proxy::proxy( eosio::name self, eosio::name first_receiver, eosio::datastream ds ) +:contract( self, first_receiver, ds ) +,_config( get_self(), get_self().value ) +{} - void store(const config &in, const name &self) { - auto it = db_find_i64(self.value, self.value, name{"config"}.value, in.key.value); - if (it != -1) { - db_update_i64(it, self.value, (const char *)&in, sizeof(config)); - } else { - db_store_i64(self.value, "config"_n.value, self.value, config::key.value, (const char *)&in, sizeof(config)); - } - } - } /// configs +void proxy::setowner( name owner, uint32_t delay ) { + require_auth( get_self() ); + auto cfg = _config.get_or_default(); + cfg.owner = owner; + cfg.delay = delay; + print( "Setting owner to ", owner, " with delay ", delay, "\n" ); + _config.set( cfg, get_self() ); +} - template - void apply_transfer( uint64_t receiver, name /* code */, const T& transfer ) { - config code_config; - const auto self = receiver; - auto get_res = configs::get( code_config, name{self} ); - eosio_assert( get_res, "Attempting to use unconfigured proxy" ); - if ( transfer.from == name{self} ) { - eosio_assert( transfer.to == name{code_config.owner}, "proxy may only pay its owner" ); - } else { - eosio_assert( transfer.to == name{self}, "proxy is not involved in this transfer" ); - T new_transfer = T(transfer); - new_transfer.from = name{self}; - new_transfer.to = name{code_config.owner}; +void proxy::on_transfer( name from, name to, asset quantity, const std::string& memo ) { + print("on_transfer called on ", get_self(), " contract with from = ", from, " and to = ", to, "\n" ); + check( _config.exists(), "Attempting to use unconfigured proxy" ); + auto cfg = _config.get(); - auto id = code_config.next_id++; - configs::store( code_config, name{self} ); + auto self = get_self(); + if( from == self ) { + check( to == cfg.owner, "proxy may only pay its owner" ); + } else { + check( to == self, "proxy is not involved in this transfer" ); - transaction out; - out.actions.emplace_back( permission_level{name{self}, "active"_n}, "eosio.token"_n, "transfer"_n, 
new_transfer ); - out.delay_sec = code_config.delay; - out.send( id, name{self} ); - } - } + auto id = cfg.next_id++; + _config.set( cfg, self ); -void apply_setowner(uint64_t receiver, set_owner params) { - const auto self = receiver; - require_auth(params.owner); - config code_config; - configs::get(code_config, name{self}); - code_config.owner = params.owner.value; - code_config.delay = params.delay; - eosio::print("Setting owner to: ", name{params.owner}, " with delay: ", params.delay, "\n"); - configs::store(code_config, name{self}); + transaction out; + eosio::token::transfer_action a( "eosio.token"_n, {self, "active"_n} ); + out.actions.emplace_back( a.to_action( self, cfg.owner, quantity, memo ) ); + out.delay_sec = cfg.delay; + out.send( id, self ); + } } -template -void apply_onerror(uint64_t receiver, const onerror& error ) { - eosio::print("starting onerror\n"); - const auto self = receiver; - config code_config; - eosio_assert(configs::get(code_config, name{self}), "Attempting use of unconfigured proxy"); +void proxy::on_error( uint128_t sender_id, eosio::ignore> ) { + print( "on_error called on ", get_self(), " contract with sender_id = ", sender_id, "\n" ); + check( _config.exists(), "Attempting use of unconfigured proxy" ); - auto id = code_config.next_id++; - configs::store(code_config, name{self}); + auto cfg = _config.get(); - eosio::print("Resending Transaction: ", error.sender_id, " as ", id, "\n"); - transaction dtrx = error.unpack_sent_trx(); - dtrx.delay_sec = code_config.delay; - dtrx.send(id, name{self}); -} }; // namespace proxy + auto id = cfg.next_id; + ++cfg.next_id; + _config.set( cfg, same_payer ); -extern "C" { + print("Resending Transaction: ", sender_id, " as ", id, "\n"); - /// The apply method implements the dispatch of events to this contract - void apply( uint64_t receiver, uint64_t code, uint64_t action ) { - if( code == "eosio"_n.value && action == "onerror"_n.value ) { - proxy::apply_onerror( receiver, onerror::from_current_action() ); - } else if( code ==( "eosio.token"_n.value ) ) { - if( action ==( "transfer"_n.value ) ) { - apply_transfer( receiver, name{code}, unpack_action_data() ); - } - } else if( code == receiver ) { - if( action == "setowner"_n.value ) { - apply_setowner( receiver, unpack_action_data() ); - } - } - } + unsigned_int packed_trx_size; + get_datastream() >> packed_trx_size; + transaction trx; + get_datastream() >> trx; + + trx.delay_sec = cfg.delay; + trx.send( id, get_self() ); } diff --git a/unittests/test-contracts/proxy/proxy.hpp b/unittests/test-contracts/proxy/proxy.hpp index 084258453a3..b913143fed2 100644 --- a/unittests/test-contracts/proxy/proxy.hpp +++ b/unittests/test-contracts/proxy/proxy.hpp @@ -4,37 +4,52 @@ */ #pragma once -#include -#include - -using namespace eosio; +#include +#include +#include + +// Extacted from eosio.token contract: +namespace eosio { + class [[eosio::contract("eosio.token")]] token : public eosio::contract { + public: + using eosio::contract::contract; + + [[eosio::action]] + void transfer( eosio::name from, + eosio::name to, + eosio::asset quantity, + const std::string& memo ); + using transfer_action = eosio::action_wrapper<"transfer"_n, &token::transfer>; + }; +} -namespace proxy { +// This contract: +class [[eosio::contract]] proxy : public eosio::contract { +public: + proxy( eosio::name self, eosio::name first_receiver, eosio::datastream ds ); - TABLE set_owner { - name owner; - uint32_t delay; + [[eosio::action]] + void setowner( eosio::name owner, uint32_t delay ); - 
EOSLIB_SERIALIZE( set_owner, (owner)(delay) ) - }; + [[eosio::on_notify("eosio.token::transfer")]] + void on_transfer( eosio::name from, + eosio::name to, + eosio::asset quantity, + const std::string& memo ); - TABLE config { - static constexpr name key = "config"_n; - - capi_name owner = 0; - uint32_t delay = 0; - uint32_t next_id = 0; + [[eosio::on_notify("eosio::onerror")]] + void on_error( uint128_t sender_id, eosio::ignore> sent_trx ); - uint64_t primary_key() const { return key.value; } + struct [[eosio::table]] config { + eosio::name owner; + uint32_t delay = 0; + uint32_t next_id = 0; - EOSLIB_SERIALIZE( config, (key)(owner)(delay)(next_id) ) + EOSLIB_SERIALIZE( config, (owner)(delay)(next_id) ) }; - struct transfer_args { - name from; - name to; - asset quantity; - std::string memo; - }; + using config_singleton = eosio::singleton< "config"_n, config >; -} /// namespace proxy +protected: + config_singleton _config; +}; diff --git a/unittests/test-contracts/proxy/proxy.wasm b/unittests/test-contracts/proxy/proxy.wasm index 78c4a6dcacf34018e88537940421f193613f91b5..c09311385bec5e82bc38b6306b33942f0dc9bee1 100755 GIT binary patch literal 18398 zcmds?#%ApwJ)ukERLz}OkH&YB$&jk-H?E<>%0gd;6})5Y1fztWjX6;lM1VQ*0;m;lqg5$M~a~m5Q!SV21a6HmQ za*jXY#_^Erm{(i|{NGk>5_Vmnp_hb;+qxSOJ{POh7+FEKd1?wrA$1 z2i^q@J$}U?V$-wBbM0Wnz=GZV?XDqb>*TZRP`K2cUTx2|=g+iPgHeqeW87MI8Dd37 zrbpvET3=y#_iRkhYBTpOLwKNTp}*GdI`r$@oLWB9c6SZOf$2rZqS5K;^`+(2x$cP@ z9Ea-D)9ur}yTc=S^mP{6|J`xcjH7iLe+%r8x^%&g9wYIobKYh2DQ zpo^wug@kz7VWtLG3EIl)^2+jBdrlW~>$89gep*Y7Rif|CpK3d??-E!k)0y_b=tg|Y z&B3mpXEX!&G z?|X1I|Kz{^{+~Xv(FgQP8u#RHed03)r#J42XWswSXYcM0JCtQ*kWT~lLs6J){9I5L z3$1u>Pz2Y;K^_+I;ov|VxQ|A`+fFly4n-ha-rSjtf)@Ce2X%GZcu>^JlkVYUlo!cu z<6#+;Pk$lEqp^@>nzbSUDibCAPyhw{^DgyH&JP_v+PoBWvTKTwLMd$Ohe8Mgh1Lz38 zp6jBsH^?53!&KbZx6q23aS`neMDt`c(MrI7kE0TjqeJl_$UdtFMF_>9EiSZbw~q&7 zQI_d`fHm+SF88rQQQMh`k$@GKf_w}zHr{GXd~b3OfFc0x?362~7h1_)Ml@64G4ump zgrQkc%V(QO88*|butoE+-4L5hMSI;xp^u{0nTjUMB0t)yIZ4__1I`6g$Vk)z>CW9e z2&bZ5%&AR96Uf_i(1Zi~c+A@|kFV%&)g}B0UKAqz9`KHa<}|>cIa-31CPaGl%T= z2ud5_IWvflhYPKmcjnEs{G9o_6esv~yOKNnzFkFJId5n#zTUGT=j7%2D<{vI4R1yb zDQJDjj2h$e^+nyx=n8(m;rx8PXyn&*XvR-+7&!pB1VlUppx zug~vmhTMHG=-@eJF6Hz6%NP2W$n#!a1~&!A3VGM)UG8I18hQA_`)=8Ilvp_x-NEC2 zL_QGj!_tnn@CAxnJ_LUDFF$(2ff$=ameK*4T&r%z3XH;C$#owT>I~wahv1Dv8i$|X z$n`%5a_alVm&s^PD=-z2oiLH5P!{(puU_pMc*Xp0YyM>Ph$&nzU^LDP(OUU8dh1w7 z*m{Elb+7^jmc3Qn;Cu?(EKL^K<0MTRUb$p9vU}o0&p}6Cvm)sO;WVi9AuA%MS#m2| z4Sm2ianO-dE7So8T>%t(WzLdj2tisPwd3FJD`M%T-vD}zUOk>)=4vA++8 zkz&fof^<@8P`wzzK%-er_VW!NQy#+~0u7Hc_At(;uUk2XoREMtaRbEtt< zNA6MH1Viy0`#}7y1Fopn)6SZ1@! 
zeJIZ%`Ix8x-9(zG98pvO#ngWRltj3D=%B+|3_~Qy{wb#(iNhK;BThOyb5>FAhvx!D z=44z$v@2F&axuWXc$j$!6&XIoghh!aCJ%_m>JG;!x8h#2Y`@~-`L@E}V0zDF>zA(W-jZ zqOxHoD`_3!Vcm`9HXwsbfW(6#vP*$f!S*9Tb6A0(6PF^ygmdQ&$WC|x(nY)lIpZk8 zr$XF4TqsEu0%sNe^^@eE7Bp*rS1Ue-Y zu}dhJVzOh*t}Bl5nJB4;Iy}f~^3?e&OoCRNu5(&Km*s2aF7x;FrQ~VrO5u|AO9BEy zy+{>28#H+clqyIB#WJ5(iic75UPndI!1}@^0Wk%y1~$$i8r`ZUEx$pbAZqXb8>03b ziyIU zsgx;{S0|}(VhL@7(C^$}D!2704sAer(VBXfD&oc9^y*E)4xumGNEXUq(1Xa2H50|_ zsNB&hLsuf`sa&Z=u!bep;9?x3;;GcwE3{0;kWYj>zFEkd?#uCo%G7UDU@#w2B-vw$3CWt%e8d|6No6+e0F=9qfyINHIjo33UyX{taMEZu~AHv)UN+ zAKa_ML4JMtXN;xAy&Yv=u~+H&R@iHX2lz1S?V6A$I?YB*W&l8XJ;9w8!2VWsbD{ok zQ_<_S0ofxuRFm4doQSzp%0$$R9*XLZ*LGYE9@E6AuD%rXE=d>nFp6xTz=xBx)56xr zexW_kK~WuQc4(5hSuHQTsCDUB7pDcqFTAcL3#O6LCl)oMk1;N9r5S3FjX}L9P zCei33iJC)E2%>~1f3hUbI}#Euxg%u!VtBe0b1jEH?~>jtUYU3*L{^^g=Sq)Z+th+zXPGv6HL_b9M{1rl*ng6y|lxJ=0B6bgT$Jf@Nn&Z#m87(U43`a*tp zMN!3Lig0aLiA5EUWgztOv<>MZl6n-*Rz0h&9jMJg*SM`aF5(=Axac@6U+TFCIMp6f zI&Zel0;F~3cZKzk7cpl{?T&spFH*O0WH?qu2+>Y{*yYuT$#oP^dbVvWcTrgEZF4$J z%tu;Q4=3eja_2bJHrtK9DWwmyTfaL=+GF?m=x$c}k+egd8?l1{gexl@#Xg@)szYV|TgisH zNt!0ZvCe@v6{dn5z+4}%CWnQ_NG@i2v5!2@r*sw-d1yC3M`ikQi9=TY=V~MI`6>IP z8djWsf*Wz{lcCTa5*9c=5gN|OYm(_gIxas+&FuvTI9MiME7$__ae;wm>b?Q7GwV~K zy5C~J5HR)wE)DDv5A@+n0o@Kg?GarPJvD!~!LCo-!Tx0JHidLkdT4h@UTb<5<0ut1Fwnapt&E%SQvMpAMuM0M{5h1ILwNu7m*VcPZ zdxP2U`(qx3>)zN>oK+AnNH$rT3gEDHPn}nE|B3+i%2njYYVtpw0pqW-zW(F!C2nHgdx3(75+Sc19-*p{1o*8^T z^LH6qMjyNr5>SIGsI%@kaO-#?oC#h-(>*!7FetI(j%{63Gu@GkDE}3UALP57Kd~5z zw^xyt{gF$`)Bt+95{8B|FDtL%YcHRbN3*1?u_Y-JVZD?rQVWz^A$9o}j(ov3t6a@Y zSHh)a$VIW-yZj<0DYI7+rQ6Syaw@xIRMA*aQo-(_HjTE{7!5_v2HjJ&C4ug14BDU~ zB*^|Tx?~8C4qYyYqasv~mcZ*^k(8=BZZr_k_1Ni?A6)K4pLGn$N+IbLMk3^Vl+{TM z*&6ZM5}W^9yw+K*o)?iT^gg8@qTeo;ax8x%{QGf-=np2oICMvi;F5&cs}d>g;=u-u zq$MT5HgL)V#hDeSCQIwmNM~#-Nc!N~)AV}6K~Mx?-|MNl{LX`*O7tM)Il#u1T9-h? 
znni<9zqLRhOLcjV9+y_V6kSO|2sNj(#2AXP)V&B9Mza z5MzBvSGn5BNWrmfB-4NSP4zlW|D$FD*F@pn(Dr29Y-Hkk|G%QKBZ)Z5?!u2Rj@;y= z)a05x)l;;&e8Y-;QJxyc+gC~{$mF-vR)njn>?r%Rt^!;o{GC}DI)ns^V)$Rfi@Lyy z8II?4?S~m|UfhEAz<{f;PnjMNQ-oH)fcCfb>nk3aL+GdeC@ zKBf!VeZ>GbEGeIU?3q`?19*tX@4_}T((NMaU|~6-<1vYI3)&n`l)L)lF|39wv-A}w zLa3TpY#}|)U7($8=nL7xx)+E-JyQx5se@ZLtkzWg6&3Cjr3xA*8Ee7G zks)oi1_GFpH@aqo8h5U5PYc#i;%)z^G|QRp@zDBzDvz@5)$d0RVtw@+1XZaYXB1ze z`VG#m?7(uVZ9DEx{XQDS5nYsvT@%$&i<;;R%FnU1^J?m`uy+*uk|_DWpHR zmf2Y~)BHl^Fyf?P;QWXHS`Rz*J#2RWk;zKGbfmI>T2lH&I2hC~DV0AhDJ$qpS^XQSQbS5*SA289tJg&D zs_0!!UP95>LV-U$MH@JYDc#n=NE|RqrmdF7%eK7iU$94diZ_YcjSi??T8u6;8(?PFuQi%R!bQIH*(^cby zH`QrNubQ%3#>neyP~^t?)0PT)oVK)d$k4}DD-XYo(pt;LjNhNTB>w3n{D9u-(kraR zw{&=aON{VZeNv&>ANAG#)=SXp?Tfu1IZXo()y_lh@+wLyNxNjR_jcGvv3EH8?w`* zZC9@!MdjcWMcI(>QS=J_b!ZXXy3M9pYL;!Osdr$s+83H3lE9yHMx-D+XDdwYO)NrR zXY?5_y=}P;OLu+!Wrvrw8J!*E8-IWZ%CHEo;rz;ME6}MRTw+iH=bJ}b_yH3BR|G)Q zTblH2TL)PR#QbQsL#kk3j0yOXp?i97w$S??p=<7G(@>P??_DfYtrzFVffpUpg0>it zc&WwQ6#Cf5{r5j3xxO(Hu}QjXdat96AkyF;P2PsK=rIIhdPNLn7f(boS=%-tt#0Gt0HRExMiU@GAb0l=ZXLHo z%fn_h4IFr@92H40QK}h}vs$*wvFY%1(KzFuQ_E5w8XqM)cUm8X=h{va5Hjp+eZ_iV z?8JefcC=$XD3xklOG-_v-|2o{y{xjrc zIL{)9stgJpohvv56u?awkQ;;YSq(3*LfuMqMMFU~P|l9kbiC!U00kGxJBNpG>Ad$j zE~#++v~Zx*Mz7AER&sonn!{Xy37R)-jkxVErinR1#BYcx&eN6#M&ZU#$QQ9wPknnh z5?f4%85l4cz<4x3>U%=cIzTGh=8@fJ*M)p}@=%Gyzjp*9FF7mr{UZ=LDbG@dH}P1^ zIKWe^cvI%-s21HeK|bLHUY7ZpmZUw!9aDMw{1LIR!$uRNwlL%8kAOkmErPqHZHM67 zZ5$MgN$r$$@=oXZfTe;^Z;6^bFF$-i9Ze)v#FFk5(slh_R>9c_KfDc84X&i8Q_*vv z@@76?IFe;A)+Pe`;eyxva=Y7|U%J0oe)m#)wdgJv!HJpq#rB*oXBPSG27V)>Sed!M zy%q%e27NYOynDWTvY2Zx&OA`?>hh`L7QN-WsQBoaC0o{>@CDH9=gV%leQL$<1D*A?cEK-DEH9my zzki)yGAZ~Ch_eqco3DLludgAQ1c~8ZG0*)1%1vSn2Q{-=F|(%Yr?^hvy*l4*bHnMchBd8pme*!!X|MH81*w|SrIp!*-fEBy9~OL1W<6`O z{%O2^y6yJ6{Md;TYbz_Ah2C^;_H-v`YWn1A2gGMOCp+}a1Y>F|+9!Imiwo1>)}CIS z{f$mAZp|y5_RRE1UyvIDr#q)lES(Q*pN;k-i)=9HcdT}Lz9VDQ>BTc0|J3w5n4b5f z7@wYATUcC~>7BgY(=VHz?ko?Un$y##*XDaC?+7LYm&f)DV~3BcYmW>bnYz#!Or4m9 zGw0S8R(mH`XM;&ipQLGivE8F_ws+DaFlNJsWAb{;a0IhD=X%g;=A?jct0DQeRcqT` zBR4a9W)z$G4cPQn))r3mPR;2+3Cxjiul&rF(}#O+mw$G6gz1g(dRSe&9OUak^}Q#;{w2E`e9ZA^c7;cPcJtZTa)98pJ`j(6>_ zjt}&1yRAJx`{=^-nfCm2FF2;=c6W30yLWnco7?YGJK#3Azi0d}7WaYZXW}?o4==>) zE;tk%h=b^$|L4M~C@G_A+p)=@3aU?kJt(4Tq7_esx#LFvkL@nw!;?G*x5Pm!(Tt$g zsDf4+A9R6B4mCbeCN)UcaB26}ASkoy%|`{Xl>sR_=(6hmZuLsITqXBS20XpLJM8(~ zy6^gV-}MLUzE6`u=|@7TUK<=-;<8JJUI{<`xGn0^D(sf2i@|>)%KJz%H@3lSA)c!m z^HtKl^a(?fElq;jA+`cu+RUxNM447^J~9bafH#V03Zk}JEAjhCt3I|j-OonS3T(@? 
z9>SxxSR9)SLHs!(&aD0(gpWw*lE*=Wr~HK|#>isZe=(@`&Xv)gz=aGbW;Rz(q3#gXdC%U~*g@9K&m%`)5{1;CHNpn923=v!pf$JkKn!b?T7dfIMZ4J+mw zR@X);^eBbkT_Y89v$wk^*bb3KcCzuI9-G`$56(Xtg^?)vonWqv_XNcoB;hVRbn%7X z{K8i+|Ji3>3?48J)1p6Xc@SDe_WXxG{KtR&!n&U0%5{rh1{=Hi6&FuMd&}hTB=o5E zbzLIbB*jsr&4KfSlc9(3+>77-yMH*O#~9KUZ{pJI+C5je3eR2a;#=ZEcZ@2gOv$wR zb1y#ew=cbSu=uUo=>ThBfY6&=d~0y93YQNXir)&lNN#Zy=_(#%#CHPdl|LPZu}DzN zm0>G#;hsP=C??9d`e=fNu;>R{p_Ubv+q>1$G9#I2_)Om90z{xf&;l?B4n)YGdb;}~ zgv^;#FzvqTtKTr4bVyIJ4F>3jK}Z!4*!+Gmr4U9RZ$$BWbOD?$h9=?FbIeU483Z}* zFQ*u6;>~ipk$CR_6ygo7B;IaByy2;^Gahas;IWq`iO@d<8zsC7&8~Ud5LbU~n~17^ z3%VtYft-*jWE+yhtK*|y;bVe*4P*w|rb!+hv#g`&Z`jLVSaMTdy=;_{MQC9Upm8ln zQ#>>d#Lf^Y#;Pa3X&QQ|Yz#CrtaeB~TCw!GXobai3z}DN>x!#F=7nHz``zI!oPQIY z!QH`LV@A!DBqj0wNLH#4qmb6Ocmd$yu5JOW>iNK5 zk<;QfNsOmQQ&{e=mx+O(jY$lC5Y&)v=K-kTJQXdKp)Nfa&LQ@4E51=GVVKZ^N-zTy z?hpzWLkVHev_dKo9sJ4EK~gG1kd4ZGQ!*b{k1bctqkUD7u8TocVhX{c->!8L!~Pm| zQHwWC5y8RSc3}$Ykun8!G)&bRYZSm^izTM6rnq8`T!Zn4(YllG@ZQAKb$=v@3Lu(i zziHd9ZgD3QWrp+LPEq1eN_-h5#x{EZa2=Z)Qx*ck(c9}I*1 z)Aj01Tzx*gLV@LmQQ#zZN%d*u9*)XRKp!*ZY)=nZ+0Ta$RDY@gEPPo~fHM&t%EU%L zP<^-VMAGE<{gqJaOS2j1Kl$1-w;zb7=w`ocbHuG3h`%EFVVL)R z2T=dG!Wi)jDdXxZp?+bFk+;|16_~b+bwQq91tMHRP>b)(sTV| zEJxpH)XnBdU9{i&WHv-^%m#!`2w?}V_OrI6A7GM{{}#Cf`VbLJ6D{c{nXBTyi3qL1 z8v^8#PJ+*pbrL@uJi@_Py8MtxcB391Z5loVjR0>69rK$=Ee_!-h{_>1BN4b`)t9}i z77D2(kSHP`G$5i=;LN-LVU4O+f@L&{=(S9GmmbI8BvsZ$kYz%bCoMo|yd#=0xMc<) z7x!(Sd_V0hARk(VW*yC+Y6XQ$V3yXH5xtL~r3s~o4Ms6b*5d~lSv*~)9-~wk$%~C* zw1Y7EuZFn|#BN0_hg|}B5rM4;H%nIVE+dN@IU+5dL5@g^!z9JVnUNzla>QCZfm$pc z)8iMH6+=#Nt_iV&l87>$tENsPY{|1_rqm#G86sEws_(@rgN4S$M<+vKM`lMZd#}$q zfIBsBX$--6EXZ-yv(cGJX?3paGU1kaF?^T|$p)RWL6ILcj}_CPNP{x4O^dzvkHFx1gy#i ztU{iZbsLvV<>XaH>Y{xHpB0#A@Sp@@pXk3c%o#{Pr8`IhhTcaW zOpSa(fCLwB^X^+~C?gWsq&Q#-Mw4dh5_lht(XcSXvI5}%r|Zb1c)LQt5aV9FgUcR& z!TR2#pv@A8&&FY6Jwhs#eTA&$0|zp&(w0Z-OjkUKmCE;3rZG?%6VZmWApy70CE7FC z?v>W~8V4zX(DbPdBmIhdMpu-KYQy~%N0u@g#L~`wRwyrmc6xlgcGmcyomazW&$aN` zHHy#8KXrU|T^%2^^!sVtuu>B7Ai-_JCzLsqNG0#Lf zO>ZvK>W^M}!RkJ=RAH|ib7QVa#c9kcPGgwG;&3a)ULu%d@*_%KgW~=05$k4F6Pd3g zGNjt>nl2+QB`?X4url`Yi`*B7W#MU7@6Gh?sY7RFi)cmDOQ|w0MQ)L7*&yqZ2JPrN z8nTZ&~iXSjNXUdqw)t8c5DMX<1fY08KJGM)c3&y?R(g0;e`VC3+o46pC zP8Vz6V|U8br$A$>+X8?hrG^yoowb5_FJ(~;@v4eNk!#MiCdB}-&fFOCR~?5Ex=Xkl zkn3Ue0O=n*pX!#ONG(z>VHG_i(8G$J_?F1JQXc7)PZIgV+S=g%hLa!DlD?bKGO+dJ zxCT!vW0D#Mp^!ikb7<{JIPh?{95dw?`uKaL z+UDrC_RnSkqKy_H>RW(_eX@##StP&$j0xnJw@5(q7D+}&77hc8scQI~>!~PA>QavC zf@WB;Kmx}$n~ooX&Ou&E^hAUA(Mf#|pDpAxy8+}U& z@vIBYNqxXA7i7^n@gZR^aHIR{I)ax>3#2gsDl=Fh?P@2U|9KRqc*Xudk*P_j!UZ(v zTqxAM7cLW_iwRDv_4iB?;PaPR)JTZRiq(J95Ub*Dv2PD3WCd7WB2Mf}hS`lKR6}Q1 ztpLTxjNTo<7!yU=cFP?o-lC_}-jiG=nu5I4a+#Ikw^eDs2461|8s+2(dpT|OaGJ=z>& zu^s1#uxUgKR|vfnla9Tn<;MIH;UL_QsMk`w!oQUFcT(%QLVITinv+ANE&Mi^Py7YL#4iaSyDNOk|bY5Nu%;5p|U8+ z)OAFtWP~MD&2Im$&>eeE@%Q~ZH@G7nkSVxA(1>23&p^-=hYZwPOH0EE?JX8-4u!10 z=xbcQ3C9Jy4jH{|EyqX1E_LH`Es1}Kj4@oMj<950chN8`N%6*PYY>*C7JT%#kL1+R z0=!huPzEh#>C0d(a}tQY6sMBWI;PQNdXidzYta-r6}n9I<=DiTB8`}!temhUG`SV} z=&DQ}gSQe|i#yft#n-|TS+^0n)om@X7DJlA4%18#qLykOmef*BSaKaw{r5i`56qs- z2jj{hOiAs`Eb?fPC7vtZTK7UyAHFsQMHc+)zysrs%oHjQlcQ-23LsX}GPfkcdauK| z@gTg~x&&e#&~ot&V-+!OQ3CM}(-IAqHoHMK_2XCd=>KaTDca0i{~Lh-w;yjq+lh`Lx>< zMuCB}M7BDWt^OSEdXURDsN{OEQ8g}W2hiLyxjQ3XhQPkCh@N95Yj`mwYfLfa3h#_| zTyqRbpX3+MHWlqE35usqU%JL55GCI99!XNe@(q8ihyACJS(HSMO(f*1fPg?8AQ#=_ zq?aI4nWLaMLnbBjn~H7*44_F&=dKEfDN~rnegVbyE&DY!+|-``H=EjLo5CWi@&u<| zg}}847^j*wr=XPu_hs6p_9hA^*GJ((6mFV3@REgu<-gxQYVfD}bt-7txy~HkT|H&T zSVS2Ln(Xk=B-zd!uut`w1NJH0JQnmeo;hq7z8Q4?Xmbc1WnxV!h>jc@&l1HWTT&3* z--CF@j59!0(pT;dwxPa05 zP*fN_k%*u5!BgL?pwHlC=MPYAyPa2Tz{|W%q*pf#F9Jcxhi$TR7S{wXgwb5(uuzSg 
z15Ie{-`E0$vMHjo->lCHgPZyKK@F5)mPlQ7Yjk~;HFM_Hc-uc$E%Yr8zCKeSsh1Y zo^)cGWW0tuR3!;81r3GNrc73P2?jNHWppBfvN()3&?xyZoVUp~5aGqJc0)rg9})qp zypaM!Uj7usXGBr^RH)@?u1w$8H#dWQ<6mS%{Ju%#yzi_P9})lGjVb8Bb!RU}p%$bFyauE2iR}~;s z>n2mC>XXQ#E-jZS#R_sx1UOtV)+(6N%Ox`YI#xlm{fU%bPqPA%UdTi}V_kG$NhbI%^ym@-P9mOev z5_{_O3(`hAJBcM~e6OVg_`e-gl&HZ*L`Y`ZpNOuj3fs4$s%^56os?cz74~{2ZdrwW z)roZ#R;MApysCn+3b7kznZ#U)Dr7`0RDQj8P=3AWlfNsCG(0ci^ck^xdG{rGI5|2M z?+_(wFBl37a})3A5;-~&;0HH5Iy26oIEW3d>VWMKt?%dnFLWizLp;jN-dLuYeOp%T zLyrd4)(sZSR9J-u%9Gh{Urgi-MG*DQRobwbPECZd?S3xVN*z%9x zS(0)in6E>_td|tS&a_jASsc!{1)8nABPf17&#m=nKIh+9<#kO0I|lH!wJKc}%H78@ z{q^GFA*MzLwV*8qBs#U2p91a=)h|FtlJf~<7#6nFUmN2lOM}|6&8XDv_vMX1mBVj@ zyewyLn?VILiUv^F_3ecLsw*9`9RtOrNTgv%00kJV59^5ov1vRRm0E<6H$>1<2V@aD z;n&*+2M9x1#wWq}uuEAkGd_T`V`-%y7$2&?KH=LiTYLCH6lTK;9F;>BHXL}>n;L>_ zo}qE36<6c-X1ByvJr|a-SA8jtb9x)>h%{9H#gQu4YqS<{F4@=yTQ=A}S7UUe8YhUa zLKA=6BEXw1%1o5UL*&sAh$k94>j}HINhZrmhrEzsG#o*cDLNGZkRV`PYY2xY{rg=W zHmhmiNUzGlaDs^v%@~7kg<%T+;j(LE3dAV8l_UZ|mpfEq^lU^8m zcIf47slA*^EjwvYU3{JhG{`S`DN=s!317wb;VQ#gBgM@pD>2;=^l9z!dabgYZhI&OiqK z5^~A!2*dU2##&I(KjSe(`8A!pGr}qfP#-T7Z_kSdSU>-d5YJ6pqzzVrMje;8r~;S| zzY61L@j(r*Zp4^WrX4jkaEd5-S7yX)0xSq9>o4oTmG%17Z)%g-vT)#t2Q@gi%rvUd zrA(B3tP63)ES!1g%rx;*h}7;{C2JBfU^qX9I+}~UHerWcBrclDF)&~>fIY~d4&?x8 zrZ|{0|DTTs)zi!E{Vtt08s`k6M=c%5^tI67|K9s>RO=znA zAKUFFYpQ6&9;Pa$}|HuN^GjgThrTaW_4z zhj-)TazDM`)R!pspi{aS={r*<_XK4_`{77&?qF~e8jG>L2Q4KmG_aU?`<_Xb?x}m3 zD%!%9iFv)o(+9S!Bf$T(1Jel9k+22=ps-bsh_{x^mT8k+ltAh*&<-s9J|+Wjk4JEi zg}be(c-sUXN%d@_NNNjbU6fvLYl#bfLk%|6)P^V)X+w^NqZ~{6&7^EbwLGMmz1YVP zA^2!FlEc2JaYibkYM|UlCL@-D(I%u;W+^Q-A`4d1YAp3?Q$5C;_nhDw(=r1grK!i* z3G=dvNl-nHHXx5$Me<;U1ju4&z^=7_zS6X!;I@tbe(Z&@*19TJi#T%}(;ih>1)TWUWQ%?>rGg zP1T*Nryut&W@28qKH81Y4Ph(q0dZydoVP}1h3o;7qSy^p@hT}xY^O!uPz`a-f0*20 zB+IiCiwh@b9}R-)$;&~0>O(7w3y->$&I!IJb@uV<>GnLIPfnj)Sv<{8r!(WuwpZOu zXO-^?dFU;=#fACv?vai=!AM%#C;7b6*++~v^u*%nCBEY9Fvvjh$?goxz07j2|3IhL zn`ObpvkT1WE#B?u`PkQke49Hv+dIWF^X>E2$k$l&1&d#_6zVc)+-^=Ci?46oj>2>&U_mtx!&=o$xWv$r-8w1h{3yVEJ6)e@& zJJs$vucb>5? zGj`X59esu@f;_a+URd?t7#r1|{3h!;_{C7Fx_NqbWws~6@SVekHK3lB*>Vj&dB}gk u7IdfE=RLcZxS8#(dd2~dFCX$Z`Y|xYljh9g{25S%MsNU52{%5J4*myufN-$@ diff --git a/unittests/test-contracts/snapshot_test/CMakeLists.txt b/unittests/test-contracts/snapshot_test/CMakeLists.txt index 0e85b946055..a628753d325 100644 --- a/unittests/test-contracts/snapshot_test/CMakeLists.txt +++ b/unittests/test-contracts/snapshot_test/CMakeLists.txt @@ -1,6 +1,6 @@ -if( ${eosio.cdt_FOUND} ) - add_contract( snapshot_test snapshot_test snapshot_test.cpp ) +if( EOSIO_COMPILE_TEST_CONTRACTS ) + add_contract( snapshot_test snapshot_test snapshot_test.cpp ) else() - configure_file( ${CMAKE_CURRENT_SOURCE_DIR}/snapshot_test.wasm ${CMAKE_CURRENT_BINARY_DIR}/snapshot_test.wasm COPYONLY ) - configure_file( ${CMAKE_CURRENT_SOURCE_DIR}/snapshot_test.abi ${CMAKE_CURRENT_BINARY_DIR}/snapshot_test.abi COPYONLY ) + configure_file( ${CMAKE_CURRENT_SOURCE_DIR}/snapshot_test.wasm ${CMAKE_CURRENT_BINARY_DIR}/snapshot_test.wasm COPYONLY ) + configure_file( ${CMAKE_CURRENT_SOURCE_DIR}/snapshot_test.abi ${CMAKE_CURRENT_BINARY_DIR}/snapshot_test.abi COPYONLY ) endif() diff --git a/unittests/test-contracts/snapshot_test/snapshot_test.abi b/unittests/test-contracts/snapshot_test/snapshot_test.abi index b82233291d0..31c2f2e5344 100644 --- a/unittests/test-contracts/snapshot_test/snapshot_test.abi +++ b/unittests/test-contracts/snapshot_test/snapshot_test.abi @@ -1,6 +1,7 @@ { - "____comment": "This file was generated with eosio-abigen. 
DO NOT EDIT Fri Dec 7 11:56:43 2018", + "____comment": "This file was generated with eosio-abigen. DO NOT EDIT ", "version": "eosio::abi/1.1", + "types": [], "structs": [ { "name": "increment", @@ -43,7 +44,6 @@ ] } ], - "types": [], "actions": [ { "name": "increment", @@ -61,6 +61,5 @@ } ], "ricardian_clauses": [], - "variants": [], - "abi_extensions": [] + "variants": [] } \ No newline at end of file diff --git a/unittests/test-contracts/snapshot_test/snapshot_test.cpp b/unittests/test-contracts/snapshot_test/snapshot_test.cpp index 65ef6f6d854..84e28e53ec7 100644 --- a/unittests/test-contracts/snapshot_test/snapshot_test.cpp +++ b/unittests/test-contracts/snapshot_test/snapshot_test.cpp @@ -1,82 +1,33 @@ /** * @file - * @copyright defined in eos/LICENSE.txt + * @copyright defined in eos/LICENSE */ -#include +#include "snapshot_test.hpp" using namespace eosio; -namespace snapshot_test { - - struct [[eosio::contract("snapshot_test"), eosio::table]] main_record { - uint64_t id; - double index_f64 = 0.0; - long double index_f128 = 0.0L; - uint64_t index_i64 = 0ULL; - uint128_t index_i128 = 0ULL; - checksum256 index_i256 = checksum256(); - - auto primary_key()const { return id; } - - auto get_index_f64()const { return index_f64 ; } - auto get_index_f128()const { return index_f128; } - auto get_index_i64()const { return index_i64 ; } - auto get_index_i128()const { return index_i128; } - const checksum256& get_index_i256 ()const { return index_i256; } - - EOSLIB_SERIALIZE( main_record, (id)(index_f64)(index_f128)(index_i64)(index_i128)(index_i256) ) - }; - - struct [[eosio::contract("snapshot_test"), eosio::action]] increment { - increment(): value(0) {} - increment(uint32_t v): value(v) {} - - uint32_t value; - - EOSLIB_SERIALIZE( increment, (value) ) - }; - - using multi_index_type = eosio::multi_index<"data"_n, main_record, - indexed_by< "byf"_n, const_mem_fun>, - indexed_by< "byff"_n, const_mem_fun>, - indexed_by< "byi"_n, const_mem_fun>, - indexed_by< "byii"_n, const_mem_fun>, - indexed_by< "byiiii"_n, const_mem_fun> - >; - - static void exec( uint64_t self, uint32_t value ) { - multi_index_type data( name{self}, self ); - auto current = data.begin(); - if( current == data.end() ) { - data.emplace( name{self}, [&]( auto& r ) { - r.id = value; - r.index_f64 = value; - r.index_f128 = value; - r.index_i64 = value; - r.index_i128 = value; - r.index_i256.data()[0] = value; - }); - - } else { - data.modify( current, name{self}, [&]( auto& r ) { - r.index_f64 += value; - r.index_f128 += value; - r.index_i64 += value; - r.index_i128 += value; - r.index_i256.data()[0] += value; - }); - } - } - -} /// multi_index_test - -namespace multi_index_test { - extern "C" { - /// The apply method implements the dispatch of events to this contract - void apply( uint64_t self, uint64_t code, uint64_t action ) { - require_auth(code); - eosio_assert( action == "increment"_n.value, "unsupported action" ); - snapshot_test::exec( self, unpack_action_data().value ); - } +void snapshot_test::increment( uint32_t value ) { + require_auth( get_self() ); + + data_table data( get_self(), get_self().value ); + + auto current = data.begin(); + if( current == data.end() ) { + data.emplace( get_self(), [&]( auto& r ) { + r.id = value; + r.index_f64 = value; + r.index_f128 = value; + r.index_i64 = value; + r.index_i128 = value; + r.index_i256.data()[0] = value; + } ); + } else { + data.modify( current, same_payer, [&]( auto& r ) { + r.index_f64 += value; + r.index_f128 += value; + r.index_i64 += value; + r.index_i128 += value; 
+ r.index_i256.data()[0] += value; + } ); } } diff --git a/unittests/test-contracts/snapshot_test/snapshot_test.hpp b/unittests/test-contracts/snapshot_test/snapshot_test.hpp new file mode 100644 index 00000000000..dce23efb538 --- /dev/null +++ b/unittests/test-contracts/snapshot_test/snapshot_test.hpp @@ -0,0 +1,46 @@ +/** + * @file + * @copyright defined in eos/LICENSE + */ +#pragma once + +#include + +class [[eosio::contract]] snapshot_test : public eosio::contract { +public: + using eosio::contract::contract; + + [[eosio::action]] + void increment( uint32_t value ); + + struct [[eosio::table("data")]] main_record { + uint64_t id = 0; + double index_f64 = 0.0; + long double index_f128 = 0.0L; + uint64_t index_i64 = 0ULL; + uint128_t index_i128 = 0ULL; + eosio::checksum256 index_i256; + + uint64_t primary_key()const { return id; } + double get_index_f64()const { return index_f64 ; } + long double get_index_f128()const { return index_f128; } + uint64_t get_index_i64()const { return index_i64 ; } + uint128_t get_index_i128()const { return index_i128; } + const eosio::checksum256& get_index_i256()const { return index_i256; } + + EOSLIB_SERIALIZE( main_record, (id)(index_f64)(index_f128)(index_i64)(index_i128)(index_i256) ) + }; + + using data_table = eosio::multi_index<"data"_n, main_record, + eosio::indexed_by< "byf"_n, eosio::const_mem_fun< main_record, double, + &main_record::get_index_f64 > >, + eosio::indexed_by< "byff"_n, eosio::const_mem_fun< main_record, long double, + &main_record::get_index_f128> >, + eosio::indexed_by< "byi"_n, eosio::const_mem_fun< main_record, uint64_t, + &main_record::get_index_i64 > >, + eosio::indexed_by< "byii"_n, eosio::const_mem_fun< main_record, uint128_t, + &main_record::get_index_i128 > >, + eosio::indexed_by< "byiiii"_n, eosio::const_mem_fun< main_record, const eosio::checksum256&, + &main_record::get_index_i256 > > + >; +}; diff --git a/unittests/test-contracts/snapshot_test/snapshot_test.wasm b/unittests/test-contracts/snapshot_test/snapshot_test.wasm index 27692dbf5403439922fe4e628484155633410516..48e05489e19289ffe656762b9d1399e4a4bf3c25 100755 GIT binary patch literal 7286 zcmbtZTWlOx89wLC%_%#bT#% zVZGUD)avWqb4qh-V&P0}x&1<;bEdt%vQTSQPAbFwXnwuZX{>Z>oyL5ld9l$^o?vLS z*P88GeQmAL=_;RFrCM!qxn1wBudFq@i*Bz7tZUsiC=p>mu>II-E^O2m+UsYQ8}3x) zcv8+zmfI_5_c$JN$CZ=AcgEf3gAbkBWA~wlE7`74j_UK>W_zW!Q18}jYt0uMDo?Ue zr%_+14V|e1w{zzk=jT^1sSz$FhQ+PTw-*{}l>4I~f40#T=>%}32A!N+?lw=TkN^d& z7Z$pUCsdKk65!WY!GKdpiF=2f2z`kcn^6C1r+L2KxunJjaNte=cVCV3=xBB{tg_t* z?%o4x7|TI-4$bbqn<*G)!#5_~4XSgw_(Wk917AucJ~ibJ#r9 zN&+W^O5Yg*m@-c%`t>A7F_gtXy<<=q^qY@(1-G=|sX&H(;3~J0ysWVWxg9W=D$ID1 zNCWQ4Ma8_S`|FDN;n4gR=IJfnvMM@u2o$IdAWJmR4Ji!z6UF8L;BZDk#VmjY7$Ww~ ztmO%=My;fPtItERh3XZXtLn>meBtwl!CR)foF(#j1PrM@-&-2ANA4s@j9a=D!(TdriW-10c!H6r23Ai=1 z^+(m3GAi*a=9V_I`~%&S@Jj9ioMyBHc?L$*459)CfWxIyhf))@vVqkA<_z(%r5QmP zfO4T?(&-lVAQYbP8$97_(Z_bfKUUIkkLcbo3pK8Hu%@j4cu6rV(sQjufx#Gjkf1WN zRzF5=i2#A|LscyteGXBt^2M#?xwgdk8( z7-UC8T>)sDSx^ts4Q^QdcnMm;63hf3h}1yZgp*B@4y*?zkjR2@k%oLCS*U0{30(FG z(wF>afXLv^cq?7Ku$1`601@Ya51>fi$SXF8W@10paW1q%G#Wny=Y}-L++aD_0JfNN zIY?4WWM!CkL%|JThUv$;VHQe(2IoM&+;V9t7^K-?JA(x@U;liNwmdirc>Ba5x&re- zfNYFtm0=Y)dvrw4B3G0-&=2#>O&-CEG%z&{6(_1f5Ixi2Z3v5)8Ct9D7N`EHB%qT6S!(fK6}_&t!s7`vwOLJ5B3xd%xR8 z5HibTD^Ng<01%!#fS^wYWQ#0Fz8#(M<}jeZOk+TKdKLrn`7BYdVvYed#kXJpHHUeO z5zS~#3~H8di#D>zVR+!oWwvMnUM)zFVqQ;*m&yLD9kHVpb73HF3qP^u%^QzE_yAeG zc$t!n1IeKBA^N~QwL!# zf*i2~^l)&^(gP82pPAn9)kWPH-iWRdh5F#V` z%z|klIsqS~E2i($Y*bM`?R^F)C&Y>PC=Ay`KYb0EbTrzZpqwF}<-w5PiVi?{dWvnu 
zc@U(xh%RUXtg>Pv9DpGqA8nKWbDzAooBU8U?UO&4Kv^K~4aiSsW?st^Q2 zJLUZw2<9noMle4l?@$r$6TE8|!7}oov>sG1<7HUdtR4$-u(MOz5Bk#Xq3ogJ-6wwc zE@dC0w6tKBax$;W2r>B#iZ>jp@1SA|Phc}ofEF@DcH0*J0C&h$sKm||e)=8Va*5YZ zZw#i~xenuUd<|)BNP$pmSz@*Uc4!4R zaJpUoL5{>wyRBhQV9*_gZ{rPmXGci+`4@$WPe4E2^q*u8h~5mn47xS2?xpupW4`on zh!nry)uE|mZcG8EOU0up!OVL1hF>>Cs_vBp8Bzz2f^QI)EUxG;oH*(!ft+sNY#h%{3Q8qX)BJqCMT ziWe4I`$x{nq=!Rh>El7)BvP^~i&0i$G#ApN_VKt7L2UyX!6&RRSV`OKBC;dCAxb}n zn@lKi9wmltyd(*vmk_B72hzjl=&@Lod7NW0rS$#IgECR{ItJ+r{m00ttQ<#k++`Q^EX8hvdL0gY1d8=@oCceNUicm}mk79%w;kC>N zkE~$5ZzsWNHI7-37A~GvV={{{E6J>kSrIc|;9$xO4gzCFxL^S1W0v)3iT9|wi%V!F z%8bk&zzodl z)9R?~5$>Y{xQ`6rj=K>?(}+Wk9O z$@U)z0~LKlg=n^_BCDB|x`Ien_ba%k)f`aZXc5GSO zNvi-Q%MGZ=I^5LSlP=3i0`8IX;91wI6#8OS)vEN^XvGHsD__<8XLHT?<51-8R-8>TZMi(R_ebu Cg(Nuu literal 8380 zcmb7JUu+!5d7qixy*qL5#1$zkH04%vYqh0a$E7TXqTK-IP^erihm_#o8&;kL90_{tYJ_S{4zYqUi!k^@jYi~NWO=Q`5y626|8pqT?C5GA0y?Y z=U!MVt19NoRK7CmjFIKDei>GdOy=g%kkhl&#?Ol6q`A^>cRLM)ui4mb|9(qVMfF~* zx!M?6Q#DSPwpv>&+jrDi-i$%^%~oHmkcl(Mi{16je*3B#CqdL)?jr3J$95N#%bVSm zjg|FgyVKZi_L^I*eyg{O;mSs9)u)yv!=Fnw)B_Bm$Hi%tRBN?!wc5nQe<>vZDKNv^tRSI3bfLt3&B9UgF_V$l>4K%b#W=btE;rC+q+{_+-R(~n%j-#=58y{ zV-4?>R%bOZQFD8H^Ny-VjjwlZXKJR@`1b8)M&pc^ZoT%fwhT&_EOaXXF~(dsex} zW+M$QtHfHJC~FoE4!-jjfBIMd`lFwyw`hK%le8Y%z|CgPW{c`tpm6g;l{xjQ!q8%7 z-d|>IusFCDEZ~C4*Uf_eZ?ZKI)gl`z0tgz&+ItwBD#QN5F}&| z6)bW1O%qI~=Y;6FV4*&d6Fd?E{Bn6R=V&sjTik&^xDLf(aVXqYt_7b!mNnOc1A}Yz zTJVVXW7mSmyiQyTzRh`DAMpwWerRk7!jsH=69FhP(+E?;%t5UHtsP?_Gq( zF-RDmBx9~x29|!nW|`+Nyje+J`mLJ6WaNItK+PDH#%}Osa7-l6KsV3$jHD~3-ZPms zNjZ(@P)P6%)dRbvQb@H~JqFLDNC=FlIF=9ujHjr=5-OH?Lq( zj_^t+k5&@6Vg~q8@J!Clq1t^O#qc{f~bJjLXw+WgLu>N(nZe$H&4+R0OF zC|*ivD1IdUkC;3)XZk0Z6em=CEd2!tZ9JO(agiHhZjO&$5pQHG z(@Y`+;7+@~sIbpR0wG6-GZzF$-{VNRL_P<(}8%@(183#gf-iW~tOCWW-2bmN)Z-T@`{@jjWFwwhMs6n4Rz?%+dqJsR<{%7w|( zSe-EyKPY|;bIU?lW@s7es;~^oU@K>m z>1pJSnKPIDg@A%-1bk)$+W=C8BA^m4fFNG1zbv4peMTTpa=^kFQ;d!v(V?>{W{21o z;3!12eieO%)q*u>P>BS*pd#I1nB-GMl9z~@IJDR$aKwmtU9ljo zu-6>s^%U+TugkWQ=QSPm6`#!tQ}C|*;CKpLm6_~4Ne?EkNcv9I*%86$Dah7LOD&0= zhb8_`t@VJhhBh-M+;HVDyH6!)c+1RT(kNLZ48#4%36h&9(6TG~+2rFVSfiIrM#zEG zKbO1x|8XbwALA~X!y+^q$*;u^0v!!}L}(~XLJPCurnYcOYdjUWzyA2a#|({oz(KuS z&;opfEObA3@bU9+!lNa)UH8*68sgF$7t^xfN7+qnCNuRAW#OL&|uC^skXln${<;er2Oo`BvV+K^y2zni@tRzz?#c90$W!7G3D;a9Cu2HLo z#mUwKRULX^NCy#t`p2@AvcCsJ7`N@4J*XfoYzU|*+fX1(k&ic}n89cR&>jf5VKh0S@|2BWlsi~kq-c+T1-@FH>3OE{if>}hie$Fw14JV~Kr zBqo<6~R*zlGsvuxjb;NAkU!e$$2xy zWi|q0I<8VmoRW+u=oC|iIL|za|1t>lz#zkrUDhWT&PeP`gGKw!7g4h4+ZJaq8k5m1 zMrUL+j}gESHi@QbDD-kN2|e0$JYkX^RgDqpk=8c&ZW_I+_}GlM2e)2TXJwRNRFlyZ zMpcYLp@aQ7U1>;%uA0CBIyeFH#nLqRqWTjqJ8z*H% zx(OMP?wpKBmvHpm6lOx;1>B$#FnUTxRN_2FP*%UHF3B9}etAguOGCP6SP#W%bLrTT zVBD!ZMi8wJ(Y!XmYru=P>SC&sxm2+MLy{{8j(>AKH2_{~&cVI13iv>-sknpyCo}O` z#ofddg|pGmJ|Ay@D`Jjj%Xd8n=O|3$$rru}F@?M!c^O+-Lb)(-!XkzFEfy4c6J!W$ zc&0TordsE^v|1{zKr~c);iM}0{EKmCNcc${1X zFvtI)b!mV(!%YrKxy9+Re5THqUhdSWsLTT>r5MTE5P8u(oER>|+a*t<KnJ*hMG<>cFoVRS7nz!$t3HzwaXlcNlhfG_$&y@iizyht*KLc^|haG6l&D zK3EiG*rtMe0LJw&SO7Kqc0BFkY-RJnwgXqN#%3m(a(wXG-^pD-wUE3dl@1y9j!@+u z3_(&u;8ik@0kh}7Kl|)6!0!eiU%7{{5>1LX*B|!}U5bYXCM5g{V*m&5Kz~XR-SI{g z;~Vb5p?~b4ahOQQXb1O=SrYIK7eks>xma_M`dN8LoKkb@b@4eI39n%blAS3Pqwlry z76pyW>&!s-({Iec9mWR8Z4`W!G2q`D7i}%~3JsgkGEn zKVxI^Lm8kcLlKmqDo;V;Iop>T7ul_nSw$KatC4I~ON+cv@8ofCi2pp&6=||Rax~H` zI282z*O6QVlMk*-6ZIMj4o^tH&-brG9#ErC2_IaCPDMt+1_Px7WZ$hcd<}1uXb<|w zjRiFgRdG_khAP!L+kon$4PeXn!F6nZ`e zZ6U*hIrex=NI1`I5FWf8H3i*+5#8~cxX+f0x`#gDk*Pjx0b5txlrt1PdS=*C7!<*v z3JpRtLW3+O0Cu@CB^bkFAJq$6(4PpNh&9Z^BGu50GpQnA&JBEz;Uk|7+}ktpu)TN-djO*uq67%*`DiG!yH>>=`5k4Azo 
zasx==nTS&HB}pV^wdW9Yq-*>*0O-PH@g%<|e-9HP(9FE&Q~w?RIn5uC{jZ)eATMuI+X<@7U#*T>&Mzu3@W{ zPi#WE(%stL#5X_?2`hX-UWLqq4^ zu3hPNcKf}(6)L6psx`j4((H7)eY*lS{g&-6XV|`Lv9I66_h{Q)vrVk8Lmh1H_h2t} zeu|x?+dUYF8+^WspUv59;{&?IXTYKDG;XWA+FrW@N_^LfU-Tj@DZTA}*|v8r6@lcf zJ$xB$;6r$8ADi&`Yp7*T`U`6LW9 - -namespace eosio { - - CONTRACT testinline : public eosio::contract { - public: - using eosio::contract::contract; - - ACTION reqauth( name from ) { - require_auth( from ); - } - - ACTION forward( name reqauth, name forward_code, name forward_auth ) { - require_auth(reqauth); - INLINE_ACTION_SENDER( testinline, reqauth)( forward_code, {forward_auth,"active"_n}, {forward_auth} ); - } - }; - -} /// namespace eosio diff --git a/unittests/test-contracts/test.inline/test.inline.wasm b/unittests/test-contracts/test.inline/test.inline.wasm deleted file mode 100755 index 91b4ccb686bd74139e230a7ed6ab2cc0c4c20a36..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 5068 zcmcIoOKcoj6@8!j#nnBoKt3MZ1Fvd=( z4vNxYgwU+mq7f@rEFeq7PeMk@Vm3%4Bv|lUu|Q%oi$Tf)F`RQ>wQV8-VnH1HRlU#s zoO|wlw_DY_8(Jx)uXb;k$pdvGnml-bLE*oiU}_TnsvCA9&l-zXw%{)pBMg|+eAW|X zey?ZjVDnB+*V*xFs&>esuwgPpO8IhAY- zHdeOpsYKRy2BX_+JA;0Gdwff!oXU3xo2&h`&Gog-K{yfDH@9%waMkS&#{HY?TPwqr zTlLyzf4knPHwNRu&MqEThJ)2`#J&07wuyPP1p;Bq3yUUG$Mmt8IF6z`?{uOljyoNj zM0u2VqF8ThYh$aGPEOg0_7C?}K2iShTY5UAk93Ffws!6rmG}F%2K9FTX1zPGdZr)X zf@9Xi_4fAqJ(b7(*EjDr>SWS?eRciIchqT4)QvjDi5e4gCuGN0$1lB9udlti*}qe- z_s8maPS?%T)AJ{ToTuj(I1h18&o3U!wDLMPo#$qLP%XmbZ)>e5FDmbbl|7@18Pjvx zsn@y+59Z24{QLD^-~Y}Z)mOTzegEALzx&&tO$7BT1xX3kk>&)T_*RzU#P zitc~PH)l_xk}MEMh~W14jyW|fhHY0fN} zsEj$NGBLgIQ>ZF^1*WC`8__kGlonNKItC(TfWp+zHhz0FtPG?(gB^N4chMDf(Jo;z zz3%_8uL|QLztlM6v%=2fc~PF53r69^;l-ul#p#Q-aFN6D3gyya`bi5c=5u64Vz9>#sv9m-r zmWVw3(a+!cRL~!bh0s+r6`pAl2G}JVL#QxkivkKG*igl^sofP9r>VIh@P9HUp6CZq zy|4bCpr4;&FCn<}tPrGrZ&X>IT|E*W!i)$tAxPF2|FT$cv1k|D2+p+;#F@GOF^HE+ zz;xE0FLelM_8&q@Iy0>L--LBvg}DCI21dUJbQxI$<)Xa=^pQgd8~Lcf+8DnHepB*W zI4_JJRvF`#`RH~Pd#0uS66h|>%fOEm6o|&c0g6roEigos;v>>e1XT$#!RuFrQ;1lp zY!6jYT~^BB2hu$RTf(pXq^WdQb#Q5sEBG~xoT9Z-tI8A>Y2Y|S^)xO9GfI0X&q5Sx z)l?48Kq0DvXs!~nMf}ky1;f(70URM|45)5N8ZMXA$r?z1A`XfOA6}O{sb9943|tnuBKr8qcRiI#pdiv`gWCF;T6TOCoW!w zq7)_GS@@Lm77G8*(2=y0%r z8atuNI0*;vWe!tkr4Ct5eC0Y(ey=$)O^C44@*Oh3b*b54+eg8H6Z2BYP?NuBEIP3V zQF|c}I(P?|V8&2@5!yvSj=+~OVfuavZaGVsEMNkH>DVRyLp9pDtY%$|v!I1n)8wOf zA>xKtrDkvwM+%T{64f+dO|{5I9pI=mffuGG&`SfVsqll$7ABh#@^~YW^51BINScL~U55}oLVOXjLwpX+1xzSk_&E@DL$zYN66aRjIDBJ z;yMlj9&Y!iqs?ZId}xc=?)8^!7&IjG(@*Ed$>tOh6{U8-JF#*#n#W@p5a|%AQfu&eT=`eOd8NO#B-mRk%6*S@bzmsJ%a{QfUh;|rWZT?}<6LMcLLh3e?+ zLO?s5d+Y&+;ax^IJL?S@+msrsk)ZiayjE<#85k~#F^&WKd|n{7huz3OUXKx|h0VO| zBqPyb;I_kzv@d%o*8%~}0`s^PM}o-1T`hvBP`b-|u{gtf54Yamd9$M6vMBp^;Uz*5 z+(ljkg?D|Gm4P629ReVA%7><*@XB#_H@$py!xbizXx)$5`55;KI3Io8V?nhu>yTac{DN?unL=| zjDinre=OK{r;1+2l}>-qVGMg}9;U_)lUKXC6>DiedS70sAMhx#MOZrPB|6xtun~LT zC(=I|m=1=;AOJfbzRITquwsNcP-WWD80lYvA6*68?Gf8?_m0_9(e@}19!U_wX8AD_ zFKNTiA|M;15FCo(P)3Jhm(d}IiL2=M0!9g(!4=9+jPS+!iPRJEhCG~74J{K%5+k{n zy9TNdXd!9EsDP>mvd+^@ATB@Y$bA4lFbW7J~a&p^kRX zQ4670Y?OS5qX@*lC+V@@OZyE>j1+|^zpt${;c~4J>7Tr%fhN0B|Ky=Yxg%Zph_=o$ zaMHEO?E!z-wYX4?qzcIkdpO*INR^P1ti;Urwoo0~ns3uR+)`h=yR$YPsEv9ZUq|)d z*xA~^pTS_&-K}@s>R=aN+2Cco<+e80@41@;w}PFRso%iYRZhMilC3LS8{6yn3JaS= SR6WMGSc)HZ^O)bNsec0gOH}j# diff --git a/unittests/test-contracts/test_api/CMakeLists.txt b/unittests/test-contracts/test_api/CMakeLists.txt index 00b4bcb0ee2..5cc77922002 100644 --- a/unittests/test-contracts/test_api/CMakeLists.txt +++ b/unittests/test-contracts/test_api/CMakeLists.txt @@ -1,5 +1,5 @@ -if( ${eosio.cdt_FOUND} ) 
- add_executable( test_api test_api.cpp ) +if( EOSIO_COMPILE_TEST_CONTRACTS ) + add_executable( test_api test_api.cpp ) else() - configure_file( ${CMAKE_CURRENT_SOURCE_DIR}/test_api.wasm ${CMAKE_CURRENT_BINARY_DIR}/test_api.wasm COPYONLY ) + configure_file( ${CMAKE_CURRENT_SOURCE_DIR}/test_api.wasm ${CMAKE_CURRENT_BINARY_DIR}/test_api.wasm COPYONLY ) endif() diff --git a/unittests/test-contracts/test_api/test_action.cpp b/unittests/test-contracts/test_api/test_action.cpp index 266fd030c23..bf1985ae3ef 100644 --- a/unittests/test-contracts/test_api/test_action.cpp +++ b/unittests/test-contracts/test_api/test_action.cpp @@ -4,7 +4,6 @@ */ #include #include -#include #include #include #include @@ -110,9 +109,6 @@ void test_action::test_cf_action() { float f1 = 1.0f, f2 = 2.0f; float f3 = f1 + f2; eosio_assert( f3 > 2.0f, "Unable to add float."); - // verify compiler builtin api access - __int128 ret; - __divti3( ret, 2, 2, 2, 2 ); // verify context_free_system_api eosio_assert( true, "verify eosio_assert can be called" ); diff --git a/unittests/test-contracts/test_api/test_api.cpp b/unittests/test-contracts/test_api/test_api.cpp index 67c720c1f0f..598990dc1a7 100644 --- a/unittests/test-contracts/test_api/test_api.cpp +++ b/unittests/test-contracts/test_api/test_api.cpp @@ -10,10 +10,8 @@ #include "test_action.cpp" #include "test_chain.cpp" #include "test_checktime.cpp" -#include "test_compiler_builtins.cpp" #include "test_crypto.cpp" #include "test_datastream.cpp" -#include "test_fixedpoint.cpp" #include "test_permission.cpp" #include "test_print.cpp" #include "test_transaction.cpp" @@ -51,21 +49,6 @@ extern "C" { WASM_TEST_HANDLER( test_types, char_to_symbol ); WASM_TEST_HANDLER( test_types, string_to_name ); - //test_compiler_builtins - WASM_TEST_HANDLER( test_compiler_builtins, test_multi3 ); - WASM_TEST_HANDLER( test_compiler_builtins, test_divti3 ); - WASM_TEST_HANDLER( test_compiler_builtins, test_divti3_by_0 ); - WASM_TEST_HANDLER( test_compiler_builtins, test_udivti3 ); - WASM_TEST_HANDLER( test_compiler_builtins, test_udivti3_by_0 ); - WASM_TEST_HANDLER( test_compiler_builtins, test_modti3 ); - WASM_TEST_HANDLER( test_compiler_builtins, test_modti3_by_0 ); - WASM_TEST_HANDLER( test_compiler_builtins, test_umodti3 ); - WASM_TEST_HANDLER( test_compiler_builtins, test_umodti3_by_0 ); - WASM_TEST_HANDLER( test_compiler_builtins, test_lshlti3 ); - WASM_TEST_HANDLER( test_compiler_builtins, test_lshrti3 ); - WASM_TEST_HANDLER( test_compiler_builtins, test_ashlti3 ); - WASM_TEST_HANDLER( test_compiler_builtins, test_ashrti3 ); - //test_action WASM_TEST_HANDLER ( test_action, read_action_normal ); WASM_TEST_HANDLER ( test_action, read_action_to_0 ); @@ -147,21 +130,11 @@ extern "C" { WASM_TEST_HANDLER ( test_transaction, send_cf_action_fail ); WASM_TEST_HANDLER ( test_transaction, stateful_api ); WASM_TEST_HANDLER ( test_transaction, context_free_api ); - WASM_TEST_HANDLER ( test_transaction, new_feature ); - WASM_TEST_HANDLER ( test_transaction, active_new_feature ); WASM_TEST_HANDLER_EX( test_transaction, repeat_deferred_transaction ); //test chain WASM_TEST_HANDLER( test_chain, test_activeprods ); - // test fixed_point - WASM_TEST_HANDLER( test_fixedpoint, create_instances ); - WASM_TEST_HANDLER( test_fixedpoint, test_addition ); - WASM_TEST_HANDLER( test_fixedpoint, test_subtraction ); - WASM_TEST_HANDLER( test_fixedpoint, test_multiplication ); - WASM_TEST_HANDLER( test_fixedpoint, test_division ); - WASM_TEST_HANDLER( test_fixedpoint, test_division_by_0 ); - // test checktime 
WASM_TEST_HANDLER( test_checktime, checktime_pass ); WASM_TEST_HANDLER( test_checktime, checktime_failure ); diff --git a/unittests/test-contracts/test_api/test_api.hpp b/unittests/test-contracts/test_api/test_api.hpp index c3065d5d0ef..865923fcfb2 100644 --- a/unittests/test-contracts/test_api/test_api.hpp +++ b/unittests/test-contracts/test_api/test_api.hpp @@ -175,8 +175,6 @@ struct test_transaction { static void send_cf_action_fail(); static void stateful_api(); static void context_free_api(); - static void new_feature(); - static void active_new_feature(); static void repeat_deferred_transaction(uint64_t receiver, uint64_t code, uint64_t action); }; diff --git a/unittests/test-contracts/test_api/test_api.wasm b/unittests/test-contracts/test_api/test_api.wasm index 98c15da3d2af30cec843cd94557de274c0c441c3..c7a7601ace1eba1bdb1353776620bcfc72ebbbaa 100755 GIT binary patch literal 67533 zcmeFa31C&#wKsmwxnpv3!$DCapq_gP5Rf5^Qb%^4QacOPul>IFy%!?{NdgHZLDAPr zw5ZtUYtUVAuBOV1+LaUAy-ZvCN-bExAkKh#^k+~Ge6rkCRnepB&}bEu1K$35oI%rW@G z+|2T0knbFtW&+|_$zYDlsCx{-;hzklp`89ACx`m@!$N7oKsf+7hSl)T*U{A1*c`Ia z1pa6~F+{qz90geZYx3{c3E~)lNJ&9p&WQUpWV>H?zwWKCJ}}kVd4%6Gx3{gUvw2=i zZ%cDe+tIC#%jBBYuAa88=9ZqG*6v=%lak%7E%TZa#hnzBs}FDOwW&^;2|F-hZdYe- z>$2YF`Q5Fp0^7+jFT1F9(cHyH*+|cVmdVqmJ6YxwwW;Q0T6S@FTW4>NlVbtDx4WgY z$2K1$>Mve;*uu8CEz*hJwneQ@o~3K&9oF17Z`t&z%{{$c-K|cQw9(VrIj?zM>-^U4 z?$&uuHPZ?xIKK_aZPTYZ1xb-oZJi6-I$O2pH7Fxsq)-hD4GDXFL~HZn?yh-D=eBnD zC_##CJxFRhqHSU8;b^7E;x*l^OP02Ew>Gyd?Oov1O48h=z@oFaIYxSLSgN~qZrkEE zWNOh&%VAv@1&7rJCkF;(p4aLOVtNrccDF9-0zT}CkWA^?EaiA*QuW$s%*w zq{*8MMU+<6-L|-O(Y#618?}sYLG(-BE=+En3rxD3J6ex&e4?b5~IS?%riEL+DRNq}3} z($m|#v& zr;=Lh_RM#7Bw&rroA2xt$Ck`jINSBMEbi)QK5StZNzvUpzipY$zd^~2&ZUbq)t4!l zmFr+HIk$KIJiyq6<$*5PChRxRwFP>Rv?~h$wU(X*3wzt9IJ>1zbdRU*$fi=Cnbdfzf_k>1wQ`6U&nKDPT=|uit&$BLuLL~ zSC>Pg=jBpfF6ZXTBhs0=Oe&krdiXz^t;;#-jJw!%-Au}L-0Y62<*w;l=MG88OOihEv!PtK)%P#aXQGyMbeN^*u&k zv(c9wjlXZ>8ALO)%*nTD>7wSIt9p57g?Ce4cDZ{DFk9yeX2vxjv03M|Cqj@meyWU|&zh@k z^*YJ-KZmm8+RI*p=a|LqV^U62AO1Z1>dQ}lcDDJ6l$E??QC{DVZ(2Qhws|VbE2jdd z!D}d|JIWc8Ub?oFHGauU%?v!_wwJsh+mu^b&Nex7icZ0s8Ki^MtfCWS%;F`1hx|eS z5;k~K%c=Ij>i{}4+JQ{j?LbvGNOb_7z*^oPR^T0A{;0hfld4PNea;<9B7p=jAP5_9 zr+Ys_TLFHr<*zs0yB@#hTDKkl=G*DhPT- zw+6^NCx>Imza0Z71c)g|=3+8~L3n=I{@c~l)T9;%Jl6nV#QXHT} z9^&g<4*kzvH$|58JIbjNs9NtR_R<3v)CcMq^^9>oFZ4b({TV?S=~rIjf^jqE*z}j! 
zdJ4fo?HCY1s>jd-sLezlBjwegfTm>p`& zKVS%b1CrCdwEjpe$h1%Q@@^*---#egnGBXYbZtY^H@VEby z&t<)IRduljD#HQ4mt0cdV4+XCoT4;++RkNXx_6**{!DZu#c7)|%RgVN1wrlwK~nxz zZW+={S%MO1W*~xS{~#r@60&(u2NY%t`-5{Sto8@pKno9YlgTqn6cj)J(=+8`&Y+~E zcg!*yhzDVYX&9#dgA|U6wBfLUy#Wa|4w_|7y-OwLR16~+V7&9*mjxZ;G&pa6dAc_h z{Xkv@6EzzLbG|dwoR6xQr)KPQuMt0cQIk?bhWOpz-6&v9Bs@|dY5E%kl5oRVwK2wv`zW4RP67jbK z;P(lb074!(pqQ>n75wdq{3iV2Z9YrYHZ3IoI+s(+K)l6hSmnDwk$24N6`3gAC))Q6 zFO`PjB}QS;QFfs>F^8$mTuT*n4nh-Z2~*53r#i}*60nXku#RZaWT~IBL59{*CbEuF zfk*2oP3tI&{6fI_7APc4tp;y6K%pK~4osuW<^W*?vLwD6Wc`Bz7h06&6!l#e3LUb{ z!~cMe{OR5h{qgYw8>9{$@zIn&d#KbXXQh6S?eJ>>mO#oOY^w9D46F+zp}cSN+$@w` z_#Y9@D+wSmI|BDhzzJG{Fla9ZkaSrJO3!S>pIj-27Q6sip7{_7)H9%fXWU-u_aOV3 zVddNbMJUP~Bx;GH4DvwBI3NThawJ0?%22DssZWYdO&Zh9DdpIA+BA-iK2{vVRD^AE{Pjl`Mn zCNnh>XCCV&`l*pP^Q2^^M&isXYBGc( z0i+@(QYp(>H{J=r_hPL>(I8qftCP{P**B3n!c0%Z>die)B0AD!6R}a|*d(0M=GA1` z26JOFcT7bqW6j6}pxwvU!qG z5@Cut*PSH_+1&Uu$}}u>euj0sk~^mK7Kj`YcSQ)ZWX=qON>ny9!rVy&8LT(MZD^#~ z%Z5goN5VRz&F{jJmUyf*##|E?7;Eke3+!$#2vf(Icf;U#^Ke*Tf*E65m}q_#<~PQ{ zN#^db=w!ITY}qO151>j0E&_AX{YsTJ_tQ;AvV_bFCL%XxLiQt1gB@W9d^A^j;7T_n z+{MI;3G8RdoSlr8O?@JBgkl?(tv73uWk;HEiL#^2%j`~AcC>jW8Er5dl2yl;ofBop zDk;ONyPGqTWyhK8lF{)>wy^L7b80eoV#wjrxkj^RqM1o{@vI%6Y%XL)(qM{t%9%yO z!ZoKppyWU20msym`mil6!J*zm(r-HRL)zXJ!YrAxg>SRD-(*gfL9ai-cfJ6>f7vnQAlHgRG!6&lShHf@sG z(S|0QE5p{On3uqjun;|Sl_kkKah2XU=;MplyFUTZS|R01m7=RGJpzY_+#O+VWv{|$ zz4>-BI?^O#qs&9eC}e9g+F)+8JS~97R7S^|b;;b_%`cL5$C>Anx#Ly5gh)*gZV6)( zWxT>zBTT3S{*z295u0pYNP?YWE=rc2YE~tq)67nZs?*JT$*Ox)itricN6D(6GJc}! zo@Qe*bFWGy_BL-NbN7j9zpvQ=P{@+|nYE!FxX$vI>H*=6ZPEi2SOi%z|G{P%EUQjr zaD?KCz$6+ zSq3MXPuWnT4h<70nZ0e|Wb<+eh(gGwPE}FLqSKVp3{E#Co4<$oby#$Uc{>E~Df4<5 z+|#@f2KO?Lg~7c==n~XE=9w^YU$HHixSzQ#R6=*yA+bsbV-hMM+CWf3*-#R0ixQ$~ z6G~Y|+jxyg!Mn}WXkf_@b&K;>bCbD59 zI#vfgY-e|KUb5;q^S+%eQh0oYE}39XOO~Bz_DD3^XbOqgqzVN)xl;R1F@qCDr>Zgt z5t?S+Og0ivUqNUOwL!wdGb+jQDRXtQ?w;6dNi?)q1zGlvs3Ot{gIJc8)aSy0gN^ehD969 zYhmgb738cn*8Dh3-CboIQ^&;+#>b6Kh$i<$$SaFXqgpns1v@uvY_hToQ>U0Iw&+y2 zt!;3cc|Qa&-P{=l_b|T?gEM0J@Tu6!+*5T7!R}@Dv9R`zgvvgVB-vN3Bo^EcSXpcG zxeDF=L+t2QZd5?0W6vyJ7ct%#FN~nk+D!v&%+MLfcz`8PGESQ`8HY`ZeMnSPN%s2!_67|6V6=9weK5Hy0j^Sq8;jgs z3|kb!G%_v1T+1~uE$V=#1x$&^4YvLh=E;ZeGH4-1cul#jI25a~yWjtRA{#O%5}th&3YKyh3p;PK|}Bo-4Yjp~W! z#AI$G*5io|O)^hYjE4o>&4_;=^J6ab~p5A8(!w>rc=%JM$-+v%=KI zc;Pz9yci-lIo7pP%G$S#7pNua<{uX$AUMg~!T||uPL8dMDLhA`R@noV{wf|p{%v$-$cmN2Uy2q9Wph%< z2qR+KrCxi%L1d!&%)Hvrf4bDU?xWtjtu9q z>IP8?M^@)J^|di|e7tI%VCpP@iRP3Lfks_Yu;`@Np*z{+Y|$yEY8KA{rOe8QRe>pm zf0g=VD2!DFSU)zJ)EysMWSe)%y%ct!q%xl!D90xF2y=T#!TQ*dJkop+RvQ)h%to6b z5rrdpI3}KYV`G82ySX>4KQ4CUjgOZ!6U+@^fr+vtBwCG;7@icnpC`vlfGM#=ukaCw z3zq<<$Bv0T%>7}%W*Bk#vcRX}@!m7WYcDg*c5H85U9;#uvEOpvc%try)sY?M?^F!) 
z3!l(L64m)v46|%N5bu&Kq8Tci<3k#cFqeeEdUID892qZWN5um^+B_Cgt|9gwjWG?D zMq@=1vw_{scSBI)%!gs6@v#$kg85TeU}C%$Y*g!s5GIMgnxVVnr;Jx1Wg5i()9kcTl_on zGn8`?hjG1L&f91T#DjCA%0|`OP_DL7^*59YHkw6Ycs6Qm6!wd3{vH@<^qK13Krs-( zzat(}@pr^1#I45-$8E;VZK8-q*VUO z8G2VdoC0c_XQ%+MqqGeWBI-l(3J4|sHb4l88Ug@uVf9`))UDrk6E<7 ziIH08Rd!I?ut=o4!wV4up^)wd0{vNtXbj;$7rV2Tm}l&Wpldj(hl91yTlCEb;n7n4 zAQKj6_Hbs?)mu9};ff=+Wsj#wp+4b0ce|&1 zD9MwV4Xirdt7nG+A4iunI46k8Ktzwl1FK&H!4_{OCwgGqB|N|h+#BpEXCMqHg0QKP z!~ac|Wax-%cI81~X@pI>rUu^pxLw8fcWB@+HqhV&_As(wr6;M?e?r29?P#q(C2OSx zD|V?Kr!DpTwKg4MgQ3QMoLbYp%Q1EWWIBc*3a&v=r=C9-=N*a88sN$jc1Ge($-JT7 zOy)>7_uCl(T`{5(!5G@iI_u#%?ok!P;}U}lEQP}Z!Gae`(&jXj3&+L7ZW&-SFu(xs9ioM653*^*Y6hpH8_k&sjJ$prp|oW>p5aa+ zmBfb}$CAo)1dLk~7DsH?5J$2e8X5qz`>~Ow(*fti61YKZ%{1pEa7*>a4aK(>ITnqv zLz5wTyv!2fhsG2?aVI6F2Ra0XqDab22eZgLP=>+fAiCk9KXK+77cW7fJkC+W#hbzT zY{~F{$V2(*0e;M=QWX!cd!?#EYNqz?@9W>7s&@1s$Mw>gY}NKV?6_0i&O?U{-(}a` zO63vtBZE=Zg4@Wu3No{bqu8~Y&%hA{4$0uaJ}weXGbyAU z2#S<>iocvLPN|uU?LMBX=TZE?n?=q?;dF7TNu%s6oCc#n&mp&*KCn2#WRQ9QpdL`1 zVzLPDkK=l*5TsD9?0&I0wPp$%WfE@q*~AfMSCnl+1_8On{c85*eS!2Bi<8YTW{{CG#gan8y~C1QvlGz_e*#ZS46WH&lG&`O0=#Jj6Hw-+g$C4^Y2Gx+ zpmf=;u!?Q={OMz90lp=0mad#8epdKygo`|aEL90Q30u-lnfkLv^B^F zdE{WC6GGgF;id*9F-$rUF2t;yuVJn0AC4I27R&)Sd|VWZQ(9aY1{R=V%cnu zYP)m1piawXXT-(=2;$VL38_1xrmVn`73gVJwAXP|Pg1a=GJo|eo`s5Yvir*%jf$rK#kmwN>&NJMm$3x?cSDhQJV z`m)!R&&cT{LkxZ;Dja3@gYu4&@em6_`xw)oI>N8rxCxb|}cy8&Z(jx_!Zn zM1}42XK=xk5c3N%p9X1rwFQ$$VzD4RRa21B#sw9QW_7`Ez;JnMD1PzX&)dV>k-2Z$!9%1HL-?wHgQV>H)trBdBump0HGmK$VZA? z`)Q+D2s?|Mk`L@jbCiczkn$|$`R&m7;eTXms5Edh?l`$elrz+QOUhZ?)nPTJrE+&Id33DM6_?F zOP}L5q`9mAohoq1K#;cBr6j1ZOT1RTbzMr<4rz-nZSh+9V`+?10PYQ(S!Hieu>MT` z7vQ=uI0Bk1Urvp8YKl-^SYB`xkO@78>v$agjD3ayw@pjEU9*qTrR(E{bQK?Q9H!%y2EOefGV5J(45a4O#c$fK@ID!IvzAW)JQ5SXku za&(mcf$T6fJkbp8Phl;BVTj!m{vfCjMK0B9P+Iyz{Y_((t6hr%+QR);$T^Ti1RY6( zgPv$nuKRmzQ5wK72)cq^qIbNgiHmL0JHiKEgfEv~LG^xJl-_BlwRiAFp)Hym+B={} z%F2&W2!x8dmfuX*9GXBFs|xCFd)F{erFVrh(ry*qYuhH&7Rl2IF!-elG^==z5|mA9 zj82h!AdtuvlHE!%(#}dN7{Q!c06j%|S_CZsqL1uoq@>7hObK%A za}G?e*Mg;x!~l$>l2RhY1tz+ZfXU0J5lm#|%W@qxPndZ^$)jncqmaZxm@FkQfi@4= zRHz6$Yha~@Dq$baU14m@H&mqoEA!evoEe}c$R#=z$#`6XMW|Bb(s$w9l)ZV8utQ*< z(RD#Pq!FR|C=^JDwh10*5&Z-s1i0g$>)m$0OCpGuQ}nKYTliz`6tQdIE(zfg%C&?- z01A*m5JmI;Vz3w|EQl;jM}EXmTUW(+2h)lihNCCsu-vdRX0H^5xGY2P9YN{0q@2+m zXiOp6A~c-fu&N;mGzeE1n#>FS%1a=4Ooo;sMRwobqJ;T(M1X%}O!Ou? z7$1BGGUtZL74mQtQyoF}i=yQ#Z%&K7faRw8(rIhx&}P9U@IH93aX|&Aa8U}^*&z;b z!(R+&+QeWovK-v7QqI5BO_;(QV&DrA2Fn@}3m3x6;)N5gKy6e~zQq;)FF~}VP4(xB zE{P|rD#C=LGK0aE&Z3tPUyw_Cxtv*;&?(+|=TkZKUuNmQgk~W*RQOVVU^Z|~3x`S? zH$TXz{juuV^&Ysj!TpJRqOgNLJ0roVYUpNmtfb+%U`wwpjE;EPK0B1Gb^` z8Qd05Skyn?OIiaXHd?5?$f{-9?cf=aq*_YJ3H^xy6C;Ei zQ_9gbfq}3h_QkAdAKBCmXRmM%Yz#06$Ay{zYz&P=$zZh*XE7Sg8ZfF(BGDX4Zl-=> z$9X-O3{5`)3|*?%auwwi{Rhxd=stNmL!Ra`NL~b~f}F@lJRPJjD~qWoOIo^sYS3?r zymC>cmWp+-F45u`RP|%R5SuD?zNS5>N-Q!Xek4{-g?mL56BlDn0QV!6N)6m9n5cd^ z|3!VCEk$FYRAo}7TFf5!tZLOwUW=JT(ro*GL2Yeq+VL{knqj%B>O#uRW32=?GG-7K zNw)18X$ihpN{w>9IEZgD<%8T8i-X|Rs-?45d=XVFkS@b?A>C?gb}R%SF8o_T6)GN3 zEW&qITQdY?u{x&~F1gy8?F#{{793m&a4~pL%?^N8W(7dWBO7itaq{L>TVNZ3iVvz! 
z05(J(a3+iD{kTjO<7^{6Sg`GB57t7sS7m$NgQc~?HL7TeH5X(TPR}hePb#Dy9tny?ngrQA8=vG9|k~q$2a&8rv(LEJC=ap!o_L(DuzljmBH3Qs%bqO2 z<}D)d_N$w_cqDSBj$SA7{UZdOF&xam`- zywHF`C?}eRCLw066?pxZ6hRvz{hKCTw Date: Fri, 12 Apr 2019 10:40:45 -0400 Subject: [PATCH 363/680] Move create_snapshot to an asynchronous call create the snapshots on demand as before but instead of returning add them to a ephemeral (in process) queue as the irreversible block passes the height of the snapshot, determine if the snapshot was created on a fork AND prune snapshots outside of the main chain promote snapshots inside of the main chain regardless return to the HTTP RPC callers at this time with a success of failure --- .../chain/include/eosio/chain/exceptions.hpp | 2 + .../producer_api_plugin.cpp | 33 +++- .../eosio/producer_plugin/producer_plugin.hpp | 5 +- plugins/producer_plugin/producer_plugin.cpp | 141 +++++++++++++++--- 4 files changed, 157 insertions(+), 24 deletions(-) diff --git a/libraries/chain/include/eosio/chain/exceptions.hpp b/libraries/chain/include/eosio/chain/exceptions.hpp index 6c3e504d349..c238308aa2e 100644 --- a/libraries/chain/include/eosio/chain/exceptions.hpp +++ b/libraries/chain/include/eosio/chain/exceptions.hpp @@ -460,6 +460,8 @@ namespace eosio { namespace chain { 3170007, "The configured snapshot directory does not exist" ) FC_DECLARE_DERIVED_EXCEPTION( snapshot_exists_exception, producer_exception, 3170008, "The requested snapshot already exists" ) + FC_DECLARE_DERIVED_EXCEPTION( snapshot_finalization_exception, producer_exception, + 3170009, "Snapshot Finalization Exception" ) FC_DECLARE_DERIVED_EXCEPTION( reversible_blocks_exception, chain_exception, 3180000, "Reversible Blocks exception" ) diff --git a/plugins/producer_api_plugin/producer_api_plugin.cpp b/plugins/producer_api_plugin/producer_api_plugin.cpp index 0ef7631c868..6473bfe948f 100644 --- a/plugins/producer_api_plugin/producer_api_plugin.cpp +++ b/plugins/producer_api_plugin/producer_api_plugin.cpp @@ -24,6 +24,13 @@ static appbase::abstract_plugin& _producer_api_plugin = app().register_plugin { + template + fc::variant operator()(const T& v) const { + return fc::variant(v); + } +}; + #define CALL(api_name, api_handle, call_name, INVOKE, http_response_code) \ {std::string("/v1/" #api_name "/" #call_name), \ [&api_handle](string, string body, url_response_callback cb) mutable { \ @@ -36,6 +43,25 @@ using namespace eosio; } \ }} +#define CALL_ASYNC(api_name, api_handle, call_name, call_result, INVOKE, http_response_code) \ +{std::string("/v1/" #api_name "/" #call_name), \ + [&api_handle](string, string body, url_response_callback cb) mutable { \ + if (body.empty()) body = "{}"; \ + auto next = [cb, body](const fc::static_variant& result){\ + if (result.contains()) {\ + try {\ + result.get()->dynamic_rethrow_exception();\ + } catch (...) 
{\ + http_plugin::handle_exception(#api_name, #call_name, body, cb);\ + }\ + } else {\ + cb(http_response_code, result.visit(async_result_visitor()));\ + }\ + };\ + INVOKE\ + }\ +} + #define INVOKE_R_R(api_handle, call_name, in_param) \ auto result = api_handle.call_name(fc::json::from_string(body).as()); @@ -46,6 +72,9 @@ using namespace eosio; #define INVOKE_R_V(api_handle, call_name) \ auto result = api_handle.call_name(); +#define INVOKE_R_V_ASYNC(api_handle, call_name)\ + api_handle.call_name(next); + #define INVOKE_V_R(api_handle, call_name, in_param) \ api_handle.call_name(fc::json::from_string(body).as()); \ eosio::detail::producer_api_plugin_response result{"ok"}; @@ -88,8 +117,8 @@ void producer_api_plugin::plugin_startup() { INVOKE_V_R(producer, set_whitelist_blacklist, producer_plugin::whitelist_blacklist), 201), CALL(producer, producer, get_integrity_hash, INVOKE_R_V(producer, get_integrity_hash), 201), - CALL(producer, producer, create_snapshot, - INVOKE_R_V(producer, create_snapshot), 201), + CALL_ASYNC(producer, producer, create_snapshot, producer_plugin::snapshot_information, + INVOKE_R_V_ASYNC(producer, create_snapshot), 201), }); } diff --git a/plugins/producer_plugin/include/eosio/producer_plugin/producer_plugin.hpp b/plugins/producer_plugin/include/eosio/producer_plugin/producer_plugin.hpp index 66030cc587e..2a0e3017713 100644 --- a/plugins/producer_plugin/include/eosio/producer_plugin/producer_plugin.hpp +++ b/plugins/producer_plugin/include/eosio/producer_plugin/producer_plugin.hpp @@ -51,6 +51,9 @@ class producer_plugin : public appbase::plugin { std::string snapshot_name; }; + template + using next_function = std::function&)>; + producer_plugin(); virtual ~producer_plugin(); @@ -81,7 +84,7 @@ class producer_plugin : public appbase::plugin { void set_whitelist_blacklist(const whitelist_blacklist& params); integrity_hash_information get_integrity_hash() const; - snapshot_information create_snapshot() const; + void create_snapshot(next_function next); signal confirmed_block; private: diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp index 67a90d0887c..f9ffdf512b2 100644 --- a/plugins/producer_plugin/producer_plugin.cpp +++ b/plugins/producer_plugin/producer_plugin.cpp @@ -88,6 +88,63 @@ using transaction_id_with_expiry_index = multi_index_container< > >; +struct by_height; + +class pending_snapshot { +public: + using next_t = producer_plugin::next_function; + + pending_snapshot(const block_id_type& block_id, next_t& next, std::string temp_path, std::string final_path) + : block_id(block_id) + , next(next) + , temp_path(temp_path) + , final_path(final_path) + {} + + uint32_t get_height() const { + return block_header::num_from_id(block_id); + } + + static bfs::path get_final_path(const block_id_type& block_id, const bfs::path& snapshots_dir) { + return snapshots_dir / fc::format_string("snapshot-${id}.bin", fc::mutable_variant_object()("id", block_id)); + } + + static bfs::path get_temp_path(const block_id_type& block_id, const bfs::path& snapshots_dir) { + return snapshots_dir / fc::format_string(".pending-snapshot-${id}.bin", fc::mutable_variant_object()("id", block_id)); + } + + producer_plugin::snapshot_information finalize( const chain::controller& chain ) const { + auto in_chain = (bool)chain.fetch_block_by_id( block_id ); + boost::system::error_code ec; + + EOS_ASSERT(in_chain, snapshot_finalization_exception, + "Snapshotted block was forked out of the chain. 
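The next_function alias just added to producer_plugin.hpp is the heart of this patch: create_snapshot no longer returns a snapshot_information, it accepts a continuation that fires later with either an exception or the result. A minimal standalone model of that contract, using std::variant as a stand-in for fc::static_variant (all names in this sketch are illustrative, not from the patch):

#include <functional>
#include <iostream>
#include <stdexcept>
#include <string>
#include <variant>

// Stand-ins for the fc types used by the patch.
using exception_ptr = std::exception_ptr;

struct snapshot_information {          // mirrors producer_plugin::snapshot_information
   std::string head_block_id;
   std::string snapshot_name;
};

template<typename T>
using next_function = std::function<void(const std::variant<exception_ptr, T>&)>;

// The producer side: completes the request later, with a value or an error.
void finish(next_function<snapshot_information> next, bool ok) {
   if (ok) next(snapshot_information{"0000abcd...", "snapshot-0000abcd.bin"});
   else    next(std::make_exception_ptr(std::runtime_error("forked out")));
}

int main() {
   finish([](const std::variant<exception_ptr, snapshot_information>& res) {
      if (auto* e = std::get_if<exception_ptr>(&res)) {
         try { std::rethrow_exception(*e); }
         catch (const std::exception& ex) { std::cout << "failed: " << ex.what() << "\n"; }
      } else {
         std::cout << "ready: " << std::get<snapshot_information>(res).snapshot_name << "\n";
      }
   }, /*ok=*/true);
}

The CALL_ASYNC macro above builds exactly this kind of continuation out of the HTTP response callback, which is why the RPC reply can be deferred until the snapshot is finalized.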
diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp
index 67a90d0887c..f9ffdf512b2 100644
--- a/plugins/producer_plugin/producer_plugin.cpp
+++ b/plugins/producer_plugin/producer_plugin.cpp
@@ -88,6 +88,63 @@ using transaction_id_with_expiry_index = multi_index_container<
    >
 >;
 
+struct by_height;
+
+class pending_snapshot {
+public:
+   using next_t = producer_plugin::next_function<producer_plugin::snapshot_information>;
+
+   pending_snapshot(const block_id_type& block_id, next_t& next, std::string temp_path, std::string final_path)
+   : block_id(block_id)
+   , next(next)
+   , temp_path(temp_path)
+   , final_path(final_path)
+   {}
+
+   uint32_t get_height() const {
+      return block_header::num_from_id(block_id);
+   }
+
+   static bfs::path get_final_path(const block_id_type& block_id, const bfs::path& snapshots_dir) {
+      return snapshots_dir / fc::format_string("snapshot-${id}.bin", fc::mutable_variant_object()("id", block_id));
+   }
+
+   static bfs::path get_temp_path(const block_id_type& block_id, const bfs::path& snapshots_dir) {
+      return snapshots_dir / fc::format_string(".pending-snapshot-${id}.bin", fc::mutable_variant_object()("id", block_id));
+   }
+
+   producer_plugin::snapshot_information finalize( const chain::controller& chain ) const {
+      auto in_chain = (bool)chain.fetch_block_by_id( block_id );
+      boost::system::error_code ec;
+
+      EOS_ASSERT(in_chain, snapshot_finalization_exception,
+                 "Snapshotted block was forked out of the chain. ID: ${block_id}",
+                 ("block_id", block_id));
+
+      bfs::rename(bfs::path(temp_path), bfs::path(final_path), ec);
+      EOS_ASSERT(!ec, snapshot_finalization_exception,
+                 "Unable to finalize valid snapshot of block number ${bn}: [code: ${ec}] ${message}",
+                 ("bn", get_height())
+                 ("ec", ec.value())
+                 ("message", ec.message()));
+
+      return {block_id, final_path};
+   }
+
+   block_id_type     block_id;
+   next_t            next;
+   std::string       temp_path;
+   std::string       final_path;
+};
+
+using pending_snapshot_index = multi_index_container<
+   pending_snapshot,
+   indexed_by<
+      hashed_unique<tag<by_id>, BOOST_MULTI_INDEX_MEMBER(pending_snapshot, block_id_type, block_id)>,
+      ordered_non_unique<tag<by_height>, BOOST_MULTI_INDEX_CONST_MEM_FUN( pending_snapshot, uint32_t, get_height)>
+   >
+>;
+
 enum class pending_block_mode {
    producing,
    speculating
@@ -160,6 +217,7 @@ class producer_plugin_impl : public std::enable_shared_from_this<producer_plugin_impl> {
 
+      pending_snapshot_index                                    _pending_snapshot_index;
 
       fc::optional<scoped_connection>                           _accepted_block_connection;
       fc::optional<scoped_connection>                           _irreversible_block_connection;
@@ -249,6 +307,22 @@ class producer_plugin_impl : public std::enable_shared_from_this<producer_plugin_impl> {
 
      void on_irreversible_block( const signed_block_ptr& lib ) {
        _irreversible_block_time = lib->timestamp.to_time_point();
+       const chain::controller& chain = chain_plug->chain();
+
+       // promote any pending snapshots
+       auto& snapshots_by_height = _pending_snapshot_index.get<by_height>();
+       uint32_t lib_height = lib->block_num();
+
+       while (!snapshots_by_height.empty() && snapshots_by_height.begin()->get_height() <= lib_height) {
+          const auto& pending = snapshots_by_height.begin();
+          auto next = pending->next;
+
+          try {
+             next(pending->finalize(chain));
+          } CATCH_AND_CALL(next);
+
+          snapshots_by_height.erase(snapshots_by_height.begin());
+       }
      }
 
      template<typename Type, typename Channel, typename F>
@@ -938,35 +1012,60 @@ producer_plugin::integrity_hash_information producer_plugin::get_integrity_hash
    return {chain.head_block_id(), chain.calculate_integrity_hash()};
 }
 
-producer_plugin::snapshot_information producer_plugin::create_snapshot() const {
-   chain::controller& chain = my->chain_plug->chain();
-
-   auto reschedule = fc::make_scoped_exit([this](){
-      my->schedule_production_loop();
-   });
-
-   if (chain.pending_block_state()) {
-      // abort the pending block
-      chain.abort_block();
-   } else {
-      reschedule.cancel();
-   }
+void producer_plugin::create_snapshot(producer_plugin::next_function<producer_plugin::snapshot_information> next) {
+   chain::controller& chain = my->chain_plug->chain();
 
    auto head_id = chain.head_block_id();
-   std::string snapshot_path = (my->_snapshots_dir / fc::format_string("snapshot-${id}.bin", fc::mutable_variant_object()("id", head_id))).generic_string();
+   std::string snapshot_path = (pending_snapshot::get_final_path(head_id, my->_snapshots_dir)).generic_string();
 
-   EOS_ASSERT( !fc::is_regular_file(snapshot_path), snapshot_exists_exception,
-               "snapshot named ${name} already exists", ("name", snapshot_path));
+   // maintain legacy exception if the snapshot exists
+   if( fc::is_regular_file(snapshot_path) ) {
+      auto ex = snapshot_exists_exception( FC_LOG_MESSAGE( error, "snapshot named ${name} already exists", ("name", snapshot_path) ) );
+      next(ex.dynamic_copy_exception());
+      return;
+   }
 
+   // determine if this snapshot is already in-flight
+   auto& pending_by_id = my->_pending_snapshot_index.get<by_id>();
+   auto existing = pending_by_id.find(head_id);
+   if( existing != pending_by_id.end() ) {
+      // if a snapshot at this block is already pending, attach this requests handler to it
+      pending_by_id.modify(existing, [&next]( auto& entry ){
+         entry.next = [prev = entry.next, next](const fc::static_variant<fc::exception_ptr, producer_plugin::snapshot_information>& res){
+            prev(res);
+            next(res);
+         };
+      });
+   } else {
+      // write a new temp snapshot
+      std::string temp_path = (pending_snapshot::get_temp_path(head_id, my->_snapshots_dir)).generic_string();
+      std::string final_path = (pending_snapshot::get_final_path(head_id, my->_snapshots_dir)).generic_string();
+      bool written = false;
 
-   auto snap_out = std::ofstream(snapshot_path, (std::ios::out | std::ios::binary));
-   auto writer = std::make_shared<ostream_snapshot_writer>(snap_out);
-   chain.write_snapshot(writer);
-   writer->finalize();
-   snap_out.flush();
-   snap_out.close();
+      try {
+         auto reschedule = fc::make_scoped_exit([this](){
+            my->schedule_production_loop();
+         });
 
-   return {head_id, snapshot_path};
+         if (chain.pending_block_state()) {
+            // abort the pending block
+            chain.abort_block();
+         } else {
+            reschedule.cancel();
+         }
+
+         // create a new pending snapshot
+         auto snap_out = std::ofstream(temp_path, (std::ios::out | std::ios::binary));
+         auto writer = std::make_shared<ostream_snapshot_writer>(snap_out);
+         chain.write_snapshot(writer);
+         writer->finalize();
+         snap_out.flush();
+         snap_out.close();
+
+         my->_pending_snapshot_index.emplace(head_id, next, temp_path, final_path);
+      } CATCH_AND_CALL (next);
+   }
 }
 
 optional<fc::time_point> producer_plugin_impl::calculate_next_block_time(const account_name& producer_name, const block_timestamp_type& current_block_time) const {
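For orientation, an in-process caller of the new API would hand create_snapshot a callback in the same shape the CALL_ASYNC macro builds. A sketch of such a caller, assuming appbase's app().get_plugin and fc's logging macros; this fragment belongs inside a plugin and is illustrative, not part of the patch:

// The callback is held in _pending_snapshot_index and fires only once the
// snapshotted block becomes irreversible (success) or is forked out (exception).
auto& producer = appbase::app().get_plugin<producer_plugin>();
producer.create_snapshot(
   [](const fc::static_variant<fc::exception_ptr, producer_plugin::snapshot_information>& res) {
      if (res.contains<fc::exception_ptr>()) {
         elog("snapshot failed: ${e}", ("e", res.get<fc::exception_ptr>()->to_detail_string()));
      } else {
         ilog("snapshot written to ${p}",
              ("p", res.get<producer_plugin::snapshot_information>().snapshot_name));
      }
   });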
From 92b750ec26e46b385d4296c0d6bb4c0b18469a69 Mon Sep 17 00:00:00 2001
From: Bart Wyatt
Date: Fri, 12 Apr 2019 10:43:47 -0400
Subject: [PATCH 364/680] actually remove temp files for forked away snapshots

---
 plugins/producer_plugin/producer_plugin.cpp | 11 +++++++----
 1 file changed, 7 insertions(+), 4 deletions(-)

diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp
index f9ffdf512b2..174c2e37cf6 100644
--- a/plugins/producer_plugin/producer_plugin.cpp
+++ b/plugins/producer_plugin/producer_plugin.cpp
@@ -117,16 +117,19 @@ class pending_snapshot {
       auto in_chain = (bool)chain.fetch_block_by_id( block_id );
       boost::system::error_code ec;
 
-      EOS_ASSERT(in_chain, snapshot_finalization_exception,
-                 "Snapshotted block was forked out of the chain. ID: ${block_id}",
-                 ("block_id", block_id));
+      if (!in_chain) {
+         bfs::remove(bfs::path(temp_path), ec);
+         EOS_THROW(snapshot_finalization_exception,
+                   "Snapshotted block was forked out of the chain. ID: ${block_id}",
+                   ("block_id", block_id));
+      }
 
       bfs::rename(bfs::path(temp_path), bfs::path(final_path), ec);
       EOS_ASSERT(!ec, snapshot_finalization_exception,
                  "Unable to finalize valid snapshot of block number ${bn}: [code: ${ec}] ${message}",
                  ("bn", get_height())
                  ("ec", ec.value())
-                 ("message",ec.message()));
+                 ("message", ec.message()));
 
       return {block_id, final_path};
    }
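With this fix, finalize() has a clean two-outcome contract: a forked-out snapshot deletes its temp file before the exception propagates, and a confirmed snapshot is renamed from its pending name into its final name. A self-contained sketch of that rename-or-remove contract; file names here are assumed for illustration and this is not chain code:

#include <boost/filesystem.hpp>
#include <fstream>
#include <iostream>

namespace bfs = boost::filesystem;

bool finalize_snapshot_file(const bfs::path& temp, const bfs::path& final_p, bool in_chain) {
   boost::system::error_code ec;
   if (!in_chain) {
      bfs::remove(temp, ec);         // best-effort cleanup; the real code then throws
      return false;                  // snapshot_finalization_exception ("forked out")
   }
   bfs::rename(temp, final_p, ec);   // promote the pending file into place
   return !ec;                       // the real code throws on a rename error as well
}

int main() {
   bfs::path temp = ".pending-snapshot-demo.bin";
   std::ofstream(temp.string()) << "snapshot bytes";   // create a dummy pending file
   std::cout << finalize_snapshot_file(temp, "snapshot-demo.bin", /*in_chain=*/true) << "\n";
}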
From 992fcb4f1360458a711e66798adbd1812b11d40f Mon Sep 17 00:00:00 2001
From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com>
Date: Thu, 11 Apr 2019 23:08:49 -0400
Subject: [PATCH 365/680] store all wasm code in a separate index

Store all WASM code in a separate index outside of the account index.
This allows for deduplication of identical code in memory and will also
allow greater accuracy in pruning the WASM instantiation cache in the
future.
---
 libraries/chain/apply_context.cpp             |  5 ++-
 libraries/chain/controller.cpp                |  4 +-
 libraries/chain/eosio_contract.cpp            | 36 +++++++++++++---
 .../include/eosio/chain/account_object.hpp    |  5 +--
 .../chain/include/eosio/chain/code_object.hpp | 35 ++++++++++++++++
 libraries/chain/include/eosio/chain/types.hpp |  1 +
 .../include/eosio/chain/wasm_interface.hpp    |  3 +-
 .../eosio/chain/wasm_interface_private.hpp    | 42 +++++++++++++++----
 libraries/chain/wasm_interface.cpp            |  4 +-
 plugins/chain_plugin/chain_plugin.cpp         | 13 ++++--
 .../state_history_serialization.hpp           |  1 -
 11 files changed, 121 insertions(+), 28 deletions(-)
 create mode 100644 libraries/chain/include/eosio/chain/code_object.hpp

diff --git a/libraries/chain/apply_context.cpp b/libraries/chain/apply_context.cpp
index 060e3a74095..d9e960515bf 100644
--- a/libraries/chain/apply_context.cpp
+++ b/libraries/chain/apply_context.cpp
@@ -8,6 +8,7 @@
 #include <...>
 #include <...>
 #include <...>
+#include <eosio/chain/code_object.hpp>
 #include <...>
 #include <...>
@@ -70,7 +71,7 @@ void apply_context::exec_one()
          (*native)( *this );
       }
 
-      if( (a.code.size() > 0) &&
+      if( ( a.code_version != digest_type() ) &&
           ( control.is_builtin_activated( builtin_protocol_feature_t::forward_setcode )
             || !( act->account == config::system_account_name
                   && act->name == N( setcode )
@@ -82,7 +83,7 @@ void apply_context::exec_one()
             control.check_action_list( act->account, act->name );
          }
          try {
-            control.get_wasm_interface().apply( a.code_version, a.code, *this );
+            control.get_wasm_interface().apply( db.get<code_object,by_code_id>(a.code_version), *this );
          } catch( const wasm_exit& ) {}
       }
    } FC_RETHROW_EXCEPTIONS( warn, "pending console output: ${console}", ("console", _pending_console_output) )

diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp
index d71501e854a..ac63e7a1e4f 100644
--- a/libraries/chain/controller.cpp
+++ b/libraries/chain/controller.cpp
@@ -6,6 +6,7 @@
 #include <...>
 #include <...>
+#include <eosio/chain/code_object.hpp>
 #include <...>
 #include <...>
 #include <...>
@@ -43,7 +44,8 @@ using controller_index_set = index_set<
    block_summary_multi_index,
    transaction_multi_index,
    generated_transaction_multi_index,
-   table_id_multi_index
+   table_id_multi_index,
+   code_index
 >;
 
 using contract_database_index_set = index_set<

diff --git a/libraries/chain/eosio_contract.cpp b/libraries/chain/eosio_contract.cpp
index 026efefb4a2..cf167cce746 100644
--- a/libraries/chain/eosio_contract.cpp
+++ b/libraries/chain/eosio_contract.cpp
@@ -12,6 +12,7 @@
 #include <...>
 #include <...>
+#include <eosio/chain/code_object.hpp>
 #include <...>
 #include <...>
 #include <...>
@@ -145,21 +146,44 @@ void apply_eosio_setcode(apply_context& context) {
 
    const auto& account = db.get<account_object,by_name>(act.account);
 
    int64_t code_size = (int64_t)act.code.size();
-   int64_t old_size = (int64_t)account.code.size() * config::setcode_ram_bytes_multiplier;
+   int64_t old_size = 0;
    int64_t new_size = code_size * config::setcode_ram_bytes_multiplier;
 
    EOS_ASSERT( account.code_version != code_id, set_exact_code, "contract is already running this version of code" );
 
+   if(account.code_version != digest_type()) {
+      const code_object& old_code_entry = db.get<code_object, by_code_id>(account.code_version);
+      old_size = (int64_t)old_code_entry.code.size() * config::setcode_ram_bytes_multiplier;
+      if(old_code_entry.code_ref_count == 1) {
+         db.remove(old_code_entry);
+      }
+      else
+         db.modify(old_code_entry, [](code_object& o) {
+            --o.code_ref_count;
+         });
+   }
+
+   if(code_id != digest_type()) {
+      const code_object* new_code_entry = db.find<code_object, by_code_id>(code_id);
+      if(new_code_entry)
+         db.modify(*new_code_entry, [](code_object& o) {
+            ++o.code_ref_count;
+         });
+      else {
+         db.create<code_object>([&](code_object& o) {
+            o.code_id = code_id;
+            o.code.assign(act.code.data(), code_size);
+            o.code_ref_count = 1;
+            o.first_block_used = context.control.head_block_num();
+         });
+      }
+   }
+
    db.modify( account, [&]( auto& a ) {
       /** TODO: consider whether a microsecond level local timestamp is sufficient to detect code version changes*/
       // TODO: update setcode message to include the hash, then validate it in validate
       a.last_code_update = context.control.pending_block_time();
       a.code_version = code_id;
-      if ( code_size > 0 ) {
-         a.code.assign(act.code.data(), code_size);
-      } else {
-         a.code.resize(0);
-      }
    });
 
    const auto& account_sequence = db.get<account_sequence_object,by_name>(act.account);
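The two new conditionals above implement reference counting over the code index: release the account's old code entry, then acquire (or create) the entry for the new code hash. A standalone model of those rules, using a plain std::map instead of chainbase and hypothetical hashes, purely to make the lifecycle concrete:

#include <cassert>
#include <map>
#include <string>

struct code_entry { std::string code; uint64_t ref_count = 0; };
using code_index_model = std::map<std::string, code_entry>;   // keyed by code hash

void set_code(code_index_model& idx, std::string& account_hash,
              const std::string& new_hash, const std::string& wasm) {
   if (!account_hash.empty()) {                     // release the old code entry
      auto it = idx.find(account_hash);
      if (--it->second.ref_count == 0) idx.erase(it);
   }
   if (!new_hash.empty()) {                         // acquire (or create) the new one
      auto it = idx.try_emplace(new_hash, code_entry{wasm, 0}).first;
      ++it->second.ref_count;
   }
   account_hash = new_hash;
}

int main() {
   code_index_model idx;
   std::string alice, bob;                          // per-account code hashes ("" = no code)
   set_code(idx, alice, "H1", "wasm-bytes");        // first deploy creates the entry
   set_code(idx, bob,   "H1", "wasm-bytes");        // identical code is deduplicated
   assert(idx.at("H1").ref_count == 2);
   set_code(idx, bob,   "", "");                    // bob clears his code; entry survives
   assert(idx.at("H1").ref_count == 1);
   set_code(idx, alice, "", "");                    // last reference removes the entry
   assert(idx.count("H1") == 0);
}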
diff --git a/libraries/chain/include/eosio/chain/account_object.hpp b/libraries/chain/include/eosio/chain/account_object.hpp
index ca314b23146..46fd18015de 100644
--- a/libraries/chain/include/eosio/chain/account_object.hpp
+++ b/libraries/chain/include/eosio/chain/account_object.hpp
@@ -13,7 +13,7 @@ namespace eosio { namespace chain {
 
    class account_object : public chainbase::object<account_object_type, account_object> {
-      OBJECT_CTOR(account_object,(code)(abi))
+      OBJECT_CTOR(account_object,(abi))
 
       id_type              id;
      account_name         name;
@@ -25,7 +25,6 @@ namespace eosio { namespace chain {
      digest_type          code_version;
      block_timestamp_type creation_date;
 
-     shared_blob          code;
      shared_blob          abi;
 
      void set_abi( const eosio::chain::abi_def& a ) {
@@ -100,6 +99,6 @@ CHAINBASE_SET_INDEX_TYPE(eosio::chain::account_sequence_object, eosio::chain::account_sequence_index)
 CHAINBASE_SET_INDEX_TYPE(eosio::chain::account_ram_correction_object, eosio::chain::account_ram_correction_index)
 
-FC_REFLECT(eosio::chain::account_object, (name)(vm_type)(vm_version)(privileged)(last_code_update)(code_version)(creation_date)(code)(abi))
+FC_REFLECT(eosio::chain::account_object, (name)(vm_type)(vm_version)(privileged)(last_code_update)(code_version)(creation_date)(abi))
 FC_REFLECT(eosio::chain::account_sequence_object, (name)(recv_sequence)(auth_sequence)(code_sequence)(abi_sequence))
 FC_REFLECT(eosio::chain::account_ram_correction_object, (name)(ram_correction))

diff --git a/libraries/chain/include/eosio/chain/code_object.hpp b/libraries/chain/include/eosio/chain/code_object.hpp
new file mode 100644
index 00000000000..b2e636eb73b
--- /dev/null
+++ b/libraries/chain/include/eosio/chain/code_object.hpp
@@ -0,0 +1,35 @@
+/**
+ *  @file
+ *  @copyright defined in eos/LICENSE
+ */
+#pragma once
+#include <...>
+
+#include "multi_index_includes.hpp"
+
+namespace eosio { namespace chain {
+
+   class code_object : public chainbase::object<code_object_type, code_object> {
+      OBJECT_CTOR(code_object, (code))
+
+      id_type      id;
+      digest_type  code_id;
+      shared_blob  code;
+      uint64_t     code_ref_count;
+      uint32_t     first_block_used;
+   };
+
+   struct by_code_id;
+   using code_index = chainbase::shared_multi_index_container<
+      code_object,
+      indexed_by<
+         ordered_unique<tag<by_id>, member<code_object, code_object::id_type, &code_object::id>>,
+         ordered_unique<tag<by_code_id>, member<code_object, digest_type, &code_object::code_id>>
+      >
+   >;
+
+} } // eosio::chain
+
+CHAINBASE_SET_INDEX_TYPE(eosio::chain::code_object, eosio::chain::code_index)
+
+FC_REFLECT(eosio::chain::code_object, (code_id)(code)(code_ref_count)(first_block_used))

diff --git a/libraries/chain/include/eosio/chain/types.hpp b/libraries/chain/include/eosio/chain/types.hpp
index d681c349844..211b3a745fa 100644
--- a/libraries/chain/include/eosio/chain/types.hpp
+++ b/libraries/chain/include/eosio/chain/types.hpp
@@ -191,6 +191,7 @@ namespace eosio { namespace chain {
       reversible_block_object_type,
       protocol_state_object_type,
       account_ram_correction_object_type,
+      code_object_type,
       OBJECT_TYPE_COUNT ///< Sentry value which contains the number of different object types
    };

diff --git a/libraries/chain/include/eosio/chain/wasm_interface.hpp b/libraries/chain/include/eosio/chain/wasm_interface.hpp
index 3bc971cdd3d..25f009afd2b 100644
--- a/libraries/chain/include/eosio/chain/wasm_interface.hpp
+++ b/libraries/chain/include/eosio/chain/wasm_interface.hpp
@@ -1,4 +1,5 @@
 #pragma once
+#include <eosio/chain/code_object.hpp>
 #include <...>
 #include <...>
 #include <...>
@@ -82,7 +83,7 @@ namespace eosio { namespace chain {
          static void validate(const controller& control, const bytes& code);
 
          //Calls apply or error on a given code
-         void apply(const digest_type& code_id, const shared_string& code, apply_context& context);
+         void apply(const code_object& code, apply_context& context);
 
          //Immediately exits currently running wasm. UB is called when no wasm running
          void exit();

diff --git a/libraries/chain/include/eosio/chain/wasm_interface_private.hpp b/libraries/chain/include/eosio/chain/wasm_interface_private.hpp
index c3af34d79ea..1094a1826b0 100644
--- a/libraries/chain/include/eosio/chain/wasm_interface_private.hpp
+++ b/libraries/chain/include/eosio/chain/wasm_interface_private.hpp
@@ -6,6 +6,7 @@
 #include <...>
 #include <...>
 #include <...>
+#include <eosio/chain/code_object.hpp>
 #include <...>
 #include <...>
 
@@ -19,10 +20,21 @@ using namespace fc;
 using namespace eosio::chain::webassembly;
 using namespace IR;
 using namespace Runtime;
+using boost::multi_index_container;
 
 namespace eosio { namespace chain {
 
    struct wasm_interface_impl {
+      struct wasm_cache_entry {
+         digest_type                                          code_hash;
+         uint32_t                                             first_block_num_used;
+         uint32_t                                             last_block_num_used;
+         std::unique_ptr<wasm_instantiated_module_interface>  module;
+      };
+      struct by_hash;
+      struct by_first_block_num;
+      struct by_last_block_num;
+
       wasm_interface_impl(wasm_interface::vm_type vm) {
          if(vm == wasm_interface::vm_type::wavm)
             runtime_interface = std::make_unique<webassembly::wavm::wavm_runtime>();
@@ -50,19 +62,21 @@ namespace eosio { namespace chain {
         return mem_image;
      }
 
-     std::unique_ptr<wasm_instantiated_module_interface>& get_instantiated_module( const digest_type& code_id,
-                                                                                   const shared_string& code,
-                                                                                   transaction_context& trx_context )
+     const std::unique_ptr<wasm_instantiated_module_interface>& get_instantiated_module( const code_object& code,
+                                                                                         transaction_context& trx_context )
      {
-        auto it = instantiation_cache.find(code_id);
-        if(it == instantiation_cache.end()) {
+        wasm_cache_index::iterator it = wasm_instantiation_cache.find(code.code_id);
+        if(it == wasm_instantiation_cache.end())
+           it = wasm_instantiation_cache.emplace(wasm_interface_impl::wasm_cache_entry{code.code_id, code.first_block_used, UINT32_MAX, nullptr}).first;
+
+        if(!it->module) {
           auto timer_pause = fc::make_scoped_exit([&](){
              trx_context.resume_billing_timer();
           });
           trx_context.pause_billing_timer();
           IR::Module module;
           try {
-            Serialization::MemoryInputStream stream((const U8*)code.data(), code.size());
+            Serialization::MemoryInputStream stream((const U8*)code.code.data(), code.code.size());
             WASM::serialize(stream, module);
             module.userSections.clear();
           } catch(const Serialization::FatalSerializationException& e) {
@@ -84,13 +98,25 @@ namespace eosio { namespace chain {
           } catch(const IR::ValidationException& e) {
             EOS_ASSERT(false, wasm_serialization_error, e.message.c_str());
           }
-          it = instantiation_cache.emplace(code_id, runtime_interface->instantiate_module((const char*)bytes.data(), bytes.size(), parse_initial_memory(module))).first;
+
+          wasm_instantiation_cache.modify(it, [&](auto& c) {
+             c.module = runtime_interface->instantiate_module((const char*)bytes.data(), bytes.size(), parse_initial_memory(module));
+          });
         }
-        return it->second;
+        return it->module;
      }
 
      std::unique_ptr<wasm_runtime_interface> runtime_interface;
-     map<digest_type, std::unique_ptr<wasm_instantiated_module_interface>> instantiation_cache;
+
+     typedef boost::multi_index_container<
+        wasm_cache_entry,
+        indexed_by<
+           ordered_unique<tag<by_hash>, member<wasm_cache_entry, digest_type, &wasm_cache_entry::code_hash>>,
+           ordered_non_unique<tag<by_first_block_num>, member<wasm_cache_entry, uint32_t, &wasm_cache_entry::first_block_num_used>>,
+           ordered_non_unique<tag<by_last_block_num>, member<wasm_cache_entry, uint32_t, &wasm_cache_entry::last_block_num_used>>
+        >
+     > wasm_cache_index;
+     wasm_cache_index wasm_instantiation_cache;
   };
 
 #define _REGISTER_INTRINSIC_EXPLICIT(CLS, MOD, METHOD, WASM_SIG, NAME, SIG)\

diff --git a/libraries/chain/wasm_interface.cpp b/libraries/chain/wasm_interface.cpp
index 54feb1b6a87..7f363154ed0 100644
--- a/libraries/chain/wasm_interface.cpp
+++ b/libraries/chain/wasm_interface.cpp
@@ -57,8 +57,8 @@ namespace eosio { namespace chain {
       //Hard: Kick off instantiation in a separate thread at this location
    }
 
-   void wasm_interface::apply( const digest_type& code_id, const shared_string& code, apply_context& context ) {
-      my->get_instantiated_module(code_id, code, context.trx_context)->apply(context);
+   void wasm_interface::apply( const code_object& code, apply_context& context ) {
+      my->get_instantiated_module(code, context.trx_context)->apply(context);
    }
 
    void wasm_interface::exit() {

diff --git a/plugins/chain_plugin/chain_plugin.cpp b/plugins/chain_plugin/chain_plugin.cpp
index dec98b41b55..1e555ece079 100644
--- a/plugins/chain_plugin/chain_plugin.cpp
+++ b/plugins/chain_plugin/chain_plugin.cpp
@@ -8,6 +8,7 @@
 #include <...>
 #include <...>
 #include <...>
+#include <eosio/chain/code_object.hpp>
 #include <...>
 #include <...>
 #include <...>
@@ -2050,8 +2051,9 @@ read_only::get_code_results read_only::get_code( const get_code_params& params )
 
    EOS_ASSERT( params.code_as_wasm, unsupported_feature, "Returning WAST from get_code is no longer supported" );
 
-   if( accnt.code.size() ) {
-      result.wasm = string(accnt.code.begin(), accnt.code.end());
+   if( accnt.code_version != digest_type() ) {
+      const auto& code = d.get<code_object,by_code_id>(accnt.code_version).code;
+      result.wasm = string(code.begin(), code.end());
       result.code_hash = accnt.code_version;
    }
 
@@ -2069,7 +2071,7 @@ read_only::get_code_hash_results read_only::get_code_hash( const get_code_hash_params& params ) {
    const auto& d = db.db();
    const auto& accnt = d.get<account_object,by_name>( params.account_name );
 
-   if( accnt.code.size() ) {
+   if( accnt.code_version != digest_type() ) {
      result.code_hash = accnt.code_version;
    }
 
@@ -2082,7 +2084,10 @@ read_only::get_raw_code_and_abi_results read_only::get_raw_code_and_abi( const get_raw_code_and_abi_params& params ) {
    const auto& d = db.db();
    const auto& accnt = d.get<account_object,by_name>(params.account_name);
 
-   result.wasm = blob{{accnt.code.begin(), accnt.code.end()}};
+   if( accnt.code_version != digest_type() ) {
+      const auto& code = d.get<code_object,by_code_id>(accnt.code_version).code;
+      result.wasm = blob{{code.begin(), code.end()}};
+   }
    result.abi = blob{{accnt.abi.begin(), accnt.abi.end()}};
 
    return result;

diff --git a/plugins/state_history_plugin/include/eosio/state_history_plugin/state_history_serialization.hpp b/plugins/state_history_plugin/include/eosio/state_history_plugin/state_history_serialization.hpp
index 5893c49dde4..92d7cf6da3e 100644
--- a/plugins/state_history_plugin/include/eosio/state_history_plugin/state_history_serialization.hpp
+++ b/plugins/state_history_plugin/include/eosio/state_history_plugin/state_history_serialization.hpp
@@ -112,7 +112,6 @@ datastream<ST>& operator<<(datastream<ST>& ds, const history_serial_wrapper<eosio::chain::account_object>& obj) {
    fc::raw::pack(ds, as_type<fc::time_point>(obj.obj.last_code_update));
    fc::raw::pack(ds, as_type<eosio::chain::digest_type>(obj.obj.code_version));
    fc::raw::pack(ds, as_type<eosio::chain::block_timestamp_type>(obj.obj.creation_date));
-   fc::raw::pack(ds, as_type<eosio::chain::shared_blob>(obj.obj.code));
   fc::raw::pack(ds, as_type<eosio::chain::shared_blob>(obj.obj.abi));
   return ds;
 }
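The instantiation cache above keys compiled modules by code hash in a boost::multi_index container and fills the module lazily through modify(), keeping every index consistent. A compilable sketch of that find-or-insert-then-fill pattern, with simplified types (the real entry also carries the block-number indices intended for future pruning; a shared_ptr stands in for the patch's unique_ptr to keep the sketch copyable):

#include <boost/multi_index_container.hpp>
#include <boost/multi_index/ordered_index.hpp>
#include <boost/multi_index/member.hpp>
#include <cstdint>
#include <memory>
#include <string>

struct module_t {};                        // stands in for wasm_instantiated_module_interface

struct cache_entry {
   std::string               code_hash;
   uint32_t                  first_block_num_used;
   uint32_t                  last_block_num_used;
   std::shared_ptr<module_t> module;
};
struct by_hash {};

using cache_index = boost::multi_index_container<
   cache_entry,
   boost::multi_index::indexed_by<
      boost::multi_index::ordered_unique<
         boost::multi_index::tag<by_hash>,
         boost::multi_index::member<cache_entry, std::string, &cache_entry::code_hash>>
   >
>;

// Look up by hash; on a miss insert a placeholder row, then build the expensive
// module once and store it via modify().
const std::shared_ptr<module_t>& get_or_build(cache_index& cache, const std::string& hash,
                                              uint32_t first_used) {
   auto it = cache.find(hash);
   if (it == cache.end())
      it = cache.emplace(cache_entry{hash, first_used, UINT32_MAX, nullptr}).first;
   if (!it->module)
      cache.modify(it, [](cache_entry& e) { e.module = std::make_shared<module_t>(); });
   return it->module;
}

int main() {
   cache_index cache;
   get_or_build(cache, "deadbeef", 100);   // miss: placeholder inserted, module built once
   get_or_build(cache, "deadbeef", 200);   // hit: cached module reused
}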
From ae4f241df1188d848773065c2a816a2b285c6807 Mon Sep 17 00:00:00 2001
From: Jeeyong Um
Date: Sat, 13 Apr 2019 01:52:59 +0900
Subject: [PATCH 366/680] Fix more hard-coded executable names

---
 programs/cleos/config.hpp.in          |  2 ++
 programs/cleos/httpc.hpp              |  4 +++-
 programs/cleos/main.cpp               | 34 +++++++++++++--------------
 programs/eosio-launcher/config.hpp.in |  1 +
 programs/eosio-launcher/main.cpp      | 27 +++++++++++----------
 5 files changed, 37 insertions(+), 31 deletions(-)

diff --git a/programs/cleos/config.hpp.in b/programs/cleos/config.hpp.in
index 3fe2051d74c..d9d5f45b1de 100644
--- a/programs/cleos/config.hpp.in
+++ b/programs/cleos/config.hpp.in
@@ -3,10 +3,12 @@
  *
  * \warning This file is machine generated. DO NOT EDIT. See config.hpp.in for changes.
  */
+#pragma once
 namespace eosio { namespace client { namespace config {
    constexpr char version_str[] = "${cleos_BUILD_VERSION}";
    constexpr char locale_path[] = "${LOCALEDIR}";
    constexpr char locale_domain[] = "${LOCALEDOMAIN}";
    constexpr char key_store_executable_name[] = "${KEY_STORE_EXECUTABLE_NAME}";
+   constexpr char node_executable_name[] = "${NODE_EXECUTABLE_NAME}";
 }}}

diff --git a/programs/cleos/httpc.hpp b/programs/cleos/httpc.hpp
index 8d8ba5d67bc..f90e58046c4 100644
--- a/programs/cleos/httpc.hpp
+++ b/programs/cleos/httpc.hpp
@@ -4,6 +4,8 @@
  */
 #pragma once
 
+#include "config.hpp"
+
 namespace eosio { namespace client { namespace http {
    namespace detail {
@@ -128,7 +130,7 @@ namespace eosio { namespace client { namespace http {
    const string wallet_remove_key = wallet_func_base + "/remove_key";
    const string wallet_create_key = wallet_func_base + "/create_key";
    const string wallet_sign_trx = wallet_func_base + "/sign_transaction";
-   const string keosd_stop = "/v1/keosd/stop";
+   const string keosd_stop = "/v1/" + string(client::config::key_store_executable_name) + "/stop";
 
    FC_DECLARE_EXCEPTION( connection_exception, 1100000, "Connection Exception" );
 }}}

diff --git a/programs/cleos/main.cpp b/programs/cleos/main.cpp
index caa24ae5ccf..ac6e8d4fdc0 100644
--- a/programs/cleos/main.cpp
+++ b/programs/cleos/main.cpp
@@ -252,9 +252,9 @@ fc::variant call( const std::string& url,
    }
    catch(boost::system::system_error& e) {
       if(url == ::url)
-         std::cerr << localized("Failed to connect to nodeos at ${u}; is nodeos running?", ("u", url)) << std::endl;
+         std::cerr << localized("Failed to connect to ${n} at ${u}; is ${n} running?", ("n", node_executable_name)("u", url)) << std::endl;
       else if(url == ::wallet_url)
-         std::cerr << localized("Failed to connect to keosd at ${u}; is keosd running?", ("u", url)) << std::endl;
+         std::cerr << localized("Failed to connect to ${k} at ${u}; is ${k} running?", ("k", key_store_executable_name)("u", url)) << std::endl;
       throw connection_exception(fc::log_messages{FC_LOG_MESSAGE(error, e.what())});
    }
 }
@@ -876,8 +876,8 @@ void try_local_port(uint32_t duration) {
    auto start_time = duration_cast<std::chrono::milliseconds>( system_clock::now().time_since_epoch() ).count();
    while ( !local_port_used()) {
      if (duration_cast<std::chrono::milliseconds>( system_clock::now().time_since_epoch()).count() - start_time > duration ) {
-        std::cerr << "Unable to connect to keosd, if keosd is running please kill the process and try again.\n";
-        throw connection_exception(fc::log_messages{FC_LOG_MESSAGE(error, "Unable to connect to keosd")});
+        std::cerr << "Unable to connect to " << key_store_executable_name << ", if " << key_store_executable_name << " is running please kill the process and try again.\n";
+        throw connection_exception(fc::log_messages{FC_LOG_MESSAGE(error, "Unable to connect to ${k}", ("k", key_store_executable_name))});
      }
    }
 }
@@ -935,7 +935,7 @@ void ensure_keosd_running(CLI::App* app) {
        }
    } else {
       std::cerr << "No wallet service listening on "
-                << ". Cannot automatically start keosd because keosd was not found." << std::endl;
+                << ". Cannot automatically start " << key_store_executable_name << " because " << key_store_executable_name << " was not found." << std::endl;
    }
 }
@@ -2331,17 +2331,17 @@ int main( int argc, char** argv ) {
    CLI::App app{"Command Line Interface to EOSIO Client"};
    app.require_subcommand();
-   app.add_option( "-H,--host", obsoleted_option_host_port, localized("the host where nodeos is running") )->group("hidden");
-   app.add_option( "-p,--port", obsoleted_option_host_port, localized("the port where nodeos is running") )->group("hidden");
-   app.add_option( "--wallet-host", obsoleted_option_host_port, localized("the host where keosd is running") )->group("hidden");
-   app.add_option( "--wallet-port", obsoleted_option_host_port, localized("the port where keosd is running") )->group("hidden");
+   app.add_option( "-H,--host", obsoleted_option_host_port, localized("the host where ${n} is running", ("n", node_executable_name)) )->group("hidden");
+   app.add_option( "-p,--port", obsoleted_option_host_port, localized("the port where ${n} is running", ("n", node_executable_name)) )->group("hidden");
+   app.add_option( "--wallet-host", obsoleted_option_host_port, localized("the host where ${k} is running", ("k", key_store_executable_name)) )->group("hidden");
+   app.add_option( "--wallet-port", obsoleted_option_host_port, localized("the port where ${k} is running", ("k", key_store_executable_name)) )->group("hidden");
 
-   app.add_option( "-u,--url", url, localized("the http/https URL where nodeos is running"), true );
-   app.add_option( "--wallet-url", wallet_url, localized("the http/https URL where keosd is running"), true );
+   app.add_option( "-u,--url", url, localized("the http/https URL where ${n} is running", ("n", node_executable_name)), true );
+   app.add_option( "--wallet-url", wallet_url, localized("the http/https URL where ${k} is running", ("k", key_store_executable_name)), true );
 
    app.add_option( "-r,--header", header_opt_callback, localized("pass specific HTTP header; repeat this option to pass multiple headers"));
    app.add_flag( "-n,--no-verify", no_verify, localized("don't verify peer certificate when using HTTPS"));
-   app.add_flag( "--no-auto-keosd", no_auto_keosd, localized("don't automatically launch a keosd if one is not currently running"));
+   app.add_flag( "--no-auto-" + string(key_store_executable_name), no_auto_keosd, localized("don't automatically launch a ${k} if one is not currently running", ("k", key_store_executable_name)));
    app.set_callback([&app]{ ensure_keosd_running(&app);});
 
    app.add_flag( "-v,--verbose", verbose, localized("output verbose errors and action console output"));
@@ -2398,7 +2398,7 @@ int main( int argc, char** argv ) {
    bool pack_action_data_flag = false;
    auto pack_transaction = convert->add_subcommand("pack_transaction", localized("From plain signed json to packed form"));
    pack_transaction->add_option("transaction", plain_signed_transaction_json, localized("The plain signed json (string)"))->required();
-   pack_transaction->add_flag("--pack-action-data", pack_action_data_flag, localized("Pack all action data within transaction, needs interaction with nodeos"));
+   pack_transaction->add_flag("--pack-action-data", pack_action_data_flag, localized("Pack all action data within transaction, needs interaction with ${n}", ("n", node_executable_name)));
    pack_transaction->set_callback([&] {
      fc::variant trx_var;
      try {
@@ -2421,7 +2421,7 @@ int main( int argc, char** argv ) {
    bool unpack_action_data_flag = false;
    auto unpack_transaction = convert->add_subcommand("unpack_transaction", localized("From packed to plain signed json form"));
    unpack_transaction->add_option("transaction", packed_transaction_json, localized("The packed transaction json (string containing packed_trx and optionally compression fields)"))->required();
-   unpack_transaction->add_flag("--unpack-action-data", unpack_action_data_flag, localized("Unpack all action data within transaction, needs interaction with nodeos"));
+   unpack_transaction->add_flag("--unpack-action-data", unpack_action_data_flag, localized("Unpack all action data within transaction, needs interaction with ${n}", ("n", node_executable_name)));
    unpack_transaction->set_callback([&] {
      fc::variant packed_trx_var;
      packed_transaction packed_trx;
@@ -2541,7 +2541,7 @@ int main( int argc, char** argv ) {
            code_hash = old_result["code_hash"].as_string();
            if(code_as_wasm) {
               wasm = old_result["wasm"].as_string();
-              std::cout << localized("Warning: communicating to older nodeos which returns malformed binary wasm") << std::endl;
+              std::cout << localized("Warning: communicating to older ${n} which returns malformed binary wasm", ("n", node_executable_name)) << std::endl;
            } else
               wast = old_result["wast"].as_string();
@@ -3270,7 +3270,7 @@ int main( int argc, char** argv ) {
      std::cout << fc::json::to_pretty_string(v) << std::endl;
    });
 
-   auto stopKeosd = wallet->add_subcommand("stop", localized("Stop keosd."), false);
+   auto stopKeosd = wallet->add_subcommand("stop", localized("Stop ${k}.", ("k", key_store_executable_name)), false);
    stopKeosd->set_callback([] {
      const auto& v = call(wallet_url, keosd_stop);
      if ( !v.is_object() || v.get_object().size() != 0 ) { //on success keosd responds with empty object
@@ -3299,7 +3299,7 @@ int main( int argc, char** argv ) {
    fc::optional<chain_id_type> chain_id;
 
    if( str_chain_id.size() == 0 ) {
-       ilog( "grabbing chain_id from nodeos" );
+       ilog( "grabbing chain_id from ${n}", ("n", node_executable_name) );
        auto info = get_info();
        chain_id = info.chain_id;
    } else {

diff --git a/programs/eosio-launcher/config.hpp.in b/programs/eosio-launcher/config.hpp.in
index f60e6ab19e5..f733308dc1b 100644
--- a/programs/eosio-launcher/config.hpp.in
+++ b/programs/eosio-launcher/config.hpp.in
@@ -11,6 +11,7 @@
 namespace eosio { namespace launcher { namespace config {
   constexpr char version_str[] = "${launcher_BUILD_VERSION}";
+  constexpr char node_executable_name[] = "${NODE_EXECUTABLE_NAME}";
 }}}
 
 #endif // CONFIG_HPP_IN

diff --git a/programs/eosio-launcher/main.cpp b/programs/eosio-launcher/main.cpp
index 7e6bfbaf7b3..fe73e1fb305 100644
--- a/programs/eosio-launcher/main.cpp
+++ b/programs/eosio-launcher/main.cpp
@@ -48,6 +48,7 @@ using bpo::options_description;
 using bpo::variables_map;
 using public_key_type = fc::crypto::public_key;
 using private_key_type = fc::crypto::private_key;
+using namespace eosio::launcher::config;
 
 const string block_dir = "blocks";
 const string shared_mem_dir = "state";
@@ -490,18 +491,18 @@ launcher_def::set_options (bpo::options_description &cfg) {
    ("shape,s",bpo::value(&shape)->default_value("star"),"network topology, use \"star\" \"mesh\" or give a filename for custom")
    ("p2p-plugin", bpo::value<string>()->default_value("net"),"select a p2p plugin to use (either net or bnet). Defaults to net.")
    ("genesis,g",bpo::value<string>()->default_value("./genesis.json"),"set the path to genesis.json")
-   ("skip-signature", bpo::bool_switch(&skip_transaction_signatures)->default_value(false), "nodeos does not require transaction signatures.")
-   ("nodeos", bpo::value(&eosd_extra_args), "forward nodeos command line argument(s) to each instance of nodeos, enclose arg(s) in quotes")
-   ("specific-num", bpo::value<vector<uint>>()->composing(), "forward nodeos command line argument(s) (using \"--specific-nodeos\" flag) to this specific instance of nodeos. This parameter can be entered multiple times and requires a paired \"--specific-nodeos\" flag each time it is used")
-   ("specific-nodeos", bpo::value<vector<string>>()->composing(), "forward nodeos command line argument(s) to its paired specific instance of nodeos(using \"--specific-num\"), enclose arg(s) in quotes")
-   ("spcfc-inst-num", bpo::value<vector<uint>>()->composing(), "Specify a specific version installation path (using \"--spcfc-inst-nodeos\" flag) for launching this specific instance of nodeos. This parameter can be entered multiple times and requires a paired \"--spcfc-inst-nodeos\" flag each time it is used")
-   ("spcfc-inst-nodeos", bpo::value<vector<string>>()->composing(), "Provide a specific version installation path to its paired specific instance of nodeos(using \"--spcfc-inst-num\")")
+   ("skip-signature", bpo::bool_switch(&skip_transaction_signatures)->default_value(false), (string(node_executable_name) + " does not require transaction signatures.").c_str())
+   (node_executable_name, bpo::value(&eosd_extra_args), ("forward " + string(node_executable_name) + " command line argument(s) to each instance of " + string(node_executable_name) + ", enclose arg(s) in quotes").c_str())
+   ("specific-num", bpo::value<vector<uint>>()->composing(), ("forward " + string(node_executable_name) + " command line argument(s) (using \"--specific-" + string(node_executable_name) + "\" flag) to this specific instance of " + string(node_executable_name) + ". This parameter can be entered multiple times and requires a paired \"--specific-" + string(node_executable_name) +"\" flag each time it is used").c_str())
+   (("specific-" + string(node_executable_name)).c_str(), bpo::value<vector<string>>()->composing(), ("forward " + string(node_executable_name) + " command line argument(s) to its paired specific instance of " + string(node_executable_name) + "(using \"--specific-num\"), enclose arg(s) in quotes").c_str())
+   ("spcfc-inst-num", bpo::value<vector<uint>>()->composing(), ("Specify a specific version installation path (using \"--spcfc-inst-"+ string(node_executable_name) + "\" flag) for launching this specific instance of " + string(node_executable_name) + ". This parameter can be entered multiple times and requires a paired \"--spcfc-inst-" + string(node_executable_name) + "\" flag each time it is used").c_str())
+   (("spcfc-inst-" + string(node_executable_name)).c_str(), bpo::value<vector<string>>()->composing(), ("Provide a specific version installation path to its paired specific instance of " + string(node_executable_name) + "(using \"--spcfc-inst-num\")").c_str())
    ("delay,d",bpo::value(&start_delay)->default_value(0),"seconds delay before starting each node after the first")
    ("boot",bpo::bool_switch(&boot)->default_value(false),"After deploying the nodes and generating a boot script, invoke it.")
    ("nogen",bpo::bool_switch(&nogen)->default_value(false),"launch nodes without writing new config files")
    ("host-map",bpo::value<string>(),"a file containing mapping specific nodes to hosts. Used to enhance the custom shape argument")
    ("servers",bpo::value<string>(),"a file containing ip addresses and names of individual servers to deploy as producers or non-producers ")
-   ("per-host",bpo::value(&per_host)->default_value(0),"specifies how many nodeos instances will run on a single host. Use 0 to indicate all on one.")
+   ("per-host",bpo::value(&per_host)->default_value(0),("specifies how many " + string(node_executable_name) + " instances will run on a single host. Use 0 to indicate all on one.").c_str())
    ("network-name",bpo::value(&network.name)->default_value("testnet_"),"network name prefix used in GELF logging source")
    ("enable-gelf-logging",bpo::value(&gelf_enabled)->default_value(true),"enable gelf logging appender in logging configuration file")
    ("gelf-endpoint",bpo::value(&gelf_endpoint)->default_value("10.160.11.21:12201"),"hostname:port or ip:port of GELF endpoint")
@@ -578,8 +579,8 @@ launcher_def::initialize (const variables_map &vmap) {
      server_ident_file = vmap["servers"].as<string>();
    }
 
-   retrieve_paired_array_parameters(vmap, "specific-num", "specific-nodeos", specific_nodeos_args);
-   retrieve_paired_array_parameters(vmap, "spcfc-inst-num", "spcfc-inst-nodeos", specific_nodeos_installation_paths);
+   retrieve_paired_array_parameters(vmap, "specific-num", "specific-" + string(node_executable_name), specific_nodeos_args);
+   retrieve_paired_array_parameters(vmap, "spcfc-inst-num", "spcfc-inst-" + string(node_executable_name), specific_nodeos_installation_paths);
 
    using namespace std::chrono;
    system_clock::time_point now = system_clock::now();
@@ -645,9 +646,9 @@ launcher_def::initialize (const variables_map &vmap) {
 
    if (vmap.count("specific-num")) {
      const auto specific_nums = vmap["specific-num"].as<vector<uint>>();
-     const auto specific_args = vmap["specific-nodeos"].as<vector<string>>();
+     const auto specific_args = vmap["specific-" + string(node_executable_name)].as<vector<string>>();
      if (specific_nums.size() != specific_args.size()) {
-       cerr << "ERROR: every specific-num argument must be paired with a specific-nodeos argument" << endl;
+       cerr << "ERROR: every specific-num argument must be paired with a specific-" << node_executable_name << " argument" << endl;
        exit (-1);
      }
      // don't include bios
@@ -1538,7 +1539,7 @@ launcher_def::launch (eosd_def &instance, string &gts) {
    bfs::path reerr_sl = dd / "stderr.txt";
    bfs::path reerr_base = bfs::path("stderr." + launch_time + ".txt");
    bfs::path reerr = dd / reerr_base;
-   bfs::path pidf  = dd / "nodeos.pid";
+   bfs::path pidf  = dd / bfs::path(string(node_executable_name) + ".pid");
    host_def* host;
    try {
      host = deploy_config_files (*instance.node);
@@ -1557,7 +1558,7 @@ launcher_def::launch (eosd_def &instance, string &gts) {
        install_path = specific_nodeos_installation_paths[node_num] + "/";
      }
    }
-   string eosdcmd = install_path + "programs/nodeos/nodeos ";
+   string eosdcmd = install_path + "programs/nodeos/" + string(node_executable_name) + " ";
    if (skip_transaction_signatures) {
      eosdcmd += "--skip-transaction-signatures ";
    }
+ launch_time + ".txt"); bfs::path reerr = dd / reerr_base; - bfs::path pidf = dd / "nodeos.pid"; + bfs::path pidf = dd / bfs::path(string(node_executable_name) + ".pid"); host_def* host; try { host = deploy_config_files (*instance.node); @@ -1557,7 +1558,7 @@ launcher_def::launch (eosd_def &instance, string >s) { install_path = specific_nodeos_installation_paths[node_num] + "/"; } } - string eosdcmd = install_path + "programs/nodeos/nodeos "; + string eosdcmd = install_path + "programs/nodeos/" + string(node_executable_name); if (skip_transaction_signatures) { eosdcmd += "--skip-transaction-signatures "; } From 4b3de8a38c37e923b99bf1b58921bba557cd67d4 Mon Sep 17 00:00:00 2001 From: Bucky Kittinger Date: Fri, 12 Apr 2019 13:52:33 -0400 Subject: [PATCH 367/680] fix bash issue and add -p to pipeline.yml --- .buildkite/pipeline.yml | 10 +++++----- scripts/eosio_build.sh | 2 ++ 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml index 19bbdf114ff..9ab24bbe88d 100644 --- a/.buildkite/pipeline.yml +++ b/.buildkite/pipeline.yml @@ -1,7 +1,7 @@ steps: - command: | # Ubuntu 16.04 Build echo "+++ :hammer: Building" - ./scripts/eosio_build.sh -y + ./scripts/eosio_build.sh -y -p echo "--- :compression: Compressing build directory" tar -pczf build.tar.gz build/ label: ":ubuntu: 16.04 Build" @@ -22,7 +22,7 @@ steps: - command: | # Ubuntu 18.04 Build echo "+++ :hammer: Building" - ./scripts/eosio_build.sh -y + ./scripts/eosio_build.sh -y -p echo "--- :compression: Compressing build directory" tar -pczf build.tar.gz build/ label: ":ubuntu: 18.04 Build" @@ -43,7 +43,7 @@ steps: - command: | # CentOS 7 Build echo "+++ :hammer: Building" - ./scripts/eosio_build.sh -y + ./scripts/eosio_build.sh -y -p echo "--- :compression: Compressing build directory" tar -pczf build.tar.gz build/ label: ":centos: 7 Build" @@ -64,7 +64,7 @@ steps: - command: | # Amazon Linux 2 Build echo "+++ :hammer: Building" - ./scripts/eosio_build.sh -y + ./scripts/eosio_build.sh -y -p echo "--- :compression: Compressing build directory" tar -pczf build.tar.gz build/ label: ":aws: 2 Build" @@ -87,7 +87,7 @@ steps: echo "--- Creating symbolic link to job directory :file_folder:" sleep 5 && ln -s "$(pwd)" /data/job && cd /data/job echo "+++ Building :hammer:" - ./scripts/eosio_build.sh -y + ./scripts/eosio_build.sh -y -p echo "--- Compressing build directory :compression:" tar -pczf build.tar.gz build/ label: ":darwin: Mojave Build" diff --git a/scripts/eosio_build.sh b/scripts/eosio_build.sh index 684f4d4c870..7710e363802 100755 --- a/scripts/eosio_build.sh +++ b/scripts/eosio_build.sh @@ -117,6 +117,8 @@ function usage() } NONINTERACTIVE=0 +PIN_COMPILER=false +PIN_COMPILER_CMAKE="" if [ $# -ne 0 ]; then while getopts ":cdo:s:ahpy" opt; do From da1cd0817e728f652d39193b330763607165dc87 Mon Sep 17 00:00:00 2001 From: Bucky Kittinger Date: Fri, 12 Apr 2019 13:55:57 -0400 Subject: [PATCH 368/680] update chainbase submodule --- libraries/chainbase | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/chainbase b/libraries/chainbase index 8ade03c693f..8a153c42842 160000 --- a/libraries/chainbase +++ b/libraries/chainbase @@ -1 +1 @@ -Subproject commit 8ade03c693ff4ccde3debb6ccfd0fda2b37b0c8a +Subproject commit 8a153c428429a62ce727814a1ba04d3fcdc2bc83 From 454fa3d77830b4bd2c54c0244b4fcf38d31ad907 Mon Sep 17 00:00:00 2001 From: Bart Wyatt Date: Fri, 12 Apr 2019 15:08:43 -0400 Subject: [PATCH 369/680] disambiguate snapshots that are being written from snapshots that 
are pending finality --- plugins/producer_plugin/producer_plugin.cpp | 28 +++++++++++++++------ 1 file changed, 21 insertions(+), 7 deletions(-) diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp index 174c2e37cf6..e0816496dfe 100644 --- a/plugins/producer_plugin/producer_plugin.cpp +++ b/plugins/producer_plugin/producer_plugin.cpp @@ -94,10 +94,10 @@ class pending_snapshot { public: using next_t = producer_plugin::next_function; - pending_snapshot(const block_id_type& block_id, next_t& next, std::string temp_path, std::string final_path) + pending_snapshot(const block_id_type& block_id, next_t& next, std::string pending_path, std::string final_path) : block_id(block_id) , next(next) - , temp_path(temp_path) + , pending_path(pending_path) , final_path(final_path) {} @@ -109,22 +109,26 @@ class pending_snapshot { return snapshots_dir / fc::format_string("snapshot-${id}.bin", fc::mutable_variant_object()("id", block_id)); } - static bfs::path get_temp_path(const block_id_type& block_id, const bfs::path& snapshots_dir) { + static bfs::path get_pending_path(const block_id_type& block_id, const bfs::path& snapshots_dir) { return snapshots_dir / fc::format_string(".pending-snapshot-${id}.bin", fc::mutable_variant_object()("id", block_id)); } + static bfs::path get_temp_path(const block_id_type& block_id, const bfs::path& snapshots_dir) { + return snapshots_dir / fc::format_string(".incomplete-snapshot-${id}.bin", fc::mutable_variant_object()("id", block_id)); + } + producer_plugin::snapshot_information finalize( const chain::controller& chain ) const { auto in_chain = (bool)chain.fetch_block_by_id( block_id ); boost::system::error_code ec; if (!in_chain) { - bfs::remove(bfs::path(temp_path), ec); + bfs::remove(bfs::path(pending_path), ec); EOS_THROW(snapshot_finalization_exception, "Snapshotted block was forked out of the chain. 
ID: ${block_id}", ("block_id", block_id)); } - bfs::rename(bfs::path(temp_path), bfs::path(final_path), ec); + bfs::rename(bfs::path(pending_path), bfs::path(final_path), ec); EOS_ASSERT(!ec, snapshot_finalization_exception, "Unable to finalize valid snapshot of block number ${bn}: [code: ${ec}] ${message}", ("bn", get_height()) @@ -136,7 +140,7 @@ class pending_snapshot { block_id_type block_id; next_t next; - std::string temp_path; + std::string pending_path; std::string final_path; }; @@ -1044,6 +1048,7 @@ void producer_plugin::create_snapshot(producer_plugin::next_function_snapshots_dir)).generic_string(); + std::string pending_path = (pending_snapshot::get_pending_path(head_id, my->_snapshots_dir)).generic_string(); std::string final_path = (pending_snapshot::get_final_path(head_id, my->_snapshots_dir)).generic_string(); bool written = false; @@ -1066,7 +1071,16 @@ void producer_plugin::create_snapshot(producer_plugin::next_functionfinalize(); snap_out.flush(); snap_out.close(); - my->_pending_snapshot_index.emplace(head_id, next, temp_path, final_path); + + boost::system::error_code ec; + bfs::rename(temp_path, pending_path, ec); + EOS_ASSERT(!ec, snapshot_finalization_exception, + "Unable to promote temp snapshot to pending for block number ${bn}: [code: ${ec}] ${message}", + ("bn", chain.head_block_num()) + ("ec", ec.value()) + ("message", ec.message())); + + my->_pending_snapshot_index.emplace(head_id, next, pending_path, final_path); } CATCH_AND_CALL (next); } } From 42838f0a89d6882b7f8e979b332986a8f59e157c Mon Sep 17 00:00:00 2001 From: arhag Date: Fri, 12 Apr 2019 15:25:23 -0400 Subject: [PATCH 370/680] Added protocol_feature_tests/get_sender_test unit test to test the GET_SENDER protocol feature. #7028 Added a new test contract, get_sender_test, to enable the unit test. The protocol_feature_tests/get_sender_test replaces api_tests/get_sender_test. 
--- unittests/api_tests.cpp | 46 ++---------- unittests/contracts.hpp.in | 1 + unittests/protocol_feature_tests.cpp | 53 ++++++++++++++ unittests/test-contracts/CMakeLists.txt | 1 + unittests/test-contracts/README.md | 2 +- .../get_sender_test/CMakeLists.txt | 6 ++ .../get_sender_test/get_sender_test.abi | 69 ++++++++++++++++++ .../get_sender_test/get_sender_test.cpp | 30 ++++++++ .../get_sender_test/get_sender_test.hpp | 43 +++++++++++ .../get_sender_test/get_sender_test.wasm | Bin 0 -> 4180 bytes .../test-contracts/test_api/test_action.cpp | 65 ++--------------- .../test-contracts/test_api/test_api.cpp | 6 +- .../test-contracts/test_api/test_api.hpp | 3 - 13 files changed, 215 insertions(+), 110 deletions(-) create mode 100644 unittests/test-contracts/get_sender_test/CMakeLists.txt create mode 100644 unittests/test-contracts/get_sender_test/get_sender_test.abi create mode 100644 unittests/test-contracts/get_sender_test/get_sender_test.cpp create mode 100644 unittests/test-contracts/get_sender_test/get_sender_test.hpp create mode 100755 unittests/test-contracts/get_sender_test/get_sender_test.wasm diff --git a/unittests/api_tests.cpp b/unittests/api_tests.cpp index b1eb0a14da6..6571d930b7a 100644 --- a/unittests/api_tests.cpp +++ b/unittests/api_tests.cpp @@ -2086,7 +2086,7 @@ BOOST_FIXTURE_TEST_CASE(action_ordinal_test, TESTER) { try { set_code( N(erin), contracts::test_api_wasm() ); produce_blocks(1); - transaction_trace_ptr txn_trace = CALL_TEST_FUNCTION_SCOPE( *this, "test_action", "test_action_ordinal1", + transaction_trace_ptr txn_trace = CALL_TEST_FUNCTION_SCOPE( *this, "test_action", "test_action_ordinal1", {}, vector{ N(testapi)}); BOOST_REQUIRE_EQUAL( validate(), true ); @@ -2221,7 +2221,7 @@ BOOST_FIXTURE_TEST_CASE(action_ordinal_failtest1, TESTER) { try { create_account(N(fail1) ); // <- make first action fails in the middle produce_blocks(1); - transaction_trace_ptr txn_trace = + transaction_trace_ptr txn_trace = CALL_TEST_FUNCTION_NO_THROW( *this, "test_action", "test_action_ordinal1", {}); BOOST_REQUIRE_EQUAL( validate(), true ); @@ -2231,7 +2231,7 @@ BOOST_FIXTURE_TEST_CASE(action_ordinal_failtest1, TESTER) { try { auto &atrace = txn_trace->action_traces; - // fails here after creating one notify action and one inline action + // fails here after creating one notify action and one inline action BOOST_REQUIRE_EQUAL((int)atrace[0].action_ordinal, 1); BOOST_REQUIRE_EQUAL((int)atrace[0].creator_action_ordinal, 0); BOOST_REQUIRE_EQUAL((int)atrace[0].parent_action_ordinal, 0); @@ -2289,7 +2289,7 @@ BOOST_FIXTURE_TEST_CASE(action_ordinal_failtest2, TESTER) { try { create_account(N(fail3) ); // <- make action 3 fails in the middle produce_blocks(1); - transaction_trace_ptr txn_trace = + transaction_trace_ptr txn_trace = CALL_TEST_FUNCTION_NO_THROW( *this, "test_action", "test_action_ordinal1", {}); BOOST_REQUIRE_EQUAL( validate(), true ); @@ -2408,7 +2408,7 @@ BOOST_FIXTURE_TEST_CASE(action_ordinal_failtest3, TESTER) { try { create_account(N(failnine) ); // <- make action 9 fails in the middle produce_blocks(1); - transaction_trace_ptr txn_trace = + transaction_trace_ptr txn_trace = CALL_TEST_FUNCTION_NO_THROW( *this, "test_action", "test_action_ordinal1", {}); BOOST_REQUIRE_EQUAL( validate(), true ); @@ -2532,40 +2532,4 @@ BOOST_FIXTURE_TEST_CASE(action_ordinal_failtest3, TESTER) { try { } FC_LOG_AND_RETHROW() } -/************************************************************************************* -+ * get_sender_test test cases -+ 
*************************************************************************************/ -BOOST_FIXTURE_TEST_CASE(get_sender_test, TESTER) { try { - - produce_blocks(1); - create_account(N(testapi) ); - create_account(N(testapi2), N(testapi), true, true ); - - set_code( N(testapi), contracts::test_api_wasm() ); - produce_blocks(1); - set_code( N(testapi2), contracts::test_api_wasm() ); - produce_blocks(1); - - using uint128_t = eosio::chain::uint128_t; - - uint128_t data = (N(testapi2) | ((uint128_t)(N(testapi)) << 64)); - CALL_TEST_FUNCTION( *this, "test_action", "get_sender_send_inline", fc::raw::pack(data) ); - - data = (N(testapi2) | ((uint128_t)(N(testapi2)) << 64)); - BOOST_CHECK_THROW(CALL_TEST_FUNCTION( *this, "test_action", "get_sender_send_inline", fc::raw::pack(data)), eosio_assert_message_exception); - - data = (N(testapi2) | ((uint128_t)(N(testapi)) << 64)); - CALL_TEST_FUNCTION( *this, "test_action", "get_sender_notify", fc::raw::pack(data) ); - - data = (N(testapi2) | ((uint128_t)(N(testapi2)) << 64)); - BOOST_CHECK_THROW(CALL_TEST_FUNCTION( *this, "test_action", "get_sender_notify", fc::raw::pack(data)), eosio_assert_message_exception); - - data = ((uint128_t)1 | N(testapi2) | ((uint128_t)(N(testapi2)) << 64)); - CALL_TEST_FUNCTION( *this, "test_action", "get_sender_notify", fc::raw::pack(data) ); - - data = ((uint128_t)1 | N(testapi2) | ((uint128_t)(N(testapi)) << 64)); - BOOST_CHECK_THROW(CALL_TEST_FUNCTION( *this, "test_action", "get_sender_notify", fc::raw::pack(data)), eosio_assert_message_exception); - -} FC_LOG_AND_RETHROW() } - BOOST_AUTO_TEST_SUITE_END() diff --git a/unittests/contracts.hpp.in b/unittests/contracts.hpp.in index 72be3742cb9..4f09e318ddf 100644 --- a/unittests/contracts.hpp.in +++ b/unittests/contracts.hpp.in @@ -41,6 +41,7 @@ namespace eosio { // Contracts in `eos/unittests/unittests/test-contracts' directory MAKE_READ_WASM_ABI(asserter, asserter, test-contracts) MAKE_READ_WASM_ABI(deferred_test, deferred_test, test-contracts) + MAKE_READ_WASM_ABI(get_sender_test, get_sender_test, test-contracts) MAKE_READ_WASM_ABI(noop, noop, test-contracts) MAKE_READ_WASM_ABI(payloadless, payloadless, test-contracts) MAKE_READ_WASM_ABI(proxy, proxy, test-contracts) diff --git a/unittests/protocol_feature_tests.cpp b/unittests/protocol_feature_tests.cpp index 13f07ba5e0c..7985cb4cd01 100644 --- a/unittests/protocol_feature_tests.cpp +++ b/unittests/protocol_feature_tests.cpp @@ -966,4 +966,57 @@ BOOST_AUTO_TEST_CASE( forward_setcode_test ) { try { c2.produce_block(); } FC_LOG_AND_RETHROW() } +BOOST_AUTO_TEST_CASE( get_sender_test ) { try { + tester c( setup_policy::preactivate_feature_and_new_bios ); + + const auto& tester1_account = account_name("tester1"); + const auto& tester2_account = account_name("tester2"); + c.create_accounts( {tester1_account, tester2_account} ); + c.produce_block(); + + BOOST_CHECK_EXCEPTION( c.set_code( tester1_account, contracts::get_sender_test_wasm() ), + wasm_exception, + fc_exception_message_is( "env.get_sender unresolveable" ) ); + + const auto& pfm = c.control->get_protocol_feature_manager(); + const auto& d = pfm.get_builtin_digest( builtin_protocol_feature_t::get_sender ); + BOOST_REQUIRE( d ); + + c.preactivate_protocol_features( {*d} ); + c.produce_block(); + + c.set_code( tester1_account, contracts::get_sender_test_wasm() ); + c.set_abi( tester1_account, contracts::get_sender_test_abi().data() ); + c.set_code( tester2_account, contracts::get_sender_test_wasm() ); + c.set_abi( tester2_account, 
contracts::get_sender_test_abi().data() ); + c.produce_block(); + + BOOST_CHECK_EXCEPTION( c.push_action( tester1_account, N(sendinline), tester1_account, mutable_variant_object() + ("to", tester2_account.to_string()) + ("expected_sender", account_name{}) ), + eosio_assert_message_exception, + eosio_assert_message_is( "sender did not match" ) ); + + c.push_action( tester1_account, N(sendinline), tester1_account, mutable_variant_object() + ("to", tester2_account.to_string()) + ("expected_sender", tester1_account.to_string()) + ); + + c.push_action( tester1_account, N(notify), tester1_account, mutable_variant_object() + ("to", tester2_account.to_string()) + ("expected_sender", tester1_account.to_string()) + ("send_inline", false) + ); + + c.push_action( tester1_account, N(notify), tester1_account, mutable_variant_object() + ("to", tester2_account.to_string()) + ("expected_sender", tester2_account.to_string()) + ("send_inline", true) + ); + + c.push_action( tester1_account, N(assertsender), tester1_account, mutable_variant_object() + ("expected_sender", account_name{}) + ); +} FC_LOG_AND_RETHROW() } + BOOST_AUTO_TEST_SUITE_END() diff --git a/unittests/test-contracts/CMakeLists.txt b/unittests/test-contracts/CMakeLists.txt index c5942b66fa9..90b0c6484f6 100644 --- a/unittests/test-contracts/CMakeLists.txt +++ b/unittests/test-contracts/CMakeLists.txt @@ -9,6 +9,7 @@ endif() add_subdirectory( asserter ) add_subdirectory( deferred_test ) +add_subdirectory( get_sender_test ) add_subdirectory( integration_test ) add_subdirectory( noop ) add_subdirectory( payloadless ) diff --git a/unittests/test-contracts/README.md b/unittests/test-contracts/README.md index 9b0138ed7a6..3164e58d81d 100644 --- a/unittests/test-contracts/README.md +++ b/unittests/test-contracts/README.md @@ -2,6 +2,6 @@ test_ram_limit contract was compiled with eosio.cdt v1.4.1 That contract was ported to compile with eosio.cdt v1.5.0, but the test that uses it is very sensitive to stdlib/eosiolib changes, compilation flags and linker flags. -deferred_test, proxy, and reject_all contracts were compiled with eosio.cdt v1.6.1 +deferred_test, get_sender_test, proxy, and reject_all contracts were compiled with eosio.cdt v1.6.1 The remaining contracts have been ported to compile with eosio.cdt v1.6.x. They were compiled with a patched version of eosio.cdt v1.6.0-rc1 (commit 1c9180ff5a1e431385180ce459e11e6a1255c1a4). diff --git a/unittests/test-contracts/get_sender_test/CMakeLists.txt b/unittests/test-contracts/get_sender_test/CMakeLists.txt new file mode 100644 index 00000000000..cd633da3bae --- /dev/null +++ b/unittests/test-contracts/get_sender_test/CMakeLists.txt @@ -0,0 +1,6 @@ +if( EOSIO_COMPILE_TEST_CONTRACTS ) + add_contract( get_sender_test get_sender_test get_sender_test.cpp ) +else() + configure_file( ${CMAKE_CURRENT_SOURCE_DIR}/get_sender_test.wasm ${CMAKE_CURRENT_BINARY_DIR}/get_sender_test.wasm COPYONLY ) + configure_file( ${CMAKE_CURRENT_SOURCE_DIR}/get_sender_test.abi ${CMAKE_CURRENT_BINARY_DIR}/get_sender_test.abi COPYONLY ) +endif() diff --git a/unittests/test-contracts/get_sender_test/get_sender_test.abi b/unittests/test-contracts/get_sender_test/get_sender_test.abi new file mode 100644 index 00000000000..0048a2c7eeb --- /dev/null +++ b/unittests/test-contracts/get_sender_test/get_sender_test.abi @@ -0,0 +1,69 @@ +{ + "____comment": "This file was generated with eosio-abigen. 
DO NOT EDIT ", + "version": "eosio::abi/1.1", + "types": [], + "structs": [ + { + "name": "assertsender", + "base": "", + "fields": [ + { + "name": "expected_sender", + "type": "name" + } + ] + }, + { + "name": "notify", + "base": "", + "fields": [ + { + "name": "to", + "type": "name" + }, + { + "name": "expected_sender", + "type": "name" + }, + { + "name": "send_inline", + "type": "bool" + } + ] + }, + { + "name": "sendinline", + "base": "", + "fields": [ + { + "name": "to", + "type": "name" + }, + { + "name": "expected_sender", + "type": "name" + } + ] + } + ], + "actions": [ + { + "name": "assertsender", + "type": "assertsender", + "ricardian_contract": "" + }, + { + "name": "notify", + "type": "notify", + "ricardian_contract": "" + }, + { + "name": "sendinline", + "type": "sendinline", + "ricardian_contract": "" + } + ], + "tables": [], + "ricardian_clauses": [], + "variants": [] +} \ No newline at end of file diff --git a/unittests/test-contracts/get_sender_test/get_sender_test.cpp b/unittests/test-contracts/get_sender_test/get_sender_test.cpp new file mode 100644 index 00000000000..a3574dfae4c --- /dev/null +++ b/unittests/test-contracts/get_sender_test/get_sender_test.cpp @@ -0,0 +1,30 @@ +/** + * @file + * @copyright defined in eos/LICENSE + */ +#include "get_sender_test.hpp" +#include + +using namespace eosio; + +void get_sender_test::assertsender( name expected_sender ) { + check( get_sender() == expected_sender, "sender did not match" ); +} + +void get_sender_test::sendinline( name to, name expected_sender ) { + assertsender_action a( to, std::vector{} ); + a.send( expected_sender ); +} + +void get_sender_test::notify( name to, name expected_sender, bool send_inline ) { + require_recipient( to ); +} + +void get_sender_test::on_notify( name to, name expected_sender, bool send_inline ) { + if( send_inline ) { + assertsender_action a( get_first_receiver(), std::vector{} ); + a.send( expected_sender ); + } else { + check( get_sender() == expected_sender, "sender did not match" ); + } +} diff --git a/unittests/test-contracts/get_sender_test/get_sender_test.hpp b/unittests/test-contracts/get_sender_test/get_sender_test.hpp new file mode 100644 index 00000000000..632c2905326 --- /dev/null +++ b/unittests/test-contracts/get_sender_test/get_sender_test.hpp @@ -0,0 +1,43 @@ +/** + * @file + * @copyright defined in eos/LICENSE + */ +#pragma once + +#include + +namespace eosio { + namespace internal_use_do_not_use { + extern "C" { + __attribute__((eosio_wasm_import)) + uint64_t get_sender(); + } + } +} + +namespace eosio { + name get_sender() { + return name( internal_use_do_not_use::get_sender() ); + } +} + +class [[eosio::contract]] get_sender_test : public eosio::contract { +public: + using eosio::contract::contract; + + [[eosio::action]] + void assertsender( eosio::name expected_sender ); + using assertsender_action = eosio::action_wrapper<"assertsender"_n, &get_sender_test::assertsender>; + + [[eosio::action]] + void sendinline( eosio::name to, eosio::name expected_sender ); + + [[eosio::action]] + void notify( eosio::name to, eosio::name expected_sender, bool send_inline ); + + // eosio.cdt 1.6.1 has a problem with "*::notify" so hardcode to tester1 for now. + // TODO: Change it back to "*::notify" when the bug is fixed in eosio.cdt. 
+ [[eosio::on_notify("tester1::notify")]] + void on_notify( eosio::name to, eosio::name expected_sender, bool send_inline ); + +}; diff --git a/unittests/test-contracts/get_sender_test/get_sender_test.wasm b/unittests/test-contracts/get_sender_test/get_sender_test.wasm new file mode 100755 index 0000000000000000000000000000000000000000..89dc670d5a6550a816a1b0e533962e58ed2497d1 GIT binary patch literal 4180 zcmd5Z)hB|JUE_e}7gt&ZRn);L+A*OiNu!Agt|{V(61d-b!qku+y1ULU;s!b|;;`0ChFnAm#n%olV#YB)ds z>a(|hc;+WEz^(N_)vpL{+0t9Tee?Y*!4!0!-rOA1@g_ff#_<>K&Z}08u!G~ zy4(bmTk$;I&l;Cu+nXs4 z1^h-w0Us#LE`}+%8hxyp0?8aZboI?bBH6Hy`2#sf$*D%F0rH5Gex{L2)|I;m>H|(R zfPrdD=5oE0Slbf?&_TLKcVIQ7Pfqg!>8d3o;?FU-8TwP^GN{T1JM_+A36tzBs93D1 zQB}gF^o4*Y>LBBJQ%(beuHXyMuDB4&Kz=2cQ%pvY{#r`x>kR-qi^KpKj8woaw66;0 zZ?L9t%qi<%hGZ`sFjOqo5crJYwTbYj62nEDRKLXApw~AWB zJog3ZW`d=XO-;iT^pNNdj9ttSOnFMBn*}^`h&6A@JeJvvs=Q$`!6^b8yaR3vj|Lt{ z@tyFwg_7mqjNQ-5vpImk21>=mupUI5HLnl3*~0-qJ{`)QVY6wFyFImUI5jmk1vh62 znb1E;5p&WlY8=!mPS-xGm`?+&5k4_KiEO};KtXWniWynK9$_RY2nNgC%fx-C0)+%~ zdQx+Fo`BP+haJX%luhc=gvai2TUgCGOFMb&#GNpi*wn^m$9`UGW9-ek+zs|@lEuDd zU#9>L5sv!yVO6ovWBcTGmlhhBcl)q;z3YI9Lt`~MET=F#T?Rpj70#qL3`?jYS22eG z61@0;QRY)QbzFLv@2ekgq3tmi1@Kt9V?WknQ=34w;uP%mDZmN~nz=y&gqb4!zC@Q} zHp816WF<#FXJmvbvmlYV!VtX*l%S{OiR<58lkQKv(kz85X)z_;9}uWxo-ye}4_2g2 zjvnB|039-raSKNf5vm$hi{#!yV8C0)R>&0-BDi-7Y}bg-k}SnXGTp>zVKFVU6o>@K z>||($J|&qN*;Bl$uwY<^b>J|vI2GYJPq<=yJOVY=;r0$BKGP_CvJ@#@spvrFfmH7`orr>N{bU z%MaWAw3Jb3rn4~qU&0LGAmJP~*94!&u7b0E6cShr#47~kk7xV<2{RzU4!5sO(5iwp zG%kdRubI*uK z%25t8yzlHF@uNZ!e_truWx}V|SeanNRf0PgI?@H3$(PSZQsFL6j6wN8}$E&~M1Qgy_mMG?E=xqF8v)r*W?v|q9dAVBo9I&9=eJqh#&1Dp zdLEe-CNupfGDq=^m>d8dl2{2O4o`*%2Xi3OCzmfP$-Sei!1?J(ZQ{=~KKw#%Y%;j} z58~AiME<{%zTu%}d#PyI-imE5Ev+mzdxc$XK3uF*`15XC?Ur3$>Dd#_-r^%f4mS^- XJl<{h3JlQrb(@tbt?fF=417v7gC literal 0 HcmV?d00001 diff --git a/unittests/test-contracts/test_api/test_action.cpp b/unittests/test-contracts/test_api/test_action.cpp index ae906b620c1..65a8d31554c 100644 --- a/unittests/test-contracts/test_api/test_action.cpp +++ b/unittests/test-contracts/test_api/test_action.cpp @@ -14,15 +14,6 @@ #include "test_api.hpp" -namespace eosio { - namespace internal_use_do_not_use { - extern "C" { - __attribute__((eosio_wasm_import)) - uint64_t get_sender(); - } - } -} - using namespace eosio; void test_action::read_action_normal() { @@ -275,7 +266,7 @@ void test_action::test_action_ordinal1(uint64_t receiver, uint64_t code, uint64_ print("exec 1"); eosio::require_recipient( "bob"_n ); //-> exec 2 which would then cause execution of 4, 10 - eosio::action act1({name(_self), "active"_n}, name(_self), + eosio::action act1({name(_self), "active"_n}, name(_self), name(WASM_TEST_ACTION("test_action", "test_action_ordinal2")), std::tuple<>()); act1.send(); // -> exec 5 which would then cause execution of 6, 7, 8 @@ -284,7 +275,7 @@ void test_action::test_action_ordinal1(uint64_t receiver, uint64_t code, uint64_ eosio_assert(false, "fail at point 1"); } - eosio::action act2({name(_self), "active"_n}, name(_self), + eosio::action act2({name(_self), "active"_n}, name(_self), name(WASM_TEST_ACTION("test_action", "test_action_ordinal3")), std::tuple<>()); act2.send(); // -> exec 9 @@ -293,7 +284,7 @@ void test_action::test_action_ordinal1(uint64_t receiver, uint64_t code, uint64_ } else if (receiver == "bob"_n.value) { print("exec 2"); - eosio::action act1({name(_self), "active"_n}, name(_self), + eosio::action act1({name(_self), "active"_n}, name(_self), name(WASM_TEST_ACTION("test_action", "test_action_ordinal_foo")), std::tuple<>()); 
act1.send(); // -> exec 10 @@ -301,7 +292,7 @@ void test_action::test_action_ordinal1(uint64_t receiver, uint64_t code, uint64_ eosio::require_recipient( "david"_n ); // -> exec 4 } else if (receiver == "charlie"_n.value) { print("exec 3"); - eosio::action act1({name(_self), "active"_n}, name(_self), + eosio::action act1({name(_self), "active"_n}, name(_self), name(WASM_TEST_ACTION("test_action", "test_action_ordinal_bar")), std::tuple<>()); // exec 11 act1.send(); @@ -323,7 +314,7 @@ void test_action::test_action_ordinal2(uint64_t receiver, uint64_t code, uint64_ eosio::require_recipient( "david"_n ); // -> exec 6 eosio::require_recipient( "erin"_n ); // -> exec 7 - eosio::action act1({name(_self), "active"_n}, name(_self), + eosio::action act1({name(_self), "active"_n}, name(_self), name(WASM_TEST_ACTION("test_action", "test_action_ordinal4")), std::tuple<>()); act1.send(); // -> exec 8 @@ -351,49 +342,3 @@ void test_action::test_action_ordinal_foo(uint64_t receiver, uint64_t code, uint void test_action::test_action_ordinal_bar(uint64_t receiver, uint64_t code, uint64_t action) { print("exec 11"); } - -void test_action::get_sender_send_inline() { - - eosio_assert(internal_use_do_not_use::get_sender() == 0, "assert_sender failed"); - - uint128_t tmp; - read_action_data( &tmp, sizeof(tmp) ); - - uint64_t to_acc = (uint64_t)tmp; - uint64_t sender_acc = (uint64_t)(tmp >> 64); - - eosio::action act1(std::vector(), name(to_acc), - name(WASM_TEST_ACTION("test_action", "assert_sender")), - std::tuple(sender_acc)); - act1.send(); -} - -void test_action::assert_sender() { - uint64_t sender; - read_action_data( &sender, sizeof(sender) ); - eosio_assert(internal_use_do_not_use::get_sender() == sender, "assert_sender failed"); -} - -void test_action::get_sender_notify(uint64_t receiver, uint64_t code, uint64_t action) { - uint128_t tmp; - read_action_data( &tmp, sizeof(tmp) ); - - uint64_t to_acc = ((uint64_t)tmp & 0xfffffffffffffffeull); - uint64_t sender_acc = (uint64_t)(tmp >> 64); - bool send_inline = (tmp & 1); - - if (receiver == code) { // main - eosio_assert(internal_use_do_not_use::get_sender() == 0, "assert_sender failed 1"); - eosio::require_recipient(name(to_acc)); - } else { // in notification - if (!send_inline) { - eosio_assert(internal_use_do_not_use::get_sender() == sender_acc, "assert_sender failed 2"); - } else { - eosio::action act1(std::vector(), name(to_acc), - name(WASM_TEST_ACTION("test_action", "assert_sender")), - std::tuple(sender_acc)); - act1.send(); - } - } -} - diff --git a/unittests/test-contracts/test_api/test_api.cpp b/unittests/test-contracts/test_api/test_api.cpp index 9679a8e6b77..417c89a5da4 100644 --- a/unittests/test-contracts/test_api/test_api.cpp +++ b/unittests/test-contracts/test_api/test_api.cpp @@ -42,8 +42,7 @@ extern "C" { WASM_TEST_HANDLER( test_action, assert_true_cf ); if ( action != WASM_TEST_ACTION("test_transaction", "stateful_api") && - action != WASM_TEST_ACTION("test_transaction", "context_free_api") && - action != WASM_TEST_ACTION("test_action", "assert_sender")) + action != WASM_TEST_ACTION("test_transaction", "context_free_api") ) require_auth(code); //test_types @@ -72,9 +71,6 @@ extern "C" { WASM_TEST_HANDLER_EX( test_action, test_action_ordinal4 ); WASM_TEST_HANDLER_EX( test_action, test_action_ordinal_foo ); WASM_TEST_HANDLER_EX( test_action, test_action_ordinal_bar ); - WASM_TEST_HANDLER ( test_action, get_sender_send_inline ); - WASM_TEST_HANDLER ( test_action, assert_sender ); - WASM_TEST_HANDLER_EX( test_action, get_sender_notify ); 
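   // (Sketch: the WASM_TEST_HANDLER macros are assumed to expand to a guarded
   // dispatch along these lines, so deleting a handler line above is enough to
   // drop the corresponding action; the real definitions live with the test
   // contract sources:)
   //
   //   #define WASM_TEST_HANDLER(CLASS, METHOD) \
   //      if( action == WASM_TEST_ACTION(#CLASS, #METHOD) ) { \
   //         CLASS::METHOD(); \
   //         return; \
   //      }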
// test named actions // We enforce action name matches action data type name, so name mangling will not work for these tests. diff --git a/unittests/test-contracts/test_api/test_api.hpp b/unittests/test-contracts/test_api/test_api.hpp index 6de8c87149d..bbcf9965352 100644 --- a/unittests/test-contracts/test_api/test_api.hpp +++ b/unittests/test-contracts/test_api/test_api.hpp @@ -75,9 +75,6 @@ struct test_action { static void test_action_ordinal4(uint64_t receiver, uint64_t code, uint64_t action); static void test_action_ordinal_foo(uint64_t receiver, uint64_t code, uint64_t action); static void test_action_ordinal_bar(uint64_t receiver, uint64_t code, uint64_t action); - static void get_sender_send_inline(); - static void assert_sender(); - static void get_sender_notify(uint64_t receiver, uint64_t code, uint64_t action); }; struct test_db { From da8b444f1bb418362514885d0154e0d5b73de5fe Mon Sep 17 00:00:00 2001 From: arhag Date: Fri, 12 Apr 2019 15:29:50 -0400 Subject: [PATCH 371/680] undo changes to committed test_api.wasm --- unittests/test-contracts/README.md | 2 +- .../test-contracts/test_api/test_api.wasm | Bin 69868 -> 69134 bytes 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/unittests/test-contracts/README.md b/unittests/test-contracts/README.md index 3164e58d81d..13937d4b107 100644 --- a/unittests/test-contracts/README.md +++ b/unittests/test-contracts/README.md @@ -2,6 +2,6 @@ test_ram_limit contract was compiled with eosio.cdt v1.4.1 That contract was ported to compile with eosio.cdt v1.5.0, but the test that uses it is very sensitive to stdlib/eosiolib changes, compilation flags and linker flags. -deferred_test, get_sender_test, proxy, and reject_all contracts were compiled with eosio.cdt v1.6.1 +deferred_test, get_sender_test, proxy, reject_all, and test_api contracts were compiled with eosio.cdt v1.6.1 The remaining contracts have been ported to compile with eosio.cdt v1.6.x. They were compiled with a patched version of eosio.cdt v1.6.0-rc1 (commit 1c9180ff5a1e431385180ce459e11e6a1255c1a4). diff --git a/unittests/test-contracts/test_api/test_api.wasm b/unittests/test-contracts/test_api/test_api.wasm index 1ff6ec5b96155d4788de43e71ca1586b67657436..9d81f87f7aab5ab1300c99930bf976dda3a8d430 100755 GIT binary patch delta 12331 zcmcI~d0Z67w*OSmu=WhJ8zAd|$R^670aRR?s4?m@f*QkBpN@zNA}HXJb#RLt3R37` zf{Jm&T{CK2;~oT2Zr3_ci$hs-|(TT&Qj-8ojO%@ zs;Y0VHY~0(l-<(b@O*yfohQuxhmfV$&u4S_DAz>3%r!rB#zrPubuwjcp3KEEX;SXX zxAz&3Bta+ZyhWiq{h4*LH7CqqW;s$H9`F7d>&v&g z_w(x?#(J8VS!ZSncc@-8>!~(x;?cZkpD>+Cr|&N7LLn$hsO%ml5VO!k2Yw0Eo+h0r znsuV9&a9ILh+aC<10^kkoThfSC3kK>p-(! zHs^%v#Bj!}t#tAc-GnfGxX!FM3(|iRqggKn+kZ5@uV-dn)8r;=#Y>y^WC8r^W*NM5 zi+lx+=0}tHX%%h zp6lc!c%gU6k(OQf@YPWH0f5 znB&o{h2}UW0CzBSAuHg$!-9h{FmAGxgXU-+AUDrZoB$({!DodPw#(EJ%fECDXw4Gi zs0Qh|?L_0@t#-3Qez{dLE8K3h_^1L zRB6cIfC9J!M20&wMmb5WYvYebi?J!ZSG(52KTG(;b^$5Bl`si2O!ml$Rz_zB%Tdee zvV+vbiKLW?#E-OVDImMju4RyeG1`eyDJzUk;uOZ%xR$97#-lnXP8DcS_mMME1r#|m z;sQwFN;NwW7m(=?_oH(S6=)FmlQU5T8i>C+6IDRb6&J7Q>ZfMo;{)oOzMMHVT?HD% zt#&4=fFkZld>^)+pY7X`zn$c5AD9rtJP$8Y?S5_%e?K*uu4Whhd}=IDd*%Y$%zcvjx~*TzWW6+!H%UmdZ%R7Ny!U8T z&Mjhc3JCJE@pgO4Kqej62%3hGcArjW!TVez4I}M6Q|B?^YAHY4r9_iGijV3#S*R=J z#oeR5ca%9=PSQtlQ@6K;BV|OAikCF%fkIdW5201bd`3!) 
zPa`cvQv?&Kk&a$1`eo7yKbI`PYjp9VC@0NqA z_Ddl)b#%)?D`vc;Zs)^@=sBs#hU4>gx!;GrbSI z?~n8jNE0$r*ixQ@^ew(7qa(cTD$@1bcVK6>l@CN}=cNO?v;F+s!0x6a6{-X7`To~G z^nAp?@D?$GWY)z9)r?D{pM84sO1t6msS5t$psrroJzAxe8&_AI>YOFlR`7QQ&1&X^ z3N?skgM6`qrwkq|T&duz2gk56{FA|*uwc8rnB4YzXnK>;Cf-(Omf2)w@4|jADO;c} zNr*D*(sapsvuu>G2ff%n=q_r^Rcg@AqHcDyH-;;FNJU_kI z4HF?@h(DXp`wdBCGr4t0q)YiyCXL~nhoquiKMV-#9$bCqwoN^)R-k-~d}$I3i4Hl>N%^c8o3No+BbyDNh)&jg95k zNA!Zs$dTRQ9fV)POGi!yhFKz%FrYb4wIrcbXz7A@6QTquOmAXFK|~Zt;~|*!NkVf(rU(fqOoN~mQ#m zl~yqSv|{b&E_DO8f-3(&l~IDDvsVH`ow=eEu4Tk^ow!f}SJyvr#VcHu#PtPnp#-j1 z|HL(2;lhMU63!8qI<@w`5@>bO_(Oui^%lwckhoByPJMzhb?QTOY5>q`r_MF*)N!>_ zE2;7ks*DmSx^RYr*_uhlA7qcWCh%?43w<$DzY-<8d*d$eMNQuzz2TuGHtLi;SQ1t&w>yo~0qXSNXxtl5Z!oP1*vrgxGx94Drs)|}`JQG8#o*IU^e%2BuuM`86qsEgJuwXtN~U?@SkjNi3J zpyuHPfk?kv5a+dmnnq1P0qxH(ZoyXYxP>A7^*O%$KyfVAfFFtz!>XCT(4F=JKkal& zN83O;+S2k4I~VZMg>OXKr);rf2_{!_lL=b8^v;p>O_Ivkci7>SAo^=mf00 zjvsil6*_Y5&DMSyvIJ@i(d^pO)KqSQa&wf?r@*<`1nu4Cx|8DG^JoWLG>;T-n)f$~ zAI~d+;sx`EK=C*8yIAY(rkb^fdP>yEn0fUFPwg0uRaQMlr#2=j*9s?MEuzFI*b-@sMDIn6Wp$^~XL_doijPB}5CuhKHqm zq}{h<6l1;kD~m$BzpRAwEu|ARyzgQquUSO59v?62iL}Y$zDgR#t&5|z(#Q2B-%`TK zr4F9$3jAehIL;;?ltk?qyjPj#S#|hji7l6yHsx5REcW z$N+bvJ=Il$t1fGgvt^hq4tyE5K}a{)qLF@T>yET(B}qxHB#fo90dtBl_Ma*b3hWf$ z_Vx&9_p0iL3zwl)`6#cL8qXh91-2~m$05yt=^JKvPQ9VS-4a&Z4|)8G6=ca3|8B{Z zv_M3xOhTwWd}Ro$;jgds^@OkK$#tcTeC5h%u#0!Rbw@XqVgOIKK^;DRh*81HhT|W>-2X(1O!{ zbF5np&dTz=xA=_B0l|vFk6^=qqe66YV0&Xxb$_C);6u)X{4ZipCXx3S&jyV^!5k%Of4UtRuk9hZ%D&XaK}# z140GWCtWaC1TyXh;xW8Oi+#^W?_QVoA21`1I|->>A)xKD*)$|ebG?5YF}i)zbVBufnKy> z7QeAC2=$)sBR&56V?*|11xpfgLTB-;@SH{y}HUfR)8u$ zgpWO{;T8L5K&j7xq3sXQ(j8Cd8Zs+&(Xn(oZc^({v$O6N3+5gNnUEGNuWQi^4G+BwSejwW@kSPq#Q(FOh&t=B1sps#Ly z_eGy{`Jh5p3GQhpXV1M*^G(UMxz=>~(sF+7sK4;@a{lP3e@xX|B!>8~c}45p)q~XT z1IrWB<>N#n*SM}M!QGApCuA7u@kqY09+78?TA-L1Zb^$}3I*^AJ5n`(=SMR;Sj|G( z{AE}6EV^@U^VGrE9)+}Bw>I3-8e|dxFPpRT+Y%zDoA4@wIh(e?Q6b&r2M}kO%8j-) zH;?HuOgd?@F@Vnd%|ZC_?5dJ2O4kO{vsK!@t2?i#k_Ri2kNte5(;w?K1_KyuTfSJc zPPK)}3-BL%7H@aJ-hj`(eepw;Kf~n9yPh(0@sx+vUxXu6&4)oK;)I$V#Utk zr<>dXA3Lz^#73=&9)Rn1S{)TKOwHx1iX>l>bpP`U-#)nb?)mN3AtpSYrIj4I`Q)(z zr<;5LU;jTP->LL;lM(QPdj1SkGxX>XmJV|9vs(U*mj9yVbG13@Tcs6tX!$Y9jXvD- zWUw{eWP<1hAIm_%jpLGHECk>}+J^f6rkmUVpZM+Wy3I=eGE9>E`sw%x|6?7}kuW?5`i5bQ8gc=YR7^ z9j*+MH(zxk#J4E|K5QF;G|kw=QJij+xa9+%aBnpKB)#Yu@l2X)^ddR~>r|sjls{9f zZ{({#Xc6wJHAtCs86fVNhe6ibSTQeA@-d3M%c+1SGJ=R`{Yc5z=qXAEnbCu{I29(k zBmPvo?gp+HKJiqN*Dab_@t8j-;83^oR7*V$rI92WcH&^I`iYRO#B8;`5JYR9s$W9c=RA-+vg&Ugw@4#W645 z>7yl0mH6a_au#q*_p2X;H=)5g^Zl(izr3{f_w7o>rl(_C%W@eG%$_}T%FpYFy)Ln*ti~ zXPDxoD*p57R;>uX?32nLe^R=VZo)Y>?eW52e!Z{28K!p9ES~alt0shB@MLMlV~sC} z=YAY$9f^UxLr)#Xcxuq8$8Rp5p;|5o;LqD?znH0lGVZEVBtD}7R!?IriQYQqn|vg7lp1u6s`24S|fCYKOXnVld|O ziM1*ESBz1-uGZ`lF1>1uoHbjNd=ghOOfvX9L*|o_co3xHQ$~sz#{+!XEB`j3Fc* zm&=+Z6fKg75N<1GoTu4EiR(zI*h}6;lh7C}O0OA%9P83fW(?%)+7D*7AU zMQIN+x;d)gy;tz({b~a}LXEDXG=mvksC9UHnQ^gSuyebH)wjzaO2rHz7nc8UZvK3v zSGrt$`GkjNMl)ah`3r0g|N8UJK5t=iV1uD#oYaZ3N$K(q9(Sn|HjkW3ku05;U+Tj4 z@pG42vzPh9OOdIKWQEYyMzWhTar zI$gYSuP>9I)hr-&);j+g+?8Pv{-XKr7?r<32k?1cF6r)Xba9OMGG;XV6=M*F1Y`7& z!g_iia+Us}6QwB#@^sP!o_Se)-w5VwFViPHUtBh`JnPOJp9SS#Q6D^l z`Nvn<;T7ojD>-1v{;DU+o4=Z0|DrU6o#r)H2jG15+f}+V3jR6)A0Z9+`uTbedUFb9 zA92&QA)sepql>opuEjv@H`n?&am~0M%uev?>+~vRxRHdk^NoCkJ`qQ`t@mC4=JCC@ zeAzd1aEJ{2HXRpWdEc&a!yWZ0LC-iF--5UOt{+aa@Bfg*cYH^8!rv=t@IUE^YQ@wz z{_vk+D7Cyf9O<;1eUKiziCg3H@AFX1|DF*0zaNV8Eq!WLPXCLsDKkkCs zfwzXTO#Aj*W9USA`!3zG#oeCnLYDw~ZvHlu*WT^}2ERMoh~d;-GKtFOah%oAFjww}_^QPT-vM|$or4f$w;H#EKIMQ*|fir29Nnl;+(l=O7AAHuL@IRA~STFZT+0rjr&ldQsr4fn5 zm)Y9my4eSxwKO7;mg`Gw_^_mBY;}2K8Y#nl6*BCIIDxBv94^a3hKWZPiHa 
zNvPdHQlR}BDTz9WuPxMC3UrD{V{-E?)*{eqi8eZa%Bb9IKo~jG+0Ju zp@?r0MAxDzYSkUKOap5pe74l~rh!FR5$=gDG)2IN*hJJ@MxliGMD)ti5q`v^0$8sQ zNQ5h*A$tsU1BVt-@I(O*%A!EC3y;I1K>mdHUs0g8;E7ihUO@rxv!Wp(k$e=dt)e>` z)lLQ1FS7zhxt-cjJd}!F06RPbcr;Yypbp+eMX&KW6LKf!RUT z`4ticenR3M6V;u5gv7I@==F@#kC1q!6um6?^`wM;gvG0*==F-zkC0_RntDzfGq^|7 z+9sQuZc8sx9tlNvGLB`^c(AOq-E(2j2p4U(2v-&gz zWGaRoV8tv9>O_a1vu}?}O83a%U#t-dnan zca{=>Lx|W6rp+!W$R2IZo}QgGr6_x}Ct9%EmhaBY!r8ZNOWavUHp}+0J4uO%)~L@CcQF}q%*BJ=!8+v}bzLb$Kw{2lw zo-DxTEh6yve3dOoWD)FF6*mGnR%y!>F`WCVNrb;`JMmqvwjBbW@JK^GCB`b7(Fn#v zt5wDqj9~nFwQaZ&j88S>Dj-?4Z3{6TeMe=yM~vURV|z@Dv(~7{Zum}yt+n+tf$^k< zE2VE|ZmzMdFoALIT9xq{G1k~_6XQo3F3t;#x7OOac!6=@I+f81B->&u@d9J5hC54) zch}jj5@YFll`+H{j5}3Y~RKM!t_UtrF%!dSs1JeHk!rRz&0I zDHA8YT#rI1Cz=N5O~}iimS-LW&jH)44NUYMn_V&?n-LnVa3$7*a`Vja!6vBxr2FoifDq3k-w Pl3PfCPBhFLq|^NmWq+#7 delta 13033 zcmch830zdw+y9(9Gi-MTu7b+WFrt7WA{yeBTvBtZ1T|ADMZg6S5OD36v< zDi<`j#x1S1G_}+;x6BpOOf3tu`c-c4_qq2D4At-bzyHtY{k+4+bI*CU^PJ~7%X1bN zt}y&iVJNwy|JkeOw!4p+*=s~*;gEvNoQ%wTormK)VT4})1}owzZt;AY+t~1rE1781 z$@FpeV#c(*yzEK3{mg6I6@3vi=%nYkulpTKFCS5QUT&Bm=wzLbC?wIJMJL;`tOg6q zl6vtFkAJb=e3?gY+Y46K)yyn93sb1W^`b>j+);u~Ok{#hVFW(N%A{Uaomr<(l6B#b z8zn4BvI@j2)YE~TuwBhMQMBkpH=RW%rHI}-;sy|<-l7WaD~^WL#yVLVZ51MPJkv9T z_2Dx-!;!A>Y#Px2uUsDBhV%iB8=lAYO+qYs*^(8Z6C)TSDf@M!t@;R^MQ;(LAw+1= zOJR<4=8yEoa3bXu^)3AUdY`ZeUhEgaSJw|`!ThLiDvxTATLUxlO}-&46z~HItQ)iZ zoyPHTgSCvM@R5yfAbrlil=XE~`HPI@I(`T+F_y>w2%N?Ga$8U!(q%zi*#TY^)Ry() z{=oy;Se_qjWmkB4a8IP$Lc;iy;85g-PVnO$zV_lBLz<&BGNikLZPxTtW?^!0mUM`T z4lib)rcPy}tvWPSC!g14$?$-b%UNg>Fhqv7Mmjk3Hz<}GHWB%wVQswkEN1QD7;=?v z=VETQv||JLvz9pMJ<-yV34m3GztOP(qW+l8Bu2i95}+kHOi=m*?A{mtl%*_m2d$&X zepxj*o@XtH9i$_Y-*vTM^f(eVQWd3>e{jL^dX0CmLVmb$TQ-hAX&ePxMMV@s@s$ys z*&u#7q6CnfCSmM4U)ZEO(hIgQelkS#Jfd0TGly5x=g1K+MZUxe__oN%An2%%61rOr zUCjnK{TQkQaw=%YG!N&KBHM!gc+_B&qoeyHZ;Nh<$|?%TVIN0_A-xtI zfb>ChYtXc8HV`yL&Dy!jM^CFO|8lcpMSlOF0<;=xM35Rp7UiPF<>pk!@68i!-!MJ1 z2~%o1FFYy*_L0Ax4-deMaapn?34s<}v_3H$fM|VOxB$2%D_Pf8uR|^>%c|G%7NM=) z!-7t)V3Hj_PTjQOoB3Dg*xKqPi;jr=!4Q!pdq{<&8ExY$Lqie|5*P&wA z4}>0)J1UkvvLtGXN~a87YKoo=sz-NFQT|rFiVAi?fqB1<2IoqlZX=Zus1rW6bw{@vgk==9upJYe!bEHoy_v(^-QWy9@mk-b=z2q{DFq_%!<~L&$qHHS;-TM z7smu7Iw=BND3p9HbGQpi$t%oNF#!T(JctPhamr}tLa5{w!d5MUi12y!GrC34Kqvjz zIu{1DP+RLYSEO2~Mc#Hrss*L4t7;_=)lWcdP)BE7Ma-oxwNNW=kt;NlM{g$lMu(3#+{Qs)8JKrSL>u<4qK}n z=JRE-N`0xA$=;y;@R>Ls)A}^~zO+p*O?Dc|=CLh(9A#}jXI{T)93>0+3%y$K;_e=f zl?i>BG^0e7q{&ZvFVQ0SlxRRze43+W`?*Y5S;F7%Fkh3N&U<$pD{L*{`#MJZRBPm_ z?&Guw+Rur{|tXDbyP zf8lv&GZhEBHgjlAmE_{O%_N*wrG~7j6XqwZTBU-l)6AiK`3pVSNq+N%Hb{f|B(k%- zAJPkaL7#T$afgxK?geY~%w8$nYh4=5*+PE1U^Z0^(joS7V5%Xep zmR-nDvSi6Sh(?hbC0NvzA8|*55TOJg7yET)Z}6c0fowic=pS$V1k{Y57}VH!3b_%y zynjb%*Ku%wk#s-BtNX_p&nuWFFOuFvUyKyam-Bl4{P@ZjLyVUc)V>!x0QKO-X6zDg zI^ad$QHZ|`Qk~RMJRnX0(f)xASQbAwuqRu9-+0G2<{R zF3k*TUJUgtEJ`p)#pD-;c(sKN>#X8%im4SfMG9?sx6J36otJ0!VoUh7%+Dd_-QkFj z`Q_oSLyf^B#v(mFVj%m1M~-|QhFCKaK{#(Zsw>j$Q6b)Bbb&%6KMIYwebfXn$k$u) zr?Z|yt}v?r-F?f2e!S!8P>F)YgvaqY*@?*Q&u#`weV<*ziuu3BbO6Wh zF$+M~EhnCpIws{rQm|W*I}tt%%xezqy5+40+fRAz!FF|Qm@|;$ea7|$+dE_1!{k@T z(#F7;--RvXJ@Yvt?ko9i*>c{npha}zyKVJx&P^eVrAuZ~JF7vqbwk{006yuOj#R(ikp}3qWr0IBLo5 zadAik$6GKFbQmAQdX=6J<+H~}vsHZS_@+RO3~67_YE*(R#9dCGtGgr*-eW>T& z+2_<^>{6?#10#p-xp_ix_-YoIJXQO7NJ+q*A*x$Mg&e59{|i-=LUoR)s)!0XP(@7) zwze}p)p?>qP8yLQTtc40^79C~hXVXNjU>4WGq)@AM1>h+C_y+$%*tp=5YA}K zC&Bzu4KucLDs!;fj9aY~?h8~kc0D>7IVgB_Vx7hwP#b%NsP+>Ta-a&HR7*8`(Ns;= zO`_UORLFs9;H28dp1eL)ll3D}Z6zw?sIl9Tr^arDzO?SCjZIY5J+W!SCks!PXuJ=J zcRleUN4)Q9yz9XmSHl~p^1gZftDsG{D-TuQ*C}nCX9xFc;zkbo`AiN{9Q#w*jgslr 
z4^|FQsFo1b5~4y5RJ|q#DK2|+X2sl3N@iB57FkPFB}9cBX;OwfwW0)?(1u8Dg}Ny=E7tSMs~B?JyT3>Src~g+&Hr%fIq4$Sto&U<=&!^(+j! zt*-}}&)QjAU4O{Kw%{Z`|9VrUg(RBf{ppkj2CxVTLNJa=lpTFk%_gslhL(?D+YA!WVJeetfxgOKPdN?HjsdE5 z{NAep9y)F3$ct?>XJ4?Xb9M{fb!zdkekcg&mCh_MR(*7R1P zDONY^!N{X=w6AlcT$H*wmauf+oBmI>@mS5m^|XrfA7=;ig;N^wDKiGbROe@8qYYhW zKIhw4Hia8a0koEpb>Nt9`I?!H;fv2^(%Sgb%!WwaW?_YNM9=adZQ^Hlfi{AxHrTA! z(WY$nKWPI`eK&hPsxO=~0M#Fx(<$O_Zc&aAbc$PQPBU7}4Q!&=MR6(lQuT2km(Zi1 z5_)9MrO@L4*;~SRV$uJvFZkY~c)xB|eK+~+5+&rf(s_|yN^`Jz!&u8_VL;`GZnu}9 zx3MWWx>}8+EAKU&L;s>$upa=kRn za|@g_OO@P#1u4KZSV;IZCEcW?e=O|bUcp4ELAV<~UYW#;ieq7w-NkLx(L#wa{!h!Q0A zyug4X1a!)>)@(C3mA3?4$MSwi7nMgN-CLf7^kF$k313W*UW;oX+X&*gxcCEsea@FG z9fI0_U)meTY+aY-B2Vk{jb$ML;{q{c40KFlcvd~HMUaGf`6v%uz7lQdxFLj}UEU}{ znO|xwM01CpPGkI)vlMt39$(F+6$xxBe|ANfbt6MVaPX06H7aK!7Gr|Y(PE_OMo*m> zHk`X+0_0EI;|uGQ@9B>Gl|89^%gUj!nY5|}bbNYMESfN8m13=iA4Wuhn}xesJSkjazZnLx9{LZ!(o*QY!#ftncx4ah& ze5dzl4>R<=F)dTV1$CqvF;WF>q$(l1QI5*aLchYaMLDZGwgaae58r!U8Kv8mQMz3j zrQ7?%7gx8_7R7Bxq_1{dO7_<&$EDI9p0tCehQUfYen$d3#W(GEi>85{ZJS}RVK1Vy z;yfK&5{oce>VZD2f~X_o_9TJeSaP@Rq%GeMJM+OhWLGpQEZP-`blWaJw>?zh0e*B> zNU(Ftw>VGNBWe%cxnXxq*oVxD^&(rIxspj<7Q-f~6B=2K0oXm~5W%o3eQu_X2-;Zcojzec82AGvPMb7(;Je5v{IpSTu@clSkY3zGH9wGV9 z?=OVLb;tAj!G&m`3G)Mm^Cr4bz*(wZ3mF`ay~D`F7}_?-{2vW%o%koi{!K1@6N?SL z4()qJ`~IqZ7q3?FyR`3l?fZxJT~?vu_o`o;d`Sa5(!Q(SQVI5H-zx1p?QIqQmi9fM zeQ#*rS=dICVr#YUVeNZM`xdQH@f)=7QRUl-$vl@na@0TE{Ctn%CqLW$d4X&-8Q?BOM?=&SW?VSF>=O0FQA=$8RT1N--X?upW-lDiAKv(H~GC993atpowCQebmzwTo+Qo0Q$d> zyx3VxF$*ZpTYPcn0>Y|A?1+|B+TXtUi7wU5qHaS@aJc*TG?8wB1y@#g$@6@mQ~fd^;KIjvSO zfNnUGf7SlZ`K$=}yFbowu9L-^0`2ODD>^6z)Lc{$lx= zBWpEovew?RqG@kycJM;k{_gp=*Jx#;f4Fnu7PXveZpc4A7}#6J_8u0I8d0PxB;P-G z`uZ^Ihbk)7 z?28FW2e+CU66gF6Z$5rxQdszEk(!SN&cRI~5rUq2!7msgmRr`rr#inNeesE0%q?+sDC`H}E&z&2eLJif5 zGnFFmaiFiwkE9&@UoeyzPmH~>#uhINj=zy zFFf3=L$Z7lM-X0Jbjr_rzvKkeEB$%uo3E~Y+6geVA!mF!wfOEE>U`wE?;mdS>=tHf zO$f&?2Y!51G}a{HdrwiI&^ z%8%ao_22iEa;iC2n$5Q#XtB^(s<}BX zNO4a(5+B?j4<$R>C}@y%&&t2jTyKgcf!};}pkijNfPEJ?oYjh9gxz^<<1E6anwv>$ znW+V_e7SdT>2F%Qh-lx+H+TG@l`SY=`+VtK)%U69XlV{J#Sqt@e?GMR@x+DfRNr&DrRQe}hG zhUc`7=<+;D7%Fw`ChuIrla9r*#ys~}b9Rd_KNid!eD|>dVb7>WP(@3% zB}4~Q;<3j=TiwOR-OFNBVndQAc9Vahy}T*ZIjwAGrVvqji$jbQmBB|S!|sS)3IB8ad4 zVu-tXnV8PqPRzkQ(!vwX+*g;8?hQdPzUE@Bm;>cyHGqPz7q6gN>xmc({a#CC$%P9{&ML3c8f z$KU%!mu_+y{K!|Iit)6X+(c<6GZ|ePI*b|`>RRtBHT4=qX$C_ChZ#G9FFQTdJ5`=~ z;h-l?1ZZzK_c-$co6BE1)1l!?%)*EvC>berpif?kT+J)bv}Zy5#+gW#%1viGu@899 zvrX8)c;4B_j&)>NXpte=QXO!J$@EX)Fn;=M3!h(BVB}QO^p6~t7oT%Mepp^meoUAo zH3P)c=K>n22!&-gZSho=>~ldlosiUTQIeHk`Q&WmxIoGgm>w~%Q)uN;z z#E)8kUP=MvKeQnf282a-(yJQmJ#m^dSSL#35yk1GY<~JH%~6U zToqiT{p*s8t?=sX^NT$cY(GBoQYbsd=Uf^9+|^4d;BR%g89T&ZzT8uzcyKwC9p-^o z=#py6l?0?auH-7XAf9wJ72DFtyDtDb;cg2qRn2mz?KPrk&aTC9&s+Yys>&S)JGZLd zMh)|?jlkA5@n!;V_DuqEX-c}}8@joYucsj8*Iz+;?>ZeJcf3JI$dhm6AiaHqAZ@-S z{uSRYN1AwZGt%#F#<9=1|99K5lvQj7!hVFNn>~%&~-bvcwZ3 z9?^)BoL)F;e5Qpc_}vHY=mWn#uwudRe`xW;D;QBM7@5|}QkYp#NGy0v(fi@kzIX=X zf@lpy6<_&qf(;L0T;Od0$MwJHGbXd3AZd;XgE1fX2#!Le77-sdOt_|@RMxlSIReX(HJcfDjs4Lsg<;T9ba_s-O zTSGj4sY6C$@%UxiUmc=;kq`J?hO$7Hlhh^rBdhcABxlg@bg@>j5;Hs>i% z8VH*>ANeFK@(74(ZB3p#4T!5r2v)^6Kk3#2>scLb39|{XzokXS=}!%nKy8{@u5l@) zIViae&(2M=6#}<_a2dJdhGl1>R7RzO!u)aRgjD3C6=Nfw34$lzXAG_;DRE`-Zl(F01ws>Pd&8>5VTtg0yPV?}Qi z+dPGG2CB*e&qI5j!2E2uF%`YXPnbAqGBJaKaJV1^gUgSAxDOS*pK|#T5Eq@IcbdzO zfVjXEy(hc;2zU}ebJq#O`**2-yxzw8n^Owu?$H~Hk~z}yMuFyAdp$jSN+?@m@2+Q& z!p{Xz&w=^=duw}XZX(LMv zLI^MV!f2WK`I#A(%!!%l;|en~yr99a_C-c!5sFvX9Y)rU9kAauvbK#8Er?#!q{95P z9P%$B40Mw;wO`{{RcvXW;>JSlnQqKiaICaXc4Ns6aG)xhV=VcZ1>>>{Eu#vkg^%oC zx-tI*oHvNRG}IE>3@Nlw`)P`%;fWIn71KK21=9wY9jokN?yONF&LqSJ$}o*jbRkg& 
zEzT)KKN`V*M^)Q%+?js>PA)_#Cv8k-O|43`rEl8bbZ1Qkhw=?H$45q!MmQZnf3^SU z4$u9nqP*K^DAH;1YI|o7uGN;ywf!lHF8;n{VK8Ttx(xA zRa~n$;4ax?J;C-!MR||VP^8GJx9m1gu=%T-bFq{!NL?E8rAu!=K})o`TAExWe}wxTsETY-im zMK-Ll4-vt3RK=OcX*km42m5AXTd-DTo1mdck Date: Fri, 12 Apr 2019 16:14:08 -0400 Subject: [PATCH 372/680] use standardized [[noreturn]] --- libraries/wasm-jit/Include/Platform/Platform.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/wasm-jit/Include/Platform/Platform.h b/libraries/wasm-jit/Include/Platform/Platform.h index a646778ae8a..edaeef071d8 100644 --- a/libraries/wasm-jit/Include/Platform/Platform.h +++ b/libraries/wasm-jit/Include/Platform/Platform.h @@ -134,7 +134,7 @@ namespace Platform Uptr& outTrapOperand, const std::function& thunk ); - PLATFORM_API void immediately_exit(std::exception_ptr except) __attribute__((noreturn)); + PLATFORM_API [[noreturn]] void immediately_exit(std::exception_ptr except); // // Threading From 0903cb837426e5f939f9cd993dcba63903dfbd35 Mon Sep 17 00:00:00 2001 From: Bucky Kittinger Date: Fri, 12 Apr 2019 17:01:38 -0400 Subject: [PATCH 373/680] Update CMakeLists.txt --- CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index e35c50a286a..46237575c9d 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -49,7 +49,7 @@ set( INSTALLER_APP_ID "68ad7005-8eee-49c9-95ce-9eed97e5b347" ) if(${EOSIO_PIN_COMPILER}) message(STATUS "Pinning compiler to Clang 8") - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -stdlib=libc++ -v") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -stdlib=libc++") set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -stdlib=libc++ -lc++abi") endif() From 407e2f77f52181b84a0c3771da3d16afbee4e511 Mon Sep 17 00:00:00 2001 From: Bucky Kittinger Date: Sun, 14 Apr 2019 02:57:04 -0400 Subject: [PATCH 374/680] reverting libc++ changes --- CMakeLists.txt | 12 ++--- scripts/eosio_build.sh | 2 +- scripts/eosio_build_ubuntu.sh | 85 ++++++++++++++++++----------------- 3 files changed, 51 insertions(+), 48 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 46237575c9d..0134a71237c 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -47,11 +47,6 @@ set( GUI_CLIENT_EXECUTABLE_NAME eosio ) set( CUSTOM_URL_SCHEME "gcs" ) set( INSTALLER_APP_ID "68ad7005-8eee-49c9-95ce-9eed97e5b347" ) -if(${EOSIO_PIN_COMPILER}) - message(STATUS "Pinning compiler to Clang 8") - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -stdlib=libc++") - set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -stdlib=libc++ -lc++abi") -endif() # http://stackoverflow.com/a/18369825 if("${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU") @@ -127,6 +122,13 @@ set(THREADS_PREFER_PTHREAD_FLAG 1) find_package(Threads) link_libraries(Threads::Threads) +### uncomment after we get Boost to build with clang 8 and libc++ on Linux +#if(${EOSIO_PIN_COMPILER}) +# message(STATUS "Pinning compiler to Clang 8") +# set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -stdlib=libc++ -v") +# set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -lc++abi") +#endif() + if( WIN32 ) message( STATUS "Configuring EOSIO on WIN32") diff --git a/scripts/eosio_build.sh b/scripts/eosio_build.sh index 7710e363802..68ab0848f89 100755 --- a/scripts/eosio_build.sh +++ b/scripts/eosio_build.sh @@ -332,7 +332,7 @@ printf "## ENABLE_COVERAGE_TESTING=%s\\n" "${ENABLE_COVERAGE_TESTING}" mkdir -p $BUILD_DIR cd $BUILD_DIR -$CMAKE -DCMAKE_BUILD_TYPE="${CMAKE_BUILD_TYPE}" -DCMAKE_CXX_COMPILER="${CXX}" -DOPENSSL_ROOT_DIR="${OPENSSL_ROOT_DIR}" -DBUILD_MONGO_DB_PLUGIN=true $PIN_COMPILER_CMAKE 
\ +$CMAKE -DCMAKE_BUILD_TYPE="${CMAKE_BUILD_TYPE}" -DCMAKE_CXX_COMPILER="${CXX}" -DOPENSSL_ROOT_DIR="${OPENSSL_ROOT_DIR}" -DBUILD_MONGO_DB_PLUGIN=false \# $PIN_COMPILER_CMAKE \ -DCORE_SYMBOL_NAME="${CORE_SYMBOL_NAME}" -DENABLE_COVERAGE_TESTING="${ENABLE_COVERAGE_TESTING}" -DBUILD_DOXYGEN="${DOXYGEN}" \ -DCMAKE_INSTALL_PREFIX=$OPT_LOCATION/eosio $LOCAL_CMAKE_FLAGS "${REPO_ROOT}" diff --git a/scripts/eosio_build_ubuntu.sh b/scripts/eosio_build_ubuntu.sh index 514c5f7a9b6..51648f1aaa1 100755 --- a/scripts/eosio_build_ubuntu.sh +++ b/scripts/eosio_build_ubuntu.sh @@ -154,6 +154,48 @@ if [ $? -ne 0 ]; then exit -1; fi printf "\\n" +echo "BUILD_CLANG $BUILD_CLANG8" +if $BUILD_CLANG8; then + printf "Checking Clang 8 support...\\n" + if [ ! -d $CLANG8_ROOT ]; then + printf "Installing Clang 8...\\n" + cd ${OPT_LOCATION} \ + && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/llvm.git clang8 && cd clang8 \ + && git checkout $PINNED_COMPILER_LLVM_COMMIT \ + && cd tools \ + && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/lld.git \ + && cd lld && git checkout $PINNED_COMPILER_LLD_COMMIT && cd ../ \ + && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/polly.git \ + && cd polly && git checkout $PINNED_COMPILER_POLLY_COMMIT && cd ../ \ + && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/clang.git clang && cd clang/tools \ + && git checkout $PINNED_COMPILER_CLANG_VERSION \ + && mkdir extra && cd extra \ + && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/clang-tools-extra.git \ + && cd clang-tools-extra && git checkout $PINNED_COMPILER_CLANG_TOOLS_EXTRA_COMMIT && cd .. \ + && cd ../../../../projects \ + && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/libcxx.git \ + && cd libcxx && git checkout $PINNED_COMPILER_LIBCXX_COMMIT && cd ../ \ + && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/libcxxabi.git \ + && cd libcxxabi && git checkout $PINNED_COMPILER_LIBCXXABI_COMMIT && cd ../ \ + && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/libunwind.git \ + && cd libunwind && git checkout $PINNED_COMPILER_LIBUNWIND_COMMIT && cd ../ \ + && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/compiler-rt.git \ + && cd compiler-rt && git checkout $PINNED_COMPILER_COMPILER_RT_COMMIT && cd ../ \ + && cd ${OPT_LOCATION}/clang8 \ + && mkdir build && cd build \ + && $CMAKE -G "Unix Makefiles" -DCMAKE_INSTALL_PREFIX="${CLANG8_ROOT}" -DLLVM_BUILD_EXTERNAL_COMPILER_RT=ON -DLLVM_BUILD_LLVM_DYLIB=ON -DLLVM_ENABLE_LIBCXX=ON -DLLVM_ENABLE_RTTI=ON -DLLVM_INCLUDE_DOCS=OFF -DLLVM_OPTIMIZED_TABLEGEN=ON -DLLVM_TARGETS_TO_BUILD=all -DCMAKE_BUILD_TYPE=Release .. \ + && make -j"${JOBS}" \ + && make install \ + && cd ../.. \ + || exit 1 + printf " - Clang 8 successfully installed @ ${CLANG8_ROOT}\\n" + else + printf " - Clang 8 found @ ${CLANG8_ROOT}.\\n" + fi + if [ $? 
-ne 0 ]; then exit -1; fi + + printf "\\n" +fi printf "Checking Boost library (${BOOST_VERSION}) installation...\\n" BOOSTVERSION=$( grep "#define BOOST_VERSION" "$HOME/opt/boost/include/boost/version.hpp" 2>/dev/null | tail -1 | tr -s ' ' | cut -d\ -f3 ) @@ -163,7 +205,7 @@ if [ "${BOOSTVERSION}" != "${BOOST_VERSION_MAJOR}0${BOOST_VERSION_MINOR}0${BOOST && tar -xjf boost_$BOOST_VERSION.tar.bz2 \ && cd $BOOST_ROOT \ && ./bootstrap.sh --prefix=$BOOST_ROOT \ - && ./b2 -q -j"${JOBS}" install \ + && ./b2 -q -j2 install \ && cd .. \ && rm -f boost_$BOOST_VERSION.tar.bz2 \ && rm -rf $BOOST_LINK_LOCATION \ @@ -254,47 +296,6 @@ if [ $? -ne 0 ]; then exit -1; fi cd .. printf "\\n" -if $BUILD_CLANG8; then - printf "Checking Clang 8 support...\\n" - if [ ! -d $CLANG8_ROOT ]; then - printf "Installing Clang 8...\\n" - cd ${OPT_LOCATION} \ - && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/llvm.git clang8 && cd clang8 \ - && git checkout $PINNED_COMPILER_LLVM_COMMIT \ - && cd tools \ - && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/lld.git \ - && cd lld && git checkout $PINNED_COMPILER_LLD_COMMIT && cd ../ \ - && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/polly.git \ - && cd polly && git checkout $PINNED_COMPILER_POLLY_COMMIT && cd ../ \ - && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/clang.git clang && cd clang/tools \ - && git checkout $PINNED_COMPILER_CLANG_VERSION \ - && mkdir extra && cd extra \ - && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/clang-tools-extra.git \ - && cd clang-tools-extra && git checkout $PINNED_COMPILER_CLANG_TOOLS_EXTRA_COMMIT && cd .. \ - && cd ../../../../projects \ - && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/libcxx.git \ - && cd libcxx && git checkout $PINNED_COMPILER_LIBCXX_COMMIT && cd ../ \ - && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/libcxxabi.git \ - && cd libcxxabi && git checkout $PINNED_COMPILER_LIBCXXABI_COMMIT && cd ../ \ - && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/libunwind.git \ - && cd libunwind && git checkout $PINNED_COMPILER_LIBUNWIND_COMMIT && cd ../ \ - && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/compiler-rt.git \ - && cd compiler-rt && git checkout $PINNED_COMPILER_COMPILER_RT_COMMIT && cd ../ \ - && cd ${OPT_LOCATION}/clang8 \ - && mkdir build && cd build \ - && $CMAKE -G "Unix Makefiles" -DCMAKE_INSTALL_PREFIX="${CLANG8_ROOT}" -DLLVM_BUILD_EXTERNAL_COMPILER_RT=ON -DLLVM_BUILD_LLVM_DYLIB=ON -DLLVM_ENABLE_LIBCXX=ON -DLLVM_ENABLE_RTTI=ON -DLLVM_INCLUDE_DOCS=OFF -DLLVM_OPTIMIZED_TABLEGEN=ON -DLLVM_TARGETS_TO_BUILD=all -DCMAKE_BUILD_TYPE=Release .. \ - && make -j"${JOBS}" \ - && make install \ - && cd ../.. \ - || exit 1 - printf " - Clang 8 successfully installed @ ${CLANG8_ROOT}\\n" - else - printf " - Clang 8 found @ ${CLANG8_ROOT}.\\n" - fi - if [ $? 
-ne 0 ]; then exit -1; fi - - printf "\\n" -fi function print_instructions() { return 0 From b4f260ebd5ec2872a69893f6c6b54e3cafbdb915 Mon Sep 17 00:00:00 2001 From: Kayan Date: Mon, 15 Apr 2019 11:27:19 +0800 Subject: [PATCH 375/680] wabt optimization --- libraries/wabt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/wabt b/libraries/wabt index bf353aa719c..ae8189a9d45 160000 --- a/libraries/wabt +++ b/libraries/wabt @@ -1 +1 @@ -Subproject commit bf353aa719c88b7152ee09e7f877a507cb7df27b +Subproject commit ae8189a9d45e9453bd947c778bf5f3d7255b0627 From 63a960d507a82a84d6e0d91ed4ce3b68e9a72efe Mon Sep 17 00:00:00 2001 From: arhag Date: Mon, 15 Apr 2019 11:56:04 -0400 Subject: [PATCH 376/680] refactor account and code table objects State history plugin needs to avoid outputting account_metadata deltas when relevant fields do not change. --- libraries/chain/apply_context.cpp | 45 ++++++----- libraries/chain/controller.cpp | 10 ++- libraries/chain/eosio_contract.cpp | 75 ++++++++++--------- .../include/eosio/chain/account_object.hpp | 56 ++++++++------ .../include/eosio/chain/apply_context.hpp | 2 +- .../chain/include/eosio/chain/code_object.hpp | 16 +++- libraries/chain/include/eosio/chain/types.hpp | 21 +++++- .../eosio/chain/wasm_interface_private.hpp | 29 +++++-- libraries/chain/wasm_interface.cpp | 6 +- plugins/chain_plugin/chain_plugin.cpp | 52 +++++++------ .../state_history_serialization.hpp | 42 +++++++++-- .../state_history_plugin.cpp | 2 + .../state_history_plugin_abi.cpp | 34 +++++++-- unittests/bootseq_tests.cpp | 10 +-- 14 files changed, 267 insertions(+), 133 deletions(-) diff --git a/libraries/chain/apply_context.cpp b/libraries/chain/apply_context.cpp index d9e960515bf..7034539521d 100644 --- a/libraries/chain/apply_context.cpp +++ b/libraries/chain/apply_context.cpp @@ -58,10 +58,11 @@ void apply_context::exec_one() r.act_digest = digest_type::hash(*act); const auto& cfg = control.get_global_properties().configuration; + const account_metadata_object* receiver_account = nullptr; try { try { - const auto& a = control.get_account( receiver ); - privileged = a.privileged; + receiver_account = &db.get( receiver ); + privileged = receiver_account->is_privileged(); auto native = control.find_apply_handler( receiver, act->account, act->name ); if( native ) { if( trx_context.enforce_whiteblacklist && control.is_producing_block() ) { @@ -71,7 +72,7 @@ void apply_context::exec_one() (*native)( *this ); } - if( ( a.code_version != digest_type() ) && + if( ( receiver_account->code_id._id != 0 ) && ( control.is_builtin_activated( builtin_protocol_feature_t::forward_setcode ) || !( act->account == config::system_account_name && act->name == N( setcode ) @@ -83,7 +84,7 @@ void apply_context::exec_one() control.check_action_list( act->account, act->name ); } try { - control.get_wasm_interface().apply( db.get(a.code_version), *this ); + control.get_wasm_interface().apply( db.get(receiver_account->code_id), *this ); } catch( const wasm_exit& ) {} } } FC_RETHROW_EXCEPTIONS( warn, "pending console output: ${console}", ("console", _pending_console_output) ) @@ -95,12 +96,23 @@ void apply_context::exec_one() throw; } + // Note: It should not be possible for receiver_account to be invalidated because: + // * a pointer to an object in a chainbase index is not invalidated if other objects in that index are modified, removed, or added; + // * a pointer to an object in a chainbase index is not invalidated if the fields of that object are modified; + // * and, the 
*receiver_account object itself cannot be removed because accounts cannot be deleted in EOSIO. + r.global_sequence = next_global_sequence(); - r.recv_sequence = next_recv_sequence( receiver ); + r.recv_sequence = next_recv_sequence( *receiver_account ); + + const account_metadata_object* first_receiver_account = nullptr; + if( act->account == receiver ) { + first_receiver_account = receiver_account; + } else { + first_receiver_account = &db.get(act->account); + } - const auto& account_sequence = db.get(act->account); - r.code_sequence = account_sequence.code_sequence; // could be modified by action execution above - r.abi_sequence = account_sequence.abi_sequence; // could be modified by action execution above + r.code_sequence = first_receiver_account->code_sequence; // could be modified by action execution above + r.abi_sequence = first_receiver_account->abi_sequence; // could be modified by action execution above for( const auto& auth : act->authorization ) { r.auth_sequence[auth.actor] = next_auth_sequence( auth.actor ); @@ -791,19 +803,18 @@ uint64_t apply_context::next_global_sequence() { return p.global_action_sequence; } -uint64_t apply_context::next_recv_sequence( account_name receiver ) { - const auto& rs = db.get( receiver ); - db.modify( rs, [&]( auto& mrs ) { - ++mrs.recv_sequence; +uint64_t apply_context::next_recv_sequence( const account_metadata_object& receiver_account ) { + db.modify( receiver_account, [&]( auto& ra ) { + ++ra.recv_sequence; }); - return rs.recv_sequence; + return receiver_account.recv_sequence; } uint64_t apply_context::next_auth_sequence( account_name actor ) { - const auto& rs = db.get( actor ); - db.modify( rs, [&](auto& mrs ){ - ++mrs.auth_sequence; + const auto& amo = db.get( actor ); + db.modify( amo, [&](auto& am ){ + ++am.auth_sequence; }); - return rs.auth_sequence; + return amo.auth_sequence; } void apply_context::add_ram_usage( account_name account, int64_t ram_delta ) { diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp index ac63e7a1e4f..f70439d3b23 100644 --- a/libraries/chain/controller.cpp +++ b/libraries/chain/controller.cpp @@ -36,7 +36,7 @@ using resource_limits::resource_limits_manager; using controller_index_set = index_set< account_index, - account_sequence_index, + account_metadata_index, account_ram_correction_index, global_property_multi_index, protocol_state_multi_index, @@ -849,14 +849,14 @@ struct controller_impl { db.create([&](auto& a) { a.name = name; a.creation_date = conf.genesis.initial_timestamp; - a.privileged = is_privileged; if( name == config::system_account_name ) { a.set_abi(eosio_contract_abi(abi_def())); } }); - db.create([&](auto & a) { - a.name = name; + db.create([&](auto & a) { + a.name = name; + a.set_privileged( is_privileged ); }); const auto& owner_permission = authorization.create_permission(name, config::owner_name, 0, @@ -902,6 +902,8 @@ struct controller_impl { authorization.initialize_database(); resource_limits.initialize_database(); + db.create([](auto&){}); // reserve 0 code_id (used in account_metadata_object to indicate no code) + authority system_auth(conf.genesis.initial_key); create_native_account( config::system_account_name, system_auth, system_auth, true ); diff --git a/libraries/chain/eosio_contract.cpp b/libraries/chain/eosio_contract.cpp index cf167cce746..8eb68721726 100644 --- a/libraries/chain/eosio_contract.cpp +++ b/libraries/chain/eosio_contract.cpp @@ -86,8 +86,8 @@ void apply_eosio_newaccount(apply_context& context) { EOS_ASSERT( name_str.size() <= 12, 
action_validate_exception, "account names can only be 12 chars long" );
 
    // Check if the creator is privileged
-   const auto &creator = db.get<account_object, by_name>(create.creator);
-   if( !creator.privileged ) {
+   const auto &creator = db.get<account_metadata_object, by_name>(create.creator);
+   if( !creator.is_privileged() ) {
       EOS_ASSERT( name_str.find( "eosio." ) != 0, action_validate_exception,
                   "only privileged accounts can have names that start with 'eosio.'" );
    }
@@ -102,7 +102,7 @@ void apply_eosio_newaccount(apply_context& context) {
       a.creation_date = context.control.pending_block_time();
    });
 
-   db.create<account_sequence_object>([&](auto& a) {
+   db.create<account_metadata_object>([&](auto& a) {
       a.name = create.name;
    });
 
@@ -136,59 +136,64 @@ void apply_eosio_setcode(apply_context& context) {
    EOS_ASSERT( act.vmtype == 0, invalid_contract_vm_type, "code should be 0" );
    EOS_ASSERT( act.vmversion == 0, invalid_contract_vm_version, "version should be 0" );
 
-   fc::sha256 code_id; /// default ID == 0
+   fc::sha256 code_hash; /// default is the all zeros hash
 
-   if( act.code.size() > 0 ) {
-     code_id = fc::sha256::hash( act.code.data(), (uint32_t)act.code.size() );
+   int64_t code_size = (int64_t)act.code.size();
+
+   if( code_size > 0 ) {
+     code_hash = fc::sha256::hash( act.code.data(), (uint32_t)act.code.size() );
      wasm_interface::validate(context.control, act.code);
    }
 
-   const auto& account = db.get<account_object, by_name>(act.account);
+   const auto& account = db.get<account_metadata_object, by_name>(act.account);
+   bool existing_code = (account.code_id._id != 0);
+
+   EOS_ASSERT( code_size > 0 || existing_code, set_exact_code, "contract is already cleared" );
 
-   int64_t code_size = (int64_t)act.code.size();
    int64_t old_size = 0;
    int64_t new_size = code_size * config::setcode_ram_bytes_multiplier;
 
-   EOS_ASSERT( account.code_version != code_id, set_exact_code, "contract is already running this version of code" );
-
-   if(account.code_version != digest_type()) {
-      const code_object& old_code_entry = db.get<code_object, by_code_id>(account.code_version);
-      int64_t old_size = (int64_t)old_code_entry.code.size();
-      if(old_code_entry.code_ref_count == 1) {
+   if( existing_code ) {
+      const code_object& old_code_entry = db.get<code_object>(account.code_id);
+      EOS_ASSERT( old_code_entry.code_hash != code_hash, set_exact_code,
+                  "contract is already running this version of code" );
+      old_size = (int64_t)old_code_entry.code.size() * config::setcode_ram_bytes_multiplier;
+      if( old_code_entry.code_ref_count == 1 ) {
          db.remove(old_code_entry);
+      } else {
+         db.modify(old_code_entry, [](code_object& o) {
+            --o.code_ref_count;
+         });
       }
-      else
-         db.modify(old_code_entry, [](code_object& o) {
-            --o.code_ref_count;
-         });
    }
-   if(code_id != digest_type()) {
-      const code_object* new_code_entry = db.find<code_object, by_code_id>(code_id);
-      if(new_code_entry)
-         db.modify(*new_code_entry, [](code_object& o) {
+
+   code_object::id_type code_id; // default is 0 which indicates no code is present
+   if( code_size > 0 ) {
+      const code_object* new_code_entry = db.find<code_object, by_code_hash>(
+                                             boost::make_tuple(code_hash, act.vmtype, act.vmversion) );
+      if( new_code_entry ) {
+         db.modify(*new_code_entry, [&](code_object& o) {
+            code_id = o.id;
             ++o.code_ref_count;
          });
-      else {
+      } else {
         db.create<code_object>([&](code_object& o) {
-            o.code_id = code_id;
+            code_id = o.id;
+            o.code_hash = code_hash;
            o.code.assign(act.code.data(), code_size);
            o.code_ref_count = 1;
            o.first_block_used = context.control.head_block_num();
+            o.vm_type = act.vmtype;
+            o.vm_version = act.vmversion;
         });
      }
   }
 
   db.modify( account, [&]( auto& a ) {
-      /** TODO: consider whether a microsecond level local timestamp is sufficient to detect code version changes*/
-      // TODO: update setcode message to include the hash, then 
validate it in validate + a.code_sequence += 1; + a.code_id = code_id; a.last_code_update = context.control.pending_block_time(); - a.code_version = code_id; - }); - - const auto& account_sequence = db.get(act.account); - db.modify( account_sequence, [&]( auto& aso ) { - aso.code_sequence += 1; }); if (new_size != old_size) { @@ -217,9 +222,9 @@ void apply_eosio_setabi(apply_context& context) { } }); - const auto& account_sequence = db.get(act.account); - db.modify( account_sequence, [&]( auto& aso ) { - aso.abi_sequence += 1; + const auto& account_metadata = db.get(act.account); + db.modify( account_metadata, [&]( auto& a ) { + a.abi_sequence += 1; }); if (new_size != old_size) { diff --git a/libraries/chain/include/eosio/chain/account_object.hpp b/libraries/chain/include/eosio/chain/account_object.hpp index 46fd18015de..d7471e02cf8 100644 --- a/libraries/chain/include/eosio/chain/account_object.hpp +++ b/libraries/chain/include/eosio/chain/account_object.hpp @@ -5,6 +5,7 @@ #pragma once #include #include +#include #include #include @@ -17,15 +18,8 @@ namespace eosio { namespace chain { id_type id; account_name name; - uint8_t vm_type = 0; - uint8_t vm_version = 0; - bool privileged = false; - - time_point last_code_update; - digest_type code_version; block_timestamp_type creation_date; - - shared_blob abi; + shared_blob abi; void set_abi( const eosio::chain::abi_def& a ) { abi.resize( fc::raw::pack_size( a ) ); @@ -53,24 +47,37 @@ namespace eosio { namespace chain { > >; - class account_sequence_object : public chainbase::object + class account_metadata_object : public chainbase::object { - OBJECT_CTOR(account_sequence_object); - - id_type id; - account_name name; - uint64_t recv_sequence = 0; - uint64_t auth_sequence = 0; - uint64_t code_sequence = 0; - uint64_t abi_sequence = 0; + OBJECT_CTOR(account_metadata_object); + + enum class flags_fields : uint32_t { + privileged = 1 + }; + + id_type id; + account_name name; + uint64_t recv_sequence = 0; + uint64_t auth_sequence = 0; + uint64_t code_sequence = 0; + uint64_t abi_sequence = 0; + code_object::id_type code_id; + time_point last_code_update; + uint32_t flags = 0; + + bool is_privileged()const { return has_field( flags, flags_fields::privileged ); } + + void set_privileged( bool privileged ) { + flags = set_field( flags, flags_fields::privileged, privileged ); + } }; struct by_name; - using account_sequence_index = chainbase::shared_multi_index_container< - account_sequence_object, + using account_metadata_index = chainbase::shared_multi_index_container< + account_metadata_object, indexed_by< - ordered_unique, member>, - ordered_unique, member> + ordered_unique, member>, + ordered_unique, member> > >; @@ -95,10 +102,11 @@ namespace eosio { namespace chain { } } // eosio::chain CHAINBASE_SET_INDEX_TYPE(eosio::chain::account_object, eosio::chain::account_index) -CHAINBASE_SET_INDEX_TYPE(eosio::chain::account_sequence_object, eosio::chain::account_sequence_index) +CHAINBASE_SET_INDEX_TYPE(eosio::chain::account_metadata_object, eosio::chain::account_metadata_index) CHAINBASE_SET_INDEX_TYPE(eosio::chain::account_ram_correction_object, eosio::chain::account_ram_correction_index) -FC_REFLECT(eosio::chain::account_object, (name)(vm_type)(vm_version)(privileged)(last_code_update)(code_version)(creation_date)(abi)) -FC_REFLECT(eosio::chain::account_sequence_object, (name)(recv_sequence)(auth_sequence)(code_sequence)(abi_sequence)) +FC_REFLECT(eosio::chain::account_object, (name)(creation_date)(abi)) 
+FC_REFLECT(eosio::chain::account_metadata_object, (name)(recv_sequence)(auth_sequence)(code_sequence)(abi_sequence) + (code_id)(last_code_update)(flags)) FC_REFLECT(eosio::chain::account_ram_correction_object, (name)(ram_correction)) diff --git a/libraries/chain/include/eosio/chain/apply_context.hpp b/libraries/chain/include/eosio/chain/apply_context.hpp index d1eac6495dd..828358e73b7 100644 --- a/libraries/chain/include/eosio/chain/apply_context.hpp +++ b/libraries/chain/include/eosio/chain/apply_context.hpp @@ -543,7 +543,7 @@ class apply_context { bytes get_packed_transaction(); uint64_t next_global_sequence(); - uint64_t next_recv_sequence( account_name receiver ); + uint64_t next_recv_sequence( const account_metadata_object& receiver_account ); uint64_t next_auth_sequence( account_name actor ); void add_ram_usage( account_name account, int64_t ram_delta ); diff --git a/libraries/chain/include/eosio/chain/code_object.hpp b/libraries/chain/include/eosio/chain/code_object.hpp index b2e636eb73b..b50835b5293 100644 --- a/libraries/chain/include/eosio/chain/code_object.hpp +++ b/libraries/chain/include/eosio/chain/code_object.hpp @@ -13,18 +13,26 @@ namespace eosio { namespace chain { OBJECT_CTOR(code_object, (code)) id_type id; - digest_type code_id; + digest_type code_hash; shared_blob code; uint64_t code_ref_count; uint32_t first_block_used; + uint8_t vm_type = 0; + uint8_t vm_version = 0; }; - struct by_code_id; + struct by_code_hash; using code_index = chainbase::shared_multi_index_container< code_object, indexed_by< ordered_unique, member>, - ordered_unique, member> + ordered_unique, + composite_key< code_object, + member, + member, + member + > + > > >; @@ -32,4 +40,4 @@ namespace eosio { namespace chain { CHAINBASE_SET_INDEX_TYPE(eosio::chain::code_object, eosio::chain::code_index) -FC_REFLECT(eosio::chain::code_object, (code_id)(code)(code_ref_count)(first_block_used)) +FC_REFLECT(eosio::chain::code_object, (code_hash)(code)(code_ref_count)(first_block_used)(vm_type)(vm_version)) diff --git a/libraries/chain/include/eosio/chain/types.hpp b/libraries/chain/include/eosio/chain/types.hpp index 211b3a745fa..e701e92c4fb 100644 --- a/libraries/chain/include/eosio/chain/types.hpp +++ b/libraries/chain/include/eosio/chain/types.hpp @@ -153,7 +153,7 @@ namespace eosio { namespace chain { { null_object_type = 0, account_object_type, - account_sequence_object_type, + account_metadata_object_type, permission_object_type, permission_usage_object_type, permission_link_object_type, @@ -307,6 +307,25 @@ namespace eosio { namespace chain { }; } + template + static inline auto has_field( F flags, E field ) + -> std::enable_if_t< std::is_integral::value && std::is_unsigned::value && + std::is_enum::value && std::is_same< F, std::underlying_type_t >::value, bool> + { + return ( (flags & static_cast(field)) != 0 ); + } + + template + static inline auto set_field( F flags, E field, bool value = true ) + -> std::enable_if_t< std::is_integral::value && std::is_unsigned::value && + std::is_enum::value && std::is_same< F, std::underlying_type_t >::value, F > + { + if( value ) + return ( flags | static_cast(field) ); + else + return ( flags & ~static_cast(field) ); + } + } } // eosio::chain FC_REFLECT( eosio::chain::void_t, ) diff --git a/libraries/chain/include/eosio/chain/wasm_interface_private.hpp b/libraries/chain/include/eosio/chain/wasm_interface_private.hpp index 1094a1826b0..eef846a7537 100644 --- a/libraries/chain/include/eosio/chain/wasm_interface_private.hpp +++ 
b/libraries/chain/include/eosio/chain/wasm_interface_private.hpp @@ -27,9 +27,11 @@ namespace eosio { namespace chain { struct wasm_interface_impl { struct wasm_cache_entry { digest_type code_hash; - uint32_t first_block_num_used; - uint32_t last_block_num_used; + uint32_t first_block_num_used; + uint32_t last_block_num_used; std::unique_ptr module; + uint8_t vm_type = 0; + uint8_t vm_version = 0; }; struct by_hash; struct by_first_block_num; @@ -65,9 +67,18 @@ namespace eosio { namespace chain { const std::unique_ptr& get_instantiated_module( const code_object& code, transaction_context& trx_context ) { - wasm_cache_index::iterator it = wasm_instantiation_cache.find(code.code_id); - if(it == wasm_instantiation_cache.end()) - it = wasm_instantiation_cache.emplace(wasm_interface_impl::wasm_cache_entry{code.code_id, code.first_block_used, UINT32_MAX, nullptr}).first; + wasm_cache_index::iterator it = wasm_instantiation_cache.find( + boost::make_tuple(code.code_hash, code.vm_type, code.vm_version) ); + if(it == wasm_instantiation_cache.end()) { + it = wasm_instantiation_cache.emplace( wasm_interface_impl::wasm_cache_entry{ + .code_hash = code.code_hash, + .first_block_num_used = code.first_block_used, + .last_block_num_used = UINT32_MAX, + .module = nullptr, + .vm_type = code.vm_type, + .vm_version = code.vm_version + } ).first; + } if(!it->module) { auto timer_pause = fc::make_scoped_exit([&](){ @@ -111,7 +122,13 @@ namespace eosio { namespace chain { typedef boost::multi_index_container< wasm_cache_entry, indexed_by< - ordered_unique, member>, + ordered_unique, + composite_key< wasm_cache_entry, + member, + member, + member + > + >, ordered_non_unique, member>, ordered_non_unique, member> > diff --git a/libraries/chain/wasm_interface.cpp b/libraries/chain/wasm_interface.cpp index 7f363154ed0..cedb2f444fa 100644 --- a/libraries/chain/wasm_interface.cpp +++ b/libraries/chain/wasm_interface.cpp @@ -212,13 +212,13 @@ class privileged_api : public context_aware_api { } bool is_privileged( account_name n )const { - return context.db.get( n ).privileged; + return context.db.get( n ).is_privileged(); } void set_privileged( account_name n, bool is_priv ) { - const auto& a = context.db.get( n ); + const auto& a = context.db.get( n ); context.db.modify( a, [&]( auto& ma ){ - ma.privileged = is_priv; + ma.set_privileged( is_priv ); }); } diff --git a/plugins/chain_plugin/chain_plugin.cpp b/plugins/chain_plugin/chain_plugin.cpp index 1e555ece079..e7f754f885f 100644 --- a/plugins/chain_plugin/chain_plugin.cpp +++ b/plugins/chain_plugin/chain_plugin.cpp @@ -2047,18 +2047,19 @@ read_only::get_code_results read_only::get_code( const get_code_params& params ) get_code_results result; result.account_name = params.account_name; const auto& d = db.db(); - const auto& accnt = d.get( params.account_name ); + const auto& accnt_obj = d.get( params.account_name ); + const auto& accnt_metadata_obj = d.get( params.account_name ); EOS_ASSERT( params.code_as_wasm, unsupported_feature, "Returning WAST from get_code is no longer supported" ); - if( accnt.code_version != digest_type() ) { - const auto& code = d.get(accnt.code_version).code; - result.wasm = string(code.begin(), code.end()); - result.code_hash = accnt.code_version; + if( accnt_metadata_obj.code_id._id != 0 ) { + const auto& code_obj = d.get(accnt_metadata_obj.code_id); + result.wasm = string(code_obj.code.begin(), code_obj.code.end()); + result.code_hash = code_obj.code_hash; } abi_def abi; - if( abi_serializer::to_abi(accnt.abi, abi) ) { + if( 
abi_serializer::to_abi(accnt_obj.abi, abi) ) { result.abi = std::move(abi); } @@ -2069,10 +2070,11 @@ read_only::get_code_hash_results read_only::get_code_hash( const get_code_hash_p get_code_hash_results result; result.account_name = params.account_name; const auto& d = db.db(); - const auto& accnt = d.get( params.account_name ); + const auto& accnt = d.get( params.account_name ); - if( accnt.code_version != digest_type() ) { - result.code_hash = accnt.code_version; + if( accnt.code_id._id != 0 ) { + const auto& code_obj = d.get(accnt.code_id); + result.code_hash = code_obj.code_hash; } return result; @@ -2083,12 +2085,13 @@ read_only::get_raw_code_and_abi_results read_only::get_raw_code_and_abi( const g result.account_name = params.account_name; const auto& d = db.db(); - const auto& accnt = d.get(params.account_name); - if( accnt.code_version != digest_type() ) { - const auto& code = d.get(accnt.code_version).code; - result.wasm = blob{{code.begin(), code.end()}}; + const auto& accnt_obj = d.get(params.account_name); + const auto& accnt_metadata_obj = d.get(params.account_name); + if( accnt_metadata_obj.code_id._id != 0 ) { + const auto& code_obj = d.get(accnt_metadata_obj.code_id); + result.wasm = blob{{code_obj.code.begin(), code_obj.code.end()}}; } - result.abi = blob{{accnt.abi.begin(), accnt.abi.end()}}; + result.abi = blob{{accnt_obj.abi.begin(), accnt_obj.abi.end()}}; return result; } @@ -2098,11 +2101,15 @@ read_only::get_raw_abi_results read_only::get_raw_abi( const get_raw_abi_params& result.account_name = params.account_name; const auto& d = db.db(); - const auto& accnt = d.get(params.account_name); - result.abi_hash = fc::sha256::hash( accnt.abi.data(), accnt.abi.size() ); - result.code_hash = accnt.code_version; + const auto& accnt_obj = d.get(params.account_name); + const auto& accnt_metadata_obj = d.get(params.account_name); + result.abi_hash = fc::sha256::hash( accnt_obj.abi.data(), accnt_obj.abi.size() ); + if( accnt_metadata_obj.code_id._id != 0 ) { + const auto& code_obj = d.get(accnt_metadata_obj.code_id); + result.code_hash = code_obj.code_hash; + } if( !params.abi_hash || *params.abi_hash != result.abi_hash ) - result.abi = blob{{accnt.abi.begin(), accnt.abi.end()}}; + result.abi = blob{{accnt_obj.abi.begin(), accnt_obj.abi.end()}}; return result; } @@ -2119,11 +2126,12 @@ read_only::get_account_results read_only::get_account( const get_account_params& rm.get_account_limits( result.account_name, result.ram_quota, result.net_weight, result.cpu_weight ); - const auto& a = db.get_account(result.account_name); + const auto& accnt_obj = db.get_account( result.account_name ); + const auto& accnt_metadata_obj = db.db().get( result.account_name ); - result.privileged = a.privileged; - result.last_code_update = a.last_code_update; - result.created = a.creation_date; + result.privileged = accnt_metadata_obj.is_privileged(); + result.last_code_update = accnt_metadata_obj.last_code_update; + result.created = accnt_obj.creation_date; bool grelisted = db.is_resource_greylisted(result.account_name); result.net_limit = rm.get_account_net_limit_ex( result.account_name, !grelisted); diff --git a/plugins/state_history_plugin/include/eosio/state_history_plugin/state_history_serialization.hpp b/plugins/state_history_plugin/include/eosio/state_history_plugin/state_history_serialization.hpp index 92d7cf6da3e..876cd4f3c7c 100644 --- a/plugins/state_history_plugin/include/eosio/state_history_plugin/state_history_serialization.hpp +++ 
b/plugins/state_history_plugin/include/eosio/state_history_plugin/state_history_serialization.hpp @@ -106,16 +106,48 @@ template datastream& operator<<(datastream& ds, const history_serial_wrapper& obj) { fc::raw::pack(ds, fc::unsigned_int(0)); fc::raw::pack(ds, as_type(obj.obj.name.value)); - fc::raw::pack(ds, as_type(obj.obj.vm_type)); - fc::raw::pack(ds, as_type(obj.obj.vm_version)); - fc::raw::pack(ds, as_type(obj.obj.privileged)); - fc::raw::pack(ds, as_type(obj.obj.last_code_update)); - fc::raw::pack(ds, as_type(obj.obj.code_version)); fc::raw::pack(ds, as_type(obj.obj.creation_date)); fc::raw::pack(ds, as_type(obj.obj.abi)); return ds; } +template +datastream& operator<<(datastream& ds, const history_serial_wrapper& obj) { + fc::raw::pack(ds, fc::unsigned_int(0)); + fc::raw::pack(ds, as_type(obj.obj.name.value)); + fc::raw::pack(ds, as_type(obj.obj.is_privileged())); + fc::raw::pack(ds, as_type(obj.obj.last_code_update)); + + fc::raw::pack(ds, bool(obj.obj.code_id._id)); + if (obj.obj.code_id._id) { + auto& index = obj.db.get_index(); + const auto* code_obj = index.find(obj.obj.code_id); + if (!code_obj) { + auto& undo = index.stack().back(); + auto it = undo.removed_values.find(obj.obj.code_id); + EOS_ASSERT(it != undo.removed_values.end(), eosio::chain::plugin_exception, + "can not find code_object"); + code_obj = &it->second; + } + fc::raw::pack(ds, as_type(code_obj->vm_type)); + fc::raw::pack(ds, as_type(code_obj->vm_version)); + fc::raw::pack(ds, as_type(code_obj->code_hash)); + } + + return ds; +} + +template +datastream& operator<<(datastream& ds, const history_serial_wrapper& obj) { + fc::raw::pack(ds, fc::unsigned_int(0)); + fc::raw::pack(ds, as_type(obj.obj.vm_type)); + fc::raw::pack(ds, as_type(obj.obj.vm_version)); + fc::raw::pack(ds, as_type(obj.obj.code_hash)); + fc::raw::pack(ds, as_type(obj.obj.code_ref_count)); + fc::raw::pack(ds, as_type(obj.obj.code)); + return ds; +} + template datastream& operator<<(datastream& ds, const history_serial_wrapper& obj) { fc::raw::pack(ds, fc::unsigned_int(0)); diff --git a/plugins/state_history_plugin/state_history_plugin.cpp b/plugins/state_history_plugin/state_history_plugin.cpp index 716ba92019a..738d31dfcd6 100644 --- a/plugins/state_history_plugin/state_history_plugin.cpp +++ b/plugins/state_history_plugin/state_history_plugin.cpp @@ -465,6 +465,8 @@ struct state_history_plugin_impl : std::enable_shared_from_this(), pack_row); + process_table("account_metadata", db.get_index(), pack_row); + process_table("code", db.get_index(), pack_row); process_table("contract_table", db.get_index(), pack_row); process_table("contract_row", db.get_index(), pack_contract_row); diff --git a/plugins/state_history_plugin/state_history_plugin_abi.cpp b/plugins/state_history_plugin/state_history_plugin_abi.cpp index 74c7f8d7520..6154b65ee54 100644 --- a/plugins/state_history_plugin/state_history_plugin_abi.cpp +++ b/plugins/state_history_plugin/state_history_plugin_abi.cpp @@ -188,18 +188,36 @@ extern const char* const state_history_plugin_abi = R"({ ] }, { - "name": "account_v0", "fields": [ - { "type": "name", "name": "name" }, + "name": "code_id", "fields": [ { "type": "uint8", "name": "vm_type" }, { "type": "uint8", "name": "vm_version" }, - { "type": "bool", "name": "privileged" }, - { "type": "time_point", "name": "last_code_update" }, - { "type": "checksum256", "name": "code_version" }, + { "type": "checksum256", "name": "code_hash" } + ] + }, + { + "name": "account_v0", "fields": [ + { "type": "name", "name": "name" }, { "type": 
"block_timestamp_type", "name": "creation_date" }, - { "type": "bytes", "name": "code" }, { "type": "bytes", "name": "abi" } ] }, + { + "name": "account_metadata_v0", "fields": [ + { "type": "name", "name": "name" }, + { "type": "bool", "name": "privileged" }, + { "type": "time_point", "name": "last_code_update" }, + { "type": "code_id?", "name": "code" } + ] + }, + { + "name": "code_v0", "fields": [ + { "type": "uint8", "name": "vm_type" }, + { "type": "uint8", "name": "vm_version" }, + { "type": "checksum256", "name": "code_hash" }, + { "type": "uint64", "name": "code_ref_count" }, + { "type": "bytes", "name": "code" } + ] + }, { "name": "contract_table_v0", "fields": [ { "type": "name", "name": "code" }, @@ -439,6 +457,8 @@ extern const char* const state_history_plugin_abi = R"({ { "name": "table_delta", "types": ["table_delta_v0"] }, { "name": "account", "types": ["account_v0"] }, + { "name": "account_metadata", "types": ["account_metadata_v0"] }, + { "name": "code", "types": ["code_v0"] }, { "name": "contract_table", "types": ["contract_table_v0"] }, { "name": "contract_row", "types": ["contract_row_v0"] }, { "name": "contract_index64", "types": ["contract_index64_v0"] }, @@ -461,6 +481,8 @@ extern const char* const state_history_plugin_abi = R"({ ], "tables": [ { "name": "account", "type": "account", "key_names": ["name"] }, + { "name": "account_metadata", "type": "account_metadata", "key_names": ["name"] }, + { "name": "code", "type": "code", "key_names": ["vm_type", "vm_version", "code_hash"] }, { "name": "contract_table", "type": "contract_table", "key_names": ["code", "scope", "table"] }, { "name": "contract_row", "type": "contract_row", "key_names": ["code", "scope", "table", "primary_key"] }, { "name": "contract_index64", "type": "contract_index64", "key_names": ["code", "scope", "table", "primary_key"] }, diff --git a/unittests/bootseq_tests.cpp b/unittests/bootseq_tests.cpp index a3df02b652c..561a04622cc 100644 --- a/unittests/bootseq_tests.cpp +++ b/unittests/bootseq_tests.cpp @@ -213,10 +213,10 @@ BOOST_FIXTURE_TEST_CASE( bootseq_test, bootseq_tester ) { set_privileged(N(eosio.token)); // Verify eosio.msig and eosio.token is privileged - const auto& eosio_msig_acc = get(N(eosio.msig)); - BOOST_TEST(eosio_msig_acc.privileged == true); - const auto& eosio_token_acc = get(N(eosio.token)); - BOOST_TEST(eosio_token_acc.privileged == true); + const auto& eosio_msig_acc = get(N(eosio.msig)); + BOOST_TEST(eosio_msig_acc.is_privileged() == true); + const auto& eosio_token_acc = get(N(eosio.token)); + BOOST_TEST(eosio_token_acc.is_privileged() == true); // Create SYS tokens in eosio.token, set its manager as eosio @@ -279,7 +279,7 @@ BOOST_FIXTURE_TEST_CASE( bootseq_test, bootseq_tester ) { // Total Stakes = b1 + whale2 + whale3 stake = (100,000,000 - 1,000) + (20,000,000 - 1,000) + (30,000,000 - 1,000) vector data = get_row_by_account( config::system_account_name, config::system_account_name, N(global), N(global) ); - + BOOST_TEST(get_global_state()["total_activated_stake"].as() == 1499999997000); // No producers will be set, since the total activated stake is less than 150,000,000 From 5da99d166c9173e069bc6c9741de841b24c3b34f Mon Sep 17 00:00:00 2001 From: Todd Fleming Date: Mon, 15 Apr 2019 14:25:08 -0400 Subject: [PATCH 377/680] ship: prevent spam --- .../state_history_plugin.cpp | 36 ++++++++++++++++++- 1 file changed, 35 insertions(+), 1 deletion(-) diff --git a/plugins/state_history_plugin/state_history_plugin.cpp b/plugins/state_history_plugin/state_history_plugin.cpp index 
716ba92019a..abd757ec1b5 100644 --- a/plugins/state_history_plugin/state_history_plugin.cpp +++ b/plugins/state_history_plugin/state_history_plugin.cpp @@ -53,6 +53,39 @@ static bytes zlib_compress_bytes(bytes in) { return out; } +template +bool include_delta(const T& old, const T& curr) { + return true; +} + +bool include_delta(const eosio::chain::table_id_object& old, const eosio::chain::table_id_object& curr) { + return old.payer != curr.payer; +} + +bool include_delta(const eosio::chain::resource_limits::resource_limits_object& old, + const eosio::chain::resource_limits::resource_limits_object& curr) { + return // + old.net_weight != curr.net_weight || // + old.cpu_weight != curr.cpu_weight || // + old.ram_bytes != curr.ram_bytes; +} + +bool include_delta(const eosio::chain::resource_limits::resource_limits_state_object& old, + const eosio::chain::resource_limits::resource_limits_state_object& curr) { + return // + old.average_block_net_usage.last_ordinal != curr.average_block_net_usage.last_ordinal || // + old.average_block_net_usage.value_ex != curr.average_block_net_usage.value_ex || // + old.average_block_net_usage.consumed != curr.average_block_net_usage.consumed || // + old.average_block_cpu_usage.last_ordinal != curr.average_block_cpu_usage.last_ordinal || // + old.average_block_cpu_usage.value_ex != curr.average_block_cpu_usage.value_ex || // + old.average_block_cpu_usage.consumed != curr.average_block_cpu_usage.consumed || // + old.total_net_weight != curr.total_net_weight || // + old.total_cpu_weight != curr.total_cpu_weight || // + old.total_ram_bytes != curr.total_ram_bytes || // + old.virtual_net_limit != curr.virtual_net_limit || // + old.virtual_cpu_limit != curr.virtual_cpu_limit; +} + struct state_history_plugin_impl : std::enable_shared_from_this { chain_plugin* chain_plug = nullptr; fc::optional trace_log; @@ -453,7 +486,8 @@ struct state_history_plugin_impl : std::enable_shared_from_this Date: Mon, 15 Apr 2019 14:52:18 -0400 Subject: [PATCH 378/680] Revert EXCLUDE_FROM_ALL --- libraries/CMakeLists.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/libraries/CMakeLists.txt b/libraries/CMakeLists.txt index 39d0398305d..54bb2f80e09 100644 --- a/libraries/CMakeLists.txt +++ b/libraries/CMakeLists.txt @@ -2,7 +2,7 @@ add_subdirectory( fc ) add_subdirectory( builtins ) add_subdirectory( softfloat ) add_subdirectory( chainbase ) -add_subdirectory( wasm-jit EXCLUDE_FROM_ALL ) +add_subdirectory( wasm-jit ) add_subdirectory( appbase ) add_subdirectory( chain ) add_subdirectory( testing ) @@ -26,4 +26,4 @@ get_property(_CTEST_CUSTOM_TESTS_IGNORE GLOBAL PROPERTY CTEST_CUSTOM_TESTS_IGNOR set_property(GLOBAL PROPERTY CTEST_CUSTOM_TESTS_IGNORE "change_authkey import_ed decrypt_ec decrypt_rsa ssh logs generate_rsa import_ec echo\ yubico_otp wrap_data wrap info import_rsa import_authkey generate_hmac generate_ec\ - attest pbkdf2 parsing ${_CTEST_CUSTOM_TESTS_IGNORE}") \ No newline at end of file + attest pbkdf2 parsing ${_CTEST_CUSTOM_TESTS_IGNORE}") From ce9a01ab6b150c7d00c8aba3cad73b8e02b84526 Mon Sep 17 00:00:00 2001 From: Bucky Kittinger Date: Mon, 15 Apr 2019 15:51:09 -0400 Subject: [PATCH 379/680] Disable building Programs and Emscripten stuff --- libraries/wasm-jit/CMakeLists.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/libraries/wasm-jit/CMakeLists.txt b/libraries/wasm-jit/CMakeLists.txt index c06e45b5252..fc691f83a95 100644 --- a/libraries/wasm-jit/CMakeLists.txt +++ b/libraries/wasm-jit/CMakeLists.txt @@ -66,11 +66,11 @@ 
endif() add_subdirectory(Include/Inline) -add_subdirectory(Source/Emscripten) +#add_subdirectory(Source/Emscripten) add_subdirectory(Source/IR) add_subdirectory(Source/Logging) add_subdirectory(Source/Platform) -add_subdirectory(Source/Programs) +#add_subdirectory(Source/Programs) add_subdirectory(Source/Runtime) add_subdirectory(Source/WASM) add_subdirectory(Source/WAST) From c0a97e3bb888cfa6b5c43595648a5d57f7aede80 Mon Sep 17 00:00:00 2001 From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com> Date: Wed, 13 Mar 2019 15:45:25 -0400 Subject: [PATCH 380/680] add libicu as dependency for .deb packages Somewhere along the line nodeos picked up a dependency on libicuuc --- scripts/generate_deb.sh | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/scripts/generate_deb.sh b/scripts/generate_deb.sh index e8c22d154fb..9686c904036 100755 --- a/scripts/generate_deb.sh +++ b/scripts/generate_deb.sh @@ -23,9 +23,9 @@ else fi if [ ${DISTRIB_RELEASE} = "16.04" ]; then - LIBSSL="libssl1.0.0" + RELEASE_SPECIFIC_DEPS="libssl1.0.0, libicu55" elif [ ${DISTRIB_RELEASE} = "18.04" ]; then - LIBSSL="libssl1.1" + RELEASE_SPECIFIC_DEPS="libssl1.1, libicu60" else echo "Unrecognized Ubuntu version. Update generate_deb.sh. Not generating .deb file." exit 1 @@ -37,7 +37,7 @@ echo "Package: ${PROJECT} Version: ${VERSION_NO_SUFFIX}-${RELEASE} Section: devel Priority: optional -Depends: libc6, libgcc1, ${LIBSSL}, libstdc++6, libtinfo5, zlib1g, libusb-1.0-0, libcurl3-gnutls +Depends: libc6, libgcc1, ${RELEASE_SPECIFIC_DEPS}, libstdc++6, libtinfo5, zlib1g, libusb-1.0-0, libcurl3-gnutls Architecture: amd64 Homepage: ${URL} Maintainer: ${EMAIL} From 3103f7018aa2251713728a8a7e4de87ebf3bb5a7 Mon Sep 17 00:00:00 2001 From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com> Date: Mon, 15 Apr 2019 16:00:44 -0400 Subject: [PATCH 381/680] bump libyubihsm library to 2.0.1 Previously this was using something newer than 2.0.0 but older than 2.0.1 --- libraries/yubihsm | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/yubihsm b/libraries/yubihsm index e1922fffc15..9189fdb92cc 160000 --- a/libraries/yubihsm +++ b/libraries/yubihsm @@ -1 +1 @@ -Subproject commit e1922fffc15d0720ba08f110a66b9c752774e107 +Subproject commit 9189fdb92cc90840e51760de5f297ac7d908b3cd From b2be6f69c58bed4fd877346a838e49f030f121df Mon Sep 17 00:00:00 2001 From: "johnsonb@objectcomputing.com" Date: Mon, 15 Apr 2019 15:40:42 -0500 Subject: [PATCH 382/680] Added reporting status while waiting for block number to arrive. 
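
Node.waitForBlock gains an optional reportInterval argument: when supplied, a
small WaitReporter callable is threaded through Utils.waitForBool into
Utils.waitForObj, which invokes it on every polling iteration; the reporter
then prints the node's get_info output once every reportInterval polls. A
minimal usage sketch (hypothetical block number and timeout; assumes a Node
instance from a running test cluster):

    # report node status every 20 polls while waiting for LIB to pass block 500
    node.waitForBlock(500, timeout=90, blockType=BlockType.lib, reportInterval=20)
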
--- tests/Node.py | 20 ++++++++++++++++++-- tests/testUtils.py | 8 +++++--- 2 files changed, 23 insertions(+), 5 deletions(-) diff --git a/tests/Node.py b/tests/Node.py index 3e31c396d5f..98916b03ca1 100644 --- a/tests/Node.py +++ b/tests/Node.py @@ -674,9 +674,25 @@ def waitForNextBlock(self, timeout=None, blockType=BlockType.head): ret=Utils.waitForBool(lam, timeout) return ret - def waitForBlock(self, blockNum, timeout=None, blockType=BlockType.head): + def waitForBlock(self, blockNum, timeout=None, blockType=BlockType.head, reportInterval=None): lam = lambda: self.getBlockNum(blockType=blockType) > blockNum - ret=Utils.waitForBool(lam, timeout) + blockDesc = "head" if blockType == BlockType.head else "LIB" + count = 0 + + class WaitReporter: + def __init__(self, node, reportInterval): + self.count = 0 + self.node = node + self.reportInterval = reportInterval + + def __call__(self): + self.count += 1 + if self.count % self.reportInterval == 0: + info = self.node.getInfo() + Utils.Print("Waiting on %s block num %d, get info = {\n%s\n}" % (blockDesc, blockNum, info)) + + reporter = WaitReporter(self, reportInterval) if reportInterval is not None else None + ret=Utils.waitForBool(lam, timeout, reporter=reporter) return ret def waitForIrreversibleBlock(self, blockNum, timeout=None, blockType=BlockType.head): diff --git a/tests/testUtils.py b/tests/testUtils.py index 107be3f087a..b0fb044c4c1 100755 --- a/tests/testUtils.py +++ b/tests/testUtils.py @@ -142,7 +142,7 @@ def cmdError(name, cmdCode=0): Utils.Print(msg) @staticmethod - def waitForObj(lam, timeout=None): + def waitForObj(lam, timeout=None, reporter=None): if timeout is None: timeout=60 @@ -161,6 +161,8 @@ def waitForObj(lam, timeout=None): stdout.write('.') stdout.flush() needsNewLine=True + if reporter is not None: + reporter() time.sleep(sleepTime) finally: if needsNewLine: @@ -169,9 +171,9 @@ def waitForObj(lam, timeout=None): return None @staticmethod - def waitForBool(lam, timeout=None): + def waitForBool(lam, timeout=None, reporter=None): myLam = lambda: True if lam() else None - ret=Utils.waitForObj(myLam, timeout) + ret=Utils.waitForObj(myLam, timeout, reporter=reporter) return False if ret is None else ret @staticmethod From bb0901e88602b789e8a0390553d6e043c30deda1 Mon Sep 17 00:00:00 2001 From: "johnsonb@objectcomputing.com" Date: Mon, 15 Apr 2019 15:41:40 -0500 Subject: [PATCH 383/680] Fixed script to verify that block was actually found. --- tests/nodeos_startup_catchup.py | 32 +++++++++++++++++++++++--------- 1 file changed, 23 insertions(+), 9 deletions(-) diff --git a/tests/nodeos_startup_catchup.py b/tests/nodeos_startup_catchup.py index 03a55936385..5acfc2006ae 100755 --- a/tests/nodeos_startup_catchup.py +++ b/tests/nodeos_startup_catchup.py @@ -92,11 +92,24 @@ def lib(node): def head(node): return node.getBlockNum(BlockType.head) + def waitForBlock(node, blockNum, blockType=BlockType.head, timeout=None, reportInterval=20): + if not node.waitForBlock(blockNum, timeout=timeout, blockType=blockType, reportInterval=reportInterval): + info=node.getInfo() + headBlockNum=info["head_block_num"] + libBlockNum=info["last_irreversible_block_num"] + Utils.errorExit("Failed to get to %s block number %d. 
Last had head block number %d and lib %d" % (blockType, blockNum, headBlockNum, libBlockNum)) + + def waitForNodeStarted(node): + sleepTime=0 + while sleepTime < 10 and node.getInfo(silentErrors=True) is None: + time.sleep(1) + sleepTime+=1 + node0=cluster.getNode(0) Print("Wait for account creation to be irreversible") blockNum=head(node0) - node0.waitForBlock(blockNum, blockType=BlockType.lib) + waitForBlock(node0, blockNum, blockType=BlockType.lib) Print("Startup txn generation") period=1500 @@ -114,7 +127,7 @@ def head(node): startBlockNum=blockNum+steadyStateWait numBlocks=20 endBlockNum=startBlockNum+numBlocks - node0.waitForBlock(endBlockNum) + waitForBlock(node0, endBlockNum) transactions=0 avg=0 for blockNum in range(startBlockNum, endBlockNum): @@ -133,32 +146,33 @@ def head(node): Print("Start catchup node") cluster.launchUnstarted(cachePopen=True) lastLibNum=lib(node0) - time.sleep(2) # verify producer lib is still advancing - node0.waitForBlock(lastLibNum+1, timeout=twoRounds/2, blockType=BlockType.lib) + waitForBlock(node0, lastLibNum+1, timeout=twoRounds/2, blockType=BlockType.lib) catchupNode=cluster.getNodes()[-1] catchupNodeNum=cluster.getNodes().index(catchupNode) + waitForNodeStarted(catchupNode) lastCatchupLibNum=lib(catchupNode) Print("Verify catchup node %s's LIB is advancing" % (catchupNodeNum)) # verify lib is advancing (before we wait for it to have to catchup with producer) - catchupNode.waitForBlock(lastCatchupLibNum+1, timeout=twoRounds/2, blockType=BlockType.lib) + waitForBlock(catchupNode, lastCatchupLibNum+1, timeout=twoRounds/2, blockType=BlockType.lib) Print("Verify catchup node is advancing to producer") numBlocksToCatchup=(lastLibNum-lastCatchupLibNum-1)+twoRounds - catchupNode.waitForBlock(lastLibNum, timeout=(numBlocksToCatchup)/2, blockType=BlockType.lib) + waitForBlock(catchupNode, lastLibNum, timeout=(numBlocksToCatchup)/2, blockType=BlockType.lib) Print("Shutdown catchup node and validate exit code") catchupNode.interruptAndVerifyExitStatus(60) Print("Restart catchup node") catchupNode.relaunch(catchupNodeNum) + waitForNodeStarted(catchupNode) lastCatchupLibNum=lib(catchupNode) Print("Verify catchup node is advancing") # verify catchup node is advancing to producer - catchupNode.waitForBlock(lastCatchupLibNum+1, timeout=twoRounds/2, blockType=BlockType.lib) + waitForBlock(catchupNode, lastCatchupLibNum+1, timeout=twoRounds/2, blockType=BlockType.lib) Print("Verify producer is still advancing LIB") lastLibNum=lib(node0) @@ -167,8 +181,8 @@ def head(node): Print("Verify catchup node is advancing to producer") # verify catchup node is advancing to producer - catchupNode.waitForBlock(lastLibNum, timeout=(numBlocksToCatchup)/2, blockType=BlockType.lib) - catchupNode.kill(signal.SIGTERM) + waitForBlock(catchupNode, lastLibNum, timeout=(numBlocksToCatchup)/2, blockType=BlockType.lib) + catchupNode.interruptAndVerifyExitStatus(60) catchupNode.popenProc=None testSuccessful=True From bbc57fd5047349d1b013bb493fe6fca49848ff7e Mon Sep 17 00:00:00 2001 From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com> Date: Mon, 15 Apr 2019 16:49:26 -0400 Subject: [PATCH 384/680] Protect against used wavmIntrensics exceptions hitting a brick wall on unwinding --- .../Source/Runtime/WAVMIntrinsics.cpp | 40 ++++++++++++------- 1 file changed, 25 insertions(+), 15 deletions(-) diff --git a/libraries/wasm-jit/Source/Runtime/WAVMIntrinsics.cpp b/libraries/wasm-jit/Source/Runtime/WAVMIntrinsics.cpp index a85d0e21fcd..9e38eeebea0 100644 --- 
a/libraries/wasm-jit/Source/Runtime/WAVMIntrinsics.cpp +++ b/libraries/wasm-jit/Source/Runtime/WAVMIntrinsics.cpp @@ -9,9 +9,14 @@ namespace Runtime { static void causeIntrensicException(Exception::Cause cause) { - Platform::immediately_exit(std::make_exception_ptr(Exception{cause, std::vector()})); + try { + Platform::immediately_exit(std::make_exception_ptr(Exception{cause, std::vector()})); + } + catch (...) { + Platform::immediately_exit(std::current_exception()); + } __builtin_unreachable(); - } + } template Float quietNaN(Float value) @@ -145,19 +150,24 @@ namespace Runtime DEFINE_INTRINSIC_FUNCTION3(wavmIntrinsics,indirectCallSignatureMismatch,indirectCallSignatureMismatch,none,i32,index,i64,expectedSignatureBits,i64,tableBits) { - TableInstance* table = reinterpret_cast(tableBits); - void* elementValue = table->baseAddress[index].value; - const FunctionType* actualSignature = table->baseAddress[index].type; - const FunctionType* expectedSignature = reinterpret_cast((Uptr)expectedSignatureBits); - std::string ipDescription = ""; - LLVMJIT::describeInstructionPointer(reinterpret_cast(elementValue),ipDescription); - Log::printf(Log::Category::debug,"call_indirect signature mismatch: expected %s at index %u but got %s (%s)\n", - asString(expectedSignature).c_str(), - index, - actualSignature ? asString(actualSignature).c_str() : "nullptr", - ipDescription.c_str() - ); - causeIntrensicException(elementValue == nullptr ? Exception::Cause::undefinedTableElement : Exception::Cause::indirectCallSignatureMismatch); + try { + TableInstance* table = reinterpret_cast(tableBits); + void* elementValue = table->baseAddress[index].value; + const FunctionType* actualSignature = table->baseAddress[index].type; + const FunctionType* expectedSignature = reinterpret_cast((Uptr)expectedSignatureBits); + std::string ipDescription = ""; + LLVMJIT::describeInstructionPointer(reinterpret_cast(elementValue),ipDescription); + Log::printf(Log::Category::debug,"call_indirect signature mismatch: expected %s at index %u but got %s (%s)\n", + asString(expectedSignature).c_str(), + index, + actualSignature ? asString(actualSignature).c_str() : "nullptr", + ipDescription.c_str() + ); + causeIntrensicException(elementValue == nullptr ? Exception::Cause::undefinedTableElement : Exception::Cause::indirectCallSignatureMismatch); + } + catch (...) 
{ + Platform::immediately_exit(std::current_exception()); + } } DEFINE_INTRINSIC_FUNCTION0(wavmIntrinsics,indirectCallIndexOutOfBounds,indirectCallIndexOutOfBounds,none) From aa01c7516b987dc6a02d7fb2b901f0df130e4aed Mon Sep 17 00:00:00 2001 From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com> Date: Mon, 15 Apr 2019 16:58:50 -0400 Subject: [PATCH 385/680] sync fc: remove last remnants of boost mutex usage --- libraries/fc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/fc b/libraries/fc index ae6ec564f0d..800d6c21b8b 160000 --- a/libraries/fc +++ b/libraries/fc @@ -1 +1 @@ -Subproject commit ae6ec564f0db6d3378348ef6b475042e332e612a +Subproject commit 800d6c21b8be9316a651e926a5b2affcc3c52c8e From 6d7a5e7840a7e8575e47d3cfdc1a55720fb9b94e Mon Sep 17 00:00:00 2001 From: Nathan Pierce Date: Mon, 15 Apr 2019 17:39:44 -0400 Subject: [PATCH 386/680] Artifact check (#7129) --- .buildkite/pipeline.yml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml index 19bbdf114ff..c3b449fc07f 100644 --- a/.buildkite/pipeline.yml +++ b/.buildkite/pipeline.yml @@ -3,7 +3,7 @@ steps: echo "+++ :hammer: Building" ./scripts/eosio_build.sh -y echo "--- :compression: Compressing build directory" - tar -pczf build.tar.gz build/ + tar -pczf build.tar.gz build/; if [ ! -f build.tar.gz ]; then echo "No TAR Found!" && exit 1; fi label: ":ubuntu: 16.04 Build" agents: queue: "automation-large-builder-fleet" @@ -24,7 +24,7 @@ steps: echo "+++ :hammer: Building" ./scripts/eosio_build.sh -y echo "--- :compression: Compressing build directory" - tar -pczf build.tar.gz build/ + tar -pczf build.tar.gz build/; if [ ! -f build.tar.gz ]; then echo "No TAR Found!" && exit 1; fi label: ":ubuntu: 18.04 Build" agents: queue: "automation-large-builder-fleet" @@ -45,7 +45,7 @@ steps: echo "+++ :hammer: Building" ./scripts/eosio_build.sh -y echo "--- :compression: Compressing build directory" - tar -pczf build.tar.gz build/ + tar -pczf build.tar.gz build/; if [ ! -f build.tar.gz ]; then echo "No TAR Found!" && exit 1; fi label: ":centos: 7 Build" agents: queue: "automation-large-builder-fleet" @@ -66,7 +66,7 @@ steps: echo "+++ :hammer: Building" ./scripts/eosio_build.sh -y echo "--- :compression: Compressing build directory" - tar -pczf build.tar.gz build/ + tar -pczf build.tar.gz build/; if [ ! -f build.tar.gz ]; then echo "No TAR Found!" && exit 1; fi label: ":aws: 2 Build" agents: queue: "automation-large-builder-fleet" @@ -89,7 +89,7 @@ steps: echo "+++ Building :hammer:" ./scripts/eosio_build.sh -y echo "--- Compressing build directory :compression:" - tar -pczf build.tar.gz build/ + tar -pczf build.tar.gz build/; if [ ! -f build.tar.gz ]; then echo "No TAR Found!" 
&& exit 1; fi label: ":darwin: Mojave Build" agents: - "role=builder-v2-1" From d0746b67b17e3e8f305f86f032ed84e3398ae4f1 Mon Sep 17 00:00:00 2001 From: arhag Date: Mon, 15 Apr 2019 18:00:26 -0400 Subject: [PATCH 387/680] implement RAM_RESTRICTIONS protocol feature #6105 --- libraries/chain/apply_context.cpp | 47 +++++++++++++++++-- .../chain/include/eosio/chain/exceptions.hpp | 2 + .../eosio/chain/protocol_feature_manager.hpp | 3 +- libraries/chain/protocol_feature_manager.cpp | 22 +++++++++ 4 files changed, 69 insertions(+), 5 deletions(-) diff --git a/libraries/chain/apply_context.cpp b/libraries/chain/apply_context.cpp index cac2840dcb7..703e9b742bd 100644 --- a/libraries/chain/apply_context.cpp +++ b/libraries/chain/apply_context.cpp @@ -85,6 +85,29 @@ void apply_context::exec_one() control.get_wasm_interface().apply( a.code_version, a.code, *this ); } catch( const wasm_exit& ) {} } + + if( !privileged && control.is_builtin_activated( builtin_protocol_feature_t::ram_restrictions ) ) { + const size_t checktime_interval = 10; + size_t counter = 0; + bool not_in_notify_context = (receiver == act->account); + const auto end = _account_ram_deltas.end(); + for( auto itr = _account_ram_deltas.begin(); itr != end; ++itr, ++counter ) { + if( counter == checktime_interval ) { + trx_context.checktime(); + counter = 0; + } + if( itr->delta > 0 && itr->account != receiver ) { + EOS_ASSERT( not_in_notify_context, unauthorized_ram_usage_increase, + "unprivileged contract cannot increase RAM usage of another account within a notify context: ${account}", + ("account", itr->account) + ); + EOS_ASSERT( has_authorization( itr->account ), unauthorized_ram_usage_increase, + "unprivileged contract cannot increase RAM usage of another account that has not authorized the action: ${account}", + ("account", itr->account) + ); + } + } + } } FC_RETHROW_EXCEPTIONS( warn, "pending console output: ${console}", ("console", _pending_console_output) ) } catch( const fc::exception& e ) { action_trace& trace = trx_context.get_action_trace( action_ordinal ); @@ -360,7 +383,17 @@ void apply_context::schedule_deferred_transaction( const uint128_t& sender_id, a if( !control.skip_auth_check() && !privileged ) { // Do not need to check authorization if replayng irreversible block or if contract is privileged if( payer != receiver ) { - require_authorization(payer); /// uses payer's storage + if( control.is_builtin_activated( builtin_protocol_feature_t::ram_restrictions ) ) { + EOS_ASSERT( receiver == act->account, action_validate_exception, + "cannot bill RAM usage of deferred transactions to another account within notify context" + ); + EOS_ASSERT( has_authorization( payer ), action_validate_exception, + "cannot bill RAM usage of deferred transaction to another account that has not authorized the action: ${payer}", + ("payer", payer) + ); + } else { + require_authorization(payer); /// uses payer's storage + } } // Originally this code bypassed authorization checks if a contract was deferring only actions to itself. @@ -459,8 +492,12 @@ void apply_context::schedule_deferred_transaction( const uint128_t& sender_id, a } ); } - EOS_ASSERT( control.is_ram_billing_in_notify_allowed() || (receiver == act->account) || (receiver == payer) || privileged, - subjective_block_production_exception, "Cannot charge RAM to other accounts during notify." 
); + EOS_ASSERT( control.is_builtin_activated( builtin_protocol_feature_t::ram_restrictions ) + || control.is_ram_billing_in_notify_allowed() + || (receiver == act->account) || (receiver == payer) || privileged, + subjective_block_production_exception, + "Cannot charge RAM to other accounts during notify." + ); add_ram_usage( payer, (config::billable_size_v + trx_size) ); } @@ -536,7 +573,9 @@ bytes apply_context::get_packed_transaction() { void apply_context::update_db_usage( const account_name& payer, int64_t delta ) { if( delta > 0 ) { - if( !(privileged || payer == account_name(receiver)) ) { + if( !(privileged || payer == account_name(receiver) + || control.is_builtin_activated( builtin_protocol_feature_t::ram_restrictions ) ) ) + { EOS_ASSERT( control.is_ram_billing_in_notify_allowed() || (receiver == act->account), subjective_block_production_exception, "Cannot charge RAM to other accounts during notify." ); require_authorization( payer ); diff --git a/libraries/chain/include/eosio/chain/exceptions.hpp b/libraries/chain/include/eosio/chain/exceptions.hpp index 5a6f9678cb5..63208de8e52 100644 --- a/libraries/chain/include/eosio/chain/exceptions.hpp +++ b/libraries/chain/include/eosio/chain/exceptions.hpp @@ -261,6 +261,8 @@ namespace eosio { namespace chain { 3050008, "Abort Called" ) FC_DECLARE_DERIVED_EXCEPTION( inline_action_too_big, action_validate_exception, 3050009, "Inline Action exceeds maximum size limit" ) + FC_DECLARE_DERIVED_EXCEPTION( unauthorized_ram_usage_increase, action_validate_exception, + 3050010, "Action attempts to increase RAM usage of account without authorization" ) FC_DECLARE_DERIVED_EXCEPTION( database_exception, chain_exception, 3060000, "Database exception" ) diff --git a/libraries/chain/include/eosio/chain/protocol_feature_manager.hpp b/libraries/chain/include/eosio/chain/protocol_feature_manager.hpp index 43e799fba4a..c2f0140433f 100644 --- a/libraries/chain/include/eosio/chain/protocol_feature_manager.hpp +++ b/libraries/chain/include/eosio/chain/protocol_feature_manager.hpp @@ -23,7 +23,8 @@ enum class builtin_protocol_feature_t : uint32_t { restrict_action_to_self, only_bill_first_authorizer, forward_setcode, - get_sender + get_sender, + ram_restrictions }; struct protocol_feature_subjective_restrictions { diff --git a/libraries/chain/protocol_feature_manager.cpp b/libraries/chain/protocol_feature_manager.cpp index 5281a41ac5b..ca0457ce11d 100644 --- a/libraries/chain/protocol_feature_manager.cpp +++ b/libraries/chain/protocol_feature_manager.cpp @@ -132,6 +132,28 @@ Forward eosio::setcode actions to the WebAssembly code deployed on the eosio acc Builtin protocol feature: GET_SENDER Allows contracts to determine which account is the sender of an inline action. +*/ + {} + } ) + ( builtin_protocol_feature_t::ram_restrictions, builtin_protocol_feature_spec{ + "RAM_RESTRICTIONS", + fc::variant("1812fdb5096fd854a4958eb9d53b43219d114de0e858ce00255bd46569ad2c68").as(), + // SHA256 hash of the raw message below within the comment delimiters (do not modify message below). +/* +Builtin protocol feature: RAM_RESTRICTIONS + +Modifies the restrictions on operations within actions that increase RAM usage of accounts other than the receiver. 
+ +An unprivileged contract responding to a notification: +is not allowed to schedule a deferred transaction in which the RAM costs are paid by an account other than the receiver; +but is allowed to execute database operations that increase RAM usage of an account other than the receiver as long as +the action's net effect on RAM usage for the account is to not increase it. + +An unprivileged contract executing an action (but not as a response to a notification): +is not allowed to schedule a deferred transaction in which the RAM costs are paid by an account other than the receiver +unless that account authorized the action; +but is allowed to execute database operations that increase RAM usage of an account other than the receiver as long as +either the account authorized the action or the action's net effect on RAM usage for the account is to not increase it. */ {} } ) From f9ab46362e255b9fb099d97aa5f71917ad8769e2 Mon Sep 17 00:00:00 2001 From: arhag Date: Mon, 15 Apr 2019 19:03:25 -0400 Subject: [PATCH 388/680] fix api_tests/ram_billing_in_notify_tests to not activate RAM_RESTRICTIONS; fix api_tests/deferred_transaction_tests to catch the right exception given RAM_RESTRICTIONS activation #6105 --- unittests/api_tests.cpp | 38 ++++++++++++++++++++++---------------- 1 file changed, 22 insertions(+), 16 deletions(-) diff --git a/unittests/api_tests.cpp b/unittests/api_tests.cpp index f2e20a948dc..b5fcd21503f 100644 --- a/unittests/api_tests.cpp +++ b/unittests/api_tests.cpp @@ -589,25 +589,31 @@ BOOST_FIXTURE_TEST_CASE(require_notice_tests, TESTER) { try { } FC_LOG_AND_RETHROW() } -BOOST_FIXTURE_TEST_CASE(ram_billing_in_notify_tests, TESTER) { try { - produce_blocks(2); - create_account( N(testapi) ); - create_account( N(testapi2) ); - produce_blocks(10); - set_code( N(testapi), contracts::test_api_wasm() ); - produce_blocks(1); - set_code( N(testapi2), contracts::test_api_wasm() ); - produce_blocks(1); - - BOOST_CHECK_EXCEPTION( CALL_TEST_FUNCTION( *this, "test_action", "test_ram_billing_in_notify", fc::raw::pack( ((unsigned __int128)N(testapi2) << 64) | N(testapi) ) ), - subjective_block_production_exception, fc_exception_message_is("Cannot charge RAM to other accounts during notify.") ); +BOOST_AUTO_TEST_CASE(ram_billing_in_notify_tests) { try { + validating_tester chain( validating_tester::default_config() ); + chain.execute_setup_policy( setup_policy::preactivate_feature_and_new_bios ); + + chain.produce_blocks(2); + chain.create_account( N(testapi) ); + chain.create_account( N(testapi2) ); + chain.produce_blocks(10); + chain.set_code( N(testapi), contracts::test_api_wasm() ); + chain.produce_blocks(1); + chain.set_code( N(testapi2), contracts::test_api_wasm() ); + chain.produce_blocks(1); + + BOOST_CHECK_EXCEPTION( CALL_TEST_FUNCTION( chain, "test_action", "test_ram_billing_in_notify", + fc::raw::pack( ((unsigned __int128)N(testapi2) << 64) | N(testapi) ) ), + subjective_block_production_exception, + fc_exception_message_is("Cannot charge RAM to other accounts during notify.") + ); - CALL_TEST_FUNCTION( *this, "test_action", "test_ram_billing_in_notify", fc::raw::pack( ((unsigned __int128)N(testapi2) << 64) | 0 ) ); + CALL_TEST_FUNCTION( chain, "test_action", "test_ram_billing_in_notify", fc::raw::pack( ((unsigned __int128)N(testapi2) << 64) | 0 ) ); - CALL_TEST_FUNCTION( *this, "test_action", "test_ram_billing_in_notify", fc::raw::pack( ((unsigned __int128)N(testapi2) << 64) | N(testapi2) ) ); + CALL_TEST_FUNCTION( chain, "test_action", "test_ram_billing_in_notify", fc::raw::pack( 
((unsigned __int128)N(testapi2) << 64) | N(testapi2) ) ); - BOOST_REQUIRE_EQUAL( validate(), true ); + BOOST_REQUIRE_EQUAL( chain.validate(), true ); } FC_LOG_AND_RETHROW() } /************************************************************************************* @@ -1235,7 +1241,7 @@ BOOST_FIXTURE_TEST_CASE(deferred_transaction_tests, TESTER) { try { // Payer is alice in this case, this tx should fail since we don't have the authorization of alice dtt_action dtt_act1; dtt_act1.payer = N(alice); - BOOST_CHECK_THROW(CALL_TEST_FUNCTION(*this, "test_transaction", "send_deferred_tx_with_dtt_action", fc::raw::pack(dtt_act1)), missing_auth_exception); + BOOST_CHECK_THROW(CALL_TEST_FUNCTION(*this, "test_transaction", "send_deferred_tx_with_dtt_action", fc::raw::pack(dtt_act1)), action_validate_exception); // Send a tx which in turn sends a deferred tx with the deferred tx's receiver != this tx receiver // This will include the authorization of the receiver, and impose any related delay associated with the authority From af752063aa8589c6f18c075b96e938bc9274bcd5 Mon Sep 17 00:00:00 2001 From: arhag Date: Mon, 15 Apr 2019 19:35:41 -0400 Subject: [PATCH 389/680] disable_all_subjective_mitigations now disables the subjective mitigation preventing RAM billing in notification contexts #6105 --- libraries/chain/controller.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp index ce3e3b2b732..93535a72ce3 100644 --- a/libraries/chain/controller.cpp +++ b/libraries/chain/controller.cpp @@ -2907,7 +2907,7 @@ bool controller::is_producing_block()const { } bool controller::is_ram_billing_in_notify_allowed()const { - return !is_producing_block() || my->conf.allow_ram_billing_in_notify; + return my->conf.disable_all_subjective_mitigations || !is_producing_block() || my->conf.allow_ram_billing_in_notify; } void controller::validate_expiration( const transaction& trx )const { try { From 85c5359c585644945848850db018e548170e21ea Mon Sep 17 00:00:00 2001 From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com> Date: Mon, 15 Apr 2019 19:41:20 -0400 Subject: [PATCH 390/680] configurable curve for https ECDH In some cases using prime256v1 can improve interoperability of the https server. Provide an option to switch the curve used for ECDH --- plugins/http_plugin/http_plugin.cpp | 39 +++++++++++++++++++++++++++-- 1 file changed, 37 insertions(+), 2 deletions(-) diff --git a/plugins/http_plugin/http_plugin.cpp b/plugins/http_plugin/http_plugin.cpp index 3345fcdb68c..1fe6bd2fac3 100644 --- a/plugins/http_plugin/http_plugin.cpp +++ b/plugins/http_plugin/http_plugin.cpp @@ -45,6 +45,11 @@ namespace eosio { using std::shared_ptr; using websocketpp::connection_hdl; + enum https_ecdh_curve_t { + SECP384R1, + PRIME256V1 + }; + static http_plugin_defaults current_http_plugin_defaults; void http_plugin::set_defaults(const http_plugin_defaults config) { @@ -147,6 +152,7 @@ namespace eosio { optional https_listen_endpoint; string https_cert_chain; string https_key; + https_ecdh_curve_t https_ecdh_curve = SECP384R1; websocket_server_tls_type https_server; @@ -191,7 +197,7 @@ namespace eosio { //going for the A+! Do a few more things on the native context to get ECDH in use - fc::ec_key ecdh = EC_KEY_new_by_curve_name(NID_secp384r1); + fc::ec_key ecdh = EC_KEY_new_by_curve_name(https_ecdh_curve == SECP384R1 ? 
NID_secp384r1 : NID_X9_62_prime256v1); if (!ecdh) EOS_THROW(chain::http_exception, "Failed to set NID_secp384r1"); if(SSL_CTX_set_tmp_ecdh(ctx->native_handle(), (EC_KEY*)ecdh) != 1) @@ -366,7 +372,9 @@ namespace eosio { return true; } - http_plugin::http_plugin():my(new http_plugin_impl()){} + http_plugin::http_plugin():my(new http_plugin_impl()){ + app().register_config_type(); + } http_plugin::~http_plugin(){} void http_plugin::set_program_options(options_description&, options_description& cfg) { @@ -394,6 +402,11 @@ namespace eosio { ("https-private-key-file", bpo::value(), "Filename with https private key in PEM format. Required for https") + ("https-ecdh-curve", bpo::value()->notifier([this](https_ecdh_curve_t c) { + my->https_ecdh_curve = c; + })->default_value(SECP384R1), + "Configure https ECDH curve to use: secp384r1 or prime256v1") + ("access-control-allow-origin", bpo::value()->notifier([this](const string& v) { my->access_control_allow_origin = v; ilog("configured http with Access-Control-Allow-Origin: ${o}", ("o", my->access_control_allow_origin)); @@ -679,4 +692,26 @@ namespace eosio { return result; } + + std::istream& operator>>(std::istream& in, https_ecdh_curve_t& curve) { + std::string s; + in >> s; + if (s == "secp384r1") + curve = SECP384R1; + else if (s == "prime256v1") + curve = PRIME256V1; + else + in.setstate(std::ios_base::failbit); + return in; + } + + std::ostream& operator<<(std::ostream& osm, https_ecdh_curve_t curve) { + if (curve == SECP384R1) { + osm << "secp384r1"; + } else if (curve == PRIME256V1) { + osm << "prime256v1"; + } + + return osm; + } } From 7517d702921acd90f52ce1a9c31a864c7253e110 Mon Sep 17 00:00:00 2001 From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com> Date: Mon, 15 Apr 2019 21:29:16 -0400 Subject: [PATCH 391/680] Limit ubuntu versions eosio_build.sh runs on to 16.04 & 18.04 --- scripts/eosio_build_ubuntu.sh | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/scripts/eosio_build_ubuntu.sh b/scripts/eosio_build_ubuntu.sh index 5561b14a450..f20b37d4951 100755 --- a/scripts/eosio_build_ubuntu.sh +++ b/scripts/eosio_build_ubuntu.sh @@ -40,9 +40,10 @@ case "${OS_NAME}" in fi ;; "Ubuntu") - if [ "${OS_MAJ}" -lt 16 ]; then - printf "You must be running Ubuntu 16.04.x or higher to install EOSIO.\\n" - printf "Exiting now.\\n" + . /etc/lsb-release + if [ "${DISTRIB_CODENAME}" != "xenial" -a "${DISTRIB_CODENAME}" != "bionic" ]; then + echo "The only Ubuntu versions this script supports are Ubuntu 16.04 and 18.04" + echo "Exiting now." exit 1 fi # UBUNTU 18 doesn't have MONGODB 3.6.3 From f50a32d310b9703bc89d637aaeaf955ecb853310 Mon Sep 17 00:00:00 2001 From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com> Date: Mon, 15 Apr 2019 22:44:30 -0400 Subject: [PATCH 392/680] don't store code_object id reference in account_metadata_object regress to storing code_hash+vm_version+vm_type in the account_metadata_object instead of an id reference in to the code_index. 
This avoids a lookup in the index for every action
---
 libraries/chain/apply_context.cpp             |  4 +--
 libraries/chain/controller.cpp                |  2 +-
 libraries/chain/eosio_contract.cpp            | 12 ++++-----
 .../include/eosio/chain/account_object.hpp    |  6 +++--
 .../include/eosio/chain/wasm_interface.hpp    |  4 +--
 .../eosio/chain/wasm_interface_private.hpp    | 26 ++++++++++++-------
 libraries/chain/wasm_interface.cpp            |  6 ++---
 plugins/chain_plugin/chain_plugin.cpp         | 20 ++++++--------
 .../state_history_serialization.hpp           | 19 +++----------
 .../state_history_plugin_abi.cpp              | 11 +++-----
 10 files changed, 48 insertions(+), 62 deletions(-)

diff --git a/libraries/chain/apply_context.cpp b/libraries/chain/apply_context.cpp
index 7034539521d..f71bf1d8db7 100644
--- a/libraries/chain/apply_context.cpp
+++ b/libraries/chain/apply_context.cpp
@@ -72,7 +72,7 @@ void apply_context::exec_one()
          (*native)( *this );
       }

-      if( ( receiver_account->code_id._id != 0 ) &&
+      if( ( receiver_account->code_hash != digest_type() ) &&
          ( control.is_builtin_activated( builtin_protocol_feature_t::forward_setcode )
            || !( act->account == config::system_account_name
                  && act->name == N( setcode )
@@ -84,7 +84,7 @@ void apply_context::exec_one()
          control.check_action_list( act->account, act->name );
       }
       try {
-         control.get_wasm_interface().apply( db.get(receiver_account->code_id), *this );
+         control.get_wasm_interface().apply( receiver_account->code_hash, receiver_account->vm_type, receiver_account->vm_version, *this );
      } catch( const wasm_exit& ) {}
   }
} FC_RETHROW_EXCEPTIONS( warn, "pending console output: ${console}", ("console", _pending_console_output) )
diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp
index f70439d3b23..8aa44689dd9 100644
--- a/libraries/chain/controller.cpp
+++ b/libraries/chain/controller.cpp
@@ -293,7 +293,7 @@ struct controller_impl {
             cfg.reversible_cache_size, false, cfg.db_map_mode, cfg.db_hugepage_paths ),
    blog( cfg.blocks_dir ),
    fork_db( cfg.state_dir ),
-   wasmif( cfg.wasm_runtime ),
+   wasmif( cfg.wasm_runtime, db ),
    resource_limits( db ),
    authorization( s, db ),
    protocol_features( std::move(pfs) ),
diff --git a/libraries/chain/eosio_contract.cpp b/libraries/chain/eosio_contract.cpp
index 8eb68721726..e2b3e6546be 100644
--- a/libraries/chain/eosio_contract.cpp
+++ b/libraries/chain/eosio_contract.cpp
@@ -146,7 +146,7 @@ void apply_eosio_setcode(apply_context& context) {
   }

   const auto& account = db.get(act.account);
-   bool existing_code = (account.code_id._id != 0);
+   bool existing_code = (account.code_hash != digest_type());

   EOS_ASSERT( code_size > 0 || existing_code, set_exact_code, "contract is already cleared" );

@@ -154,7 +154,7 @@ void apply_eosio_setcode(apply_context& context) {
   int64_t new_size  = code_size * config::setcode_ram_bytes_multiplier;

   if( existing_code ) {
-      const code_object& old_code_entry = db.get(account.code_id);
+      const code_object& old_code_entry = db.get(account.code_hash);
       EOS_ASSERT( old_code_entry.code_hash != code_hash, set_exact_code, "contract is already running this version of code" );
       int64_t old_size  = (int64_t)old_code_entry.code.size() * config::setcode_ram_bytes_multiplier;
@@ -167,19 +167,15 @@ void apply_eosio_setcode(apply_context& context) {
      }
   }

-
-   code_object::id_type code_id; // default is 0 which indicates no code is present
   if( code_size > 0 ) {
      const code_object* new_code_entry = db.find(
            boost::make_tuple(code_hash, act.vmtype, act.vmversion) );
      if( new_code_entry ) {
         db.modify(*new_code_entry, [&](code_object& o) {
-            code_id = o.id;
            ++o.code_ref_count;
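            // Editor's annotation (not part of the original patch): identical
            // (code_hash, vm_type, vm_version) uploads from different accounts
            // share a single code_object row, so setcode just bumps the
            // reference count here rather than storing another copy of the WASM.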
}); } else { db.create([&](code_object& o) { - code_id = o.id; o.code_hash = code_hash; o.code.assign(act.code.data(), code_size); o.code_ref_count = 1; @@ -192,7 +188,9 @@ void apply_eosio_setcode(apply_context& context) { db.modify( account, [&]( auto& a ) { a.code_sequence += 1; - a.code_id = code_id; + a.code_hash = code_hash; + a.vm_type = act.vmtype; + a.vm_version = act.vmversion; a.last_code_update = context.control.pending_block_time(); }); diff --git a/libraries/chain/include/eosio/chain/account_object.hpp b/libraries/chain/include/eosio/chain/account_object.hpp index d7471e02cf8..28b899ba238 100644 --- a/libraries/chain/include/eosio/chain/account_object.hpp +++ b/libraries/chain/include/eosio/chain/account_object.hpp @@ -61,9 +61,11 @@ namespace eosio { namespace chain { uint64_t auth_sequence = 0; uint64_t code_sequence = 0; uint64_t abi_sequence = 0; - code_object::id_type code_id; + digest_type code_hash; time_point last_code_update; uint32_t flags = 0; + uint8_t vm_type = 0; + uint8_t vm_version = 0; bool is_privileged()const { return has_field( flags, flags_fields::privileged ); } @@ -108,5 +110,5 @@ CHAINBASE_SET_INDEX_TYPE(eosio::chain::account_ram_correction_object, eosio::cha FC_REFLECT(eosio::chain::account_object, (name)(creation_date)(abi)) FC_REFLECT(eosio::chain::account_metadata_object, (name)(recv_sequence)(auth_sequence)(code_sequence)(abi_sequence) - (code_id)(last_code_update)(flags)) + (code_hash)(last_code_update)(flags)(vm_type)(vm_version)) FC_REFLECT(eosio::chain::account_ram_correction_object, (name)(ram_correction)) diff --git a/libraries/chain/include/eosio/chain/wasm_interface.hpp b/libraries/chain/include/eosio/chain/wasm_interface.hpp index 25f009afd2b..5ddea081d48 100644 --- a/libraries/chain/include/eosio/chain/wasm_interface.hpp +++ b/libraries/chain/include/eosio/chain/wasm_interface.hpp @@ -76,14 +76,14 @@ namespace eosio { namespace chain { wabt }; - wasm_interface(vm_type vm); + wasm_interface(vm_type vm, const chainbase::database& db); ~wasm_interface(); //validates code -- does a WASM validation pass and checks the wasm against EOSIO specific constraints static void validate(const controller& control, const bytes& code); //Calls apply or error on a given code - void apply(const code_object& code, apply_context& context); + void apply(const digest_type& code_hash, const uint8_t& vm_type, const uint8_t& vm_version, apply_context& context); //Immediately exits currently running wasm. 
UB is called when no wasm running void exit(); diff --git a/libraries/chain/include/eosio/chain/wasm_interface_private.hpp b/libraries/chain/include/eosio/chain/wasm_interface_private.hpp index eef846a7537..c84287f1239 100644 --- a/libraries/chain/include/eosio/chain/wasm_interface_private.hpp +++ b/libraries/chain/include/eosio/chain/wasm_interface_private.hpp @@ -37,7 +37,7 @@ namespace eosio { namespace chain { struct by_first_block_num; struct by_last_block_num; - wasm_interface_impl(wasm_interface::vm_type vm) { + wasm_interface_impl(wasm_interface::vm_type vm, const chainbase::database& d) : db(d) { if(vm == wasm_interface::vm_type::wavm) runtime_interface = std::make_unique(); else if(vm == wasm_interface::vm_type::wabt) @@ -64,30 +64,36 @@ namespace eosio { namespace chain { return mem_image; } - const std::unique_ptr& get_instantiated_module( const code_object& code, - transaction_context& trx_context ) + const std::unique_ptr& get_instantiated_module( const digest_type& code_hash, const uint8_t& vm_type, + const uint8_t& vm_version, transaction_context& trx_context ) { wasm_cache_index::iterator it = wasm_instantiation_cache.find( - boost::make_tuple(code.code_hash, code.vm_type, code.vm_version) ); + boost::make_tuple(code_hash, vm_type, vm_version) ); + const code_object* codeobject = nullptr; if(it == wasm_instantiation_cache.end()) { + codeobject = db.find(boost::make_tuple(code_hash, vm_type, vm_version)); + it = wasm_instantiation_cache.emplace( wasm_interface_impl::wasm_cache_entry{ - .code_hash = code.code_hash, - .first_block_num_used = code.first_block_used, + .code_hash = code_hash, + .first_block_num_used = codeobject->first_block_used, .last_block_num_used = UINT32_MAX, .module = nullptr, - .vm_type = code.vm_type, - .vm_version = code.vm_version + .vm_type = vm_type, + .vm_version = vm_version } ).first; } if(!it->module) { + if(!codeobject) + codeobject = db.find(boost::make_tuple(code_hash, vm_type, vm_version)); + auto timer_pause = fc::make_scoped_exit([&](){ trx_context.resume_billing_timer(); }); trx_context.pause_billing_timer(); IR::Module module; try { - Serialization::MemoryInputStream stream((const U8*)code.code.data(), code.code.size()); + Serialization::MemoryInputStream stream((const U8*)codeobject->code.data(), codeobject->code.size()); WASM::serialize(stream, module); module.userSections.clear(); } catch(const Serialization::FatalSerializationException& e) { @@ -134,6 +140,8 @@ namespace eosio { namespace chain { > > wasm_cache_index; wasm_cache_index wasm_instantiation_cache; + + const chainbase::database& db; }; #define _REGISTER_INTRINSIC_EXPLICIT(CLS, MOD, METHOD, WASM_SIG, NAME, SIG)\ diff --git a/libraries/chain/wasm_interface.cpp b/libraries/chain/wasm_interface.cpp index cedb2f444fa..d1edc05f646 100644 --- a/libraries/chain/wasm_interface.cpp +++ b/libraries/chain/wasm_interface.cpp @@ -29,7 +29,7 @@ namespace eosio { namespace chain { using namespace webassembly; using namespace webassembly::common; - wasm_interface::wasm_interface(vm_type vm) : my( new wasm_interface_impl(vm) ) {} + wasm_interface::wasm_interface(vm_type vm, const chainbase::database& d) : my( new wasm_interface_impl(vm, d) ) {} wasm_interface::~wasm_interface() {} @@ -57,8 +57,8 @@ namespace eosio { namespace chain { //Hard: Kick off instantiation in a separate thread at this location } - void wasm_interface::apply( const code_object& code, apply_context& context ) { - my->get_instantiated_module(code, context.trx_context)->apply(context); + void wasm_interface::apply( 
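   // Editor's annotation (not part of the original patch): apply() now receives
   // the (code_hash, vm_type, vm_version) triple directly -- the same composite
   // key used by the code index and the instantiation cache -- so callers such
   // as apply_context::exec_one no longer fetch a code_object row first: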
const digest_type& code_hash, const uint8_t& vm_type, const uint8_t& vm_version, apply_context& context ) { + my->get_instantiated_module(code_hash, vm_type, vm_version, context.trx_context)->apply(context); } void wasm_interface::exit() { diff --git a/plugins/chain_plugin/chain_plugin.cpp b/plugins/chain_plugin/chain_plugin.cpp index e7f754f885f..a13407e8abb 100644 --- a/plugins/chain_plugin/chain_plugin.cpp +++ b/plugins/chain_plugin/chain_plugin.cpp @@ -2052,8 +2052,8 @@ read_only::get_code_results read_only::get_code( const get_code_params& params ) EOS_ASSERT( params.code_as_wasm, unsupported_feature, "Returning WAST from get_code is no longer supported" ); - if( accnt_metadata_obj.code_id._id != 0 ) { - const auto& code_obj = d.get(accnt_metadata_obj.code_id); + if( accnt_metadata_obj.code_hash != digest_type() ) { + const auto& code_obj = d.get(accnt_metadata_obj.code_hash); result.wasm = string(code_obj.code.begin(), code_obj.code.end()); result.code_hash = code_obj.code_hash; } @@ -2072,10 +2072,8 @@ read_only::get_code_hash_results read_only::get_code_hash( const get_code_hash_p const auto& d = db.db(); const auto& accnt = d.get( params.account_name ); - if( accnt.code_id._id != 0 ) { - const auto& code_obj = d.get(accnt.code_id); - result.code_hash = code_obj.code_hash; - } + if( accnt.code_hash != digest_type() ) + result.code_hash = accnt.code_hash; return result; } @@ -2087,8 +2085,8 @@ read_only::get_raw_code_and_abi_results read_only::get_raw_code_and_abi( const g const auto& d = db.db(); const auto& accnt_obj = d.get(params.account_name); const auto& accnt_metadata_obj = d.get(params.account_name); - if( accnt_metadata_obj.code_id._id != 0 ) { - const auto& code_obj = d.get(accnt_metadata_obj.code_id); + if( accnt_metadata_obj.code_hash != digest_type() ) { + const auto& code_obj = d.get(accnt_metadata_obj.code_hash); result.wasm = blob{{code_obj.code.begin(), code_obj.code.end()}}; } result.abi = blob{{accnt_obj.abi.begin(), accnt_obj.abi.end()}}; @@ -2104,10 +2102,8 @@ read_only::get_raw_abi_results read_only::get_raw_abi( const get_raw_abi_params& const auto& accnt_obj = d.get(params.account_name); const auto& accnt_metadata_obj = d.get(params.account_name); result.abi_hash = fc::sha256::hash( accnt_obj.abi.data(), accnt_obj.abi.size() ); - if( accnt_metadata_obj.code_id._id != 0 ) { - const auto& code_obj = d.get(accnt_metadata_obj.code_id); - result.code_hash = code_obj.code_hash; - } + if( accnt_metadata_obj.code_hash != digest_type() ) + result.code_hash = accnt_metadata_obj.code_hash; if( !params.abi_hash || *params.abi_hash != result.abi_hash ) result.abi = blob{{accnt_obj.abi.begin(), accnt_obj.abi.end()}}; diff --git a/plugins/state_history_plugin/include/eosio/state_history_plugin/state_history_serialization.hpp b/plugins/state_history_plugin/include/eosio/state_history_plugin/state_history_serialization.hpp index 876cd4f3c7c..8cb026bbdac 100644 --- a/plugins/state_history_plugin/include/eosio/state_history_plugin/state_history_serialization.hpp +++ b/plugins/state_history_plugin/include/eosio/state_history_plugin/state_history_serialization.hpp @@ -117,22 +117,9 @@ datastream& operator<<(datastream& ds, const history_serial_wrapper(obj.obj.name.value)); fc::raw::pack(ds, as_type(obj.obj.is_privileged())); fc::raw::pack(ds, as_type(obj.obj.last_code_update)); - - fc::raw::pack(ds, bool(obj.obj.code_id._id)); - if (obj.obj.code_id._id) { - auto& index = obj.db.get_index(); - const auto* code_obj = index.find(obj.obj.code_id); - if (!code_obj) { - auto& undo = 
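          // Editor's annotation (not part of the original patch): this removed
          // fallback had to recover the code_object from chainbase's undo stack
          // when the row had already been erased within the current session;
          // storing code_hash, vm_type and vm_version on account_metadata_object
          // itself makes the lookup (and the fallback) unnecessary.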
index.stack().back();
-          auto it   = undo.removed_values.find(obj.obj.code_id);
-          EOS_ASSERT(it != undo.removed_values.end(), eosio::chain::plugin_exception,
-                     "can not find code_object");
-          code_obj = &it->second;
-      }
-      fc::raw::pack(ds, as_type(code_obj->vm_type));
-      fc::raw::pack(ds, as_type(code_obj->vm_version));
-      fc::raw::pack(ds, as_type(code_obj->code_hash));
-   }
+   fc::raw::pack(ds, as_type(obj.obj.vm_type));
+   fc::raw::pack(ds, as_type(obj.obj.vm_version));
+   fc::raw::pack(ds, as_type(obj.obj.code_hash));
    return ds;
 }
diff --git a/plugins/state_history_plugin/state_history_plugin_abi.cpp b/plugins/state_history_plugin/state_history_plugin_abi.cpp
index 6154b65ee54..2ad97e26445 100644
--- a/plugins/state_history_plugin/state_history_plugin_abi.cpp
+++ b/plugins/state_history_plugin/state_history_plugin_abi.cpp
@@ -187,13 +187,6 @@ extern const char* const state_history_plugin_abi = R"({
          { "name": "transaction_extensions", "type": "extension[]" }
       ]
    },
-   {
-      "name": "code_id", "fields": [
-         { "type": "uint8", "name": "vm_type" },
-         { "type": "uint8", "name": "vm_version" },
-         { "type": "checksum256", "name": "code_hash" }
-      ]
-   },
    {
       "name": "account_v0", "fields": [
         { "type": "name", "name": "name" },
@@ -206,7 +199,9 @@ extern const char* const state_history_plugin_abi = R"({
         { "type": "name", "name": "name" },
         { "type": "bool", "name": "privileged" },
         { "type": "time_point", "name": "last_code_update" },
-        { "type": "code_id?", "name": "code" }
+        { "type": "uint8", "name": "vm_type" },
+        { "type": "uint8", "name": "vm_version" },
+        { "type": "checksum256", "name": "code_hash" }
      ]
   },
   {

From 5f5327e95d022eebfba02a159e02ea61b0a692f4 Mon Sep 17 00:00:00 2001
From: Jeeyong Um
Date: Tue, 16 Apr 2019 17:11:15 +0900
Subject: [PATCH 393/680] Add missing whitespace for node execution command

---
 programs/eosio-launcher/main.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/programs/eosio-launcher/main.cpp b/programs/eosio-launcher/main.cpp
index fe73e1fb305..edebe0d5a6a 100644
--- a/programs/eosio-launcher/main.cpp
+++ b/programs/eosio-launcher/main.cpp
@@ -1558,7 +1558,7 @@ launcher_def::launch (eosd_def &instance, string &gts) {
      install_path = specific_nodeos_installation_paths[node_num] + "/";
    }
  }
-  string eosdcmd = install_path + "programs/nodeos/" + string(node_executable_name);
+  string eosdcmd = install_path + "programs/nodeos/" + string(node_executable_name) + " ";
  if (skip_transaction_signatures) {
    eosdcmd += "--skip-transaction-signatures ";
  }

From b6111ffd9ff4be811f63417620bb191b901f03fe Mon Sep 17 00:00:00 2001
From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com>
Date: Mon, 15 Apr 2019 22:53:06 -0400
Subject: [PATCH 394/680] remove bnet_plugin

bnet_plugin was deprecated in the 1.7.1 release for various reasons.
Now is the time to remove it as 1.8 is closing in --- Docker/config.ini | 12 - .../include/eosio/chain/chain_id_type.hpp | 2 - plugins/CMakeLists.txt | 1 - plugins/bnet_plugin/CMakeLists.txt | 7 - plugins/bnet_plugin/bnet_plugin.cpp | 1569 ----------------- .../include/eosio/bnet_plugin/bnet_plugin.hpp | 55 - programs/eosio-launcher/main.cpp | 51 +- programs/nodeos/CMakeLists.txt | 1 - programs/nodeos/logging.json | 9 - 9 files changed, 6 insertions(+), 1701 deletions(-) delete mode 100644 plugins/bnet_plugin/CMakeLists.txt delete mode 100644 plugins/bnet_plugin/bnet_plugin.cpp delete mode 100644 plugins/bnet_plugin/include/eosio/bnet_plugin/bnet_plugin.hpp diff --git a/Docker/config.ini b/Docker/config.ini index 3dd9181f359..a85918d236b 100644 --- a/Docker/config.ini +++ b/Docker/config.ini @@ -1,15 +1,3 @@ -# the endpoint upon which to listen for incoming connections (eosio::bnet_plugin) -bnet-endpoint = 0.0.0.0:4321 - -# the number of threads to use to process network messages (eosio::bnet_plugin) -# bnet-threads = - -# remote endpoint of other node to connect to; Use multiple bnet-connect options as needed to compose a network (eosio::bnet_plugin) -# bnet-connect = - -# this peer will request no pending transactions from other nodes (eosio::bnet_plugin) -bnet-no-trx = false - # the location of the blocks directory (absolute path or relative to application data dir) (eosio::chain_plugin) blocks-dir = "blocks" diff --git a/libraries/chain/include/eosio/chain/chain_id_type.hpp b/libraries/chain/include/eosio/chain/chain_id_type.hpp index a16fc143ae6..59ab8f248b0 100644 --- a/libraries/chain/include/eosio/chain/chain_id_type.hpp +++ b/libraries/chain/include/eosio/chain/chain_id_type.hpp @@ -47,8 +47,6 @@ namespace chain { friend class eosio::net_plugin_impl; friend struct eosio::handshake_message; - - friend struct ::hello; // TODO: Rushed hack to support bnet_plugin. Need a better solution. }; } } // namespace eosio::chain diff --git a/plugins/CMakeLists.txt b/plugins/CMakeLists.txt index 8c93df9c48e..e07a10c5b8d 100644 --- a/plugins/CMakeLists.txt +++ b/plugins/CMakeLists.txt @@ -1,4 +1,3 @@ -add_subdirectory(bnet_plugin) add_subdirectory(net_plugin) add_subdirectory(net_api_plugin) add_subdirectory(http_plugin) diff --git a/plugins/bnet_plugin/CMakeLists.txt b/plugins/bnet_plugin/CMakeLists.txt deleted file mode 100644 index d49438298cf..00000000000 --- a/plugins/bnet_plugin/CMakeLists.txt +++ /dev/null @@ -1,7 +0,0 @@ -file(GLOB HEADERS "include/eosio/bnet_plugin/*.hpp") -add_library( bnet_plugin - bnet_plugin.cpp - ${HEADERS} ) - -target_link_libraries( bnet_plugin chain_plugin eosio_chain appbase ) -target_include_directories( bnet_plugin PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/include" ) diff --git a/plugins/bnet_plugin/bnet_plugin.cpp b/plugins/bnet_plugin/bnet_plugin.cpp deleted file mode 100644 index 08d2091040f..00000000000 --- a/plugins/bnet_plugin/bnet_plugin.cpp +++ /dev/null @@ -1,1569 +0,0 @@ -/** - * The purpose of this protocol is to synchronize (and keep synchronized) two - * blockchains using a very simple algorithm: - * - * 1. find the last block id on our local chain that the remote peer knows about - * 2. if we have the next block send it to them - * 3. if we don't have the next block send them a the oldest unexpired transaction - * - * There are several input events: - * - * 1. new block accepted by local chain - * 2. block deemed irreversible by local chain - * 3. new block header accepted by local chain - * 4. transaction accepted by local chain - * 5. 
block received from remote peer - * 6. transaction received from remote peer - * 7. socket ready for next write - * - * Each session is responsible for maintaining the following - * - * 1. the most recent block on our current best chain which we know - * with certainty that the remote peer has. - * - this could be the peers last irreversible block - * - a block ID after the LIB that the peer has notified us of - * - a block which we have sent to the remote peer - * - a block which the peer has sent us - * 2. the block IDs we have received from the remote peer so that - * we can disconnect peer if one of those blocks is deemed invalid - * - we can clear these IDs once the block becomes reversible - * 3. the transactions we have received from the remote peer so that - * we do not send them something that they already know. - * - this includes transactions sent as part of blocks - * - we clear this cache after we have applied a block that - * includes the transactions because we know the controller - * should not notify us again (they would be dupe) - * - * Assumptions: - * 1. all blocks we send the peer are valid and will be held in the - * peers fork database until they become irreversible or are replaced - * by an irreversible alternative. - * 2. we don't care what fork the peer is on, so long as we know they have - * the block prior to the one we want to send. The peer will sort it out - * with its fork database and hopfully come to our conclusion. - * 3. the peer will send us blocks on the same basis - * - */ - -#include -#include -#include -#include - -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -using tcp = boost::asio::ip::tcp; -namespace ws = boost::beast::websocket; - -namespace eosio { - using namespace chain; - - static appbase::abstract_plugin& _bnet_plugin = app().register_plugin(); - -} /// namespace eosio - -namespace fc { - extern std::unordered_map& get_logger_map(); -} - -const fc::string logger_name("bnet_plugin"); -fc::logger plugin_logger; -std::string peer_log_format; - -#define peer_dlog( PEER, FORMAT, ... ) \ - FC_MULTILINE_MACRO_BEGIN \ - if( plugin_logger.is_enabled( fc::log_level::debug ) ) \ - plugin_logger.log( FC_LOG_MESSAGE( debug, peer_log_format + FORMAT, __VA_ARGS__ (PEER->get_logger_variant()) ) ); \ - FC_MULTILINE_MACRO_END - -#define peer_ilog( PEER, FORMAT, ... ) \ - FC_MULTILINE_MACRO_BEGIN \ - if( plugin_logger.is_enabled( fc::log_level::info ) ) \ - plugin_logger.log( FC_LOG_MESSAGE( info, peer_log_format + FORMAT, __VA_ARGS__ (PEER->get_logger_variant()) ) ); \ - FC_MULTILINE_MACRO_END - -#define peer_wlog( PEER, FORMAT, ... ) \ - FC_MULTILINE_MACRO_BEGIN \ - if( plugin_logger.is_enabled( fc::log_level::warn ) ) \ - plugin_logger.log( FC_LOG_MESSAGE( warn, peer_log_format + FORMAT, __VA_ARGS__ (PEER->get_logger_variant()) ) ); \ - FC_MULTILINE_MACRO_END - -#define peer_elog( PEER, FORMAT, ... 
) \ - FC_MULTILINE_MACRO_BEGIN \ - if( plugin_logger.is_enabled( fc::log_level::error ) ) \ - plugin_logger.log( FC_LOG_MESSAGE( error, peer_log_format + FORMAT, __VA_ARGS__ (PEER->get_logger_variant())) ); \ - FC_MULTILINE_MACRO_END - - -using eosio::public_key_type; -using eosio::chain_id_type; -using eosio::block_id_type; -using eosio::block_timestamp_type; -using std::string; -using eosio::sha256; -using eosio::signed_block_ptr; -using eosio::packed_transaction_ptr; -using std::vector; - -struct hello { - public_key_type peer_id; - string network_version; - string agent; - string protocol_version = "1.0.1"; - string user; - string password; - chain_id_type chain_id; - bool request_transactions = false; - uint32_t last_irr_block_num = 0; - vector pending_block_ids; -}; -// @swap user, password -FC_REFLECT( hello, (peer_id)(network_version)(user)(password)(agent)(protocol_version)(chain_id)(request_transactions)(last_irr_block_num)(pending_block_ids) ) - -struct hello_extension_irreversible_only {}; - -FC_REFLECT( hello_extension_irreversible_only, BOOST_PP_SEQ_NIL ) - -using hello_extension = fc::static_variant; - -/** - * This message is sent upon successful speculative application of a transaction - * and informs a peer not to send this message. - */ -struct trx_notice { - vector signed_trx_id; ///< hash of trx + sigs -}; - -FC_REFLECT( trx_notice, (signed_trx_id) ) - -/** - * This message is sent upon successfully adding a transaction to the fork database - * and informs the remote peer that there is no need to send this block. - */ -struct block_notice { - vector block_ids; -}; - -FC_REFLECT( block_notice, (block_ids) ); - -struct ping { - fc::time_point sent; - fc::sha256 code; - uint32_t lib; ///< the last irreversible block -}; -FC_REFLECT( ping, (sent)(code)(lib) ) - -struct pong { - fc::time_point sent; - fc::sha256 code; -}; -FC_REFLECT( pong, (sent)(code) ) - -using bnet_message = fc::static_variant; - - -struct by_id; -struct by_num; -struct by_received; -struct by_expired; - -namespace eosio { - using namespace chain::plugin_interface; - - class bnet_plugin_impl; - - template - void verify_strand_in_this_thread(const Strand& strand, const char* func, int line) { - if( !strand.running_in_this_thread() ) { - elog( "wrong strand: ${f} : line ${n}, exiting", ("f", func)("n", line) ); - app().quit(); - } - } - - /** - * Each session is presumed to operate in its own strand so that - * operations can execute in parallel. 
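    * (Editor's annotation, not part of the original patch: each session binds its
    * socket callbacks to its own boost::asio strand via boost::asio::bind_executor,
    * which is why the handlers below assert with verify_strand_in_this_thread
    * before touching session state.)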
- */ - class session : public std::enable_shared_from_this - { - public: - enum session_state { - hello_state, - sending_state, - idle_state - }; - - struct block_status { - block_status( block_id_type i, bool kby_peer, bool rfrom_peer) - { - known_by_peer = kby_peer; - received_from_peer = rfrom_peer; - id = i; - } - - bool known_by_peer = false; ///< we sent block to peer or peer sent us notice - bool received_from_peer = false; ///< peer sent us this block and considers full block valid - block_id_type id; ///< the block id; - // block_id_type prev; ///< the prev block id - - // shared_ptr< vector > block_msg; ///< packed bnet_message for this block - - uint32_t block_num()const { return block_header::num_from_id(id); } - }; - - typedef boost::multi_index_container, member >, - ordered_non_unique< tag, const_mem_fun > - > - > block_status_index; - - - struct transaction_status { - time_point received; - time_point expired; /// 5 seconds from last accepted - transaction_id_type id; - transaction_metadata_ptr trx; - - void mark_known_by_peer() { received = fc::time_point::maximum(); trx.reset(); } - bool known_by_peer()const { return received == fc::time_point::maximum(); } - }; - - typedef boost::multi_index_container, member >, - ordered_non_unique< tag, member >, - ordered_non_unique< tag, member > - > - > transaction_status_index; - - block_status_index _block_status; - transaction_status_index _transaction_status; - const uint32_t _max_block_status_range = 2048; // limit tracked block_status known_by_peer - - public_key_type _local_peer_id; - uint32_t _local_lib = 0; - block_id_type _local_lib_id; - uint32_t _local_head_block_num = 0; - block_id_type _local_head_block_id; /// the last block id received on local channel - - - public_key_type _remote_peer_id; - uint32_t _remote_lib = 0; - block_id_type _remote_lib_id; - bool _remote_request_trx = false; - bool _remote_request_irreversible_only = false; - - uint32_t _last_sent_block_num = 0; - block_id_type _last_sent_block_id; /// the id of the last block sent - bool _recv_remote_hello = false; - bool _sent_remote_hello = false; - - - fc::sha256 _current_code; - fc::time_point _last_recv_ping_time = fc::time_point::now(); - ping _last_recv_ping; - ping _last_sent_ping; - - - int _session_num = 0; - session_state _state = hello_state; - tcp::resolver _resolver; - bnet_ptr _net_plugin; - boost::asio::io_service& _ios; - unique_ptr> _ws; - boost::asio::strand< boost::asio::io_context::executor_type> _strand; - - methods::get_block_by_number::method_type& _get_block_by_number; - - - string _peer; - string _remote_host; - string _remote_port; - - vector _out_buffer; - //boost::beast::multi_buffer _in_buffer; - boost::beast::flat_buffer _in_buffer; - flat_set _block_header_notices; - fc::optional _logger_variant; - - - int next_session_id()const { - static std::atomic session_count(0); - return ++session_count; - } - - /** - * Creating session from server socket acceptance - */ - explicit session( tcp::socket socket, bnet_ptr net_plug ) - :_resolver(socket.get_io_service()), - _net_plugin( std::move(net_plug) ), - _ios(socket.get_io_service()), - _ws( new ws::stream(move(socket)) ), - _strand(_ws->get_executor() ), - _get_block_by_number( app().get_method() ) - { - _session_num = next_session_id(); - set_socket_options(); - _ws->binary(true); - wlog( "open session ${n}",("n",_session_num) ); - } - - - /** - * Creating outgoing session - */ - explicit session( boost::asio::io_context& ioc, bnet_ptr net_plug ) - :_resolver(ioc), - _net_plugin( 
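      // Editor's annotation (not part of the original patch): this second
      // constructor builds an outgoing session that still has to resolve and
      // connect, whereas the one above wraps a socket the listener accepted.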
std::move(net_plug) ), - _ios(ioc), - _ws( new ws::stream(ioc) ), - _strand( _ws->get_executor() ), - _get_block_by_number( app().get_method() ) - { - _session_num = next_session_id(); - _ws->binary(true); - wlog( "open session ${n}",("n",_session_num) ); - } - - ~session(); - - - void set_socket_options() { - try { - /** to minimize latency when sending short messages */ - _ws->next_layer().set_option( boost::asio::ip::tcp::no_delay(true) ); - - /** to minimize latency when sending large 1MB blocks, the send buffer should not have to - * wait for an "ack", making this larger could result in higher latency for smaller urgent - * messages. - */ - _ws->next_layer().set_option( boost::asio::socket_base::send_buffer_size( 1024*1024 ) ); - _ws->next_layer().set_option( boost::asio::socket_base::receive_buffer_size( 1024*1024 ) ); - } catch ( ... ) { - elog( "uncaught exception on set socket options" ); - } - } - - void run() { - _ws->async_accept( boost::asio::bind_executor( - _strand, - std::bind( &session::on_accept, - shared_from_this(), - std::placeholders::_1) ) ); - } - - void run( const string& peer ) { - auto c = peer.find(':'); - auto host = peer.substr( 0, c ); - auto port = peer.substr( c+1, peer.size() ); - - _peer = peer; - _remote_host = host; - _remote_port = port; - - _resolver.async_resolve( _remote_host, _remote_port, - boost::asio::bind_executor( _strand, - std::bind( &session::on_resolve, - shared_from_this(), - std::placeholders::_1, - std::placeholders::_2 ) ) ); - } - - void on_resolve( boost::system::error_code ec, - tcp::resolver::results_type results ) { - if( ec ) return on_fail( ec, "resolve" ); - - boost::asio::async_connect( _ws->next_layer(), - results.begin(), results.end(), - boost::asio::bind_executor( _strand, - std::bind( &session::on_connect, - shared_from_this(), - std::placeholders::_1 ) ) ); - } - - void on_connect( boost::system::error_code ec ) { - if( ec ) return on_fail( ec, "connect" ); - - set_socket_options(); - - _ws->async_handshake( _remote_host, "/", - boost::asio::bind_executor( _strand, - std::bind( &session::on_handshake, - shared_from_this(), - std::placeholders::_1 ) ) ); - } - - void on_handshake( boost::system::error_code ec ) { - if( ec ) return on_fail( ec, "handshake" ); - - do_hello(); - do_read(); - } - - /** - * This will be called "every time" a the transaction is accepted which happens - * on the speculative block (potentially several such blocks) and when a block - * that contains the transaction is applied and/or when switching forks. - * - * We will add it to the transaction status table as "received now" to be the - * basis of sending it to the peer. When we send it to the peer "received now" - * will be set to the infinite future to mark it as sent so we don't resend it - * when it is accepted again. - * - * Each time the transaction is "accepted" we extend the time we cache it by - * 5 seconds from now. Every time a block is applied we purge all accepted - * transactions that have reached 5 seconds without a new "acceptance". 
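    * (Editor's annotation, not part of the original patch: concretely, the handler
    * below stamps each cached entry with
    *    expired = min( now + 5 seconds, packed_trx->expiration() )
    * on every re-acceptance, so a transaction that keeps being accepted stays
    * cached while an abandoned one ages out of the table.)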
- */ - void on_accepted_transaction( transaction_metadata_ptr t ) { - //ilog( "accepted ${t}", ("t",t->id) ); - auto itr = _transaction_status.find( t->id ); - if( itr != _transaction_status.end() ) { - if( !itr->known_by_peer() ) { - _transaction_status.modify( itr, [&]( auto& stat ) { - stat.expired = std::min( fc::time_point::now() + fc::seconds(5), t->packed_trx->expiration() ); - }); - } - return; - } - - transaction_status stat; - stat.received = fc::time_point::now(); - stat.expired = stat.received + fc::seconds(5); - stat.id = t->id; - stat.trx = t; - _transaction_status.insert( stat ); - - maybe_send_next_message(); - } - - /** - * Remove all transactions that expired from cache prior to now - */ - void purge_transaction_cache() { - auto& idx = _transaction_status.get(); - auto itr = idx.begin(); - auto now = fc::time_point::now(); - while( itr != idx.end() && itr->expired < now ) { - idx.erase(itr); - itr = idx.begin(); - } - } - - /** - * When our local LIB advances we can purge our known history up to - * the LIB or up to the last block known by the remote peer. - */ - void on_new_lib( block_state_ptr s ) { - verify_strand_in_this_thread(_strand, __func__, __LINE__); - _local_lib = s->block_num; - _local_lib_id = s->id; - - auto purge_to = std::min( _local_lib, _last_sent_block_num ); - - auto& idx = _block_status.get(); - auto itr = idx.begin(); - while( itr != idx.end() && itr->block_num() < purge_to ) { - idx.erase(itr); - itr = idx.begin(); - } - - if( _remote_request_irreversible_only ) { - auto bitr = _block_status.find(s->id); - if ( bitr == _block_status.end() || !bitr->received_from_peer ) { - _block_header_notices.insert(s->id); - } - } - - maybe_send_next_message(); - } - - - void on_bad_block( signed_block_ptr b ) { - verify_strand_in_this_thread(_strand, __func__, __LINE__); - try { - auto id = b->id(); - auto itr = _block_status.find( id ); - if( itr == _block_status.end() ) return; - if( itr->received_from_peer ) { - peer_elog(this, "bad signed_block_ptr : unknown" ); - elog( "peer sent bad block #${b} ${i}, disconnect", ("b", b->block_num())("i",b->id()) ); - _ws->next_layer().close(); - } - } catch ( ... 
) { - elog( "uncaught exception" ); - } - } - - void on_accepted_block_header( const block_state_ptr& s ) { - verify_strand_in_this_thread(_strand, __func__, __LINE__); - // ilog( "accepted block header ${n}", ("n",s->block_num) ); - const auto& id = s->id; - - if( fc::time_point::now() - s->block->timestamp < fc::seconds(6) ) { - // ilog( "queue notice to peer that we have this block so hopefully they don't send it to us" ); - auto itr = _block_status.find( id ); - if( !_remote_request_irreversible_only && ( itr == _block_status.end() || !itr->received_from_peer ) ) { - _block_header_notices.insert( id ); - } - if( itr == _block_status.end() ) { - _block_status.insert( block_status(id, false, false) ); - } - } - } - - void on_accepted_block( const block_state_ptr& s ) { - verify_strand_in_this_thread(_strand, __func__, __LINE__); - //idump((_block_status.size())(_transaction_status.size())); - //ilog( "accepted block ${n}", ("n",s->block_num) ); - - const auto& id = s->id; - - _local_head_block_id = id; - _local_head_block_num = block_header::num_from_id(id); - - if( _local_head_block_num < _last_sent_block_num ) { - _last_sent_block_num = _local_lib; - _last_sent_block_id = _local_lib_id; - } - - purge_transaction_cache(); - - /** purge all transactions from cache, I will send them as part of a block - * in the future unless peer tells me they already have block. - */ - for( const auto& receipt : s->block->transactions ) { - if( receipt.trx.which() == 1 ) { - const auto& pt = receipt.trx.get(); - const auto& tid = pt.id(); - auto itr = _transaction_status.find( tid ); - if( itr != _transaction_status.end() ) - _transaction_status.erase(itr); - } - } - - maybe_send_next_message(); /// attempt to send if we are idle - } - - - template - void async_get_pending_block_ids( L&& callback ) { - /// send peer my head block status which is read from chain plugin - app().post(priority::low, [self = shared_from_this(),callback]{ - auto& control = app().get_plugin().chain(); - auto lib = control.last_irreversible_block_num(); - auto head = control.fork_db_head_block_id(); - auto head_num = block_header::num_from_id(head); - - - std::vector ids; - if( lib > 0 ) { - ids.reserve((head_num-lib)+1); - for( auto i = lib; i <= head_num; ++i ) { - ids.emplace_back(control.get_block_id_for_num(i)); - } - } - self->_ios.post( boost::asio::bind_executor( - self->_strand, - [callback,ids,lib](){ - callback(ids,lib); - } - )); - }); - } - - template - void async_get_block_num( uint32_t blocknum, L&& callback ) { - app().post(priority::low, [self = shared_from_this(), blocknum, callback]{ - auto& control = app().get_plugin().chain(); - signed_block_ptr sblockptr; - try { - //ilog( "fetch block ${n}", ("n",blocknum) ); - sblockptr = control.fetch_block_by_number( blocknum ); - } catch ( const fc::exception& e ) { - edump((e.to_detail_string())); - } - - self->_ios.post( boost::asio::bind_executor( - self->_strand, - [callback,sblockptr](){ - callback(sblockptr); - } - )); - }); - } - - void do_hello(); - - - void send( const bnet_message& msg ) { try { - auto ps = fc::raw::pack_size(msg); - _out_buffer.resize(ps); - fc::datastream ds(_out_buffer.data(), ps); - fc::raw::pack(ds, msg); - send(); - } FC_LOG_AND_RETHROW() } - - template - void send( const bnet_message& msg, const T& ex ) { try { - auto ex_size = fc::raw::pack_size(ex); - auto ps = fc::raw::pack_size(msg) + fc::raw::pack_size(unsigned_int(ex_size)) + ex_size; - _out_buffer.resize(ps); - fc::datastream ds(_out_buffer.data(), ps); - fc::raw::pack( ds, msg 
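         // Editor's annotation (not part of the original patch): the wire layout
         // produced here is simply the packed message, then a varint (unsigned_int)
         // holding the extension's packed size, then the packed extension itself.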
); - fc::raw::pack( ds, unsigned_int(ex_size) ); - fc::raw::pack( ds, ex ); - send(); - } FC_LOG_AND_RETHROW() } - - void send() { try { - verify_strand_in_this_thread(_strand, __func__, __LINE__); - - _state = sending_state; - _ws->async_write( boost::asio::buffer(_out_buffer), - boost::asio::bind_executor( - _strand, - std::bind( &session::on_write, - shared_from_this(), - std::placeholders::_1, - std::placeholders::_2 ) ) ); - } FC_LOG_AND_RETHROW() } - - void mark_block_status( const block_id_type& id, bool known_by_peer, bool recv_from_peer ) { - auto itr = _block_status.find(id); - if( itr == _block_status.end() ) { - // optimization to avoid sending blocks to nodes that already know about them - // to avoid unbounded memory growth limit number tracked - const auto min_block_num = std::min( _local_lib, _last_sent_block_num ); - const auto max_block_num = min_block_num + _max_block_status_range; - const auto block_num = block_header::num_from_id( id ); - if( block_num > min_block_num && block_num < max_block_num && _block_status.size() < _max_block_status_range ) - _block_status.insert( block_status( id, known_by_peer, recv_from_peer ) ); - } else { - _block_status.modify( itr, [&]( auto& item ) { - item.known_by_peer = known_by_peer; - if (recv_from_peer) item.received_from_peer = true; - }); - } - } - - /** - * This method will determine whether there is a message in the - * out queue, if so it returns. Otherwise it determines the best - * message to send. - */ - void maybe_send_next_message() { - verify_strand_in_this_thread(_strand, __func__, __LINE__); - if( _state == sending_state ) return; /// in process of sending - if( _out_buffer.size() ) return; /// in process of sending - if( !_recv_remote_hello || !_sent_remote_hello ) return; - - clear_expired_trx(); - - if( send_block_notice() ) return; - if( send_pong() ) return; - if( send_ping() ) return; - - /// we don't know where we are (waiting on accept block localhost) - if( _local_head_block_id == block_id_type() ) return ; - if( send_next_block() ) return; - if( send_next_trx() ) return; - } - - bool send_block_notice() { - if( _block_header_notices.size() == 0 ) - return false; - - block_notice notice; - notice.block_ids.reserve( _block_header_notices.size() ); - for( auto& id : _block_header_notices ) - notice.block_ids.emplace_back(id); - send(notice); - _block_header_notices.clear(); - return true; - } - - bool send_pong() { - if( _last_recv_ping.code == fc::sha256() ) - return false; - - send( pong{ fc::time_point::now(), _last_recv_ping.code } ); - _last_recv_ping.code = fc::sha256(); - return true; - } - - bool send_ping() { - auto delta_t = fc::time_point::now() - _last_sent_ping.sent; - if( delta_t < fc::seconds(3) ) return false; - - if( _last_sent_ping.code == fc::sha256() ) { - _last_sent_ping.sent = fc::time_point::now(); - _last_sent_ping.code = fc::sha256::hash(_last_sent_ping.sent); /// TODO: make this more random - _last_sent_ping.lib = _local_lib; - send( _last_sent_ping ); - } - - /// we expect the peer to send us a ping every 3 seconds, so if we haven't gotten one - /// in the past 6 seconds then the connection is likely hung. Unfortunately, we cannot - /// use the round-trip time of ping/pong to measure latency because during syncing the - /// remote peer can be stuck doing CPU intensive tasks that block its reading of the - /// buffer. This buffer gets filled with perhaps 100 blocks taking .1 seconds each for - /// a total processing time of 10+ seconds. 
That said, the peer should come up for air - /// every .1 seconds so should still be able to send out a ping every 3 seconds. - // - // We don't want to wait a RTT for each block because that could also slow syncing for - // empty blocks... - // - //if( fc::time_point::now() - _last_recv_ping_time > fc::seconds(6) ) { - // do_goodbye( "no ping from peer in last 6 seconds...." ); - //} - return true; - } - - bool is_known_by_peer( block_id_type id ) { - auto itr = _block_status.find(id); - if( itr == _block_status.end() ) return false; - return itr->known_by_peer; - } - - void clear_expired_trx() { - auto& idx = _transaction_status.get(); - auto itr = idx.begin(); - while( itr != idx.end() && itr->expired < fc::time_point::now() ) { - idx.erase(itr); - itr = idx.begin(); - } - } - - bool send_next_trx() { try { - if( !_remote_request_trx ) return false; - - auto& idx = _transaction_status.get(); - auto start = idx.begin(); - if( start == idx.end() || start->known_by_peer() ) - return false; - - - auto ptrx_ptr = start->trx->packed_trx; - - idx.modify( start, [&]( auto& stat ) { - stat.mark_known_by_peer(); - }); - - // wlog("sending trx ${id}", ("id",start->id) ); - send(ptrx_ptr); - - return true; - - } FC_LOG_AND_RETHROW() } - - void on_async_get_block( const signed_block_ptr& nextblock ) { - verify_strand_in_this_thread(_strand, __func__, __LINE__); - if( !nextblock) { - _state = idle_state; - maybe_send_next_message(); - return; - } - - /// if something changed, the next block doesn't link to the last - /// block we sent, local chain must have switched forks - if( nextblock->previous != _last_sent_block_id && _last_sent_block_id != block_id_type() ) { - if( !is_known_by_peer( nextblock->previous ) ) { - _last_sent_block_id = _local_lib_id; - _last_sent_block_num = _local_lib; - _state = idle_state; - maybe_send_next_message(); - return; - } - } - - /// at this point we know the peer can link this block - - auto next_id = nextblock->id(); - - /// if the peer already knows about this block, great no need to - /// send it, mark it as 'sent' and move on. - if( is_known_by_peer( next_id ) ) { - _last_sent_block_id = next_id; - _last_sent_block_num = nextblock->block_num(); - - _state = idle_state; - maybe_send_next_message(); - return; - } - - mark_block_status( next_id, true, false ); - - _last_sent_block_id = next_id; - _last_sent_block_num = nextblock->block_num(); - - send( nextblock ); - status( "sending block " + std::to_string( block_header::num_from_id(next_id) ) ); - - if( nextblock->timestamp > (fc::time_point::now() - fc::seconds(5)) ) { - mark_block_transactions_known_by_peer( nextblock ); - } - } - - /** - * Send the next block after the last block in our current fork that - * we know the remote peer knows. - */ - bool send_next_block() { - - if ( _remote_request_irreversible_only && _last_sent_block_id == _local_lib_id ) { - return false; - } - - if( _last_sent_block_id == _local_head_block_id ) /// we are caught up - return false; - - ///< set sending state because this callback may result in sending a message - _state = sending_state; - async_get_block_num( _last_sent_block_num + 1, - [self=shared_from_this()]( auto sblockptr ) { - self->on_async_get_block( sblockptr ); - }); - - return true; - } - - void on_fail( boost::system::error_code ec, const char* what ) { - try { - verify_strand_in_this_thread(_strand, __func__, __LINE__); - elog( "${w}: ${m}", ("w", what)("m", ec.message() ) ); - _ws->next_layer().close(); - } catch ( ... 
) { - elog( "uncaught exception on close" ); - } - } - - void on_accept( boost::system::error_code ec ) { - if( ec ) { - return on_fail( ec, "accept" ); - } - - do_hello(); - do_read(); - } - - void do_read() { - _ws->async_read( _in_buffer, - boost::asio::bind_executor( - _strand, - std::bind( &session::on_read, - shared_from_this(), - std::placeholders::_1, - std::placeholders::_2))); - } - - void on_read( boost::system::error_code ec, std::size_t bytes_transferred ) { - boost::ignore_unused(bytes_transferred); - - if( ec == ws::error::closed ) - return on_fail( ec, "close on read" ); - - if( ec ) { - return on_fail( ec, "read" );; - } - - try { - auto d = boost::asio::buffer_cast(boost::beast::buffers_front(_in_buffer.data())); - auto s = boost::asio::buffer_size(_in_buffer.data()); - fc::datastream ds(d,s); - - bnet_message msg; - fc::raw::unpack( ds, msg ); - on_message( msg, ds ); - _in_buffer.consume( ds.tellp() ); - - wait_on_app(); - return; - - } catch ( ... ) { - wlog( "close bad payload" ); - } - try { - _ws->close( boost::beast::websocket::close_code::bad_payload ); - } catch ( ... ) { - elog( "uncaught exception on close" ); - } - } - - /** if we just call do_read here then this thread might run ahead of - * the main thread, instead we post an event to main which will then - * post a new read event when ready. - * - * This also keeps the "shared pointer" alive in the callback preventing - * the connection from being closed. - */ - void wait_on_app() { - app().post( priority::medium, [self = shared_from_this()]() { - app().get_io_service().post( boost::asio::bind_executor( self->_strand, [self] { self->do_read(); } ) ); - } ); - } - - void on_message( const bnet_message& msg, fc::datastream& ds ) { - try { - switch( msg.which() ) { - case bnet_message::tag::value: - on( msg.get(), ds ); - break; - case bnet_message::tag::value: - on( msg.get() ); - break; - case bnet_message::tag::value: - on( msg.get() ); - break; - case bnet_message::tag::value: - on( msg.get() ); - break; - case bnet_message::tag::value: - on( msg.get() ); - break; - case bnet_message::tag::value: - on( msg.get() ); - break; - default: - wlog( "bad message received" ); - _ws->close( boost::beast::websocket::close_code::bad_payload ); - return; - } - maybe_send_next_message(); - } catch( const fc::exception& e ) { - elog( "${e}", ("e",e.to_detail_string())); - _ws->close( boost::beast::websocket::close_code::bad_payload ); - } - } - - void on( const block_notice& notice ) { - peer_ilog(this, "received block_notice"); - for( const auto& id : notice.block_ids ) { - status( "received notice " + std::to_string( block_header::num_from_id(id) ) ); - mark_block_status( id, true, false ); - } - } - - void on( const hello& hi, fc::datastream& ds ); - - void on( const ping& p ) { - peer_ilog(this, "received ping"); - _last_recv_ping = p; - _remote_lib = p.lib; - _last_recv_ping_time = fc::time_point::now(); - } - - void on( const pong& p ) { - peer_ilog(this, "received pong"); - if( p.code != _last_sent_ping.code ) { - peer_elog(this, "bad ping : invalid pong code"); - return do_goodbye( "invalid pong code" ); - } - _last_sent_ping.code = fc::sha256(); - } - - void do_goodbye( const string& reason ) { - try { - status( "goodbye - " + reason ); - _ws->next_layer().close(); - } catch ( ... 
) { - elog( "uncaught exception on close" ); - } - } - - void check_for_redundant_connection(); - - void on( const signed_block_ptr& b ) { - peer_ilog(this, "received signed_block_ptr"); - if (!b) { - peer_elog(this, "bad signed_block_ptr : null pointer"); - EOS_THROW(block_validate_exception, "bad block" ); - } - status( "received block " + std::to_string(b->block_num()) ); - //ilog( "recv block ${n}", ("n", b->block_num()) ); - auto id = b->id(); - mark_block_status( id, true, true ); - - app().get_channel().publish(priority::high, b); - - mark_block_transactions_known_by_peer( b ); - } - - void mark_block_transactions_known_by_peer( const signed_block_ptr& b ) { - for( const auto& receipt : b->transactions ) { - if( receipt.trx.which() == 1 ) { - const auto& pt = receipt.trx.get(); - const auto& id = pt.id(); - mark_transaction_known_by_peer(id); - } - } - } - - /** - * @return true if trx is known by local host, false if new to this host - */ - bool mark_transaction_known_by_peer( const transaction_id_type& id ) { - auto itr = _transaction_status.find( id ); - if( itr != _transaction_status.end() ) { - _transaction_status.modify( itr, [&]( auto& stat ) { - stat.mark_known_by_peer(); - }); - return true; - } else { - transaction_status stat; - stat.id = id; - stat.mark_known_by_peer(); - stat.expired = fc::time_point::now()+fc::seconds(5); - _transaction_status.insert(stat); - } - return false; - } - - void on( const packed_transaction_ptr& p ); - - void on_write( boost::system::error_code ec, std::size_t bytes_transferred ) { - boost::ignore_unused(bytes_transferred); - verify_strand_in_this_thread(_strand, __func__, __LINE__); - if( ec ) { - _ws->next_layer().close(); - return on_fail( ec, "write" ); - } - _state = idle_state; - _out_buffer.resize(0); - maybe_send_next_message(); - } - - void status( const string& msg ) { - // ilog( "${remote_peer}: ${msg}", ("remote_peer",fc::variant(_remote_peer_id).as_string().substr(3,5) )("msg",msg) ); - } - - const fc::variant_object& get_logger_variant() { - if (!_logger_variant) { - boost::system::error_code ec; - auto rep = _ws->lowest_layer().remote_endpoint(ec); - string ip = ec ? "" : rep.address().to_string(); - string port = ec ? "" : std::to_string(rep.port()); - - auto lep = _ws->lowest_layer().local_endpoint(ec); - string lip = ec ? "" : lep.address().to_string(); - string lport = ec ? 
"" : std::to_string(lep.port()); - - _logger_variant.emplace(fc::mutable_variant_object() - ("_name", _peer) - ("_id", _remote_peer_id) - ("_ip", ip) - ("_port", port) - ("_lip", lip) - ("_lport", lport) - ); - } - return *_logger_variant; - } - }; - - - /** - * Accepts incoming connections and launches the sessions - */ - class listener : public std::enable_shared_from_this { - private: - tcp::acceptor _acceptor; - tcp::socket _socket; - bnet_ptr _net_plugin; - - public: - listener( boost::asio::io_context& ioc, tcp::endpoint endpoint, bnet_ptr np ) - :_acceptor(ioc), _socket(ioc), _net_plugin(std::move(np)) - { - boost::system::error_code ec; - - _acceptor.open( endpoint.protocol(), ec ); - if( ec ) { on_fail( ec, "open" ); return; } - - _acceptor.set_option( boost::asio::socket_base::reuse_address(true) ); - - _acceptor.bind( endpoint, ec ); - if( ec ) { on_fail( ec, "bind" ); return; } - - _acceptor.listen( boost::asio::socket_base::max_listen_connections, ec ); - if( ec ) on_fail( ec, "listen" ); - } - - void run() { - EOS_ASSERT( _acceptor.is_open(), plugin_exception, "unable top open listen socket" ); - do_accept(); - } - - void do_accept() { - _acceptor.async_accept( _socket, [self=shared_from_this()]( auto ec ){ self->on_accept(ec); } ); - } - - void on_fail( boost::system::error_code ec, const char* what ) { - elog( "${w}: ${m}", ("w", what)("m", ec.message() ) ); - } - - void on_accept( boost::system::error_code ec ); - }; - - - class bnet_plugin_impl : public std::enable_shared_from_this { - public: - bnet_plugin_impl() = default; - - const private_key_type _peer_pk = fc::crypto::private_key::generate(); /// one time random key to identify this process - public_key_type _peer_id = _peer_pk.get_public_key(); - string _bnet_endpoint_address = "0.0.0.0"; - uint16_t _bnet_endpoint_port = 4321; - bool _request_trx = true; - bool _follow_irreversible = false; - - std::vector _connect_to_peers; /// list of peers to connect to - std::vector _socket_threads; - int32_t _num_threads = 1; - - std::unique_ptr _ioc; // lifetime guarded by shared_ptr of bnet_plugin_impl - std::shared_ptr _listener; - std::shared_ptr _timer; // only access on app io_service - std::map > _sessions; // only access on app io_service - - channels::irreversible_block::channel_type::handle _on_irb_handle; - channels::accepted_block::channel_type::handle _on_accepted_block_handle; - channels::accepted_block_header::channel_type::handle _on_accepted_block_header_handle; - channels::rejected_block::channel_type::handle _on_bad_block_handle; - channels::accepted_transaction::channel_type::handle _on_appled_trx_handle; - - void async_add_session( std::weak_ptr wp ) { - app().post(priority::low, [wp,this]{ - if( auto l = wp.lock() ) { - _sessions[l.get()] = wp; - } - }); - } - - void on_session_close( const session* s ) { - auto itr = _sessions.find(s); - if( _sessions.end() != itr ) - _sessions.erase(itr); - } - - template - void for_each_session( Call callback ) { - app().post(priority::low, [this, callback = callback] { - for (const auto& item : _sessions) { - if (auto ses = item.second.lock()) { - ses->_ios.post(boost::asio::bind_executor( - ses->_strand, - [ses, cb = callback]() { cb(ses); } - )); - } - } - }); - } - - void on_accepted_transaction( transaction_metadata_ptr trx ) { - if( trx->implicit || trx->scheduled ) return; - for_each_session( [trx]( auto ses ){ ses->on_accepted_transaction( trx ); } ); - } - - /** - * Notify all active connection of the new irreversible block so they - * can purge their block 
cache - */ - void on_irreversible_block( block_state_ptr s ) { - for_each_session( [s]( auto ses ){ ses->on_new_lib( s ); } ); - } - - /** - * Notify all active connections of the new accepted block so - * they can relay it. This method also pre-packages the block - * as a packed bnet_message so the connections can simply relay - * it on. - */ - void on_accepted_block( block_state_ptr s ) { - _ioc->post( [s,this] { /// post this to the thread pool because packing can be intensive - for_each_session( [s]( auto ses ){ ses->on_accepted_block( s ); } ); - }); - } - - void on_accepted_block_header( block_state_ptr s ) { - _ioc->post( [s,this] { /// post this to the thread pool because packing can be intensive - for_each_session( [s]( auto ses ){ ses->on_accepted_block_header( s ); } ); - }); - } - - /** - * We received a bad block which either - * 1. didn't link to known chain - * 2. violated the consensus rules - * - * Any peer which sent us this block (not noticed) - * should be disconnected as they are objectively bad - */ - void on_bad_block( signed_block_ptr s ) { - for_each_session( [s]( auto ses ) { ses->on_bad_block(s); } ); - }; - - void on_reconnect_peers() { - for( const auto& peer : _connect_to_peers ) { - bool found = false; - for( const auto& con : _sessions ) { - auto ses = con.second.lock(); - if( ses && (ses->_peer == peer) ) { - found = true; - break; - } - } - - if( !found ) { - wlog( "attempt to connect to ${p}", ("p",peer) ); - auto s = std::make_shared( *_ioc, shared_from_this() ); - s->_local_peer_id = _peer_id; - _sessions[s.get()] = s; - s->run( peer ); - } - } - - start_reconnect_timer(); - } - - - void start_reconnect_timer() { - /// add some random delay so that all my peers don't attempt to reconnect to me - /// at the same time after shutting down.. - _timer->expires_from_now( boost::posix_time::microseconds( 1000000*(10+rand()%5) ) ); - _timer->async_wait(app().get_priority_queue().wrap(priority::low, [=](const boost::system::error_code& ec) { - if( ec ) { return; } - on_reconnect_peers(); - })); - } - }; - - - void listener::on_accept( boost::system::error_code ec ) { - if( ec ) { - if( ec == boost::system::errc::too_many_files_open ) - do_accept(); - return; - } - std::shared_ptr newsession; - try { - newsession = std::make_shared( move( _socket ), _net_plugin ); - } - catch( std::exception& e ) { - //making a session creates an instance of std::random_device which may open /dev/urandom - // for example. 
Unfortunately the only defined error is a std::exception derivative
- _socket.close();
- }
- if( newsession ) {
- _net_plugin->async_add_session( newsession );
- newsession->_local_peer_id = _net_plugin->_peer_id;
- newsession->run();
- }
- do_accept();
- }
-
-
- bnet_plugin::bnet_plugin()
- :my(std::make_shared()) {
- }
-
- bnet_plugin::~bnet_plugin() {
- }
-
- void bnet_plugin::set_program_options(options_description& cli, options_description& cfg) {
- cfg.add_options()
- ("bnet-endpoint", bpo::value()->default_value("0.0.0.0:4321"), "the endpoint upon which to listen for incoming connections" )
- ("bnet-follow-irreversible", bpo::value()->default_value(false), "this peer will request only irreversible blocks from other nodes" )
- ("bnet-threads", bpo::value(), "the number of threads to use to process network messages" )
- ("bnet-connect", bpo::value>()->composing(), "remote endpoint of other node to connect to; Use multiple bnet-connect options as needed to compose a network" )
- ("bnet-no-trx", bpo::bool_switch()->default_value(false), "this peer will request no pending transactions from other nodes" )
- ("bnet-peer-log-format", bpo::value()->default_value( "[\"${_name}\" ${_ip}:${_port}]" ),
- "The string used to format peers when logging messages about them. Variables are escaped with ${}.\n"
- "Available Variables:\n"
- " _name \tself-reported name\n\n"
- " _id \tself-reported ID (Public Key)\n\n"
- " _ip \tremote IP address of peer\n\n"
- " _port \tremote port number of peer\n\n"
- " _lip \tlocal IP address connected to peer\n\n"
- " _lport \tlocal port number connected to peer\n\n")
- ;
- }
-
- void bnet_plugin::plugin_initialize(const variables_map& options) {
- ilog( "Initialize bnet plugin" );
-
- try {
- peer_log_format = options.at( "bnet-peer-log-format" ).as();
-
- if( options.count( "bnet-endpoint" )) {
- auto ip_port = options.at( "bnet-endpoint" ).as();
-
- //auto host = boost::asio::ip::host_name(ip_port);
- auto port = ip_port.substr( ip_port.find( ':' ) + 1, ip_port.size());
- auto host = ip_port.substr( 0, ip_port.find( ':' ));
- my->_bnet_endpoint_address = host;
- my->_bnet_endpoint_port = std::stoi( port );
- idump((ip_port)( host )( port )( my->_follow_irreversible ));
- }
- if( options.count( "bnet-follow-irreversible" )) {
- my->_follow_irreversible = options.at( "bnet-follow-irreversible" ).as();
- }
-
-
- if( options.count( "bnet-connect" )) {
- my->_connect_to_peers = options.at( "bnet-connect" ).as>();
- }
- if( options.count( "bnet-threads" )) {
- my->_num_threads = options.at( "bnet-threads" ).as();
- if( my->_num_threads > 8 )
- my->_num_threads = 8;
- }
- my->_request_trx = !options.at( "bnet-no-trx" ).as();
-
- } FC_LOG_AND_RETHROW()
- }
-
- void bnet_plugin::plugin_startup() {
- handle_sighup(); // Sets logger
-
- wlog( "bnet startup " );
-
- auto& chain = app().get_plugin().chain();
- FC_ASSERT ( chain.get_read_mode() != chain::db_read_mode::IRREVERSIBLE, "bnet is not compatible with \"irreversible\" read_mode");
-
- my->_on_appled_trx_handle = app().get_channel()
- .subscribe( [this]( transaction_metadata_ptr t ){
- my->on_accepted_transaction(t);
- });
-
- my->_on_irb_handle = app().get_channel()
- .subscribe( [this]( block_state_ptr s ){
- my->on_irreversible_block(s);
- });
-
- my->_on_accepted_block_handle = app().get_channel()
- .subscribe( [this]( block_state_ptr s ){
- my->on_accepted_block(s);
- });
-
- my->_on_accepted_block_header_handle = app().get_channel()
- .subscribe( [this]( block_state_ptr s ){
- my->on_accepted_block_header(s);
- });
-
- my->_on_bad_block_handle = app().get_channel()
- .subscribe( [this]( signed_block_ptr b ){
- my->on_bad_block(b);
- });
-
-
- if( app().get_plugin().chain().get_read_mode() == chain::db_read_mode::READ_ONLY ) {
- if (my->_request_trx) {
- my->_request_trx = false;
- ilog( "forced bnet-no-trx to true since in read-only mode" );
- }
- }
-
- const auto address = boost::asio::ip::make_address( my->_bnet_endpoint_address );
- my->_ioc.reset( new boost::asio::io_context{my->_num_threads} );
-
-
- auto& ioc = *my->_ioc;
- my->_timer = std::make_shared( app().get_io_service() );
-
- my->start_reconnect_timer();
-
- my->_listener = std::make_shared( ioc,
- tcp::endpoint{ address, my->_bnet_endpoint_port },
- my );
- my->_listener->run();
-
- my->_socket_threads.reserve( my->_num_threads );
- for( auto i = 0; i < my->_num_threads; ++i ) {
- my->_socket_threads.emplace_back( [&ioc, i]{
- std::string tn = "bnet-" + std::to_string( i );
- fc::set_os_thread_name( tn );
- wlog( "start thread" );
- ioc.run();
- wlog( "end thread" );
- } );
- }
-
- for( const auto& peer : my->_connect_to_peers ) {
- auto s = std::make_shared( ioc, my );
- s->_local_peer_id = my->_peer_id;
- my->_sessions[s.get()] = s;
- s->run( peer );
- }
- }
-
- void bnet_plugin::plugin_shutdown() {
- try {
- my->_timer->cancel();
- my->_timer.reset();
- } catch ( ... ) {
- elog( "exception thrown on timer shutdown" );
- }
-
- /// shut down all threads and close all connections
-
- my->for_each_session([](auto ses){
- ses->do_goodbye( "shutting down" );
- });
-
- my->_listener.reset();
- my->_ioc->stop();
-
- wlog( "joining bnet threads" );
- for( auto& t : my->_socket_threads ) {
- t.join();
- }
- wlog( "done joining threads" );
-
- my->for_each_session([](auto ses){
- EOS_ASSERT( false, plugin_exception, "session ${ses} still active", ("ses", ses->_session_num) );
- });
-
- // lifetime of _ioc is guarded by shared_ptr of bnet_plugin_impl
- }
-
- void bnet_plugin::handle_sighup() {
- if(fc::get_logger_map().find(logger_name) != fc::get_logger_map().end())
- plugin_logger = fc::get_logger_map()[logger_name];
- }
-
-
- session::~session() {
- wlog( "close session ${n}",("n",_session_num) );
- std::weak_ptr netp = _net_plugin;
- app().post(priority::low, [netp,ses=this]{
- if( auto net = netp.lock() )
- net->on_session_close(ses);
- });
- }
-
- void session::do_hello() {
- /// TODO: find more efficient way to move large array of ids in event of fork
- async_get_pending_block_ids( [self = shared_from_this() ]( const vector& ids, uint32_t lib ){
- hello hello_msg;
- hello_msg.peer_id = self->_local_peer_id;
- hello_msg.last_irr_block_num = lib;
- hello_msg.pending_block_ids = ids;
- hello_msg.request_transactions = self->_net_plugin->_request_trx;
- hello_msg.chain_id = app().get_plugin().get_chain_id(); // TODO: Quick fix in a rush. Maybe a better solution is needed.
-
- self->_local_lib = lib;
- if ( self->_net_plugin->_follow_irreversible ) {
- self->send( hello_msg, hello_extension(hello_extension_irreversible_only()) );
- } else {
- self->send( hello_msg );
- }
- self->_sent_remote_hello = true;
- });
- }
-
- void session::check_for_redundant_connection() {
- app().post(priority::low, [self=shared_from_this()]{
- self->_net_plugin->for_each_session( [self]( auto ses ){
- if( ses != self && ses->_remote_peer_id == self->_remote_peer_id ) {
- self->do_goodbye( "redundant connection" );
- }
- });
- });
- }
-
- void session::on( const hello& hi, fc::datastream& ds ) {
- peer_ilog(this, "received hello");
- _recv_remote_hello = true;
-
- if( hi.chain_id != app().get_plugin().get_chain_id() ) { // TODO: Quick fix in a rush. Maybe a better solution is needed.
- peer_elog(this, "bad hello : wrong chain id");
- return do_goodbye( "disconnecting due to wrong chain id" );
- }
-
- if( hi.peer_id == _local_peer_id ) {
- return do_goodbye( "connected to self" );
- }
-
- if ( _net_plugin->_follow_irreversible && hi.protocol_version <= "1.0.0") {
- return do_goodbye( "need newer protocol version that supports sending only irreversible blocks" );
- }
-
- if ( hi.protocol_version >= "1.0.1" ) {
- //optional extensions
- while ( 0 < ds.remaining() ) {
- unsigned_int size;
- fc::raw::unpack( ds, size ); // next extension size
- auto ex_start = ds.pos();
- fc::datastream dsw( ex_start, size );
- unsigned_int wich;
- fc::raw::unpack( dsw, wich );
- hello_extension ex;
- if ( wich < ex.count() ) { //known extension
- fc::datastream dsx( ex_start, size ); //unpack needs to read static_variant _tag again
- fc::raw::unpack( dsx, ex );
- if ( ex.which() == hello_extension::tag::value ) {
- _remote_request_irreversible_only = true;
- }
- } else {
- //unsupported extension, we just ignore it
- //the other side knows our protocol version, i.e. it knows which extensions we support
- //so, if some extensions were crucial, the other side will close the connection
- }
- ds.skip(size); //move to next extension
- }
- }
-
- _last_sent_block_num = hi.last_irr_block_num;
- _remote_request_trx = hi.request_transactions;
- _remote_peer_id = hi.peer_id;
- _remote_lib = hi.last_irr_block_num;
-
- for( const auto& id : hi.pending_block_ids )
- mark_block_status( id, true, false );
-
- check_for_redundant_connection();
-
- }
-
- void session::on( const packed_transaction_ptr& p ) {
- peer_ilog(this, "received packed_transaction_ptr");
- if (!p) {
- peer_elog(this, "bad packed_transaction_ptr : null pointer");
- EOS_THROW(transaction_exception, "bad transaction");
- }
- if( !_net_plugin->_request_trx )
- return;
-
- // ilog( "recv trx ${n}", ("n", id) );
- if( p->expiration() < fc::time_point::now() ) return;
-
- const auto& id = p->id();
-
- if( mark_transaction_known_by_peer( id ) )
- return;
-
- auto ptr = std::make_shared(p);
-
- app().get_channel().publish(priority::low, ptr);
- }
-} /// namespace eosio
diff --git a/plugins/bnet_plugin/include/eosio/bnet_plugin/bnet_plugin.hpp b/plugins/bnet_plugin/include/eosio/bnet_plugin/bnet_plugin.hpp
deleted file mode 100644
index 5874f2a28ba..00000000000
--- a/plugins/bnet_plugin/include/eosio/bnet_plugin/bnet_plugin.hpp
+++ /dev/null
@@ -1,55 +0,0 @@
-/**
- * @file
- * @copyright defined in eos/LICENSE
- */
-#pragma once
-#include
-
-#include
-
-namespace fc { class variant; }
-
-namespace eosio {
- using chain::transaction_id_type;
- using std::shared_ptr;
- using namespace appbase;
- using chain::name;
- using fc::optional;
- using chain::uint128_t;
-
- typedef shared_ptr bnet_ptr;
- typedef shared_ptr bnet_const_ptr;
-
-
-
-/**
- * This plugin tracks all actions and keys associated with a set of configured accounts. It enables
- * wallets to paginate queries for bnet.
- *
- * An action will be included in the account's bnet if any of the following:
- * - receiver
- * - any account named in auth list
- *
- * A key will be linked to an account if the key is referenced in authorities of updateauth or newaccount
- */
-class bnet_plugin : public plugin {
- public:
- APPBASE_PLUGIN_REQUIRES((chain_plugin))
-
- bnet_plugin();
- virtual ~bnet_plugin();
-
- virtual void set_program_options(options_description& cli, options_description& cfg) override;
-
- void plugin_initialize(const variables_map& options);
- void plugin_startup();
- void plugin_shutdown();
- void handle_sighup() override;
-
- private:
- bnet_ptr my;
-};
-
-} /// namespace eosio
-
-
diff --git a/programs/eosio-launcher/main.cpp b/programs/eosio-launcher/main.cpp
index 7e6bfbaf7b3..75307e24657 100644
--- a/programs/eosio-launcher/main.cpp
+++ b/programs/eosio-launcher/main.cpp
@@ -327,12 +327,6 @@ struct last_run_def {
 vector running_nodes;
 };
-
-enum class p2p_plugin {
- NET,
- BNET
-};
-
 enum launch_modes {
 LM_NONE,
 LM_LOCAL,
@@ -396,7 +390,6 @@ struct launcher_def {
 size_t producers;
 size_t next_node;
 string shape;
- p2p_plugin p2p;
 allowed_connection allowed_connections = PC_NONE;
 bfs::path genesis;
 bfs::path output;
@@ -488,7 +481,6 @@ launcher_def::set_options (bpo::options_description &cfg) {
 ("producers",bpo::value(&producers)->default_value(21),"total number of non-bios producer instances in this network")
 ("mode,m",bpo::value>()->multitoken()->default_value({"any"}, "any"),"connection mode, combination of \"any\", \"producers\", \"specified\", \"none\"")
 ("shape,s",bpo::value(&shape)->default_value("star"),"network topology, use \"star\" \"mesh\" or give a filename for custom")
- ("p2p-plugin", bpo::value()->default_value("net"),"select a p2p plugin to use (either net or bnet). Defaults to net.")
 ("genesis,g",bpo::value()->default_value("./genesis.json"),"set the path to genesis.json")
 ("skip-signature", bpo::bool_switch(&skip_transaction_signatures)->default_value(false), "nodeos does not require transaction signatures.")
 ("nodeos", bpo::value(&eosd_extra_args), "forward nodeos command line argument(s) to each instance of nodeos, enclose arg(s) in quotes")
@@ -597,20 +589,6 @@ launcher_def::initialize (const variables_map &vmap) {
 host_map_file = src.stem().string() + "_hosts.json";
 }
-
- string nc = vmap["p2p-plugin"].as();
- if ( !nc.empty() ) {
- if (boost::iequals(nc,"net"))
- p2p = p2p_plugin::NET;
- else if (boost::iequals(nc,"bnet"))
- p2p = p2p_plugin::BNET;
- else {
- p2p = p2p_plugin::NET;
- }
- }
- else {
- p2p = p2p_plugin::NET;
- }
-
 if( !host_map_file.empty() ) {
 try {
 fc::json::from_file(host_map_file).as>(bindings);
@@ -1107,14 +1085,9 @@ launcher_def::write_config_file (tn_node_def &node) {
 cfg << "blocks-dir = " << block_dir << "\n";
 cfg << "http-server-address = " << host->host_name << ":" << instance.http_port << "\n";
 cfg << "http-validate-host = false\n";
- if (p2p == p2p_plugin::NET) {
- cfg << "p2p-listen-endpoint = " << host->listen_addr << ":" << instance.p2p_port << "\n";
- cfg << "p2p-server-address = " << host->public_name << ":" << instance.p2p_port << "\n";
- } else {
- cfg << "bnet-endpoint = " << host->listen_addr << ":" << instance.p2p_port << "\n";
- // Include the net_plugin endpoint, because the plugin is always loaded (even if not used).
- cfg << "p2p-listen-endpoint = " << host->listen_addr << ":" << instance.p2p_port + 1000 << "\n"; - } + cfg << "p2p-listen-endpoint = " << host->listen_addr << ":" << instance.p2p_port << "\n"; + cfg << "p2p-server-address = " << host->public_name << ":" << instance.p2p_port << "\n"; + if (is_bios) { cfg << "enable-stale-production = true\n"; @@ -1140,18 +1113,10 @@ launcher_def::write_config_file (tn_node_def &node) { if(!is_bios) { auto &bios_node = network.nodes["bios"]; - if (p2p == p2p_plugin::NET) { - cfg << "p2p-peer-address = " << bios_node.instance->p2p_endpoint<< "\n"; - } else { - cfg << "bnet-connect = " << bios_node.instance->p2p_endpoint<< "\n"; - } + cfg << "p2p-peer-address = " << bios_node.instance->p2p_endpoint<< "\n"; } for (const auto &p : node.peers) { - if (p2p == p2p_plugin::NET) { - cfg << "p2p-peer-address = " << network.nodes.find(p)->second.instance->p2p_endpoint << "\n"; - } else { - cfg << "bnet-connect = " << network.nodes.find(p)->second.instance->p2p_endpoint << "\n"; - } + cfg << "p2p-peer-address = " << network.nodes.find(p)->second.instance->p2p_endpoint << "\n"; } if (instance.has_db || node.producers.size()) { for (const auto &kp : node.keys ) { @@ -1166,11 +1131,7 @@ launcher_def::write_config_file (tn_node_def &node) { if( instance.has_db ) { cfg << "plugin = eosio::mongo_db_plugin\n"; } - if ( p2p == p2p_plugin::NET ) { - cfg << "plugin = eosio::net_plugin\n"; - } else { - cfg << "plugin = eosio::bnet_plugin\n"; - } + cfg << "plugin = eosio::net_plugin\n"; cfg << "plugin = eosio::chain_api_plugin\n" << "plugin = eosio::history_api_plugin\n"; cfg.close(); diff --git a/programs/nodeos/CMakeLists.txt b/programs/nodeos/CMakeLists.txt index d9fb90ee45d..d5fe8273eb5 100644 --- a/programs/nodeos/CMakeLists.txt +++ b/programs/nodeos/CMakeLists.txt @@ -52,7 +52,6 @@ target_link_libraries( ${NODE_EXECUTABLE_NAME} PRIVATE -Wl,${whole_archive_flag} login_plugin -Wl,${no_whole_archive_flag} PRIVATE -Wl,${whole_archive_flag} history_plugin -Wl,${no_whole_archive_flag} PRIVATE -Wl,${whole_archive_flag} state_history_plugin -Wl,${no_whole_archive_flag} - PRIVATE -Wl,${whole_archive_flag} bnet_plugin -Wl,${no_whole_archive_flag} PRIVATE -Wl,${whole_archive_flag} history_api_plugin -Wl,${no_whole_archive_flag} PRIVATE -Wl,${whole_archive_flag} chain_api_plugin -Wl,${no_whole_archive_flag} PRIVATE -Wl,${whole_archive_flag} net_plugin -Wl,${no_whole_archive_flag} diff --git a/programs/nodeos/logging.json b/programs/nodeos/logging.json index 0b02060b82e..07771457d72 100644 --- a/programs/nodeos/logging.json +++ b/programs/nodeos/logging.json @@ -64,15 +64,6 @@ "stderr", "net" ] - },{ - "name": "bnet_plugin", - "level": "debug", - "enabled": true, - "additivity": false, - "appenders": [ - "stderr", - "net" - ] },{ "name": "producer_plugin", "level": "debug", From 5e0c848986d1463c6ccbd6650aaf2adc10c259c4 Mon Sep 17 00:00:00 2001 From: Adam Mitz Date: Tue, 16 Apr 2019 09:42:46 -0500 Subject: [PATCH 395/680] eosio_install.sh: use CMake's cache to find the install prefix (only impacts display to user); remove unused vars --- scripts/eosio_install.sh | 24 ++++++++---------------- 1 file changed, 8 insertions(+), 16 deletions(-) diff --git a/scripts/eosio_install.sh b/scripts/eosio_install.sh index a858fd63430..ee2aba76e4f 100755 --- a/scripts/eosio_install.sh +++ b/scripts/eosio_install.sh @@ -6,19 +6,19 @@ # Copyright (c) 2017, Respective Authors all rights reserved. 
# # After June 1, 2018 this software is available under the following terms: -# +# # The MIT License -# +# # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: -# +# # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. -# +# # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE @@ -34,16 +34,6 @@ SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" REPO_ROOT="${SCRIPT_DIR}/.." BUILD_DIR="${REPO_ROOT}/build" -OPT_LOCATION=$HOME/opt -BIN_LOCATION=$HOME/bin -LIB_LOCATION=$HOME/lib -mkdir -p $LIB_LOCATION - -CMAKE_BUILD_TYPE=Release -TIME_BEGIN=$( date -u +%s ) -INSTALL_PREFIX=$OPT_LOCATION/eosio -VERSION=1.2 - txtbld=$(tput bold) bldred=${txtbld}$(tput setaf 1) txtrst=$(tput sgr0) @@ -58,11 +48,13 @@ if ! pushd "${BUILD_DIR}" &> /dev/null;then exit 1; fi +CMAKE_INSTALL_PREFIX=$(grep ^CMAKE_INSTALL_PREFIX: CMakeCache.txt | sed 's/.*=//') + if ! make install; then printf "\\nMAKE installing EOSIO has exited with the above error.\\n\\n" exit -1 fi -popd &> /dev/null +popd &> /dev/null printf "\n${bldred} ___ ___ ___ ___\n" printf " / /\\ / /\\ / /\\ ___ / /\\ \n" @@ -77,7 +69,7 @@ printf " \\ \\::/ \\ \\::/ /__/:/ \\__\\/ \\ \\:: printf " \\__\\/ \\__\\/ \\__\\/ \\__\\/ \n\n${txtrst}" printf "==============================================================================================\\n" -printf "EOSIO has been installed into ${OPT_LOCATION}/eosio/bin!\\n" +printf "EOSIO has been installed into ${CMAKE_INSTALL_PREFIX}/bin!\\n" printf "If you need to, you can uninstall using: ./scripts/full_uninstaller.sh (it will leave your data directory).\\n" printf "==============================================================================================\\n\\n" From ab2ae5f34ae19d4bf24ae7aa09e21f586c32c5dc Mon Sep 17 00:00:00 2001 From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com> Date: Tue, 16 Apr 2019 11:15:14 -0400 Subject: [PATCH 396/680] Remove bnet support for python test scripts --- tests/Cluster.py | 6 +++--- tests/TestHelper.py | 2 -- tests/distributed-transactions-test.py | 5 ++--- tests/launcher_test.py | 5 ++--- tests/nodeos_forked_chain_test.py | 5 ++--- tests/nodeos_run_test.py | 5 ++--- tests/nodeos_startup_catchup.py | 5 ++--- tests/nodeos_voting_test.py | 5 ++--- tests/restart-scenarios-test.py | 5 ++--- 9 files changed, 17 insertions(+), 26 deletions(-) diff --git a/tests/Cluster.py b/tests/Cluster.py index 77012324a66..1754b441d83 100644 --- a/tests/Cluster.py +++ b/tests/Cluster.py @@ -127,7 +127,7 @@ def setAlternateVersionLabels(self, file): # pylint: disable=too-many-return-statements # pylint: disable=too-many-branches # pylint: disable=too-many-statements - def launch(self, pnodes=1, unstartedNodes=0, totalNodes=1, prodCount=1, topo="mesh", p2pPlugin="net", delay=1, onlyBios=False, dontBootstrap=False, + def launch(self, pnodes=1, unstartedNodes=0, totalNodes=1, prodCount=1, topo="mesh", delay=1, onlyBios=False, 
dontBootstrap=False, totalProducers=None, extraNodeosArgs=None, useBiosBootFile=True, specificExtraNodeosArgs=None, alternateVersionLabelsFile=None, associatedNodeLabels=None, loadSystemContract=True): """Launch cluster. @@ -191,9 +191,9 @@ def launch(self, pnodes=1, unstartedNodes=0, totalNodes=1, prodCount=1, topo="me tries = tries - 1 time.sleep(2) - cmd="%s -p %s -n %s -d %s -i %s -f --p2p-plugin %s %s --unstarted-nodes %s" % ( + cmd="%s -p %s -n %s -d %s -i %s -f %s --unstarted-nodes %s" % ( Utils.EosLauncherPath, pnodes, totalNodes, delay, datetime.datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S.%f")[:-3], - p2pPlugin, producerFlag, unstartedNodes) + producerFlag, unstartedNodes) cmdArr=cmd.split() if self.staging: cmdArr.append("--nogen") diff --git a/tests/TestHelper.py b/tests/TestHelper.py index 768fccef890..6d68b63e9e6 100644 --- a/tests/TestHelper.py +++ b/tests/TestHelper.py @@ -65,8 +65,6 @@ def parse_args(includeArgs, applicationSpecificArgs=AppArgs()): default=Utils.SigKillTag) if "--kill-count" in includeArgs: parser.add_argument("--kill-count", type=int, help="nodeos instances to kill", default=-1) - if "--p2p-plugin" in includeArgs: - parser.add_argument("--p2p-plugin", choices=["net", "bnet"], help="select a p2p plugin to use. Defaults to net.", default="net") if "--seed" in includeArgs: parser.add_argument("--seed", type=int, help="random seed", default=1) diff --git a/tests/distributed-transactions-test.py b/tests/distributed-transactions-test.py index 2ea4edfe462..9a02d5e6de4 100755 --- a/tests/distributed-transactions-test.py +++ b/tests/distributed-transactions-test.py @@ -10,7 +10,7 @@ Print=Utils.Print errorExit=Utils.errorExit -args=TestHelper.parse_args({"-p","-n","-d","-s","--nodes-file","--seed","--p2p-plugin" +args=TestHelper.parse_args({"-p","-n","-d","-s","--nodes-file","--seed" ,"--dump-error-details","-v","--leave-running","--clean-run","--keep-logs"}) pnodes=args.p @@ -25,7 +25,6 @@ dumpErrorDetails=args.dump_error_details killAll=args.clean_run keepLogs=args.keep_logs -p2pPlugin=args.p2p_plugin killWallet=not dontKill killEosInstances=not dontKill @@ -63,7 +62,7 @@ (pnodes, total_nodes-pnodes, topo, delay)) Print("Stand up cluster") - if cluster.launch(pnodes=pnodes, totalNodes=total_nodes, topo=topo, delay=delay, p2pPlugin=p2pPlugin) is False: + if cluster.launch(pnodes=pnodes, totalNodes=total_nodes, topo=topo, delay=delay) is False: errorExit("Failed to stand up eos cluster.") Print ("Wait for Cluster stabilization") diff --git a/tests/launcher_test.py b/tests/launcher_test.py index b7d21b41179..bac5ed447a2 100755 --- a/tests/launcher_test.py +++ b/tests/launcher_test.py @@ -21,7 +21,7 @@ from core_symbol import CORE_SYMBOL args = TestHelper.parse_args({"--defproducera_prvt_key","--dump-error-details","--dont-launch","--keep-logs", - "-v","--leave-running","--clean-run","--p2p-plugin"}) + "-v","--leave-running","--clean-run"}) debug=args.v defproduceraPrvtKey=args.defproducera_prvt_key dumpErrorDetails=args.dump_error_details @@ -29,7 +29,6 @@ dontLaunch=args.dont_launch dontKill=args.leave_running killAll=args.clean_run -p2pPlugin=args.p2p_plugin Utils.Debug=debug cluster=Cluster(walletd=True, defproduceraPrvtKey=defproduceraPrvtKey) @@ -53,7 +52,7 @@ cluster.cleanup() Print("Stand up cluster") pnodes=4 - if cluster.launch(pnodes=pnodes, totalNodes=pnodes, p2pPlugin=p2pPlugin) is False: + if cluster.launch(pnodes=pnodes, totalNodes=pnodes) is False: cmdError("launcher") errorExit("Failed to stand up eos cluster.") else: diff --git 
a/tests/nodeos_forked_chain_test.py b/tests/nodeos_forked_chain_test.py index a7f2c777e3c..aa935931c06 100755 --- a/tests/nodeos_forked_chain_test.py +++ b/tests/nodeos_forked_chain_test.py @@ -114,7 +114,7 @@ def getMinHeadAndLib(prodNodes): args = TestHelper.parse_args({"--prod-count","--dump-error-details","--keep-logs","-v","--leave-running","--clean-run", - "--p2p-plugin","--wallet-port"}) + "--wallet-port"}) Utils.Debug=args.v totalProducerNodes=2 totalNonProducerNodes=1 @@ -127,7 +127,6 @@ def getMinHeadAndLib(prodNodes): dontKill=args.leave_running prodCount=args.prod_count killAll=args.clean_run -p2pPlugin=args.p2p_plugin walletPort=args.wallet_port walletMgr=WalletMgr(True, port=walletPort) @@ -156,7 +155,7 @@ def getMinHeadAndLib(prodNodes): # and the only connection between those 2 groups is through the bridge node if cluster.launch(prodCount=prodCount, onlyBios=False, topo="bridge", pnodes=totalProducerNodes, - totalNodes=totalNodes, totalProducers=totalProducers, p2pPlugin=p2pPlugin, + totalNodes=totalNodes, totalProducers=totalProducers, useBiosBootFile=False, specificExtraNodeosArgs=specificExtraNodeosArgs) is False: Utils.cmdError("launcher") Utils.errorExit("Failed to stand up eos cluster.") diff --git a/tests/nodeos_run_test.py b/tests/nodeos_run_test.py index 1db83f7f692..fbe7f8f6d05 100755 --- a/tests/nodeos_run_test.py +++ b/tests/nodeos_run_test.py @@ -23,7 +23,7 @@ args = TestHelper.parse_args({"--host","--port","--prod-count","--defproducera_prvt_key","--defproducerb_prvt_key","--mongodb" ,"--dump-error-details","--dont-launch","--keep-logs","-v","--leave-running","--only-bios","--clean-run" - ,"--sanity-test","--p2p-plugin","--wallet-port"}) + ,"--sanity-test","--wallet-port"}) server=args.host port=args.port debug=args.v @@ -38,7 +38,6 @@ onlyBios=args.only_bios killAll=args.clean_run sanityTest=args.sanity_test -p2pPlugin=args.p2p_plugin walletPort=args.wallet_port Utils.Debug=debug @@ -68,7 +67,7 @@ cluster.killall(allInstances=killAll) cluster.cleanup() Print("Stand up cluster") - if cluster.launch(prodCount=prodCount, onlyBios=onlyBios, dontBootstrap=dontBootstrap, p2pPlugin=p2pPlugin) is False: + if cluster.launch(prodCount=prodCount, onlyBios=onlyBios, dontBootstrap=dontBootstrap) is False: cmdError("launcher") errorExit("Failed to stand up eos cluster.") else: diff --git a/tests/nodeos_startup_catchup.py b/tests/nodeos_startup_catchup.py index 03a55936385..f2954f3d83c 100755 --- a/tests/nodeos_startup_catchup.py +++ b/tests/nodeos_startup_catchup.py @@ -37,7 +37,7 @@ extraArgs = appArgs.add(flag="--catchup-count", type=int, help="How many catchup-nodes to launch", default=10) extraArgs = appArgs.add(flag="--txn-gen-nodes", type=int, help="How many transaction generator nodes", default=2) args = TestHelper.parse_args({"--prod-count","--dump-error-details","--keep-logs","-v","--leave-running","--clean-run", - "-p","--p2p-plugin","--wallet-port"}, applicationSpecificArgs=appArgs) + "-p","--wallet-port"}, applicationSpecificArgs=appArgs) Utils.Debug=args.v pnodes=args.p if args.p > 0 else 1 startedNonProdNodes = args.txn_gen_nodes if args.txn_gen_nodes >= 2 else 2 @@ -47,7 +47,6 @@ dontKill=args.leave_running prodCount=args.prod_count if args.prod_count > 1 else 2 killAll=args.clean_run -p2pPlugin=args.p2p_plugin walletPort=args.wallet_port catchupCount=args.catchup_count if args.catchup_count > 0 else 1 totalNodes=startedNonProdNodes+pnodes+catchupCount @@ -71,7 +70,7 @@ for nodeNum in range(txnGenNodeNum, txnGenNodeNum+startedNonProdNodes): 
specificExtraNodeosArgs[nodeNum]="--plugin eosio::txn_test_gen_plugin --txn-test-gen-account-prefix txntestacct" Print("Stand up cluster") - if cluster.launch(prodCount=prodCount, onlyBios=False, pnodes=pnodes, totalNodes=totalNodes, totalProducers=pnodes*prodCount, p2pPlugin=p2pPlugin, + if cluster.launch(prodCount=prodCount, onlyBios=False, pnodes=pnodes, totalNodes=totalNodes, totalProducers=pnodes*prodCount, useBiosBootFile=False, specificExtraNodeosArgs=specificExtraNodeosArgs, unstartedNodes=catchupCount, loadSystemContract=False) is False: Utils.errorExit("Failed to stand up eos cluster.") diff --git a/tests/nodeos_voting_test.py b/tests/nodeos_voting_test.py index ee728962b9f..fad398a860a 100755 --- a/tests/nodeos_voting_test.py +++ b/tests/nodeos_voting_test.py @@ -144,7 +144,7 @@ def verifyProductionRounds(trans, node, prodsActive, rounds): from core_symbol import CORE_SYMBOL args = TestHelper.parse_args({"--prod-count","--dump-error-details","--keep-logs","-v","--leave-running","--clean-run", - "--p2p-plugin","--wallet-port"}) + "--wallet-port"}) Utils.Debug=args.v totalNodes=4 cluster=Cluster(walletd=True) @@ -153,7 +153,6 @@ def verifyProductionRounds(trans, node, prodsActive, rounds): dontKill=args.leave_running prodCount=args.prod_count killAll=args.clean_run -p2pPlugin=args.p2p_plugin walletPort=args.wallet_port walletMgr=WalletMgr(True, port=walletPort) @@ -171,7 +170,7 @@ def verifyProductionRounds(trans, node, prodsActive, rounds): cluster.killall(allInstances=killAll) cluster.cleanup() Print("Stand up cluster") - if cluster.launch(prodCount=prodCount, onlyBios=False, pnodes=totalNodes, totalNodes=totalNodes, totalProducers=totalNodes*21, p2pPlugin=p2pPlugin, useBiosBootFile=False) is False: + if cluster.launch(prodCount=prodCount, onlyBios=False, pnodes=totalNodes, totalNodes=totalNodes, totalProducers=totalNodes*21, useBiosBootFile=False) is False: Utils.cmdError("launcher") Utils.errorExit("Failed to stand up eos cluster.") diff --git a/tests/restart-scenarios-test.py b/tests/restart-scenarios-test.py index 894a7d0d271..8d0f8721c10 100755 --- a/tests/restart-scenarios-test.py +++ b/tests/restart-scenarios-test.py @@ -26,7 +26,7 @@ Print=Utils.Print errorExit=Utils.errorExit -args=TestHelper.parse_args({"-p","-d","-s","-c","--kill-sig","--kill-count","--keep-logs","--p2p-plugin" +args=TestHelper.parse_args({"-p","-d","-s","-c","--kill-sig","--kill-count","--keep-logs" ,"--dump-error-details","-v","--leave-running","--clean-run"}) pnodes=args.p topo=args.s @@ -40,7 +40,6 @@ dumpErrorDetails=args.dump_error_details keepLogs=args.keep_logs killAll=args.clean_run -p2pPlugin=args.p2p_plugin seed=1 Utils.Debug=debug @@ -66,7 +65,7 @@ pnodes, topo, delay, chainSyncStrategyStr)) Print("Stand up cluster") - if cluster.launch(pnodes=pnodes, totalNodes=total_nodes, topo=topo, delay=delay, p2pPlugin=p2pPlugin) is False: + if cluster.launch(pnodes=pnodes, totalNodes=total_nodes, topo=topo, delay=delay) is False: errorExit("Failed to stand up eos cluster.") Print ("Wait for Cluster stabilization") From 8b336a2a8cebffab9352e50b97ac24b14b13cd5f Mon Sep 17 00:00:00 2001 From: Nathan Pierce Date: Tue, 16 Apr 2019 12:03:30 -0400 Subject: [PATCH 397/680] Artifact check LRT (#7138) --- .buildkite/long_running_tests.yml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/.buildkite/long_running_tests.yml b/.buildkite/long_running_tests.yml index c242d219b0e..0189d3f5f54 100644 --- a/.buildkite/long_running_tests.yml +++ b/.buildkite/long_running_tests.yml @@ -3,7 
+3,7 @@ steps: echo "+++ :hammer: Building" ./scripts/eosio_build.sh -y echo "--- :compression: Compressing build directory" - tar -pczf build.tar.gz build/ + tar -pczf build.tar.gz build/; if [ ! -f build.tar.gz ]; then echo "No TAR Found!" && exit 1; fi label: ":ubuntu: 16.04 Build" agents: queue: "automation-large-builder-fleet" @@ -24,7 +24,7 @@ steps: echo "+++ :hammer: Building" ./scripts/eosio_build.sh -y echo "--- :compression: Compressing build directory" - tar -pczf build.tar.gz build/ + tar -pczf build.tar.gz build/; if [ ! -f build.tar.gz ]; then echo "No TAR Found!" && exit 1; fi label: ":ubuntu: 18.04 Build" agents: queue: "automation-large-builder-fleet" @@ -45,7 +45,7 @@ steps: echo "+++ :hammer: Building" ./scripts/eosio_build.sh -y echo "--- :compression: Compressing build directory" - tar -pczf build.tar.gz build/ + tar -pczf build.tar.gz build/; if [ ! -f build.tar.gz ]; then echo "No TAR Found!" && exit 1; fi label: ":centos: 7 Build" agents: queue: "automation-large-builder-fleet" @@ -66,7 +66,7 @@ steps: echo "+++ :hammer: Building" ./scripts/eosio_build.sh -y echo "--- :compression: Compressing build directory" - tar -pczf build.tar.gz build/ + tar -pczf build.tar.gz build/; if [ ! -f build.tar.gz ]; then echo "No TAR Found!" && exit 1; fi label: ":aws: 2 Build" agents: queue: "automation-large-builder-fleet" @@ -89,7 +89,7 @@ steps: echo "+++ Building :hammer:" ./scripts/eosio_build.sh -y echo "--- Compressing build directory :compression:" - tar -pczf build.tar.gz build/ + tar -pczf build.tar.gz build/; if [ ! -f build.tar.gz ]; then echo "No TAR Found!" && exit 1; fi label: ":darwin: Mojave Build" agents: - "role=builder-v2-1" From a7319087cbc0447a4d3bb4513e10458b441d230f Mon Sep 17 00:00:00 2001 From: Adam Mitz Date: Tue, 16 Apr 2019 12:05:18 -0500 Subject: [PATCH 398/680] cover the case where CMake is built from source and -p option is used --- scripts/eosio_build.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/eosio_build.sh b/scripts/eosio_build.sh index 5c05d1c3c76..32395919051 100755 --- a/scripts/eosio_build.sh +++ b/scripts/eosio_build.sh @@ -208,7 +208,7 @@ export CMAKE=$(command -v cmake 2>/dev/null) if [ "$ARCH" == "Linux" ]; then # Check if cmake is already installed or not and use source install location - if [ -z $CMAKE ]; then export CMAKE=$HOME/bin/cmake; fi + if [ -z $CMAKE ]; then export CMAKE=$PREFIX/bin/cmake; fi export OS_NAME=$( cat /etc/os-release | grep ^NAME | cut -d'=' -f2 | sed 's/\"//gI' ) OPENSSL_ROOT_DIR=/usr/include/openssl if [ ! 
-e /etc/os-release ]; then From 81c3151aa6c3524022c9f29bca867878c01f21e2 Mon Sep 17 00:00:00 2001 From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com> Date: Tue, 16 Apr 2019 13:36:15 -0400 Subject: [PATCH 399/680] bump chainbase --- libraries/chainbase | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/chainbase b/libraries/chainbase index 8a153c42842..118c513436e 160000 --- a/libraries/chainbase +++ b/libraries/chainbase @@ -1 +1 @@ -Subproject commit 8a153c428429a62ce727814a1ba04d3fcdc2bc83 +Subproject commit 118c513436e1310d8e1395303c964430f26b0bb4 From b235020679c492aa736bc14c2c92aa73d68730f9 Mon Sep 17 00:00:00 2001 From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com> Date: Tue, 16 Apr 2019 15:11:24 -0400 Subject: [PATCH 400/680] tweaks to code_object/index usage from review --- libraries/chain/eosio_contract.cpp | 4 ++-- libraries/chain/include/eosio/chain/code_object.hpp | 1 + .../include/eosio/chain/wasm_interface_private.hpp | 4 ++-- .../state_history_serialization.hpp | 11 +++++++---- .../state_history_plugin/state_history_plugin_abi.cpp | 11 ++++++++--- 5 files changed, 20 insertions(+), 11 deletions(-) diff --git a/libraries/chain/eosio_contract.cpp b/libraries/chain/eosio_contract.cpp index e2b3e6546be..9b6b157984a 100644 --- a/libraries/chain/eosio_contract.cpp +++ b/libraries/chain/eosio_contract.cpp @@ -154,7 +154,7 @@ void apply_eosio_setcode(apply_context& context) { int64_t new_size = code_size * config::setcode_ram_bytes_multiplier; if( existing_code ) { - const code_object& old_code_entry = db.get(account.code_hash); + const code_object& old_code_entry = db.get(boost::make_tuple(account.code_hash, account.vm_type, account.vm_version)); EOS_ASSERT( old_code_entry.code_hash != code_hash, set_exact_code, "contract is already running this version of code" ); int64_t old_size = (int64_t)old_code_entry.code.size() * config::setcode_ram_bytes_multiplier; @@ -179,7 +179,7 @@ void apply_eosio_setcode(apply_context& context) { o.code_hash = code_hash; o.code.assign(act.code.data(), code_size); o.code_ref_count = 1; - o.first_block_used = context.control.head_block_num(); + o.first_block_used = context.control.head_block_num() + 1; o.vm_type = act.vmtype; o.vm_version = act.vmversion; }); diff --git a/libraries/chain/include/eosio/chain/code_object.hpp b/libraries/chain/include/eosio/chain/code_object.hpp index b50835b5293..e8789c1612b 100644 --- a/libraries/chain/include/eosio/chain/code_object.hpp +++ b/libraries/chain/include/eosio/chain/code_object.hpp @@ -4,6 +4,7 @@ */ #pragma once #include +#include #include "multi_index_includes.hpp" diff --git a/libraries/chain/include/eosio/chain/wasm_interface_private.hpp b/libraries/chain/include/eosio/chain/wasm_interface_private.hpp index c84287f1239..362a29e19f0 100644 --- a/libraries/chain/include/eosio/chain/wasm_interface_private.hpp +++ b/libraries/chain/include/eosio/chain/wasm_interface_private.hpp @@ -71,7 +71,7 @@ namespace eosio { namespace chain { boost::make_tuple(code_hash, vm_type, vm_version) ); const code_object* codeobject = nullptr; if(it == wasm_instantiation_cache.end()) { - codeobject = db.find(boost::make_tuple(code_hash, vm_type, vm_version)); + codeobject = &db.get(boost::make_tuple(code_hash, vm_type, vm_version)); it = wasm_instantiation_cache.emplace( wasm_interface_impl::wasm_cache_entry{ .code_hash = code_hash, @@ -85,7 +85,7 @@ namespace eosio { namespace chain { if(!it->module) { if(!codeobject) - codeobject = 
db.find(boost::make_tuple(code_hash, vm_type, vm_version)); + codeobject = &db.get(boost::make_tuple(code_hash, vm_type, vm_version)); auto timer_pause = fc::make_scoped_exit([&](){ trx_context.resume_billing_timer(); diff --git a/plugins/state_history_plugin/include/eosio/state_history_plugin/state_history_serialization.hpp b/plugins/state_history_plugin/include/eosio/state_history_plugin/state_history_serialization.hpp index 8cb026bbdac..6ecf28e05c5 100644 --- a/plugins/state_history_plugin/include/eosio/state_history_plugin/state_history_serialization.hpp +++ b/plugins/state_history_plugin/include/eosio/state_history_plugin/state_history_serialization.hpp @@ -117,10 +117,13 @@ datastream& operator<<(datastream& ds, const history_serial_wrapper(obj.obj.name.value)); fc::raw::pack(ds, as_type(obj.obj.is_privileged())); fc::raw::pack(ds, as_type(obj.obj.last_code_update)); - fc::raw::pack(ds, as_type(obj.obj.vm_type)); - fc::raw::pack(ds, as_type(obj.obj.vm_version)); - fc::raw::pack(ds, as_type(obj.obj.code_hash)); - + bool has_code = obj.obj.code_hash != eosio::chain::digest_type(); + fc::raw::pack(ds, has_code); + if(has_code) { + fc::raw::pack(ds, as_type(obj.obj.vm_type)); + fc::raw::pack(ds, as_type(obj.obj.vm_version)); + fc::raw::pack(ds, as_type(obj.obj.code_hash)); + } return ds; } diff --git a/plugins/state_history_plugin/state_history_plugin_abi.cpp b/plugins/state_history_plugin/state_history_plugin_abi.cpp index 2ad97e26445..6154b65ee54 100644 --- a/plugins/state_history_plugin/state_history_plugin_abi.cpp +++ b/plugins/state_history_plugin/state_history_plugin_abi.cpp @@ -187,6 +187,13 @@ extern const char* const state_history_plugin_abi = R"({ { "name": "transaction_extensions", "type": "extension[]" } ] }, + { + "name": "code_id", "fields": [ + { "type": "uint8", "name": "vm_type" }, + { "type": "uint8", "name": "vm_version" }, + { "type": "checksum256", "name": "code_hash" } + ] + }, { "name": "account_v0", "fields": [ { "type": "name", "name": "name" }, @@ -199,9 +206,7 @@ extern const char* const state_history_plugin_abi = R"({ { "type": "name", "name": "name" }, { "type": "bool", "name": "privileged" }, { "type": "time_point", "name": "last_code_update" }, - { "type": "uint8", "name": "vm_type" }, - { "type": "uint8", "name": "vm_version" }, - { "type": "checksum256", "name": "code_hash" } + { "type": "code_id?", "name": "code" } ] }, { From 45184012b41036abb88f0577d8cb501e846aac62 Mon Sep 17 00:00:00 2001 From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com> Date: Tue, 16 Apr 2019 16:26:26 -0400 Subject: [PATCH 401/680] Remove reservation of code_object at index 0 --- libraries/chain/controller.cpp | 2 -- 1 file changed, 2 deletions(-) diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp index 8aa44689dd9..f4567d2a963 100644 --- a/libraries/chain/controller.cpp +++ b/libraries/chain/controller.cpp @@ -902,8 +902,6 @@ struct controller_impl { authorization.initialize_database(); resource_limits.initialize_database(); - db.create([](auto&){}); // reserve 0 code_id (used in account_metadata_object to indicate no code) - authority system_auth(conf.genesis.initial_key); create_native_account( config::system_account_name, system_auth, system_auth, true ); From c2a39e09ecee64d34427e82a6ff5a68df6319d6b Mon Sep 17 00:00:00 2001 From: Todd Fleming Date: Tue, 16 Apr 2019 16:39:32 -0400 Subject: [PATCH 402/680] ship: reduce spam --- .../state_history_serialization.hpp | 9 +++++---- .../state_history_plugin/state_history_plugin.cpp | 15 
+++++++++++++++ .../state_history_plugin_abi.cpp | 1 - 3 files changed, 20 insertions(+), 5 deletions(-) diff --git a/plugins/state_history_plugin/include/eosio/state_history_plugin/state_history_serialization.hpp b/plugins/state_history_plugin/include/eosio/state_history_plugin/state_history_serialization.hpp index 6ecf28e05c5..61b9662d4ed 100644 --- a/plugins/state_history_plugin/include/eosio/state_history_plugin/state_history_serialization.hpp +++ b/plugins/state_history_plugin/include/eosio/state_history_plugin/state_history_serialization.hpp @@ -112,14 +112,15 @@ datastream& operator<<(datastream& ds, const history_serial_wrapper -datastream& operator<<(datastream& ds, const history_serial_wrapper& obj) { +datastream& operator<<(datastream& ds, + const history_serial_wrapper& obj) { fc::raw::pack(ds, fc::unsigned_int(0)); fc::raw::pack(ds, as_type(obj.obj.name.value)); fc::raw::pack(ds, as_type(obj.obj.is_privileged())); fc::raw::pack(ds, as_type(obj.obj.last_code_update)); bool has_code = obj.obj.code_hash != eosio::chain::digest_type(); fc::raw::pack(ds, has_code); - if(has_code) { + if (has_code) { fc::raw::pack(ds, as_type(obj.obj.vm_type)); fc::raw::pack(ds, as_type(obj.obj.vm_version)); fc::raw::pack(ds, as_type(obj.obj.code_hash)); @@ -133,7 +134,6 @@ datastream& operator<<(datastream& ds, const history_serial_wrapper(obj.obj.vm_type)); fc::raw::pack(ds, as_type(obj.obj.vm_version)); fc::raw::pack(ds, as_type(obj.obj.code_hash)); - fc::raw::pack(ds, as_type(obj.obj.code_ref_count)); fc::raw::pack(ds, as_type(obj.obj.code)); return ds; } @@ -548,7 +548,8 @@ datastream& operator<<(datastream& fc::raw::pack(ds, bool(obj.obj.account_ram_delta)); if (obj.obj.account_ram_delta) { - fc::raw::pack(ds, make_history_serial_wrapper(obj.db, as_type(*obj.obj.account_ram_delta))); + fc::raw::pack( + ds, make_history_serial_wrapper(obj.db, as_type(*obj.obj.account_ram_delta))); } fc::optional e; diff --git a/plugins/state_history_plugin/state_history_plugin.cpp b/plugins/state_history_plugin/state_history_plugin.cpp index 34ac0405939..887044a5f28 100644 --- a/plugins/state_history_plugin/state_history_plugin.cpp +++ b/plugins/state_history_plugin/state_history_plugin.cpp @@ -86,6 +86,21 @@ bool include_delta(const eosio::chain::resource_limits::resource_limits_state_ob old.virtual_cpu_limit != curr.virtual_cpu_limit; } +bool include_delta(const eosio::chain::account_metadata_object& old, + const eosio::chain::account_metadata_object& curr) { + return // + old.name.value != curr.name.value || // + old.is_privileged() != curr.is_privileged() || // + old.last_code_update != curr.last_code_update || // + old.vm_type != curr.vm_type || // + old.vm_version != curr.vm_version || // + old.code_hash != curr.code_hash; +} + +bool include_delta(const eosio::chain::code_object& old, const eosio::chain::code_object& curr) { // + return false; +} + struct state_history_plugin_impl : std::enable_shared_from_this { chain_plugin* chain_plug = nullptr; fc::optional trace_log; diff --git a/plugins/state_history_plugin/state_history_plugin_abi.cpp b/plugins/state_history_plugin/state_history_plugin_abi.cpp index 6154b65ee54..b6496f5cd60 100644 --- a/plugins/state_history_plugin/state_history_plugin_abi.cpp +++ b/plugins/state_history_plugin/state_history_plugin_abi.cpp @@ -214,7 +214,6 @@ extern const char* const state_history_plugin_abi = R"({ { "type": "uint8", "name": "vm_type" }, { "type": "uint8", "name": "vm_version" }, { "type": "checksum256", "name": "code_hash" }, - { "type": "uint64", "name": 
"code_ref_count" }, { "type": "bytes", "name": "code" } ] }, From 35a79fe6d4f90ca352da3df82bb05746345f93cf Mon Sep 17 00:00:00 2001 From: Zach Butler Date: Tue, 16 Apr 2019 19:38:04 -0400 Subject: [PATCH 403/680] Added lib64 to CMAKE_MODULE_PATH --- CMakeModules/eosio-config.cmake.in | 1 + 1 file changed, 1 insertion(+) diff --git a/CMakeModules/eosio-config.cmake.in b/CMakeModules/eosio-config.cmake.in index 97de49c4568..e30a3fa4ab2 100644 --- a/CMakeModules/eosio-config.cmake.in +++ b/CMakeModules/eosio-config.cmake.in @@ -2,6 +2,7 @@ if(EOSIO_ROOT STREQUAL "" OR NOT EOSIO_ROOT) set(EOSIO_ROOT "@EOS_ROOT_DIR@") endif() list(APPEND CMAKE_MODULE_PATH ${EOSIO_ROOT}/lib/cmake/eosio) +list(APPEND CMAKE_MODULE_PATH ${EOSIO_ROOT}/lib64/cmake/eosio) include(EosioTester) function(EXTRACT_MAJOR_MINOR_FROM_VERSION version success major minor) From f6ac7f68e67d3badb460a2d3e5fa41279b541108 Mon Sep 17 00:00:00 2001 From: Bucky Kittinger Date: Tue, 16 Apr 2019 21:21:11 -0400 Subject: [PATCH 404/680] hopefully close to finished with pinning --- CMakeLists.txt | 13 ++++++----- programs/cleos/CMakeLists.txt | 4 ++++ programs/keosd/CMakeLists.txt | 4 ++++ programs/nodeos/CMakeLists.txt | 4 ++++ scripts/eosio_build_ubuntu.sh | 42 ++++++++++++++++++++++++++++++++-- unittests/CMakeLists.txt | 8 ++++++- 6 files changed, 66 insertions(+), 9 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index e35c50a286a..caf9a255606 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -47,12 +47,6 @@ set( GUI_CLIENT_EXECUTABLE_NAME eosio ) set( CUSTOM_URL_SCHEME "gcs" ) set( INSTALLER_APP_ID "68ad7005-8eee-49c9-95ce-9eed97e5b347" ) -if(${EOSIO_PIN_COMPILER}) - message(STATUS "Pinning compiler to Clang 8") - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -stdlib=libc++ -v") - set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -stdlib=libc++ -lc++abi") -endif() - # http://stackoverflow.com/a/18369825 if("${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU") if (CMAKE_CXX_COMPILER_VERSION VERSION_LESS 7.0) @@ -127,6 +121,13 @@ set(THREADS_PREFER_PTHREAD_FLAG 1) find_package(Threads) link_libraries(Threads::Threads) +if(${EOSIO_PIN_COMPILER}) + if(NOT APPLE AND UNIX) + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -nostdinc++ -nostdlib++ -I${LIBSTDCPP_DIR}/include/c++/7.1.0") + set(CMAKE_CXX_STANDARD_LIBRARIES "${LIBSTDCPP_DIR}/lib64/libstdc++.a") + endif() +endif() + if( WIN32 ) message( STATUS "Configuring EOSIO on WIN32") diff --git a/programs/cleos/CMakeLists.txt b/programs/cleos/CMakeLists.txt index 0d98fdcf63d..1083b70ea3a 100644 --- a/programs/cleos/CMakeLists.txt +++ b/programs/cleos/CMakeLists.txt @@ -34,6 +34,10 @@ configure_file(config.hpp.in config.hpp ESCAPE_QUOTES) target_include_directories(${CLI_CLIENT_EXECUTABLE_NAME} PUBLIC ${Intl_INCLUDE_DIRS} ${CMAKE_CURRENT_BINARY_DIR}) +if(EOSIO_PIN_COMPILER) + target_link_libraries(${CLI_CLIENT_EXECUTABLE_NAME} PRIVATE -nostdlib++) +endif() + target_link_libraries( ${CLI_CLIENT_EXECUTABLE_NAME} PRIVATE appbase chain_api_plugin producer_plugin chain_plugin http_plugin eosio_chain fc ${CMAKE_DL_LIBS} ${PLATFORM_SPECIFIC_LIBS} ${Intl_LIBRARIES} ) diff --git a/programs/keosd/CMakeLists.txt b/programs/keosd/CMakeLists.txt index 3c806fbed39..cecb1eae5a1 100644 --- a/programs/keosd/CMakeLists.txt +++ b/programs/keosd/CMakeLists.txt @@ -11,6 +11,10 @@ endif() configure_file(config.hpp.in config.hpp ESCAPE_QUOTES) +if(EOSIO_PIN_COMPILER) + target_link_libraries(${KEY_STORE_EXECUTABLE_NAME} PRIVATE -nostdlib++) +endif() + target_link_libraries( ${KEY_STORE_EXECUTABLE_NAME} PRIVATE appbase PRIVATE 
wallet_api_plugin wallet_plugin diff --git a/programs/nodeos/CMakeLists.txt b/programs/nodeos/CMakeLists.txt index d9fb90ee45d..59ade059b32 100644 --- a/programs/nodeos/CMakeLists.txt +++ b/programs/nodeos/CMakeLists.txt @@ -47,6 +47,10 @@ else() set(build_id_flag "") endif() +if(EOSIO_PIN_COMPILER) + target_link_libraries(${NODE_EXECUTABLE_NAME} PRIVATE -nostdlib++) +endif() + target_link_libraries( ${NODE_EXECUTABLE_NAME} PRIVATE appbase PRIVATE -Wl,${whole_archive_flag} login_plugin -Wl,${no_whole_archive_flag} diff --git a/scripts/eosio_build_ubuntu.sh b/scripts/eosio_build_ubuntu.sh index 514c5f7a9b6..676e7785ac1 100755 --- a/scripts/eosio_build_ubuntu.sh +++ b/scripts/eosio_build_ubuntu.sh @@ -255,10 +255,48 @@ cd .. printf "\\n" if $BUILD_CLANG8; then + if [ ! -d ${OPT_LOCATION}/gmp ]; then + printf "Installing gmp...\\n" + cd ${TMP_LOCATION} \ + && wget https://ftp.gnu.org/gnu/gmp/gmp-5.0.1.tar.gz && tar -xzf gmp-5.0.1.tar.gz \ + && cd gmp-5.0.1 && mkdir build && cd build \ + && ../configure --prefix=${OPT_LOCATION}/gmp \ + && make -j"${JOBS}" && make install \ + && cd ../ && rm -r gmp-5.0.1 \ + || exit 1 + if [ ! -d ${OPT_LOCATION}/mpfr ]; then + printf "Installing mpfr...\\n" + cd ${TMP_LOCATION} \ + && wget https://ftp.gnu.org/gnu/mpfr/mpfr-3.0.0.tar.gz && tar -xzf mpfr-3.0.0.tar.gz \ + && cd mpfr-3.0.0 && mkdir build && cd build \ + && ../configure --prefix=${OPT_LOCATION}/mpfr --with-gmp=${OPT_LOCATION}/gmp \ + && make -j"${JOBS}" && make install \ + && cd ../ && rm -r mpfr-3.0.0 \ + || exit 1 + if [ ! -d ${OPT_LOCATION}/mpc ]; then + printf "Installing mpc...\\n" + cd ${TMP_LOCATION} \ + && wget https://ftp.gnu.org/gnu/mpc/mpc-1.0.1.tar.gz && tar -xzf mpc-1.0.1.tar.gz \ + && cd mpc-1.0.1 && mkdir build && cd build \ + && ../configure --prefix=${OPT_LOCATION}/mpc --with-gmp=${OPT_LOCATION}/gmp --with-mpfr=${OPT_LOCATION}/mpfr \ + && make -j"${JOBS}" && make install \ + && cd ../ && rm -r mpc-1.0.1 \ + || exit 1 + if [ ! -d ${OPT_LOCATION}/gcc ]; then + printf "Installing libstdc++\\n" + cd ${TMP_LOCATION} \ + && wget https://ftp.gnu.org/gnu/gcc/gcc-7.1.0/gcc-7.1.0.tar.gz && tar -xzf gcc-7.1.0.tar.gz \ + && cd gcc-7.1.0 && mkdir build && cd build \ + && + && ../configure --disable-libsanitizer --enable-languages=c,c++ --prefix=${OPT_LOCATION}/gcc --enable-shared --enable-linker-build-id --without-included-gettext --enable-threads=posix --enable-nls --enable-clocale=gnu --enable-libstdcxx-debug --enable-libstdcxx-time=yes --with-default-libstdcxx-abi=new --enable-gnu-unique-object --disable-vtable-verify --enable-libmpx --enable-plugin --with-system-zlib --with-target-system-zlib --disable-werror --disable-multilib --with-tune=generic --enable-checking=release --with-gmp=${OPT_LOCATION}/gmp --with-mpfr=${OPT_LOCATION}/mpfr --with-mpc=${OPT_LOCATION}/mpc + && make -j"${JOBS}" && make install \ + && cd ../ && rm -r gcc-7.1.0 \ + || exit 1 + printf "Checking Clang 8 support...\\n" if [ ! 
-d $CLANG8_ROOT ]; then printf "Installing Clang 8...\\n" - cd ${OPT_LOCATION} \ + cd ${TMP_LOCATION} \ && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/llvm.git clang8 && cd clang8 \ && git checkout $PINNED_COMPILER_LLVM_COMMIT \ && cd tools \ @@ -280,7 +318,7 @@ if $BUILD_CLANG8; then && cd libunwind && git checkout $PINNED_COMPILER_LIBUNWIND_COMMIT && cd ../ \ && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/compiler-rt.git \ && cd compiler-rt && git checkout $PINNED_COMPILER_COMPILER_RT_COMMIT && cd ../ \ - && cd ${OPT_LOCATION}/clang8 \ + && cd ${TMP_LOCATION}/clang8 \ && mkdir build && cd build \ && $CMAKE -G "Unix Makefiles" -DCMAKE_INSTALL_PREFIX="${CLANG8_ROOT}" -DLLVM_BUILD_EXTERNAL_COMPILER_RT=ON -DLLVM_BUILD_LLVM_DYLIB=ON -DLLVM_ENABLE_LIBCXX=ON -DLLVM_ENABLE_RTTI=ON -DLLVM_INCLUDE_DOCS=OFF -DLLVM_OPTIMIZED_TABLEGEN=ON -DLLVM_TARGETS_TO_BUILD=all -DCMAKE_BUILD_TYPE=Release .. \ && make -j"${JOBS}" \ diff --git a/unittests/CMakeLists.txt b/unittests/CMakeLists.txt index dfb2a029e26..c5cfd31d096 100644 --- a/unittests/CMakeLists.txt +++ b/unittests/CMakeLists.txt @@ -42,7 +42,13 @@ configure_file(${CMAKE_CURRENT_SOURCE_DIR}/contracts.hpp.in ${CMAKE_CURRENT_BINA ### BUILD UNIT TEST EXECUTABLE ### file(GLOB UNIT_TESTS "*.cpp") # find all unit test suites add_executable( unit_test ${UNIT_TESTS}) # build unit tests as one executable -target_link_libraries( unit_test eosio_chain chainbase eosio_testing fc appbase ${PLATFORM_SPECIFIC_LIBS} ) + +if(EOSIO_PIN_COMPILER) + target_link_libraries(unit_test PRIVATE -nostdlib++ PUBLIC eosio_chain chainbase eosio_testing fc appbase ${PLATFORM_SPECIFIC_LIBS}) +else() + target_link_libraries( unit_test eosio_chain chainbase eosio_testing fc appbase ${PLATFORM_SPECIFIC_LIBS} ) +endif() + target_compile_options(unit_test PUBLIC -DDISABLE_EOSLIB_SERIALIZE) target_include_directories( unit_test PUBLIC ${CMAKE_SOURCE_DIR}/libraries/testing/include From 9055d81d77d23ca1a71b9ac4d666b11f228b173e Mon Sep 17 00:00:00 2001 From: Bucky Kittinger Date: Tue, 16 Apr 2019 21:26:52 -0400 Subject: [PATCH 405/680] update script --- scripts/eosio_build_ubuntu.sh | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/scripts/eosio_build_ubuntu.sh b/scripts/eosio_build_ubuntu.sh index 676e7785ac1..7368673cfbf 100755 --- a/scripts/eosio_build_ubuntu.sh +++ b/scripts/eosio_build_ubuntu.sh @@ -287,8 +287,7 @@ if $BUILD_CLANG8; then cd ${TMP_LOCATION} \ && wget https://ftp.gnu.org/gnu/gcc/gcc-7.1.0/gcc-7.1.0.tar.gz && tar -xzf gcc-7.1.0.tar.gz \ && cd gcc-7.1.0 && mkdir build && cd build \ - && - && ../configure --disable-libsanitizer --enable-languages=c,c++ --prefix=${OPT_LOCATION}/gcc --enable-shared --enable-linker-build-id --without-included-gettext --enable-threads=posix --enable-nls --enable-clocale=gnu --enable-libstdcxx-debug --enable-libstdcxx-time=yes --with-default-libstdcxx-abi=new --enable-gnu-unique-object --disable-vtable-verify --enable-libmpx --enable-plugin --with-system-zlib --with-target-system-zlib --disable-werror --disable-multilib --with-tune=generic --enable-checking=release --with-gmp=${OPT_LOCATION}/gmp --with-mpfr=${OPT_LOCATION}/mpfr --with-mpc=${OPT_LOCATION}/mpc + &&../configure --enable-languages=c,c++ --prefix=${OPT_LOCATION}/gcc --disable-shared --enable-linker-build-id --without-included-gettext --enable-threads=posix --enable-nls --enable-clocale=gnu --enable-libstdcxx-debug --enable-libstdcxx-time=yes 
--with-default-libstdcxx-abi=new --enable-gnu-unique-object --disable-vtable-verify --disable-libmpx --enable-plugin --with-system-zlib --with-target-system-zlib --disable-werror --disable-multilib --with-tune=generic --enable-checking=release --with-gmp=${OPT_LOCATION}/gmp --with-mpfr=${OPT_LOCATION}/mpfr --with-mpc=${OPT_LOCATION}/mpc --disable-libsanitizer --disable-testsuite --disable-libquadmath --disable-libitm --disable-libcc1 && make -j"${JOBS}" && make install \ && cd ../ && rm -r gcc-7.1.0 \ || exit 1 From 645899ea05e8daa38ee8289e96a25cdfef1e5bf7 Mon Sep 17 00:00:00 2001 From: arhag Date: Tue, 16 Apr 2019 23:12:14 -0400 Subject: [PATCH 406/680] fix long-running tests given RESTRICT_ACTION_TO_SELF protocol feature changes --- tests/Cluster.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/Cluster.py b/tests/Cluster.py index 746fa210c6a..efd6dbccfdc 100644 --- a/tests/Cluster.py +++ b/tests/Cluster.py @@ -1212,7 +1212,7 @@ def bootstrap(self, biosNode, totalNodes, prodCount, totalProducers, pfSetupPoli contract=eosioTokenAccount.name Utils.Print("push create action to %s contract" % (contract)) action="create" - data="{\"issuer\":\"%s\",\"maximum_supply\":\"1000000000.0000 %s\",\"can_freeze\":\"0\",\"can_recall\":\"0\",\"can_whitelist\":\"0\"}" % (eosioTokenAccount.name, CORE_SYMBOL) + data="{\"issuer\":\"%s\",\"maximum_supply\":\"1000000000.0000 %s\"}" % (eosioAccount.name, CORE_SYMBOL) opts="--permission %s@active" % (contract) trans=biosNode.pushMessage(contract, action, data, opts) if trans is None or not trans[0]: @@ -1229,7 +1229,7 @@ def bootstrap(self, biosNode, totalNodes, prodCount, totalProducers, pfSetupPoli Utils.Print("push issue action to %s contract" % (contract)) action="issue" data="{\"to\":\"%s\",\"quantity\":\"1000000000.0000 %s\",\"memo\":\"initial issue\"}" % (eosioAccount.name, CORE_SYMBOL) - opts="--permission %s@active" % (contract) + opts="--permission %s@active" % (eosioAccount.name) trans=biosNode.pushMessage(contract, action, data, opts) if trans is None or not trans[0]: Utils.Print("ERROR: Failed to push issue action to eosio contract.") From aa978cf375b5573cb38bc1ef902e9ffc1d1257db Mon Sep 17 00:00:00 2001 From: arhag Date: Tue, 16 Apr 2019 23:49:58 -0400 Subject: [PATCH 407/680] use updated bios contract that renames preactivate action to activate; adjust tests and testing utilities accordingly --- libraries/testing/tester.cpp | 2 +- tests/Node.py | 4 +-- unittests/contracts/eosio.bios/eosio.bios.abi | 30 +++++++++--------- .../contracts/eosio.bios/eosio.bios.wasm | Bin 17779 -> 17777 bytes unittests/protocol_feature_tests.cpp | 4 +-- 5 files changed, 20 insertions(+), 20 deletions(-) diff --git a/libraries/testing/tester.cpp b/libraries/testing/tester.cpp index 5d56814a7a5..a9a442d8380 100644 --- a/libraries/testing/tester.cpp +++ b/libraries/testing/tester.cpp @@ -983,7 +983,7 @@ namespace eosio { namespace testing { void base_tester::preactivate_protocol_features(const vector feature_digests) { for( const auto& feature_digest: feature_digests ) { - push_action( config::system_account_name, N(preactivate), config::system_account_name, + push_action( config::system_account_name, N(activate), config::system_account_name, fc::mutable_variant_object()("feature_digest", feature_digest) ); } } diff --git a/tests/Node.py b/tests/Node.py index ccb3df11557..fc519a41569 100644 --- a/tests/Node.py +++ b/tests/Node.py @@ -1539,10 +1539,10 @@ def getAllBuiltinFeatureDigestsToPreactivate(self): # Require PREACTIVATE_FEATURE to be 
activated and require eosio.bios with preactivate_feature def preactivateProtocolFeatures(self, featureDigests:list): for digest in featureDigests: - Utils.Print("push preactivate action with digest {}".format(digest)) + Utils.Print("push activate action with digest {}".format(digest)) data="{{\"feature_digest\":{}}}".format(digest) opts="--permission eosio@active" - trans=self.pushMessage("eosio", "preactivate", data, opts) + trans=self.pushMessage("eosio", "activate", data, opts) if trans is None or not trans[0]: Utils.Print("ERROR: Failed to preactive digest {}".format(digest)) return None diff --git a/unittests/contracts/eosio.bios/eosio.bios.abi b/unittests/contracts/eosio.bios/eosio.bios.abi index 3f9749263ce..01f62c976f5 100644 --- a/unittests/contracts/eosio.bios/eosio.bios.abi +++ b/unittests/contracts/eosio.bios/eosio.bios.abi @@ -17,6 +17,16 @@ } ] }, + { + "name": "activate", + "base": "", + "fields": [ + { + "name": "feature_digest", + "type": "checksum256" + } + ] + }, { "name": "authority", "base": "", @@ -241,16 +251,6 @@ } ] }, - { - "name": "preactivate", - "base": "", - "fields": [ - { - "name": "feature_digest", - "type": "checksum256" - } - ] - }, { "name": "producer_key", "base": "", @@ -451,6 +451,11 @@ } ], "actions": [ + { + "name": "activate", + "type": "activate", + "ricardian_contract": "" + }, { "name": "canceldelay", "type": "canceldelay", @@ -476,11 +481,6 @@ "type": "onerror", "ricardian_contract": "" }, - { - "name": "preactivate", - "type": "preactivate", - "ricardian_contract": "" - }, { "name": "reqactivated", "type": "reqactivated", diff --git a/unittests/contracts/eosio.bios/eosio.bios.wasm b/unittests/contracts/eosio.bios/eosio.bios.wasm index 9d15da6fc49d215664c142c5055de37d8a39746d..968bd1529dc23933a35712bdabe2697d37553c99 100755 GIT binary patch delta 5851 zcmcIod2Afj8Gmn%o$<~(9y|7~cWlSA<7+pLkHpT}IlW^$N0k5$;uewZB%8*Q!;a$w zQbE>8L5P&nphsIMSE$;msD~&K5-nFKh_+H`qZUDfN*nqIZAC?)R3L<^^!Lr|I>xLW zRJC4tXXed!z3+YR_x;{}aDu;og6G)iq5YgO#vkI=2xB91?pThENV#M5g*!EO41a7y zp(`8VIiAB8$>r#l;U97|NE)Q81PU~I4DXKx1CSsSc}+GFBB@;Np4{%*NOt_t*!b}1 zp+nipDJIL=14lypv-=NarIyK6|1;L#YN0}}< z_(okF-7yXmOdRA1;Ytpd#V)D2(wB_vJUls>Juo#qHMT#?%EZHx)!@G{cI_D6J$7K% z@L0N=nPjwN7{Tn>H#OG9%EhxhmXiDk3I-;#JF{a)vXiWghQm<*Vs^^c*2Ta1%G{=n zETl25B&>%*nidYrGQLAv6^}|$IjZ0vA2rIOvK+?Sa4aTQ=l}eh;7N}wDxcutuqt!L zLs2=$-RU#T$T9c(FV0-F60VL;Ozb<#+DghebGcD+ReR@(?sV_!HS5B^oaP%9X4V+f zoXa^W6MSs8p_IA!B&EADHgnA*4)+`>ji+Grc*c=!=DC{Z-h9wuY3X=IL_`*6K0V@S*hG56#d@$uTE9WOJ?Iyah@;J3^u z1mfmAN3pqQ#)Z4?7H)~>)^*3bIM2D!giJzksC=Q6+1!@QCF1}nJo0+z;h>O^Z>s()r47+59tIPVVM`Fy&5$^}zN;#DDbX9*uw`3u6} z=a*EAXkX0&>eu&OLj92i)lb!#;{5v1{1KR$g@WIkxrF!eB~{`IH#E-#-pZur_k4ay z+XAYTx?9~#cz@BIKXnZY&zmy8wZFDV;1U+@*8Zi%0_QHhe7-+=daL{|iT_Htp%sZI z&h=l5nw0u4;P1(gUZY1;asTx6^w}p)KYjlSac^iK*I`9;l#DNQ;lumR^&-~I+7nX&TqSKfK`T>q7&p}@$NeYnbLX-{BfPmgTpt=OK~Wko`P6~z1zVk=l7 zqu(SeYGCC!xm83)k0)1J5fz0_0bYfRnwEwF(?dOSZ{BpTXZDCcYt6l@tOyl732((k zOG}#r;mtjACNF%3LE4I7U-@#k;G&D|TI}+*R)psVZ^U3pTDmS!zOF~^%qw?#=6Va* zX4XRDB=;JbQGu8jkmu*`(1Hu5Bk{W7G1nV2}SeSQ5exT`3UXn8MbbhxE$3oQL;b5*(@&-zzYUC z5hRcw*v3swydGUz5k!=ahT9a)(W*qdO5**D)?KnSk02H@+LqbQ6cj@crPvW00Se9Q zkI*xp7BeO8Hd6~EqTF}w{sLWv;oFog@s$YD-0LBJjGFzTuKdf3kr~DPJ*PpWo&-bU z>GI_ra*-%bNx`%iUkn4Y0$!RWVs%y9JUPIMJWzG5-Uko5=g8vCaIJ_&A4w@iJWSK- zBeAn5x=a}1*7DR&*&lQb1Z}A%RuLkBw3G`V%g;K^;9F{L0hK_~yV5+DS zTnx*W8__iB!7cOLiGzC`sSz+SSHyTqUrZjYT?les?299 zy16UfuQ<(Z@twupJS5&&dCBR)CwWelec= z8Fq|eloZz1xI>mgIWF9)=J}IS} z4Guber~kCkcm3oUa*f#nDfPTmKf{ggnH$BLMa%dmv1d^qcf|J=HR1c$i&idI9JP>V 
zRHV|IhkbNWMzxE`mo_K=2LjU-pmyHzh&N?7ag@q)^#thvJIv~+~<@G+&A2I zva7qEZ;<)!JHw`PVx6;UL6=<%qSm2G9Wh|zHVRN*4v9#cK4r!Ea`_|cp30puH7HPK zV_y7M=@QHIeP<8#L^!ZL`hpj!6rh-}0X-r4Kqqb~n80De|(hYvx3KQb4 zUNQtda=Auu8mlPy0muyiUZY_KmQk0aNqqGOoY`H0qxRj$((UUB>5~s@Rkiy;Cb~(u zEf`!-N`i$I%o^yF`hNMA7VqCZSE>)vp?z;aV|oP(2-0rdgCBQVIWJft0`5?PzC}3% zWAy_B2q2sQ0ro+7QYj1Q*CvG2)r5AC-sNUBf|hbXy_NUNU#V>}?rB-^Y4lLaXEag( z3Do$b`U*MfsSm$>4jmM3a$r3wKEQ^HT$|*8*Hh^b6cG?o&_%DwIjm#~Havq|D;MP^ z^cq@3!s5CKz3TFDpJx#XVcGACE@1s@^|ecKH^!IR!B~808^-+$+2Ekqr6qJVEM-Tb zVQDGTC9CG~g@a`qn?4={_DOO=u_%HF43patV+oXI1gzYlR3*>!Vo5({@~ny5^O;^i zAO!#EUZ61i*x=1tpS#fnJUrbiPbsx4LCr0m?PgYCJ76S;&r;aQ=aqFer@>3$^qUyB zU_8?s2)wBTnEhMlPI3qhROy7{1V$1^W4OC}xxrv-X zzbuc8-(Za7Gr4qrBF8?P!z*Y|o&p-J;{Ba-GSb({ACjNyYvZrTFZ6BXx5?-FEdI1q Vw Date: Wed, 17 Apr 2019 00:23:28 -0400 Subject: [PATCH 408/680] small changes for gcc build --- scripts/eosio_build.sh | 2 +- scripts/eosio_build_ubuntu.sh | 31 ++++++++++++++++++++++++------- 2 files changed, 25 insertions(+), 8 deletions(-) diff --git a/scripts/eosio_build.sh b/scripts/eosio_build.sh index 68ab0848f89..490a16c3b06 100755 --- a/scripts/eosio_build.sh +++ b/scripts/eosio_build.sh @@ -228,7 +228,7 @@ fi if $PIN_COMPILER; then BUILD_CLANG8=true CPP_COMP=${OPT_LOCATION}/clang8/bin/clang++ - PIN_COMPILER_CMAKE="-DEOSIO_PIN_COMPILER=1" + PIN_COMPILER_CMAKE="-DEOSIO_PIN_COMPILER=1 -DLIBSTDCPP_DIR=${OPT_LOCATION}/gcc" elif $NO_CPP17; then if [ $NONINTERACTIVE -eq 0 ]; then BUILD_CLANG8=true diff --git a/scripts/eosio_build_ubuntu.sh b/scripts/eosio_build_ubuntu.sh index 2b8100300c5..e50cb3f6d7d 100755 --- a/scripts/eosio_build_ubuntu.sh +++ b/scripts/eosio_build_ubuntu.sh @@ -253,6 +253,15 @@ if [ $? -ne 0 ]; then exit -1; fi cd .. printf "\\n" +# Use current directory's tmp directory if noexec is enabled for /tmp +if (mount | grep "/tmp " | grep --quiet noexec); then + mkdir -p $REPO_ROOT/tmp + TMP_LOCATION="${REPO_ROOT}/tmp" + rm -rf $REPO_ROOT/tmp/* +else # noexec wasn't found + TMP_LOCATION="/tmp" +fi + if $BUILD_CLANG8; then if [ ! -d ${OPT_LOCATION}/gmp ]; then printf "Installing gmp...\\n" @@ -261,8 +270,9 @@ if $BUILD_CLANG8; then && cd gmp-5.0.1 && mkdir build && cd build \ && ../configure --prefix=${OPT_LOCATION}/gmp \ && make -j"${JOBS}" && make install \ - && cd ../ && rm -r gmp-5.0.1 \ + && cd ../ && rm -r ${TMP_LOCATION}/gmp-5.0.1 ${TMP_LOCATION}/gmp-5.0.1.tar.gz \ || exit 1 + fi if [ ! -d ${OPT_LOCATION}/mpfr ]; then printf "Installing mpfr...\\n" cd ${TMP_LOCATION} \ @@ -270,8 +280,9 @@ if $BUILD_CLANG8; then && cd mpfr-3.0.0 && mkdir build && cd build \ && ../configure --prefix=${OPT_LOCATION}/mpfr --with-gmp=${OPT_LOCATION}/gmp \ && make -j"${JOBS}" && make install \ - && cd ../ && rm -r mpfr-3.0.0 \ + && cd ../ && rm -r ${TMP_LOCATION}/mpfr-3.0.0 ${TMP_LOCATION}/mpfr-3.0.0.tar.gz \ || exit 1 + fi if [ ! -d ${OPT_LOCATION}/mpc ]; then printf "Installing mpc...\\n" cd ${TMP_LOCATION} \ @@ -279,18 +290,21 @@ if $BUILD_CLANG8; then && cd mpc-1.0.1 && mkdir build && cd build \ && ../configure --prefix=${OPT_LOCATION}/mpc --with-gmp=${OPT_LOCATION}/gmp --with-mpfr=${OPT_LOCATION}/mpfr \ && make -j"${JOBS}" && make install \ - && cd ../ && rm -r mpc-1.0.1 \ + && cd ../ && rm -r ${TMP_LOCATION}/mpc-1.0.1 ${TMP_LOCATION}/mpc-1.0.1 \ || exit 1 + fi if [ ! 
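Two things in [PATCH 408/680] above are worth calling out. First, -DLIBSTDCPP_DIR=${OPT_LOCATION}/gcc is how the pinned-compiler CMake run learns where the locally built libstdc++ lives. Second, the new TMP_LOCATION block exists because ./configure and the compiler bootstrap execute test binaries out of the temp directory, which fails outright when /tmp is mounted noexec. The detection is the patch's own one-liner and can be tried standalone:

    # Same check the scripts now perform before picking TMP_LOCATION:
    if (mount | grep "/tmp " | grep --quiet noexec); then
        echo "/tmp is noexec -> build from a repo-local tmp directory"
    else
        echo "/tmp allows exec -> safe to build there"
    fi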
-d ${OPT_LOCATION}/gcc ]; then printf "Installing libstdc++\\n" cd ${TMP_LOCATION} \ && wget https://ftp.gnu.org/gnu/gcc/gcc-7.1.0/gcc-7.1.0.tar.gz && tar -xzf gcc-7.1.0.tar.gz \ && cd gcc-7.1.0 && mkdir build && cd build \ - &&../configure --enable-languages=c,c++ --prefix=${OPT_LOCATION}/gcc --disable-shared --enable-linker-build-id --without-included-gettext --enable-threads=posix --enable-nls --enable-clocale=gnu --enable-libstdcxx-debug --enable-libstdcxx-time=yes --with-default-libstdcxx-abi=new --enable-gnu-unique-object --disable-vtable-verify --disable-libmpx --enable-plugin --with-system-zlib --with-target-system-zlib --disable-werror --disable-multilib --with-tune=generic --enable-checking=release --with-gmp=${OPT_LOCATION}/gmp --with-mpfr=${OPT_LOCATION}/mpfr --with-mpc=${OPT_LOCATION}/mpc --disable-libsanitizer --disable-testsuite --disable-libquadmath --disable-libitm --disable-libcc1 - && make -j"${JOBS}" && make install \ - && cd ../ && rm -r gcc-7.1.0 \ + &&../configure --enable-languages=c,c++ --prefix=${OPT_LOCATION}/gcc --disable-shared --enable-linker-build-id --without-included-gettext --enable-threads=posix --enable-nls --enable-clocale=gnu --enable-libstdcxx-debug --enable-libstdcxx-time=yes --with-default-libstdcxx-abi=new --enable-gnu-unique-object --disable-vtable-verify --disable-libmpx --enable-plugin --with-system-zlib --with-target-system-zlib --disable-werror --disable-multilib --with-tune=generic --enable-checking=release --with-gmp=${OPT_LOCATION}/gmp --with-mpfr=${OPT_LOCATION}/mpfr --with-mpc=${OPT_LOCATION}/mpc --disable-libsanitizer --disable-testsuite --disable-libquadmath --disable-libitm --disable-libcc1 \ + && make -j3 && make install \ + && cd ../ && rm -r ${TMP_LOCATION}/gcc-7.1.0 ${TMP_LOCATION}/gcc-7.1.0.tar.gz \ || exit 1 + fi + #&& make -j"${JOBS}" && make install \ printf "Checking Clang 8 support...\\n" if [ ! -d $CLANG8_ROOT ]; then printf "Installing Clang 8...\\n" @@ -319,10 +333,13 @@ if $BUILD_CLANG8; then && cd ${TMP_LOCATION}/clang8 \ && mkdir build && cd build \ && $CMAKE -G "Unix Makefiles" -DCMAKE_INSTALL_PREFIX="${CLANG8_ROOT}" -DLLVM_BUILD_EXTERNAL_COMPILER_RT=ON -DLLVM_BUILD_LLVM_DYLIB=ON -DLLVM_ENABLE_LIBCXX=ON -DLLVM_ENABLE_RTTI=ON -DLLVM_INCLUDE_DOCS=OFF -DLLVM_OPTIMIZED_TABLEGEN=ON -DLLVM_TARGETS_TO_BUILD=all -DCMAKE_BUILD_TYPE=Release .. \ - && make -j"${JOBS}" \ + && make -j3 \ && make install \ + && rm -r ${TMP_LOCATION}/clang8 \ && cd ../.. 
\ || exit 1 + + #&& make -j"${JOBS}" \ printf " - Clang 8 successfully installed @ ${CLANG8_ROOT}\\n" else printf " - Clang 8 found @ ${CLANG8_ROOT}.\\n" From 37df54eb8ccc9f020338f8a48ffcf61a376d2431 Mon Sep 17 00:00:00 2001 From: arhag Date: Wed, 17 Apr 2019 00:24:17 -0400 Subject: [PATCH 409/680] forgot to rename preactivate to activate in testnet.template --- testnet.template | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testnet.template b/testnet.template index e36e8ba4f80..574d7ec9795 100644 --- a/testnet.template +++ b/testnet.template @@ -87,7 +87,7 @@ ecmd set contract eosio $bioscontractpath eosio.bios.wasm eosio.bios.abi # Preactivate all digests for digest in $FEATURE_DIGESTS; do -ecmd push action eosio preactivate "{\"feature_digest\":\"$digest\"}" -p eosio +ecmd push action eosio activate "{\"feature_digest\":\"$digest\"}" -p eosio done # Create required system accounts From 5a11c0b69af52bad9b6bef121a858e6b2b1d7055 Mon Sep 17 00:00:00 2001 From: Bucky Kittinger Date: Wed, 17 Apr 2019 00:45:20 -0400 Subject: [PATCH 410/680] update all other build scripts --- scripts/eosio_build_amazon.sh | 56 +++++++++++++++++++++++++++++++++-- scripts/eosio_build_centos.sh | 47 +++++++++++++++++++++++++++-- scripts/eosio_build_fedora.sh | 47 +++++++++++++++++++++++++++-- scripts/eosio_build_ubuntu.sh | 6 ++-- 4 files changed, 146 insertions(+), 10 deletions(-) diff --git a/scripts/eosio_build_amazon.sh b/scripts/eosio_build_amazon.sh index e769917a790..2abc3b60632 100755 --- a/scripts/eosio_build_amazon.sh +++ b/scripts/eosio_build_amazon.sh @@ -248,11 +248,61 @@ if [ $? -ne 0 ]; then exit -1; fi cd .. printf "\\n" +# Use current directory's tmp directory if noexec is enabled for /tmp +if (mount | grep "/tmp " | grep --quiet noexec); then + mkdir -p $REPO_ROOT/tmp + TMP_LOCATION="${REPO_ROOT}/tmp" + rm -rf $REPO_ROOT/tmp/* +else # noexec wasn't found + TMP_LOCATION="/tmp" +fi + if $BUILD_CLANG8; then + if [ ! -d ${OPT_LOCATION}/gmp ]; then + printf "Installing gmp...\\n" + cd ${TMP_LOCATION} \ + && wget https://ftp.gnu.org/gnu/gmp/gmp-5.0.1.tar.gz && tar -xzf gmp-5.0.1.tar.gz \ + && cd gmp-5.0.1 && mkdir build && cd build \ + && ../configure --prefix=${OPT_LOCATION}/gmp \ + && make -j"${JOBS}" && make install \ + && cd ../ && rm -r ${TMP_LOCATION}/gmp-5.0.1 ${TMP_LOCATION}/gmp-5.0.1.tar.gz \ + || exit 1 + fi + if [ ! -d ${OPT_LOCATION}/mpfr ]; then + printf "Installing mpfr...\\n" + cd ${TMP_LOCATION} \ + && wget https://ftp.gnu.org/gnu/mpfr/mpfr-3.0.0.tar.gz && tar -xzf mpfr-3.0.0.tar.gz \ + && cd mpfr-3.0.0 && mkdir build && cd build \ + && ../configure --prefix=${OPT_LOCATION}/mpfr --with-gmp=${OPT_LOCATION}/gmp \ + && make -j"${JOBS}" && make install \ + && cd ../ && rm -r ${TMP_LOCATION}/mpfr-3.0.0 ${TMP_LOCATION}/mpfr-3.0.0.tar.gz \ + || exit 1 + fi + if [ ! -d ${OPT_LOCATION}/mpc ]; then + printf "Installing mpc...\\n" + cd ${TMP_LOCATION} \ + && wget https://ftp.gnu.org/gnu/mpc/mpc-1.0.1.tar.gz && tar -xzf mpc-1.0.1.tar.gz \ + && cd mpc-1.0.1 && mkdir build && cd build \ + && ../configure --prefix=${OPT_LOCATION}/mpc --with-gmp=${OPT_LOCATION}/gmp --with-mpfr=${OPT_LOCATION}/mpfr \ + && make -j"${JOBS}" && make install \ + && cd ../ && rm -r ${TMP_LOCATION}/mpc-1.0.1 ${TMP_LOCATION}/mpc-1.0.1 \ + || exit 1 + fi + if [ ! 
-d ${OPT_LOCATION}/gcc ]; then + printf "Installing libstdc++\\n" + cd ${TMP_LOCATION} \ + && wget https://ftp.gnu.org/gnu/gcc/gcc-7.1.0/gcc-7.1.0.tar.gz && tar -xzf gcc-7.1.0.tar.gz \ + && cd gcc-7.1.0 && mkdir build && cd build \ + &&../configure --enable-languages=c,c++ --prefix=${OPT_LOCATION}/gcc --disable-shared --enable-linker-build-id --without-included-gettext --enable-threads=posix --enable-nls --enable-clocale=gnu --enable-libstdcxx-debug --enable-libstdcxx-time=yes --with-default-libstdcxx-abi=new --enable-gnu-unique-object --disable-vtable-verify --disable-libmpx --enable-plugin --with-system-zlib --with-target-system-zlib --disable-werror --disable-multilib --with-tune=generic --enable-checking=release --with-gmp=${OPT_LOCATION}/gmp --with-mpfr=${OPT_LOCATION}/mpfr --with-mpc=${OPT_LOCATION}/mpc --disable-libsanitizer --disable-testsuite --disable-libquadmath --disable-libitm --disable-libcc1 \ + && make -j"${JOBS}" && make install \ + && cd ../ && rm -r ${TMP_LOCATION}/gcc-7.1.0 ${TMP_LOCATION}/gcc-7.1.0.tar.gz \ + || exit 1 + fi + printf "Checking Clang 8 support...\\n" if [ ! -d $CLANG8_ROOT ]; then printf "Installing Clang 8...\\n" - cd ${OPT_LOCATION} \ + cd ${TMP_LOCATION} \ && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/llvm.git clang8 && cd clang8 \ && git checkout $PINNED_COMPILER_LLVM_COMMIT \ && cd tools \ @@ -274,13 +324,15 @@ if $BUILD_CLANG8; then && cd libunwind && git checkout $PINNED_COMPILER_LIBUNWIND_COMMIT && cd ../ \ && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/compiler-rt.git \ && cd compiler-rt && git checkout $PINNED_COMPILER_COMPILER_RT_COMMIT && cd ../ \ - && cd ${OPT_LOCATION}/clang8 \ + && cd ${TMP_LOCATION}/clang8 \ && mkdir build && cd build \ && $CMAKE -G "Unix Makefiles" -DCMAKE_INSTALL_PREFIX="${CLANG8_ROOT}" -DLLVM_BUILD_EXTERNAL_COMPILER_RT=ON -DLLVM_BUILD_LLVM_DYLIB=ON -DLLVM_ENABLE_LIBCXX=ON -DLLVM_ENABLE_RTTI=ON -DLLVM_INCLUDE_DOCS=OFF -DLLVM_OPTIMIZED_TABLEGEN=ON -DLLVM_TARGETS_TO_BUILD=all -DCMAKE_BUILD_TYPE=Release .. \ && make -j"${JOBS}" \ && make install \ + && rm -r ${TMP_LOCATION}/clang8 \ && cd ../.. \ || exit 1 + printf " - Clang 8 successfully installed @ ${CLANG8_ROOT}\\n" else printf " - Clang 8 found @ ${CLANG8_ROOT}.\\n" diff --git a/scripts/eosio_build_centos.sh b/scripts/eosio_build_centos.sh index 7a11c7ee257..3bd5546d50c 100755 --- a/scripts/eosio_build_centos.sh +++ b/scripts/eosio_build_centos.sh @@ -300,10 +300,51 @@ cd .. printf "\\n" if $BUILD_CLANG8; then + if [ ! -d ${OPT_LOCATION}/gmp ]; then + printf "Installing gmp...\\n" + cd ${TMP_LOCATION} \ + && wget https://ftp.gnu.org/gnu/gmp/gmp-5.0.1.tar.gz && tar -xzf gmp-5.0.1.tar.gz \ + && cd gmp-5.0.1 && mkdir build && cd build \ + && ../configure --prefix=${OPT_LOCATION}/gmp \ + && make -j"${JOBS}" && make install \ + && cd ../ && rm -r ${TMP_LOCATION}/gmp-5.0.1 ${TMP_LOCATION}/gmp-5.0.1.tar.gz \ + || exit 1 + fi + if [ ! -d ${OPT_LOCATION}/mpfr ]; then + printf "Installing mpfr...\\n" + cd ${TMP_LOCATION} \ + && wget https://ftp.gnu.org/gnu/mpfr/mpfr-3.0.0.tar.gz && tar -xzf mpfr-3.0.0.tar.gz \ + && cd mpfr-3.0.0 && mkdir build && cd build \ + && ../configure --prefix=${OPT_LOCATION}/mpfr --with-gmp=${OPT_LOCATION}/gmp \ + && make -j"${JOBS}" && make install \ + && cd ../ && rm -r ${TMP_LOCATION}/mpfr-3.0.0 ${TMP_LOCATION}/mpfr-3.0.0.tar.gz \ + || exit 1 + fi + if [ ! 
-d ${OPT_LOCATION}/mpc ]; then + printf "Installing mpc...\\n" + cd ${TMP_LOCATION} \ + && wget https://ftp.gnu.org/gnu/mpc/mpc-1.0.1.tar.gz && tar -xzf mpc-1.0.1.tar.gz \ + && cd mpc-1.0.1 && mkdir build && cd build \ + && ../configure --prefix=${OPT_LOCATION}/mpc --with-gmp=${OPT_LOCATION}/gmp --with-mpfr=${OPT_LOCATION}/mpfr \ + && make -j"${JOBS}" && make install \ + && cd ../ && rm -r ${TMP_LOCATION}/mpc-1.0.1 ${TMP_LOCATION}/mpc-1.0.1 \ + || exit 1 + fi + if [ ! -d ${OPT_LOCATION}/gcc ]; then + printf "Installing libstdc++\\n" + cd ${TMP_LOCATION} \ + && wget https://ftp.gnu.org/gnu/gcc/gcc-7.1.0/gcc-7.1.0.tar.gz && tar -xzf gcc-7.1.0.tar.gz \ + && cd gcc-7.1.0 && mkdir build && cd build \ + &&../configure --enable-languages=c,c++ --prefix=${OPT_LOCATION}/gcc --disable-shared --enable-linker-build-id --without-included-gettext --enable-threads=posix --enable-nls --enable-clocale=gnu --enable-libstdcxx-debug --enable-libstdcxx-time=yes --with-default-libstdcxx-abi=new --enable-gnu-unique-object --disable-vtable-verify --disable-libmpx --enable-plugin --with-system-zlib --with-target-system-zlib --disable-werror --disable-multilib --with-tune=generic --enable-checking=release --with-gmp=${OPT_LOCATION}/gmp --with-mpfr=${OPT_LOCATION}/mpfr --with-mpc=${OPT_LOCATION}/mpc --disable-libsanitizer --disable-testsuite --disable-libquadmath --disable-libitm --disable-libcc1 \ + && make -j"${JOBS}" && make install \ + && cd ../ && rm -r ${TMP_LOCATION}/gcc-7.1.0 ${TMP_LOCATION}/gcc-7.1.0.tar.gz \ + || exit 1 + fi + printf "Checking Clang 8 support...\\n" if [ ! -d $CLANG8_ROOT ]; then printf "Installing Clang 8...\\n" - cd ${OPT_LOCATION} \ + cd ${TMP_LOCATION} \ && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/llvm.git clang8 && cd clang8 \ && git checkout $PINNED_COMPILER_LLVM_COMMIT \ && cd tools \ @@ -325,13 +366,15 @@ if $BUILD_CLANG8; then && cd libunwind && git checkout $PINNED_COMPILER_LIBUNWIND_COMMIT && cd ../ \ && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/compiler-rt.git \ && cd compiler-rt && git checkout $PINNED_COMPILER_COMPILER_RT_COMMIT && cd ../ \ - && cd ${OPT_LOCATION}/clang8 \ + && cd ${TMP_LOCATION}/clang8 \ && mkdir build && cd build \ && $CMAKE -G "Unix Makefiles" -DCMAKE_INSTALL_PREFIX="${CLANG8_ROOT}" -DLLVM_BUILD_EXTERNAL_COMPILER_RT=ON -DLLVM_BUILD_LLVM_DYLIB=ON -DLLVM_ENABLE_LIBCXX=ON -DLLVM_ENABLE_RTTI=ON -DLLVM_INCLUDE_DOCS=OFF -DLLVM_OPTIMIZED_TABLEGEN=ON -DLLVM_TARGETS_TO_BUILD=all -DCMAKE_BUILD_TYPE=Release .. \ && make -j"${JOBS}" \ && make install \ + && rm -r ${TMP_LOCATION}/clang8 \ && cd ../.. \ || exit 1 + printf " - Clang 8 successfully installed @ ${CLANG8_ROOT}\\n" else printf " - Clang 8 found @ ${CLANG8_ROOT}.\\n" diff --git a/scripts/eosio_build_fedora.sh b/scripts/eosio_build_fedora.sh index 8fab815109c..8843a9c5b1d 100755 --- a/scripts/eosio_build_fedora.sh +++ b/scripts/eosio_build_fedora.sh @@ -232,10 +232,51 @@ cd .. printf "\\n" if $BUILD_CLANG8; then + if [ ! -d ${OPT_LOCATION}/gmp ]; then + printf "Installing gmp...\\n" + cd ${TMP_LOCATION} \ + && wget https://ftp.gnu.org/gnu/gmp/gmp-5.0.1.tar.gz && tar -xzf gmp-5.0.1.tar.gz \ + && cd gmp-5.0.1 && mkdir build && cd build \ + && ../configure --prefix=${OPT_LOCATION}/gmp \ + && make -j"${JOBS}" && make install \ + && cd ../ && rm -r ${TMP_LOCATION}/gmp-5.0.1 ${TMP_LOCATION}/gmp-5.0.1.tar.gz \ + || exit 1 + fi + if [ ! 
-d ${OPT_LOCATION}/mpfr ]; then + printf "Installing mpfr...\\n" + cd ${TMP_LOCATION} \ + && wget https://ftp.gnu.org/gnu/mpfr/mpfr-3.0.0.tar.gz && tar -xzf mpfr-3.0.0.tar.gz \ + && cd mpfr-3.0.0 && mkdir build && cd build \ + && ../configure --prefix=${OPT_LOCATION}/mpfr --with-gmp=${OPT_LOCATION}/gmp \ + && make -j"${JOBS}" && make install \ + && cd ../ && rm -r ${TMP_LOCATION}/mpfr-3.0.0 ${TMP_LOCATION}/mpfr-3.0.0.tar.gz \ + || exit 1 + fi + if [ ! -d ${OPT_LOCATION}/mpc ]; then + printf "Installing mpc...\\n" + cd ${TMP_LOCATION} \ + && wget https://ftp.gnu.org/gnu/mpc/mpc-1.0.1.tar.gz && tar -xzf mpc-1.0.1.tar.gz \ + && cd mpc-1.0.1 && mkdir build && cd build \ + && ../configure --prefix=${OPT_LOCATION}/mpc --with-gmp=${OPT_LOCATION}/gmp --with-mpfr=${OPT_LOCATION}/mpfr \ + && make -j"${JOBS}" && make install \ + && cd ../ && rm -r ${TMP_LOCATION}/mpc-1.0.1 ${TMP_LOCATION}/mpc-1.0.1 \ + || exit 1 + fi + if [ ! -d ${OPT_LOCATION}/gcc ]; then + printf "Installing libstdc++\\n" + cd ${TMP_LOCATION} \ + && wget https://ftp.gnu.org/gnu/gcc/gcc-7.1.0/gcc-7.1.0.tar.gz && tar -xzf gcc-7.1.0.tar.gz \ + && cd gcc-7.1.0 && mkdir build && cd build \ + &&../configure --enable-languages=c,c++ --prefix=${OPT_LOCATION}/gcc --disable-shared --enable-linker-build-id --without-included-gettext --enable-threads=posix --enable-nls --enable-clocale=gnu --enable-libstdcxx-debug --enable-libstdcxx-time=yes --with-default-libstdcxx-abi=new --enable-gnu-unique-object --disable-vtable-verify --disable-libmpx --enable-plugin --with-system-zlib --with-target-system-zlib --disable-werror --disable-multilib --with-tune=generic --enable-checking=release --with-gmp=${OPT_LOCATION}/gmp --with-mpfr=${OPT_LOCATION}/mpfr --with-mpc=${OPT_LOCATION}/mpc --disable-libsanitizer --disable-testsuite --disable-libquadmath --disable-libitm --disable-libcc1 \ + && make -j"${JOBS}" && make install \ + && cd ../ && rm -r ${TMP_LOCATION}/gcc-7.1.0 ${TMP_LOCATION}/gcc-7.1.0.tar.gz \ + || exit 1 + fi + printf "Checking Clang 8 support...\\n" if [ ! -d $CLANG8_ROOT ]; then printf "Installing Clang 8...\\n" - cd ${OPT_LOCATION} \ + cd ${TMP_LOCATION} \ && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/llvm.git clang8 && cd clang8 \ && git checkout $PINNED_COMPILER_LLVM_COMMIT \ && cd tools \ @@ -257,13 +298,15 @@ if $BUILD_CLANG8; then && cd libunwind && git checkout $PINNED_COMPILER_LIBUNWIND_COMMIT && cd ../ \ && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/compiler-rt.git \ && cd compiler-rt && git checkout $PINNED_COMPILER_COMPILER_RT_COMMIT && cd ../ \ - && cd ${OPT_LOCATION}/clang8 \ + && cd ${TMP_LOCATION}/clang8 \ && mkdir build && cd build \ && $CMAKE -G "Unix Makefiles" -DCMAKE_INSTALL_PREFIX="${CLANG8_ROOT}" -DLLVM_BUILD_EXTERNAL_COMPILER_RT=ON -DLLVM_BUILD_LLVM_DYLIB=ON -DLLVM_ENABLE_LIBCXX=ON -DLLVM_ENABLE_RTTI=ON -DLLVM_INCLUDE_DOCS=OFF -DLLVM_OPTIMIZED_TABLEGEN=ON -DLLVM_TARGETS_TO_BUILD=all -DCMAKE_BUILD_TYPE=Release .. \ && make -j"${JOBS}" \ && make install \ + && rm -r ${TMP_LOCATION}/clang8 \ && cd ../.. 
\ || exit 1 + printf " - Clang 8 successfully installed @ ${CLANG8_ROOT}\\n" else printf " - Clang 8 found @ ${CLANG8_ROOT}.\\n" diff --git a/scripts/eosio_build_ubuntu.sh b/scripts/eosio_build_ubuntu.sh index e50cb3f6d7d..f11ade2272b 100755 --- a/scripts/eosio_build_ubuntu.sh +++ b/scripts/eosio_build_ubuntu.sh @@ -299,12 +299,11 @@ if $BUILD_CLANG8; then && wget https://ftp.gnu.org/gnu/gcc/gcc-7.1.0/gcc-7.1.0.tar.gz && tar -xzf gcc-7.1.0.tar.gz \ && cd gcc-7.1.0 && mkdir build && cd build \ &&../configure --enable-languages=c,c++ --prefix=${OPT_LOCATION}/gcc --disable-shared --enable-linker-build-id --without-included-gettext --enable-threads=posix --enable-nls --enable-clocale=gnu --enable-libstdcxx-debug --enable-libstdcxx-time=yes --with-default-libstdcxx-abi=new --enable-gnu-unique-object --disable-vtable-verify --disable-libmpx --enable-plugin --with-system-zlib --with-target-system-zlib --disable-werror --disable-multilib --with-tune=generic --enable-checking=release --with-gmp=${OPT_LOCATION}/gmp --with-mpfr=${OPT_LOCATION}/mpfr --with-mpc=${OPT_LOCATION}/mpc --disable-libsanitizer --disable-testsuite --disable-libquadmath --disable-libitm --disable-libcc1 \ - && make -j3 && make install \ + && make -j"${JOBS}" && make install \ && cd ../ && rm -r ${TMP_LOCATION}/gcc-7.1.0 ${TMP_LOCATION}/gcc-7.1.0.tar.gz \ || exit 1 fi - #&& make -j"${JOBS}" && make install \ printf "Checking Clang 8 support...\\n" if [ ! -d $CLANG8_ROOT ]; then printf "Installing Clang 8...\\n" @@ -333,13 +332,12 @@ if $BUILD_CLANG8; then && cd ${TMP_LOCATION}/clang8 \ && mkdir build && cd build \ && $CMAKE -G "Unix Makefiles" -DCMAKE_INSTALL_PREFIX="${CLANG8_ROOT}" -DLLVM_BUILD_EXTERNAL_COMPILER_RT=ON -DLLVM_BUILD_LLVM_DYLIB=ON -DLLVM_ENABLE_LIBCXX=ON -DLLVM_ENABLE_RTTI=ON -DLLVM_INCLUDE_DOCS=OFF -DLLVM_OPTIMIZED_TABLEGEN=ON -DLLVM_TARGETS_TO_BUILD=all -DCMAKE_BUILD_TYPE=Release .. \ - && make -j3 \ + && make -j"${JOBS}" \ && make install \ && rm -r ${TMP_LOCATION}/clang8 \ && cd ../.. \ || exit 1 - #&& make -j"${JOBS}" \ printf " - Clang 8 successfully installed @ ${CLANG8_ROOT}\\n" else printf " - Clang 8 found @ ${CLANG8_ROOT}.\\n" From b9f058aa9390b7c3a187dd127c0de84057af30df Mon Sep 17 00:00:00 2001 From: Bucky Kittinger Date: Wed, 17 Apr 2019 00:51:50 -0400 Subject: [PATCH 411/680] use curl instead of wget --- scripts/eosio_build_amazon.sh | 10 +++++----- scripts/eosio_build_centos.sh | 8 ++++---- scripts/eosio_build_fedora.sh | 10 +++++----- scripts/eosio_build_ubuntu.sh | 8 ++++---- 4 files changed, 18 insertions(+), 18 deletions(-) diff --git a/scripts/eosio_build_amazon.sh b/scripts/eosio_build_amazon.sh index 2abc3b60632..480d52ee919 100755 --- a/scripts/eosio_build_amazon.sh +++ b/scripts/eosio_build_amazon.sh @@ -12,7 +12,7 @@ if [[ "${OS_NAME}" == "Amazon Linux AMI" ]]; then # Amazonlinux1 DEP_ARRAY=( sudo procps util-linux which gcc72 gcc72-c++ autoconf automake libtool make doxygen graphviz \ bzip2 bzip2-devel openssl-devel gmp gmp-devel libstdc++72 python27 python27-devel python36 python36-devel \ - libedit-devel ncurses-devel swig wget file libcurl-devel libusb1-devel + libedit-devel ncurses-devel swig curl -LO file libcurl-devel libusb1-devel ) else # Amazonlinux2 DEP_ARRAY=( @@ -261,7 +261,7 @@ if $BUILD_CLANG8; then if [ ! 
-d ${OPT_LOCATION}/gmp ]; then printf "Installing gmp...\\n" cd ${TMP_LOCATION} \ - && wget https://ftp.gnu.org/gnu/gmp/gmp-5.0.1.tar.gz && tar -xzf gmp-5.0.1.tar.gz \ + && curl -LO https://ftp.gnu.org/gnu/gmp/gmp-5.0.1.tar.gz && tar -xzf gmp-5.0.1.tar.gz \ && cd gmp-5.0.1 && mkdir build && cd build \ && ../configure --prefix=${OPT_LOCATION}/gmp \ && make -j"${JOBS}" && make install \ @@ -271,7 +271,7 @@ if $BUILD_CLANG8; then if [ ! -d ${OPT_LOCATION}/mpfr ]; then printf "Installing mpfr...\\n" cd ${TMP_LOCATION} \ - && wget https://ftp.gnu.org/gnu/mpfr/mpfr-3.0.0.tar.gz && tar -xzf mpfr-3.0.0.tar.gz \ + && curl -LO https://ftp.gnu.org/gnu/mpfr/mpfr-3.0.0.tar.gz && tar -xzf mpfr-3.0.0.tar.gz \ && cd mpfr-3.0.0 && mkdir build && cd build \ && ../configure --prefix=${OPT_LOCATION}/mpfr --with-gmp=${OPT_LOCATION}/gmp \ && make -j"${JOBS}" && make install \ @@ -281,7 +281,7 @@ if $BUILD_CLANG8; then if [ ! -d ${OPT_LOCATION}/mpc ]; then printf "Installing mpc...\\n" cd ${TMP_LOCATION} \ - && wget https://ftp.gnu.org/gnu/mpc/mpc-1.0.1.tar.gz && tar -xzf mpc-1.0.1.tar.gz \ + && curl -LO https://ftp.gnu.org/gnu/mpc/mpc-1.0.1.tar.gz && tar -xzf mpc-1.0.1.tar.gz \ && cd mpc-1.0.1 && mkdir build && cd build \ && ../configure --prefix=${OPT_LOCATION}/mpc --with-gmp=${OPT_LOCATION}/gmp --with-mpfr=${OPT_LOCATION}/mpfr \ && make -j"${JOBS}" && make install \ @@ -291,7 +291,7 @@ if $BUILD_CLANG8; then if [ ! -d ${OPT_LOCATION}/gcc ]; then printf "Installing libstdc++\\n" cd ${TMP_LOCATION} \ - && wget https://ftp.gnu.org/gnu/gcc/gcc-7.1.0/gcc-7.1.0.tar.gz && tar -xzf gcc-7.1.0.tar.gz \ + && curl -LO https://ftp.gnu.org/gnu/gcc/gcc-7.1.0/gcc-7.1.0.tar.gz && tar -xzf gcc-7.1.0.tar.gz \ && cd gcc-7.1.0 && mkdir build && cd build \ &&../configure --enable-languages=c,c++ --prefix=${OPT_LOCATION}/gcc --disable-shared --enable-linker-build-id --without-included-gettext --enable-threads=posix --enable-nls --enable-clocale=gnu --enable-libstdcxx-debug --enable-libstdcxx-time=yes --with-default-libstdcxx-abi=new --enable-gnu-unique-object --disable-vtable-verify --disable-libmpx --enable-plugin --with-system-zlib --with-target-system-zlib --disable-werror --disable-multilib --with-tune=generic --enable-checking=release --with-gmp=${OPT_LOCATION}/gmp --with-mpfr=${OPT_LOCATION}/mpfr --with-mpc=${OPT_LOCATION}/mpc --disable-libsanitizer --disable-testsuite --disable-libquadmath --disable-libitm --disable-libcc1 \ && make -j"${JOBS}" && make install \ diff --git a/scripts/eosio_build_centos.sh b/scripts/eosio_build_centos.sh index 3bd5546d50c..858fae748bc 100755 --- a/scripts/eosio_build_centos.sh +++ b/scripts/eosio_build_centos.sh @@ -303,7 +303,7 @@ if $BUILD_CLANG8; then if [ ! -d ${OPT_LOCATION}/gmp ]; then printf "Installing gmp...\\n" cd ${TMP_LOCATION} \ - && wget https://ftp.gnu.org/gnu/gmp/gmp-5.0.1.tar.gz && tar -xzf gmp-5.0.1.tar.gz \ + && curl -LO https://ftp.gnu.org/gnu/gmp/gmp-5.0.1.tar.gz && tar -xzf gmp-5.0.1.tar.gz \ && cd gmp-5.0.1 && mkdir build && cd build \ && ../configure --prefix=${OPT_LOCATION}/gmp \ && make -j"${JOBS}" && make install \ @@ -313,7 +313,7 @@ if $BUILD_CLANG8; then if [ ! 
-d ${OPT_LOCATION}/mpfr ]; then printf "Installing mpfr...\\n" cd ${TMP_LOCATION} \ - && wget https://ftp.gnu.org/gnu/mpfr/mpfr-3.0.0.tar.gz && tar -xzf mpfr-3.0.0.tar.gz \ + && curl -LO https://ftp.gnu.org/gnu/mpfr/mpfr-3.0.0.tar.gz && tar -xzf mpfr-3.0.0.tar.gz \ && cd mpfr-3.0.0 && mkdir build && cd build \ && ../configure --prefix=${OPT_LOCATION}/mpfr --with-gmp=${OPT_LOCATION}/gmp \ && make -j"${JOBS}" && make install \ @@ -323,7 +323,7 @@ if $BUILD_CLANG8; then if [ ! -d ${OPT_LOCATION}/mpc ]; then printf "Installing mpc...\\n" cd ${TMP_LOCATION} \ - && wget https://ftp.gnu.org/gnu/mpc/mpc-1.0.1.tar.gz && tar -xzf mpc-1.0.1.tar.gz \ + && curl -LO https://ftp.gnu.org/gnu/mpc/mpc-1.0.1.tar.gz && tar -xzf mpc-1.0.1.tar.gz \ && cd mpc-1.0.1 && mkdir build && cd build \ && ../configure --prefix=${OPT_LOCATION}/mpc --with-gmp=${OPT_LOCATION}/gmp --with-mpfr=${OPT_LOCATION}/mpfr \ && make -j"${JOBS}" && make install \ @@ -333,7 +333,7 @@ if $BUILD_CLANG8; then if [ ! -d ${OPT_LOCATION}/gcc ]; then printf "Installing libstdc++\\n" cd ${TMP_LOCATION} \ - && wget https://ftp.gnu.org/gnu/gcc/gcc-7.1.0/gcc-7.1.0.tar.gz && tar -xzf gcc-7.1.0.tar.gz \ + && curl -LO https://ftp.gnu.org/gnu/gcc/gcc-7.1.0/gcc-7.1.0.tar.gz && tar -xzf gcc-7.1.0.tar.gz \ && cd gcc-7.1.0 && mkdir build && cd build \ &&../configure --enable-languages=c,c++ --prefix=${OPT_LOCATION}/gcc --disable-shared --enable-linker-build-id --without-included-gettext --enable-threads=posix --enable-nls --enable-clocale=gnu --enable-libstdcxx-debug --enable-libstdcxx-time=yes --with-default-libstdcxx-abi=new --enable-gnu-unique-object --disable-vtable-verify --disable-libmpx --enable-plugin --with-system-zlib --with-target-system-zlib --disable-werror --disable-multilib --with-tune=generic --enable-checking=release --with-gmp=${OPT_LOCATION}/gmp --with-mpfr=${OPT_LOCATION}/mpfr --with-mpc=${OPT_LOCATION}/mpc --disable-libsanitizer --disable-testsuite --disable-libquadmath --disable-libitm --disable-libcc1 \ && make -j"${JOBS}" && make install \ diff --git a/scripts/eosio_build_fedora.sh b/scripts/eosio_build_fedora.sh index 8843a9c5b1d..76fd321d889 100755 --- a/scripts/eosio_build_fedora.sh +++ b/scripts/eosio_build_fedora.sh @@ -43,7 +43,7 @@ printf "Disk space available: ${DISK_AVAIL%.*}G\\n" # llvm is symlinked from /usr/lib64/llvm4.0 into user's home DEP_ARRAY=( git sudo procps-ng which gcc gcc-c++ autoconf automake libtool make \ - bzip2-devel wget bzip2 compat-openssl10 graphviz doxygen \ + bzip2-devel curl -LO bzip2 compat-openssl10 graphviz doxygen \ openssl-devel gmp-devel libstdc++-devel python2 python2-devel python3 python3-devel \ libedit ncurses-devel swig llvm4.0 llvm4.0-devel llvm4.0-libs llvm4.0-static libcurl-devel libusb-devel ) @@ -235,7 +235,7 @@ if $BUILD_CLANG8; then if [ ! -d ${OPT_LOCATION}/gmp ]; then printf "Installing gmp...\\n" cd ${TMP_LOCATION} \ - && wget https://ftp.gnu.org/gnu/gmp/gmp-5.0.1.tar.gz && tar -xzf gmp-5.0.1.tar.gz \ + && curl -LO https://ftp.gnu.org/gnu/gmp/gmp-5.0.1.tar.gz && tar -xzf gmp-5.0.1.tar.gz \ && cd gmp-5.0.1 && mkdir build && cd build \ && ../configure --prefix=${OPT_LOCATION}/gmp \ && make -j"${JOBS}" && make install \ @@ -245,7 +245,7 @@ if $BUILD_CLANG8; then if [ ! 
-d ${OPT_LOCATION}/mpfr ]; then printf "Installing mpfr...\\n" cd ${TMP_LOCATION} \ - && wget https://ftp.gnu.org/gnu/mpfr/mpfr-3.0.0.tar.gz && tar -xzf mpfr-3.0.0.tar.gz \ + && curl -LO https://ftp.gnu.org/gnu/mpfr/mpfr-3.0.0.tar.gz && tar -xzf mpfr-3.0.0.tar.gz \ && cd mpfr-3.0.0 && mkdir build && cd build \ && ../configure --prefix=${OPT_LOCATION}/mpfr --with-gmp=${OPT_LOCATION}/gmp \ && make -j"${JOBS}" && make install \ @@ -255,7 +255,7 @@ if $BUILD_CLANG8; then if [ ! -d ${OPT_LOCATION}/mpc ]; then printf "Installing mpc...\\n" cd ${TMP_LOCATION} \ - && wget https://ftp.gnu.org/gnu/mpc/mpc-1.0.1.tar.gz && tar -xzf mpc-1.0.1.tar.gz \ + && curl -LO https://ftp.gnu.org/gnu/mpc/mpc-1.0.1.tar.gz && tar -xzf mpc-1.0.1.tar.gz \ && cd mpc-1.0.1 && mkdir build && cd build \ && ../configure --prefix=${OPT_LOCATION}/mpc --with-gmp=${OPT_LOCATION}/gmp --with-mpfr=${OPT_LOCATION}/mpfr \ && make -j"${JOBS}" && make install \ @@ -265,7 +265,7 @@ if $BUILD_CLANG8; then if [ ! -d ${OPT_LOCATION}/gcc ]; then printf "Installing libstdc++\\n" cd ${TMP_LOCATION} \ - && wget https://ftp.gnu.org/gnu/gcc/gcc-7.1.0/gcc-7.1.0.tar.gz && tar -xzf gcc-7.1.0.tar.gz \ + && curl -LO https://ftp.gnu.org/gnu/gcc/gcc-7.1.0/gcc-7.1.0.tar.gz && tar -xzf gcc-7.1.0.tar.gz \ && cd gcc-7.1.0 && mkdir build && cd build \ &&../configure --enable-languages=c,c++ --prefix=${OPT_LOCATION}/gcc --disable-shared --enable-linker-build-id --without-included-gettext --enable-threads=posix --enable-nls --enable-clocale=gnu --enable-libstdcxx-debug --enable-libstdcxx-time=yes --with-default-libstdcxx-abi=new --enable-gnu-unique-object --disable-vtable-verify --disable-libmpx --enable-plugin --with-system-zlib --with-target-system-zlib --disable-werror --disable-multilib --with-tune=generic --enable-checking=release --with-gmp=${OPT_LOCATION}/gmp --with-mpfr=${OPT_LOCATION}/mpfr --with-mpc=${OPT_LOCATION}/mpc --disable-libsanitizer --disable-testsuite --disable-libquadmath --disable-libitm --disable-libcc1 \ && make -j"${JOBS}" && make install \ diff --git a/scripts/eosio_build_ubuntu.sh b/scripts/eosio_build_ubuntu.sh index f11ade2272b..d616553021f 100755 --- a/scripts/eosio_build_ubuntu.sh +++ b/scripts/eosio_build_ubuntu.sh @@ -266,7 +266,7 @@ if $BUILD_CLANG8; then if [ ! -d ${OPT_LOCATION}/gmp ]; then printf "Installing gmp...\\n" cd ${TMP_LOCATION} \ - && wget https://ftp.gnu.org/gnu/gmp/gmp-5.0.1.tar.gz && tar -xzf gmp-5.0.1.tar.gz \ + && curl -LO https://ftp.gnu.org/gnu/gmp/gmp-5.0.1.tar.gz && tar -xzf gmp-5.0.1.tar.gz \ && cd gmp-5.0.1 && mkdir build && cd build \ && ../configure --prefix=${OPT_LOCATION}/gmp \ && make -j"${JOBS}" && make install \ @@ -276,7 +276,7 @@ if $BUILD_CLANG8; then if [ ! -d ${OPT_LOCATION}/mpfr ]; then printf "Installing mpfr...\\n" cd ${TMP_LOCATION} \ - && wget https://ftp.gnu.org/gnu/mpfr/mpfr-3.0.0.tar.gz && tar -xzf mpfr-3.0.0.tar.gz \ + && curl -LO https://ftp.gnu.org/gnu/mpfr/mpfr-3.0.0.tar.gz && tar -xzf mpfr-3.0.0.tar.gz \ && cd mpfr-3.0.0 && mkdir build && cd build \ && ../configure --prefix=${OPT_LOCATION}/mpfr --with-gmp=${OPT_LOCATION}/gmp \ && make -j"${JOBS}" && make install \ @@ -286,7 +286,7 @@ if $BUILD_CLANG8; then if [ ! 
-d ${OPT_LOCATION}/mpc ]; then printf "Installing mpc...\\n" cd ${TMP_LOCATION} \ - && wget https://ftp.gnu.org/gnu/mpc/mpc-1.0.1.tar.gz && tar -xzf mpc-1.0.1.tar.gz \ + && curl -LO https://ftp.gnu.org/gnu/mpc/mpc-1.0.1.tar.gz && tar -xzf mpc-1.0.1.tar.gz \ && cd mpc-1.0.1 && mkdir build && cd build \ && ../configure --prefix=${OPT_LOCATION}/mpc --with-gmp=${OPT_LOCATION}/gmp --with-mpfr=${OPT_LOCATION}/mpfr \ && make -j"${JOBS}" && make install \ @@ -296,7 +296,7 @@ if $BUILD_CLANG8; then if [ ! -d ${OPT_LOCATION}/gcc ]; then printf "Installing libstdc++\\n" cd ${TMP_LOCATION} \ - && wget https://ftp.gnu.org/gnu/gcc/gcc-7.1.0/gcc-7.1.0.tar.gz && tar -xzf gcc-7.1.0.tar.gz \ + && curl -LO https://ftp.gnu.org/gnu/gcc/gcc-7.1.0/gcc-7.1.0.tar.gz && tar -xzf gcc-7.1.0.tar.gz \ && cd gcc-7.1.0 && mkdir build && cd build \ &&../configure --enable-languages=c,c++ --prefix=${OPT_LOCATION}/gcc --disable-shared --enable-linker-build-id --without-included-gettext --enable-threads=posix --enable-nls --enable-clocale=gnu --enable-libstdcxx-debug --enable-libstdcxx-time=yes --with-default-libstdcxx-abi=new --enable-gnu-unique-object --disable-vtable-verify --disable-libmpx --enable-plugin --with-system-zlib --with-target-system-zlib --disable-werror --disable-multilib --with-tune=generic --enable-checking=release --with-gmp=${OPT_LOCATION}/gmp --with-mpfr=${OPT_LOCATION}/mpfr --with-mpc=${OPT_LOCATION}/mpc --disable-libsanitizer --disable-testsuite --disable-libquadmath --disable-libitm --disable-libcc1 \ && make -j"${JOBS}" && make install \ From 5937ffd67611f420967cf73e67e1bdd5855e99b7 Mon Sep 17 00:00:00 2001 From: Bucky Kittinger Date: Wed, 17 Apr 2019 01:28:54 -0400 Subject: [PATCH 412/680] fix rm --- scripts/eosio_build_amazon.sh | 2 +- scripts/eosio_build_centos.sh | 2 +- scripts/eosio_build_fedora.sh | 2 +- scripts/eosio_build_ubuntu.sh | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/scripts/eosio_build_amazon.sh b/scripts/eosio_build_amazon.sh index 480d52ee919..441776f9606 100755 --- a/scripts/eosio_build_amazon.sh +++ b/scripts/eosio_build_amazon.sh @@ -285,7 +285,7 @@ if $BUILD_CLANG8; then && cd mpc-1.0.1 && mkdir build && cd build \ && ../configure --prefix=${OPT_LOCATION}/mpc --with-gmp=${OPT_LOCATION}/gmp --with-mpfr=${OPT_LOCATION}/mpfr \ && make -j"${JOBS}" && make install \ - && cd ../ && rm -r ${TMP_LOCATION}/mpc-1.0.1 ${TMP_LOCATION}/mpc-1.0.1 \ + && cd ../ && rm -r ${TMP_LOCATION}/mpc-1.0.1 ${TMP_LOCATION}/mpc-1.0.1.tar.gz \ || exit 1 fi if [ ! -d ${OPT_LOCATION}/gcc ]; then diff --git a/scripts/eosio_build_centos.sh b/scripts/eosio_build_centos.sh index 858fae748bc..c40a3a8e686 100755 --- a/scripts/eosio_build_centos.sh +++ b/scripts/eosio_build_centos.sh @@ -327,7 +327,7 @@ if $BUILD_CLANG8; then && cd mpc-1.0.1 && mkdir build && cd build \ && ../configure --prefix=${OPT_LOCATION}/mpc --with-gmp=${OPT_LOCATION}/gmp --with-mpfr=${OPT_LOCATION}/mpfr \ && make -j"${JOBS}" && make install \ - && cd ../ && rm -r ${TMP_LOCATION}/mpc-1.0.1 ${TMP_LOCATION}/mpc-1.0.1 \ + && cd ../ && rm -r ${TMP_LOCATION}/mpc-1.0.1 ${TMP_LOCATION}/mpc-1.0.1.tar.gz \ || exit 1 fi if [ ! 
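[PATCH 411/680] above replaces wget with curl -LO throughout the dependency fetches; -L follows redirects and -O keeps the remote filename, which together match what wget did by default for these tarballs:

    URL=https://ftp.gnu.org/gnu/gmp/gmp-5.0.1.tar.gz
    wget "$URL"        # writes gmp-5.0.1.tar.gz to the current directory
    curl -LO "$URL"    # same outcome: follow redirects (-L), keep remote name (-O)

One caveat for readers: the same textual substitution also landed inside the Amazon and Fedora DEP_ARRAY package lists earlier in the series ("swig curl -LO file" and "bzip2-devel curl -LO bzip2"), where a plain curl package name was presumably intended.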
-d ${OPT_LOCATION}/gcc ]; then diff --git a/scripts/eosio_build_fedora.sh b/scripts/eosio_build_fedora.sh index 76fd321d889..bad7556d044 100755 --- a/scripts/eosio_build_fedora.sh +++ b/scripts/eosio_build_fedora.sh @@ -259,7 +259,7 @@ if $BUILD_CLANG8; then && cd mpc-1.0.1 && mkdir build && cd build \ && ../configure --prefix=${OPT_LOCATION}/mpc --with-gmp=${OPT_LOCATION}/gmp --with-mpfr=${OPT_LOCATION}/mpfr \ && make -j"${JOBS}" && make install \ - && cd ../ && rm -r ${TMP_LOCATION}/mpc-1.0.1 ${TMP_LOCATION}/mpc-1.0.1 \ + && cd ../ && rm -r ${TMP_LOCATION}/mpc-1.0.1 ${TMP_LOCATION}/mpc-1.0.1.tar.gz \ || exit 1 fi if [ ! -d ${OPT_LOCATION}/gcc ]; then diff --git a/scripts/eosio_build_ubuntu.sh b/scripts/eosio_build_ubuntu.sh index d616553021f..55bd5d33435 100755 --- a/scripts/eosio_build_ubuntu.sh +++ b/scripts/eosio_build_ubuntu.sh @@ -290,7 +290,7 @@ if $BUILD_CLANG8; then && cd mpc-1.0.1 && mkdir build && cd build \ && ../configure --prefix=${OPT_LOCATION}/mpc --with-gmp=${OPT_LOCATION}/gmp --with-mpfr=${OPT_LOCATION}/mpfr \ && make -j"${JOBS}" && make install \ - && cd ../ && rm -r ${TMP_LOCATION}/mpc-1.0.1 ${TMP_LOCATION}/mpc-1.0.1 \ + && cd ../ && rm -r ${TMP_LOCATION}/mpc-1.0.1 ${TMP_LOCATION}/mpc-1.0.1.tar.gz \ || exit 1 fi if [ ! -d ${OPT_LOCATION}/gcc ]; then From eeb8b803c35fb66cc16a76af1731169b0da0ea2a Mon Sep 17 00:00:00 2001 From: Bucky Kittinger Date: Wed, 17 Apr 2019 01:41:34 -0400 Subject: [PATCH 413/680] export LD_LIBRARY_PATH --- scripts/eosio_build_amazon.sh | 1 + scripts/eosio_build_centos.sh | 1 + scripts/eosio_build_fedora.sh | 1 + scripts/eosio_build_ubuntu.sh | 1 + 4 files changed, 4 insertions(+) diff --git a/scripts/eosio_build_amazon.sh b/scripts/eosio_build_amazon.sh index 441776f9606..e499845d4a4 100755 --- a/scripts/eosio_build_amazon.sh +++ b/scripts/eosio_build_amazon.sh @@ -258,6 +258,7 @@ else # noexec wasn't found fi if $BUILD_CLANG8; then + export LD_LIBRARY_PATH=${OPT_LOCATION}/mpfr:$LD_LIBRARY_PATH if [ ! -d ${OPT_LOCATION}/gmp ]; then printf "Installing gmp...\\n" cd ${TMP_LOCATION} \ diff --git a/scripts/eosio_build_centos.sh b/scripts/eosio_build_centos.sh index c40a3a8e686..3993c544dd9 100755 --- a/scripts/eosio_build_centos.sh +++ b/scripts/eosio_build_centos.sh @@ -300,6 +300,7 @@ cd .. printf "\\n" if $BUILD_CLANG8; then + export LD_LIBRARY_PATH=${OPT_LOCATION}/mpfr:$LD_LIBRARY_PATH if [ ! -d ${OPT_LOCATION}/gmp ]; then printf "Installing gmp...\\n" cd ${TMP_LOCATION} \ diff --git a/scripts/eosio_build_fedora.sh b/scripts/eosio_build_fedora.sh index bad7556d044..c778ee51fa4 100755 --- a/scripts/eosio_build_fedora.sh +++ b/scripts/eosio_build_fedora.sh @@ -232,6 +232,7 @@ cd .. printf "\\n" if $BUILD_CLANG8; then + export LD_LIBRARY_PATH=${OPT_LOCATION}/mpfr:$LD_LIBRARY_PATH if [ ! -d ${OPT_LOCATION}/gmp ]; then printf "Installing gmp...\\n" cd ${TMP_LOCATION} \ diff --git a/scripts/eosio_build_ubuntu.sh b/scripts/eosio_build_ubuntu.sh index 55bd5d33435..4f750475453 100755 --- a/scripts/eosio_build_ubuntu.sh +++ b/scripts/eosio_build_ubuntu.sh @@ -263,6 +263,7 @@ else # noexec wasn't found fi if $BUILD_CLANG8; then + export LD_LIBRARY_PATH=${OPT_LOCATION}/mpfr:$LD_LIBRARY_PATH if [ ! 
-d ${OPT_LOCATION}/gmp ]; then printf "Installing gmp...\\n" cd ${TMP_LOCATION} \ From 408f5ebdd4d7ff2559d88dc8b20d2b78a7befc2d Mon Sep 17 00:00:00 2001 From: Bucky Kittinger Date: Wed, 17 Apr 2019 01:44:19 -0400 Subject: [PATCH 414/680] fix up darwin --- scripts/eosio_build_darwin.sh | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/scripts/eosio_build_darwin.sh b/scripts/eosio_build_darwin.sh index ebda9b2ae8e..fa0acbe26d1 100755 --- a/scripts/eosio_build_darwin.sh +++ b/scripts/eosio_build_darwin.sh @@ -262,11 +262,20 @@ fi cd .. printf "\\n" +# Use current directory's tmp directory if noexec is enabled for /tmp +if (mount | grep "/tmp " | grep --quiet noexec); then + mkdir -p $REPO_ROOT/tmp + TMP_LOCATION="${REPO_ROOT}/tmp" + rm -rf $REPO_ROOT/tmp/* +else # noexec wasn't found + TMP_LOCATION="/tmp" +fi + if $BUILD_CLANG8; then printf "Checking Clang 8 support...\\n" if [ ! -d $CLANG8_ROOT ]; then printf "Installing Clang 8...\\n" - cd ${OPT_LOCATION} \ + cd ${TMP_LOCATION} \ && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/llvm.git clang8 \ && cd clang8 && git checkout $PINNED_COMPILER_LLVM_COMMIT \ && cd tools \ @@ -288,7 +297,7 @@ if $BUILD_CLANG8; then && cd libunwind && git checkout $PINNED_COMPILER_LIBUNWIND_COMMIT && cd ../ \ && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/compiler-rt.git \ && cd compiler-rt && git checkout $PINNED_COMPILER_COMPILER_RT_COMMIT && cd ../ \ - && cd ${OPT_LOCATION}/clang8 \ + && cd ${TMP_LOCATION}/clang8 \ && mkdir build && cd build \ && $CMAKE -G "Unix Makefiles" -DCMAKE_INSTALL_PREFIX="${CLANG8_ROOT}" -DLLVM_BUILD_EXTERNAL_COMPILER_RT=ON -DLLVM_BUILD_LLVM_DYLIB=ON -DLLVM_ENABLE_LIBCXX=ON -DLLVM_ENABLE_RTTI=ON -DLLVM_INCLUDE_DOCS=OFF -DLLVM_OPTIMIZED_TABLEGEN=ON -DLLVM_TARGETS_TO_BUILD=all -DCMAKE_BUILD_TYPE=Release .. 
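[PATCH 413/680] above exports LD_LIBRARY_PATH so that binaries built against the freshly installed prerequisites (notably the GCC bootstrap's own tools) can locate those shared libraries at run time. As written it points at the mpfr install prefix rather than its lib subdirectory; a later patch in this series corrects the exports to ${OPT_LOCATION}/mpfr/lib and ${OPT_LOCATION}/gcc/lib. A quick way to sanity-check the corrected form is sketched below; the cc1plus path is an assumption based on GCC 7.1.0's usual x86_64 libexec layout:

    export LD_LIBRARY_PATH="${HOME}/opt/mpfr/lib:${HOME}/opt/gcc/lib:${LD_LIBRARY_PATH}"
    # GCC's C++ front end links against mpfr; ldd should now resolve libmpfr:
    ldd "${HOME}/opt/gcc/libexec/gcc/x86_64-pc-linux-gnu/7.1.0/cc1plus" | grep mpfr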
\ && make -j"${JOBS}" \ From 78127099336458a27c23b9e3902e876ca7653819 Mon Sep 17 00:00:00 2001 From: Kayan Date: Wed, 17 Apr 2019 18:32:54 +0800 Subject: [PATCH 415/680] add test case --- unittests/contracts.hpp.in | 1 + unittests/protocol_feature_tests.cpp | 18 ++++ .../ram_restriction_test/CMakeLists.txt | 6 ++ .../ram_restriction_test.abi | 91 ++++++++++++++++++ .../ram_restriction_test.cpp | 65 +++++++++++++ .../ram_restriction_test.wasm | Bin 0 -> 10560 bytes 6 files changed, 181 insertions(+) create mode 100644 unittests/test-contracts/ram_restriction_test/CMakeLists.txt create mode 100644 unittests/test-contracts/ram_restriction_test/ram_restriction_test.abi create mode 100644 unittests/test-contracts/ram_restriction_test/ram_restriction_test.cpp create mode 100755 unittests/test-contracts/ram_restriction_test/ram_restriction_test.wasm diff --git a/unittests/contracts.hpp.in b/unittests/contracts.hpp.in index 4f09e318ddf..33bc62ed93f 100644 --- a/unittests/contracts.hpp.in +++ b/unittests/contracts.hpp.in @@ -52,6 +52,7 @@ namespace eosio { MAKE_READ_WASM_ABI(test_api_db, test_api_db, test-contracts) MAKE_READ_WASM_ABI(test_api_multi_index, test_api_multi_index, test-contracts) MAKE_READ_WASM_ABI(test_ram_limit, test_ram_limit, test-contracts) + MAKE_READ_WASM_ABI(ram_restriction_test, ram_restriction_test, test-contracts) }; } /// eosio::testing } /// eosio diff --git a/unittests/protocol_feature_tests.cpp b/unittests/protocol_feature_tests.cpp index 7985cb4cd01..6194c0edf58 100644 --- a/unittests/protocol_feature_tests.cpp +++ b/unittests/protocol_feature_tests.cpp @@ -1019,4 +1019,22 @@ BOOST_AUTO_TEST_CASE( get_sender_test ) { try { ); } FC_LOG_AND_RETHROW() } +BOOST_AUTO_TEST_CASE( ram_restriction_test ) { try { + tester c( setup_policy::preactivate_feature_and_new_bios ); + + const auto& tester1_account = account_name("tester1"); + const auto& tester2_account = account_name("tester2"); + const auto& alice_account = account_name("alice"); + c.create_accounts( {tester1_account, tester2_account, alice_account} ); + c.produce_block(); + c.set_code( tester1_account, contracts::ram_restriction_test_wasm() ); + c.set_abi( tester1_account, contracts::ram_restriction_test_abi().data() ); + c.produce_block(); + c.set_code( tester2_account, contracts::ram_restriction_test_wasm() ); + c.set_abi( tester2_account, contracts::ram_restriction_test_abi().data() ); + c.produce_block(); + + +} FC_LOG_AND_RETHROW() } + BOOST_AUTO_TEST_SUITE_END() diff --git a/unittests/test-contracts/ram_restriction_test/CMakeLists.txt b/unittests/test-contracts/ram_restriction_test/CMakeLists.txt new file mode 100644 index 00000000000..74710d96a8b --- /dev/null +++ b/unittests/test-contracts/ram_restriction_test/CMakeLists.txt @@ -0,0 +1,6 @@ +if( EOSIO_COMPILE_TEST_CONTRACTS ) + add_contract( ram_restriction_test ram_restriction_test ram_restriction_test.cpp ) +else() + configure_file( ${CMAKE_CURRENT_SOURCE_DIR}/ram_restriction_test.wasm ${CMAKE_CURRENT_BINARY_DIR}/ram_restriction_test.wasm COPYONLY ) + configure_file( ${CMAKE_CURRENT_SOURCE_DIR}/ram_restriction_test.abi ${CMAKE_CURRENT_BINARY_DIR}/ram_restriction_test.abi COPYONLY ) +endif() diff --git a/unittests/test-contracts/ram_restriction_test/ram_restriction_test.abi b/unittests/test-contracts/ram_restriction_test/ram_restriction_test.abi new file mode 100644 index 00000000000..662e6dfed91 --- /dev/null +++ b/unittests/test-contracts/ram_restriction_test/ram_restriction_test.abi @@ -0,0 +1,91 @@ +{ + "____comment": "This file was generated with 
eosio-abigen. DO NOT EDIT ", + "version": "eosio::abi/1.1", + "types": [], + "structs": [ + { + "name": "data", + "base": "", + "fields": [ + { + "name": "key", + "type": "uint64" + }, + { + "name": "value", + "type": "bytes" + } + ] + }, + { + "name": "notifysetdat", + "base": "", + "fields": [ + { + "name": "acctonotify", + "type": "name" + }, + { + "name": "len1", + "type": "int32" + }, + { + "name": "len2", + "type": "int32" + }, + { + "name": "payer", + "type": "name" + } + ] + }, + { + "name": "setdata", + "base": "", + "fields": [ + { + "name": "len1", + "type": "int32" + }, + { + "name": "len2", + "type": "int32" + }, + { + "name": "payer", + "type": "name" + } + ] + } + ], + "actions": [ + { + "name": "notifysetdat", + "type": "notifysetdat", + "ricardian_contract": "" + }, + { + "name": "setdata", + "type": "setdata", + "ricardian_contract": "" + } + ], + "tables": [ + { + "name": "tablea", + "type": "data", + "index_type": "i64", + "key_names": [], + "key_types": [] + }, + { + "name": "tableb", + "type": "data", + "index_type": "i64", + "key_names": [], + "key_types": [] + } + ], + "ricardian_clauses": [], + "variants": [] +} \ No newline at end of file diff --git a/unittests/test-contracts/ram_restriction_test/ram_restriction_test.cpp b/unittests/test-contracts/ram_restriction_test/ram_restriction_test.cpp new file mode 100644 index 00000000000..ec51c38fa31 --- /dev/null +++ b/unittests/test-contracts/ram_restriction_test/ram_restriction_test.cpp @@ -0,0 +1,65 @@ +/** + * @file + * @copyright defined in eos/LICENSE + */ + +#include +#include + +class [[eosio::contract]] ram_restriction_test : public eosio::contract { + +public: + struct [[eosio::table]] data { + uint64_t key; + std::vector value; + + uint64_t primary_key() const { return key; } + }; + + typedef eosio::multi_index<"tablea"_n, data> tablea; + typedef eosio::multi_index<"tableb"_n, data> tableb; + +public: + using eosio::contract::contract; + + template + void setdata_(int len, eosio::name payer) { + Table ta(_self, 0); + std::vector data; + data.resize(len, 0); + auto it = ta.find(0); + if (it == ta.end()) { + if (len) { + ta.emplace(payer, [&](auto &v) { + v.key = 0; + v.value = data; + }); + } + } else { + if (len) { + ta.modify(it, payer, [&](auto &v) { + v.key = 0; + v.value = data; + }); + } else { + ta.erase(it); + } + } + } + + [[eosio::action]] + void setdata(int len1, int len2, eosio::name payer ) { + setdata_(len1, payer); + setdata_(len2, payer); + } + + [[eosio::action]] + void notifysetdat(eosio::name acctonotify, int len1, int len2, eosio::name payer) { + require_recipient(acctonotify); + } + + [[eosio::on_notify("testacc::notifysetdat")]] + void on_notify_setdata(eosio::name acctonotify, int len1, int len2, eosio::name payer) { + setdata(len1, len2, payer); + } +}; diff --git a/unittests/test-contracts/ram_restriction_test/ram_restriction_test.wasm b/unittests/test-contracts/ram_restriction_test/ram_restriction_test.wasm new file mode 100755 index 0000000000000000000000000000000000000000..62e979122b64f8d44a8345c1835abc808a511c48 GIT binary patch literal 10560 zcmeI2O^hAcRmbnG?{;f z`rD3CKC;{VW5D^_7Yp6nq)dz5MXzOy=}t>^2buWEvyA(AVRrI*ax@*SU!U6;cF8lD|(?5u7q4|jG(+Y^^+sJJ$|wlkWz z%$9ABhL@HPtabTx&dTP^IOpj0XzQib?a}h~Xk~SCb+kTlg(e+6NPBr@2L8m}+C zuzYzH4p+}U>#FI<`sl{QMr?z8_`(JZIX!JwUg8|<6VV^7ULS3{84VwT%!{MA#(14& zU}v%cBO-iAb4t!~ZR7eVGCIeEmo||=oKb6r#8+1Wf# z)7c|UbL7a;{D~)0{iVn9^k|+Q%W}Wzz0XtceDP$u>$^MmT)peMe}7}|QqK5%xVd@t zrhDShKU`SIT=PBk?|Ro94KDakWQ$98`1cq8^7*eWg@msfZQhkHz4FVy@z=lgN5A(= 
zzq-3W{QY@wHvP`MFMak+_sMLrv+rO1%isRjpZmIN%DS%e0~7Y?fAPQn{+~Y|DY4{J zckUcmVhdjR+SmT}!*Tw@cK(67?p*U^UElFO-Axwb|KL72=Q{o`B%gutuDRa&ahsgw zTO7T+*rvgayK>y!*lOL`}*!ZZ4TkA%28Ep82s>2hWY3erV1! z-u=ow*CZi5QirroTz%I2-FLCwTFc&NlUY{6!t6rg4(_DUv&kJ;`7y4aS=7qHb+4eI z|Mb4t?c5;IIW%ugd~OBzp*g;E%}2ytbL>dnyrX;aEM#GCdgN|*+-AX#n`5nxkG?XF z7}Bjad5&eSyESf8SZTAa+-m&?=Mqif{4DEkjN6=NgCeBC2cE38^pFC1S+) zBR9sOFd8Aw$yt)xGuIriLxMQqi=|9-d_XvobO^C0DNfkMzSqwnx9-V7s-qy{>;mk= zMKdopy)4hpI!KYT&d%EUyid?K^m+bNi%?oO=h8)l=cM<~F1Cr*CPy91dzsWQhNy#U zPSl5;R-2$;I*y*K*Pt3Tn4Hptq?;X!BmI4ZZKnIB%{y6|ENVmB3`#uIW^qYi#vHE7 zmWkN{S4?4`=gs&OJk%8*u=c(8v(k@;I(N+>SHG6{Ts)mwYm-45j4ugF{~OW39T|6B?0b`)(C}DV@8@u_zs4j-3&NFCq;xoa_z?OU7#1)x3!EEtYG^nm$=EV*& zKMvO?MSutA&7e)(5@_osKMWknq>Ff8FH!`_1}K$Pxp`tBepM|pa|j-%HKQt^8*+U_ zv5N`Q*TBItu7M-qH8Kvdz|*g|?;<0*_edjp`r9+2_kOEJ^h3z}Z5z?k`$mNOe#aZp zFZO{ud<%g)MxXgUe$Nv_ze()2cqWh2JOXq1VC=RadmtF!+G>jj!Z8uIkKW2%7Uhd> z_P&@+)H@jBD+|&BDr`)MZ|<93DT>Dse>$CSlwpEA>u-9Y+#@{Sm(IRfGJCWXFd&8X z^Tr4gAS_IimwbK7#s4LPXZMcxNEz&<$}@X&`;4NL-Y3V0h{BR%pY5vgHzqu3@1&TV zH)sULI+v1rl<-n8CJTp zyzZNmN9y`N(xlw=x9@{nqA*X2cj(FlxS!%9y7fZXq;sXG98Wlq-fd6}u&p7Kkndr3 zkFC097MDa34p9B6>|+p0xa*->c8VE;5_(Mir+bFU3P2uG8I+d0$f&d6C?zprAud4| zM0qw@=t6U%&6OydkmDvv#px~Mg1=1oCNR+a0mqT8of`DuFWRlV2zqRw99}igoPZ9X z9PIJUg1Ap$eWOlclnv#8fb{R?SIDRDdP zfq$l0YT%#sK$D2cUVmM(PZk^4;iR9iIP-NI44#2Ov4oz2S1b}J@d&In7j|t5i4gC_ zYSz3zE-4K(g)$QR+#j2E#j3y&$*QcZK*uX5T{#60t$^}q=|@U;iOXi%Vb%vO^t?Ycs5o$f1aip@k*@^DP+%F z|IUo*K6Cx~H=+$kZJE(nV55H0IoXgpMz)FzKrbVjF@$i;T-ySNfSgQduQC%=j{o z-x9)WZm+QH?8DAU}!|n5|l4p_1bES*XMZ(hzkyf)c;D zkS8_G;|10Jk~>i&#M&ZnN^H~lH$>7fmm?^H1vwcM;N7gb^dBgo#JD+)Q7}yP!*`9 zm>L1KD5}3CEITM7J5>I%0KkzhrC~;@c>yMml0kHI0JK>TYr!}X9ecrGy%;j`49iXu zP*@MZfQgU*UV`g_-ZDfC&5*hcqFCCV@l@p?Q$RCltmp99(*a5H^CQF3X3~pzkG{DBh_^yQ~^{*uG;(8R8IK6V9V|EU4aE5!7|vTfFcqy0<(p?k2C@g>J&z zL)2DDDBT^ug;Y!p@SUkJfqcsgIvNaLXbaU}=#!30^X#6`(CD7{%m^a34#h|9&`3`1 z)C#JqCe=Y8iqj%MD3`LwzOA-TQ&^TF$T}=g#~OsmLy4O?UT^*3!x~a%FE^NY*#af+kpcVB+bxNgY(>2X3y@FLBP$Nnz&)3i%LQ>_!S+RInX-;x1*&yC% zC5}_^t8SZmOljZjOIViei&b8VSaVL2UU*8T>3I7}=g63gl%rRL#I$Q~e5Z;Zc=1DI zr5i}e2I+o)W};aZt5%oIVY>O|DxMWgktw29y*o!{oL9y~<3QWky^Hi& zj;0hC)X8$S-ZrbB_;0><$-VctT1H<0IwT*HUDk_+aAsTMp*Rg{r|?ilO36rV)s>Uz zhT9)-v^LZWp)_3U+`$y1w5RZcp{UE^#b#m2PzGmet;|tP7m>o$3yr6?qz(HM2U;%T zXpbl{eJWq{tq5ZI4iQ;G!N0znQ?|XhGG}<4vLC1Hx0kZ(FJ*`A7d!PkFArC*jxL4C zMi^eby0OBquEOT<#nFzlcklHx8!wDUD-*`}?U_wmos71J{C;bhC96AOePa?<*Tc1! 
zu1;2$`MKEW1}o{#*YDtGZ5!J`zf40a;c-A{d+d(*0}eVT8OhdL+NJFYEX9yTzGrlU`eVL`si8s_U5vlPkkXczO5; Lt)i*_8aVgAh*dyj literal 0 HcmV?d00001 From c2655023c4d5ab9d49e31b313796eb5cdb8feedd Mon Sep 17 00:00:00 2001 From: Zach Butler Date: Wed, 17 Apr 2019 10:05:13 -0400 Subject: [PATCH 416/680] Buildkite Docker plugin now adds variables on "Environment" tab to container --- .buildkite/long_running_tests.yml | 8 ++++++++ .buildkite/pipeline.yml | 15 +++++++++++++++ 2 files changed, 23 insertions(+) diff --git a/.buildkite/long_running_tests.yml b/.buildkite/long_running_tests.yml index 0189d3f5f54..56eb9d08953 100644 --- a/.buildkite/long_running_tests.yml +++ b/.buildkite/long_running_tests.yml @@ -17,6 +17,7 @@ steps: docker#v2.1.0: debug: true image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu16_2-2" + propagate-environment: true workdir: /data/job timeout: 60 @@ -38,6 +39,7 @@ steps: docker#v2.1.0: debug: true image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-2" + propagate-environment: true workdir: /data/job timeout: 60 @@ -59,6 +61,7 @@ steps: docker#v2.1.0: debug: true image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-2" + propagate-environment: true workdir: /data/job timeout: 60 @@ -80,6 +83,7 @@ steps: docker#v2.1.0: debug: true image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-2" + propagate-environment: true workdir: /data/job timeout: 60 @@ -116,6 +120,7 @@ steps: docker#v2.1.0: debug: true image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu16_2-2" + propagate-environment: true workdir: /data/job timeout: 90 @@ -136,6 +141,7 @@ steps: docker#v2.1.0: debug: true image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-2" + propagate-environment: true workdir: /data/job timeout: 90 @@ -156,6 +162,7 @@ steps: docker#v2.1.0: debug: true image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-2" + propagate-environment: true workdir: /data/job timeout: 90 @@ -176,6 +183,7 @@ steps: docker#v2.1.0: debug: true image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-2" + propagate-environment: true workdir: /data/job timeout: 90 diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml index c3b449fc07f..4527ee82dc6 100644 --- a/.buildkite/pipeline.yml +++ b/.buildkite/pipeline.yml @@ -17,6 +17,7 @@ steps: docker#v2.1.0: debug: true image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu16_2-2" + propagate-environment: true workdir: /data/job timeout: 60 @@ -38,6 +39,7 @@ steps: docker#v2.1.0: debug: true image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-2" + propagate-environment: true workdir: /data/job timeout: 60 @@ -59,6 +61,7 @@ steps: docker#v2.1.0: debug: true image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-2" + propagate-environment: true workdir: /data/job timeout: 60 @@ -80,6 +83,7 @@ steps: docker#v2.1.0: debug: true image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-2" + propagate-environment: true workdir: /data/job timeout: 60 @@ -117,6 +121,7 @@ steps: docker#v2.1.0: debug: true image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu16_2-2" + propagate-environment: true workdir: /data/job timeout: 60 @@ -137,6 +142,7 @@ steps: docker#v2.1.0: debug: true image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu16_2-2" + propagate-environment: true workdir: /data/job timeout: 60 @@ -158,6 +164,7 @@ steps: docker#v2.1.0: debug: true image: 
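[PATCH 416/680] above turns on propagate-environment: true in every Docker plugin stanza, so variables configured on the pipeline's "Environment" tab (along with the Buildkite-provided ones) become visible inside the build container. In plain docker-run terms the effect is roughly the following sketch; the image tag is taken from the pipeline, while the test script name is hypothetical:

    # Roughly: forward the Buildkite-provided variables instead of starting
    # the container with a clean environment.
    docker run --rm \
        -e BUILDKITE_BRANCH -e BUILDKITE_COMMIT -e OS \
        -v "$PWD:/data/job" -w /data/job \
        436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-2 ./scripts/my-test.sh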
"436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-2" + propagate-environment: true workdir: /data/job timeout: 60 @@ -178,6 +185,7 @@ steps: docker#v2.1.0: debug: true image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-2" + propagate-environment: true workdir: /data/job timeout: 60 @@ -199,6 +207,7 @@ steps: docker#v2.1.0: debug: true image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-2" + propagate-environment: true workdir: /data/job timeout: 60 @@ -219,6 +228,7 @@ steps: docker#v2.1.0: debug: true image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-2" + propagate-environment: true workdir: /data/job timeout: 60 @@ -240,6 +250,7 @@ steps: docker#v2.1.0: debug: true image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-2" + propagate-environment: true workdir: /data/job timeout: 60 @@ -260,6 +271,7 @@ steps: docker#v2.1.0: debug: true image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-2" + propagate-environment: true workdir: /data/job timeout: 60 @@ -309,6 +321,7 @@ steps: docker#v2.1.0: debug: true image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu16_2-2" + propagate-environment: true workdir: /data/job env: OS: "ubuntu-16.04" @@ -335,6 +348,7 @@ steps: docker#v2.1.0: debug: true image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-2" + propagate-environment: true workdir: /data/job env: OS: "ubuntu-18.04" @@ -368,6 +382,7 @@ steps: docker#v2.1.0: debug: true image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-2" + propagate-environment: true workdir: /data/job env: OS: "el7" From cbc87cb1c4a6319ef7abc9585fb73da872f59c0d Mon Sep 17 00:00:00 2001 From: Bucky Kittinger Date: Wed, 17 Apr 2019 10:20:08 -0400 Subject: [PATCH 417/680] export correct lib paths --- scripts/eosio_build_amazon.sh | 3 ++- scripts/eosio_build_centos.sh | 4 +++- scripts/eosio_build_fedora.sh | 4 +++- scripts/eosio_build_ubuntu.sh | 4 +++- 4 files changed, 11 insertions(+), 4 deletions(-) diff --git a/scripts/eosio_build_amazon.sh b/scripts/eosio_build_amazon.sh index e499845d4a4..2699f12ef3a 100755 --- a/scripts/eosio_build_amazon.sh +++ b/scripts/eosio_build_amazon.sh @@ -258,7 +258,8 @@ else # noexec wasn't found fi if $BUILD_CLANG8; then - export LD_LIBRARY_PATH=${OPT_LOCATION}/mpfr:$LD_LIBRARY_PATH + export LD_LIBRARY_PATH=${OPT_LOCATION}/mpfr/lib:$LD_LIBRARY_PATH + export LD_LIBRARY_PATH=${OPT_LOCATION}/gcc/lib:$LD_LIBRARY_PATH if [ ! -d ${OPT_LOCATION}/gmp ]; then printf "Installing gmp...\\n" cd ${TMP_LOCATION} \ diff --git a/scripts/eosio_build_centos.sh b/scripts/eosio_build_centos.sh index 3993c544dd9..804c9213e27 100755 --- a/scripts/eosio_build_centos.sh +++ b/scripts/eosio_build_centos.sh @@ -300,7 +300,9 @@ cd .. printf "\\n" if $BUILD_CLANG8; then - export LD_LIBRARY_PATH=${OPT_LOCATION}/mpfr:$LD_LIBRARY_PATH + export LD_LIBRARY_PATH=${OPT_LOCATION}/mpfr/lib:$LD_LIBRARY_PATH + export LD_LIBRARY_PATH=${OPT_LOCATION}/gcc/lib:$LD_LIBRARY_PATH + if [ ! -d ${OPT_LOCATION}/gmp ]; then printf "Installing gmp...\\n" cd ${TMP_LOCATION} \ diff --git a/scripts/eosio_build_fedora.sh b/scripts/eosio_build_fedora.sh index c778ee51fa4..4f841fef806 100755 --- a/scripts/eosio_build_fedora.sh +++ b/scripts/eosio_build_fedora.sh @@ -232,7 +232,9 @@ cd .. printf "\\n" if $BUILD_CLANG8; then - export LD_LIBRARY_PATH=${OPT_LOCATION}/mpfr:$LD_LIBRARY_PATH + export LD_LIBRARY_PATH=${OPT_LOCATION}/mpfr/lib:$LD_LIBRARY_PATH + export LD_LIBRARY_PATH=${OPT_LOCATION}/gcc/lib:$LD_LIBRARY_PATH + if [ ! 
-d ${OPT_LOCATION}/gmp ]; then printf "Installing gmp...\\n" cd ${TMP_LOCATION} \ diff --git a/scripts/eosio_build_ubuntu.sh b/scripts/eosio_build_ubuntu.sh index 4f750475453..41046051e77 100755 --- a/scripts/eosio_build_ubuntu.sh +++ b/scripts/eosio_build_ubuntu.sh @@ -263,7 +263,9 @@ else # noexec wasn't found fi if $BUILD_CLANG8; then - export LD_LIBRARY_PATH=${OPT_LOCATION}/mpfr:$LD_LIBRARY_PATH + export LD_LIBRARY_PATH=${OPT_LOCATION}/mpfr/lib:$LD_LIBRARY_PATH + export LD_LIBRARY_PATH=${OPT_LOCATION}/gcc/lib:$LD_LIBRARY_PATH + if [ ! -d ${OPT_LOCATION}/gmp ]; then printf "Installing gmp...\\n" cd ${TMP_LOCATION} \ From 846de6d511dab3ff87aa913579010985f12be65e Mon Sep 17 00:00:00 2001 From: Bucky Kittinger Date: Wed, 17 Apr 2019 10:23:00 -0400 Subject: [PATCH 418/680] update submodules --- libraries/chainbase | 2 +- libraries/fc | 2 +- libraries/wabt | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/libraries/chainbase b/libraries/chainbase index 8a153c42842..118c513436e 160000 --- a/libraries/chainbase +++ b/libraries/chainbase @@ -1 +1 @@ -Subproject commit 8a153c428429a62ce727814a1ba04d3fcdc2bc83 +Subproject commit 118c513436e1310d8e1395303c964430f26b0bb4 diff --git a/libraries/fc b/libraries/fc index 89a102d0ca7..800d6c21b8b 160000 --- a/libraries/fc +++ b/libraries/fc @@ -1 +1 @@ -Subproject commit 89a102d0ca71bb32b009cf94db32a082da9aa403 +Subproject commit 800d6c21b8be9316a651e926a5b2affcc3c52c8e diff --git a/libraries/wabt b/libraries/wabt index a023d5132bd..ae8189a9d45 160000 --- a/libraries/wabt +++ b/libraries/wabt @@ -1 +1 @@ -Subproject commit a023d5132bd4aaa611c88c6d43486eb02c5e7411 +Subproject commit ae8189a9d45e9453bd947c778bf5f3d7255b0627 From dd0a6cda2f87668a5dab3b839a2de40e55f7fb20 Mon Sep 17 00:00:00 2001 From: Bucky Kittinger Date: Wed, 17 Apr 2019 10:59:31 -0400 Subject: [PATCH 419/680] patch linux-unwind.h --- scripts/eosio_build_amazon.sh | 17 ++++++++++------- scripts/eosio_build_centos.sh | 16 +++++++++------- scripts/eosio_build_fedora.sh | 16 +++++++++------- scripts/eosio_build_ubuntu.sh | 16 +++++++++------- 4 files changed, 37 insertions(+), 28 deletions(-) diff --git a/scripts/eosio_build_amazon.sh b/scripts/eosio_build_amazon.sh index 2699f12ef3a..cc7006d8ac5 100755 --- a/scripts/eosio_build_amazon.sh +++ b/scripts/eosio_build_amazon.sh @@ -259,7 +259,8 @@ fi if $BUILD_CLANG8; then export LD_LIBRARY_PATH=${OPT_LOCATION}/mpfr/lib:$LD_LIBRARY_PATH - export LD_LIBRARY_PATH=${OPT_LOCATION}/gcc/lib:$LD_LIBRARY_PATH + export LD_LIBRARY_PATH=${OPT_LOCATION}/gcc/lib64:$LD_LIBRARY_PATH + if [ ! -d ${OPT_LOCATION}/gmp ]; then printf "Installing gmp...\\n" cd ${TMP_LOCATION} \ @@ -267,7 +268,7 @@ if $BUILD_CLANG8; then && cd gmp-5.0.1 && mkdir build && cd build \ && ../configure --prefix=${OPT_LOCATION}/gmp \ && make -j"${JOBS}" && make install \ - && cd ../ && rm -r ${TMP_LOCATION}/gmp-5.0.1 ${TMP_LOCATION}/gmp-5.0.1.tar.gz \ + && cd ../ && rm -rf ${TMP_LOCATION}/gmp-5.0.1 ${TMP_LOCATION}/gmp-5.0.1.tar.gz \ || exit 1 fi if [ ! -d ${OPT_LOCATION}/mpfr ]; then @@ -277,7 +278,7 @@ if $BUILD_CLANG8; then && cd mpfr-3.0.0 && mkdir build && cd build \ && ../configure --prefix=${OPT_LOCATION}/mpfr --with-gmp=${OPT_LOCATION}/gmp \ && make -j"${JOBS}" && make install \ - && cd ../ && rm -r ${TMP_LOCATION}/mpfr-3.0.0 ${TMP_LOCATION}/mpfr-3.0.0.tar.gz \ + && cd ../ && rm -rf ${TMP_LOCATION}/mpfr-3.0.0 ${TMP_LOCATION}/mpfr-3.0.0.tar.gz \ || exit 1 fi if [ ! 
-d ${OPT_LOCATION}/mpc ]; then @@ -287,17 +288,19 @@ if $BUILD_CLANG8; then && cd mpc-1.0.1 && mkdir build && cd build \ && ../configure --prefix=${OPT_LOCATION}/mpc --with-gmp=${OPT_LOCATION}/gmp --with-mpfr=${OPT_LOCATION}/mpfr \ && make -j"${JOBS}" && make install \ - && cd ../ && rm -r ${TMP_LOCATION}/mpc-1.0.1 ${TMP_LOCATION}/mpc-1.0.1.tar.gz \ + && cd ../ && rm -rf ${TMP_LOCATION}/mpc-1.0.1 ${TMP_LOCATION}/mpc-1.0.1.tar.gz \ || exit 1 fi if [ ! -d ${OPT_LOCATION}/gcc ]; then printf "Installing libstdc++\\n" cd ${TMP_LOCATION} \ && curl -LO https://ftp.gnu.org/gnu/gcc/gcc-7.1.0/gcc-7.1.0.tar.gz && tar -xzf gcc-7.1.0.tar.gz \ - && cd gcc-7.1.0 && mkdir build && cd build \ + && cd gcc-7.1.0 \ + && sed '61 s/ucontext/ucontext_t/' libgcc/config/i386/linux-unwind.h \ + && mkdir build && cd build \ &&../configure --enable-languages=c,c++ --prefix=${OPT_LOCATION}/gcc --disable-shared --enable-linker-build-id --without-included-gettext --enable-threads=posix --enable-nls --enable-clocale=gnu --enable-libstdcxx-debug --enable-libstdcxx-time=yes --with-default-libstdcxx-abi=new --enable-gnu-unique-object --disable-vtable-verify --disable-libmpx --enable-plugin --with-system-zlib --with-target-system-zlib --disable-werror --disable-multilib --with-tune=generic --enable-checking=release --with-gmp=${OPT_LOCATION}/gmp --with-mpfr=${OPT_LOCATION}/mpfr --with-mpc=${OPT_LOCATION}/mpc --disable-libsanitizer --disable-testsuite --disable-libquadmath --disable-libitm --disable-libcc1 \ && make -j"${JOBS}" && make install \ - && cd ../ && rm -r ${TMP_LOCATION}/gcc-7.1.0 ${TMP_LOCATION}/gcc-7.1.0.tar.gz \ + && cd ../ && rm -rf ${TMP_LOCATION}/gcc-7.1.0 ${TMP_LOCATION}/gcc-7.1.0.tar.gz \ || exit 1 fi @@ -331,7 +334,7 @@ if $BUILD_CLANG8; then && $CMAKE -G "Unix Makefiles" -DCMAKE_INSTALL_PREFIX="${CLANG8_ROOT}" -DLLVM_BUILD_EXTERNAL_COMPILER_RT=ON -DLLVM_BUILD_LLVM_DYLIB=ON -DLLVM_ENABLE_LIBCXX=ON -DLLVM_ENABLE_RTTI=ON -DLLVM_INCLUDE_DOCS=OFF -DLLVM_OPTIMIZED_TABLEGEN=ON -DLLVM_TARGETS_TO_BUILD=all -DCMAKE_BUILD_TYPE=Release .. \ && make -j"${JOBS}" \ && make install \ - && rm -r ${TMP_LOCATION}/clang8 \ + && rm -rf ${TMP_LOCATION}/clang8 \ && cd ../.. \ || exit 1 diff --git a/scripts/eosio_build_centos.sh b/scripts/eosio_build_centos.sh index 804c9213e27..cbe4738c924 100755 --- a/scripts/eosio_build_centos.sh +++ b/scripts/eosio_build_centos.sh @@ -301,7 +301,7 @@ printf "\\n" if $BUILD_CLANG8; then export LD_LIBRARY_PATH=${OPT_LOCATION}/mpfr/lib:$LD_LIBRARY_PATH - export LD_LIBRARY_PATH=${OPT_LOCATION}/gcc/lib:$LD_LIBRARY_PATH + export LD_LIBRARY_PATH=${OPT_LOCATION}/gcc/lib64:$LD_LIBRARY_PATH if [ ! -d ${OPT_LOCATION}/gmp ]; then printf "Installing gmp...\\n" @@ -310,7 +310,7 @@ if $BUILD_CLANG8; then && cd gmp-5.0.1 && mkdir build && cd build \ && ../configure --prefix=${OPT_LOCATION}/gmp \ && make -j"${JOBS}" && make install \ - && cd ../ && rm -r ${TMP_LOCATION}/gmp-5.0.1 ${TMP_LOCATION}/gmp-5.0.1.tar.gz \ + && cd ../ && rm -rf ${TMP_LOCATION}/gmp-5.0.1 ${TMP_LOCATION}/gmp-5.0.1.tar.gz \ || exit 1 fi if [ ! -d ${OPT_LOCATION}/mpfr ]; then @@ -320,7 +320,7 @@ if $BUILD_CLANG8; then && cd mpfr-3.0.0 && mkdir build && cd build \ && ../configure --prefix=${OPT_LOCATION}/mpfr --with-gmp=${OPT_LOCATION}/gmp \ && make -j"${JOBS}" && make install \ - && cd ../ && rm -r ${TMP_LOCATION}/mpfr-3.0.0 ${TMP_LOCATION}/mpfr-3.0.0.tar.gz \ + && cd ../ && rm -rf ${TMP_LOCATION}/mpfr-3.0.0 ${TMP_LOCATION}/mpfr-3.0.0.tar.gz \ || exit 1 fi if [ ! 
-d ${OPT_LOCATION}/mpc ]; then @@ -330,17 +330,19 @@ if $BUILD_CLANG8; then && cd mpc-1.0.1 && mkdir build && cd build \ && ../configure --prefix=${OPT_LOCATION}/mpc --with-gmp=${OPT_LOCATION}/gmp --with-mpfr=${OPT_LOCATION}/mpfr \ && make -j"${JOBS}" && make install \ - && cd ../ && rm -r ${TMP_LOCATION}/mpc-1.0.1 ${TMP_LOCATION}/mpc-1.0.1.tar.gz \ + && cd ../ && rm -rf ${TMP_LOCATION}/mpc-1.0.1 ${TMP_LOCATION}/mpc-1.0.1.tar.gz \ || exit 1 fi if [ ! -d ${OPT_LOCATION}/gcc ]; then printf "Installing libstdc++\\n" cd ${TMP_LOCATION} \ && curl -LO https://ftp.gnu.org/gnu/gcc/gcc-7.1.0/gcc-7.1.0.tar.gz && tar -xzf gcc-7.1.0.tar.gz \ - && cd gcc-7.1.0 && mkdir build && cd build \ + && cd gcc-7.1.0 \ + && sed '61 s/ucontext/ucontext_t/' libgcc/config/i386/linux-unwind.h \ + && mkdir build && cd build \ &&../configure --enable-languages=c,c++ --prefix=${OPT_LOCATION}/gcc --disable-shared --enable-linker-build-id --without-included-gettext --enable-threads=posix --enable-nls --enable-clocale=gnu --enable-libstdcxx-debug --enable-libstdcxx-time=yes --with-default-libstdcxx-abi=new --enable-gnu-unique-object --disable-vtable-verify --disable-libmpx --enable-plugin --with-system-zlib --with-target-system-zlib --disable-werror --disable-multilib --with-tune=generic --enable-checking=release --with-gmp=${OPT_LOCATION}/gmp --with-mpfr=${OPT_LOCATION}/mpfr --with-mpc=${OPT_LOCATION}/mpc --disable-libsanitizer --disable-testsuite --disable-libquadmath --disable-libitm --disable-libcc1 \ && make -j"${JOBS}" && make install \ - && cd ../ && rm -r ${TMP_LOCATION}/gcc-7.1.0 ${TMP_LOCATION}/gcc-7.1.0.tar.gz \ + && cd ../ && rm -rf ${TMP_LOCATION}/gcc-7.1.0 ${TMP_LOCATION}/gcc-7.1.0.tar.gz \ || exit 1 fi @@ -374,7 +376,7 @@ if $BUILD_CLANG8; then && $CMAKE -G "Unix Makefiles" -DCMAKE_INSTALL_PREFIX="${CLANG8_ROOT}" -DLLVM_BUILD_EXTERNAL_COMPILER_RT=ON -DLLVM_BUILD_LLVM_DYLIB=ON -DLLVM_ENABLE_LIBCXX=ON -DLLVM_ENABLE_RTTI=ON -DLLVM_INCLUDE_DOCS=OFF -DLLVM_OPTIMIZED_TABLEGEN=ON -DLLVM_TARGETS_TO_BUILD=all -DCMAKE_BUILD_TYPE=Release .. \ && make -j"${JOBS}" \ && make install \ - && rm -r ${TMP_LOCATION}/clang8 \ + && rm -rf ${TMP_LOCATION}/clang8 \ && cd ../.. \ || exit 1 diff --git a/scripts/eosio_build_fedora.sh b/scripts/eosio_build_fedora.sh index 4f841fef806..fc29a5ae8b3 100755 --- a/scripts/eosio_build_fedora.sh +++ b/scripts/eosio_build_fedora.sh @@ -233,7 +233,7 @@ printf "\\n" if $BUILD_CLANG8; then export LD_LIBRARY_PATH=${OPT_LOCATION}/mpfr/lib:$LD_LIBRARY_PATH - export LD_LIBRARY_PATH=${OPT_LOCATION}/gcc/lib:$LD_LIBRARY_PATH + export LD_LIBRARY_PATH=${OPT_LOCATION}/gcc/lib64:$LD_LIBRARY_PATH if [ ! -d ${OPT_LOCATION}/gmp ]; then printf "Installing gmp...\\n" @@ -242,7 +242,7 @@ if $BUILD_CLANG8; then && cd gmp-5.0.1 && mkdir build && cd build \ && ../configure --prefix=${OPT_LOCATION}/gmp \ && make -j"${JOBS}" && make install \ - && cd ../ && rm -r ${TMP_LOCATION}/gmp-5.0.1 ${TMP_LOCATION}/gmp-5.0.1.tar.gz \ + && cd ../ && rm -rf ${TMP_LOCATION}/gmp-5.0.1 ${TMP_LOCATION}/gmp-5.0.1.tar.gz \ || exit 1 fi if [ ! -d ${OPT_LOCATION}/mpfr ]; then @@ -252,7 +252,7 @@ if $BUILD_CLANG8; then && cd mpfr-3.0.0 && mkdir build && cd build \ && ../configure --prefix=${OPT_LOCATION}/mpfr --with-gmp=${OPT_LOCATION}/gmp \ && make -j"${JOBS}" && make install \ - && cd ../ && rm -r ${TMP_LOCATION}/mpfr-3.0.0 ${TMP_LOCATION}/mpfr-3.0.0.tar.gz \ + && cd ../ && rm -rf ${TMP_LOCATION}/mpfr-3.0.0 ${TMP_LOCATION}/mpfr-3.0.0.tar.gz \ || exit 1 fi if [ ! 
-d ${OPT_LOCATION}/mpc ]; then @@ -262,17 +262,19 @@ if $BUILD_CLANG8; then && cd mpc-1.0.1 && mkdir build && cd build \ && ../configure --prefix=${OPT_LOCATION}/mpc --with-gmp=${OPT_LOCATION}/gmp --with-mpfr=${OPT_LOCATION}/mpfr \ && make -j"${JOBS}" && make install \ - && cd ../ && rm -r ${TMP_LOCATION}/mpc-1.0.1 ${TMP_LOCATION}/mpc-1.0.1.tar.gz \ + && cd ../ && rm -rf ${TMP_LOCATION}/mpc-1.0.1 ${TMP_LOCATION}/mpc-1.0.1.tar.gz \ || exit 1 fi if [ ! -d ${OPT_LOCATION}/gcc ]; then printf "Installing libstdc++\\n" cd ${TMP_LOCATION} \ && curl -LO https://ftp.gnu.org/gnu/gcc/gcc-7.1.0/gcc-7.1.0.tar.gz && tar -xzf gcc-7.1.0.tar.gz \ - && cd gcc-7.1.0 && mkdir build && cd build \ + && cd gcc-7.1.0 \ + && sed '61 s/ucontext/ucontext_t/' libgcc/config/i386/linux-unwind.h \ + && mkdir build && cd build \ &&../configure --enable-languages=c,c++ --prefix=${OPT_LOCATION}/gcc --disable-shared --enable-linker-build-id --without-included-gettext --enable-threads=posix --enable-nls --enable-clocale=gnu --enable-libstdcxx-debug --enable-libstdcxx-time=yes --with-default-libstdcxx-abi=new --enable-gnu-unique-object --disable-vtable-verify --disable-libmpx --enable-plugin --with-system-zlib --with-target-system-zlib --disable-werror --disable-multilib --with-tune=generic --enable-checking=release --with-gmp=${OPT_LOCATION}/gmp --with-mpfr=${OPT_LOCATION}/mpfr --with-mpc=${OPT_LOCATION}/mpc --disable-libsanitizer --disable-testsuite --disable-libquadmath --disable-libitm --disable-libcc1 \ && make -j"${JOBS}" && make install \ - && cd ../ && rm -r ${TMP_LOCATION}/gcc-7.1.0 ${TMP_LOCATION}/gcc-7.1.0.tar.gz \ + && cd ../ && rm -rf ${TMP_LOCATION}/gcc-7.1.0 ${TMP_LOCATION}/gcc-7.1.0.tar.gz \ || exit 1 fi @@ -306,7 +308,7 @@ if $BUILD_CLANG8; then && $CMAKE -G "Unix Makefiles" -DCMAKE_INSTALL_PREFIX="${CLANG8_ROOT}" -DLLVM_BUILD_EXTERNAL_COMPILER_RT=ON -DLLVM_BUILD_LLVM_DYLIB=ON -DLLVM_ENABLE_LIBCXX=ON -DLLVM_ENABLE_RTTI=ON -DLLVM_INCLUDE_DOCS=OFF -DLLVM_OPTIMIZED_TABLEGEN=ON -DLLVM_TARGETS_TO_BUILD=all -DCMAKE_BUILD_TYPE=Release .. \ && make -j"${JOBS}" \ && make install \ - && rm -r ${TMP_LOCATION}/clang8 \ + && rm -rf ${TMP_LOCATION}/clang8 \ && cd ../.. \ || exit 1 diff --git a/scripts/eosio_build_ubuntu.sh b/scripts/eosio_build_ubuntu.sh index 41046051e77..9ad232e2221 100755 --- a/scripts/eosio_build_ubuntu.sh +++ b/scripts/eosio_build_ubuntu.sh @@ -264,7 +264,7 @@ fi if $BUILD_CLANG8; then export LD_LIBRARY_PATH=${OPT_LOCATION}/mpfr/lib:$LD_LIBRARY_PATH - export LD_LIBRARY_PATH=${OPT_LOCATION}/gcc/lib:$LD_LIBRARY_PATH + export LD_LIBRARY_PATH=${OPT_LOCATION}/gcc/lib64:$LD_LIBRARY_PATH if [ ! -d ${OPT_LOCATION}/gmp ]; then printf "Installing gmp...\\n" @@ -273,7 +273,7 @@ if $BUILD_CLANG8; then && cd gmp-5.0.1 && mkdir build && cd build \ && ../configure --prefix=${OPT_LOCATION}/gmp \ && make -j"${JOBS}" && make install \ - && cd ../ && rm -r ${TMP_LOCATION}/gmp-5.0.1 ${TMP_LOCATION}/gmp-5.0.1.tar.gz \ + && cd ../ && rm -rf ${TMP_LOCATION}/gmp-5.0.1 ${TMP_LOCATION}/gmp-5.0.1.tar.gz \ || exit 1 fi if [ ! -d ${OPT_LOCATION}/mpfr ]; then @@ -283,7 +283,7 @@ if $BUILD_CLANG8; then && cd mpfr-3.0.0 && mkdir build && cd build \ && ../configure --prefix=${OPT_LOCATION}/mpfr --with-gmp=${OPT_LOCATION}/gmp \ && make -j"${JOBS}" && make install \ - && cd ../ && rm -r ${TMP_LOCATION}/mpfr-3.0.0 ${TMP_LOCATION}/mpfr-3.0.0.tar.gz \ + && cd ../ && rm -rf ${TMP_LOCATION}/mpfr-3.0.0 ${TMP_LOCATION}/mpfr-3.0.0.tar.gz \ || exit 1 fi if [ ! 
-d ${OPT_LOCATION}/mpc ]; then @@ -293,17 +293,19 @@ if $BUILD_CLANG8; then && cd mpc-1.0.1 && mkdir build && cd build \ && ../configure --prefix=${OPT_LOCATION}/mpc --with-gmp=${OPT_LOCATION}/gmp --with-mpfr=${OPT_LOCATION}/mpfr \ && make -j"${JOBS}" && make install \ - && cd ../ && rm -r ${TMP_LOCATION}/mpc-1.0.1 ${TMP_LOCATION}/mpc-1.0.1.tar.gz \ + && cd ../ && rm -rf ${TMP_LOCATION}/mpc-1.0.1 ${TMP_LOCATION}/mpc-1.0.1.tar.gz \ || exit 1 fi if [ ! -d ${OPT_LOCATION}/gcc ]; then printf "Installing libstdc++\\n" cd ${TMP_LOCATION} \ && curl -LO https://ftp.gnu.org/gnu/gcc/gcc-7.1.0/gcc-7.1.0.tar.gz && tar -xzf gcc-7.1.0.tar.gz \ - && cd gcc-7.1.0 && mkdir build && cd build \ + && cd gcc-7.1.0 \ + && sed '61 s/ucontext/ucontext_t/' libgcc/config/i386/linux-unwind.h \ + && mkdir build && cd build \ &&../configure --enable-languages=c,c++ --prefix=${OPT_LOCATION}/gcc --disable-shared --enable-linker-build-id --without-included-gettext --enable-threads=posix --enable-nls --enable-clocale=gnu --enable-libstdcxx-debug --enable-libstdcxx-time=yes --with-default-libstdcxx-abi=new --enable-gnu-unique-object --disable-vtable-verify --disable-libmpx --enable-plugin --with-system-zlib --with-target-system-zlib --disable-werror --disable-multilib --with-tune=generic --enable-checking=release --with-gmp=${OPT_LOCATION}/gmp --with-mpfr=${OPT_LOCATION}/mpfr --with-mpc=${OPT_LOCATION}/mpc --disable-libsanitizer --disable-testsuite --disable-libquadmath --disable-libitm --disable-libcc1 \ && make -j"${JOBS}" && make install \ - && cd ../ && rm -r ${TMP_LOCATION}/gcc-7.1.0 ${TMP_LOCATION}/gcc-7.1.0.tar.gz \ + && cd ../ && rm -rf ${TMP_LOCATION}/gcc-7.1.0 ${TMP_LOCATION}/gcc-7.1.0.tar.gz \ || exit 1 fi @@ -337,7 +339,7 @@ if $BUILD_CLANG8; then && $CMAKE -G "Unix Makefiles" -DCMAKE_INSTALL_PREFIX="${CLANG8_ROOT}" -DLLVM_BUILD_EXTERNAL_COMPILER_RT=ON -DLLVM_BUILD_LLVM_DYLIB=ON -DLLVM_ENABLE_LIBCXX=ON -DLLVM_ENABLE_RTTI=ON -DLLVM_INCLUDE_DOCS=OFF -DLLVM_OPTIMIZED_TABLEGEN=ON -DLLVM_TARGETS_TO_BUILD=all -DCMAKE_BUILD_TYPE=Release .. \ && make -j"${JOBS}" \ && make install \ - && rm -r ${TMP_LOCATION}/clang8 \ + && rm -rf ${TMP_LOCATION}/clang8 \ && cd ../.. 
\ || exit 1 From 2609c0cff1f2c7078dfbbb21baa6008b55eb8a76 Mon Sep 17 00:00:00 2001 From: Bucky Kittinger Date: Wed, 17 Apr 2019 11:20:05 -0400 Subject: [PATCH 420/680] fix patch and bump timeout for build --- .buildkite/pipeline.yml | 10 +++++----- scripts/eosio_build_amazon.sh | 2 +- scripts/eosio_build_centos.sh | 2 +- scripts/eosio_build_fedora.sh | 2 +- scripts/eosio_build_ubuntu.sh | 2 +- 5 files changed, 9 insertions(+), 9 deletions(-) diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml index 9ab24bbe88d..64fe5462694 100644 --- a/.buildkite/pipeline.yml +++ b/.buildkite/pipeline.yml @@ -18,7 +18,7 @@ steps: debug: true image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu16_2-2" workdir: /data/job - timeout: 60 + timeout: 120 - command: | # Ubuntu 18.04 Build echo "+++ :hammer: Building" @@ -39,7 +39,7 @@ steps: debug: true image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-2" workdir: /data/job - timeout: 60 + timeout: 120 - command: | # CentOS 7 Build echo "+++ :hammer: Building" @@ -60,7 +60,7 @@ steps: debug: true image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-2" workdir: /data/job - timeout: 60 + timeout: 120 - command: | # Amazon Linux 2 Build echo "+++ :hammer: Building" @@ -81,7 +81,7 @@ steps: debug: true image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-2" workdir: /data/job - timeout: 60 + timeout: 120 - command: | # macOS Mojave Build echo "--- Creating symbolic link to job directory :file_folder:" @@ -95,7 +95,7 @@ steps: - "role=builder-v2-1" - "os=mojave" artifact_paths: "build.tar.gz" - timeout: 60 + timeout: 120 - wait diff --git a/scripts/eosio_build_amazon.sh b/scripts/eosio_build_amazon.sh index cc7006d8ac5..5dc9ba5381d 100755 --- a/scripts/eosio_build_amazon.sh +++ b/scripts/eosio_build_amazon.sh @@ -296,7 +296,7 @@ if $BUILD_CLANG8; then cd ${TMP_LOCATION} \ && curl -LO https://ftp.gnu.org/gnu/gcc/gcc-7.1.0/gcc-7.1.0.tar.gz && tar -xzf gcc-7.1.0.tar.gz \ && cd gcc-7.1.0 \ - && sed '61 s/ucontext/ucontext_t/' libgcc/config/i386/linux-unwind.h \ + && sed '61 s/ucontext/ucontext_t/' libgcc/config/i386/linux-unwind.h &> libgcc/config/i386/linux-unwind.h \ && mkdir build && cd build \ &&../configure --enable-languages=c,c++ --prefix=${OPT_LOCATION}/gcc --disable-shared --enable-linker-build-id --without-included-gettext --enable-threads=posix --enable-nls --enable-clocale=gnu --enable-libstdcxx-debug --enable-libstdcxx-time=yes --with-default-libstdcxx-abi=new --enable-gnu-unique-object --disable-vtable-verify --disable-libmpx --enable-plugin --with-system-zlib --with-target-system-zlib --disable-werror --disable-multilib --with-tune=generic --enable-checking=release --with-gmp=${OPT_LOCATION}/gmp --with-mpfr=${OPT_LOCATION}/mpfr --with-mpc=${OPT_LOCATION}/mpc --disable-libsanitizer --disable-testsuite --disable-libquadmath --disable-libitm --disable-libcc1 \ && make -j"${JOBS}" && make install \ diff --git a/scripts/eosio_build_centos.sh b/scripts/eosio_build_centos.sh index cbe4738c924..31b02b44bde 100755 --- a/scripts/eosio_build_centos.sh +++ b/scripts/eosio_build_centos.sh @@ -338,7 +338,7 @@ if $BUILD_CLANG8; then cd ${TMP_LOCATION} \ && curl -LO https://ftp.gnu.org/gnu/gcc/gcc-7.1.0/gcc-7.1.0.tar.gz && tar -xzf gcc-7.1.0.tar.gz \ && cd gcc-7.1.0 \ - && sed '61 s/ucontext/ucontext_t/' libgcc/config/i386/linux-unwind.h \ + && sed '61 s/ucontext/ucontext_t/' libgcc/config/i386/linux-unwind.h &> libgcc/config/i386/linux-unwind.h \ && mkdir build && cd build \ &&../configure 
--enable-languages=c,c++ --prefix=${OPT_LOCATION}/gcc --disable-shared --enable-linker-build-id --without-included-gettext --enable-threads=posix --enable-nls --enable-clocale=gnu --enable-libstdcxx-debug --enable-libstdcxx-time=yes --with-default-libstdcxx-abi=new --enable-gnu-unique-object --disable-vtable-verify --disable-libmpx --enable-plugin --with-system-zlib --with-target-system-zlib --disable-werror --disable-multilib --with-tune=generic --enable-checking=release --with-gmp=${OPT_LOCATION}/gmp --with-mpfr=${OPT_LOCATION}/mpfr --with-mpc=${OPT_LOCATION}/mpc --disable-libsanitizer --disable-testsuite --disable-libquadmath --disable-libitm --disable-libcc1 \ && make -j"${JOBS}" && make install \ diff --git a/scripts/eosio_build_fedora.sh b/scripts/eosio_build_fedora.sh index fc29a5ae8b3..fce3e7d301a 100755 --- a/scripts/eosio_build_fedora.sh +++ b/scripts/eosio_build_fedora.sh @@ -270,7 +270,7 @@ if $BUILD_CLANG8; then cd ${TMP_LOCATION} \ && curl -LO https://ftp.gnu.org/gnu/gcc/gcc-7.1.0/gcc-7.1.0.tar.gz && tar -xzf gcc-7.1.0.tar.gz \ && cd gcc-7.1.0 \ - && sed '61 s/ucontext/ucontext_t/' libgcc/config/i386/linux-unwind.h \ + && sed '61 s/ucontext/ucontext_t/' libgcc/config/i386/linux-unwind.h &> libgcc/config/i386/linux-unwind.h \ && mkdir build && cd build \ &&../configure --enable-languages=c,c++ --prefix=${OPT_LOCATION}/gcc --disable-shared --enable-linker-build-id --without-included-gettext --enable-threads=posix --enable-nls --enable-clocale=gnu --enable-libstdcxx-debug --enable-libstdcxx-time=yes --with-default-libstdcxx-abi=new --enable-gnu-unique-object --disable-vtable-verify --disable-libmpx --enable-plugin --with-system-zlib --with-target-system-zlib --disable-werror --disable-multilib --with-tune=generic --enable-checking=release --with-gmp=${OPT_LOCATION}/gmp --with-mpfr=${OPT_LOCATION}/mpfr --with-mpc=${OPT_LOCATION}/mpc --disable-libsanitizer --disable-testsuite --disable-libquadmath --disable-libitm --disable-libcc1 \ && make -j"${JOBS}" && make install \ diff --git a/scripts/eosio_build_ubuntu.sh b/scripts/eosio_build_ubuntu.sh index 9ad232e2221..18282795b72 100755 --- a/scripts/eosio_build_ubuntu.sh +++ b/scripts/eosio_build_ubuntu.sh @@ -301,7 +301,7 @@ if $BUILD_CLANG8; then cd ${TMP_LOCATION} \ && curl -LO https://ftp.gnu.org/gnu/gcc/gcc-7.1.0/gcc-7.1.0.tar.gz && tar -xzf gcc-7.1.0.tar.gz \ && cd gcc-7.1.0 \ - && sed '61 s/ucontext/ucontext_t/' libgcc/config/i386/linux-unwind.h \ + && sed '61 s/ucontext/ucontext_t/' libgcc/config/i386/linux-unwind.h &> libgcc/config/i386/linux-unwind.h \ && mkdir build && cd build \ &&../configure --enable-languages=c,c++ --prefix=${OPT_LOCATION}/gcc --disable-shared --enable-linker-build-id --without-included-gettext --enable-threads=posix --enable-nls --enable-clocale=gnu --enable-libstdcxx-debug --enable-libstdcxx-time=yes --with-default-libstdcxx-abi=new --enable-gnu-unique-object --disable-vtable-verify --disable-libmpx --enable-plugin --with-system-zlib --with-target-system-zlib --disable-werror --disable-multilib --with-tune=generic --enable-checking=release --with-gmp=${OPT_LOCATION}/gmp --with-mpfr=${OPT_LOCATION}/mpfr --with-mpc=${OPT_LOCATION}/mpc --disable-libsanitizer --disable-testsuite --disable-libquadmath --disable-libitm --disable-libcc1 \ && make -j"${JOBS}" && make install \ From 397e266ecae796c43b7e7bdc73b0e4a77f2614a9 Mon Sep 17 00:00:00 2001 From: Bucky Kittinger Date: Wed, 17 Apr 2019 11:52:38 -0400 Subject: [PATCH 421/680] fix missing tmp directory centos --- scripts/eosio_build.sh | 2 +- 
scripts/eosio_build_centos.sh | 9 +++++++++ scripts/eosio_build_darwin.sh | 1 - scripts/eosio_build_fedora.sh | 9 +++++++++ 4 files changed, 19 insertions(+), 2 deletions(-) diff --git a/scripts/eosio_build.sh b/scripts/eosio_build.sh index 490a16c3b06..8bec7210293 100755 --- a/scripts/eosio_build.sh +++ b/scripts/eosio_build.sh @@ -332,7 +332,7 @@ printf "## ENABLE_COVERAGE_TESTING=%s\\n" "${ENABLE_COVERAGE_TESTING}" mkdir -p $BUILD_DIR cd $BUILD_DIR -$CMAKE -DCMAKE_BUILD_TYPE="${CMAKE_BUILD_TYPE}" -DCMAKE_CXX_COMPILER="${CXX}" -DOPENSSL_ROOT_DIR="${OPENSSL_ROOT_DIR}" -DBUILD_MONGO_DB_PLUGIN=false \# $PIN_COMPILER_CMAKE \ +$CMAKE -DCMAKE_BUILD_TYPE="${CMAKE_BUILD_TYPE}" -DCMAKE_CXX_COMPILER="${CXX}" -DOPENSSL_ROOT_DIR="${OPENSSL_ROOT_DIR}" -DBUILD_MONGO_DB_PLUGIN=true \# $PIN_COMPILER_CMAKE \ -DCORE_SYMBOL_NAME="${CORE_SYMBOL_NAME}" -DENABLE_COVERAGE_TESTING="${ENABLE_COVERAGE_TESTING}" -DBUILD_DOXYGEN="${DOXYGEN}" \ -DCMAKE_INSTALL_PREFIX=$OPT_LOCATION/eosio $LOCAL_CMAKE_FLAGS "${REPO_ROOT}" diff --git a/scripts/eosio_build_centos.sh b/scripts/eosio_build_centos.sh index 31b02b44bde..6bf5fc2b1ff 100755 --- a/scripts/eosio_build_centos.sh +++ b/scripts/eosio_build_centos.sh @@ -299,6 +299,15 @@ if [ $? -ne 0 ]; then exit -1; fi cd .. printf "\\n" +# Use current directory's tmp directory if noexec is enabled for /tmp +if (mount | grep "/tmp " | grep --quiet noexec); then + mkdir -p $REPO_ROOT/tmp + TMP_LOCATION="${REPO_ROOT}/tmp" + rm -rf $REPO_ROOT/tmp/* +else # noexec wasn't found + TMP_LOCATION="/tmp" +fi + if $BUILD_CLANG8; then export LD_LIBRARY_PATH=${OPT_LOCATION}/mpfr/lib:$LD_LIBRARY_PATH export LD_LIBRARY_PATH=${OPT_LOCATION}/gcc/lib64:$LD_LIBRARY_PATH diff --git a/scripts/eosio_build_darwin.sh b/scripts/eosio_build_darwin.sh index fa0acbe26d1..1f5c56f06f3 100755 --- a/scripts/eosio_build_darwin.sh +++ b/scripts/eosio_build_darwin.sh @@ -187,7 +187,6 @@ if [ $? -ne 0 ]; then exit -1; fi printf "\\n" - printf "Checking MongoDB installation...\\n" if [ ! -d $MONGODB_ROOT ]; then printf "Installing MongoDB into ${MONGODB_ROOT}...\\n" diff --git a/scripts/eosio_build_fedora.sh b/scripts/eosio_build_fedora.sh index fce3e7d301a..e5e87e6c37f 100755 --- a/scripts/eosio_build_fedora.sh +++ b/scripts/eosio_build_fedora.sh @@ -231,6 +231,15 @@ if [ $? -ne 0 ]; then exit -1; fi cd .. 
printf "\\n" +# Use current directory's tmp directory if noexec is enabled for /tmp +if (mount | grep "/tmp " | grep --quiet noexec); then + mkdir -p $REPO_ROOT/tmp + TMP_LOCATION="${REPO_ROOT}/tmp" + rm -rf $REPO_ROOT/tmp/* +else # noexec wasn't found + TMP_LOCATION="/tmp" +fi + if $BUILD_CLANG8; then export LD_LIBRARY_PATH=${OPT_LOCATION}/mpfr/lib:$LD_LIBRARY_PATH export LD_LIBRARY_PATH=${OPT_LOCATION}/gcc/lib64:$LD_LIBRARY_PATH From 988cebfaa67fdf76cd03ae2b48fb89ac57bfc46b Mon Sep 17 00:00:00 2001 From: Bucky Kittinger Date: Wed, 17 Apr 2019 13:41:29 -0400 Subject: [PATCH 422/680] add back fix for auto_ptr to unique_ptr --- libraries/fc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/fc b/libraries/fc index 800d6c21b8b..8d5e6acddfd 160000 --- a/libraries/fc +++ b/libraries/fc @@ -1 +1 @@ -Subproject commit 800d6c21b8be9316a651e926a5b2affcc3c52c8e +Subproject commit 8d5e6acddfdc9c9b30b9d3b8790630799c4fc5c6 From e4bfd468f798217c1bf22d2775a4544c2c28242f Mon Sep 17 00:00:00 2001 From: Bucky Kittinger Date: Wed, 17 Apr 2019 14:04:14 -0400 Subject: [PATCH 423/680] update for Apple --- CMakeLists.txt | 2 ++ programs/cleos/CMakeLists.txt | 4 +++- programs/keosd/CMakeLists.txt | 4 +++- programs/nodeos/CMakeLists.txt | 4 +++- unittests/CMakeLists.txt | 2 +- 5 files changed, 12 insertions(+), 4 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index caf9a255606..952e2b8dc7d 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -125,6 +125,8 @@ if(${EOSIO_PIN_COMPILER}) if(NOT APPLE AND UNIX) set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -nostdinc++ -nostdlib++ -I${LIBSTDCPP_DIR}/include/c++/7.1.0") set(CMAKE_CXX_STANDARD_LIBRARIES "${LIBSTDCPP_DIR}/lib64/libstdc++.a") + else() + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -stdlib=libc++") endif() endif() diff --git a/programs/cleos/CMakeLists.txt b/programs/cleos/CMakeLists.txt index 1083b70ea3a..3f99a791c87 100644 --- a/programs/cleos/CMakeLists.txt +++ b/programs/cleos/CMakeLists.txt @@ -35,7 +35,9 @@ configure_file(config.hpp.in config.hpp ESCAPE_QUOTES) target_include_directories(${CLI_CLIENT_EXECUTABLE_NAME} PUBLIC ${Intl_INCLUDE_DIRS} ${CMAKE_CURRENT_BINARY_DIR}) if(EOSIO_PIN_COMPILER) - target_link_libraries(${CLI_CLIENT_EXECUTABLE_NAME} PRIVATE -nostdlib++) + if(NOT APPLE) + target_link_libraries(${CLI_CLIENT_EXECUTABLE_NAME} PRIVATE -nostdlib++) + endif() endif() target_link_libraries( ${CLI_CLIENT_EXECUTABLE_NAME} diff --git a/programs/keosd/CMakeLists.txt b/programs/keosd/CMakeLists.txt index cecb1eae5a1..e20393ea504 100644 --- a/programs/keosd/CMakeLists.txt +++ b/programs/keosd/CMakeLists.txt @@ -12,7 +12,9 @@ endif() configure_file(config.hpp.in config.hpp ESCAPE_QUOTES) if(EOSIO_PIN_COMPILER) - target_link_libraries(${KEY_STORE_EXECUTABLE_NAME} PRIVATE -nostdlib++) + if(NOT APPLE) + target_link_libraries(${KEY_STORE_EXECUTABLE_NAME} PRIVATE -nostdlib++) + endif() endif() target_link_libraries( ${KEY_STORE_EXECUTABLE_NAME} diff --git a/programs/nodeos/CMakeLists.txt b/programs/nodeos/CMakeLists.txt index 59ade059b32..357fb07dfd0 100644 --- a/programs/nodeos/CMakeLists.txt +++ b/programs/nodeos/CMakeLists.txt @@ -48,7 +48,9 @@ else() endif() if(EOSIO_PIN_COMPILER) - target_link_libraries(${NODE_EXECUTABLE_NAME} PRIVATE -nostdlib++) + if (NOT APPLE) + target_link_libraries(${NODE_EXECUTABLE_NAME} PRIVATE -nostdlib++) + endif() endif() target_link_libraries( ${NODE_EXECUTABLE_NAME} diff --git a/unittests/CMakeLists.txt b/unittests/CMakeLists.txt index c5cfd31d096..85a33acddf9 100644 --- a/unittests/CMakeLists.txt 
+++ b/unittests/CMakeLists.txt @@ -43,7 +43,7 @@ configure_file(${CMAKE_CURRENT_SOURCE_DIR}/contracts.hpp.in ${CMAKE_CURRENT_BINA file(GLOB UNIT_TESTS "*.cpp") # find all unit test suites add_executable( unit_test ${UNIT_TESTS}) # build unit tests as one executable -if(EOSIO_PIN_COMPILER) +if(EOSIO_PIN_COMPILER AND NOT APPLE) target_link_libraries(unit_test PRIVATE -nostdlib++ PUBLIC eosio_chain chainbase eosio_testing fc appbase ${PLATFORM_SPECIFIC_LIBS}) else() target_link_libraries( unit_test eosio_chain chainbase eosio_testing fc appbase ${PLATFORM_SPECIFIC_LIBS} ) From 80ed27b64e0856ea77d0d526c6f7348980a0f042 Mon Sep 17 00:00:00 2001 From: Bucky Kittinger Date: Wed, 17 Apr 2019 15:33:33 -0400 Subject: [PATCH 424/680] update wabt/no-clobber --- libraries/wabt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/wabt b/libraries/wabt index ae8189a9d45..9c0e1131a45 160000 --- a/libraries/wabt +++ b/libraries/wabt @@ -1 +1 @@ -Subproject commit ae8189a9d45e9453bd947c778bf5f3d7255b0627 +Subproject commit 9c0e1131a457d5a44c6da512ef542efd32647446 From a0f40f517b150f9a3162068aa032c3a95f721332 Mon Sep 17 00:00:00 2001 From: arhag Date: Wed, 17 Apr 2019 16:04:51 -0400 Subject: [PATCH 425/680] switch to more efficient order of checks in apply_context::exec_one (related to FORWARD_SETCODE protocol feature) --- libraries/chain/apply_context.cpp | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/libraries/chain/apply_context.cpp b/libraries/chain/apply_context.cpp index f86915e6f6c..edb96ab88a2 100644 --- a/libraries/chain/apply_context.cpp +++ b/libraries/chain/apply_context.cpp @@ -73,10 +73,10 @@ void apply_context::exec_one() } if( ( receiver_account->code_hash != digest_type() ) && - ( control.is_builtin_activated( builtin_protocol_feature_t::forward_setcode ) - || !( act->account == config::system_account_name - && act->name == N( setcode ) - && receiver == config::system_account_name ) + ( !( act->account == config::system_account_name + && act->name == N( setcode ) + && receiver == config::system_account_name ) + || control.is_builtin_activated( builtin_protocol_feature_t::forward_setcode ) ) ) { if( trx_context.enforce_whiteblacklist && control.is_producing_block() ) { From 416f69c7da95fcec6b8b086c290c19a2f6f6d9b8 Mon Sep 17 00:00:00 2001 From: Todd Fleming Date: Wed, 17 Apr 2019 16:31:52 -0400 Subject: [PATCH 426/680] ship: add partial transaction to trace --- libraries/chain/controller.cpp | 14 ++--- .../chain/include/eosio/chain/controller.hpp | 2 +- plugins/chain_plugin/chain_plugin.cpp | 4 +- plugins/history_plugin/history_plugin.cpp | 4 +- plugins/mongo_db_plugin/mongo_db_plugin.cpp | 4 +- .../state_history_plugin.hpp | 42 +++++++++++++ .../state_history_serialization.hpp | 63 ++++++++++++------- .../state_history_plugin.cpp | 40 ++++++------ .../state_history_plugin_abi.cpp | 16 ++++- 9 files changed, 131 insertions(+), 58 deletions(-) diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp index 2500c2015ee..f51bdb26412 100644 --- a/libraries/chain/controller.cpp +++ b/libraries/chain/controller.cpp @@ -1078,7 +1078,7 @@ struct controller_impl { trace->receipt = push_receipt( gtrx.trx_id, transaction_receipt::expired, billed_cpu_time_us, 0 ); // expire the transaction trace->account_ram_delta = account_delta( gtrx.payer, trx_removal_ram_delta ); emit( self.accepted_transaction, trx ); - emit( self.applied_transaction, trace ); + emit( self.applied_transaction, std::tie(trace, dtrx) ); undo_session.squash(); return 
trace;
            }

@@ -1125,7 +1125,7 @@ struct controller_impl {
             trace->account_ram_delta = account_delta( gtrx.payer, trx_removal_ram_delta );

             emit( self.accepted_transaction, trx );
-            emit( self.applied_transaction, trace );
+            emit( self.applied_transaction, std::tie(trace, dtrx) );

             trx_context.squash();
             undo_session.squash();
@@ -1159,7 +1159,7 @@ struct controller_impl {
          if( !trace->except_ptr ) {
             trace->account_ram_delta = account_delta( gtrx.payer, trx_removal_ram_delta );
             emit( self.accepted_transaction, trx );
-            emit( self.applied_transaction, trace );
+            emit( self.applied_transaction, std::tie(trace, dtrx) );
             undo_session.squash();
             return trace;
          }
@@ -1197,12 +1197,12 @@ struct controller_impl {
            trace->account_ram_delta = account_delta( gtrx.payer, trx_removal_ram_delta );

            emit( self.accepted_transaction, trx );
-           emit( self.applied_transaction, trace );
+           emit( self.applied_transaction, std::tie(trace, dtrx) );

            undo_session.squash();
         } else {
            emit( self.accepted_transaction, trx );
-           emit( self.applied_transaction, trace );
+           emit( self.applied_transaction, std::tie(trace, dtrx) );
         }

         return trace;
@@ -1314,7 +1314,7 @@ struct controller_impl {
               emit( self.accepted_transaction, trx);
            }

-           emit(self.applied_transaction, trace);
+           emit(self.applied_transaction, std::tie(trace, trn));

            if ( read_mode != db_read_mode::SPECULATIVE && pending->_block_status == controller::block_status::incomplete ) {
@@ -1344,7 +1344,7 @@ struct controller_impl {
            }

            emit( self.accepted_transaction, trx );
-           emit( self.applied_transaction, trace );
+           emit( self.applied_transaction, std::tie(trace, trn) );

            return trace;
         } FC_CAPTURE_AND_RETHROW((trace))
diff --git a/libraries/chain/include/eosio/chain/controller.hpp b/libraries/chain/include/eosio/chain/controller.hpp
index 43d1f1637c9..c7702d09414 100644
--- a/libraries/chain/include/eosio/chain/controller.hpp
+++ b/libraries/chain/include/eosio/chain/controller.hpp
@@ -282,7 +282,7 @@ namespace eosio { namespace chain {
         signal<void(const block_state_ptr&)>          accepted_block;
         signal<void(const block_state_ptr&)>          irreversible_block;
         signal<void(const transaction_metadata_ptr&)> accepted_transaction;
-        signal<void(const transaction_trace_ptr&)>    applied_transaction;
+        signal<void(std::tuple<const transaction_trace_ptr&, const signed_transaction&>)> applied_transaction;
         signal<void(const int&)>                      bad_alloc;

         /*
diff --git a/plugins/chain_plugin/chain_plugin.cpp b/plugins/chain_plugin/chain_plugin.cpp
index a13407e8abb..a2ed50bfd86 100644
--- a/plugins/chain_plugin/chain_plugin.cpp
+++ b/plugins/chain_plugin/chain_plugin.cpp
@@ -943,8 +943,8 @@ void chain_plugin::plugin_initialize(const variables_map& options) {
      } );

      my->applied_transaction_connection = my->chain->applied_transaction.connect(
-           [this]( const transaction_trace_ptr& trace ) {
-              my->applied_transaction_channel.publish( priority::low, trace );
+           [this]( std::tuple<const transaction_trace_ptr&, const signed_transaction&> t ) {
+              my->applied_transaction_channel.publish( priority::low, std::get<0>(t) );
            } );

      my->chain->add_indices();
diff --git a/plugins/history_plugin/history_plugin.cpp b/plugins/history_plugin/history_plugin.cpp
index 6cdcffcb546..12cd3a5e731 100644
--- a/plugins/history_plugin/history_plugin.cpp
+++ b/plugins/history_plugin/history_plugin.cpp
@@ -351,8 +351,8 @@ namespace eosio {
         db.add_index<account_control_history_multi_index>();

         my->applied_transaction_connection.emplace(
-              chain.applied_transaction.connect( [&]( const transaction_trace_ptr& p ) {
-                 my->on_applied_transaction( p );
+              chain.applied_transaction.connect( [&]( std::tuple<const transaction_trace_ptr&, const signed_transaction&> t ) {
+                 my->on_applied_transaction( std::get<0>(t) );
              } ));
      } FC_LOG_AND_RETHROW() }
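For downstream consumers, the effect of the widened signal is easiest to see in isolation. The following sketch is not part of the patch; `connect_example` and the log line are invented for illustration, while the tuple type comes from the controller.hpp change above. A subscriber now receives the trace together with the transaction that produced it:

    #include <eosio/chain/controller.hpp>

    using namespace eosio::chain;

    // Sketch of a subscriber to the widened applied_transaction signal.
    void connect_example(controller& chain) {
       chain.applied_transaction.connect(
          [](std::tuple<const transaction_trace_ptr&, const signed_transaction&> t) {
             const transaction_trace_ptr& trace = std::get<0>(t);  // the trace, as before
             const signed_transaction&    trx   = std::get<1>(t);  // newly available
             // Subscribers that only need the trace can ignore std::get<1>(t),
             // which is exactly what chain_plugin and history_plugin do above.
             ilog("applied ${id}", ("id", trace->id));
          });
    }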
diff --git a/plugins/mongo_db_plugin/mongo_db_plugin.cpp b/plugins/mongo_db_plugin/mongo_db_plugin.cpp
index bf66e570730..f2f36608667 100644
--- a/plugins/mongo_db_plugin/mongo_db_plugin.cpp
+++ b/plugins/mongo_db_plugin/mongo_db_plugin.cpp
@@ -1695,8 +1695,8 @@ void mongo_db_plugin::plugin_initialize(const variables_map& options)
               my->accepted_transaction( t );
            } ));
      my->applied_transaction_connection.emplace(
-           chain.applied_transaction.connect( [&]( const chain::transaction_trace_ptr& t ) {
-              my->applied_transaction( t );
+           chain.applied_transaction.connect( [&]( std::tuple<const chain::transaction_trace_ptr&, const chain::signed_transaction&> t ) {
+              my->applied_transaction( std::get<0>(t) );
            } ));

      if( my->wipe_database_on_startup ) {
diff --git a/plugins/state_history_plugin/include/eosio/state_history_plugin/state_history_plugin.hpp b/plugins/state_history_plugin/include/eosio/state_history_plugin/state_history_plugin.hpp
index f3429e2d190..fb704693fba 100644
--- a/plugins/state_history_plugin/include/eosio/state_history_plugin/state_history_plugin.hpp
+++ b/plugins/state_history_plugin/include/eosio/state_history_plugin/state_history_plugin.hpp
@@ -22,6 +22,48 @@ using std::shared_ptr;

 typedef shared_ptr<struct state_history_plugin_impl> state_history_ptr;

+struct partial_transaction {
+   chain::time_point_sec         expiration             = {};
+   uint16_t                      ref_block_num          = {};
+   uint32_t                      ref_block_prefix       = {};
+   fc::unsigned_int              max_net_usage_words    = {};
+   uint8_t                       max_cpu_usage_ms       = {};
+   fc::unsigned_int              delay_sec              = {};
+   chain::extensions_type        transaction_extensions = {};
+   vector<chain::signature_type> signatures             = {};
+   vector<chain::bytes>          context_free_data      = {};
+
+   partial_transaction(const chain::signed_transaction& t)
+       : expiration(t.expiration)
+       , ref_block_num(t.ref_block_num)
+       , ref_block_prefix(t.ref_block_prefix)
+       , max_net_usage_words(t.max_net_usage_words)
+       , max_cpu_usage_ms(t.max_cpu_usage_ms)
+       , delay_sec(t.delay_sec)
+       , transaction_extensions(t.transaction_extensions)
+       , signatures(t.signatures)
+       , context_free_data(t.context_free_data) {}
+};
+
+struct augmented_transaction_trace {
+   chain::transaction_trace_ptr         trace;
+   std::shared_ptr<partial_transaction> partial;
+
+   augmented_transaction_trace()                                   = default;
+   augmented_transaction_trace(const augmented_transaction_trace&) = default;
+   augmented_transaction_trace(augmented_transaction_trace&&)      = default;
+
+   augmented_transaction_trace(const chain::transaction_trace_ptr& trace)
+       : trace{trace} {}
+
+   augmented_transaction_trace(const chain::transaction_trace_ptr& trace, const chain::signed_transaction& t)
+       : trace{trace}
+       , partial{std::make_shared<partial_transaction>(t)} {}
+
+   augmented_transaction_trace& operator=(const augmented_transaction_trace&) = default;
+   augmented_transaction_trace& operator=(augmented_transaction_trace&&) = default;
+};
+
 struct table_delta {
    fc::unsigned_int struct_version = 0;
    std::string      name{};
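The two new types are the heart of this patch: partial_transaction copies the signed-transaction fields that a transaction_trace never carried, and augmented_transaction_trace glues them to the trace. A minimal usage sketch (the helper name `make_augmented` is hypothetical, introduced only for this example):

    #include <eosio/state_history_plugin/state_history_plugin.hpp>

    using namespace eosio;

    // Pair a trace with its originating signed_transaction so signatures and
    // context-free data survive into the state-history stream.
    augmented_transaction_trace make_augmented(const chain::transaction_trace_ptr& trace,
                                               const chain::signed_transaction&    trx) {
       augmented_transaction_trace att{trace, trx};
       // att.partial now holds expiration, ref-block data, signatures, and
       // context-free data copied out of trx by the partial_transaction ctor.
       return att;
    }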
diff --git a/plugins/state_history_plugin/include/eosio/state_history_plugin/state_history_serialization.hpp b/plugins/state_history_plugin/include/eosio/state_history_plugin/state_history_serialization.hpp
index 61b9662d4ed..d419481b24e 100644
--- a/plugins/state_history_plugin/include/eosio/state_history_plugin/state_history_serialization.hpp
+++ b/plugins/state_history_plugin/include/eosio/state_history_plugin/state_history_serialization.hpp
@@ -524,53 +524,68 @@ datastream<ST>& operator<<(datastream<ST>& ds, const history_serial_wrapper
 template <typename ST>
-datastream<ST>& operator<<(datastream<ST>& ds,
-                           const history_context_wrapper<uint8_t, eosio::chain::transaction_trace>& obj) {
+datastream<ST>& operator<<(datastream<ST>& ds,
+                           const history_context_wrapper<uint8_t, eosio::augmented_transaction_trace>& obj) {
+   auto& trace = *obj.obj.trace;
    fc::raw::pack(ds, fc::unsigned_int(0));
-   fc::raw::pack(ds, as_type<eosio::chain::transaction_id_type>(obj.obj.id));
-   if (obj.obj.receipt) {
-      if (obj.obj.failed_dtrx_trace &&
-          obj.obj.receipt->status.value == eosio::chain::transaction_receipt_header::soft_fail)
+   fc::raw::pack(ds, as_type<eosio::chain::transaction_id_type>(trace.id));
+   if (trace.receipt) {
+      if (trace.failed_dtrx_trace && trace.receipt->status.value == eosio::chain::transaction_receipt_header::soft_fail)
          fc::raw::pack(ds, uint8_t(eosio::chain::transaction_receipt_header::executed));
       else
-         fc::raw::pack(ds, as_type<uint8_t>(obj.obj.receipt->status.value));
-      fc::raw::pack(ds, as_type<uint32_t>(obj.obj.receipt->cpu_usage_us));
-      fc::raw::pack(ds, as_type<fc::unsigned_int>(obj.obj.receipt->net_usage_words));
+         fc::raw::pack(ds, as_type<uint8_t>(trace.receipt->status.value));
+      fc::raw::pack(ds, as_type<uint32_t>(trace.receipt->cpu_usage_us));
+      fc::raw::pack(ds, as_type<fc::unsigned_int>(trace.receipt->net_usage_words));
    } else {
       fc::raw::pack(ds, uint8_t(obj.context));
       fc::raw::pack(ds, uint32_t(0));
       fc::raw::pack(ds, fc::unsigned_int(0));
    }
-   fc::raw::pack(ds, as_type<int64_t>(obj.obj.elapsed.count()));
-   fc::raw::pack(ds, as_type<uint64_t>(obj.obj.net_usage));
-   fc::raw::pack(ds, as_type<bool>(obj.obj.scheduled));
-   history_serialize_container(ds, obj.db, as_type<std::vector<eosio::chain::action_trace>>(obj.obj.action_traces));
+   fc::raw::pack(ds, as_type<int64_t>(trace.elapsed.count()));
+   fc::raw::pack(ds, as_type<uint64_t>(trace.net_usage));
+   fc::raw::pack(ds, as_type<bool>(trace.scheduled));
+   history_serialize_container(ds, obj.db, as_type<std::vector<eosio::chain::action_trace>>(trace.action_traces));

-   fc::raw::pack(ds, bool(obj.obj.account_ram_delta));
-   if (obj.obj.account_ram_delta) {
+   fc::raw::pack(ds, bool(trace.account_ram_delta));
+   if (trace.account_ram_delta) {
       fc::raw::pack(
-          ds, make_history_serial_wrapper(obj.db, as_type<eosio::chain::account_delta>(*obj.obj.account_ram_delta)));
+          ds, make_history_serial_wrapper(obj.db, as_type<eosio::chain::account_delta>(*trace.account_ram_delta)));
    }

    fc::optional<std::string> e;
-   if (obj.obj.except)
-      e = obj.obj.except->to_string();
+   if (trace.except)
+      e = trace.except->to_string();
    fc::raw::pack(ds, as_type<fc::optional<std::string>>(e));
-   fc::raw::pack(ds, as_type<fc::optional<uint64_t>>(obj.obj.error_code));
+   fc::raw::pack(ds, as_type<fc::optional<uint64_t>>(trace.error_code));

-   fc::raw::pack(ds, bool(obj.obj.failed_dtrx_trace));
-   if (obj.obj.failed_dtrx_trace) {
+   fc::raw::pack(ds, bool(trace.failed_dtrx_trace));
+   if (trace.failed_dtrx_trace) {
       uint8_t stat = eosio::chain::transaction_receipt_header::hard_fail;
-      if (obj.obj.receipt && obj.obj.receipt->status.value == eosio::chain::transaction_receipt_header::soft_fail)
+      if (trace.receipt && trace.receipt->status.value == eosio::chain::transaction_receipt_header::soft_fail)
         stat = eosio::chain::transaction_receipt_header::soft_fail;
-      fc::raw::pack(ds, make_history_context_wrapper(obj.db, stat, *obj.obj.failed_dtrx_trace));
+      fc::raw::pack(
+          ds, make_history_context_wrapper(obj.db, stat, eosio::augmented_transaction_trace{trace.failed_dtrx_trace}));
+   }
+
+   fc::raw::pack(ds, bool(obj.obj.partial));
+   if (obj.obj.partial) {
+      auto& partial = *obj.obj.partial;
+      fc::raw::pack(ds, as_type<fc::time_point_sec>(partial.expiration));
+      fc::raw::pack(ds, as_type<uint16_t>(partial.ref_block_num));
+      fc::raw::pack(ds, as_type<uint32_t>(partial.ref_block_prefix));
+      fc::raw::pack(ds, as_type<fc::unsigned_int>(partial.max_net_usage_words));
+      fc::raw::pack(ds, as_type<uint8_t>(partial.max_cpu_usage_ms));
+      fc::raw::pack(ds, as_type<fc::unsigned_int>(partial.delay_sec));
+      fc::raw::pack(ds, as_type<eosio::chain::extensions_type>(partial.transaction_extensions));
+      fc::raw::pack(ds, as_type<std::vector<eosio::chain::signature_type>>(partial.signatures));
+      fc::raw::pack(ds, as_type<std::vector<eosio::chain::bytes>>(partial.context_free_data));
    }

    return ds;
 }

 template <typename ST>
-datastream<ST>& operator<<(datastream<ST>& ds, const history_serial_wrapper<eosio::chain::transaction_trace>& obj) {
+datastream<ST>& operator<<(datastream<ST>& ds, const history_serial_wrapper<eosio::augmented_transaction_trace>& obj) {
    uint8_t stat = eosio::chain::transaction_receipt_header::hard_fail;
    ds << make_history_context_wrapper(obj.db, stat, obj.obj);
    return ds;
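On the wire, the new trailing field is a bool flag followed by the partial_transaction fields in declaration order, which is what the `partial_transaction?` ABI entry added below describes. A hypothetical reader-side sketch (the function name is invented; `fc::raw::unpack` is the real API):

    #include <fc/io/raw.hpp>
    #include <fc/time.hpp>

    // Decode the leading part of the optional trailing field, mirroring the
    // pack order above.
    template <typename ST>
    void read_partial_prefix(fc::datastream<ST>& ds) {
       bool has_partial = false;
       fc::raw::unpack(ds, has_partial);   // matches pack(bool(obj.obj.partial))
       if (has_partial) {
          fc::time_point_sec expiration;
          uint16_t           ref_block_num    = 0;
          uint32_t           ref_block_prefix = 0;
          fc::raw::unpack(ds, expiration);
          fc::raw::unpack(ds, ref_block_num);
          fc::raw::unpack(ds, ref_block_prefix);
          // The remaining fields (varuints, extensions, signatures, and
          // context-free data) follow in the same order they were packed.
       }
    }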
diff --git a/plugins/state_history_plugin/state_history_plugin.cpp b/plugins/state_history_plugin/state_history_plugin.cpp
index 266da5830cb..1ee464055b7 100644
--- a/plugins/state_history_plugin/state_history_plugin.cpp
+++ b/plugins/state_history_plugin/state_history_plugin.cpp
@@ -102,17 +102,17 @@ bool include_delta(const eosio::chain::code_object& old, const eosio::chain::cod
 }

 struct state_history_plugin_impl : std::enable_shared_from_this<state_history_plugin_impl> {
-   chain_plugin*                   chain_plug = nullptr;
-   fc::optional<state_history_log> trace_log;
-   fc::optional<state_history_log> chain_state_log;
-   bool                            stopping = false;
-   fc::optional<scoped_connection> applied_transaction_connection;
-   fc::optional<scoped_connection> accepted_block_connection;
-   string                          endpoint_address = "0.0.0.0";
-   uint16_t                        endpoint_port = 8080;
-   std::unique_ptr<tcp::acceptor>  acceptor;
-   std::map<transaction_id_type, transaction_trace_ptr> cached_traces;
-   transaction_trace_ptr           onblock_trace;
+   chain_plugin*                             chain_plug = nullptr;
+   fc::optional<state_history_log>           trace_log;
+   fc::optional<state_history_log>           chain_state_log;
+   bool                                      stopping = false;
+   fc::optional<scoped_connection>           applied_transaction_connection;
+   fc::optional<scoped_connection>           accepted_block_connection;
+   string                                    endpoint_address = "0.0.0.0";
+   uint16_t                                  endpoint_port = 8080;
+   std::unique_ptr<tcp::acceptor>            acceptor;
+   std::map<transaction_id_type, augmented_transaction_trace> cached_traces;
+   fc::optional<augmented_transaction_trace> onblock_trace;

    void get_log_entry(state_history_log& log, uint32_t block_num, fc::optional<bytes>& result) {
       if (block_num < log.begin_block() || block_num >= log.end_block())
@@ -392,14 +392,14 @@ struct state_history_plugin_impl : std::enable_shared_from_this<state_history_
-   void on_applied_transaction(const transaction_trace_ptr& p) {
+   void on_applied_transaction(const transaction_trace_ptr& p, const signed_transaction& t) {
       if (p->receipt && trace_log) {
          if (is_onblock(p))
-            onblock_trace = p;
+            onblock_trace.emplace(p, t);
          else if (p->failed_dtrx_trace)
-            cached_traces[p->failed_dtrx_trace->id] = p;
+            cached_traces[p->failed_dtrx_trace->id] = augmented_transaction_trace{p, t};
         else
-            cached_traces[p->id] = p;
+            cached_traces[p->id] = augmented_transaction_trace{p, t};
      }
   }

@@ -419,9 +419,9 @@ struct state_history_plugin_impl : std::enable_shared_from_this<state_history_
-      std::vector<transaction_trace_ptr> traces;
+      std::vector<augmented_transaction_trace> traces;
       if (onblock_trace)
-         traces.push_back(onblock_trace);
+         traces.push_back(*onblock_trace);
       for (auto& r : block_state->block->transactions) {
         transaction_id_type id;
         if (r.trx.contains<transaction_id_type>())
            id = r.trx.get<transaction_id_type>();
         else
            id = r.trx.get<packed_transaction>().id();
         auto it = cached_traces.find(id);
-        EOS_ASSERT(it != cached_traces.end() && it->second->receipt, plugin_exception,
+        EOS_ASSERT(it != cached_traces.end() && it->second.trace->receipt, plugin_exception,
                    "missing trace for transaction ${id}", ("id", id));
         traces.push_back(it->second);
      }
@@ -576,7 +576,9 @@ void state_history_plugin::plugin_initialize(const variables_map& options) {
      EOS_ASSERT(my->chain_plug, chain::missing_chain_plugin_exception, "");
      auto& chain = my->chain_plug->chain();
      my->applied_transaction_connection.emplace(
-         chain.applied_transaction.connect([&](const transaction_trace_ptr& p) { my->on_applied_transaction(p); }));
+         chain.applied_transaction.connect([&](std::tuple<const transaction_trace_ptr&, const signed_transaction&> t) {
+            my->on_applied_transaction(std::get<0>(t), std::get<1>(t));
+         }));
      my->accepted_block_connection.emplace(
          chain.accepted_block.connect([&](const block_state_ptr& p) { my->on_accepted_block(p); }));
diff --git a/plugins/state_history_plugin/state_history_plugin_abi.cpp b/plugins/state_history_plugin/state_history_plugin_abi.cpp
index b6496f5cd60..059c3c717f4 100644
--- a/plugins/state_history_plugin/state_history_plugin_abi.cpp
+++ b/plugins/state_history_plugin/state_history_plugin_abi.cpp
@@ -106,6 +106,19 @@ extern const char* const state_history_plugin_abi = R"({
        { "name": "error_code", "type": "uint64?"
} ] }, + { + "name": "partial_transaction", "fields": [ + { "name": "expiration", "type": "time_point_sec" }, + { "name": "ref_block_num", "type": "uint16" }, + { "name": "ref_block_prefix", "type": "uint32" }, + { "name": "max_net_usage_words", "type": "varuint32" }, + { "name": "max_cpu_usage_ms", "type": "uint8" }, + { "name": "delay_sec", "type": "varuint32" }, + { "name": "transaction_extensions", "type": "extension[]" }, + { "name": "signatures", "type": "signature[]" }, + { "name": "context_free_data", "type": "bytes[]" } + ] + }, { "name": "transaction_trace_v0", "fields": [ { "name": "id", "type": "checksum256" }, @@ -119,7 +132,8 @@ extern const char* const state_history_plugin_abi = R"({ { "name": "account_ram_delta", "type": "account_delta?" }, { "name": "except", "type": "string?" }, { "name": "error_code", "type": "uint64?" }, - { "name": "failed_dtrx_trace", "type": "transaction_trace?" } + { "name": "failed_dtrx_trace", "type": "transaction_trace?" }, + { "name": "partial", "type": "partial_transaction?" } ] }, { From 18e9d477788b9294080e1c7dd2161e4ad0fa086c Mon Sep 17 00:00:00 2001 From: Zach Butler Date: Wed, 17 Apr 2019 17:17:39 -0400 Subject: [PATCH 427/680] Updated Buildkite yaml to match release/1.7.x branch --- .buildkite/long_running_tests.yml | 81 +++++++------ .buildkite/pipeline.yml | 181 ++++++++++++++++-------------- 2 files changed, 141 insertions(+), 121 deletions(-) diff --git a/.buildkite/long_running_tests.yml b/.buildkite/long_running_tests.yml index 56eb9d08953..ecf663432e9 100644 --- a/.buildkite/long_running_tests.yml +++ b/.buildkite/long_running_tests.yml @@ -1,10 +1,11 @@ steps: - - command: | # Ubuntu 16.04 Build + - command: | # Amazon Linux 2 Build echo "+++ :hammer: Building" ./scripts/eosio_build.sh -y echo "--- :compression: Compressing build directory" - tar -pczf build.tar.gz build/; if [ ! -f build.tar.gz ]; then echo "No TAR Found!" && exit 1; fi - label: ":ubuntu: 16.04 Build" + tar -pczf build.tar.gz build + if [[ ! -f build.tar.gz ]]; then echo 'ERROR: No build.tar.gz artifact found!' && exit 1; fi + label: ":aws: Amazon Linux 2 Build" agents: queue: "automation-large-builder-fleet" artifact_paths: "build.tar.gz" @@ -16,17 +17,18 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu16_2-2" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-2" propagate-environment: true workdir: /data/job timeout: 60 - - command: | # Ubuntu 18.04 Build + - command: | # CentOS 7 Build echo "+++ :hammer: Building" ./scripts/eosio_build.sh -y echo "--- :compression: Compressing build directory" - tar -pczf build.tar.gz build/; if [ ! -f build.tar.gz ]; then echo "No TAR Found!" && exit 1; fi - label: ":ubuntu: 18.04 Build" + tar -pczf build.tar.gz build + if [[ ! -f build.tar.gz ]]; then echo 'ERROR: No build.tar.gz artifact found!' && exit 1; fi + label: ":centos: CentOS 7 Build" agents: queue: "automation-large-builder-fleet" artifact_paths: "build.tar.gz" @@ -38,17 +40,18 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-2" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-2" propagate-environment: true workdir: /data/job timeout: 60 - - command: | # CentOS 7 Build + - command: | # Ubuntu 16.04 Build echo "+++ :hammer: Building" ./scripts/eosio_build.sh -y echo "--- :compression: Compressing build directory" - tar -pczf build.tar.gz build/; if [ ! 
-f build.tar.gz ]; then echo "No TAR Found!" && exit 1; fi - label: ":centos: 7 Build" + tar -pczf build.tar.gz build + if [[ ! -f build.tar.gz ]]; then echo 'ERROR: No build.tar.gz artifact found!' && exit 1; fi + label: ":ubuntu: Ubuntu 16.04 Build" agents: queue: "automation-large-builder-fleet" artifact_paths: "build.tar.gz" @@ -60,17 +63,18 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-2" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu16_2-2" propagate-environment: true workdir: /data/job timeout: 60 - - command: | # Amazon Linux 2 Build + - command: | # Ubuntu 18.04 Build echo "+++ :hammer: Building" ./scripts/eosio_build.sh -y echo "--- :compression: Compressing build directory" - tar -pczf build.tar.gz build/; if [ ! -f build.tar.gz ]; then echo "No TAR Found!" && exit 1; fi - label: ":aws: 2 Build" + tar -pczf build.tar.gz build + if [[ ! -f build.tar.gz ]]; then echo 'ERROR: No build.tar.gz artifact found!' && exit 1; fi + label: ":ubuntu: Ubuntu 18.04 Build" agents: queue: "automation-large-builder-fleet" artifact_paths: "build.tar.gz" @@ -82,18 +86,21 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-2" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-2" propagate-environment: true workdir: /data/job timeout: 60 - command: | # macOS Mojave Build echo "--- Creating symbolic link to job directory :file_folder:" - sleep 5 && ln -s "$(pwd)" /data/job && cd /data/job + sleep 5 + ln -s "$(pwd)" /data/job + cd /data/job echo "+++ Building :hammer:" ./scripts/eosio_build.sh -y echo "--- Compressing build directory :compression:" - tar -pczf build.tar.gz build/; if [ ! -f build.tar.gz ]; then echo "No TAR Found!" && exit 1; fi + tar -pczf build.tar.gz build + if [[ ! -f build.tar.gz ]]; then echo 'ERROR: No build.tar.gz artifact found!' && exit 1; fi label: ":darwin: Mojave Build" agents: - "role=builder-v2-1" @@ -103,12 +110,12 @@ steps: - wait - - command: | # Ubuntu 16.04 Tests + - command: | # Amazon Linux 2 Tests echo "--- :arrow_down: Downloading Build Directory" - buildkite-agent artifact download "build.tar.gz" . --step ":ubuntu: 16.04 Build" + buildkite-agent artifact download "build.tar.gz" . --step ":aws: Amazon Linux 2 Build" echo "+++ :microscope: Running LR Tests" ./scripts/long-running-test.sh - label: ":ubuntu: 16.04 LR Tests" + label: ":aws: Amazon Linux 2 LR Tests" agents: queue: "automation-large-builder-fleet" plugins: @@ -119,17 +126,17 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu16_2-2" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-2" propagate-environment: true workdir: /data/job timeout: 90 - - - command: | # Ubuntu 18.04 Tests + + - command: | # centOS 7 Tests echo "--- :arrow_down: Downloading Build Directory" - buildkite-agent artifact download "build.tar.gz" . --step ":ubuntu: 18.04 Build" + buildkite-agent artifact download "build.tar.gz" . 
--step ":centos: CentOS 7 Build" echo "+++ :microscope: Running LR Tests" ./scripts/long-running-test.sh - label: ":ubuntu: 18.04 LR Tests" + label: ":centos: CentOS 7 LR Tests" agents: queue: "automation-large-builder-fleet" plugins: @@ -140,17 +147,17 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-2" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-2" propagate-environment: true workdir: /data/job timeout: 90 - - command: | # centOS Tests + - command: | # Ubuntu 16.04 Tests echo "--- :arrow_down: Downloading Build Directory" - buildkite-agent artifact download "build.tar.gz" . --step ":centos: 7 Build" + buildkite-agent artifact download "build.tar.gz" . --step ":ubuntu: Ubuntu 16.04 Build" echo "+++ :microscope: Running LR Tests" ./scripts/long-running-test.sh - label: ":centos: 7 LR Tests" + label: ":ubuntu: Ubuntu 16.04 LR Tests" agents: queue: "automation-large-builder-fleet" plugins: @@ -161,17 +168,17 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-2" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu16_2-2" propagate-environment: true workdir: /data/job timeout: 90 - - - command: | # Amazon AWS-2 Linux Tests + + - command: | # Ubuntu 18.04 Tests echo "--- :arrow_down: Downloading Build Directory" - buildkite-agent artifact download "build.tar.gz" . --step ":aws: 2 Build" + buildkite-agent artifact download "build.tar.gz" . --step ":ubuntu: Ubuntu 18.04 Build" echo "+++ :microscope: Running LR Tests" ./scripts/long-running-test.sh - label: ":aws: 2 LR Tests" + label: ":ubuntu: Ubuntu 18.04 LR Tests" agents: queue: "automation-large-builder-fleet" plugins: @@ -182,7 +189,7 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-2" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-2" propagate-environment: true workdir: /data/job timeout: 90 @@ -196,4 +203,4 @@ steps: agents: - "role=tester-v2-1" - "os=mojave" - timeout: 90 + timeout: 90 \ No newline at end of file diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml index 4527ee82dc6..69019957ff9 100644 --- a/.buildkite/pipeline.yml +++ b/.buildkite/pipeline.yml @@ -1,10 +1,11 @@ steps: - - command: | # Ubuntu 16.04 Build + - command: | # Amazon Linux 2 Build echo "+++ :hammer: Building" ./scripts/eosio_build.sh -y echo "--- :compression: Compressing build directory" - tar -pczf build.tar.gz build/; if [ ! -f build.tar.gz ]; then echo "No TAR Found!" && exit 1; fi - label: ":ubuntu: 16.04 Build" + tar -pczf build.tar.gz build + if [[ ! -f build.tar.gz ]]; then echo 'ERROR: No build.tar.gz artifact found!' && exit 1; fi + label: ":aws: Amazon Linux 2 Build" agents: queue: "automation-large-builder-fleet" artifact_paths: "build.tar.gz" @@ -16,17 +17,18 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu16_2-2" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-2" propagate-environment: true workdir: /data/job timeout: 60 - - command: | # Ubuntu 18.04 Build + - command: | # CentOS 7 Build echo "+++ :hammer: Building" ./scripts/eosio_build.sh -y echo "--- :compression: Compressing build directory" - tar -pczf build.tar.gz build/; if [ ! -f build.tar.gz ]; then echo "No TAR Found!" 
&& exit 1; fi - label: ":ubuntu: 18.04 Build" + tar -pczf build.tar.gz build + if [[ ! -f build.tar.gz ]]; then echo 'ERROR: No build.tar.gz artifact found!' && exit 1; fi + label: ":centos: CentOS 7 Build" agents: queue: "automation-large-builder-fleet" artifact_paths: "build.tar.gz" @@ -38,17 +40,18 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-2" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-2" propagate-environment: true workdir: /data/job timeout: 60 - - command: | # CentOS 7 Build + - command: | # Ubuntu 16.04 Build echo "+++ :hammer: Building" ./scripts/eosio_build.sh -y echo "--- :compression: Compressing build directory" - tar -pczf build.tar.gz build/; if [ ! -f build.tar.gz ]; then echo "No TAR Found!" && exit 1; fi - label: ":centos: 7 Build" + tar -pczf build.tar.gz build + if [[ ! -f build.tar.gz ]]; then echo 'ERROR: No build.tar.gz artifact found!' && exit 1; fi + label: ":ubuntu: Ubuntu 16.04 Build" agents: queue: "automation-large-builder-fleet" artifact_paths: "build.tar.gz" @@ -60,17 +63,18 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-2" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu16_2-2" propagate-environment: true workdir: /data/job timeout: 60 - - command: | # Amazon Linux 2 Build + - command: | # Ubuntu 18.04 Build echo "+++ :hammer: Building" ./scripts/eosio_build.sh -y echo "--- :compression: Compressing build directory" - tar -pczf build.tar.gz build/; if [ ! -f build.tar.gz ]; then echo "No TAR Found!" && exit 1; fi - label: ":aws: 2 Build" + tar -pczf build.tar.gz build + if [[ ! -f build.tar.gz ]]; then echo 'ERROR: No build.tar.gz artifact found!' && exit 1; fi + label: ":ubuntu: Ubuntu 18.04 Build" agents: queue: "automation-large-builder-fleet" artifact_paths: "build.tar.gz" @@ -82,18 +86,21 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-2" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-2" propagate-environment: true workdir: /data/job timeout: 60 - command: | # macOS Mojave Build echo "--- Creating symbolic link to job directory :file_folder:" - sleep 5 && ln -s "$(pwd)" /data/job && cd /data/job + sleep 5 + ln -s "$(pwd)" /data/job + cd /data/job echo "+++ Building :hammer:" ./scripts/eosio_build.sh -y echo "--- Compressing build directory :compression:" - tar -pczf build.tar.gz build/; if [ ! -f build.tar.gz ]; then echo "No TAR Found!" && exit 1; fi + tar -pczf build.tar.gz build + if [[ ! -f build.tar.gz ]]; then echo 'ERROR: No build.tar.gz artifact found!' && exit 1; fi label: ":darwin: Mojave Build" agents: - "role=builder-v2-1" @@ -103,13 +110,13 @@ steps: - wait - # Ubuntu 16.04 Tests + # Amazon Linux 2 Tests - command: | echo "--- :arrow_down: Downloading Build Directory" - buildkite-agent artifact download "build.tar.gz" . --step ":ubuntu: 16.04 Build" + buildkite-agent artifact download "build.tar.gz" . 
--step ":aws: Amazon Linux 2 Build" echo "+++ :microscope: Running Tests" ./scripts/parallel-test.sh - label: ":ubuntu: 16.04 Tests" + label: ":aws: Amazon Linux 2 Tests" agents: queue: "automation-large-builder-fleet" plugins: @@ -120,17 +127,17 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu16_2-2" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-2" propagate-environment: true workdir: /data/job timeout: 60 - command: | echo "--- :arrow_down: Downloading Build Directory" - buildkite-agent artifact download "build.tar.gz" . --step ":ubuntu: 16.04 Build" + buildkite-agent artifact download "build.tar.gz" . --step ":aws: Amazon Linux 2 Build" echo "+++ :microscope: Running Tests" ./scripts/serial-test.sh - label: ":ubuntu: 16.04 NP Tests" + label: ":aws: Amazon Linux 2 NP Tests" agents: queue: "automation-large-builder-fleet" plugins: @@ -141,18 +148,18 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu16_2-2" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-2" propagate-environment: true workdir: /data/job timeout: 60 - - # Ubuntu 18.04 Tests + + # centOS 7 Tests - command: | echo "--- :arrow_down: Downloading Build Directory" - buildkite-agent artifact download "build.tar.gz" . --step ":ubuntu: 18.04 Build" + buildkite-agent artifact download "build.tar.gz" . --step ":centos: CentOS 7 Build" echo "+++ :microscope: Running Tests" ./scripts/parallel-test.sh - label: ":ubuntu: 18.04 Tests" + label: ":centos: CentOS 7 Tests" agents: queue: "automation-large-builder-fleet" plugins: @@ -163,17 +170,17 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-2" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-2" propagate-environment: true workdir: /data/job timeout: 60 - command: | echo "--- :arrow_down: Downloading Build Directory" - buildkite-agent artifact download "build.tar.gz" . --step ":ubuntu: 18.04 Build" + buildkite-agent artifact download "build.tar.gz" . --step ":centos: CentOS 7 Build" echo "+++ :microscope: Running Tests" ./scripts/serial-test.sh - label: ":ubuntu: 18.04 NP Tests" + label: ":centos: CentOS 7 NP Tests" agents: queue: "automation-large-builder-fleet" plugins: @@ -184,18 +191,18 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-2" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-2" propagate-environment: true workdir: /data/job timeout: 60 - # centOS Tests + # Ubuntu 16.04 Tests - command: | echo "--- :arrow_down: Downloading Build Directory" - buildkite-agent artifact download "build.tar.gz" . --step ":centos: 7 Build" + buildkite-agent artifact download "build.tar.gz" . --step ":ubuntu: Ubuntu 16.04 Build" echo "+++ :microscope: Running Tests" ./scripts/parallel-test.sh - label: ":centos: 7 Tests" + label: ":ubuntu: Ubuntu 16.04 Tests" agents: queue: "automation-large-builder-fleet" plugins: @@ -206,17 +213,17 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-2" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu16_2-2" propagate-environment: true workdir: /data/job timeout: 60 - command: | echo "--- :arrow_down: Downloading Build Directory" - buildkite-agent artifact download "build.tar.gz" . 
--step ":centos: 7 Build" + buildkite-agent artifact download "build.tar.gz" . --step ":ubuntu: Ubuntu 16.04 Build" echo "+++ :microscope: Running Tests" ./scripts/serial-test.sh - label: ":centos: 7 NP Tests" + label: ":ubuntu: Ubuntu 16.04 NP Tests" agents: queue: "automation-large-builder-fleet" plugins: @@ -227,18 +234,18 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-2" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu16_2-2" propagate-environment: true workdir: /data/job timeout: 60 - # Amazon AWS-2 Linux Tests + # Ubuntu 18.04 Tests - command: | echo "--- :arrow_down: Downloading Build Directory" - buildkite-agent artifact download "build.tar.gz" . --step ":aws: 2 Build" + buildkite-agent artifact download "build.tar.gz" . --step ":ubuntu: Ubuntu 18.04 Build" echo "+++ :microscope: Running Tests" ./scripts/parallel-test.sh - label: ":aws: 2 Tests" + label: ":ubuntu: Ubuntu 18.04 Tests" agents: queue: "automation-large-builder-fleet" plugins: @@ -249,17 +256,17 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-2" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-2" propagate-environment: true workdir: /data/job timeout: 60 - command: | echo "--- :arrow_down: Downloading Build Directory" - buildkite-agent artifact download "build.tar.gz" . --step ":aws: 2 Build" + buildkite-agent artifact download "build.tar.gz" . --step ":ubuntu: Ubuntu 18.04 Build" echo "+++ :microscope: Running Tests" ./scripts/serial-test.sh - label: ":aws: 2 NP Tests" + label: ":ubuntu: Ubuntu 18.04 NP Tests" agents: queue: "automation-large-builder-fleet" plugins: @@ -270,7 +277,7 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-2" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-2" propagate-environment: true workdir: /data/job timeout: 60 @@ -292,26 +299,35 @@ steps: echo "--- :arrow_down: Downloading Build Directory" buildkite-agent artifact download "build.tar.gz" . --step ":darwin: Mojave Build" echo "+++ :microscope: Running Tests" - ln -s "$(pwd)" /data/job && ./scripts/serial-test.sh + ln -s "$(pwd)" /data/job + ./scripts/serial-test.sh label: ":darwin: Mojave NP Tests" agents: - "role=tester-v2-1" - "os=mojave" timeout: 60 - + - wait - - command: | # Ubuntu 16.04 Package Builder + - command: | # CentOS 7 Package Builder echo "--- :arrow_down: Downloading build directory" - buildkite-agent artifact download "build.tar.gz" . --step ":ubuntu: 16.04 Build" + buildkite-agent artifact download "build.tar.gz" . 
--step ":centos: CentOS 7 Build" tar -zxf build.tar.gz echo "+++ :microscope: Starting package build" - cd /data/job/build/packages && bash generate_package.sh deb - label: ":ubuntu: 16.04 Package builder" + yum install -y rpm-build + mkdir -p /root/rpmbuild/BUILD + mkdir -p /root/rpmbuild/BUILDROOT + mkdir -p /root/rpmbuild/RPMS + mkdir -p /root/rpmbuild/SOURCES + mkdir -p /root/rpmbuild/SPECS + mkdir -p /root/rpmbuild/SRPMS + cd /data/job/build/packages + bash generate_package.sh rpm + label: ":centos: CentOS 7 Package Builder" agents: queue: "automation-large-builder-fleet" artifact_paths: - - "build/packages/*.deb" + - "build/packages/*.rpm" plugins: ecr#v1.1.4: login: true @@ -320,21 +336,22 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu16_2-2" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-2" propagate-environment: true workdir: /data/job env: - OS: "ubuntu-16.04" - PKGTYPE: "deb" + OS: "el7" + PKGTYPE: "rpm" timeout: 60 - - command: | # Ubuntu 18.04 Package Builder + - command: | # Ubuntu 16.04 Package Builder echo "--- :arrow_down: Downloading build directory" - buildkite-agent artifact download "build.tar.gz" . --step ":ubuntu: 18.04 Build" + buildkite-agent artifact download "build.tar.gz" . --step ":ubuntu: Ubuntu 16.04 Build" tar -zxf build.tar.gz echo "+++ :microscope: Starting package build" - cd /data/job/build/packages && bash generate_package.sh deb - label: ":ubuntu: 18.04 Package builder" + cd /data/job/build/packages + bash generate_package.sh deb + label: ":ubuntu: Ubuntu 16.04 Package Builder" agents: queue: "automation-large-builder-fleet" artifact_paths: @@ -347,32 +364,26 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-2" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu16_2-2" propagate-environment: true workdir: /data/job env: - OS: "ubuntu-18.04" + OS: "ubuntu-16.04" PKGTYPE: "deb" timeout: 60 - - command: | # CentOS 7 Package Builder + - command: | # Ubuntu 18.04 Package Builder echo "--- :arrow_down: Downloading build directory" - buildkite-agent artifact download "build.tar.gz" . --step ":centos: 7 Build" + buildkite-agent artifact download "build.tar.gz" . --step ":ubuntu: Ubuntu 18.04 Build" tar -zxf build.tar.gz echo "+++ :microscope: Starting package build" - yum install -y rpm-build - mkdir -p /root/rpmbuild/BUILD - mkdir -p /root/rpmbuild/BUILDROOT - mkdir -p /root/rpmbuild/RPMS - mkdir -p /root/rpmbuild/SOURCES - mkdir -p /root/rpmbuild/SPECS - mkdir -p /root/rpmbuild/SRPMS - cd /data/job/build/packages && bash generate_package.sh rpm - label: ":centos: 7 Package builder" + cd /data/job/build/packages + bash generate_package.sh deb + label: ":ubuntu: Ubuntu 18.04 Package Builder" agents: queue: "automation-large-builder-fleet" artifact_paths: - - "build/packages/*.rpm" + - "build/packages/*.deb" plugins: ecr#v1.1.4: login: true @@ -381,12 +392,12 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-2" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-2" propagate-environment: true workdir: /data/job env: - OS: "el7" - PKGTYPE: "rpm" + OS: "ubuntu-18.04" + PKGTYPE: "deb" timeout: 60 - command: | # macOS Mojave Package Builder @@ -394,7 +405,9 @@ steps: buildkite-agent artifact download "build.tar.gz" . 
--step ":darwin: Mojave Build" tar -zxf build.tar.gz echo "+++ :microscope: Starting package build" - ln -s "$(pwd)" /data/job && cd /data/job/build/packages && bash generate_package.sh brew + ln -s "$(pwd)" /data/job + cd /data/job/build/packages + bash generate_package.sh brew label: ":darwin: Mojave Package Builder" agents: - "role=builder-v2-1" @@ -406,7 +419,7 @@ steps: - wait - - command: | + - command: | # Brew Updater echo "--- :arrow_down: Downloading brew files" buildkite-agent artifact download "build/packages/eosio.rb" . --step ":darwin: Mojave Package Builder" label: ":darwin: Brew Updater" @@ -414,12 +427,12 @@ steps: queue: "automation-large-builder-fleet" artifact_paths: - "build/packages/eosio.rb" - timeout: 60 + timeout: 5 - - command: | + - command: | # Git Submodule Regression Check echo "+++ :microscope: Running git submodule regression check" && \ ./scripts/submodule_check.sh - label: "Git submodule regression check" + label: "Git Submodule Regression Check" agents: queue: "automation-large-builder-fleet" - timeout: 240 + timeout: 5 \ No newline at end of file From 4b2b757d63711e3b9823ba5b80c68f98659ce45a Mon Sep 17 00:00:00 2001 From: Todd Fleming Date: Wed, 17 Apr 2019 17:19:54 -0400 Subject: [PATCH 428/680] ship: add partial transaction to trace --- .../eosio/state_history_plugin/state_history_serialization.hpp | 1 + plugins/state_history_plugin/state_history_plugin_abi.cpp | 3 ++- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/plugins/state_history_plugin/include/eosio/state_history_plugin/state_history_serialization.hpp b/plugins/state_history_plugin/include/eosio/state_history_plugin/state_history_serialization.hpp index d419481b24e..9883a36bb2c 100644 --- a/plugins/state_history_plugin/include/eosio/state_history_plugin/state_history_serialization.hpp +++ b/plugins/state_history_plugin/include/eosio/state_history_plugin/state_history_serialization.hpp @@ -570,6 +570,7 @@ datastream& operator<<(datastream& fc::raw::pack(ds, bool(obj.obj.partial)); if (obj.obj.partial) { auto& partial = *obj.obj.partial; + fc::raw::pack(ds, fc::unsigned_int(0)); fc::raw::pack(ds, as_type(partial.expiration)); fc::raw::pack(ds, as_type(partial.ref_block_num)); fc::raw::pack(ds, as_type(partial.ref_block_prefix)); diff --git a/plugins/state_history_plugin/state_history_plugin_abi.cpp b/plugins/state_history_plugin/state_history_plugin_abi.cpp index 059c3c717f4..a5ff928f52c 100644 --- a/plugins/state_history_plugin/state_history_plugin_abi.cpp +++ b/plugins/state_history_plugin/state_history_plugin_abi.cpp @@ -107,7 +107,7 @@ extern const char* const state_history_plugin_abi = R"({ ] }, { - "name": "partial_transaction", "fields": [ + "name": "partial_transaction_v0", "fields": [ { "name": "expiration", "type": "time_point_sec" }, { "name": "ref_block_num", "type": "uint16" }, { "name": "ref_block_prefix", "type": "uint32" }, @@ -465,6 +465,7 @@ extern const char* const state_history_plugin_abi = R"({ { "name": "action_receipt", "types": ["action_receipt_v0"] }, { "name": "action_trace", "types": ["action_trace_v0"] }, + { "name": "partial_transaction", "types": ["partial_transaction_v0"] }, { "name": "transaction_trace", "types": ["transaction_trace_v0"] }, { "name": "transaction_variant", "types": ["transaction_id", "packed_transaction"] }, From 6c1a7ab4a1472b910e1c9b65f7e8ec49761d32f1 Mon Sep 17 00:00:00 2001 From: Todd Fleming Date: Wed, 17 Apr 2019 17:42:56 -0400 Subject: [PATCH 429/680] fix unit tests --- unittests/api_tests.cpp | 28 +++++++++++++++++++------ 
unittests/protocol_feature_tests.cpp | 3 ++- unittests/whitelist_blacklist_tests.cpp | 3 ++- 3 files changed, 26 insertions(+), 8 deletions(-) diff --git a/unittests/api_tests.cpp b/unittests/api_tests.cpp index f2e20a948dc..334db77899f 100644 --- a/unittests/api_tests.cpp +++ b/unittests/api_tests.cpp @@ -1082,7 +1082,10 @@ BOOST_FIXTURE_TEST_CASE(transaction_tests, TESTER) { try { { produce_blocks(10); transaction_trace_ptr trace; - auto c = control->applied_transaction.connect([&]( const transaction_trace_ptr& t) { if (t && t->receipt && t->receipt->status != transaction_receipt::executed) { trace = t; } } ); + auto c = control->applied_transaction.connect([&](std::tuple x) { + auto& t = std::get<0>(x); + if (t && t->receipt && t->receipt->status != transaction_receipt::executed) { trace = t; } + } ); // test error handling on deferred transaction failure CALL_TEST_FUNCTION(*this, "test_transaction", "send_transaction_trigger_error_handler", {}); @@ -1128,7 +1131,10 @@ BOOST_FIXTURE_TEST_CASE(deferred_transaction_tests, TESTER) { try { //schedule { transaction_trace_ptr trace; - auto c = control->applied_transaction.connect([&]( const transaction_trace_ptr& t) { if (t->scheduled) { trace = t; } } ); + auto c = control->applied_transaction.connect([&](std::tuple x) { + auto& t = std::get<0>(x); + if (t->scheduled) { trace = t; } + } ); CALL_TEST_FUNCTION(*this, "test_transaction", "send_deferred_transaction", {} ); BOOST_CHECK(!trace); produce_block( fc::seconds(2) ); @@ -1148,7 +1154,10 @@ BOOST_FIXTURE_TEST_CASE(deferred_transaction_tests, TESTER) { try { { transaction_trace_ptr trace; uint32_t count = 0; - auto c = control->applied_transaction.connect([&]( const transaction_trace_ptr& t) { if (t && t->scheduled) { trace = t; ++count; } } ); + auto c = control->applied_transaction.connect([&](std::tuple x) { + auto& t = std::get<0>(x); + if (t && t->scheduled) { trace = t; ++count; } + } ); CALL_TEST_FUNCTION(*this, "test_transaction", "send_deferred_transaction", {}); BOOST_CHECK_THROW(CALL_TEST_FUNCTION(*this, "test_transaction", "send_deferred_transaction", {}), deferred_tx_duplicate); produce_blocks( 3 ); @@ -1171,7 +1180,10 @@ BOOST_FIXTURE_TEST_CASE(deferred_transaction_tests, TESTER) { try { { transaction_trace_ptr trace; uint32_t count = 0; - auto c = control->applied_transaction.connect([&]( const transaction_trace_ptr& t) { if (t && t->scheduled) { trace = t; ++count; } } ); + auto c = control->applied_transaction.connect([&](std::tuple x) { + auto& t = std::get<0>(x); + if (t && t->scheduled) { trace = t; ++count; } + } ); CALL_TEST_FUNCTION(*this, "test_transaction", "send_deferred_transaction_replace", {}); CALL_TEST_FUNCTION(*this, "test_transaction", "send_deferred_transaction_replace", {}); produce_blocks( 3 ); @@ -1193,7 +1205,10 @@ BOOST_FIXTURE_TEST_CASE(deferred_transaction_tests, TESTER) { try { //schedule and cancel { transaction_trace_ptr trace; - auto c = control->applied_transaction.connect([&]( const transaction_trace_ptr& t) { if (t && t->scheduled) { trace = t; } } ); + auto c = control->applied_transaction.connect([&](std::tuple x) { + auto& t = std::get<0>(x); + if (t && t->scheduled) { trace = t; } + } ); CALL_TEST_FUNCTION(*this, "test_transaction", "send_deferred_transaction", {}); CALL_TEST_FUNCTION(*this, "test_transaction", "cancel_deferred_transaction_success", {}); produce_block( fc::seconds(2) ); @@ -1213,7 +1228,8 @@ BOOST_FIXTURE_TEST_CASE(deferred_transaction_tests, TESTER) { try { //repeated deferred transactions { vector traces; - auto c = 
control->applied_transaction.connect([&]( const transaction_trace_ptr& t) { + auto c = control->applied_transaction.connect([&](std::tuple x) { + auto& t = std::get<0>(x); if (t && t->scheduled) { traces.push_back( t ); } diff --git a/unittests/protocol_feature_tests.cpp b/unittests/protocol_feature_tests.cpp index a18b987e1e7..db1d0f5f9bf 100644 --- a/unittests/protocol_feature_tests.cpp +++ b/unittests/protocol_feature_tests.cpp @@ -540,7 +540,8 @@ BOOST_AUTO_TEST_CASE( no_duplicate_deferred_id_test ) try { c2.produce_empty_block( fc::minutes(10) ); transaction_trace_ptr trace0; - auto h = c2.control->applied_transaction.connect( [&]( const transaction_trace_ptr& t) { + auto h = c2.control->applied_transaction.connect( [&](std::tuple x) { + auto& t = std::get<0>(x); if( t && t->receipt && t->receipt->status == transaction_receipt::expired) { trace0 = t; } diff --git a/unittests/whitelist_blacklist_tests.cpp b/unittests/whitelist_blacklist_tests.cpp index 5df32c19f79..4a372000082 100644 --- a/unittests/whitelist_blacklist_tests.cpp +++ b/unittests/whitelist_blacklist_tests.cpp @@ -498,7 +498,8 @@ BOOST_AUTO_TEST_CASE( actor_blacklist_inline_deferred ) { try { tester2.chain->push_block( b ); } - auto log_trxs = [&]( const transaction_trace_ptr& t) { + auto log_trxs = [&](std::tuple x) { + auto& t = std::get<0>(x); if( !t || t->action_traces.size() == 0 ) return; const auto& act = t->action_traces[0].act; From 72f4315bafef45cee14f74416b979b85c3883e23 Mon Sep 17 00:00:00 2001 From: Todd Fleming Date: Wed, 17 Apr 2019 18:06:43 -0400 Subject: [PATCH 430/680] fix mongo --- plugins/mongo_db_plugin/mongo_db_plugin.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/mongo_db_plugin/mongo_db_plugin.cpp b/plugins/mongo_db_plugin/mongo_db_plugin.cpp index f2f36608667..e05f1a01815 100644 --- a/plugins/mongo_db_plugin/mongo_db_plugin.cpp +++ b/plugins/mongo_db_plugin/mongo_db_plugin.cpp @@ -1695,7 +1695,7 @@ void mongo_db_plugin::plugin_initialize(const variables_map& options) my->accepted_transaction( t ); } )); my->applied_transaction_connection.emplace( - chain.applied_transaction.connect( [&]( std::tuple t ) { + chain.applied_transaction.connect( [&]( std::tuple t ) { my->applied_transaction( std::get<0>(t) ); } )); From 2bad775fbfaebd4e60731461d265b709270ca73f Mon Sep 17 00:00:00 2001 From: Todd Fleming Date: Wed, 17 Apr 2019 18:54:22 -0400 Subject: [PATCH 431/680] ship: include partial with failed tx, not onerror --- .../state_history_plugin/state_history_plugin.hpp | 5 +++++ .../state_history_serialization.hpp | 10 ++++++---- 2 files changed, 11 insertions(+), 4 deletions(-) diff --git a/plugins/state_history_plugin/include/eosio/state_history_plugin/state_history_plugin.hpp b/plugins/state_history_plugin/include/eosio/state_history_plugin/state_history_plugin.hpp index fb704693fba..a682b205e00 100644 --- a/plugins/state_history_plugin/include/eosio/state_history_plugin/state_history_plugin.hpp +++ b/plugins/state_history_plugin/include/eosio/state_history_plugin/state_history_plugin.hpp @@ -56,6 +56,11 @@ struct augmented_transaction_trace { augmented_transaction_trace(const chain::transaction_trace_ptr& trace) : trace{trace} {} + augmented_transaction_trace(const chain::transaction_trace_ptr& trace, + const std::shared_ptr& partial) + : trace{trace} + , partial{partial} {} + augmented_transaction_trace(const chain::transaction_trace_ptr& trace, const chain::signed_transaction& t) : trace{trace} , partial{std::make_shared(t)} {} diff --git 
a/plugins/state_history_plugin/include/eosio/state_history_plugin/state_history_serialization.hpp b/plugins/state_history_plugin/include/eosio/state_history_plugin/state_history_serialization.hpp index 9883a36bb2c..dd6eb611581 100644 --- a/plugins/state_history_plugin/include/eosio/state_history_plugin/state_history_serialization.hpp +++ b/plugins/state_history_plugin/include/eosio/state_history_plugin/state_history_serialization.hpp @@ -563,12 +563,14 @@ datastream& operator<<(datastream& uint8_t stat = eosio::chain::transaction_receipt_header::hard_fail; if (trace.receipt && trace.receipt->status.value == eosio::chain::transaction_receipt_header::soft_fail) stat = eosio::chain::transaction_receipt_header::soft_fail; - fc::raw::pack( - ds, make_history_context_wrapper(obj.db, stat, eosio::augmented_transaction_trace{trace.failed_dtrx_trace})); + fc::raw::pack( // + ds, make_history_context_wrapper( + obj.db, stat, eosio::augmented_transaction_trace{trace.failed_dtrx_trace, obj.obj.partial})); } - fc::raw::pack(ds, bool(obj.obj.partial)); - if (obj.obj.partial) { + bool include_partial = obj.obj.partial && !trace.failed_dtrx_trace; + fc::raw::pack(ds, include_partial); + if (include_partial) { auto& partial = *obj.obj.partial; fc::raw::pack(ds, fc::unsigned_int(0)); fc::raw::pack(ds, as_type(partial.expiration)); From dfa9d891b4e0e36e1cd973a8316ff4c5925a6c29 Mon Sep 17 00:00:00 2001 From: Zach Butler Date: Wed, 17 Apr 2019 20:06:28 -0400 Subject: [PATCH 432/680] Added ctest error handling code so XML is still uploaded when ctest fails --- scripts/long-running-test.sh | 10 ++++++++++ scripts/parallel-test.sh | 10 ++++++++++ scripts/serial-test.sh | 10 ++++++++++ 3 files changed, 30 insertions(+) diff --git a/scripts/long-running-test.sh b/scripts/long-running-test.sh index 60cae2d0b7f..30ec5faaa12 100755 --- a/scripts/long-running-test.sh +++ b/scripts/long-running-test.sh @@ -11,8 +11,12 @@ cd /data/job/build echo "Running tests..." TEST_COUNT=$(ctest -N -L nonparallelizable_tests | grep -i 'Total Tests: ' | cut -d ':' -f 2 | awk '{print $1}') [[ $TEST_COUNT > 0 ]] && echo "$TEST_COUNT tests found." || (echo "ERROR: No tests registered with ctest! Exiting..." && exit 1) +set +e # defer ctest error handling to end echo "$ ctest -L long_running_tests --output-on-failure -T Test" ctest -L long_running_tests --output-on-failure -T Test +EXIT_STATUS=$? +[[ "$EXIT_STATUS" == 0 ]] && set -e +echo "Done running long-running tests." # upload artifacts echo "Uploading artifacts..." XML_FILENAME="test-results.xml" @@ -24,3 +28,9 @@ buildkite-agent artifact upload mongod.log cd build buildkite-agent artifact upload $XML_FILENAME echo "Done uploading artifacts." +# ctest error handling +if [[ "$EXIT_STATUS" != 0 ]]; then + echo "Failing due to non-zero exit status from ctest: $EXIT_STATUS" + echo ' ^^^ scroll up for more information ^^^' + exit $EXIT_STATUS +fi \ No newline at end of file diff --git a/scripts/parallel-test.sh b/scripts/parallel-test.sh index 5174c454e2a..fd53ca55198 100755 --- a/scripts/parallel-test.sh +++ b/scripts/parallel-test.sh @@ -13,8 +13,12 @@ CPU_CORES=$(getconf _NPROCESSORS_ONLN) echo "$CPU_CORES cpu cores detected." TEST_COUNT=$(ctest -N -LE _tests | grep -i 'Total Tests: ' | cut -d ':' -f 2 | awk '{print $1}') [[ $TEST_COUNT > 0 ]] && echo "$TEST_COUNT tests found." || (echo "ERROR: No tests registered with ctest! Exiting..." 
&& exit 1) +set +e # defer ctest error handling to end echo "$ ctest -j $CPU_CORES -LE _tests --output-on-failure -T Test" ctest -j $CPU_CORES -LE _tests --output-on-failure -T Test +EXIT_STATUS=$? +[[ "$EXIT_STATUS" == 0 ]] && set -e +echo "Done running parallelizable tests." # upload artifacts echo "Uploading artifacts..." XML_FILENAME="test-results.xml" @@ -26,3 +30,9 @@ buildkite-agent artifact upload mongod.log cd build buildkite-agent artifact upload $XML_FILENAME echo "Done uploading artifacts." +# ctest error handling +if [[ "$EXIT_STATUS" != 0 ]]; then + echo "Failing due to non-zero exit status from ctest: $EXIT_STATUS" + echo ' ^^^ scroll up for more information ^^^' + exit $EXIT_STATUS +fi \ No newline at end of file diff --git a/scripts/serial-test.sh b/scripts/serial-test.sh index 512229d6272..1d36e081712 100755 --- a/scripts/serial-test.sh +++ b/scripts/serial-test.sh @@ -11,8 +11,12 @@ cd /data/job/build echo "Running tests..." TEST_COUNT=$(ctest -N -L nonparallelizable_tests | grep -i 'Total Tests: ' | cut -d ':' -f 2 | awk '{print $1}') [[ $TEST_COUNT > 0 ]] && echo "$TEST_COUNT tests found." || (echo "ERROR: No tests registered with ctest! Exiting..." && exit 1) +set +e # defer ctest error handling to end echo "$ ctest -L nonparallelizable_tests --output-on-failure -T Test" ctest -L nonparallelizable_tests --output-on-failure -T Test +EXIT_STATUS=$? +[[ "$EXIT_STATUS" == 0 ]] && set -e +echo "Done running non-parallelizable tests." # upload artifacts echo "Uploading artifacts..." XML_FILENAME="test-results.xml" @@ -24,3 +28,9 @@ buildkite-agent artifact upload mongod.log cd build buildkite-agent artifact upload $XML_FILENAME echo "Done uploading artifacts." +# ctest error handling +if [[ "$EXIT_STATUS" != 0 ]]; then + echo "Failing due to non-zero exit status from ctest: $EXIT_STATUS" + echo ' ^^^ scroll up for more information ^^^' + exit $EXIT_STATUS +fi \ No newline at end of file From f2debc2ad30fea08559680fe4d737b90ba9a816a Mon Sep 17 00:00:00 2001 From: Kayan Date: Thu, 18 Apr 2019 14:31:24 +0800 Subject: [PATCH 433/680] add test cases --- unittests/protocol_feature_tests.cpp | 104 +++++++++++++++++- .../ram_restriction_test.abi | 42 +++++++ .../ram_restriction_test.cpp | 47 +++++--- .../ram_restriction_test.wasm | Bin 10560 -> 14336 bytes 4 files changed, 177 insertions(+), 16 deletions(-) diff --git a/unittests/protocol_feature_tests.cpp b/unittests/protocol_feature_tests.cpp index 6194c0edf58..e07c20f01d4 100644 --- a/unittests/protocol_feature_tests.cpp +++ b/unittests/protocol_feature_tests.cpp @@ -1025,7 +1025,9 @@ BOOST_AUTO_TEST_CASE( ram_restriction_test ) { try { const auto& tester1_account = account_name("tester1"); const auto& tester2_account = account_name("tester2"); const auto& alice_account = account_name("alice"); - c.create_accounts( {tester1_account, tester2_account, alice_account} ); + const auto& bob_account = account_name("bob"); + const auto& carol_account = account_name("carol"); + c.create_accounts( {tester1_account, tester2_account, alice_account, bob_account, carol_account} ); c.produce_block(); c.set_code( tester1_account, contracts::ram_restriction_test_wasm() ); c.set_abi( tester1_account, contracts::ram_restriction_test_abi().data() ); @@ -1034,6 +1036,106 @@ BOOST_AUTO_TEST_CASE( ram_restriction_test ) { try { c.set_abi( tester2_account, contracts::ram_restriction_test_abi().data() ); c.produce_block(); + // basic + c.push_action(tester1_account, N(setdata), alice_account, mutable_variant_object() + ("len1", 10) + ("len2", 0) + 
("payer", "alice")); + + // basic, negative + BOOST_REQUIRE_EXCEPTION( + c.push_action(tester1_account, N(setdata), alice_account, mutable_variant_object() + ("len1", 20) + ("len2", 0) + ("payer", "bob")), + missing_auth_exception, + fc_exception_message_starts_with( "missing authority of bob" ) ); + + // migrate from table1 to table2, negative + BOOST_REQUIRE_EXCEPTION(c.push_action(tester1_account, N(setdata), carol_account, mutable_variant_object() + ("len1", 0) + ("len2", 10) + ("payer", "alice")), + missing_auth_exception, + fc_exception_message_starts_with( "missing authority of alice" ) ); + + // notify, increase RAM usage, negative + BOOST_REQUIRE_EXCEPTION(c.push_action(tester2_account, N(notifysetdat), alice_account, mutable_variant_object() + ("acctonotify", "tester1") + ("len1", 20) + ("len2", 0) + ("payer", "alice")), + subjective_block_production_exception, + fc_exception_message_starts_with( "Cannot charge RAM to other accounts during notify" ) ); + + // notify migrate, negative + BOOST_REQUIRE_EXCEPTION(c.push_action(tester2_account, N(notifysetdat), alice_account, mutable_variant_object() + ("acctonotify", "tester1") + ("len1", 0) + ("len2", 10) + ("payer", "alice")), + subjective_block_production_exception, + fc_exception_message_starts_with( "Cannot charge RAM to other accounts during notify" ) ); + + // notify defer result in subjective_block_production_exception + BOOST_REQUIRE_EXCEPTION(c.push_action(tester2_account, N(notifydefer), alice_account, mutable_variant_object() + ("acctonotify", "tester1") + ("senderid", 123) + ("payer", "alice")), + subjective_block_production_exception, + fc_exception_message_starts_with( "Cannot charge RAM to other accounts during notify" ) ); + + const auto& pfm = c.control->get_protocol_feature_manager(); + const auto& d = pfm.get_builtin_digest( builtin_protocol_feature_t::ram_restrictions ); + BOOST_REQUIRE( d ); + + c.preactivate_protocol_features( {*d} ); + c.produce_block(); + + // migration is ok + c.push_action(tester1_account, N(setdata), carol_account, mutable_variant_object() + ("len1", 0) + ("len2", 10) + ("payer", "alice")); + + // migration failed if overall is increasing RAM + BOOST_REQUIRE_EXCEPTION(c.push_action(tester1_account, N(setdata), carol_account, mutable_variant_object() + ("len1", 12) + ("len2", 0) + ("payer", "alice")), + unauthorized_ram_usage_increase, + fc_exception_message_starts_with( "unprivileged contract cannot increase RAM usage of another account that has not authorized the action" ) ); + + // notify migrate, ok + c.push_action(tester2_account, N(notifysetdat), alice_account, mutable_variant_object() + ("acctonotify", "tester1") + ("len1", 10) + ("len2", 0) + ("payer", "alice")); + + // notify migrate with overall usage increased, negative + BOOST_REQUIRE_EXCEPTION(c.push_action(tester2_account, N(notifysetdat), alice_account, mutable_variant_object() + ("acctonotify", "tester1") + ("len1", 0) + ("len2", 11) + ("payer", "alice")), + unauthorized_ram_usage_increase, + fc_exception_message_starts_with( "unprivileged contract cannot increase RAM usage of another account within a notify context" ) ); + + // notify migrate with overall increased, paid by receiver + c.push_action(tester2_account, N(notifysetdat), alice_account, mutable_variant_object() + ("acctonotify", "tester1") + ("len1", 0) + ("len2", 12) + ("payer", "tester1")); + + // notify defer now result in objective exception + BOOST_REQUIRE_EXCEPTION(c.push_action(tester2_account, N(notifydefer), alice_account, mutable_variant_object() + 
("acctonotify", "tester1") + ("senderid", 124) + ("payer", "alice")), + action_validate_exception, + fc_exception_message_starts_with("cannot bill RAM usage of deferred transactions to another account within notify context")); } FC_LOG_AND_RETHROW() } diff --git a/unittests/test-contracts/ram_restriction_test/ram_restriction_test.abi b/unittests/test-contracts/ram_restriction_test/ram_restriction_test.abi index 662e6dfed91..9c731340929 100644 --- a/unittests/test-contracts/ram_restriction_test/ram_restriction_test.abi +++ b/unittests/test-contracts/ram_restriction_test/ram_restriction_test.abi @@ -17,6 +17,24 @@ } ] }, + { + "name": "notifydefer", + "base": "", + "fields": [ + { + "name": "acctonotify", + "type": "name" + }, + { + "name": "senderid", + "type": "uint32" + }, + { + "name": "payer", + "type": "name" + } + ] + }, { "name": "notifysetdat", "base": "", @@ -39,6 +57,20 @@ } ] }, + { + "name": "senddefer", + "base": "", + "fields": [ + { + "name": "senderid", + "type": "uint32" + }, + { + "name": "payer", + "type": "name" + } + ] + }, { "name": "setdata", "base": "", @@ -59,11 +91,21 @@ } ], "actions": [ + { + "name": "notifydefer", + "type": "notifydefer", + "ricardian_contract": "" + }, { "name": "notifysetdat", "type": "notifysetdat", "ricardian_contract": "" }, + { + "name": "senddefer", + "type": "senddefer", + "ricardian_contract": "" + }, { "name": "setdata", "type": "setdata", diff --git a/unittests/test-contracts/ram_restriction_test/ram_restriction_test.cpp b/unittests/test-contracts/ram_restriction_test/ram_restriction_test.cpp index ec51c38fa31..ac344e9a59b 100644 --- a/unittests/test-contracts/ram_restriction_test/ram_restriction_test.cpp +++ b/unittests/test-contracts/ram_restriction_test/ram_restriction_test.cpp @@ -29,21 +29,15 @@ class [[eosio::contract]] ram_restriction_test : public eosio::contract { data.resize(len, 0); auto it = ta.find(0); if (it == ta.end()) { - if (len) { - ta.emplace(payer, [&](auto &v) { - v.key = 0; - v.value = data; - }); - } + ta.emplace(payer, [&](auto &v) { + v.key = 0; + v.value = data; + }); } else { - if (len) { - ta.modify(it, payer, [&](auto &v) { - v.key = 0; - v.value = data; - }); - } else { - ta.erase(it); - } + ta.modify(it, payer, [&](auto &v) { + v.key = 0; + v.value = data; + }); } } @@ -58,8 +52,31 @@ class [[eosio::contract]] ram_restriction_test : public eosio::contract { require_recipient(acctonotify); } - [[eosio::on_notify("testacc::notifysetdat")]] + [[eosio::on_notify("tester2::notifysetdat")]] void on_notify_setdata(eosio::name acctonotify, int len1, int len2, eosio::name payer) { setdata(len1, len2, payer); } + + [[eosio::action]] + void senddefer( uint32_t senderid, eosio::name payer ) { + eosio::transaction trx; + trx.actions.emplace_back( + std::vector{{_self, "active"_n}}, + get_self(), + "noop"_n, + std::make_tuple() + ); + trx.send(senderid, payer); + } + + [[eosio::action]] + void notifydefer(eosio::name acctonotify, uint32_t senderid, eosio::name payer) { + require_recipient(acctonotify); + } + + [[eosio::on_notify("tester2::notifydefer")]] + void on_notifydefer(eosio::name acctonotify, uint32_t senderid, eosio::name payer) { + senddefer(senderid, payer); + } + }; diff --git a/unittests/test-contracts/ram_restriction_test/ram_restriction_test.wasm b/unittests/test-contracts/ram_restriction_test/ram_restriction_test.wasm index 62e979122b64f8d44a8345c1835abc808a511c48..32909f6e230a23f0d1713e7c0de65299a7f563c6 100755 GIT binary patch literal 14336 
patch
[GIT binary patch data for unittests/test-contracts/ram_restriction_test/ram_restriction_test.wasm (Bin 10560 -> 14336 bytes) omitted]

From bacee727f4e1f3e75f3b736cf7f5846018c34742 Mon Sep 17 00:00:00 2001
From: arhag
Date: Thu, 18 Apr 2019 10:41:07 -0400
Subject: [PATCH 434/680] just one spurious keyword was all it took to cause consensus failure; aren't blockchains fun?!
--- libraries/chain/eosio_contract.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/chain/eosio_contract.cpp b/libraries/chain/eosio_contract.cpp index 9b6b157984a..1fe849abb48 100644 --- a/libraries/chain/eosio_contract.cpp +++ b/libraries/chain/eosio_contract.cpp @@ -157,7 +157,7 @@ void apply_eosio_setcode(apply_context& context) { const code_object& old_code_entry = db.get(boost::make_tuple(account.code_hash, account.vm_type, account.vm_version)); EOS_ASSERT( old_code_entry.code_hash != code_hash, set_exact_code, "contract is already running this version of code" ); - int64_t old_size = (int64_t)old_code_entry.code.size() * config::setcode_ram_bytes_multiplier; + old_size = (int64_t)old_code_entry.code.size() * config::setcode_ram_bytes_multiplier; if( old_code_entry.code_ref_count == 1 ) { db.remove(old_code_entry); } else { From f07d14a2f863766132c2a7fcc224e676de99e976 Mon Sep 17 00:00:00 2001 From: Bucky Kittinger Date: Thu, 18 Apr 2019 14:25:56 -0400 Subject: [PATCH 435/680] resolved requested changes --- scripts/eosio_build_amazon.sh | 2 +- scripts/eosio_build_ubuntu.sh | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/eosio_build_amazon.sh b/scripts/eosio_build_amazon.sh index 5dc9ba5381d..3184bb610db 100755 --- a/scripts/eosio_build_amazon.sh +++ b/scripts/eosio_build_amazon.sh @@ -12,7 +12,7 @@ if [[ "${OS_NAME}" == "Amazon Linux AMI" ]]; then # Amazonlinux1 DEP_ARRAY=( sudo procps util-linux which gcc72 gcc72-c++ autoconf automake libtool make doxygen graphviz \ bzip2 bzip2-devel openssl-devel gmp gmp-devel libstdc++72 python27 python27-devel python36 python36-devel \ - libedit-devel ncurses-devel swig curl -LO file libcurl-devel libusb1-devel + libedit-devel ncurses-devel swig curl file libcurl-devel libusb1-devel ) else # Amazonlinux2 DEP_ARRAY=( diff --git a/scripts/eosio_build_ubuntu.sh b/scripts/eosio_build_ubuntu.sh index 18282795b72..576af1178e7 100755 --- a/scripts/eosio_build_ubuntu.sh +++ b/scripts/eosio_build_ubuntu.sh @@ -162,7 +162,7 @@ if [ "${BOOSTVERSION}" != "${BOOST_VERSION_MAJOR}0${BOOST_VERSION_MINOR}0${BOOST && tar -xjf boost_$BOOST_VERSION.tar.bz2 \ && cd $BOOST_ROOT \ && ./bootstrap.sh --prefix=$BOOST_ROOT \ - && ./b2 -q -j2 install \ + && ./b2 -q -j"${JOBS}" install \ && cd .. 
\ && rm -f boost_$BOOST_VERSION.tar.bz2 \ && rm -rf $BOOST_LINK_LOCATION \ From f997ebbee43bcf268d300f4453b0685d56aee0a0 Mon Sep 17 00:00:00 2001 From: Bucky Kittinger Date: Thu, 18 Apr 2019 14:35:21 -0400 Subject: [PATCH 436/680] update submodules for PR #7107 --- libraries/fc | 2 +- libraries/wabt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/libraries/fc b/libraries/fc index 800d6c21b8b..8221d5c6a7a 160000 --- a/libraries/fc +++ b/libraries/fc @@ -1 +1 @@ -Subproject commit 800d6c21b8be9316a651e926a5b2affcc3c52c8e +Subproject commit 8221d5c6a7af55d7b6742341651017685b5ef7a6 diff --git a/libraries/wabt b/libraries/wabt index ae8189a9d45..a136149d941 160000 --- a/libraries/wabt +++ b/libraries/wabt @@ -1 +1 @@ -Subproject commit ae8189a9d45e9453bd947c778bf5f3d7255b0627 +Subproject commit a136149d941df2942a25a4b66d8865fada0a325e From a5fe39b25ca47d5d00496bcf52a9ca28a5e38474 Mon Sep 17 00:00:00 2001 From: arhag Date: Thu, 18 Apr 2019 16:45:59 -0400 Subject: [PATCH 437/680] adjustments to unit test for RAM_RESTRICTIONS protocol feature --- unittests/contracts.hpp.in | 28 +- unittests/protocol_feature_tests.cpp | 294 +++++++++++++----- unittests/test-contracts/CMakeLists.txt | 1 + .../ram_restriction_test/CMakeLists.txt | 6 - .../ram_restriction_test.cpp | 82 ----- .../ram_restriction_test.wasm | Bin 14336 -> 0 bytes .../ram_restrictions_test/CMakeLists.txt | 6 + .../ram_restrictions_test.abi} | 36 ++- .../ram_restrictions_test.cpp | 62 ++++ .../ram_restrictions_test.hpp | 45 +++ .../ram_restrictions_test.wasm | Bin 0 -> 14329 bytes 11 files changed, 378 insertions(+), 182 deletions(-) delete mode 100644 unittests/test-contracts/ram_restriction_test/CMakeLists.txt delete mode 100644 unittests/test-contracts/ram_restriction_test/ram_restriction_test.cpp delete mode 100755 unittests/test-contracts/ram_restriction_test/ram_restriction_test.wasm create mode 100644 unittests/test-contracts/ram_restrictions_test/CMakeLists.txt rename unittests/test-contracts/{ram_restriction_test/ram_restriction_test.abi => ram_restrictions_test/ram_restrictions_test.abi} (81%) create mode 100644 unittests/test-contracts/ram_restrictions_test/ram_restrictions_test.cpp create mode 100644 unittests/test-contracts/ram_restrictions_test/ram_restrictions_test.hpp create mode 100644 unittests/test-contracts/ram_restrictions_test/ram_restrictions_test.wasm diff --git a/unittests/contracts.hpp.in b/unittests/contracts.hpp.in index 33bc62ed93f..efafa1846c8 100644 --- a/unittests/contracts.hpp.in +++ b/unittests/contracts.hpp.in @@ -39,20 +39,20 @@ namespace eosio { MAKE_READ_WASM_ABI(before_preactivate_eosio_bios, eosio.bios, contracts/old_versions/v1.6.0-rc3) // Contracts in `eos/unittests/unittests/test-contracts' directory - MAKE_READ_WASM_ABI(asserter, asserter, test-contracts) - MAKE_READ_WASM_ABI(deferred_test, deferred_test, test-contracts) - MAKE_READ_WASM_ABI(get_sender_test, get_sender_test, test-contracts) - MAKE_READ_WASM_ABI(noop, noop, test-contracts) - MAKE_READ_WASM_ABI(payloadless, payloadless, test-contracts) - MAKE_READ_WASM_ABI(proxy, proxy, test-contracts) - MAKE_READ_WASM_ABI(reject_all, reject_all, test-contracts) - MAKE_READ_WASM_ABI(restrict_action_test, restrict_action_test, test-contracts) - MAKE_READ_WASM_ABI(snapshot_test, snapshot_test, test-contracts) - MAKE_READ_WASM_ABI(test_api, test_api, test-contracts) - MAKE_READ_WASM_ABI(test_api_db, test_api_db, test-contracts) - MAKE_READ_WASM_ABI(test_api_multi_index, test_api_multi_index, test-contracts) - 
MAKE_READ_WASM_ABI(test_ram_limit, test_ram_limit, test-contracts) - MAKE_READ_WASM_ABI(ram_restriction_test, ram_restriction_test, test-contracts) + MAKE_READ_WASM_ABI(asserter, asserter, test-contracts) + MAKE_READ_WASM_ABI(deferred_test, deferred_test, test-contracts) + MAKE_READ_WASM_ABI(get_sender_test, get_sender_test, test-contracts) + MAKE_READ_WASM_ABI(noop, noop, test-contracts) + MAKE_READ_WASM_ABI(payloadless, payloadless, test-contracts) + MAKE_READ_WASM_ABI(proxy, proxy, test-contracts) + MAKE_READ_WASM_ABI(ram_restrictions_test, ram_restrictions_test, test-contracts) + MAKE_READ_WASM_ABI(reject_all, reject_all, test-contracts) + MAKE_READ_WASM_ABI(restrict_action_test, restrict_action_test, test-contracts) + MAKE_READ_WASM_ABI(snapshot_test, snapshot_test, test-contracts) + MAKE_READ_WASM_ABI(test_api, test_api, test-contracts) + MAKE_READ_WASM_ABI(test_api_db, test_api_db, test-contracts) + MAKE_READ_WASM_ABI(test_api_multi_index, test_api_multi_index, test-contracts) + MAKE_READ_WASM_ABI(test_ram_limit, test_ram_limit, test-contracts) }; } /// eosio::testing } /// eosio diff --git a/unittests/protocol_feature_tests.cpp b/unittests/protocol_feature_tests.cpp index e07c20f01d4..90f073235e4 100644 --- a/unittests/protocol_feature_tests.cpp +++ b/unittests/protocol_feature_tests.cpp @@ -1019,123 +1019,269 @@ BOOST_AUTO_TEST_CASE( get_sender_test ) { try { ); } FC_LOG_AND_RETHROW() } -BOOST_AUTO_TEST_CASE( ram_restriction_test ) { try { +BOOST_AUTO_TEST_CASE( ram_restrictions_test ) { try { tester c( setup_policy::preactivate_feature_and_new_bios ); const auto& tester1_account = account_name("tester1"); const auto& tester2_account = account_name("tester2"); const auto& alice_account = account_name("alice"); const auto& bob_account = account_name("bob"); - const auto& carol_account = account_name("carol"); - c.create_accounts( {tester1_account, tester2_account, alice_account, bob_account, carol_account} ); + c.create_accounts( {tester1_account, tester2_account, alice_account, bob_account} ); c.produce_block(); - c.set_code( tester1_account, contracts::ram_restriction_test_wasm() ); - c.set_abi( tester1_account, contracts::ram_restriction_test_abi().data() ); + c.set_code( tester1_account, contracts::ram_restrictions_test_wasm() ); + c.set_abi( tester1_account, contracts::ram_restrictions_test_abi().data() ); c.produce_block(); - c.set_code( tester2_account, contracts::ram_restriction_test_wasm() ); - c.set_abi( tester2_account, contracts::ram_restriction_test_abi().data() ); + c.set_code( tester2_account, contracts::ram_restrictions_test_wasm() ); + c.set_abi( tester2_account, contracts::ram_restrictions_test_abi().data() ); c.produce_block(); - // basic - c.push_action(tester1_account, N(setdata), alice_account, mutable_variant_object() + // Basic setup + c.push_action( tester1_account, N(setdata), alice_account, mutable_variant_object() ("len1", 10) ("len2", 0) - ("payer", "alice")); + ("payer", alice_account) + ); - // basic, negative + // Cannot bill more RAM to another account that has not authorized the action. 
BOOST_REQUIRE_EXCEPTION( - c.push_action(tester1_account, N(setdata), alice_account, mutable_variant_object() - ("len1", 20) - ("len2", 0) - ("payer", "bob")), + c.push_action( tester1_account, N(setdata), bob_account, mutable_variant_object() + ("len1", 20) + ("len2", 0) + ("payer", alice_account) + ), missing_auth_exception, - fc_exception_message_starts_with( "missing authority of bob" ) ); + fc_exception_message_starts_with( "missing authority" ) + ); - // migrate from table1 to table2, negative - BOOST_REQUIRE_EXCEPTION(c.push_action(tester1_account, N(setdata), carol_account, mutable_variant_object() - ("len1", 0) - ("len2", 10) - ("payer", "alice")), + // Cannot migrate data from table1 to table2 paid by another account + // in a RAM usage neutral way without the authority of that account. + BOOST_REQUIRE_EXCEPTION( + c.push_action( tester1_account, N(setdata), bob_account, mutable_variant_object() + ("len1", 0) + ("len2", 10) + ("payer", alice_account) + ), missing_auth_exception, - fc_exception_message_starts_with( "missing authority of alice" ) ); - - // notify, increase RAM usage, negative - BOOST_REQUIRE_EXCEPTION(c.push_action(tester2_account, N(notifysetdat), alice_account, mutable_variant_object() - ("acctonotify", "tester1") - ("len1", 20) - ("len2", 0) - ("payer", "alice")), + fc_exception_message_starts_with( "missing authority" ) + ); + + // Cannot bill more RAM to another account within a notification + // even if the account authorized the original action. + // This is due to the subjective mitigation in place. + BOOST_REQUIRE_EXCEPTION( + c.push_action( tester2_account, N(notifysetdat), alice_account, mutable_variant_object() + ("acctonotify", tester1_account) + ("len1", 20) + ("len2", 0) + ("payer", alice_account) + ), subjective_block_production_exception, - fc_exception_message_starts_with( "Cannot charge RAM to other accounts during notify" ) ); + fc_exception_message_is( "Cannot charge RAM to other accounts during notify." ) + ); - // notify migrate, negative - BOOST_REQUIRE_EXCEPTION(c.push_action(tester2_account, N(notifysetdat), alice_account, mutable_variant_object() - ("acctonotify", "tester1") - ("len1", 0) - ("len2", 10) - ("payer", "alice")), + // Cannot migrate data from table1 to table2 paid by another account + // in a RAM usage neutral way within a notification. + // This is due to the subjective mitigation in place. + BOOST_REQUIRE_EXCEPTION( + c.push_action( tester2_account, N(notifysetdat), alice_account, mutable_variant_object() + ("acctonotify", tester1_account) + ("len1", 0) + ("len2", 10) + ("payer", alice_account) + ), subjective_block_production_exception, - fc_exception_message_starts_with( "Cannot charge RAM to other accounts during notify" ) ); + fc_exception_message_is( "Cannot charge RAM to other accounts during notify." ) + ); - // notify defer result in subjective_block_production_exception - BOOST_REQUIRE_EXCEPTION(c.push_action(tester2_account, N(notifydefer), alice_account, mutable_variant_object() - ("acctonotify", "tester1") + // Cannot send deferred transaction paid by another account that has not authorized the action. + BOOST_REQUIRE_EXCEPTION( + c.push_action( tester1_account, N(senddefer), bob_account, mutable_variant_object() + ("senderid", 123) + ("payer", alice_account) + ), + missing_auth_exception, + fc_exception_message_starts_with( "missing authority" ) + ); + + // Cannot send deferred transaction paid by another account within a notification + // even if the account authorized the original action. 
+ // This is due to the subjective mitigation in place. + BOOST_REQUIRE_EXCEPTION( + c.push_action( tester2_account, N(notifydefer), alice_account, mutable_variant_object() + ("acctonotify", tester1_account) + ("senderid", 123) + ("payer", alice_account) + ), + subjective_block_production_exception, + fc_exception_message_is( "Cannot charge RAM to other accounts during notify." ) + ); + + // Can send deferred transaction paid by another account if it has authorized the action. + c.push_action( tester1_account, N(senddefer), alice_account, mutable_variant_object() ("senderid", 123) - ("payer", "alice")), + ("payer", alice_account) + ); + c.produce_block(); + + // Can migrate data from table1 to table2 paid by another account + // in a RAM usage neutral way with the authority of that account. + c.push_action( tester1_account, N(setdata), alice_account, mutable_variant_object() + ("len1", 0) + ("len2", 10) + ("payer", alice_account) + ); + + c.produce_block(); + + // Disable the subjective mitigation + c.close(); + auto cfg = c.get_config(); + cfg.disable_all_subjective_mitigations = true; + c.init( cfg, nullptr ); + + c.produce_block(); + + // Without the subjective mitigation, it is now possible to bill more RAM to another account + // within a notification if the account authorized the original action. + // This is due to the subjective mitigation in place. + c.push_action( tester2_account, N(notifysetdat), alice_account, mutable_variant_object() + ("acctonotify", tester1_account) + ("len1", 10) + ("len2", 10) + ("payer", alice_account) + ); + + // Reset back to the original state. + c.push_action( tester1_account, N(setdata), alice_account, mutable_variant_object() + ("len1", 10) + ("len2", 0) + ("payer", alice_account) + ); + c.produce_block(); + + // Re-enable the subjective mitigation + c.close(); + cfg.disable_all_subjective_mitigations = false; + c.init( cfg, nullptr ); + + c.produce_block(); + + // Still cannot bill more RAM to another account within a notification + // even if the account authorized the original action. + // This is due to the subjective mitigation in place. + BOOST_REQUIRE_EXCEPTION( + c.push_action( tester2_account, N(notifysetdat), alice_account, mutable_variant_object() + ("acctonotify", tester1_account) + ("len1", 10) + ("len2", 10) + ("payer", alice_account) + ), subjective_block_production_exception, - fc_exception_message_starts_with( "Cannot charge RAM to other accounts during notify" ) ); + fc_exception_message_is( "Cannot charge RAM to other accounts during notify." ) + ); const auto& pfm = c.control->get_protocol_feature_manager(); const auto& d = pfm.get_builtin_digest( builtin_protocol_feature_t::ram_restrictions ); BOOST_REQUIRE( d ); + // Activate RAM_RESTRICTIONS protocol feature (this would also disable the subjective mitigation). c.preactivate_protocol_features( {*d} ); c.produce_block(); - // migration is ok - c.push_action(tester1_account, N(setdata), carol_account, mutable_variant_object() - ("len1", 0) - ("len2", 10) - ("payer", "alice")); + // Cannot send deferred transaction paid by another account that has not authorized the action. + // This still fails objectively, but now with another error message. 
+   BOOST_REQUIRE_EXCEPTION(
+      c.push_action( tester1_account, N(senddefer), bob_account, mutable_variant_object()
+         ("senderid", 123)
+         ("payer", alice_account)
+      ),
+      action_validate_exception,
+      fc_exception_message_starts_with( "cannot bill RAM usage of deferred transaction to another account that has not authorized the action" )
+   );
 
-   // migration failed if overall is increasing RAM
-   BOOST_REQUIRE_EXCEPTION(c.push_action(tester1_account, N(setdata), carol_account, mutable_variant_object()
-      ("len1", 12)
-      ("len2", 0)
-      ("payer", "alice")),
+   // Cannot send deferred transaction paid by another account within a notification
+   // even if the account authorized the original action.
+   // This now fails with an objective error.
+   BOOST_REQUIRE_EXCEPTION(
+      c.push_action( tester2_account, N(notifydefer), alice_account, mutable_variant_object()
+         ("acctonotify", tester1_account)
+         ("senderid", 123)
+         ("payer", alice_account)
+      ),
+      action_validate_exception,
+      fc_exception_message_is( "cannot bill RAM usage of deferred transactions to another account within notify context" )
+   );
+
+   // Cannot bill more RAM to another account within a notification
+   // even if the account authorized the original action.
+   // This now fails with an objective error.
+   BOOST_REQUIRE_EXCEPTION(
+      c.push_action( tester2_account, N(notifysetdat), alice_account, mutable_variant_object()
+         ("acctonotify", tester1_account)
+         ("len1", 20)
+         ("len2", 0)
+         ("payer", alice_account)
+      ),
       unauthorized_ram_usage_increase,
-      fc_exception_message_starts_with( "unprivileged contract cannot increase RAM usage of another account that has not authorized the action" ) );
+      fc_exception_message_starts_with( "unprivileged contract cannot increase RAM usage of another account within a notify context" )
+   );
 
-   // notify migrate, ok
-   c.push_action(tester2_account, N(notifysetdat), alice_account, mutable_variant_object()
+   // Cannot bill more RAM to another account that has not authorized the action.
+   // This still fails objectively, but now with another error message.
+   BOOST_REQUIRE_EXCEPTION(
+      c.push_action( tester1_account, N(setdata), bob_account, mutable_variant_object()
+         ("len1", 20)
+         ("len2", 0)
+         ("payer", alice_account)
+      ),
+      unauthorized_ram_usage_increase,
+      fc_exception_message_starts_with( "unprivileged contract cannot increase RAM usage of another account that has not authorized the action" )
+   );
+
+   // Still can send deferred transaction paid by another account if it has authorized the action.
+   c.push_action( tester1_account, N(senddefer), alice_account, mutable_variant_object()
+      ("senderid", 123)
+      ("payer", alice_account)
+   );
+   c.produce_block();
+
+   // Now can migrate data from table1 to table2 paid by another account
+   // in a RAM usage neutral way without the authority of that account.
+   c.push_action( tester1_account, N(setdata), bob_account, mutable_variant_object()
+      ("len1", 0)
+      ("len2", 10)
+      ("payer", alice_account)
+   );
+
+   // Now can also migrate data from table2 to table1 paid by another account
+   // in a RAM usage neutral way even within a notification.
+ c.push_action( tester2_account, N(notifysetdat), bob_account, mutable_variant_object() ("acctonotify", "tester1") ("len1", 10) ("len2", 0) - ("payer", "alice")); + ("payer", "alice") + ); - // notify migrate with overall usage increased, negative - BOOST_REQUIRE_EXCEPTION(c.push_action(tester2_account, N(notifysetdat), alice_account, mutable_variant_object() + // Of course it should also be possible to migrate data from table1 to table2 paid by another account + // in a way that reduces RAM usage as well, even within a notification. + c.push_action( tester2_account, N(notifysetdat), bob_account, mutable_variant_object() ("acctonotify", "tester1") ("len1", 0) - ("len2", 11) - ("payer", "alice")), - unauthorized_ram_usage_increase, - fc_exception_message_starts_with( "unprivileged contract cannot increase RAM usage of another account within a notify context" ) ); + ("len2", 5) + ("payer", "alice") + ); - // notify migrate with overall increased, paid by receiver - c.push_action(tester2_account, N(notifysetdat), alice_account, mutable_variant_object() + // It should also still be possible for the receiver to take over payment of the RAM + // if it is necessary to increase RAM usage without the authorization of the original payer. + // This should all be possible to do even within a notification. + c.push_action( tester2_account, N(notifysetdat), bob_account, mutable_variant_object() ("acctonotify", "tester1") - ("len1", 0) - ("len2", 12) - ("payer", "tester1")); + ("len1", 10) + ("len2", 10) + ("payer", "tester1") + ); - // notify defer now result in objective exception - BOOST_REQUIRE_EXCEPTION(c.push_action(tester2_account, N(notifydefer), alice_account, mutable_variant_object() - ("acctonotify", "tester1") - ("senderid", 124) - ("payer", "alice")), - action_validate_exception, - fc_exception_message_starts_with("cannot bill RAM usage of deferred transactions to another account within notify context")); + c.produce_block(); } FC_LOG_AND_RETHROW() } diff --git a/unittests/test-contracts/CMakeLists.txt b/unittests/test-contracts/CMakeLists.txt index 90b0c6484f6..1aded520712 100644 --- a/unittests/test-contracts/CMakeLists.txt +++ b/unittests/test-contracts/CMakeLists.txt @@ -14,6 +14,7 @@ add_subdirectory( integration_test ) add_subdirectory( noop ) add_subdirectory( payloadless ) add_subdirectory( proxy ) +add_subdirectory( ram_restrictions_test ) add_subdirectory( reject_all ) add_subdirectory( restrict_action_test ) add_subdirectory( snapshot_test ) diff --git a/unittests/test-contracts/ram_restriction_test/CMakeLists.txt b/unittests/test-contracts/ram_restriction_test/CMakeLists.txt deleted file mode 100644 index 74710d96a8b..00000000000 --- a/unittests/test-contracts/ram_restriction_test/CMakeLists.txt +++ /dev/null @@ -1,6 +0,0 @@ -if( EOSIO_COMPILE_TEST_CONTRACTS ) - add_contract( ram_restriction_test ram_restriction_test ram_restriction_test.cpp ) -else() - configure_file( ${CMAKE_CURRENT_SOURCE_DIR}/ram_restriction_test.wasm ${CMAKE_CURRENT_BINARY_DIR}/ram_restriction_test.wasm COPYONLY ) - configure_file( ${CMAKE_CURRENT_SOURCE_DIR}/ram_restriction_test.abi ${CMAKE_CURRENT_BINARY_DIR}/ram_restriction_test.abi COPYONLY ) -endif() diff --git a/unittests/test-contracts/ram_restriction_test/ram_restriction_test.cpp b/unittests/test-contracts/ram_restriction_test/ram_restriction_test.cpp deleted file mode 100644 index ac344e9a59b..00000000000 --- a/unittests/test-contracts/ram_restriction_test/ram_restriction_test.cpp +++ /dev/null @@ -1,82 +0,0 @@ -/** - * @file - * @copyright defined 
in eos/LICENSE - */ - -#include -#include - -class [[eosio::contract]] ram_restriction_test : public eosio::contract { - -public: - struct [[eosio::table]] data { - uint64_t key; - std::vector value; - - uint64_t primary_key() const { return key; } - }; - - typedef eosio::multi_index<"tablea"_n, data> tablea; - typedef eosio::multi_index<"tableb"_n, data> tableb; - -public: - using eosio::contract::contract; - - template - void setdata_(int len, eosio::name payer) { - Table ta(_self, 0); - std::vector data; - data.resize(len, 0); - auto it = ta.find(0); - if (it == ta.end()) { - ta.emplace(payer, [&](auto &v) { - v.key = 0; - v.value = data; - }); - } else { - ta.modify(it, payer, [&](auto &v) { - v.key = 0; - v.value = data; - }); - } - } - - [[eosio::action]] - void setdata(int len1, int len2, eosio::name payer ) { - setdata_(len1, payer); - setdata_(len2, payer); - } - - [[eosio::action]] - void notifysetdat(eosio::name acctonotify, int len1, int len2, eosio::name payer) { - require_recipient(acctonotify); - } - - [[eosio::on_notify("tester2::notifysetdat")]] - void on_notify_setdata(eosio::name acctonotify, int len1, int len2, eosio::name payer) { - setdata(len1, len2, payer); - } - - [[eosio::action]] - void senddefer( uint32_t senderid, eosio::name payer ) { - eosio::transaction trx; - trx.actions.emplace_back( - std::vector{{_self, "active"_n}}, - get_self(), - "noop"_n, - std::make_tuple() - ); - trx.send(senderid, payer); - } - - [[eosio::action]] - void notifydefer(eosio::name acctonotify, uint32_t senderid, eosio::name payer) { - require_recipient(acctonotify); - } - - [[eosio::on_notify("tester2::notifydefer")]] - void on_notifydefer(eosio::name acctonotify, uint32_t senderid, eosio::name payer) { - senddefer(senderid, payer); - } - -}; diff --git a/unittests/test-contracts/ram_restriction_test/ram_restriction_test.wasm b/unittests/test-contracts/ram_restriction_test/ram_restriction_test.wasm deleted file mode 100755 index 32909f6e230a23f0d1713e7c0de65299a7f563c6..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 14336 zcmeI3U2J97Rmb=Fy7zdFJwDU8Q)e=Y_B{;XDM=Zci4&5R_!yEj{h%MJP{fPJv2QZI z9)FDONfQ*t4$1?leF#t>5v>5VD3B_l;^P4jFo{xyhagq70V-992WUkVic|$5fkgQI z*FM+xj>j`e5RgFQN$x#opS{;!d#$zC`meRmb@Q7mk#jD3BszS`xl`Hp`R(m(cgjWE zr(#_s+=gpDqw`L!(RrSq=Zf3(JbyfD>D#)dBEdlJ-Kj`}BW`2eL5$A~lIoFi*UVry zw8g)lOeEda`^PrBtBZ4s-Fv$m8{I`0sjZw}*jid!om-sWnxETT`l+sq)jZl=+gw_k zo8R2*Zfvd9f2Zrwg>x)>b!%>GX{GCOHBD#=E#m&}&`2YDXjlMq3u}vAH=-fM z;yrWsE+NLH>67k&TFO3Mmf3~UMxcFwjv}vqCbCf)Hn-N0hnflvZ6nf^wfnoF!x$aT zt)qz0V>G$(q#HXp?&2&xcyN3?%aUX~A1@CbD$DZF+paoTmJ``kSMgu+_AHsm__V(? 
zi`Jtk%96-Mt;5N7RPS7H#kQ;e@!8$4_DGb?udknZ(7pY@pWJscb>(F7O61B3?|k&p zbY^ykzZYKm{MTlE%&l6>@6To*{ng+8t1tf9A3pxs?LqT9Gdlm>i@*F!zxkD+&ezfU z^>6*UOVw;B4il<40zET$fwkeP}H5(TuCN?-+|m6Nr!VKhL6` zM*D~hJ-u@*qP_mD3$BcPGG6$kh+Q!qMcd!QbjuZUPsgK-goV+m*bUF5*6DZ$R&L|_ zQ!_=&)sLd(e~F^jcI`T`7NJhdV)CqacaCvWmmd&o<>B$7yt+O-4NZ9KueaXrRjH49 z<>5+eN9QT)^U2vNzJp<|eyCR^@KL38`)n26J{GGBi)UEfH`KEWlK3tSX#AYPxwY7Eeoci?DmUhm8HDGg^v7nZSYzWu zj^T|=3K&Dw-jzp-gHEGOP%s@@uPv6L8q^mbQ-`=7?TI7({{g1y&zF?fFf^Xggf*yAmWhpn&3Q;8UR2*em#EIo9?j+t8BFXAs8C9v)oYp%?Ir3?FxD&&r zY|bb3b-hmJ(@u*PG?v!my?XsDWR5wzFViDx?#WD=zIB34Y9D;^8TLWpGuD@u8U1Sd zW$_GD9y(j681SG|Qcv}K7WyT#u}mAJmF&4yx<`pJfR&z>g7ZGr0#BUd0)^h9NQH&_+@*ryz&OVE;wxmHfmBo7cSi$ z!H_Vm)4eLv(~Jo|f_5Uz1=B+qW=QO3>+9L76A+l{af?)+kqbifW0Ca6I>p+F9z=p8 z{@jHDeU7`33mr?vpuT$}u{%chH5AXJT zGb{cV8wcrMhl6Z(dE+N1@l{vwk>)q#5!4R{*uK<9nll@6dJmngT9>#zJX_->6;#Zd zw(8M6zOmfxWYgy6422+=7*|^E>CRSVT63kxlgUz^+-o=dbt9AuxWPSP<4Qr}@={=< z$$j2zj35DdswPMz_079nyqXM7?k?}LGT04CCwBYV)KfteAqedw3JXF_RLOf62Nr4X zsF;)|#Ep9b3!YAa1;02IpRjl-zc&@1G@lz{i(Fwpyx{8*?8{*JH$EJjKon*NQ5o#J zM=P%$W=99oKnzWol?+0BEw4vx7FE9(CuF(gz(m%a$Rca`y&Z+Uue%<0A(_@M4K80E zT;lRS$U~y(N6duckB6^CCQN59Jo4V{C+hdW2jL^cw?Fvf$KP%7xQWcZF3Y1;W)WXF z0O)|82#W>^6g7zHw3dM0=@{>(CY0C=NA-d3&R0MESt7pz+}{gHvob!2=L&Ls5Ypcz zF0h6Wlo~=7ks1rcaGnyrv6#dsB93cU{$OY*PYBWMM6#1_r%8syLJFfuN4PgIuv|~A z;2cO_+VwYIc>IPBvz5NVcgMhnI^{mIPFelC-+19_l<1=|EO*Al7BGMnfQIT* zkkeJpRWn;u-`#`c-Gh5@Qye&r>UFZ5fZk#{@ubC4%-p!;At>Ci9a=C%P;U(lCPfO6 zgFT$NB`Obuv55nVZFk zDd8`YB1dL*ZBhJ=(q(ohQ7}E`By|D?>&e+<=u$ZXcO_pKyDMjpnBLN_1TCDDrV=)q)S@3$b{?_}Iv$$OdG=NJ@0NO=O z`>t_Hh^c`+)lS1k%wgthGtj>YvW{IxssXT(tuL;W8#P+qY3qG(3O*^g#;(40_~P@gj& z+Hr&o%2d}oU;}Q|CQ!%>X`gCi5m|JqTA@Wg+v*|mC7#j)UJq877iX6E7Ga~4UPLm5 z8u%q@NU^Ks;puAgTlFlg3b2zdH&zwBPu^C3Mu1z3Rx(PHxo4l~%Sn>v)U9AZiaK5t z-$~*&U|>%bM-(Qh9XOz=2@rp$8WcJVRW=+Tb8$=DQygl_2AQI(8pQzXGl=H1-P$2B z4@`nzmm(a&{^9h)5Q@2KDnKDZw4q1WmoHeop+)TosdU;zA|`E>C16H*_Ob8<=utDq zGgW&5vJg))MN1Hi9Q=f=&xCIRqpCQBNCm8pDORec6l_!%&|?Atsl0s85gt-rz*<9$ z;R}>djRdgH6GI#EjET%*KPE@alO$w`El<~+FS-in!P6vn7ACa(fd1Q|vf={v1K~6P zaI{z6>f45`QdK@-c(SAVipVf&2vP>ELfI`+)nX7BnsS5yv(Jj3PZCO+06?@({n)~) z6(NCU6!y^}vj2-`B>Q-#95=N3QEU8nYXr04 z{E;xEEnv{{?Lh45XHC0eRf9%ko&tx}fo05n#2;9Gs?)~i5^V@J=4|Oda3usk2@x~AURT#*ZhYYn? 
z5DyU05q25YkDxITG&EQQRVX2XjwphT^dqQ{kwwsOQG6*R?XWB{nrwl`_2de05P06I zzwqLdFA9p)PwGO83w^-@)-A2S`s9o6R2IM)j0PrDN0v6>i`EEKX@`|F$_7O3&(+cb~#VQKBGK97aAth@7 zQFkJg91CyDM*86sv4$udDah*}+{n``JBg`JAbJ4~Rc=6xDb?h5Q;e{ABk)P698p6O zVOv$ZlWG%K2)4?RMi8~Ki04Cf8HHOJ}GZ=3~Lr^_0OS{_i2??wB2mWku*b8?e(yW9z6Q`1Y7%O*`xA!b-Q5UW5gV$E1da@RGX92+Y`HBAP} z#v`U`+O;=a`ik#Fwh72uIu39R>863Ag;^Hlc~=C#gC(g_s?qK8ZATi7MU0?}OcAx} zMImIydSy%`D4NEuU7*i!G{uUTMfR$t zW@lVEWd9m(kWxZa$PQjQt`C?qi|c}_w~ub*bu@W4+62pqmC6@#+LRk4@guh@?Vso-FCkwf+JDMF7 z`ym!xKAIi8Vl)#R4|dsbW$+(cB0!b5ACVQxfLhH#VgT)gw8nz8hGV;H5Mch_48;+L z@M0b_ZVXym#+XmVyizKdH`_-g^g7Wi?+_RaQ8z7qF1(e{f2rACCbd9h3IX=j$uLUj z$8^1^3$<|UA+TsKVH#{Ss2~h(?=uNz1tbaJh=xfiBMX2dt7BnEJrZyCr}XBxL?&KLj@gWN>V^avL_w>d)9psItt03 z79H1pdunFt!||5ZavBB8AS@8z3Sh`%z+0(AS%Gt6=y(Cx?a zE&%(L$lk9X(tf;raT%h|x^g|1YLy7nc}SI{>LxQ$;s|4sLL<(L>KRI`>}_xC)2Qqc z2nbzbEbnBBqrLLB;yeB1G#s2SX67-Po#rXPx;Udc$q&XDO zhH@NCS6Y2d!+QITB*Q|wFlEJ+UBMO{`xQ>VA}OxEcYf(ichPUH`S~;ayow(Z`Stnx zx|_~k@h)yyyQkM(*rJVJby>G1esnd@Pm<;svb5<}*S7r9s$V&GW@~AV-#B$2U?hjN z#XI=H*xH8IZ@!jR@ALg(JpJS|uE^BRHN~1DurR+WiVHl3lQ8+FCfJ(4 z=L`+^`gxw8?r!*nwbiW+euu?K?5=2iVuLBT;jprJ*Ckx9tSv6x``~36<}h=2?xyQ& zocrl#U2$vp1r|It+@GFby$?BVEUnCMJm{CZ5Bg7>?jj^(LeiLDZPo_E2w=VA+ + +using namespace eosio; + +template +void _setdata(name self, int len, name payer) { + Table ta(self, 0); + std::vector data; + data.resize(len, 0); + auto it = ta.find(0); + if (it == ta.end()) { + ta.emplace(payer, [&](auto &v) { + v.key = 0; + v.value = data; + }); + } else { + ta.modify(it, payer, [&](auto &v) { + v.key = 0; + v.value = data; + }); + } +} + +void ram_restrictions_test::noop( ) { +} + +void ram_restrictions_test::setdata( uint32_t len1, uint32_t len2, name payer ) { + _setdata(get_self(), len1, payer); + _setdata(get_self(), len2, payer); +} + +void ram_restrictions_test::notifysetdat( name acctonotify, uint32_t len1, uint32_t len2, name payer ) { + require_recipient(acctonotify); +} + +void ram_restrictions_test::on_notify_setdata( name acctonotify, uint32_t len1, uint32_t len2, name payer) { + setdata(len1, len2, payer); +} + +void ram_restrictions_test::senddefer( uint64_t senderid, name payer ) { + transaction trx; + trx.actions.emplace_back( + std::vector{{_self, "active"_n}}, + get_self(), + "noop"_n, + std::make_tuple() + ); + trx.send( senderid, payer ); +} + +void ram_restrictions_test::notifydefer( name acctonotify, uint64_t senderid, name payer ) { + require_recipient(acctonotify); +} + +void ram_restrictions_test::on_notifydefer( name acctonotify, uint64_t senderid, name payer ) { + senddefer(senderid, payer); +} diff --git a/unittests/test-contracts/ram_restrictions_test/ram_restrictions_test.hpp b/unittests/test-contracts/ram_restrictions_test/ram_restrictions_test.hpp new file mode 100644 index 00000000000..34a988fe5d5 --- /dev/null +++ b/unittests/test-contracts/ram_restrictions_test/ram_restrictions_test.hpp @@ -0,0 +1,45 @@ +/** + * @file + * @copyright defined in eos/LICENSE + */ +#pragma once + +#include + +class [[eosio::contract]] ram_restrictions_test : public eosio::contract { +public: + struct [[eosio::table]] data { + uint64_t key; + std::vector value; + + uint64_t primary_key() const { return key; } + }; + + typedef eosio::multi_index<"tablea"_n, data> tablea; + typedef eosio::multi_index<"tableb"_n, data> tableb; + +public: + using eosio::contract::contract; + + [[eosio::action]] + void noop(); + + [[eosio::action]] + void setdata( uint32_t len1, uint32_t len2, eosio::name payer ); + + [[eosio::action]] + void notifysetdat( eosio::name acctonotify, 
uint32_t len1, uint32_t len2, eosio::name payer ); + + [[eosio::on_notify("tester2::notifysetdat")]] + void on_notify_setdata( eosio::name acctonotify, uint32_t len1, uint32_t len2, eosio::name payer ); + + [[eosio::action]] + void senddefer( uint64_t senderid, eosio::name payer ); + + [[eosio::action]] + void notifydefer( eosio::name acctonotify, uint64_t senderid, eosio::name payer ); + + [[eosio::on_notify("tester2::notifydefer")]] + void on_notifydefer( eosio::name acctonotify, uint64_t senderid, eosio::name payer ); + +}; diff --git a/unittests/test-contracts/ram_restrictions_test/ram_restrictions_test.wasm b/unittests/test-contracts/ram_restrictions_test/ram_restrictions_test.wasm new file mode 100644 index 0000000000000000000000000000000000000000..74be3b18fdf042a30362886f684b84b21be8324a GIT binary patch literal 14329 zcmeI3TWn?5S;yDDAJ4YOd(!E|nT)D+HUoGHDMORYgtVpJrnxmOy{JMJFCND}$@rWZ z+hcpu1cmVwWol5w2ry`qZpBg1dz};ksJ~Seu$yU?{J-o+% zmK;oa%MVVh_m&su7JK*i*4BE9E>hcgZ)JUHWo~|by|=dEVl|bE_s!kE#K@%+x41+t z+5CMg^g6xgXM1PYdmAp*$APupx${eFy}7mC!qVzeZ+XLIYWIwoyYI}(!eHU_{L=E= z>ipXL+1^HPZJpPJ0gKy8g0-;vaDe3>xZ0dtpbrW3>e|Zc%6e~6FN^0Ffa?l%jxU^t zwB?Ptjis|a*HP0!ft_F2SbDHGGTJ~@YF=5MTb$pRpIcx0g`OMJjB2enzi3n1K4+s3 zjB;vjVP&!B_Sx{^n(&~M5L-~RDP?|-n zQ50oKeIT8g zy=XsQ`iIM(bC;{`bGOYw+7M@4}Lo{_?Sx+y~Q{#=gJ&#XtRzU;0m1?Jw;otbftQY@_+~ zcmC!TMhj?6Nk&wuV;|MA^n{9c&ezBf+qV9CZ+KT?(#qbS*oXTpEp-7(=B z{+o_J23wly!8+>K@d>^{>!UMu;$6Qf`prYd*F;2n^M{vQ75QYc^hp`J@8O?$+EhkIdlS;;5MYU z+3eS;kNVZ2TC^hyDfIc|Tpi!ZG}k=RuM_a7)229AM|Vub8Upc5YaZ&?8GYT{CqDA@ z4C-Qy1t9y8K7e8&616Q4^?hzId>E54C9ytNU0?bbUVs-<8R+;0-~iIW!4{-2VHNu> zZ-!~jp>85l;BR^l=z~RdSZGEPYNNHAz+}GEyMbU+rWo~l6 zJ!3L`Ecv||xUf%zHqq@gOHxkK3Q#!nauQ`C;tP@WpMT+= zSy#n%)VR9=tC_Iz&Gp`J5(Z2mcbzaWM-mPFAM=RdlJd~2kQ zLl%#)-kVT7bHB=wFI{HWZ~88G{ViL28DC}Be`0IoYkAk7 zk8v179YwM*Mjg3~={T*^fsfvYaN!0dlG&inaSwITY`$kAZeHQ1=%V3>K3z%k-!vrm zGQL2jq79%VzPRVcx`U@EeWyCoP4I>oz_j0$WaAWWau^A0k1c>(tbt|Z07Mn9kWa@{;+Kd+vb`h zX5~gqTkvU-{3>QlDxU2$Yt{(YMIXisE1Ye4YRmuuOI7FE2sFO=X!4Zc&Or>_l}Kroix{;o zE1N;Rm~RZjhqK+R@X2Q_gqe9+57iL^VFSZ$fyTqM>W;&7V*c2?r3;zzBKZ#JpkJ9kc1hyN#%4C1vtRw3nL8KZ!qzxl>?ayBD`E2wEzkc!OZiHV*qaq<9#qd%-5l5-E z10`fDSR*D%G3JZcfnUEBn9ur>XDoyyDMk6G!E(wZ-vk2j3X*Sf$w-r->Su(x!W-I|7%>UM2 z-SvNEukQX{+N)>cgo-FNRgC*ZA~v$p>W2yMlIAO}FEcG`_l@n>m$zRrvhSgjIP?9% zqJJnn78UNyUV8K$n@==v$Mj=BLwfkw$Za10WlgdB25+9RA6KSSP@}s3MyMI?cbRtb!%xmxU-6osbd#q_CKdkJrlYhJY-W zlP^*p>mv|i+9iX4gNPefy)`scHxqKf9wl?%Op^?Tg>sf6*{vA9nLbP4Xkq-1a6@YXV z1HkNGx)ad>G$o?6T~^-dIfB8I!Lonxkz2BD012 zU2<4Fb~pH2r4$zY2!C>C1x}G48+0d#f-{02TrL?!a|&>J>+sgD7BwFxGUeCSH?epe zL@H`lsd$M7MVB2#={cmBwDC!&B2?=QgQ8|*WHRM4V?dZpPtr~%<`awv7>wz(Fi3L* z%AyYECy=NQhpa&9I=M?h3}j>yvWm>EC7_WaZWyGg>i6T}4H+$YV@o4X`OYOXdIdi* zh>VQoRb8`0renb^%Si}mKW&3d2BYQ-S(ZSW7CelOtD;@F$b?Yrp?Mp0W5fz(R=DDli&q{9Rwpr zlvzG&4w|}0oElwe_234D10shg#kI|V?anivXCXzIZ`zp9{~)Y$q7i_E=OvH z{lhJ_LT7XVI*T!M_ z^kD8*L#)BIPt)E)yW`~%<%pa-E_cwe63Y#h9$EC+%Q=bindE;RF&moV>)t5 zW)vw9ivs_u10*x-U5CiyjCR(`EO#6hx!QVJf!Q?Q5b9;IFXTK>XUxcepb2S%ER(Ai zURxU&!jV8yqf^c=mtvW0phuxsD3&p0SS(BSD3;~HVp*}we3NCQp;-2rq*ZYI1;<>J z%3y9({WPXLsH7=mE4jv2P!S|5EW)RoixW>k(}?n;b;SlT!7J|E+U;&XEJly;2!qCr z)+dydcT@S4xY^%tBB7q^0Kq9FGArA$$_TianNrEKH55gVCUW)aork5xQoy{m`N1;- zxz#|?aCD1??+W?JYeAk0%@vV1n_puFT7w{y1KZ$;kY{^@yx9&J%?O`W%}6#dYO0)+ z?=pX`S|;_A@$Dmuw7F>IqLgxx2^pd?EElnpCOXx`VTx;fd^?KMP!`ob<0GvsDnFzD zTCmI)NEKZLQYA}Sb&3}jTuK)ASfCw1A0=lyN3|kks6JZxHkPPl@G@;4IoF8PVWkq} z*bt0@jl*>)K#8?Su?>IdT%!Y+l9h{>i{{Hx@ZdbZAvq26+6{qhwFbf`=IdlU+|uO-dla!2|q%`v42G)otED8;pE z#HU*`afxI0U3+{b(|Gx9#jXjN+sufK9?i($-L|EdBUS<0c3Xwk^;>yjxUGUZ9rRvO 
zkT<(YoE{N@PG3@BuGFMJcIt>0*CD$09Vwu=E-645WQdUgB!G&F2?C`JT*t430>jyyl!sBwUMRPrh$}y(+5*tn_;&Z>L9^r3X^6TRy!PP9P$!-i)7k1aP=?#eoz>K` za|a-e4(vqb#egjU5?#efT<4hOaim{0<+tKj1+qkamU_81M=@#L3SUFqYBxxRNh_!6 z>~r4k^JPQ7A%$(|I^^4~{V@?QEPjPZaMAIpL|$(?eGc)n5)fmGMo5B9{^M?^t>vlm zl5{)e_GLIL4RrRWF_%eEF`H-CRKn!6(d3NCt$Fs6j#kviVKZm<_phhD_f( zUl@v%E=>fEhYg@0kpJw+3F(fVB#GFiW_AFtXw>3xSH2Kr%#JnNWa%{{V=aM0dPCh+ ze87~6OeJcF+AtB@p=sNUErO=CTt!tS#n&{gzt>E1K%|LtSK?B;JJ&MuYw_@k#&#~! zp1VtC5Lc;;Oa;iJ#Znoo(@?LLn(&Jgcv;eL&<@(|L3?Mwt;fQ5sMU<2;VQ1vFeVKj z8#!p#H4PoK=i8an)_o1joXlHtrV&b5JWLh{ExJ-z{J}0SmFg;o4wyq60^#AHodXCv zX!j|xWjK4#WXX*)UPWQVJn;h#fAb2uxw8A|QW!B^(=(Y2Ay`xuJtXgkZTC7H@y1loX0jV!cSr~5>XI*wFe}CxQY#Y1rERNBjPdwXR10jjWjB~*YdUPX*t+&GluAs=-AQUgv+-+vtI$}z5 zF*-Nm!l={*JHriPlF29PTvtWdPmZZLG8mdub~J1m%b5^ZaRS|auSSb=Qm0zAC@aFl zL2yQCVIYtT$>Z=M(-ursGdPCYGSML#pq^&iQ9i8zUgBaRLuH!kn7GC5p&<6zfYsru z9T_<77!p~%Ng`{k41(hbufYoa;W}5+-m8nX8Qb+i-Sa>bGFX}1b7MI3iFGV#m;}f{ zDkv$d4Qb}Up$*}XH`B$0i+A`^$wg@3-kxy=@LWztZ{UOuOfE+4E;Mmge{au--$=bLowdv~gm*5}l{YWUFAL%VZJ-qvU)KT~^Y@*h;eJ0)|LNYEUsze* zSmTeen90S+@-44I^z6#w()|x#7Dp~lZY|diy~gF9e$JI|3x9M48ZE-7=a(OVk84Y3 f=hq(ggWkjbBd2@t#gGsK=9gP?Kp6U0yUzU&$L&r2 literal 0 HcmV?d00001 From 74a4c7a2fe994beb562682d08bc1f18822912fea Mon Sep 17 00:00:00 2001 From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com> Date: Thu, 18 Apr 2019 16:52:42 -0400 Subject: [PATCH 438/680] remove "eosio root" print on nodeos startup There is no utility to printing this; on the contrary it has been distracting for some support requests --- programs/nodeos/main.cpp | 1 - 1 file changed, 1 deletion(-) diff --git a/programs/nodeos/main.cpp b/programs/nodeos/main.cpp index 8b3b2d9478c..04c726ed8df 100644 --- a/programs/nodeos/main.cpp +++ b/programs/nodeos/main.cpp @@ -97,7 +97,6 @@ int main(int argc, char** argv) return INITIALIZE_FAIL; initialize_logging(); ilog("${name} version ${ver}", ("name", nodeos::config::node_executable_name)("ver", app().version_string())); - ilog("eosio root is ${root}", ("root", root.string())); ilog("${name} using configuration file ${c}", ("name", nodeos::config::node_executable_name)("c", app().full_config_file_path().string())); ilog("${name} data directory is ${d}", ("name", nodeos::config::node_executable_name)("d", app().data_dir().string())); app().startup(); From b3c40f3615145275498d17750bdafa10c1ace838 Mon Sep 17 00:00:00 2001 From: Zach Butler Date: Thu, 18 Apr 2019 16:52:54 -0400 Subject: [PATCH 439/680] Removed long-running tests as they have been centralized --- .buildkite/long_running_tests.yml | 206 ------------------------------ scripts/long-running-test.sh | 36 ------ 2 files changed, 242 deletions(-) delete mode 100644 .buildkite/long_running_tests.yml delete mode 100755 scripts/long-running-test.sh diff --git a/.buildkite/long_running_tests.yml b/.buildkite/long_running_tests.yml deleted file mode 100644 index ecf663432e9..00000000000 --- a/.buildkite/long_running_tests.yml +++ /dev/null @@ -1,206 +0,0 @@ -steps: - - command: | # Amazon Linux 2 Build - echo "+++ :hammer: Building" - ./scripts/eosio_build.sh -y - echo "--- :compression: Compressing build directory" - tar -pczf build.tar.gz build - if [[ ! -f build.tar.gz ]]; then echo 'ERROR: No build.tar.gz artifact found!' 
&& exit 1; fi - label: ":aws: Amazon Linux 2 Build" - agents: - queue: "automation-large-builder-fleet" - artifact_paths: "build.tar.gz" - plugins: - ecr#v1.1.4: - login: true - account_ids: "436617320021" - no-include-email: true - region: "us-west-2" - docker#v2.1.0: - debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-2" - propagate-environment: true - workdir: /data/job - timeout: 60 - - - command: | # CentOS 7 Build - echo "+++ :hammer: Building" - ./scripts/eosio_build.sh -y - echo "--- :compression: Compressing build directory" - tar -pczf build.tar.gz build - if [[ ! -f build.tar.gz ]]; then echo 'ERROR: No build.tar.gz artifact found!' && exit 1; fi - label: ":centos: CentOS 7 Build" - agents: - queue: "automation-large-builder-fleet" - artifact_paths: "build.tar.gz" - plugins: - ecr#v1.1.4: - login: true - account_ids: "436617320021" - no-include-email: true - region: "us-west-2" - docker#v2.1.0: - debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-2" - propagate-environment: true - workdir: /data/job - timeout: 60 - - - command: | # Ubuntu 16.04 Build - echo "+++ :hammer: Building" - ./scripts/eosio_build.sh -y - echo "--- :compression: Compressing build directory" - tar -pczf build.tar.gz build - if [[ ! -f build.tar.gz ]]; then echo 'ERROR: No build.tar.gz artifact found!' && exit 1; fi - label: ":ubuntu: Ubuntu 16.04 Build" - agents: - queue: "automation-large-builder-fleet" - artifact_paths: "build.tar.gz" - plugins: - ecr#v1.1.4: - login: true - account_ids: "436617320021" - no-include-email: true - region: "us-west-2" - docker#v2.1.0: - debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu16_2-2" - propagate-environment: true - workdir: /data/job - timeout: 60 - - - command: | # Ubuntu 18.04 Build - echo "+++ :hammer: Building" - ./scripts/eosio_build.sh -y - echo "--- :compression: Compressing build directory" - tar -pczf build.tar.gz build - if [[ ! -f build.tar.gz ]]; then echo 'ERROR: No build.tar.gz artifact found!' && exit 1; fi - label: ":ubuntu: Ubuntu 18.04 Build" - agents: - queue: "automation-large-builder-fleet" - artifact_paths: "build.tar.gz" - plugins: - ecr#v1.1.4: - login: true - account_ids: "436617320021" - no-include-email: true - region: "us-west-2" - docker#v2.1.0: - debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-2" - propagate-environment: true - workdir: /data/job - timeout: 60 - - - command: | # macOS Mojave Build - echo "--- Creating symbolic link to job directory :file_folder:" - sleep 5 - ln -s "$(pwd)" /data/job - cd /data/job - echo "+++ Building :hammer:" - ./scripts/eosio_build.sh -y - echo "--- Compressing build directory :compression:" - tar -pczf build.tar.gz build - if [[ ! -f build.tar.gz ]]; then echo 'ERROR: No build.tar.gz artifact found!' && exit 1; fi - label: ":darwin: Mojave Build" - agents: - - "role=builder-v2-1" - - "os=mojave" - artifact_paths: "build.tar.gz" - timeout: 60 - - - wait - - - command: | # Amazon Linux 2 Tests - echo "--- :arrow_down: Downloading Build Directory" - buildkite-agent artifact download "build.tar.gz" . 
--step ":aws: Amazon Linux 2 Build" - echo "+++ :microscope: Running LR Tests" - ./scripts/long-running-test.sh - label: ":aws: Amazon Linux 2 LR Tests" - agents: - queue: "automation-large-builder-fleet" - plugins: - ecr#v1.1.4: - login: true - account_ids: "436617320021" - no-include-email: true - region: "us-west-2" - docker#v2.1.0: - debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-2" - propagate-environment: true - workdir: /data/job - timeout: 90 - - - command: | # centOS 7 Tests - echo "--- :arrow_down: Downloading Build Directory" - buildkite-agent artifact download "build.tar.gz" . --step ":centos: CentOS 7 Build" - echo "+++ :microscope: Running LR Tests" - ./scripts/long-running-test.sh - label: ":centos: CentOS 7 LR Tests" - agents: - queue: "automation-large-builder-fleet" - plugins: - ecr#v1.1.4: - login: true - account_ids: "436617320021" - no-include-email: true - region: "us-west-2" - docker#v2.1.0: - debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-2" - propagate-environment: true - workdir: /data/job - timeout: 90 - - - command: | # Ubuntu 16.04 Tests - echo "--- :arrow_down: Downloading Build Directory" - buildkite-agent artifact download "build.tar.gz" . --step ":ubuntu: Ubuntu 16.04 Build" - echo "+++ :microscope: Running LR Tests" - ./scripts/long-running-test.sh - label: ":ubuntu: Ubuntu 16.04 LR Tests" - agents: - queue: "automation-large-builder-fleet" - plugins: - ecr#v1.1.4: - login: true - account_ids: "436617320021" - no-include-email: true - region: "us-west-2" - docker#v2.1.0: - debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu16_2-2" - propagate-environment: true - workdir: /data/job - timeout: 90 - - - command: | # Ubuntu 18.04 Tests - echo "--- :arrow_down: Downloading Build Directory" - buildkite-agent artifact download "build.tar.gz" . --step ":ubuntu: Ubuntu 18.04 Build" - echo "+++ :microscope: Running LR Tests" - ./scripts/long-running-test.sh - label: ":ubuntu: Ubuntu 18.04 LR Tests" - agents: - queue: "automation-large-builder-fleet" - plugins: - ecr#v1.1.4: - login: true - account_ids: "436617320021" - no-include-email: true - region: "us-west-2" - docker#v2.1.0: - debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-2" - propagate-environment: true - workdir: /data/job - timeout: 90 - - - command: | # Mojave Tests - echo "--- :arrow_down: Downloading Build Directory" - buildkite-agent artifact download "build.tar.gz" . --step ":darwin: Mojave Build" - echo "+++ :microscope: Running LR Tests" - ln -s "$(pwd)" /data/job && ./scripts/long-running-test.sh - label: ":darwin: Mojave LR Tests" - agents: - - "role=tester-v2-1" - - "os=mojave" - timeout: 90 \ No newline at end of file diff --git a/scripts/long-running-test.sh b/scripts/long-running-test.sh deleted file mode 100755 index 30ec5faaa12..00000000000 --- a/scripts/long-running-test.sh +++ /dev/null @@ -1,36 +0,0 @@ -#!/bin/bash -set -e # exit on failure of any "simple" command (excludes &&, ||, or | chains) -# prepare environment -PATH=$PATH:~/opt/mongodb/bin -echo "Extracting build directory..." -tar -zxf build.tar.gz -echo "Starting MongoDB..." -~/bin/mongod --fork --dbpath ~/data/mongodb -f ~/etc/mongod.conf --logpath "$(pwd)"/mongod.log -cd /data/job/build -# run tests -echo "Running tests..." -TEST_COUNT=$(ctest -N -L nonparallelizable_tests | grep -i 'Total Tests: ' | cut -d ':' -f 2 | awk '{print $1}') -[[ $TEST_COUNT > 0 ]] && echo "$TEST_COUNT tests found." 
|| (echo "ERROR: No tests registered with ctest! Exiting..." && exit 1)
-set +e # defer ctest error handling to end
-echo "$ ctest -L long_running_tests --output-on-failure -T Test"
-ctest -L long_running_tests --output-on-failure -T Test
-EXIT_STATUS=$?
-[[ "$EXIT_STATUS" == 0 ]] && set -e
-echo "Done running long-running tests."
-# upload artifacts
-echo "Uploading artifacts..."
-XML_FILENAME="test-results.xml"
-mv $(pwd)/Testing/$(ls $(pwd)/Testing/ | grep '20' | tail -n 1)/Test.xml $XML_FILENAME
-buildkite-agent artifact upload config.ini
-buildkite-agent artifact upload genesis.json
-cd ..
-buildkite-agent artifact upload mongod.log
-cd build
-buildkite-agent artifact upload $XML_FILENAME
-echo "Done uploading artifacts."
-# ctest error handling
-if [[ "$EXIT_STATUS" != 0 ]]; then
-    echo "Failing due to non-zero exit status from ctest: $EXIT_STATUS"
-    echo '    ^^^ scroll up for more information ^^^'
-    exit $EXIT_STATUS
-fi
\ No newline at end of file

From 32c3375bbfed1b27f7dc48951a225ef8e521c815 Mon Sep 17 00:00:00 2001
From: arhag
Date: Thu, 18 Apr 2019 17:17:55 -0400
Subject: [PATCH 440/680] fix committed ram_restrictions_test.abi file

---
 .../ram_restrictions_test.abi                 | 16 +---------------
 1 file changed, 1 insertion(+), 15 deletions(-)

diff --git a/unittests/test-contracts/ram_restrictions_test/ram_restrictions_test.abi b/unittests/test-contracts/ram_restrictions_test/ram_restrictions_test.abi
index 48d831c5b31..6a12751077a 100644
--- a/unittests/test-contracts/ram_restrictions_test/ram_restrictions_test.abi
+++ b/unittests/test-contracts/ram_restrictions_test/ram_restrictions_test.abi
@@ -76,20 +76,6 @@
             }
          ]
       },
-      {
-         "name": "senddefer",
-         "base": "",
-         "fields": [
-            {
-               "name": "senderid",
-               "type": "uint32"
-            },
-            {
-               "name": "payer",
-               "type": "name"
-            }
-         ]
-      },
       {
          "name": "setdata",
         "base": "",
@@ -154,4 +140,4 @@
    ],
    "ricardian_clauses": [],
    "variants": []
-}
+}
\ No newline at end of file

From 0823c8b6ac9b502d99fbaddd998b6363fb723967 Mon Sep 17 00:00:00 2001
From: Todd Fleming
Date: Thu, 18 Apr 2019 17:46:24 -0400
Subject: [PATCH 441/680] ship: protocol_state

---
 .../eosio/chain/protocol_state_object.hpp     |  4 ++++
 .../state_history_serialization.hpp           | 18 ++++++++++++++++++
 .../state_history_plugin.cpp                  |  5 +++++
 .../state_history_plugin_abi.cpp              | 14 ++++++++++++++
 4 files changed, 41 insertions(+)

diff --git a/libraries/chain/include/eosio/chain/protocol_state_object.hpp b/libraries/chain/include/eosio/chain/protocol_state_object.hpp
index 91fc47b08c4..dfbb4373a8a 100644
--- a/libraries/chain/include/eosio/chain/protocol_state_object.hpp
+++ b/libraries/chain/include/eosio/chain/protocol_state_object.hpp
@@ -33,6 +33,10 @@ namespace eosio { namespace chain {
          :feature_digest( feature_digest )
          ,activation_block_num( activation_block_num )
          {}
+
+         bool operator==(const activated_protocol_feature& rhs) const {
+            return feature_digest == rhs.feature_digest && activation_block_num == rhs.activation_block_num;
+         }
       };

    public:
diff --git a/plugins/state_history_plugin/include/eosio/state_history_plugin/state_history_serialization.hpp b/plugins/state_history_plugin/include/eosio/state_history_plugin/state_history_serialization.hpp
index dd6eb611581..e11b7e5f4f7 100644
--- a/plugins/state_history_plugin/include/eosio/state_history_plugin/state_history_serialization.hpp
+++ b/plugins/state_history_plugin/include/eosio/state_history_plugin/state_history_serialization.hpp
@@ -11,6 +11,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -303,6 +304,23 @@ datastream&
operator<<(datastream& return ds; } +template +datastream& +operator<<(datastream& ds, + const history_serial_wrapper& obj) { + fc::raw::pack(ds, fc::unsigned_int(0)); + fc::raw::pack(ds, as_type(obj.obj.feature_digest)); + fc::raw::pack(ds, as_type(obj.obj.activation_block_num)); + return ds; +} + +template +datastream& operator<<(datastream& ds, const history_serial_wrapper& obj) { + fc::raw::pack(ds, fc::unsigned_int(0)); + history_serialize_container(ds, obj.db, obj.obj.activated_protocol_features); + return ds; +} + template datastream& operator<<(datastream& ds, const history_serial_wrapper& obj) { fc::raw::pack(ds, as_type(obj.obj.key)); diff --git a/plugins/state_history_plugin/state_history_plugin.cpp b/plugins/state_history_plugin/state_history_plugin.cpp index 1ee464055b7..f0579891dee 100644 --- a/plugins/state_history_plugin/state_history_plugin.cpp +++ b/plugins/state_history_plugin/state_history_plugin.cpp @@ -101,6 +101,10 @@ bool include_delta(const eosio::chain::code_object& old, const eosio::chain::cod return false; } +bool include_delta(const eosio::chain::protocol_state_object& old, const eosio::chain::protocol_state_object& curr) { + return old.activated_protocol_features != curr.activated_protocol_features; +} + struct state_history_plugin_impl : std::enable_shared_from_this { chain_plugin* chain_plug = nullptr; fc::optional trace_log; @@ -527,6 +531,7 @@ struct state_history_plugin_impl : std::enable_shared_from_this(), pack_row); process_table("generated_transaction", db.get_index(), pack_row); + process_table("protocol_state", db.get_index(), pack_row); process_table("permission", db.get_index(), pack_row); process_table("permission_link", db.get_index(), pack_row); diff --git a/plugins/state_history_plugin/state_history_plugin_abi.cpp b/plugins/state_history_plugin/state_history_plugin_abi.cpp index a5ff928f52c..9d5324b0bbf 100644 --- a/plugins/state_history_plugin/state_history_plugin_abi.cpp +++ b/plugins/state_history_plugin/state_history_plugin_abi.cpp @@ -348,6 +348,17 @@ extern const char* const state_history_plugin_abi = R"({ { "type": "bytes", "name": "packed_trx" } ] }, + { + "name": "activated_protocol_feature_v0", "fields": [ + { "type": "checksum256", "name": "feature_digest" }, + { "type": "uint32", "name": "activation_block_num" } + ] + }, + { + "name": "protocol_state_v0", "fields": [ + { "type": "activated_protocol_feature[]", "name": "activated_protocol_features" } + ] + }, { "name": "key_weight", "fields": [ { "type": "public_key", "name": "key" }, @@ -483,6 +494,8 @@ extern const char* const state_history_plugin_abi = R"({ { "name": "chain_config", "types": ["chain_config_v0"] }, { "name": "global_property", "types": ["global_property_v0"] }, { "name": "generated_transaction", "types": ["generated_transaction_v0"] }, + { "name": "activated_protocol_feature", "types": ["activated_protocol_feature_v0"] }, + { "name": "protocol_state", "types": ["protocol_state_v0"] }, { "name": "permission", "types": ["permission_v0"] }, { "name": "permission_link", "types": ["permission_link_v0"] }, { "name": "resource_limits", "types": ["resource_limits_v0"] }, @@ -506,6 +519,7 @@ extern const char* const state_history_plugin_abi = R"({ { "name": "contract_index_long_double", "type": "contract_index_long_double", "key_names": ["code", "scope", "table", "primary_key"] }, { "name": "global_property", "type": "global_property", "key_names": [] }, { "name": "generated_transaction", "type": "generated_transaction", "key_names": ["sender", "sender_id"] }, + { "name": 
"protocol_state", "type": "protocol_state", "key_names": [] }, { "name": "permission", "type": "permission", "key_names": ["owner", "name"] }, { "name": "permission_link", "type": "permission_link", "key_names": ["account", "code", "message_type"] }, { "name": "resource_limits", "type": "resource_limits", "key_names": ["owner"] }, From 2ce09d9eeadacaf6cc193df9813c3b95659ce2d3 Mon Sep 17 00:00:00 2001 From: Bucky Kittinger Date: Thu, 18 Apr 2019 18:05:53 -0400 Subject: [PATCH 442/680] add principal macro for test executable and then use that for add_eosio_test --- CMakeModules/EosioTester.cmake.in | 11 ++++++----- CMakeModules/EosioTesterBuild.cmake.in | 7 ++++++- 2 files changed, 12 insertions(+), 6 deletions(-) diff --git a/CMakeModules/EosioTester.cmake.in b/CMakeModules/EosioTester.cmake.in index 5469c053d17..d7f6025eb57 100644 --- a/CMakeModules/EosioTester.cmake.in +++ b/CMakeModules/EosioTester.cmake.in @@ -73,7 +73,7 @@ find_library(GMP_LIBRARIES NAMES libgmp.a gmp.lib gmp libgmp-10 mpir DOC "Path to the GMP library" ) -macro(add_eosio_test test_name) +macro(add_eosio_test_executable test_name) add_executable( ${test_name} ${ARGN} ) target_link_libraries( ${test_name} ${LLVM} @@ -139,8 +139,9 @@ macro(add_eosio_test test_name) #Manually run unit_test for all supported runtimes #To run unit_test with all log from blockchain displayed, put --verbose after --, i.e. unit_test -- --verbose - add_test(NAME ${test_name}_binaryen COMMAND ${test_name} - --report_level=detailed --color_output -- --binaryen) - add_test(NAME ${test_name}_wavm COMMAND ${test_name} - --report_level=detailed --color_output --catch_system_errors=no -- --wavm) +endmacro() + +macro(add_eosio_test test_name) + add_eosio_test_executable( ${test_name} ${ARGN} ) + add_test(NAME ${test_name} COMMAND ${test_name} --report_level=detailed --color_output) endmacro() diff --git a/CMakeModules/EosioTesterBuild.cmake.in b/CMakeModules/EosioTesterBuild.cmake.in index 053ad6fa4f4..6e3abcdcbbf 100644 --- a/CMakeModules/EosioTesterBuild.cmake.in +++ b/CMakeModules/EosioTesterBuild.cmake.in @@ -72,7 +72,7 @@ find_library(GMP_LIBRARIES NAMES libgmp.a gmp.lib gmp libgmp-10 mpir DOC "Path to the GMP library" ) -macro(add_eosio_test test_name) +macro(add_eosio_test_executable test_name) add_executable( ${test_name} ${ARGN} ) target_link_libraries( ${test_name} ${LLVM} @@ -147,6 +147,11 @@ macro(add_eosio_test test_name) --report_level=detailed --color_output --catch_system_errors=no -- --wavm) endmacro() +macro(add_eosio_test test_name) + add_eosio_test_executable( ${test_name} ${ARGN} ) + add_test(NAME ${test_name} COMMAND ${test_name} --report_level=detailed --color_output) +endmacro() + if(ENABLE_COVERAGE_TESTING) set(Coverage_NAME ${PROJECT_NAME}_ut_coverage) From ba081a6ae2e8d8f8d6a7cbee7de48904f5f2ecc6 Mon Sep 17 00:00:00 2001 From: Bucky Kittinger Date: Thu, 18 Apr 2019 18:36:49 -0400 Subject: [PATCH 443/680] Small tweaks to comments --- CMakeModules/EosioTester.cmake.in | 7 ++++--- CMakeModules/EosioTesterBuild.cmake.in | 10 +++------- 2 files changed, 7 insertions(+), 10 deletions(-) diff --git a/CMakeModules/EosioTester.cmake.in b/CMakeModules/EosioTester.cmake.in index d7f6025eb57..c915de69127 100644 --- a/CMakeModules/EosioTester.cmake.in +++ b/CMakeModules/EosioTester.cmake.in @@ -128,7 +128,6 @@ macro(add_eosio_test_executable test_name) ${PLATFORM_SPECIFIC_LIBS} ) - #### TODO /usr/local/include is a hack for fc and some other includes target_include_directories( ${test_name} PUBLIC ${Boost_INCLUDE_DIRS} 
@OPENSSL_INCLUDE_DIR@ @@ -137,11 +136,13 @@ macro(add_eosio_test_executable test_name) @CMAKE_INSTALL_FULL_INCLUDEDIR@/wasm-jit @CMAKE_INSTALL_FULL_INCLUDEDIR@/softfloat ) - #Manually run unit_test for all supported runtimes - #To run unit_test with all log from blockchain displayed, put --verbose after --, i.e. unit_test -- --verbose endmacro() macro(add_eosio_test test_name) add_eosio_test_executable( ${test_name} ${ARGN} ) + #This will generate a test with the default runtime add_test(NAME ${test_name} COMMAND ${test_name} --report_level=detailed --color_output) + + #Manually run unit_test for all supported runtimes + #To run unit_test with all log from blockchain displayed, put --verbose after --, i.e. unit_test -- --verbose endmacro() diff --git a/CMakeModules/EosioTesterBuild.cmake.in b/CMakeModules/EosioTesterBuild.cmake.in index 6e3abcdcbbf..0644fdfc221 100644 --- a/CMakeModules/EosioTesterBuild.cmake.in +++ b/CMakeModules/EosioTesterBuild.cmake.in @@ -138,18 +138,14 @@ macro(add_eosio_test_executable test_name) @CMAKE_SOURCE_DIR@/libraries/chainbase/include @CMAKE_SOURCE_DIR@/libraries/testing/include @CMAKE_SOURCE_DIR@/libraries/wasm-jit/Include ) - # - #Manually run unit_test for all supported runtimes - #To run unit_test with all log from blockchain displayed, put --verbose after --, i.e. unit_test -- --verbose - add_test(NAME ${test_name}_binaryen COMMAND ${test_name} - --report_level=detailed --color_output -- --binaryen) - add_test(NAME ${test_name}_wavm COMMAND ${test_name} - --report_level=detailed --color_output --catch_system_errors=no -- --wavm) endmacro() macro(add_eosio_test test_name) add_eosio_test_executable( ${test_name} ${ARGN} ) + #This will generate a test with the default runtime add_test(NAME ${test_name} COMMAND ${test_name} --report_level=detailed --color_output) + #Manually run unit_test for all supported runtimes + #To run unit_test with all log from blockchain displayed, put --verbose after --, i.e. 
unit_test -- --verbose
+endmacro()

 if(ENABLE_COVERAGE_TESTING)

   set(Coverage_NAME ${PROJECT_NAME}_ut_coverage)

From a81ec613eece3db527d4ae557c03cceb02ba9d34 Mon Sep 17 00:00:00 2001
From: arhag
Date: Fri, 19 Apr 2019 12:00:11 -0400
Subject: [PATCH 444/680] exit with error if chain_plugin needs to write out a
 protocol feature JSON to a file path that already exists rather than trying
 with another name

---
 plugins/chain_plugin/chain_plugin.cpp | 23 ++++++++++-------------
 1 file changed, 10 insertions(+), 13 deletions(-)

diff --git a/plugins/chain_plugin/chain_plugin.cpp b/plugins/chain_plugin/chain_plugin.cpp
index a2ed50bfd86..3ce179f77b2 100644
--- a/plugins/chain_plugin/chain_plugin.cpp
+++ b/plugins/chain_plugin/chain_plugin.cpp
@@ -486,27 +486,24 @@ protocol_feature_set initialize_protocol_features( const fc::path& p, bool popul
    auto output_protocol_feature = [&p]( const builtin_protocol_feature& f, const digest_type& feature_digest ) {
       static constexpr int max_tries = 10;

-      string filename_base( "BUILTIN-" );
-      filename_base += builtin_protocol_feature_codename( f.get_codename() );
-
-      string filename = filename_base+ ".json";
-      int i = 0;
-      for( ;
-           i < max_tries && fc::exists( p / filename );
-           ++i, filename = filename_base + "-" + std::to_string(i) + ".json" )
-      ;
+      string filename( "BUILTIN-" );
+      filename += builtin_protocol_feature_codename( f.get_codename() );
+      filename += ".json";
+
+      auto file_path = p / filename;

-      EOS_ASSERT( i < max_tries, plugin_exception,
-                  "Could not save builtin protocol feature with codename '${codename}' due to file name conflicts",
+      EOS_ASSERT( !fc::exists( file_path ), plugin_exception,
+                  "Could not save builtin protocol feature with codename '${codename}' because a file at the following path already exists: ${path}",
                   ("codename", builtin_protocol_feature_codename( f.get_codename() ))
+                  ("path", file_path.generic_string())
      );

-      fc::json::save_to_file( f, p / filename );
+      fc::json::save_to_file( f, file_path );

       ilog( "Saved default specification for builtin protocol feature '${codename}' (with digest of '${digest}') to: ${path}",
             ("codename", builtin_protocol_feature_codename(f.get_codename()))
             ("digest", feature_digest)
-            ("path", (p / filename).generic_string())
+            ("path", file_path.generic_string())
       );
    };

From 4fd7e9067ae411982d591842a417e00a895d9fa6 Mon Sep 17 00:00:00 2001
From: arhag
Date: Fri, 19 Apr 2019 12:23:09 -0400
Subject: [PATCH 445/680] log error message if unable to successfully write out
 JSON files (protocol features or genesis state)

---
 libraries/fc                          |  2 +-
 plugins/chain_plugin/chain_plugin.cpp | 29 ++++++++++++++++++---------
 2 files changed, 21 insertions(+), 10 deletions(-)

diff --git a/libraries/fc b/libraries/fc
index 8221d5c6a7a..469bdf3298f 160000
--- a/libraries/fc
+++ b/libraries/fc
@@ -1 +1 @@
-Subproject commit 8221d5c6a7af55d7b6742341651017685b5ef7a6
+Subproject commit 469bdf3298fff96cb138a901ee1231eccb155471
diff --git a/plugins/chain_plugin/chain_plugin.cpp b/plugins/chain_plugin/chain_plugin.cpp
index 3ce179f77b2..9ab3d2e8c67 100644
--- a/plugins/chain_plugin/chain_plugin.cpp
+++ b/plugins/chain_plugin/chain_plugin.cpp
@@ -498,13 +498,19 @@ protocol_feature_set initialize_protocol_features( const fc::path& p, bool popul
             ("path", file_path.generic_string())
      );

-      fc::json::save_to_file( f, file_path );
-
-      ilog( "Saved default specification for builtin protocol feature '${codename}' (with digest of '${digest}') to: ${path}",
-            ("codename", builtin_protocol_feature_codename(f.get_codename()))
-            ("digest", feature_digest)
-            ("path", file_path.generic_string())
-      );
+      
if( fc::json::save_to_file( f, file_path ) ) { + ilog( "Saved default specification for builtin protocol feature '${codename}' (with digest of '${digest}') to: ${path}", + ("codename", builtin_protocol_feature_codename(f.get_codename())) + ("digest", feature_digest) + ("path", file_path.generic_string()) + ); + } else { + elog( "Error occurred while writing default specification for builtin protocol feature '${codename}' (with digest of '${digest}') to: ${path}", + ("codename", builtin_protocol_feature_codename(f.get_codename())) + ("digest", feature_digest) + ("path", file_path.generic_string()) + ); + } }; std::function add_missing_builtins = @@ -690,8 +696,13 @@ void chain_plugin::plugin_initialize(const variables_map& options) { p = bfs::current_path() / p; } - fc::json::save_to_file( gs, p, true ); - ilog( "Saved genesis JSON to '${path}'", ("path", p.generic_string())); + EOS_ASSERT( fc::json::save_to_file( gs, p, true ), + misc_exception, + "Error occurred while writing genesis JSON to '${path}'", + ("path", p.generic_string()) + ); + + ilog( "Saved genesis JSON to '${path}'", ("path", p.generic_string()) ); } EOS_THROW( extract_genesis_state_exception, "extracted genesis state from blocks.log" ); From 3d1a006d411067e115fb6a1c81810447c7b8abd0 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Fri, 19 Apr 2019 12:20:30 -0500 Subject: [PATCH 446/680] Reduce logging of complete object when unable to serialize --- libraries/chain/include/eosio/chain/abi_serializer.hpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/chain/include/eosio/chain/abi_serializer.hpp b/libraries/chain/include/eosio/chain/abi_serializer.hpp index 398f219ced8..cf997e9cd35 100644 --- a/libraries/chain/include/eosio/chain/abi_serializer.hpp +++ b/libraries/chain/include/eosio/chain/abi_serializer.hpp @@ -688,7 +688,7 @@ void abi_serializer::to_variant( const T& o, variant& vo, Resolver resolver, con impl::abi_traverse_context ctx(max_serialization_time); impl::abi_to_variant::add(mvo, "_", o, resolver, ctx); vo = std::move(mvo["_"]); -} FC_RETHROW_EXCEPTIONS(error, "Failed to serialize type", ("object",o)) +} FC_RETHROW_EXCEPTIONS(error, "Failed to serialize type", ("type", typeid(o).name() )) template void abi_serializer::from_variant( const variant& v, T& o, Resolver resolver, const fc::microseconds& max_serialization_time ) try { From 905fdc959a6c3d96ea538ec3410a6c298a20b52f Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Fri, 19 Apr 2019 13:15:32 -0500 Subject: [PATCH 447/680] Add demangle of type --- libraries/chain/include/eosio/chain/abi_serializer.hpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/chain/include/eosio/chain/abi_serializer.hpp b/libraries/chain/include/eosio/chain/abi_serializer.hpp index cf997e9cd35..3fd6aef137d 100644 --- a/libraries/chain/include/eosio/chain/abi_serializer.hpp +++ b/libraries/chain/include/eosio/chain/abi_serializer.hpp @@ -688,7 +688,7 @@ void abi_serializer::to_variant( const T& o, variant& vo, Resolver resolver, con impl::abi_traverse_context ctx(max_serialization_time); impl::abi_to_variant::add(mvo, "_", o, resolver, ctx); vo = std::move(mvo["_"]); -} FC_RETHROW_EXCEPTIONS(error, "Failed to serialize type", ("type", typeid(o).name() )) +} FC_RETHROW_EXCEPTIONS(error, "Failed to serialize: ${type}", ("type", boost::core::demangle( typeid(o).name() ) )) template void abi_serializer::from_variant( const variant& v, T& o, Resolver resolver, const fc::microseconds& max_serialization_time ) try { From 
b0d05538df533a6c83c8fcf13985c2749b5c04d7 Mon Sep 17 00:00:00 2001 From: Bucky Kittinger Date: Fri, 19 Apr 2019 14:23:01 -0400 Subject: [PATCH 448/680] don't think we need to add the libstdc++ any more, trying it out --- CMakeLists.txt | 5 +++-- programs/cleos/CMakeLists.txt | 6 ------ programs/keosd/CMakeLists.txt | 6 ------ programs/nodeos/CMakeLists.txt | 6 ------ scripts/eosio_build.sh | 2 +- unittests/CMakeLists.txt | 6 +----- 6 files changed, 5 insertions(+), 26 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 952e2b8dc7d..0de173dd054 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -127,6 +127,7 @@ if(${EOSIO_PIN_COMPILER}) set(CMAKE_CXX_STANDARD_LIBRARIES "${LIBSTDCPP_DIR}/lib64/libstdc++.a") else() set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -stdlib=libc++") + set(CMAKE_CXX_STANDARD_LIBRARIES "${LIBCPP_DIR}/lib/libc++.a ${LIBCPP_DIR}/lib/libc++abi.a") endif() endif() @@ -168,11 +169,11 @@ else( WIN32 ) # Apple AND Linux if( APPLE ) # Apple Specific Options Here message( STATUS "Configuring EOSIO on OS X" ) - set( CMAKE_CXX_FLAGS "${CMAKE_C_FLAGS} -Wall -Wno-deprecated-declarations" ) + set( CMAKE_CXX_FLAGS "${CMAKE_C_FLAGS} ${CMAKE_CXX_FLAGS} -Wall -Wno-deprecated-declarations" ) else( APPLE ) # Linux Specific Options Here message( STATUS "Configuring EOSIO on Linux" ) - set( CMAKE_CXX_FLAGS "${CMAKE_C_FLAGS} -Wall" ) + set( CMAKE_CXX_FLAGS "${CMAKE_C_FLAGS} ${CMAKE_CXX_FLAGS} -Wall" ) if ( FULL_STATIC_BUILD ) set( CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -static-libstdc++ -static-libgcc") endif ( FULL_STATIC_BUILD ) diff --git a/programs/cleos/CMakeLists.txt b/programs/cleos/CMakeLists.txt index 3f99a791c87..0d98fdcf63d 100644 --- a/programs/cleos/CMakeLists.txt +++ b/programs/cleos/CMakeLists.txt @@ -34,12 +34,6 @@ configure_file(config.hpp.in config.hpp ESCAPE_QUOTES) target_include_directories(${CLI_CLIENT_EXECUTABLE_NAME} PUBLIC ${Intl_INCLUDE_DIRS} ${CMAKE_CURRENT_BINARY_DIR}) -if(EOSIO_PIN_COMPILER) - if(NOT APPLE) - target_link_libraries(${CLI_CLIENT_EXECUTABLE_NAME} PRIVATE -nostdlib++) - endif() -endif() - target_link_libraries( ${CLI_CLIENT_EXECUTABLE_NAME} PRIVATE appbase chain_api_plugin producer_plugin chain_plugin http_plugin eosio_chain fc ${CMAKE_DL_LIBS} ${PLATFORM_SPECIFIC_LIBS} ${Intl_LIBRARIES} ) diff --git a/programs/keosd/CMakeLists.txt b/programs/keosd/CMakeLists.txt index e20393ea504..3c806fbed39 100644 --- a/programs/keosd/CMakeLists.txt +++ b/programs/keosd/CMakeLists.txt @@ -11,12 +11,6 @@ endif() configure_file(config.hpp.in config.hpp ESCAPE_QUOTES) -if(EOSIO_PIN_COMPILER) - if(NOT APPLE) - target_link_libraries(${KEY_STORE_EXECUTABLE_NAME} PRIVATE -nostdlib++) - endif() -endif() - target_link_libraries( ${KEY_STORE_EXECUTABLE_NAME} PRIVATE appbase PRIVATE wallet_api_plugin wallet_plugin diff --git a/programs/nodeos/CMakeLists.txt b/programs/nodeos/CMakeLists.txt index 357fb07dfd0..d9fb90ee45d 100644 --- a/programs/nodeos/CMakeLists.txt +++ b/programs/nodeos/CMakeLists.txt @@ -47,12 +47,6 @@ else() set(build_id_flag "") endif() -if(EOSIO_PIN_COMPILER) - if (NOT APPLE) - target_link_libraries(${NODE_EXECUTABLE_NAME} PRIVATE -nostdlib++) - endif() -endif() - target_link_libraries( ${NODE_EXECUTABLE_NAME} PRIVATE appbase PRIVATE -Wl,${whole_archive_flag} login_plugin -Wl,${no_whole_archive_flag} diff --git a/scripts/eosio_build.sh b/scripts/eosio_build.sh index 8bec7210293..db4e2156d87 100755 --- a/scripts/eosio_build.sh +++ b/scripts/eosio_build.sh @@ -228,7 +228,7 @@ fi if $PIN_COMPILER; then BUILD_CLANG8=true 
CPP_COMP=${OPT_LOCATION}/clang8/bin/clang++ - PIN_COMPILER_CMAKE="-DEOSIO_PIN_COMPILER=1 -DLIBSTDCPP_DIR=${OPT_LOCATION}/gcc" + PIN_COMPILER_CMAKE="-DEOSIO_PIN_COMPILER=1 -DLIBSTDCPP_DIR=${OPT_LOCATION}/gcc -DLIBCPP_DIR=${OPT_LOCATION}/clang8/" elif $NO_CPP17; then if [ $NONINTERACTIVE -eq 0 ]; then BUILD_CLANG8=true diff --git a/unittests/CMakeLists.txt b/unittests/CMakeLists.txt index 85a33acddf9..b0a08280336 100644 --- a/unittests/CMakeLists.txt +++ b/unittests/CMakeLists.txt @@ -43,11 +43,7 @@ configure_file(${CMAKE_CURRENT_SOURCE_DIR}/contracts.hpp.in ${CMAKE_CURRENT_BINA file(GLOB UNIT_TESTS "*.cpp") # find all unit test suites add_executable( unit_test ${UNIT_TESTS}) # build unit tests as one executable -if(EOSIO_PIN_COMPILER AND NOT APPLE) - target_link_libraries(unit_test PRIVATE -nostdlib++ PUBLIC eosio_chain chainbase eosio_testing fc appbase ${PLATFORM_SPECIFIC_LIBS}) -else() - target_link_libraries( unit_test eosio_chain chainbase eosio_testing fc appbase ${PLATFORM_SPECIFIC_LIBS} ) -endif() +target_link_libraries( unit_test eosio_chain chainbase eosio_testing fc appbase ${PLATFORM_SPECIFIC_LIBS} ) target_compile_options(unit_test PUBLIC -DDISABLE_EOSLIB_SERIALIZE) target_include_directories( unit_test PUBLIC From dd2b2e7178d38fc56b97fbf1f810bda67e328f72 Mon Sep 17 00:00:00 2001 From: Bucky Kittinger Date: Fri, 19 Apr 2019 14:29:21 -0400 Subject: [PATCH 449/680] updating submodules --- libraries/fc | 2 +- libraries/wabt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/libraries/fc b/libraries/fc index 8d5e6acddfd..469bdf3298f 160000 --- a/libraries/fc +++ b/libraries/fc @@ -1 +1 @@ -Subproject commit 8d5e6acddfdc9c9b30b9d3b8790630799c4fc5c6 +Subproject commit 469bdf3298fff96cb138a901ee1231eccb155471 diff --git a/libraries/wabt b/libraries/wabt index 9c0e1131a45..a136149d941 160000 --- a/libraries/wabt +++ b/libraries/wabt @@ -1 +1 @@ -Subproject commit 9c0e1131a457d5a44c6da512ef542efd32647446 +Subproject commit a136149d941df2942a25a4b66d8865fada0a325e From 06a7f310a285b9b51d69cc7564af5583a9c0e8d7 Mon Sep 17 00:00:00 2001 From: Bucky Kittinger Date: Fri, 19 Apr 2019 14:44:04 -0400 Subject: [PATCH 450/680] fixing other merging issues --- scripts/eosio_build.sh | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/scripts/eosio_build.sh b/scripts/eosio_build.sh index 5cdb07a04d5..575aa12199a 100755 --- a/scripts/eosio_build.sh +++ b/scripts/eosio_build.sh @@ -74,7 +74,7 @@ EOT } if [ $# -ne 0 ]; then - while getopts ":cdo:s:p:b:pc:hy" opt; do + while getopts ":cdo:s:p:b:Phy" opt; do case "${opt}" in o ) options=( "Debug" "Release" "RelWithDebInfo" "MinSizeRel" ) @@ -107,8 +107,9 @@ if [ $# -ne 0 ]; then p) PREFIX=$OPTARG ;; - pc) + P) PIN_COMPILER=true + ;; h) usage exit 1 @@ -200,8 +201,10 @@ fi BUILD_CLANG8=false if [ ! 
-z $CXX ]; then CPP_COMP=$CXX + CC_COMP=$CC else CPP_COMP=c++ + CC_COMP=cc fi NO_CPP17=false @@ -230,16 +233,18 @@ fi if $PIN_COMPILER; then BUILD_CLANG8=true CPP_COMP=${OPT_LOCATION}/clang8/bin/clang++ + CC_COMP=${OPT_LOCATION}/clang8/bin/clang PIN_COMPILER_CMAKE="-DEOSIO_PIN_COMPILER=1 -DLIBSTDCPP_DIR=${OPT_LOCATION}/gcc -DLIBCPP_DIR=${OPT_LOCATION}/clang8/" elif $NO_CPP17; then if [ $NONINTERACTIVE -eq 0 ]; then BUILD_CLANG8=true CPP_COMP=${OPT_LOCATION}/clang8/bin/clang++ + CC_COMP=${OPT_LOCATION}/clang8/bin/clang else printf "Error no C++17 support.\\nEnter Y/y or N/n to continue with downloading and building a viable compiler or exit now.\\nIf you already have a C++17 compiler installed or would like to install your own, export CXX to point to the compiler of your choosing." read -p "Enter Y/y or N/n to continue with downloading and building a viable compiler or exit now. " yn case $yn in - [Yy]* ) BUILD_CLANG8=true; CPP_COMP=${OPT_LOCATION}/clang8/bin/clang++; break;; + [Yy]* ) BUILD_CLANG8=true; CPP_COMP=${OPT_LOCATION}/clang8/bin/clang++; CC_COMP=${OPT_LOCATION}/clang8/bin/clang; break;; [Nn]* ) exit 1;; * ) echo "Improper input"; exit 1;; esac @@ -247,6 +252,7 @@ elif $NO_CPP17; then fi CXX=$CPP_COMP +CC=$CC_COMP export BUILD_CLANG8=$BUILD_CLANG8 @@ -345,7 +351,7 @@ mkdir -p $BUILD_DIR cd $BUILD_DIR $CMAKE -DCMAKE_BUILD_TYPE="${CMAKE_BUILD_TYPE}" -DCMAKE_CXX_COMPILER="${CXX}" \ - -DCMAKE_C_COMPILER="${C_COMPILER}" -DCORE_SYMBOL_NAME="${CORE_SYMBOL_NAME}" \ + -DCMAKE_C_COMPILER="${CC}" -DCORE_SYMBOL_NAME="${CORE_SYMBOL_NAME}" \ -DOPENSSL_ROOT_DIR="${OPENSSL_ROOT_DIR}" -DBUILD_MONGO_DB_PLUGIN=true \ -DENABLE_COVERAGE_TESTING="${ENABLE_COVERAGE_TESTING}" -DBUILD_DOXYGEN="${DOXYGEN}" \ -DCMAKE_PREFIX_PATH=$PREFIX \ From 0348ca75143507f39bb47ac3c5a3446b4b660823 Mon Sep 17 00:00:00 2001 From: Bucky Kittinger Date: Fri, 19 Apr 2019 14:45:31 -0400 Subject: [PATCH 451/680] update pipeline.yml --- .buildkite/pipeline.yml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml index a58077773c2..03c8f636300 100644 --- a/.buildkite/pipeline.yml +++ b/.buildkite/pipeline.yml @@ -1,7 +1,7 @@ steps: - command: | # Ubuntu 16.04 Build echo "+++ :hammer: Building" - ./scripts/eosio_build.sh -y -p + ./scripts/eosio_build.sh -y -P echo "--- :compression: Compressing build directory" tar -pczf build.tar.gz build/; if [ ! -f build.tar.gz ]; then echo "No TAR Found!" && exit 1; fi label: ":ubuntu: 16.04 Build" @@ -22,7 +22,7 @@ steps: - command: | # Ubuntu 18.04 Build echo "+++ :hammer: Building" - ./scripts/eosio_build.sh -y -p + ./scripts/eosio_build.sh -y -P echo "--- :compression: Compressing build directory" tar -pczf build.tar.gz build/; if [ ! -f build.tar.gz ]; then echo "No TAR Found!" && exit 1; fi label: ":ubuntu: 18.04 Build" @@ -43,7 +43,7 @@ steps: - command: | # CentOS 7 Build echo "+++ :hammer: Building" - ./scripts/eosio_build.sh -y -p + ./scripts/eosio_build.sh -y -P echo "--- :compression: Compressing build directory" tar -pczf build.tar.gz build/; if [ ! -f build.tar.gz ]; then echo "No TAR Found!" && exit 1; fi label: ":centos: 7 Build" @@ -64,7 +64,7 @@ steps: - command: | # Amazon Linux 2 Build echo "+++ :hammer: Building" - ./scripts/eosio_build.sh -y -p + ./scripts/eosio_build.sh -y -P echo "--- :compression: Compressing build directory" tar -pczf build.tar.gz build/; if [ ! -f build.tar.gz ]; then echo "No TAR Found!" 
&& exit 1; fi label: ":aws: 2 Build" @@ -87,7 +87,7 @@ steps: echo "--- Creating symbolic link to job directory :file_folder:" sleep 5 && ln -s "$(pwd)" /data/job && cd /data/job echo "+++ Building :hammer:" - ./scripts/eosio_build.sh -y -p + ./scripts/eosio_build.sh -y -P echo "--- Compressing build directory :compression:" tar -pczf build.tar.gz build/; if [ ! -f build.tar.gz ]; then echo "No TAR Found!" && exit 1; fi label: ":darwin: Mojave Build" From 0d7f14fe49d6feb7c154e5bbd8872a3c83b4e2b0 Mon Sep 17 00:00:00 2001 From: Bucky Kittinger Date: Fri, 19 Apr 2019 15:50:39 -0400 Subject: [PATCH 452/680] add missing include dir for linux --- CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 0de173dd054..241281e4b89 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -123,7 +123,7 @@ link_libraries(Threads::Threads) if(${EOSIO_PIN_COMPILER}) if(NOT APPLE AND UNIX) - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -nostdinc++ -nostdlib++ -I${LIBSTDCPP_DIR}/include/c++/7.1.0") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -nostdinc++ -nostdlib++ -I${LIBSTDCPP_DIR}/include/c++/7.1.0 -I${LIBSTDCPP_DIR}/include/c++/7.1.0/x86_64-pc-linux-gnu") set(CMAKE_CXX_STANDARD_LIBRARIES "${LIBSTDCPP_DIR}/lib64/libstdc++.a") else() set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -stdlib=libc++") From 10b8ae10d0c6f832f5e2d787f0f91295a1d2c2ae Mon Sep 17 00:00:00 2001 From: arhag Date: Fri, 19 Apr 2019 15:54:39 -0400 Subject: [PATCH 453/680] avoid checking if RAM_RESTRICTIONS is activated twice in apply_context::schedule_deferred_transaction --- libraries/chain/apply_context.cpp | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/libraries/chain/apply_context.cpp b/libraries/chain/apply_context.cpp index c77cbd98d43..37ad5a3762f 100644 --- a/libraries/chain/apply_context.cpp +++ b/libraries/chain/apply_context.cpp @@ -394,9 +394,11 @@ void apply_context::schedule_deferred_transaction( const uint128_t& sender_id, a auto delay = fc::seconds(trx.delay_sec); + bool ram_restrictions_activated = control.is_builtin_activated( builtin_protocol_feature_t::ram_restrictions ); + if( !control.skip_auth_check() && !privileged ) { // Do not need to check authorization if replayng irreversible block or if contract is privileged if( payer != receiver ) { - if( control.is_builtin_activated( builtin_protocol_feature_t::ram_restrictions ) ) { + if( ram_restrictions_activated ) { EOS_ASSERT( receiver == act->account, action_validate_exception, "cannot bill RAM usage of deferred transactions to another account within notify context" ); @@ -505,7 +507,7 @@ void apply_context::schedule_deferred_transaction( const uint128_t& sender_id, a } ); } - EOS_ASSERT( control.is_builtin_activated( builtin_protocol_feature_t::ram_restrictions ) + EOS_ASSERT( ram_restrictions_activated || control.is_ram_billing_in_notify_allowed() || (receiver == act->account) || (receiver == payer) || privileged, subjective_block_production_exception, From fc977c267f4280c24bd2697a23e5d02e24d1985f Mon Sep 17 00:00:00 2001 From: Bucky Kittinger Date: Fri, 19 Apr 2019 15:56:22 -0400 Subject: [PATCH 454/680] I was wrong still need the special case for libstdc++ --- programs/cleos/CMakeLists.txt | 6 ++++++ programs/keosd/CMakeLists.txt | 6 ++++++ programs/nodeos/CMakeLists.txt | 6 ++++++ unittests/CMakeLists.txt | 6 +++++- 4 files changed, 23 insertions(+), 1 deletion(-) diff --git a/programs/cleos/CMakeLists.txt b/programs/cleos/CMakeLists.txt index 0787c5fe937..28c0e07a14c 100644 --- 
a/programs/cleos/CMakeLists.txt +++ b/programs/cleos/CMakeLists.txt @@ -35,6 +35,12 @@ configure_file(config.hpp.in config.hpp ESCAPE_QUOTES) target_include_directories(${CLI_CLIENT_EXECUTABLE_NAME} PUBLIC ${Intl_INCLUDE_DIRS} ${CMAKE_CURRENT_BINARY_DIR} ${CMAKE_CURRENT_SOURCE_DIR}) +if(EOSIO_PIN_COMPILER) + if (NOT APPLE) + target_link_libraries(${CLI_CLIENT_EXECUTABLE_NAME} PRIVATE -nostdlib++) + endif() +endif() + target_link_libraries( ${CLI_CLIENT_EXECUTABLE_NAME} PRIVATE appbase chain_api_plugin producer_plugin chain_plugin http_plugin eosio_chain fc ${CMAKE_DL_LIBS} ${PLATFORM_SPECIFIC_LIBS} ${Intl_LIBRARIES} ) diff --git a/programs/keosd/CMakeLists.txt b/programs/keosd/CMakeLists.txt index 3c806fbed39..519de5db567 100644 --- a/programs/keosd/CMakeLists.txt +++ b/programs/keosd/CMakeLists.txt @@ -11,6 +11,12 @@ endif() configure_file(config.hpp.in config.hpp ESCAPE_QUOTES) +if(EOSIO_PIN_COMPILER) + if (NOT APPLE) + target_link_libraries(${KEY_STORE_EXECUTABLE_NAME} PRIVATE -nostdlib++) + endif() +endif() + target_link_libraries( ${KEY_STORE_EXECUTABLE_NAME} PRIVATE appbase PRIVATE wallet_api_plugin wallet_plugin diff --git a/programs/nodeos/CMakeLists.txt b/programs/nodeos/CMakeLists.txt index d5fe8273eb5..b499c68784d 100644 --- a/programs/nodeos/CMakeLists.txt +++ b/programs/nodeos/CMakeLists.txt @@ -70,6 +70,12 @@ if(BUILD_MONGO_DB_PLUGIN) target_link_libraries( ${NODE_EXECUTABLE_NAME} PRIVATE -Wl,${whole_archive_flag} mongo_db_plugin -Wl,${no_whole_archive_flag} ) endif() +if(EOSIO_PIN_COMPILER) + if (NOT APPLE) + target_link_libraries(${NODE_EXECUTABLE_NAME} PRIVATE -nostdlib++) + endif() +endif() + include(additionalPlugins) copy_bin( ${NODE_EXECUTABLE_NAME} ) diff --git a/unittests/CMakeLists.txt b/unittests/CMakeLists.txt index b0a08280336..85a33acddf9 100644 --- a/unittests/CMakeLists.txt +++ b/unittests/CMakeLists.txt @@ -43,7 +43,11 @@ configure_file(${CMAKE_CURRENT_SOURCE_DIR}/contracts.hpp.in ${CMAKE_CURRENT_BINA file(GLOB UNIT_TESTS "*.cpp") # find all unit test suites add_executable( unit_test ${UNIT_TESTS}) # build unit tests as one executable -target_link_libraries( unit_test eosio_chain chainbase eosio_testing fc appbase ${PLATFORM_SPECIFIC_LIBS} ) +if(EOSIO_PIN_COMPILER AND NOT APPLE) + target_link_libraries(unit_test PRIVATE -nostdlib++ PUBLIC eosio_chain chainbase eosio_testing fc appbase ${PLATFORM_SPECIFIC_LIBS}) +else() + target_link_libraries( unit_test eosio_chain chainbase eosio_testing fc appbase ${PLATFORM_SPECIFIC_LIBS} ) +endif() target_compile_options(unit_test PUBLIC -DDISABLE_EOSLIB_SERIALIZE) target_include_directories( unit_test PUBLIC From c491c182eebfc4e1360971f254e63b7b18703bb8 Mon Sep 17 00:00:00 2001 From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com> Date: Fri, 19 Apr 2019 14:02:00 -0400 Subject: [PATCH 455/680] Remove generation of debug info from WAVM code generation generation of debug info consumes a massive amount of memory and quite a bit of it seems to get lodged in the global llvmcontext meaning it becomes a leak in the current design of wavm. 
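(Rough sketch of the mechanism: LLVM uniques every piece of debug metadata
into the owning LLVMContext, so each per-op location the emitter set, e.g.

    // one DILocation node per wasm op, interned into -- and owned by --
    // the process-wide llvm::LLVMContext
    irBuilder.SetCurrentDebugLocation(
       llvm::DILocation::get(context, (unsigned int)opIndex++, 0, diFunction));

accumulates in that context, and wavm keeps a single global context alive for
the life of the process.)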
Removing this as we don't need it --- .../wasm-jit/Source/Runtime/LLVMEmitIR.cpp | 42 +------------------ libraries/wasm-jit/Source/Runtime/LLVMJIT.cpp | 23 +--------- libraries/wasm-jit/Source/Runtime/LLVMJIT.h | 2 - 3 files changed, 2 insertions(+), 65 deletions(-) diff --git a/libraries/wasm-jit/Source/Runtime/LLVMEmitIR.cpp b/libraries/wasm-jit/Source/Runtime/LLVMEmitIR.cpp index 5ca01bad510..2c002c497ea 100644 --- a/libraries/wasm-jit/Source/Runtime/LLVMEmitIR.cpp +++ b/libraries/wasm-jit/Source/Runtime/LLVMEmitIR.cpp @@ -27,12 +27,6 @@ namespace LLVMJIT llvm::Constant* defaultTableMaxElementIndex; llvm::Constant* defaultMemoryBase; llvm::Constant* defaultMemoryEndOffset; - - llvm::DIBuilder diBuilder; - llvm::DICompileUnit* diCompileUnit; - llvm::DIFile* diModuleScope; - - llvm::DIType* diValueTypes[(Uptr)ValueType::num]; llvm::MDNode* likelyFalseBranchWeights; llvm::MDNode* likelyTrueBranchWeights; @@ -41,20 +35,7 @@ namespace LLVMJIT : module(inModule) , moduleInstance(inModuleInstance) , llvmModule(new llvm::Module("",context)) - , diBuilder(*llvmModule) - { - diModuleScope = diBuilder.createFile("unknown","unknown"); - diCompileUnit = diBuilder.createCompileUnit(0xffff,diModuleScope,"WAVM",true,"",0); - - diValueTypes[(Uptr)ValueType::any] = nullptr; - diValueTypes[(Uptr)ValueType::i32] = diBuilder.createBasicType("i32",32,llvm::dwarf::DW_ATE_signed); - diValueTypes[(Uptr)ValueType::i64] = diBuilder.createBasicType("i64",64,llvm::dwarf::DW_ATE_signed); - diValueTypes[(Uptr)ValueType::f32] = diBuilder.createBasicType("f32",32,llvm::dwarf::DW_ATE_float); - diValueTypes[(Uptr)ValueType::f64] = diBuilder.createBasicType("f64",64,llvm::dwarf::DW_ATE_float); - #if ENABLE_SIMD_PROTOTYPE - diValueTypes[(Uptr)ValueType::v128] = diBuilder.createBasicType("v128",128,llvm::dwarf::DW_ATE_signed); - #endif - + { auto zeroAsMetadata = llvm::ConstantAsMetadata::get(emitLiteral(I32(0))); auto i32MaxAsMetadata = llvm::ConstantAsMetadata::get(emitLiteral(I32(INT32_MAX))); likelyFalseBranchWeights = llvm::MDTuple::getDistinct(context,{llvm::MDString::get(context,"branch_weights"),zeroAsMetadata,i32MaxAsMetadata}); @@ -1481,22 +1462,6 @@ namespace LLVMJIT void EmitFunctionContext::emit() { - // Create debug info for the function. - llvm::SmallVector diFunctionParameterTypes; - for(auto parameterType : functionType->parameters) { diFunctionParameterTypes.push_back(moduleContext.diValueTypes[(Uptr)parameterType]); } - auto diFunctionType = moduleContext.diBuilder.createSubroutineType(moduleContext.diBuilder.getOrCreateTypeArray(diFunctionParameterTypes)); - diFunction = moduleContext.diBuilder.createFunction( - moduleContext.diModuleScope, - functionInstance->debugName, - llvmFunction->getName(), - moduleContext.diModuleScope, - 0, - diFunctionType, - false, - true, - 0); - llvmFunction->setSubprogram(diFunction); - // Create the return basic block, and push the root control context for the function. 
auto returnBlock = llvm::BasicBlock::Create(context,"return",llvmFunction); auto returnPHI = createPHI(returnBlock,functionType->ret); @@ -1544,10 +1509,8 @@ namespace LLVMJIT OperatorDecoderStream decoder(functionDef.code); UnreachableOpVisitor unreachableOpVisitor(*this); OperatorPrinter operatorPrinter(module,functionDef); - Uptr opIndex = 0; while(decoder && controlStack.size()) { - irBuilder.SetCurrentDebugLocation(llvm::DILocation::get(context,(unsigned int)opIndex++,0,diFunction)); if(ENABLE_LOGGING) { logOperator(decoder.decodeOpWithoutConsume(operatorPrinter)); @@ -1624,9 +1587,6 @@ namespace LLVMJIT // Compile each function in the module. for(Uptr functionDefIndex = 0;functionDefIndex < module.functions.defs.size();++functionDefIndex) { EmitFunctionContext(*this,module,module.functions.defs[functionDefIndex],moduleInstance->functionDefs[functionDefIndex],functionDefs[functionDefIndex]).emit(); } - - // Finalize the debug info. - diBuilder.finalize(); Timing::logRatePerSecond("Emitted LLVM IR",emitTimer,(F64)llvmModule->size(),"functions"); diff --git a/libraries/wasm-jit/Source/Runtime/LLVMJIT.cpp b/libraries/wasm-jit/Source/Runtime/LLVMJIT.cpp index 98fbe2fac9e..18cf2f4cfb1 100644 --- a/libraries/wasm-jit/Source/Runtime/LLVMJIT.cpp +++ b/libraries/wasm-jit/Source/Runtime/LLVMJIT.cpp @@ -290,13 +290,6 @@ namespace LLVMJIT JITModule(ModuleInstance* inModuleInstance): moduleInstance(inModuleInstance) {} ~JITModule() override { - // Delete the module's symbols, and remove them from the global address-to-symbol map. - Platform::Lock addressToSymbolMapLock(addressToSymbolMapMutex); - for(auto symbol : functionDefSymbols) - { - addressToSymbolMap.erase(addressToSymbolMap.find(symbol->baseAddress + symbol->numBytes)); - delete symbol; - } } void notifySymbolLoaded(const char* name,Uptr baseAddress,Uptr numBytes,std::map&& offsetToOpIndexMap) override @@ -308,14 +301,7 @@ namespace LLVMJIT WAVM_ASSERT_THROW(moduleInstance); WAVM_ASSERT_THROW(functionDefIndex < moduleInstance->functionDefs.size()); FunctionInstance* functionInstance = moduleInstance->functionDefs[functionDefIndex]; - auto symbol = new JITSymbol(functionInstance,baseAddress,numBytes,std::move(offsetToOpIndexMap)); - functionDefSymbols.push_back(symbol); functionInstance->nativeFunction = reinterpret_cast(baseAddress); - - { - Platform::Lock addressToSymbolMapLock(addressToSymbolMapMutex); - addressToSymbolMap[baseAddress + numBytes] = symbol; - } } } }; @@ -492,9 +478,6 @@ namespace LLVMJIT llvm::object::ObjectFile* object = jitUnit->loadedObjects[objectIndex].object; llvm::RuntimeDyld::LoadedObjectInfo* loadedObject = jitUnit->loadedObjects[objectIndex].loadedObject; - // Create a DWARF context to interpret the debug information in this compilation unit. - auto dwarfContext = llvm::make_unique(*object,loadedObject); - // Iterate over the functions in the loaded object. for(auto symbolSizePair : llvm::object::computeSymbolSizes(*object)) { @@ -514,16 +497,12 @@ namespace LLVMJIT loadedAddress += (Uptr)loadedObject->getSectionLoadAddress(*symbolSection.get()); } - // Get the DWARF line info for this symbol, which maps machine code addresses to WebAssembly op indices. 
- llvm::DILineInfoTable lineInfoTable = dwarfContext->getLineInfoForAddressRange(loadedAddress,symbolSizePair.second); - std::map offsetToOpIndexMap; - for(auto lineInfo : lineInfoTable) { offsetToOpIndexMap.emplace(U32(lineInfo.first - loadedAddress),lineInfo.second.Line); } - #if PRINT_DISASSEMBLY Log::printf(Log::Category::error,"Disassembly for function %s\n",name.get().data()); disassembleFunction(reinterpret_cast(loadedAddress),Uptr(symbolSizePair.second)); #endif + std::map offsetToOpIndexMap; // Notify the JIT unit that the symbol was loaded. WAVM_ASSERT_THROW(symbolSizePair.second <= UINTPTR_MAX); jitUnit->notifySymbolLoaded( diff --git a/libraries/wasm-jit/Source/Runtime/LLVMJIT.h b/libraries/wasm-jit/Source/Runtime/LLVMJIT.h index afadb6053e4..cad02a101ed 100644 --- a/libraries/wasm-jit/Source/Runtime/LLVMJIT.h +++ b/libraries/wasm-jit/Source/Runtime/LLVMJIT.h @@ -49,8 +49,6 @@ #include "llvm/Support/DynamicLibrary.h" #include "llvm/Transforms/Scalar.h" #include "llvm/IR/DIBuilder.h" -#include "llvm/DebugInfo/DIContext.h" -#include "llvm/DebugInfo/DWARF/DWARFContext.h" #include #include #include From 060e8f1ff0f389bde93a11b341e738f6052d0c3f Mon Sep 17 00:00:00 2001 From: arhag Date: Fri, 19 Apr 2019 16:17:15 -0400 Subject: [PATCH 456/680] use custom pipeline branch which uses persistent nodes supporting protocol feature changes for the sync node tests --- .pipelinebranch | 1 + 1 file changed, 1 insertion(+) create mode 100644 .pipelinebranch diff --git a/.pipelinebranch b/.pipelinebranch new file mode 100644 index 00000000000..089fa42ccc2 --- /dev/null +++ b/.pipelinebranch @@ -0,0 +1 @@ +use-protocol-features-sync-nodes From 093bdafa81e841f1f43ef6c5a5f95f0e0ec97245 Mon Sep 17 00:00:00 2001 From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com> Date: Fri, 19 Apr 2019 17:18:28 -0400 Subject: [PATCH 457/680] don't hold on to wavm Module object any longer than needed After creating a ModuleInstance from a Module, we really don't need to hold on to the Module any longer. It's a memory sink. Refactor the one instance we needed the Module and free it after that --- libraries/chain/webassembly/wavm.cpp | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/libraries/chain/webassembly/wavm.cpp b/libraries/chain/webassembly/wavm.cpp index febd2ec8b07..cd731bd4f8f 100644 --- a/libraries/chain/webassembly/wavm.cpp +++ b/libraries/chain/webassembly/wavm.cpp @@ -25,9 +25,14 @@ class wavm_instantiated_module : public wasm_instantiated_module_interface { public: wavm_instantiated_module(ModuleInstance* instance, std::unique_ptr module, std::vector initial_mem) : _initial_memory(initial_mem), - _instance(instance), - _module(std::move(module)) - {} + _instance(instance) + { + //The memory instance is reused across all wavm_instantiated_modules, but for wasm instances + // that didn't declare "memory", getDefaultMemory() won't see it. 
It would also be possible + // to say something like if(module->memories.size()) here I believe + if(getDefaultMemory(_instance)) + _initial_memory_config = module->memories.defs[0].type; + } void apply(apply_context& context) override { vector args = {Value(uint64_t(context.receiver)), @@ -52,7 +57,7 @@ class wavm_instantiated_module : public wasm_instantiated_module_interface { if(default_mem) { //reset memory resizes the sandbox'ed memory to the module's init memory size and then // (effectively) memzeros it all - resetMemory(default_mem, _module->memories.defs[0].type); + resetMemory(default_mem, _initial_memory_config); char* memstart = &memoryRef(getDefaultMemory(_instance), 0); memcpy(memstart, _initial_memory.data(), _initial_memory.size()); @@ -78,7 +83,7 @@ class wavm_instantiated_module : public wasm_instantiated_module_interface { //naked pointer because ModuleInstance is opaque //_instance is deleted via WAVM's object garbage collection when wavm_rutime is deleted ModuleInstance* _instance; - std::unique_ptr _module; + MemoryType _initial_memory_config; }; From 57e44cbeaa6b044401ab7650b83691941bbbafd0 Mon Sep 17 00:00:00 2001 From: arhag Date: Fri, 19 Apr 2019 19:22:38 -0400 Subject: [PATCH 458/680] immediately return created snapshot in irreversible mode (no need to wait for new blocks to be produced) --- plugins/producer_plugin/producer_plugin.cpp | 73 +++++++++++++-------- 1 file changed, 47 insertions(+), 26 deletions(-) diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp index 6c8ab4a2ce8..b91968c785e 100644 --- a/plugins/producer_plugin/producer_plugin.cpp +++ b/plugins/producer_plugin/producer_plugin.cpp @@ -1023,15 +1023,57 @@ void producer_plugin::create_snapshot(producer_plugin::next_functionchain_plug->chain(); auto head_id = chain.head_block_id(); - std::string snapshot_path = (pending_snapshot::get_final_path(head_id, my->_snapshots_dir)).generic_string(); + const auto& snapshot_path = pending_snapshot::get_final_path(head_id, my->_snapshots_dir); + const auto& temp_path = pending_snapshot::get_temp_path(head_id, my->_snapshots_dir); // maintain legacy exception if the snapshot exists if( fc::is_regular_file(snapshot_path) ) { - auto ex = snapshot_exists_exception( FC_LOG_MESSAGE( error, "snapshot named ${name} already exists", ("name", snapshot_path) ) ); + auto ex = snapshot_exists_exception( FC_LOG_MESSAGE( error, "snapshot named ${name} already exists", ("name", snapshot_path.generic_string()) ) ); next(ex.dynamic_copy_exception()); return; } + auto write_snapshot = [&]( const bfs::path& p ) -> void { + auto reschedule = fc::make_scoped_exit([this](){ + my->schedule_production_loop(); + }); + + if (chain.is_building_block()) { + // abort the pending block + chain.abort_block(); + } else { + reschedule.cancel(); + } + + // create the snapshot + auto snap_out = std::ofstream(p.generic_string(), (std::ios::out | std::ios::binary)); + auto writer = std::make_shared(snap_out); + chain.write_snapshot(writer); + writer->finalize(); + snap_out.flush(); + snap_out.close(); + }; + + // If in irreversible mode, create snapshot and return path to snapshot immediately. 
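+   // (Why the early return is safe: under read-mode = irreversible the head
+   // block is already final, so the pending-snapshot path further below --
+   // which parks the file and only calls next() once the block becomes
+   // irreversible -- would just be waiting on new blocks for no reason.)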
+ if( chain.get_read_mode() == db_read_mode::IRREVERSIBLE ) { + try { + write_snapshot( temp_path ); + + boost::system::error_code ec; + bfs::rename(temp_path, snapshot_path, ec); + EOS_ASSERT(!ec, snapshot_finalization_exception, + "Unable to finalize valid snapshot of block number ${bn}: [code: ${ec}] ${message}", + ("bn", chain.head_block_num()) + ("ec", ec.value()) + ("message", ec.message())); + + next( producer_plugin::snapshot_information{head_id, snapshot_path.generic_string()} ); + } CATCH_AND_CALL (next); + return; + } + + // Otherwise, the result will be returned when the snapshot becomes irreversible. + // determine if this snapshot is already in-flight auto& pending_by_id = my->_pending_snapshot_index.get(); auto existing = pending_by_id.find(head_id); @@ -1044,31 +1086,10 @@ void producer_plugin::create_snapshot(producer_plugin::next_function_snapshots_dir)).generic_string(); - std::string pending_path = (pending_snapshot::get_pending_path(head_id, my->_snapshots_dir)).generic_string(); - std::string final_path = (pending_snapshot::get_final_path(head_id, my->_snapshots_dir)).generic_string(); - bool written = false; + const auto& pending_path = pending_snapshot::get_pending_path(head_id, my->_snapshots_dir); try { - auto reschedule = fc::make_scoped_exit([this](){ - my->schedule_production_loop(); - }); - - if (chain.is_building_block()) { - // abort the pending block - chain.abort_block(); - } else { - reschedule.cancel(); - } - - // create a new pending snapshot - auto snap_out = std::ofstream(temp_path, (std::ios::out | std::ios::binary)); - auto writer = std::make_shared(snap_out); - chain.write_snapshot(writer); - writer->finalize(); - snap_out.flush(); - snap_out.close(); + write_snapshot( temp_path ); // create a new pending snapshot boost::system::error_code ec; bfs::rename(temp_path, pending_path, ec); @@ -1078,7 +1099,7 @@ void producer_plugin::create_snapshot(producer_plugin::next_function_pending_snapshot_index.emplace(head_id, next, pending_path, final_path); + my->_pending_snapshot_index.emplace(head_id, next, pending_path.generic_string(), snapshot_path.generic_string()); } CATCH_AND_CALL (next); } } From aa6f647dd2ffab30ea09bf0c167591788c9c253f Mon Sep 17 00:00:00 2001 From: arhag Date: Fri, 19 Apr 2019 20:09:53 -0400 Subject: [PATCH 459/680] create snapshots directory if it does not already exist. 
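
The guard amounts to the following (a minimal sketch -- the two inserted lines
are in the diff below; fc's filesystem helpers are assumed to be available
here, as elsewhere in this plugin):

    // make sure the configured snapshots directory exists before
    // create_snapshot() tries to open a snapshot file inside it
    if( !fc::is_directory( my->_snapshots_dir ) )
       fc::create_directories( my->_snapshots_dir );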
--- plugins/producer_plugin/producer_plugin.cpp | 2 ++ 1 file changed, 2 insertions(+) diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp index b91968c785e..cb6eb48504e 100644 --- a/plugins/producer_plugin/producer_plugin.cpp +++ b/plugins/producer_plugin/producer_plugin.cpp @@ -1045,6 +1045,8 @@ void producer_plugin::create_snapshot(producer_plugin::next_function(snap_out); From bc532d9628280f8813914d7d3580244bcf642fd4 Mon Sep 17 00:00:00 2001 From: Bucky Kittinger Date: Fri, 19 Apr 2019 20:39:59 -0400 Subject: [PATCH 460/680] Still not statically linking after merge --- CMakeLists.txt | 7 ++++++- programs/cleos/CMakeLists.txt | 6 ------ programs/keosd/CMakeLists.txt | 6 ------ programs/nodeos/CMakeLists.txt | 6 ------ unittests/CMakeLists.txt | 6 +----- 5 files changed, 7 insertions(+), 24 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 241281e4b89..8574cf508df 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -123,8 +123,13 @@ link_libraries(Threads::Threads) if(${EOSIO_PIN_COMPILER}) if(NOT APPLE AND UNIX) + execute_process(COMMAND ${CMAKE_CXX_COMPILER} --print-search-dirs OUTPUT_VARIABLE LIB_PATHS) + string(REGEX REPLACE "programs.*libraries: =" "" LIB_PATHS ${LIB_PATHS}) + string(REGEX REPLACE ":" ";" LIB_PATHS ${LIB_PATHS}) + find_library(LIBGCC_LIBRARY NAMES libgcc_s.so HINTS ${LIB_PATHS}) + get_filename_component(LIBGCC_PATH ${LIBGCC_LIBRARY} DIRECTORY) set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -nostdinc++ -nostdlib++ -I${LIBSTDCPP_DIR}/include/c++/7.1.0 -I${LIBSTDCPP_DIR}/include/c++/7.1.0/x86_64-pc-linux-gnu") - set(CMAKE_CXX_STANDARD_LIBRARIES "${LIBSTDCPP_DIR}/lib64/libstdc++.a") + set(CMAKE_CXX_STANDARD_LIBRARIES "${LIBSTDCPP_DIR}/lib64/libstdc++.a ${LIBSTDCPP_DIR}/lib64/libsupc++.a -L${LIBGCC_PATH} -Wl,-nostdlib++ -Wl,-Bdynamic,-lgcc_s") else() set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -stdlib=libc++") set(CMAKE_CXX_STANDARD_LIBRARIES "${LIBCPP_DIR}/lib/libc++.a ${LIBCPP_DIR}/lib/libc++abi.a") diff --git a/programs/cleos/CMakeLists.txt b/programs/cleos/CMakeLists.txt index 28c0e07a14c..0787c5fe937 100644 --- a/programs/cleos/CMakeLists.txt +++ b/programs/cleos/CMakeLists.txt @@ -35,12 +35,6 @@ configure_file(config.hpp.in config.hpp ESCAPE_QUOTES) target_include_directories(${CLI_CLIENT_EXECUTABLE_NAME} PUBLIC ${Intl_INCLUDE_DIRS} ${CMAKE_CURRENT_BINARY_DIR} ${CMAKE_CURRENT_SOURCE_DIR}) -if(EOSIO_PIN_COMPILER) - if (NOT APPLE) - target_link_libraries(${CLI_CLIENT_EXECUTABLE_NAME} PRIVATE -nostdlib++) - endif() -endif() - target_link_libraries( ${CLI_CLIENT_EXECUTABLE_NAME} PRIVATE appbase chain_api_plugin producer_plugin chain_plugin http_plugin eosio_chain fc ${CMAKE_DL_LIBS} ${PLATFORM_SPECIFIC_LIBS} ${Intl_LIBRARIES} ) diff --git a/programs/keosd/CMakeLists.txt b/programs/keosd/CMakeLists.txt index 519de5db567..3c806fbed39 100644 --- a/programs/keosd/CMakeLists.txt +++ b/programs/keosd/CMakeLists.txt @@ -11,12 +11,6 @@ endif() configure_file(config.hpp.in config.hpp ESCAPE_QUOTES) -if(EOSIO_PIN_COMPILER) - if (NOT APPLE) - target_link_libraries(${KEY_STORE_EXECUTABLE_NAME} PRIVATE -nostdlib++) - endif() -endif() - target_link_libraries( ${KEY_STORE_EXECUTABLE_NAME} PRIVATE appbase PRIVATE wallet_api_plugin wallet_plugin diff --git a/programs/nodeos/CMakeLists.txt b/programs/nodeos/CMakeLists.txt index b499c68784d..d5fe8273eb5 100644 --- a/programs/nodeos/CMakeLists.txt +++ b/programs/nodeos/CMakeLists.txt @@ -70,12 +70,6 @@ if(BUILD_MONGO_DB_PLUGIN) target_link_libraries( ${NODE_EXECUTABLE_NAME} PRIVATE 
-Wl,${whole_archive_flag} mongo_db_plugin -Wl,${no_whole_archive_flag} ) endif() -if(EOSIO_PIN_COMPILER) - if (NOT APPLE) - target_link_libraries(${NODE_EXECUTABLE_NAME} PRIVATE -nostdlib++) - endif() -endif() - include(additionalPlugins) copy_bin( ${NODE_EXECUTABLE_NAME} ) diff --git a/unittests/CMakeLists.txt b/unittests/CMakeLists.txt index 85a33acddf9..b0a08280336 100644 --- a/unittests/CMakeLists.txt +++ b/unittests/CMakeLists.txt @@ -43,11 +43,7 @@ configure_file(${CMAKE_CURRENT_SOURCE_DIR}/contracts.hpp.in ${CMAKE_CURRENT_BINA file(GLOB UNIT_TESTS "*.cpp") # find all unit test suites add_executable( unit_test ${UNIT_TESTS}) # build unit tests as one executable -if(EOSIO_PIN_COMPILER AND NOT APPLE) - target_link_libraries(unit_test PRIVATE -nostdlib++ PUBLIC eosio_chain chainbase eosio_testing fc appbase ${PLATFORM_SPECIFIC_LIBS}) -else() - target_link_libraries( unit_test eosio_chain chainbase eosio_testing fc appbase ${PLATFORM_SPECIFIC_LIBS} ) -endif() +target_link_libraries( unit_test eosio_chain chainbase eosio_testing fc appbase ${PLATFORM_SPECIFIC_LIBS} ) target_compile_options(unit_test PUBLIC -DDISABLE_EOSLIB_SERIALIZE) target_include_directories( unit_test PUBLIC From 16c7fcbab0ad8758ed517b7d7a51ce8a3c1036ec Mon Sep 17 00:00:00 2001 From: arhag Date: Fri, 19 Apr 2019 22:01:54 -0400 Subject: [PATCH 461/680] augment protocol_features_tests/no_duplicate_deferred_id_test to ensure that deferred transactions created prior to the activation of the NO_DUPLICATE_DEFERRED_ID protocol feature are able to successfully retire after its activation --- unittests/protocol_feature_tests.cpp | 56 ++++++++++++++++++++++++---- 1 file changed, 49 insertions(+), 7 deletions(-) diff --git a/unittests/protocol_feature_tests.cpp b/unittests/protocol_feature_tests.cpp index 9e46a5dc876..cd20e6cd74f 100644 --- a/unittests/protocol_feature_tests.cpp +++ b/unittests/protocol_feature_tests.cpp @@ -540,7 +540,7 @@ BOOST_AUTO_TEST_CASE( no_duplicate_deferred_id_test ) try { c2.produce_empty_block( fc::minutes(10) ); transaction_trace_ptr trace0; - auto h = c2.control->applied_transaction.connect( [&](std::tuple x) { + auto h2 = c2.control->applied_transaction.connect( [&](std::tuple x) { auto& t = std::get<0>(x); if( t && t->receipt && t->receipt->status == transaction_receipt::expired) { trace0 = t; @@ -549,12 +549,35 @@ BOOST_AUTO_TEST_CASE( no_duplicate_deferred_id_test ) try { c2.produce_block(); - h.disconnect(); + h2.disconnect(); BOOST_REQUIRE( trace0 ); c.produce_block(); + const auto& index = c.control->db().get_index(); + + transaction_trace_ptr trace1; + auto h = c.control->applied_transaction.connect( [&](std::tuple x) { + auto& t = std::get<0>(x); + if( t && t->receipt && t->receipt->status == transaction_receipt::executed) { + trace1 = t; + } + } ); + + BOOST_REQUIRE_EQUAL(0, index.size()); + + c.push_action( config::system_account_name, N(reqauth), N(alice), fc::mutable_variant_object() + ("from", "alice"), + 5, 2 + ); + + BOOST_REQUIRE_EQUAL(1, index.size()); + + c.produce_block(); + + BOOST_REQUIRE_EQUAL(1, index.size()); + const auto& pfm = c.control->get_protocol_feature_manager(); auto d1 = pfm.get_builtin_digest( builtin_protocol_feature_t::replace_deferred ); @@ -562,10 +585,29 @@ BOOST_AUTO_TEST_CASE( no_duplicate_deferred_id_test ) try { auto d2 = pfm.get_builtin_digest( builtin_protocol_feature_t::no_duplicate_deferred_id ); BOOST_REQUIRE( d2 ); + c.push_action( N(test), N(defercall), N(alice), fc::mutable_variant_object() + ("payer", "alice") + ("sender_id", 1) + ("contract", 
"test") + ("payload", 42) + ); + BOOST_REQUIRE_EQUAL(2, index.size()); + c.preactivate_protocol_features( {*d1, *d2} ); c.produce_block(); + // The deferred transaction with payload 42 that was scheduled prior to the activation of the protocol features should now be retired. + + BOOST_REQUIRE( trace1 ); + BOOST_REQUIRE_EQUAL(1, index.size()); - auto& index = c.control->db().get_index(); + trace1 = nullptr; + + // Retire the delayed eosio::reqauth transaction. + c.produce_blocks(5); + BOOST_REQUIRE( trace1 ); + BOOST_REQUIRE_EQUAL(0, index.size()); + + h.disconnect(); auto check_generation_context = []( auto&& data, const transaction_id_type& sender_trx_id, @@ -607,7 +649,7 @@ BOOST_AUTO_TEST_CASE( no_duplicate_deferred_id_test ) try { BOOST_REQUIRE_EQUAL(0, index.size()); - auto trace1 = c.push_action( N(test), N(defercall), N(alice), fc::mutable_variant_object() + auto trace2 = c.push_action( N(test), N(defercall), N(alice), fc::mutable_variant_object() ("payer", "alice") ("sender_id", 1) ("contract", "test") @@ -617,7 +659,7 @@ BOOST_AUTO_TEST_CASE( no_duplicate_deferred_id_test ) try { BOOST_REQUIRE_EQUAL(1, index.size()); check_generation_context( index.begin()->packed_trx, - trace1->id, + trace2->id, ((static_cast(N(alice)) << 64) | 1), N(test) ); @@ -625,7 +667,7 @@ BOOST_AUTO_TEST_CASE( no_duplicate_deferred_id_test ) try { BOOST_REQUIRE_EQUAL(0, index.size()); - auto trace2 = c.push_action( N(test), N(defercall), N(alice), fc::mutable_variant_object() + auto trace3 = c.push_action( N(test), N(defercall), N(alice), fc::mutable_variant_object() ("payer", "alice") ("sender_id", 1) ("contract", "test") @@ -635,7 +677,7 @@ BOOST_AUTO_TEST_CASE( no_duplicate_deferred_id_test ) try { BOOST_REQUIRE_EQUAL(1, index.size()); check_generation_context( index.begin()->packed_trx, - trace2->id, + trace3->id, ((static_cast(N(alice)) << 64) | 1), N(test) ); From 4ac75707d3f2a562fdf9920fda8ed54c88912054 Mon Sep 17 00:00:00 2001 From: Bucky Kittinger Date: Sat, 20 Apr 2019 02:42:34 -0400 Subject: [PATCH 462/680] hopefully finished --- CMakeLists.txt | 15 --------------- programs/nodeos/CMakeLists.txt | 6 ++++++ scripts/eosio_build.sh | 23 +++++++++++++++++------ 3 files changed, 23 insertions(+), 21 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 8574cf508df..fe8d020167e 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -121,21 +121,6 @@ set(THREADS_PREFER_PTHREAD_FLAG 1) find_package(Threads) link_libraries(Threads::Threads) -if(${EOSIO_PIN_COMPILER}) - if(NOT APPLE AND UNIX) - execute_process(COMMAND ${CMAKE_CXX_COMPILER} --print-search-dirs OUTPUT_VARIABLE LIB_PATHS) - string(REGEX REPLACE "programs.*libraries: =" "" LIB_PATHS ${LIB_PATHS}) - string(REGEX REPLACE ":" ";" LIB_PATHS ${LIB_PATHS}) - find_library(LIBGCC_LIBRARY NAMES libgcc_s.so HINTS ${LIB_PATHS}) - get_filename_component(LIBGCC_PATH ${LIBGCC_LIBRARY} DIRECTORY) - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -nostdinc++ -nostdlib++ -I${LIBSTDCPP_DIR}/include/c++/7.1.0 -I${LIBSTDCPP_DIR}/include/c++/7.1.0/x86_64-pc-linux-gnu") - set(CMAKE_CXX_STANDARD_LIBRARIES "${LIBSTDCPP_DIR}/lib64/libstdc++.a ${LIBSTDCPP_DIR}/lib64/libsupc++.a -L${LIBGCC_PATH} -Wl,-nostdlib++ -Wl,-Bdynamic,-lgcc_s") - else() - set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -stdlib=libc++") - set(CMAKE_CXX_STANDARD_LIBRARIES "${LIBCPP_DIR}/lib/libc++.a ${LIBCPP_DIR}/lib/libc++abi.a") - endif() -endif() - if( WIN32 ) message( STATUS "Configuring EOSIO on WIN32") diff --git a/programs/nodeos/CMakeLists.txt b/programs/nodeos/CMakeLists.txt index 
d5fe8273eb5..5d7b12d6cad 100644 --- a/programs/nodeos/CMakeLists.txt +++ b/programs/nodeos/CMakeLists.txt @@ -70,6 +70,12 @@ if(BUILD_MONGO_DB_PLUGIN) target_link_libraries( ${NODE_EXECUTABLE_NAME} PRIVATE -Wl,${whole_archive_flag} mongo_db_plugin -Wl,${no_whole_archive_flag} ) endif() +if(EOSIO_PIN_COMPILER) + if(NOT APPLE) + target_link_libraries( ${NODE_EXECUTABLE_NAME} PRIVATE -nostdlib++ ) + endif() +endif() + include(additionalPlugins) copy_bin( ${NODE_EXECUTABLE_NAME} ) diff --git a/scripts/eosio_build.sh b/scripts/eosio_build.sh index 575aa12199a..b222e6453b3 100755 --- a/scripts/eosio_build.sh +++ b/scripts/eosio_build.sh @@ -350,12 +350,23 @@ printf "## ENABLE_COVERAGE_TESTING=%s\\n" "${ENABLE_COVERAGE_TESTING}" mkdir -p $BUILD_DIR cd $BUILD_DIR -$CMAKE -DCMAKE_BUILD_TYPE="${CMAKE_BUILD_TYPE}" -DCMAKE_CXX_COMPILER="${CXX}" \ - -DCMAKE_C_COMPILER="${CC}" -DCORE_SYMBOL_NAME="${CORE_SYMBOL_NAME}" \ - -DOPENSSL_ROOT_DIR="${OPENSSL_ROOT_DIR}" -DBUILD_MONGO_DB_PLUGIN=true \ - -DENABLE_COVERAGE_TESTING="${ENABLE_COVERAGE_TESTING}" -DBUILD_DOXYGEN="${DOXYGEN}" \ - -DCMAKE_PREFIX_PATH=$PREFIX \ - -DCMAKE_INSTALL_PREFIX=$OPT_LOCATION/eosio $LOCAL_CMAKE_FLAGS $PIN_COMPILER_CMAKE "${REPO_ROOT}" + +if $PIN_COMPILER; then + sed -e "s~@~$OPT_LOCATION~g" ../scripts/pinned_toolchain.cmake &> $BUILD_DIR/pinned_toolchain.cmake + $CMAKE -DCMAKE_BUILD_TYPE="${CMAKE_BUILD_TYPE}" -DCMAKE_TOOLCHAIN_FILE=$BUILD_DIR/pinned_toolchain.cmake \ + -DCORE_SYMBOL_NAME="${CORE_SYMBOL_NAME}" \ + -DOPENSSL_ROOT_DIR="${OPENSSL_ROOT_DIR}" -DBUILD_MONGO_DB_PLUGIN=true \ + -DENABLE_COVERAGE_TESTING="${ENABLE_COVERAGE_TESTING}" -DBUILD_DOXYGEN="${DOXYGEN}" \ + -DCMAKE_PREFIX_PATH=$PREFIX \ + -DCMAKE_INSTALL_PREFIX=$OPT_LOCATION/eosio $LOCAL_CMAKE_FLAGS -DEOSIO_PIN_COMPILER=1 "${REPO_ROOT}" +else + $CMAKE -DCMAKE_BUILD_TYPE="${CMAKE_BUILD_TYPE}" -DCMAKE_CXX_COMPILER="${CXX}" \ + -DCMAKE_C_COMPILER="${CC}" -DCORE_SYMBOL_NAME="${CORE_SYMBOL_NAME}" \ + -DOPENSSL_ROOT_DIR="${OPENSSL_ROOT_DIR}" -DBUILD_MONGO_DB_PLUGIN=true \ + -DENABLE_COVERAGE_TESTING="${ENABLE_COVERAGE_TESTING}" -DBUILD_DOXYGEN="${DOXYGEN}" \ + -DCMAKE_PREFIX_PATH=$PREFIX \ + -DCMAKE_INSTALL_PREFIX=$OPT_LOCATION/eosio $LOCAL_CMAKE_FLAGS $PIN_COMPILER_CMAKE "${REPO_ROOT}" +fi if [ $? 
-ne 0 ]; then exit -1; fi make -j"${JOBS}" From 6bb66a65bb17706e5e049113783ef3add10d4095 Mon Sep 17 00:00:00 2001 From: Bucky Kittinger Date: Sat, 20 Apr 2019 02:45:49 -0400 Subject: [PATCH 463/680] Update CMakeLists.txt --- programs/nodeos/CMakeLists.txt | 6 ------ 1 file changed, 6 deletions(-) diff --git a/programs/nodeos/CMakeLists.txt b/programs/nodeos/CMakeLists.txt index 5d7b12d6cad..d5fe8273eb5 100644 --- a/programs/nodeos/CMakeLists.txt +++ b/programs/nodeos/CMakeLists.txt @@ -70,12 +70,6 @@ if(BUILD_MONGO_DB_PLUGIN) target_link_libraries( ${NODE_EXECUTABLE_NAME} PRIVATE -Wl,${whole_archive_flag} mongo_db_plugin -Wl,${no_whole_archive_flag} ) endif() -if(EOSIO_PIN_COMPILER) - if(NOT APPLE) - target_link_libraries( ${NODE_EXECUTABLE_NAME} PRIVATE -nostdlib++ ) - endif() -endif() - include(additionalPlugins) copy_bin( ${NODE_EXECUTABLE_NAME} ) From f630b3de2f5b7f02e52b2e8428676da226a41c49 Mon Sep 17 00:00:00 2001 From: Bucky Kittinger Date: Sat, 20 Apr 2019 02:46:57 -0400 Subject: [PATCH 464/680] Update eosio_build_fedora.sh --- scripts/eosio_build_fedora.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/eosio_build_fedora.sh b/scripts/eosio_build_fedora.sh index 64eeb74d14d..264ca3e8df0 100755 --- a/scripts/eosio_build_fedora.sh +++ b/scripts/eosio_build_fedora.sh @@ -43,7 +43,7 @@ printf "Disk space available: ${DISK_AVAIL%.*}G\\n" # llvm is symlinked from /usr/lib64/llvm4.0 into user's home DEP_ARRAY=( git sudo procps-ng which gcc gcc-c++ autoconf automake libtool make \ - bzip2-devel curl -LO bzip2 compat-openssl10 graphviz doxygen \ + bzip2-devel curl bzip2 compat-openssl10 graphviz doxygen \ openssl-devel gmp-devel libstdc++-devel python2 python2-devel python3 python3-devel \ libedit ncurses-devel swig llvm4.0 llvm4.0-devel llvm4.0-libs llvm4.0-static libcurl-devel libusb-devel ) From 64020d6a16afbad77b04fd70c79f3889294ee302 Mon Sep 17 00:00:00 2001 From: Bucky Kittinger Date: Sat, 20 Apr 2019 04:00:54 -0400 Subject: [PATCH 465/680] use script directory variable --- scripts/eosio_build.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/eosio_build.sh b/scripts/eosio_build.sh index b222e6453b3..a76f8974248 100755 --- a/scripts/eosio_build.sh +++ b/scripts/eosio_build.sh @@ -352,7 +352,7 @@ cd $BUILD_DIR if $PIN_COMPILER; then - sed -e "s~@~$OPT_LOCATION~g" ../scripts/pinned_toolchain.cmake &> $BUILD_DIR/pinned_toolchain.cmake + sed -e "s~@~$OPT_LOCATION~g" $SCRIPT_DIR/pinned_toolchain.cmake &> $BUILD_DIR/pinned_toolchain.cmake $CMAKE -DCMAKE_BUILD_TYPE="${CMAKE_BUILD_TYPE}" -DCMAKE_TOOLCHAIN_FILE=$BUILD_DIR/pinned_toolchain.cmake \ -DCORE_SYMBOL_NAME="${CORE_SYMBOL_NAME}" \ -DOPENSSL_ROOT_DIR="${OPENSSL_ROOT_DIR}" -DBUILD_MONGO_DB_PLUGIN=true \ From 627d351ccc91df16df309b559e5b7017f5a8ed1e Mon Sep 17 00:00:00 2001 From: Bucky Kittinger Date: Sat, 20 Apr 2019 15:05:41 -0400 Subject: [PATCH 466/680] gitignore missed toolchain file --- scripts/pinned_toolchain.cmake | 15 +++++++++++++++ 1 file changed, 15 insertions(+) create mode 100644 scripts/pinned_toolchain.cmake diff --git a/scripts/pinned_toolchain.cmake b/scripts/pinned_toolchain.cmake new file mode 100644 index 00000000000..7033b20d2ac --- /dev/null +++ b/scripts/pinned_toolchain.cmake @@ -0,0 +1,15 @@ +set(OPT_PATH @) +set(CMAKE_C_COMPILER_WORKS 1) +set(CMAKE_CXX_COMPILER_WORKS 1) +set(CMAKE_C_COMPILER ${OPT_PATH}/clang8/bin/clang) +set(CMAKE_CXX_COMPILER ${OPT_PATH}/clang8/bin/clang++) +set(CMAKE_CXX_STANDARD_INCLUDE_DIRECTORIES ${OPT_PATH}/gcc/include/c++/7.1.0 
${OPT_PATH}/gcc/include/c++/7.1.0/x86_64-pc-linux-gnu) +set(CMAKE_CXX_FLAGS_INIT "-nostdinc++") + +if(NOT APPLE) + set(CMAKE_CXX_STANDARD_LIBRARIES "-Wl,-L${OPT_PATH}/gcc/lib64 -Wl,-Bstatic -lstdc++ -Wl,-Bdynamic -Wl,-nostdlib -Wl,--no-whole-archive ${OPT_PATH}/gcc/lib64/libstdc++.a -Wl,--no-whole-archive ${OPT_PATH}/gcc/lib64/libsupc++.a") +else() + set(CMAKE_SHARED_LINKER_FLAGS_INIT "-stdlib=libc++ -nostdlib++ -static-libstdc++ -shared-libgcc") + set(CMAKE_MODULE_LINKER_FLAGS_INIT "-stdlib=libc++ -nostdlib++ -static-libstdc++ -shared-libgcc") + set(CMAKE_CXX_STANDARD_LIBRARIES "${OPT_PATH}/clang8/lib/libc++.a ${OPT_PATH}/clang8/lib/libc++abi.a") +endif() From 128918d2603913dad9bf1874b6cf8887457fbfaf Mon Sep 17 00:00:00 2001 From: Bucky Kittinger Date: Sat, 20 Apr 2019 16:41:56 -0400 Subject: [PATCH 467/680] update toolchain file --- scripts/pinned_toolchain.cmake | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/scripts/pinned_toolchain.cmake b/scripts/pinned_toolchain.cmake index 7033b20d2ac..ee1fd65a4e1 100644 --- a/scripts/pinned_toolchain.cmake +++ b/scripts/pinned_toolchain.cmake @@ -3,13 +3,13 @@ set(CMAKE_C_COMPILER_WORKS 1) set(CMAKE_CXX_COMPILER_WORKS 1) set(CMAKE_C_COMPILER ${OPT_PATH}/clang8/bin/clang) set(CMAKE_CXX_COMPILER ${OPT_PATH}/clang8/bin/clang++) -set(CMAKE_CXX_STANDARD_INCLUDE_DIRECTORIES ${OPT_PATH}/gcc/include/c++/7.1.0 ${OPT_PATH}/gcc/include/c++/7.1.0/x86_64-pc-linux-gnu) -set(CMAKE_CXX_FLAGS_INIT "-nostdinc++") - -if(NOT APPLE) - set(CMAKE_CXX_STANDARD_LIBRARIES "-Wl,-L${OPT_PATH}/gcc/lib64 -Wl,-Bstatic -lstdc++ -Wl,-Bdynamic -Wl,-nostdlib -Wl,--no-whole-archive ${OPT_PATH}/gcc/lib64/libstdc++.a -Wl,--no-whole-archive ${OPT_PATH}/gcc/lib64/libsupc++.a") -else() - set(CMAKE_SHARED_LINKER_FLAGS_INIT "-stdlib=libc++ -nostdlib++ -static-libstdc++ -shared-libgcc") - set(CMAKE_MODULE_LINKER_FLAGS_INIT "-stdlib=libc++ -nostdlib++ -static-libstdc++ -shared-libgcc") - set(CMAKE_CXX_STANDARD_LIBRARIES "${OPT_PATH}/clang8/lib/libc++.a ${OPT_PATH}/clang8/lib/libc++abi.a") -endif() +#set(CMAKE_CXX_STANDARD_INCLUDE_DIRECTORIES ${OPT_PATH}/gcc/include/c++/7.1.0 ${OPT_PATH}/gcc/include/c++/7.1.0/x86_64-pc-linux-gnu) +#set(CMAKE_CXX_FLAGS_INIT "-nostdinc++") +# +#if(NOT APPLE) +# set(CMAKE_CXX_STANDARD_LIBRARIES "-Wl,-L${OPT_PATH}/gcc/lib64 -Wl,-Bstatic -lstdc++ -Wl,-Bdynamic -Wl,-nostdlib -Wl,--no-whole-archive ${OPT_PATH}/gcc/lib64/libstdc++.a -Wl,--no-whole-archive ${OPT_PATH}/gcc/lib64/libsupc++.a") +#else() +# set(CMAKE_SHARED_LINKER_FLAGS_INIT "-stdlib=libc++ -nostdlib++ -static-libstdc++ -shared-libgcc") +# set(CMAKE_MODULE_LINKER_FLAGS_INIT "-stdlib=libc++ -nostdlib++ -static-libstdc++ -shared-libgcc") +# set(CMAKE_CXX_STANDARD_LIBRARIES "${OPT_PATH}/clang8/lib/libc++.a ${OPT_PATH}/clang8/lib/libc++abi.a") +#endif() From 8a2e891c9d7ac9b61a8e0f8794f9d54727914a7c Mon Sep 17 00:00:00 2001 From: learnforpractice Date: Wed, 10 Apr 2019 22:22:19 +0800 Subject: [PATCH 468/680] Remove dead code --- libraries/chain/include/eosio/chain/wasm_eosio_injection.hpp | 5 ----- 1 file changed, 5 deletions(-) diff --git a/libraries/chain/include/eosio/chain/wasm_eosio_injection.hpp b/libraries/chain/include/eosio/chain/wasm_eosio_injection.hpp index d59e81f9ba7..2d93bad31d8 100644 --- a/libraries/chain/include/eosio/chain/wasm_eosio_injection.hpp +++ b/libraries/chain/include/eosio/chain/wasm_eosio_injection.hpp @@ -55,11 +55,6 @@ namespace eosio { namespace chain { namespace wasm_injections { // get the next available index that is greater than the 
last exported function
      static void get_next_indices( Module& module, int& next_function_index, int& next_actual_index ) {
-         int exports = 0;
-         for ( auto exp : module.exports )
-            if ( exp.kind == IR::ObjectKind::function )
-               exports++;
-
          next_function_index = module.functions.imports.size() + module.functions.defs.size() + registered_injected.size();
          next_actual_index = next_injected_index++;
       }

From 117557ae1d90347543a075fdd9486f1c2adf9c28 Mon Sep 17 00:00:00 2001
From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com>
Date: Fri, 22 Mar 2019 09:04:47 -0400
Subject: [PATCH 469/680] Destroy wavm module instances when deleting the wavm_instantiated_module

When we instantiate a wasm with wavm we get back a ModuleInstance. But deleting this object is a noop because wavm has its own garbage collection thing. Implement the code to interface with the garbage collection so that deleting a ModuleInstance can really free up the objects that are associated with it
---
 .../include/eosio/chain/webassembly/wavm.hpp  |  8 ---
 libraries/chain/webassembly/wavm.cpp          | 61 ++++++++++++-------
 2 files changed, 38 insertions(+), 31 deletions(-)

diff --git a/libraries/chain/include/eosio/chain/webassembly/wavm.hpp b/libraries/chain/include/eosio/chain/webassembly/wavm.hpp
index 23df2f04e08..5bce9db8b40 100644
--- a/libraries/chain/include/eosio/chain/webassembly/wavm.hpp
+++ b/libraries/chain/include/eosio/chain/webassembly/wavm.hpp
@@ -23,14 +23,6 @@ class wavm_runtime : public eosio::chain::wasm_runtime_interface {
       std::unique_ptr<wasm_instantiated_module_interface> instantiate_module(const char* code_bytes, size_t code_size, std::vector<uint8_t> initial_memory) override;

       void immediately_exit_currently_running_module() override;
-
-      struct runtime_guard {
-         runtime_guard();
-         ~runtime_guard();
-      };
-
-   private:
-      std::shared_ptr<runtime_guard> _runtime_guard;
 };

 //This is a temporary hack for the single threaded implementation
diff --git a/libraries/chain/webassembly/wavm.cpp b/libraries/chain/webassembly/wavm.cpp
index febd2ec8b07..8d195df7720 100644
--- a/libraries/chain/webassembly/wavm.cpp
+++ b/libraries/chain/webassembly/wavm.cpp
@@ -12,7 +12,8 @@
 #include "Runtime/Linker.h"
 #include "Runtime/Intrinsics.h"

-#include
+#include
+#include

 using namespace IR;
 using namespace Runtime;
@@ -21,14 +22,47 @@ namespace eosio { namespace chain { namespace webassembly { namespace wavm {

 running_instance_context the_running_instance_context;

+namespace detail {
+struct wavm_runtime_initializer {
+   wavm_runtime_initializer() {
+      Runtime::init();
+   }
+};
+
+using live_module_ref = std::list<ObjectInstance*>::iterator;
+
+struct wavm_live_modules {
+   live_module_ref add_live_module(ModuleInstance* module_instance) {
+      return live_modules.insert(live_modules.begin(), asObject(module_instance));
+   }
+
+   void remove_live_module(live_module_ref it) {
+      live_modules.erase(it);
+      std::vector<ObjectInstance*> root;
+      std::copy(live_modules.begin(), live_modules.end(), std::back_inserter(root));
+      Runtime::freeUnreferencedObjects(std::move(root));
+   }
+
+   std::list<ObjectInstance*> live_modules;
+};
+
+static wavm_live_modules the_wavm_live_modules;
+
+}
+
 class wavm_instantiated_module : public wasm_instantiated_module_interface {
    public:
       wavm_instantiated_module(ModuleInstance* instance, std::unique_ptr<Module> module, std::vector<uint8_t> initial_mem) :
          _initial_memory(initial_mem),
-         _instance(instance),
-         _module(std::move(module))
+         _instance(instance),
+         _module(std::move(module)),
+         _module_ref(detail::the_wavm_live_modules.add_live_module(instance))
       {}

+      ~wavm_instantiated_module() {
+         detail::the_wavm_live_modules.remove_live_module(_module_ref);
+      }
+
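+      // (How this hooks into wavm's collector: live_modules above is the root
+      // set the runtime keeps alive. remove_live_module() erases this module's
+      // entry and then passes the remaining roots to
+      // Runtime::freeUnreferencedObjects(), so anything reachable only from
+      // the dying instance is actually freed.)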
       void apply(apply_context& context) override {
          vector<Value> args = {Value(uint64_t(context.receiver)),
                                Value(uint64_t(context.act.account)),
@@ -79,30 +113,11 @@ class wavm_instantiated_module : public wasm_instantiated_module_interface {
       //_instance is deleted via WAVM's object garbage collection when wavm_rutime is deleted
       ModuleInstance* _instance;
       std::unique_ptr<Module> _module;
+      detail::live_module_ref _module_ref;
 };
-
-wavm_runtime::runtime_guard::runtime_guard() {
-   // TODO clean this up
-   //check_wasm_opcode_dispositions();
-   Runtime::init();
-}
-
-wavm_runtime::runtime_guard::~runtime_guard() {
-   Runtime::freeUnreferencedObjects({});
-}
-
-static weak_ptr<wavm_runtime::runtime_guard> __runtime_guard_ptr;
-static std::mutex __runtime_guard_lock;

 wavm_runtime::wavm_runtime() {
-   std::lock_guard<std::mutex> l(__runtime_guard_lock);
-   if (__runtime_guard_ptr.use_count() == 0) {
-      _runtime_guard = std::make_shared<runtime_guard>();
-      __runtime_guard_ptr = _runtime_guard;
-   } else {
-      _runtime_guard = __runtime_guard_ptr.lock();
-   }
+   static detail::wavm_runtime_initializer the_wavm_runtime_initializer;
 }

 wavm_runtime::~wavm_runtime() {

From 4335a223d73be76796e6f8fadbb95ded5b3cf9cc Mon Sep 17 00:00:00 2001
From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com>
Date: Sun, 21 Apr 2019 22:10:03 -0400
Subject: [PATCH 470/680] Avoid destruction of wasm instances on shutdown

When the wasm_interface is destroyed, all unique_ptrs to the current instantiated modules are also destroyed. Unfortunately for some wasm runtimes -- wavm -- this incurs a lengthy garbage collection process for each module that is deleted. This can cause destruction of wasm_interface to take on the order of multiple minutes when there are thousands of instances! We need to violate the abstraction layer here and drop these instances on the floor during shutdown so the user isn't waiting minutes
---
 libraries/chain/include/eosio/chain/wasm_interface.hpp   | 3 +++
 .../chain/include/eosio/chain/wasm_interface_private.hpp | 6 ++++++
 libraries/chain/wasm_interface.cpp                       | 4 ++++
 plugins/chain_plugin/chain_plugin.cpp                    | 2 ++
 4 files changed, 15 insertions(+)

diff --git a/libraries/chain/include/eosio/chain/wasm_interface.hpp b/libraries/chain/include/eosio/chain/wasm_interface.hpp
index 7e6991996af..1bdf1cb218f 100644
--- a/libraries/chain/include/eosio/chain/wasm_interface.hpp
+++ b/libraries/chain/include/eosio/chain/wasm_interface.hpp
@@ -59,6 +59,9 @@ namespace eosio { namespace chain {
          wasm_interface(vm_type vm);
          ~wasm_interface();

+         //call before dtor to skip what can be minutes of dtor overhead with some runtimes; can cause leaks
+         void indicate_shutting_down();
+
         //validates code -- does a WASM validation pass and checks the wasm against EOSIO specific constraints
         static void validate(const controller& control, const bytes& code);

diff --git a/libraries/chain/include/eosio/chain/wasm_interface_private.hpp b/libraries/chain/include/eosio/chain/wasm_interface_private.hpp
index c3af34d79ea..826114caa4a 100644
--- a/libraries/chain/include/eosio/chain/wasm_interface_private.hpp
+++ b/libraries/chain/include/eosio/chain/wasm_interface_private.hpp
@@ -32,6 +32,11 @@ namespace eosio { namespace chain {
             EOS_THROW(wasm_exception, "wasm_interface_impl fall through");
          }

+         ~wasm_interface_impl() {
+            if(is_shutting_down)
+               std::for_each(instantiation_cache.begin(), instantiation_cache.end(), [](auto& i) {i.second.release();});
+         }
+
          std::vector<uint8_t> parse_initial_memory(const Module& module) {
             std::vector<uint8_t> mem_image;

@@ -89,6 +94,7 @@ namespace eosio { namespace chain {
            return it->second;
         }

+
bool is_shutting_down = false; std::unique_ptr runtime_interface; map> instantiation_cache; }; diff --git a/libraries/chain/wasm_interface.cpp b/libraries/chain/wasm_interface.cpp index feb9efbef8a..6665fc3f98a 100644 --- a/libraries/chain/wasm_interface.cpp +++ b/libraries/chain/wasm_interface.cpp @@ -54,6 +54,10 @@ namespace eosio { namespace chain { //Hard: Kick off instantiation in a separate thread at this location } + void wasm_interface::indicate_shutting_down() { + my->is_shutting_down = true; + } + void wasm_interface::apply( const digest_type& code_id, const shared_string& code, apply_context& context ) { my->get_instantiated_module(code_id, code, context.trx_context)->apply(context); } diff --git a/plugins/chain_plugin/chain_plugin.cpp b/plugins/chain_plugin/chain_plugin.cpp index 2b8b493392e..4bc77fe2a03 100644 --- a/plugins/chain_plugin/chain_plugin.cpp +++ b/plugins/chain_plugin/chain_plugin.cpp @@ -770,6 +770,8 @@ void chain_plugin::plugin_shutdown() { my->irreversible_block_connection.reset(); my->accepted_transaction_connection.reset(); my->applied_transaction_connection.reset(); + if(app().is_quiting()) + my->chain->get_wasm_interface().indicate_shutting_down(); my->chain.reset(); } From 097e7454b3301fd48f6ed8c89a20ecef407ba767 Mon Sep 17 00:00:00 2001 From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com> Date: Mon, 22 Apr 2019 11:54:27 -0400 Subject: [PATCH 471/680] evict wasm_cache entries once the last block they are used in becomes irreversible the wasm_cache needs to be periodically pruned of old entries. controller will now note to wasm_interface when the reference count of some code_hash goes to 0 and wasm_interface will then evict those entries once that block becomes irreversible. Unfortunately there are still many corner cases that can cause items in the cache to never be evicted or evicted too soon, but some reasonably accurate eviction is direly needed for long replays so this is considered good enough for now --- libraries/chain/controller.cpp | 4 ++++ libraries/chain/eosio_contract.cpp | 1 + .../chain/include/eosio/chain/wasm_interface.hpp | 6 ++++++ .../include/eosio/chain/wasm_interface_private.hpp | 13 +++++++++++++ libraries/chain/wasm_interface.cpp | 8 ++++++++ 5 files changed, 32 insertions(+) diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp index bafe7933e9d..1b1f914db2d 100644 --- a/libraries/chain/controller.cpp +++ b/libraries/chain/controller.cpp @@ -313,6 +313,10 @@ struct controller_impl { set_activation_handler(); set_activation_handler(); + self.irreversible_block.connect([this](const block_state_ptr& bsp) { + wasmif.current_lib(bsp->block_num); + }); + #define SET_APP_HANDLER( receiver, contract, action) \ set_apply_handler( #receiver, #contract, #action, &BOOST_PP_CAT(apply_, BOOST_PP_CAT(contract, BOOST_PP_CAT(_,action) ) ) ) diff --git a/libraries/chain/eosio_contract.cpp b/libraries/chain/eosio_contract.cpp index 1fe849abb48..4a18406ee02 100644 --- a/libraries/chain/eosio_contract.cpp +++ b/libraries/chain/eosio_contract.cpp @@ -160,6 +160,7 @@ void apply_eosio_setcode(apply_context& context) { old_size = (int64_t)old_code_entry.code.size() * config::setcode_ram_bytes_multiplier; if( old_code_entry.code_ref_count == 1 ) { db.remove(old_code_entry); + context.control.get_wasm_interface().code_block_num_last_used(account.code_hash, account.vm_type, account.vm_version, context.control.head_block_num() + 1); } else { db.modify(old_code_entry, [](code_object& o) { --o.code_ref_count; diff --git 
a/libraries/chain/include/eosio/chain/wasm_interface.hpp b/libraries/chain/include/eosio/chain/wasm_interface.hpp index 5ddea081d48..b5749179a9b 100644 --- a/libraries/chain/include/eosio/chain/wasm_interface.hpp +++ b/libraries/chain/include/eosio/chain/wasm_interface.hpp @@ -82,6 +82,12 @@ namespace eosio { namespace chain { //validates code -- does a WASM validation pass and checks the wasm against EOSIO specific constraints static void validate(const controller& control, const bytes& code); + //indicate that a particular code probably won't be used after given block_num + void code_block_num_last_used(const digest_type& code_hash, const uint8_t& vm_type, const uint8_t& vm_version, const uint32_t& block_num); + + //indicate the current LIB. evicts old cache entries + void current_lib(const uint32_t lib); + //Calls apply or error on a given code void apply(const digest_type& code_hash, const uint8_t& vm_type, const uint8_t& vm_version, apply_context& context); diff --git a/libraries/chain/include/eosio/chain/wasm_interface_private.hpp b/libraries/chain/include/eosio/chain/wasm_interface_private.hpp index 362a29e19f0..2f59bccf69f 100644 --- a/libraries/chain/include/eosio/chain/wasm_interface_private.hpp +++ b/libraries/chain/include/eosio/chain/wasm_interface_private.hpp @@ -64,6 +64,19 @@ namespace eosio { namespace chain { return mem_image; } + void code_block_num_last_used(const digest_type& code_hash, const uint8_t& vm_type, const uint8_t& vm_version, const uint32_t& block_num) { + wasm_cache_index::iterator it = wasm_instantiation_cache.find(boost::make_tuple(code_hash, vm_type, vm_version)); + if(it != wasm_instantiation_cache.end()) + wasm_instantiation_cache.modify(it, [block_num](wasm_cache_entry& e) { + e.last_block_num_used = block_num; + }); + } + + void current_lib(uint32_t lib) { + //anything last used before or on the LIB can be evicted + wasm_instantiation_cache.get().erase(wasm_instantiation_cache.get().begin(), wasm_instantiation_cache.get().upper_bound(lib)); + } + const std::unique_ptr& get_instantiated_module( const digest_type& code_hash, const uint8_t& vm_type, const uint8_t& vm_version, transaction_context& trx_context ) { diff --git a/libraries/chain/wasm_interface.cpp b/libraries/chain/wasm_interface.cpp index 9fd144a3cb1..75682ff4b80 100644 --- a/libraries/chain/wasm_interface.cpp +++ b/libraries/chain/wasm_interface.cpp @@ -57,6 +57,14 @@ namespace eosio { namespace chain { //Hard: Kick off instantiation in a separate thread at this location } + void wasm_interface::code_block_num_last_used(const digest_type& code_hash, const uint8_t& vm_type, const uint8_t& vm_version, const uint32_t& block_num) { + my->code_block_num_last_used(code_hash, vm_type, vm_version, block_num); + } + + void wasm_interface::current_lib(const uint32_t lib) { + my->current_lib(lib); + } + void wasm_interface::apply( const digest_type& code_hash, const uint8_t& vm_type, const uint8_t& vm_version, apply_context& context ) { my->get_instantiated_module(code_hash, vm_type, vm_version, context.trx_context)->apply(context); } From 436651d075989c3a7eec413672a8175c42e7d9e8 Mon Sep 17 00:00:00 2001 From: Bucky Kittinger Date: Mon, 22 Apr 2019 12:53:04 -0400 Subject: [PATCH 472/680] switch over to libcxx and no mongo-cxx-driver --- CMakeLists.txt | 4 +- scripts/eosio_build.sh | 14 +- scripts/eosio_build_amazon.sh | 317 ++++++++++++++++---------------- scripts/eosio_build_centos.sh | 316 ++++++++++++++++---------------- scripts/eosio_build_darwin.sh | 11 +- scripts/eosio_build_fedora.sh | 301 
+++++++++++++++--------------- scripts/eosio_build_ubuntu.sh | 322 +++++++++++++++++---------------- scripts/pinned_toolchain.cmake | 25 ++- 8 files changed, 643 insertions(+), 667 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index fe8d020167e..7ff89762398 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -113,11 +113,13 @@ FIND_PACKAGE(Boost 1.67 REQUIRED COMPONENTS iostreams) # Some new stdlibc++s will #error on ; a problem for boost pre-1.69 +add_definitions(-DBOOST_ASIO_DISABLE_STD_EXPERIMENTAL_STRING_VIEW) if( APPLE AND UNIX ) add_definitions(-DBOOST_ASIO_DISABLE_STD_EXPERIMENTAL_STRING_VIEW) endif() -set(THREADS_PREFER_PTHREAD_FLAG 1) +set(CMAKE_THREAD_PREFER_PTHREAD TRUE) +set(THREADS_PREFER_PTHREAD_FLAG TRUE) find_package(Threads) link_libraries(Threads::Threads) diff --git a/scripts/eosio_build.sh b/scripts/eosio_build.sh index a76f8974248..322f7aa5913 100755 --- a/scripts/eosio_build.sh +++ b/scripts/eosio_build.sh @@ -49,6 +49,8 @@ SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" REPO_ROOT="${SCRIPT_DIR}/.." BUILD_DIR="${REPO_ROOT}/build" +export BUILD_DIR=$BUILD_DIR + # Use current directory's tmp directory if noexec is enabled for /tmp if (mount | grep "/tmp " | grep --quiet noexec); then mkdir -p $REPO_ROOT/tmp @@ -58,6 +60,8 @@ else # noexec wasn't found TEMP_DIR="/tmp" fi +export TMP_LOCATION=$TEMP_DIR + function usage() { cat >&2 < $BUILD_DIR/pinned_toolchain.cmake cd $REPO_ROOT STALE_SUBMODS=$(( $(git submodule status --recursive | grep -c "^[+\-]") )) @@ -254,7 +260,7 @@ fi CXX=$CPP_COMP CC=$CC_COMP -export BUILD_CLANG8=$BUILD_CLANG8 +export PIN_COMPILER=$PIN_COMPILER # Setup directories mkdir -p $SRC_LOCATION @@ -347,17 +353,15 @@ printf "======================= Starting EOSIO Build =======================\\n" printf "## CMAKE_BUILD_TYPE=%s\\n" "${CMAKE_BUILD_TYPE}" printf "## ENABLE_COVERAGE_TESTING=%s\\n" "${ENABLE_COVERAGE_TESTING}" -mkdir -p $BUILD_DIR cd $BUILD_DIR if $PIN_COMPILER; then - sed -e "s~@~$OPT_LOCATION~g" $SCRIPT_DIR/pinned_toolchain.cmake &> $BUILD_DIR/pinned_toolchain.cmake $CMAKE -DCMAKE_BUILD_TYPE="${CMAKE_BUILD_TYPE}" -DCMAKE_TOOLCHAIN_FILE=$BUILD_DIR/pinned_toolchain.cmake \ -DCORE_SYMBOL_NAME="${CORE_SYMBOL_NAME}" \ - -DOPENSSL_ROOT_DIR="${OPENSSL_ROOT_DIR}" -DBUILD_MONGO_DB_PLUGIN=true \ + -DOPENSSL_ROOT_DIR="${OPENSSL_ROOT_DIR}" -DBUILD_MONGO_DB_PLUGIN=false \ -DENABLE_COVERAGE_TESTING="${ENABLE_COVERAGE_TESTING}" -DBUILD_DOXYGEN="${DOXYGEN}" \ - -DCMAKE_PREFIX_PATH=$PREFIX \ + -DCMAKE_PREFIX_PATH=$PREFIX -DCMAKE_PREFIX_PATH=$OPT_LOCATION/llvm4\ -DCMAKE_INSTALL_PREFIX=$OPT_LOCATION/eosio $LOCAL_CMAKE_FLAGS -DEOSIO_PIN_COMPILER=1 "${REPO_ROOT}" else $CMAKE -DCMAKE_BUILD_TYPE="${CMAKE_BUILD_TYPE}" -DCMAKE_CXX_COMPILER="${CXX}" \ diff --git a/scripts/eosio_build_amazon.sh b/scripts/eosio_build_amazon.sh index 0c4b5b35f27..af5220a658b 100755 --- a/scripts/eosio_build_amazon.sh +++ b/scripts/eosio_build_amazon.sh @@ -8,6 +8,8 @@ DISK_AVAIL_KB=$( df . | tail -1 | awk '{print $4}' ) DISK_TOTAL=$(( DISK_TOTAL_KB / 1048576 )) DISK_AVAIL=$(( DISK_AVAIL_KB / 1048576 )) +PINNED_TOOLCHAIN=-DCMAKE_TOOLCHAIN_FILE=$BUILD_DIR/pinned_toolchain.cmake + if [[ "${OS_NAME}" == "Amazon Linux AMI" ]]; then # Amazonlinux1 DEP_ARRAY=( sudo procps util-linux which gcc72 gcc72-c++ autoconf automake libtool make doxygen graphviz \ @@ -140,170 +142,7 @@ if [ $? 
-ne 0 ]; then exit -1; fi printf "\\n" - -printf "Checking Boost library (${BOOST_VERSION}) installation...\\n" -BOOSTVERSION=$( grep "#define BOOST_VERSION" "$BOOST_ROOT/include/boost/version.hpp" 2>/dev/null | tail -1 | tr -s ' ' | cut -d\ -f3 ) -if [ "${BOOSTVERSION}" != "${BOOST_VERSION_MAJOR}0${BOOST_VERSION_MINOR}0${BOOST_VERSION_PATCH}" ]; then - printf "Installing Boost library...\\n" - curl -LO https://dl.bintray.com/boostorg/release/${BOOST_VERSION_MAJOR}.${BOOST_VERSION_MINOR}.${BOOST_VERSION_PATCH}/source/boost_$BOOST_VERSION.tar.bz2 \ - && tar -xjf boost_$BOOST_VERSION.tar.bz2 \ - && cd $BOOST_ROOT \ - && ./bootstrap.sh --prefix=$BOOST_ROOT \ - && ./b2 -q -j"${JOBS}" install \ - && cd .. \ - && rm -f boost_$BOOST_VERSION.tar.bz2 \ - && rm -rf $BOOST_LINK_LOCATION \ - && ln -s $BOOST_ROOT $BOOST_LINK_LOCATION \ - || exit 1 - printf " - Boost library successfully installed @ ${BOOST_ROOT} (Symlinked to ${BOOST_LINK_LOCATION}).\\n" -else - printf " - Boost library found with correct version @ ${BOOST_ROOT} (Symlinked to ${BOOST_LINK_LOCATION}).\\n" -fi -if [ $? -ne 0 ]; then exit -1; fi - - -printf "\\n" - - -printf "Checking MongoDB installation...\\n" -if [ ! -d $MONGODB_ROOT ]; then - printf "Installing MongoDB into ${MONGODB_ROOT}...\\n" - curl -OL https://fastdl.mongodb.org/linux/mongodb-linux-x86_64-amazon-$MONGODB_VERSION.tgz \ - && tar -xzf mongodb-linux-x86_64-amazon-$MONGODB_VERSION.tgz \ - && mv $SRC_LOCATION/mongodb-linux-x86_64-amazon-$MONGODB_VERSION $MONGODB_ROOT \ - && touch $MONGODB_LOG_LOCATION/mongod.log \ - && rm -f mongodb-linux-x86_64-amazon-$MONGODB_VERSION.tgz \ - && cp -f $REPO_ROOT/scripts/mongod.conf $MONGODB_CONF \ - && mkdir -p $MONGODB_DATA_LOCATION \ - && rm -rf $MONGODB_LINK_LOCATION \ - && rm -rf $BIN_LOCATION/mongod \ - && ln -s $MONGODB_ROOT $MONGODB_LINK_LOCATION \ - && ln -s $MONGODB_LINK_LOCATION/bin/mongod $BIN_LOCATION/mongod \ - || exit 1 - printf " - MongoDB successfully installed @ ${MONGODB_ROOT} (Symlinked to ${MONGODB_LINK_LOCATION}).\\n" -else - printf " - MongoDB found with correct version @ ${MONGODB_ROOT} (Symlinked to ${MONGODB_LINK_LOCATION}).\\n" -fi -if [ $? -ne 0 ]; then exit -1; fi -printf "Checking MongoDB C driver installation...\\n" -if [ ! -d $MONGO_C_DRIVER_ROOT ]; then - printf "Installing MongoDB C driver...\\n" - curl -LO https://github.com/mongodb/mongo-c-driver/releases/download/$MONGO_C_DRIVER_VERSION/mongo-c-driver-$MONGO_C_DRIVER_VERSION.tar.gz \ - && tar -xzf mongo-c-driver-$MONGO_C_DRIVER_VERSION.tar.gz \ - && cd mongo-c-driver-$MONGO_C_DRIVER_VERSION \ - && mkdir -p cmake-build \ - && cd cmake-build \ - && $CMAKE -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$PREFIX -DENABLE_BSON=ON -DENABLE_SSL=OPENSSL -DENABLE_AUTOMATIC_INIT_AND_CLEANUP=OFF -DENABLE_STATIC=ON .. \ - && make -j"${JOBS}" \ - && make install \ - && cd ../.. \ - && rm mongo-c-driver-$MONGO_C_DRIVER_VERSION.tar.gz \ - || exit 1 - printf " - MongoDB C driver successfully installed @ ${MONGO_C_DRIVER_ROOT}.\\n" -else - printf " - MongoDB C driver found with correct version @ ${MONGO_C_DRIVER_ROOT}.\\n" -fi -if [ $? -ne 0 ]; then exit -1; fi -printf "Checking MongoDB C++ driver installation...\\n" -if [ ! 
-d $MONGO_CXX_DRIVER_ROOT ]; then - printf "Installing MongoDB C++ driver...\\n" - curl -L https://github.com/mongodb/mongo-cxx-driver/archive/r$MONGO_CXX_DRIVER_VERSION.tar.gz -o mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION.tar.gz \ - && tar -xzf mongo-cxx-driver-r${MONGO_CXX_DRIVER_VERSION}.tar.gz \ - && cd mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION/build \ - && $CMAKE -DBUILD_SHARED_LIBS=OFF -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$PREFIX .. \ - && make -j"${JOBS}" VERBOSE=1 \ - && make install \ - && cd ../.. \ - && rm -f mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION.tar.gz \ - || exit 1 - printf " - MongoDB C++ driver successfully installed @ ${MONGO_CXX_DRIVER_ROOT}.\\n" -else - printf " - MongoDB C++ driver found with correct version @ ${MONGO_CXX_DRIVER_ROOT}.\\n" -fi -if [ $? -ne 0 ]; then exit -1; fi - - -printf "\\n" - - -printf "Checking LLVM 4 support...\\n" -if [ ! -d $LLVM_ROOT ]; then - printf "Installing LLVM 4...\\n" - cd ../opt \ - && git clone --depth 1 --single-branch --branch $LLVM_VERSION https://github.com/llvm-mirror/llvm.git llvm && cd llvm \ - && mkdir build \ - && cd build \ - && $CMAKE -G "Unix Makefiles" -DCMAKE_INSTALL_PREFIX="${LLVM_ROOT}" -DLLVM_TARGETS_TO_BUILD="host" -DLLVM_BUILD_TOOLS=false -DLLVM_ENABLE_RTTI=1 -DCMAKE_BUILD_TYPE="Release" .. \ - && make -j"${JOBS}" \ - && make install \ - && cd ../.. \ - || exit 1 - printf " - LLVM successfully installed @ ${LLVM_ROOT}\\n" -else - printf " - LLVM found @ ${LLVM_ROOT}.\\n" -fi -if [ $? -ne 0 ]; then exit -1; fi - - -cd .. -printf "\\n" - -# Use current directory's tmp directory if noexec is enabled for /tmp -if (mount | grep "/tmp " | grep --quiet noexec); then - mkdir -p $REPO_ROOT/tmp - TMP_LOCATION="${REPO_ROOT}/tmp" - rm -rf $REPO_ROOT/tmp/* -else # noexec wasn't found - TMP_LOCATION="/tmp" -fi - -if $BUILD_CLANG8; then - export LD_LIBRARY_PATH=${OPT_LOCATION}/mpfr/lib:$LD_LIBRARY_PATH - export LD_LIBRARY_PATH=${OPT_LOCATION}/gcc/lib64:$LD_LIBRARY_PATH - - if [ ! -d ${OPT_LOCATION}/gmp ]; then - printf "Installing gmp...\\n" - cd ${TMP_LOCATION} \ - && curl -LO https://ftp.gnu.org/gnu/gmp/gmp-5.0.1.tar.gz && tar -xzf gmp-5.0.1.tar.gz \ - && cd gmp-5.0.1 && mkdir build && cd build \ - && ../configure --prefix=${OPT_LOCATION}/gmp \ - && make -j"${JOBS}" && make install \ - && cd ../ && rm -rf ${TMP_LOCATION}/gmp-5.0.1 ${TMP_LOCATION}/gmp-5.0.1.tar.gz \ - || exit 1 - fi - if [ ! -d ${OPT_LOCATION}/mpfr ]; then - printf "Installing mpfr...\\n" - cd ${TMP_LOCATION} \ - && curl -LO https://ftp.gnu.org/gnu/mpfr/mpfr-3.0.0.tar.gz && tar -xzf mpfr-3.0.0.tar.gz \ - && cd mpfr-3.0.0 && mkdir build && cd build \ - && ../configure --prefix=${OPT_LOCATION}/mpfr --with-gmp=${OPT_LOCATION}/gmp \ - && make -j"${JOBS}" && make install \ - && cd ../ && rm -rf ${TMP_LOCATION}/mpfr-3.0.0 ${TMP_LOCATION}/mpfr-3.0.0.tar.gz \ - || exit 1 - fi - if [ ! -d ${OPT_LOCATION}/mpc ]; then - printf "Installing mpc...\\n" - cd ${TMP_LOCATION} \ - && curl -LO https://ftp.gnu.org/gnu/mpc/mpc-1.0.1.tar.gz && tar -xzf mpc-1.0.1.tar.gz \ - && cd mpc-1.0.1 && mkdir build && cd build \ - && ../configure --prefix=${OPT_LOCATION}/mpc --with-gmp=${OPT_LOCATION}/gmp --with-mpfr=${OPT_LOCATION}/mpfr \ - && make -j"${JOBS}" && make install \ - && cd ../ && rm -rf ${TMP_LOCATION}/mpc-1.0.1 ${TMP_LOCATION}/mpc-1.0.1.tar.gz \ - || exit 1 - fi - if [ ! 
-d ${OPT_LOCATION}/gcc ]; then - printf "Installing libstdc++\\n" - cd ${TMP_LOCATION} \ - && curl -LO https://ftp.gnu.org/gnu/gcc/gcc-7.1.0/gcc-7.1.0.tar.gz && tar -xzf gcc-7.1.0.tar.gz \ - && cd gcc-7.1.0 \ - && sed '61 s/ucontext/ucontext_t/' libgcc/config/i386/linux-unwind.h &> libgcc/config/i386/linux-unwind.h \ - && mkdir build && cd build \ - &&../configure --enable-languages=c,c++ --prefix=${OPT_LOCATION}/gcc --disable-shared --enable-linker-build-id --without-included-gettext --enable-threads=posix --enable-nls --enable-clocale=gnu --enable-libstdcxx-debug --enable-libstdcxx-time=yes --with-default-libstdcxx-abi=new --enable-gnu-unique-object --disable-vtable-verify --disable-libmpx --enable-plugin --with-system-zlib --with-target-system-zlib --disable-werror --disable-multilib --with-tune=generic --enable-checking=release --with-gmp=${OPT_LOCATION}/gmp --with-mpfr=${OPT_LOCATION}/mpfr --with-mpc=${OPT_LOCATION}/mpc --disable-libsanitizer --disable-testsuite --disable-libquadmath --disable-libitm --disable-libcc1 \ - && make -j"${JOBS}" && make install \ - && cd ../ && rm -rf ${TMP_LOCATION}/gcc-7.1.0 ${TMP_LOCATION}/gcc-7.1.0.tar.gz \ - || exit 1 - fi - +if $PIN_COMPILER; then printf "Checking Clang 8 support...\\n" if [ ! -d $CLANG8_ROOT ]; then printf "Installing Clang 8...\\n" @@ -345,6 +184,156 @@ if $BUILD_CLANG8; then if [ $? -ne 0 ]; then exit -1; fi printf "\\n" + + printf "Checking LLVM 4 installation...\\n" + if [ ! -d $OPT_LOCATION/llvm4 ]; then + printf "Installing LLVM 4...\\n" + curl -LO http://releases.llvm.org/4.0.0/llvm-4.0.0.src.tar.xz && tar -xf llvm-4.0.0.src.tar.xz \ + && cd llvm-4.0.0.src && mkdir -p build && cd build \ + && $CMAKE -DCMAKE_INSTALL_PREFIX=$OPT_LOCATION/llvm4 -DLLVM_TARGETS_TO_BUILD=host -DLLVM_BUILD_TOOLS=false -DLLVM_ENABLE_RTTI=1 -DCMAKE_BUILD_TYPE=Release -DCMAKE_TOOLCHAIN_FILE=$BUILD_DIR/pinned_toolchain.cmake .. \ + && make -j"${JOBS}" install \ + || exit -1 + printf "Installed LLVM 4 @ ${OPT_LOCATION}/llvm4" + fi + cd $SRC_LOCATION + printf "Checking zlib library (${BOOST_VERSION}) installation...\\n" + if [ ! -d $OPT_LOCATION/zlib ]; then + printf "Installing zlib...\\n" + curl -LO https://www.zlib.net/zlib-1.2.11.tar.gz && tar -xf zlib-1.2.11.tar.gz \ + && cd zlib-1.2.11 && mkdir build && cd build \ + && ../configure --prefix=$OPT_LOCATION/zlib \ + && make -j"${JOBS}" install \ + || exit -1 + fi + cd $SRC_LOCATION + printf "Checking Boost library (${BOOST_VERSION}) installation...\\n" + BOOSTVERSION=$( grep "#define BOOST_VERSION" "$BOOST_ROOT/include/boost/version.hpp" 2>/dev/null | tail -1 | tr -s ' ' | cut -d\ -f3 ) + if [ "${BOOSTVERSION}" != "${BOOST_VERSION_MAJOR}0${BOOST_VERSION_MINOR}0${BOOST_VERSION_PATCH}" ]; then + printf "Installing Boost library...\\n" + curl -LO https://dl.bintray.com/boostorg/release/${BOOST_VERSION_MAJOR}.${BOOST_VERSION_MINOR}.${BOOST_VERSION_PATCH}/source/boost_$BOOST_VERSION.tar.bz2 \ + && tar -xjf boost_$BOOST_VERSION.tar.bz2 \ + && cd $BOOST_ROOT \ + && ./bootstrap.sh --prefix=$BOOST_ROOT \ + && ./b2 toolset=clang cxxflags="-stdlib=libc++ -D__STRICT_ANSI__ -nostdinc++ -I${CLANG8_ROOT}/include/c++/v1" linkflags="-stdlib=libc++" link=static threading=multi --with-iostreams --with-date_time --with-filesystem --with-system --with-program_options --with-chrono --with-test -q -j"${JOBS}" -sZLIB_LIBRARY_PATH="${OPT_LOCATION}/zlib/lib" -sZLIB_INCLUDE="${OPT_LOCATION}/zlib/include" -sZLIB_SOURCE="${SRC_LOCATION}/zlib-1.2.11" install \ + && cd .. 
\ + && rm -f boost_$BOOST_VERSION.tar.bz2 \ + && rm -rf $BOOST_LINK_LOCATION \ + && ln -s $BOOST_ROOT $BOOST_LINK_LOCATION \ + || exit 1 + printf " - Boost library successfully installed @ ${BOOST_ROOT} (Symlinked to ${BOOST_LINK_LOCATION}).\\n" + else + printf " - Boost library found with correct version @ ${BOOST_ROOT} (Symlinked to ${BOOST_LINK_LOCATION}).\\n" + fi + if [ $? -ne 0 ]; then exit -1; fi + + printf "\\n" +else + printf "Checking Boost library (${BOOST_VERSION}) installation...\\n" + BOOSTVERSION=$( grep "#define BOOST_VERSION" "$BOOST_ROOT/include/boost/version.hpp" 2>/dev/null | tail -1 | tr -s ' ' | cut -d\ -f3 ) + if [ "${BOOSTVERSION}" != "${BOOST_VERSION_MAJOR}0${BOOST_VERSION_MINOR}0${BOOST_VERSION_PATCH}" ]; then + printf "Installing Boost library...\\n" + curl -LO https://dl.bintray.com/boostorg/release/${BOOST_VERSION_MAJOR}.${BOOST_VERSION_MINOR}.${BOOST_VERSION_PATCH}/source/boost_$BOOST_VERSION.tar.bz2 \ + && tar -xjf boost_$BOOST_VERSION.tar.bz2 \ + && cd $BOOST_ROOT \ + && ./bootstrap.sh --prefix=$BOOST_ROOT \ + && ./b2 -q -j"${JOBS}" install \ + && cd .. \ + && rm -f boost_$BOOST_VERSION.tar.bz2 \ + && rm -rf $BOOST_LINK_LOCATION \ + && ln -s $BOOST_ROOT $BOOST_LINK_LOCATION \ + || exit 1 + printf " - Boost library successfully installed @ ${BOOST_ROOT} (Symlinked to ${BOOST_LINK_LOCATION}).\\n" + else + printf " - Boost library found with correct version @ ${BOOST_ROOT} (Symlinked to ${BOOST_LINK_LOCATION}).\\n" + fi + if [ $? -ne 0 ]; then exit -1; fi + + + printf "\\n" + + + printf "Checking MongoDB installation...\\n" + if [ ! -d $MONGODB_ROOT ]; then + printf "Installing MongoDB into ${MONGODB_ROOT}...\\n" + curl -OL https://fastdl.mongodb.org/linux/mongodb-linux-x86_64-amazon-$MONGODB_VERSION.tgz \ + && tar -xzf mongodb-linux-x86_64-amazon-$MONGODB_VERSION.tgz \ + && mv $SRC_LOCATION/mongodb-linux-x86_64-amazon-$MONGODB_VERSION $MONGODB_ROOT \ + && touch $MONGODB_LOG_LOCATION/mongod.log \ + && rm -f mongodb-linux-x86_64-amazon-$MONGODB_VERSION.tgz \ + && cp -f $REPO_ROOT/scripts/mongod.conf $MONGODB_CONF \ + && mkdir -p $MONGODB_DATA_LOCATION \ + && rm -rf $MONGODB_LINK_LOCATION \ + && rm -rf $BIN_LOCATION/mongod \ + && ln -s $MONGODB_ROOT $MONGODB_LINK_LOCATION \ + && ln -s $MONGODB_LINK_LOCATION/bin/mongod $BIN_LOCATION/mongod \ + || exit 1 + printf " - MongoDB successfully installed @ ${MONGODB_ROOT} (Symlinked to ${MONGODB_LINK_LOCATION}).\\n" + else + printf " - MongoDB found with correct version @ ${MONGODB_ROOT} (Symlinked to ${MONGODB_LINK_LOCATION}).\\n" + fi + if [ $? -ne 0 ]; then exit -1; fi + printf "Checking MongoDB C driver installation...\\n" + if [ ! -d $MONGO_C_DRIVER_ROOT ]; then + printf "Installing MongoDB C driver...\\n" + curl -LO https://github.com/mongodb/mongo-c-driver/releases/download/$MONGO_C_DRIVER_VERSION/mongo-c-driver-$MONGO_C_DRIVER_VERSION.tar.gz \ + && tar -xzf mongo-c-driver-$MONGO_C_DRIVER_VERSION.tar.gz \ + && cd mongo-c-driver-$MONGO_C_DRIVER_VERSION \ + && mkdir -p cmake-build \ + && cd cmake-build \ + && $CMAKE -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$PREFIX -DENABLE_BSON=ON -DENABLE_SSL=OPENSSL -DENABLE_AUTOMATIC_INIT_AND_CLEANUP=OFF -DENABLE_STATIC=ON .. \ + && make -j"${JOBS}" \ + && make install \ + && cd ../.. \ + && rm mongo-c-driver-$MONGO_C_DRIVER_VERSION.tar.gz \ + || exit 1 + printf " - MongoDB C driver successfully installed @ ${MONGO_C_DRIVER_ROOT}.\\n" + else + printf " - MongoDB C driver found with correct version @ ${MONGO_C_DRIVER_ROOT}.\\n" + fi + if [ $? 
-ne 0 ]; then exit -1; fi + printf "Checking MongoDB C++ driver installation...\\n" + if [ ! -d $MONGO_CXX_DRIVER_ROOT ]; then + printf "Installing MongoDB C++ driver...\\n" + curl -L https://github.com/mongodb/mongo-cxx-driver/archive/r$MONGO_CXX_DRIVER_VERSION.tar.gz -o mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION.tar.gz \ + && tar -xzf mongo-cxx-driver-r${MONGO_CXX_DRIVER_VERSION}.tar.gz \ + && cd mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION/build \ + && $CMAKE -DBUILD_SHARED_LIBS=OFF -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$PREFIX .. \ + && make -j"${JOBS}" VERBOSE=1 \ + && make install \ + && cd ../.. \ + && rm -f mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION.tar.gz \ + || exit 1 + printf " - MongoDB C++ driver successfully installed @ ${MONGO_CXX_DRIVER_ROOT}.\\n" + else + printf " - MongoDB C++ driver found with correct version @ ${MONGO_CXX_DRIVER_ROOT}.\\n" + fi + if [ $? -ne 0 ]; then exit -1; fi + + + printf "\\n" + + + printf "Checking LLVM 4 support...\\n" + if [ ! -d $LLVM_ROOT ]; then + printf "Installing LLVM 4...\\n" + cd ../opt \ + && git clone --depth 1 --single-branch --branch $LLVM_VERSION https://github.com/llvm-mirror/llvm.git llvm && cd llvm \ + && mkdir build \ + && cd build \ + && $CMAKE -G "Unix Makefiles" -DCMAKE_INSTALL_PREFIX="${LLVM_ROOT}" -DLLVM_TARGETS_TO_BUILD="host" -DLLVM_BUILD_TOOLS=false -DLLVM_ENABLE_RTTI=1 -DCMAKE_BUILD_TYPE="Release" .. \ + && make -j"${JOBS}" \ + && make install \ + && cd ../.. \ + || exit 1 + printf " - LLVM successfully installed @ ${LLVM_ROOT}\\n" + else + printf " - LLVM found @ ${LLVM_ROOT}.\\n" + fi + if [ $? -ne 0 ]; then exit -1; fi + + + cd .. + printf "\\n" fi function print_instructions() { diff --git a/scripts/eosio_build_centos.sh b/scripts/eosio_build_centos.sh index 2820a93eae8..772c553ea7f 100755 --- a/scripts/eosio_build_centos.sh +++ b/scripts/eosio_build_centos.sh @@ -25,6 +25,8 @@ printf "Disk space total: ${DISK_TOTAL%.*}G\\n" printf "Disk space available: ${DISK_AVAIL%.*}G\\n" printf "Concurrent Jobs (make -j): ${JOBS}\\n" +PINNED_TOOLCHAIN=-DCMAKE_TOOLCHAIN_FILE=$BUILD_DIR/pinned_toolchain.cmake + if [ "${MEM_MEG}" -lt 7000 ]; then printf "\\nYour system must have 7 or more Gigabytes of physical memory installed.\\n" printf "Exiting now.\\n\\n" @@ -192,169 +194,7 @@ printf "\\n" export CPATH="${CPATH}:${PYTHON3PATH}/root/usr/include/python3.6m" # m on the end causes problems with boost finding python3 -printf "Checking Boost library (${BOOST_VERSION}) installation...\\n" -BOOSTVERSION=$( grep "#define BOOST_VERSION" "$BOOST_ROOT/include/boost/version.hpp" 2>/dev/null | tail -1 | tr -s ' ' | cut -d\ -f3 ) -if [ "${BOOSTVERSION}" != "${BOOST_VERSION_MAJOR}0${BOOST_VERSION_MINOR}0${BOOST_VERSION_PATCH}" ]; then - printf "Installing Boost library...\\n" - curl -LO https://dl.bintray.com/boostorg/release/${BOOST_VERSION_MAJOR}.${BOOST_VERSION_MINOR}.${BOOST_VERSION_PATCH}/source/boost_$BOOST_VERSION.tar.bz2 \ - && tar -xjf boost_$BOOST_VERSION.tar.bz2 \ - && cd $BOOST_ROOT \ - && ./bootstrap.sh --prefix=$BOOST_ROOT \ - && ./b2 -q -j"${JOBS}" install \ - && cd .. \ - && rm -f boost_$BOOST_VERSION.tar.bz2 \ - && rm -rf $BOOST_LINK_LOCATION \ - && ln -s $BOOST_ROOT $BOOST_LINK_LOCATION \ - || exit 1 - printf " - Boost library successfully installed @ ${BOOST_ROOT} (Symlinked to ${BOOST_LINK_LOCATION}).\\n" -else - printf " - Boost library found with correct version @ ${BOOST_ROOT} (Symlinked to ${BOOST_LINK_LOCATION}).\\n" -fi -if [ $? 
-ne 0 ]; then exit -1; fi - - -printf "\\n" - - -printf "Checking MongoDB installation...\\n" -if [ ! -d $MONGODB_ROOT ]; then - printf "Installing MongoDB into ${MONGODB_ROOT}...\\n" - curl -OL https://fastdl.mongodb.org/linux/mongodb-linux-x86_64-amazon-$MONGODB_VERSION.tgz \ - && tar -xzf mongodb-linux-x86_64-amazon-$MONGODB_VERSION.tgz \ - && mv $SRC_LOCATION/mongodb-linux-x86_64-amazon-$MONGODB_VERSION $MONGODB_ROOT \ - && touch $MONGODB_LOG_LOCATION/mongod.log \ - && rm -f mongodb-linux-x86_64-amazon-$MONGODB_VERSION.tgz \ - && cp -f $REPO_ROOT/scripts/mongod.conf $MONGODB_CONF \ - && mkdir -p $MONGODB_DATA_LOCATION \ - && rm -rf $MONGODB_LINK_LOCATION \ - && rm -rf $BIN_LOCATION/mongod \ - && ln -s $MONGODB_ROOT $MONGODB_LINK_LOCATION \ - && ln -s $MONGODB_LINK_LOCATION/bin/mongod $BIN_LOCATION/mongod \ - || exit 1 - printf " - MongoDB successfully installed @ ${MONGODB_ROOT} (Symlinked to ${MONGODB_LINK_LOCATION}).\\n" -else - printf " - MongoDB found with correct version @ ${MONGODB_ROOT} (Symlinked to ${MONGODB_LINK_LOCATION}).\\n" -fi -if [ $? -ne 0 ]; then exit -1; fi -printf "Checking MongoDB C driver installation...\\n" -if [ ! -d $MONGO_C_DRIVER_ROOT ]; then - printf "Installing MongoDB C driver...\\n" - curl -LO https://github.com/mongodb/mongo-c-driver/releases/download/$MONGO_C_DRIVER_VERSION/mongo-c-driver-$MONGO_C_DRIVER_VERSION.tar.gz \ - && tar -xzf mongo-c-driver-$MONGO_C_DRIVER_VERSION.tar.gz \ - && cd mongo-c-driver-$MONGO_C_DRIVER_VERSION \ - && mkdir -p cmake-build \ - && cd cmake-build \ - && $CMAKE -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$PREFIX -DENABLE_BSON=ON -DENABLE_SSL=OPENSSL -DENABLE_AUTOMATIC_INIT_AND_CLEANUP=OFF -DENABLE_STATIC=ON .. \ - && make -j"${JOBS}" \ - && make install \ - && cd ../.. \ - && rm mongo-c-driver-$MONGO_C_DRIVER_VERSION.tar.gz \ - || exit 1 - printf " - MongoDB C driver successfully installed @ ${MONGO_C_DRIVER_ROOT}.\\n" -else - printf " - MongoDB C driver found with correct version @ ${MONGO_C_DRIVER_ROOT}.\\n" -fi -if [ $? -ne 0 ]; then exit -1; fi -printf "Checking MongoDB C++ driver installation...\\n" -if [ ! -d $MONGO_CXX_DRIVER_ROOT ]; then - printf "Installing MongoDB C++ driver...\\n" - curl -L https://github.com/mongodb/mongo-cxx-driver/archive/r$MONGO_CXX_DRIVER_VERSION.tar.gz -o mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION.tar.gz \ - && tar -xzf mongo-cxx-driver-r${MONGO_CXX_DRIVER_VERSION}.tar.gz \ - && cd mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION/build \ - && $CMAKE -DBUILD_SHARED_LIBS=OFF -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$PREFIX .. \ - && make -j"${JOBS}" VERBOSE=1 \ - && make install \ - && cd ../.. \ - && rm -f mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION.tar.gz \ - || exit 1 - printf " - MongoDB C++ driver successfully installed @ ${MONGO_CXX_DRIVER_ROOT}.\\n" -else - printf " - MongoDB C++ driver found with correct version @ ${MONGO_CXX_DRIVER_ROOT}.\\n" -fi -if [ $? -ne 0 ]; then exit -1; fi - - -printf "\\n" - - -printf "Checking LLVM 4 support...\\n" -if [ ! -d $LLVM_ROOT ]; then - printf "Installing LLVM 4...\\n" - cd ../opt \ - && git clone --depth 1 --single-branch --branch $LLVM_VERSION https://github.com/llvm-mirror/llvm.git llvm && cd llvm \ - && mkdir build \ - && cd build \ - && $CMAKE -G "Unix Makefiles" -DCMAKE_INSTALL_PREFIX="${LLVM_ROOT}" -DLLVM_TARGETS_TO_BUILD="host" -DLLVM_BUILD_TOOLS=false -DLLVM_ENABLE_RTTI=1 -DCMAKE_BUILD_TYPE="Release" .. \ - && make -j"${JOBS}" \ - && make install \ - && cd ../.. 
\ - || exit 1 - printf " - LLVM successfully installed @ ${LLVM_ROOT}\\n" -else - printf " - LLVM found @ ${LLVM_ROOT}.\\n" -fi -if [ $? -ne 0 ]; then exit -1; fi - - -cd .. -printf "\\n" - -# Use current directory's tmp directory if noexec is enabled for /tmp -if (mount | grep "/tmp " | grep --quiet noexec); then - mkdir -p $REPO_ROOT/tmp - TMP_LOCATION="${REPO_ROOT}/tmp" - rm -rf $REPO_ROOT/tmp/* -else # noexec wasn't found - TMP_LOCATION="/tmp" -fi - -if $BUILD_CLANG8; then - export LD_LIBRARY_PATH=${OPT_LOCATION}/mpfr/lib:$LD_LIBRARY_PATH - export LD_LIBRARY_PATH=${OPT_LOCATION}/gcc/lib64:$LD_LIBRARY_PATH - - if [ ! -d ${OPT_LOCATION}/gmp ]; then - printf "Installing gmp...\\n" - cd ${TMP_LOCATION} \ - && curl -LO https://ftp.gnu.org/gnu/gmp/gmp-5.0.1.tar.gz && tar -xzf gmp-5.0.1.tar.gz \ - && cd gmp-5.0.1 && mkdir build && cd build \ - && ../configure --prefix=${OPT_LOCATION}/gmp \ - && make -j"${JOBS}" && make install \ - && cd ../ && rm -rf ${TMP_LOCATION}/gmp-5.0.1 ${TMP_LOCATION}/gmp-5.0.1.tar.gz \ - || exit 1 - fi - if [ ! -d ${OPT_LOCATION}/mpfr ]; then - printf "Installing mpfr...\\n" - cd ${TMP_LOCATION} \ - && curl -LO https://ftp.gnu.org/gnu/mpfr/mpfr-3.0.0.tar.gz && tar -xzf mpfr-3.0.0.tar.gz \ - && cd mpfr-3.0.0 && mkdir build && cd build \ - && ../configure --prefix=${OPT_LOCATION}/mpfr --with-gmp=${OPT_LOCATION}/gmp \ - && make -j"${JOBS}" && make install \ - && cd ../ && rm -rf ${TMP_LOCATION}/mpfr-3.0.0 ${TMP_LOCATION}/mpfr-3.0.0.tar.gz \ - || exit 1 - fi - if [ ! -d ${OPT_LOCATION}/mpc ]; then - printf "Installing mpc...\\n" - cd ${TMP_LOCATION} \ - && curl -LO https://ftp.gnu.org/gnu/mpc/mpc-1.0.1.tar.gz && tar -xzf mpc-1.0.1.tar.gz \ - && cd mpc-1.0.1 && mkdir build && cd build \ - && ../configure --prefix=${OPT_LOCATION}/mpc --with-gmp=${OPT_LOCATION}/gmp --with-mpfr=${OPT_LOCATION}/mpfr \ - && make -j"${JOBS}" && make install \ - && cd ../ && rm -rf ${TMP_LOCATION}/mpc-1.0.1 ${TMP_LOCATION}/mpc-1.0.1.tar.gz \ - || exit 1 - fi - if [ ! -d ${OPT_LOCATION}/gcc ]; then - printf "Installing libstdc++\\n" - cd ${TMP_LOCATION} \ - && curl -LO https://ftp.gnu.org/gnu/gcc/gcc-7.1.0/gcc-7.1.0.tar.gz && tar -xzf gcc-7.1.0.tar.gz \ - && cd gcc-7.1.0 \ - && sed '61 s/ucontext/ucontext_t/' libgcc/config/i386/linux-unwind.h &> libgcc/config/i386/linux-unwind.h \ - && mkdir build && cd build \ - &&../configure --enable-languages=c,c++ --prefix=${OPT_LOCATION}/gcc --disable-shared --enable-linker-build-id --without-included-gettext --enable-threads=posix --enable-nls --enable-clocale=gnu --enable-libstdcxx-debug --enable-libstdcxx-time=yes --with-default-libstdcxx-abi=new --enable-gnu-unique-object --disable-vtable-verify --disable-libmpx --enable-plugin --with-system-zlib --with-target-system-zlib --disable-werror --disable-multilib --with-tune=generic --enable-checking=release --with-gmp=${OPT_LOCATION}/gmp --with-mpfr=${OPT_LOCATION}/mpfr --with-mpc=${OPT_LOCATION}/mpc --disable-libsanitizer --disable-testsuite --disable-libquadmath --disable-libitm --disable-libcc1 \ - && make -j"${JOBS}" && make install \ - && cd ../ && rm -rf ${TMP_LOCATION}/gcc-7.1.0 ${TMP_LOCATION}/gcc-7.1.0.tar.gz \ - || exit 1 - fi - +if $PIN_COMPILER; then printf "Checking Clang 8 support...\\n" if [ ! -d $CLANG8_ROOT ]; then printf "Installing Clang 8...\\n" @@ -396,6 +236,156 @@ if $BUILD_CLANG8; then if [ $? -ne 0 ]; then exit -1; fi printf "\\n" + + printf "Checking LLVM 4 installation...\\n" + if [ ! 
-d $OPT_LOCATION/llvm4 ]; then + printf "Installing LLVM 4...\\n" + curl -LO http://releases.llvm.org/4.0.0/llvm-4.0.0.src.tar.xz && tar -xf llvm-4.0.0.src.tar.xz \ + && cd llvm-4.0.0.src && mkdir -p build && cd build \ + && $CMAKE -DCMAKE_INSTALL_PREFIX=$OPT_LOCATION/llvm4 -DLLVM_TARGETS_TO_BUILD=host -DLLVM_BUILD_TOOLS=false -DLLVM_ENABLE_RTTI=1 -DCMAKE_BUILD_TYPE=Release -DCMAKE_TOOLCHAIN_FILE=$BUILD_DIR/pinned_toolchain.cmake .. \ + && make -j"${JOBS}" install \ + || exit -1 + printf "Installed LLVM 4 @ ${OPT_LOCATION}/llvm4" + fi + cd $SRC_LOCATION + printf "Checking zlib library (${BOOST_VERSION}) installation...\\n" + if [ ! -d $OPT_LOCATION/zlib ]; then + printf "Installing zlib...\\n" + curl -LO https://www.zlib.net/zlib-1.2.11.tar.gz && tar -xf zlib-1.2.11.tar.gz \ + && cd zlib-1.2.11 && mkdir build && cd build \ + && ../configure --prefix=$OPT_LOCATION/zlib \ + && make -j"${JOBS}" install \ + || exit -1 + fi + cd $SRC_LOCATION + printf "Checking Boost library (${BOOST_VERSION}) installation...\\n" + BOOSTVERSION=$( grep "#define BOOST_VERSION" "$BOOST_ROOT/include/boost/version.hpp" 2>/dev/null | tail -1 | tr -s ' ' | cut -d\ -f3 ) + if [ "${BOOSTVERSION}" != "${BOOST_VERSION_MAJOR}0${BOOST_VERSION_MINOR}0${BOOST_VERSION_PATCH}" ]; then + printf "Installing Boost library...\\n" + curl -LO https://dl.bintray.com/boostorg/release/${BOOST_VERSION_MAJOR}.${BOOST_VERSION_MINOR}.${BOOST_VERSION_PATCH}/source/boost_$BOOST_VERSION.tar.bz2 \ + && tar -xjf boost_$BOOST_VERSION.tar.bz2 \ + && cd $BOOST_ROOT \ + && ./bootstrap.sh --prefix=$BOOST_ROOT \ + && ./b2 toolset=clang cxxflags="-stdlib=libc++ -D__STRICT_ANSI__ -nostdinc++ -I${CLANG8_ROOT}/include/c++/v1" linkflags="-stdlib=libc++" link=static threading=multi --with-iostreams --with-date_time --with-filesystem --with-system --with-program_options --with-chrono --with-test -q -j"${JOBS}" -sZLIB_LIBRARY_PATH="${OPT_LOCATION}/zlib/lib" -sZLIB_INCLUDE="${OPT_LOCATION}/zlib/include" -sZLIB_SOURCE="${SRC_LOCATION}/zlib-1.2.11" install \ + && cd .. \ + && rm -f boost_$BOOST_VERSION.tar.bz2 \ + && rm -rf $BOOST_LINK_LOCATION \ + && ln -s $BOOST_ROOT $BOOST_LINK_LOCATION \ + || exit 1 + printf " - Boost library successfully installed @ ${BOOST_ROOT} (Symlinked to ${BOOST_LINK_LOCATION}).\\n" + else + printf " - Boost library found with correct version @ ${BOOST_ROOT} (Symlinked to ${BOOST_LINK_LOCATION}).\\n" + fi + if [ $? -ne 0 ]; then exit -1; fi + + printf "\\n" +else + printf "Checking Boost library (${BOOST_VERSION}) installation...\\n" + BOOSTVERSION=$( grep "#define BOOST_VERSION" "$BOOST_ROOT/include/boost/version.hpp" 2>/dev/null | tail -1 | tr -s ' ' | cut -d\ -f3 ) + if [ "${BOOSTVERSION}" != "${BOOST_VERSION_MAJOR}0${BOOST_VERSION_MINOR}0${BOOST_VERSION_PATCH}" ]; then + printf "Installing Boost library...\\n" + curl -LO https://dl.bintray.com/boostorg/release/${BOOST_VERSION_MAJOR}.${BOOST_VERSION_MINOR}.${BOOST_VERSION_PATCH}/source/boost_$BOOST_VERSION.tar.bz2 \ + && tar -xjf boost_$BOOST_VERSION.tar.bz2 \ + && cd $BOOST_ROOT \ + && ./bootstrap.sh --prefix=$BOOST_ROOT \ + && ./b2 -q -j"${JOBS}" install \ + && cd .. \ + && rm -f boost_$BOOST_VERSION.tar.bz2 \ + && rm -rf $BOOST_LINK_LOCATION \ + && ln -s $BOOST_ROOT $BOOST_LINK_LOCATION \ + || exit 1 + printf " - Boost library successfully installed @ ${BOOST_ROOT} (Symlinked to ${BOOST_LINK_LOCATION}).\\n" + else + printf " - Boost library found with correct version @ ${BOOST_ROOT} (Symlinked to ${BOOST_LINK_LOCATION}).\\n" + fi + if [ $? 
-ne 0 ]; then exit -1; fi + + + printf "\\n" + + + printf "Checking MongoDB installation...\\n" + if [ ! -d $MONGODB_ROOT ]; then + printf "Installing MongoDB into ${MONGODB_ROOT}...\\n" + curl -OL https://fastdl.mongodb.org/linux/mongodb-linux-x86_64-amazon-$MONGODB_VERSION.tgz \ + && tar -xzf mongodb-linux-x86_64-amazon-$MONGODB_VERSION.tgz \ + && mv $SRC_LOCATION/mongodb-linux-x86_64-amazon-$MONGODB_VERSION $MONGODB_ROOT \ + && touch $MONGODB_LOG_LOCATION/mongod.log \ + && rm -f mongodb-linux-x86_64-amazon-$MONGODB_VERSION.tgz \ + && cp -f $REPO_ROOT/scripts/mongod.conf $MONGODB_CONF \ + && mkdir -p $MONGODB_DATA_LOCATION \ + && rm -rf $MONGODB_LINK_LOCATION \ + && rm -rf $BIN_LOCATION/mongod \ + && ln -s $MONGODB_ROOT $MONGODB_LINK_LOCATION \ + && ln -s $MONGODB_LINK_LOCATION/bin/mongod $BIN_LOCATION/mongod \ + || exit 1 + printf " - MongoDB successfully installed @ ${MONGODB_ROOT} (Symlinked to ${MONGODB_LINK_LOCATION}).\\n" + else + printf " - MongoDB found with correct version @ ${MONGODB_ROOT} (Symlinked to ${MONGODB_LINK_LOCATION}).\\n" + fi + if [ $? -ne 0 ]; then exit -1; fi + printf "Checking MongoDB C driver installation...\\n" + if [ ! -d $MONGO_C_DRIVER_ROOT ]; then + printf "Installing MongoDB C driver...\\n" + curl -LO https://github.com/mongodb/mongo-c-driver/releases/download/$MONGO_C_DRIVER_VERSION/mongo-c-driver-$MONGO_C_DRIVER_VERSION.tar.gz \ + && tar -xzf mongo-c-driver-$MONGO_C_DRIVER_VERSION.tar.gz \ + && cd mongo-c-driver-$MONGO_C_DRIVER_VERSION \ + && mkdir -p cmake-build \ + && cd cmake-build \ + && $CMAKE -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$PREFIX -DENABLE_BSON=ON -DENABLE_SSL=OPENSSL -DENABLE_AUTOMATIC_INIT_AND_CLEANUP=OFF -DENABLE_STATIC=ON .. \ + && make -j"${JOBS}" \ + && make install \ + && cd ../.. \ + && rm mongo-c-driver-$MONGO_C_DRIVER_VERSION.tar.gz \ + || exit 1 + printf " - MongoDB C driver successfully installed @ ${MONGO_C_DRIVER_ROOT}.\\n" + else + printf " - MongoDB C driver found with correct version @ ${MONGO_C_DRIVER_ROOT}.\\n" + fi + if [ $? -ne 0 ]; then exit -1; fi + printf "Checking MongoDB C++ driver installation...\\n" + if [ ! -d $MONGO_CXX_DRIVER_ROOT ]; then + printf "Installing MongoDB C++ driver...\\n" + curl -L https://github.com/mongodb/mongo-cxx-driver/archive/r$MONGO_CXX_DRIVER_VERSION.tar.gz -o mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION.tar.gz \ + && tar -xzf mongo-cxx-driver-r${MONGO_CXX_DRIVER_VERSION}.tar.gz \ + && cd mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION/build \ + && $CMAKE -DBUILD_SHARED_LIBS=OFF -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$PREFIX .. \ + && make -j"${JOBS}" VERBOSE=1 \ + && make install \ + && cd ../.. \ + && rm -f mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION.tar.gz \ + || exit 1 + printf " - MongoDB C++ driver successfully installed @ ${MONGO_CXX_DRIVER_ROOT}.\\n" + else + printf " - MongoDB C++ driver found with correct version @ ${MONGO_CXX_DRIVER_ROOT}.\\n" + fi + if [ $? -ne 0 ]; then exit -1; fi + + + printf "\\n" + + + printf "Checking LLVM 4 support...\\n" + if [ ! -d $LLVM_ROOT ]; then + printf "Installing LLVM 4...\\n" + cd ../opt \ + && git clone --depth 1 --single-branch --branch $LLVM_VERSION https://github.com/llvm-mirror/llvm.git llvm && cd llvm \ + && mkdir build \ + && cd build \ + && $CMAKE -G "Unix Makefiles" -DCMAKE_INSTALL_PREFIX="${LLVM_ROOT}" -DLLVM_TARGETS_TO_BUILD="host" -DLLVM_BUILD_TOOLS=false -DLLVM_ENABLE_RTTI=1 -DCMAKE_BUILD_TYPE="Release" .. \ + && make -j"${JOBS}" \ + && make install \ + && cd ../.. 
\ + || exit 1 + printf " - LLVM successfully installed @ ${LLVM_ROOT}\\n" + else + printf " - LLVM found @ ${LLVM_ROOT}.\\n" + fi + if [ $? -ne 0 ]; then exit -1; fi + + + cd .. + printf "\\n" fi function print_instructions() { diff --git a/scripts/eosio_build_darwin.sh b/scripts/eosio_build_darwin.sh index 1bbc97367b0..4150d379f27 100755 --- a/scripts/eosio_build_darwin.sh +++ b/scripts/eosio_build_darwin.sh @@ -261,16 +261,7 @@ fi cd .. printf "\\n" -# Use current directory's tmp directory if noexec is enabled for /tmp -if (mount | grep "/tmp " | grep --quiet noexec); then - mkdir -p $REPO_ROOT/tmp - TMP_LOCATION="${REPO_ROOT}/tmp" - rm -rf $REPO_ROOT/tmp/* -else # noexec wasn't found - TMP_LOCATION="/tmp" -fi - -if $BUILD_CLANG8; then +if $PIN_COMPILER; then printf "Checking Clang 8 support...\\n" if [ ! -d $CLANG8_ROOT ]; then printf "Installing Clang 8...\\n" diff --git a/scripts/eosio_build_fedora.sh b/scripts/eosio_build_fedora.sh index 264ca3e8df0..e5b41d78390 100755 --- a/scripts/eosio_build_fedora.sh +++ b/scripts/eosio_build_fedora.sh @@ -10,6 +10,8 @@ if [ "${OS_VER}" -lt 25 ]; then exit 1; fi +PINNED_TOOLCHAIN=-DCMAKE_TOOLCHAIN_FILE=$BUILD_DIR/pinned_toolchain.cmake + # procps-ng includes free command if [[ -z "$( rpm -qi "procps-ng" 2>/dev/null | grep Name )" ]]; then yum install -y procps-ng; fi MEM_MEG=$( free -m | sed -n 2p | tr -s ' ' | cut -d\ -f2 ) @@ -131,162 +133,7 @@ if [ $? -ne 0 ]; then exit -1; fi printf "\\n" - -printf "Checking Boost library (${BOOST_VERSION}) installation...\\n" -BOOSTVERSION=$( grep "#define BOOST_VERSION" "$BOOST_ROOT/include/boost/version.hpp" 2>/dev/null | tail -1 | tr -s ' ' | cut -d\ -f3 ) -if [ "${BOOSTVERSION}" != "${BOOST_VERSION_MAJOR}0${BOOST_VERSION_MINOR}0${BOOST_VERSION_PATCH}" ]; then - printf "Installing Boost library...\\n" - curl -LO https://dl.bintray.com/boostorg/release/${BOOST_VERSION_MAJOR}.${BOOST_VERSION_MINOR}.${BOOST_VERSION_PATCH}/source/boost_$BOOST_VERSION.tar.bz2 \ - && tar -xjf boost_$BOOST_VERSION.tar.bz2 \ - && cd $BOOST_ROOT \ - && ./bootstrap.sh --prefix=$BOOST_ROOT \ - && ./b2 -q -j"${JOBS}" install \ - && cd .. \ - && rm -f boost_$BOOST_VERSION.tar.bz2 \ - && rm -rf $BOOST_LINK_LOCATION \ - && ln -s $BOOST_ROOT $BOOST_LINK_LOCATION \ - || exit 1 - printf " - Boost library successfully installed @ ${BOOST_ROOT} (Symlinked to ${BOOST_LINK_LOCATION}).\\n" -else - printf " - Boost library found with correct version @ ${BOOST_ROOT} (Symlinked to ${BOOST_LINK_LOCATION}).\\n" -fi -if [ $? -ne 0 ]; then exit -1; fi - - -printf "\\n" - - -printf "Checking MongoDB installation...\\n" -if [ ! -d $MONGODB_ROOT ]; then - printf "Installing MongoDB into ${MONGODB_ROOT}...\\n" - curl -OL https://fastdl.mongodb.org/linux/mongodb-linux-x86_64-amazon-$MONGODB_VERSION.tgz \ - && tar -xzf mongodb-linux-x86_64-amazon-$MONGODB_VERSION.tgz \ - && mv $SRC_LOCATION/mongodb-linux-x86_64-amazon-$MONGODB_VERSION $MONGODB_ROOT \ - && touch $MONGODB_LOG_LOCATION/mongod.log \ - && rm -f mongodb-linux-x86_64-amazon-$MONGODB_VERSION.tgz \ - && cp -f $REPO_ROOT/scripts/mongod.conf $MONGODB_CONF \ - && mkdir -p $MONGODB_DATA_LOCATION \ - && rm -rf $MONGODB_LINK_LOCATION \ - && rm -rf $BIN_LOCATION/mongod \ - && ln -s $MONGODB_ROOT $MONGODB_LINK_LOCATION \ - && ln -s $MONGODB_LINK_LOCATION/bin/mongod $BIN_LOCATION/mongod \ - || exit 1 - printf " - MongoDB successfully installed @ ${MONGODB_ROOT}\\n" -else - printf " - MongoDB found with correct version @ ${MONGODB_ROOT}.\\n" -fi -if [ $? 
-ne 0 ]; then exit -1; fi -printf "Checking MongoDB C driver installation...\\n" -if [ ! -d $MONGO_C_DRIVER_ROOT ]; then - printf "Installing MongoDB C driver...\\n" - curl -LO https://github.com/mongodb/mongo-c-driver/releases/download/$MONGO_C_DRIVER_VERSION/mongo-c-driver-$MONGO_C_DRIVER_VERSION.tar.gz \ - && tar -xzf mongo-c-driver-$MONGO_C_DRIVER_VERSION.tar.gz \ - && cd mongo-c-driver-$MONGO_C_DRIVER_VERSION \ - && mkdir -p cmake-build \ - && cd cmake-build \ - && $CMAKE -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$PREFIX -DENABLE_BSON=ON -DENABLE_SSL=OPENSSL -DENABLE_AUTOMATIC_INIT_AND_CLEANUP=OFF -DENABLE_STATIC=ON .. \ - && make -j"${JOBS}" \ - && make install \ - && cd ../.. \ - && rm mongo-c-driver-$MONGO_C_DRIVER_VERSION.tar.gz \ - || exit 1 - printf " - MongoDB C driver successfully installed @ ${MONGO_C_DRIVER_ROOT}.\\n" -else - printf " - MongoDB C driver found with correct version @ ${MONGO_C_DRIVER_ROOT}.\\n" -fi -if [ $? -ne 0 ]; then exit -1; fi -printf "Checking MongoDB C++ driver installation...\\n" -if [ ! -d $MONGO_CXX_DRIVER_ROOT ]; then - printf "Installing MongoDB C++ driver...\\n" - curl -L https://github.com/mongodb/mongo-cxx-driver/archive/r$MONGO_CXX_DRIVER_VERSION.tar.gz -o mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION.tar.gz \ - && tar -xzf mongo-cxx-driver-r${MONGO_CXX_DRIVER_VERSION}.tar.gz \ - && cd mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION/build \ - && $CMAKE -DBUILD_SHARED_LIBS=OFF -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$PREFIX .. \ - && make -j"${JOBS}" VERBOSE=1 \ - && make install \ - && cd ../.. \ - && rm -f mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION.tar.gz \ - || exit 1 - printf " - MongoDB C++ driver successfully installed @ ${MONGO_CXX_DRIVER_ROOT}.\\n" -else - printf " - MongoDB C++ driver found with correct version @ ${MONGO_CXX_DRIVER_ROOT}.\\n" -fi -if [ $? -ne 0 ]; then exit -1; fi - - -printf "\\n" - - -printf "Checking LLVM 4 support...\\n" -if [ ! -d $LLVM_ROOT ]; then - ln -s /usr/lib64/llvm4.0 $LLVM_ROOT \ - || exit 1 - printf " - LLVM successfully linked from /usr/lib64/llvm4.0 to ${LLVM_ROOT}\\n" -else - printf " - LLVM found @ ${LLVM_ROOT}.\\n" -fi -if [ $? -ne 0 ]; then exit -1; fi - - -cd .. -printf "\\n" - -# Use current directory's tmp directory if noexec is enabled for /tmp -if (mount | grep "/tmp " | grep --quiet noexec); then - mkdir -p $REPO_ROOT/tmp - TMP_LOCATION="${REPO_ROOT}/tmp" - rm -rf $REPO_ROOT/tmp/* -else # noexec wasn't found - TMP_LOCATION="/tmp" -fi - -if $BUILD_CLANG8; then - export LD_LIBRARY_PATH=${OPT_LOCATION}/mpfr/lib:$LD_LIBRARY_PATH - export LD_LIBRARY_PATH=${OPT_LOCATION}/gcc/lib64:$LD_LIBRARY_PATH - - if [ ! -d ${OPT_LOCATION}/gmp ]; then - printf "Installing gmp...\\n" - cd ${TMP_LOCATION} \ - && curl -LO https://ftp.gnu.org/gnu/gmp/gmp-5.0.1.tar.gz && tar -xzf gmp-5.0.1.tar.gz \ - && cd gmp-5.0.1 && mkdir build && cd build \ - && ../configure --prefix=${OPT_LOCATION}/gmp \ - && make -j"${JOBS}" && make install \ - && cd ../ && rm -rf ${TMP_LOCATION}/gmp-5.0.1 ${TMP_LOCATION}/gmp-5.0.1.tar.gz \ - || exit 1 - fi - if [ ! -d ${OPT_LOCATION}/mpfr ]; then - printf "Installing mpfr...\\n" - cd ${TMP_LOCATION} \ - && curl -LO https://ftp.gnu.org/gnu/mpfr/mpfr-3.0.0.tar.gz && tar -xzf mpfr-3.0.0.tar.gz \ - && cd mpfr-3.0.0 && mkdir build && cd build \ - && ../configure --prefix=${OPT_LOCATION}/mpfr --with-gmp=${OPT_LOCATION}/gmp \ - && make -j"${JOBS}" && make install \ - && cd ../ && rm -rf ${TMP_LOCATION}/mpfr-3.0.0 ${TMP_LOCATION}/mpfr-3.0.0.tar.gz \ - || exit 1 - fi - if [ ! 
-d ${OPT_LOCATION}/mpc ]; then - printf "Installing mpc...\\n" - cd ${TMP_LOCATION} \ - && curl -LO https://ftp.gnu.org/gnu/mpc/mpc-1.0.1.tar.gz && tar -xzf mpc-1.0.1.tar.gz \ - && cd mpc-1.0.1 && mkdir build && cd build \ - && ../configure --prefix=${OPT_LOCATION}/mpc --with-gmp=${OPT_LOCATION}/gmp --with-mpfr=${OPT_LOCATION}/mpfr \ - && make -j"${JOBS}" && make install \ - && cd ../ && rm -rf ${TMP_LOCATION}/mpc-1.0.1 ${TMP_LOCATION}/mpc-1.0.1.tar.gz \ - || exit 1 - fi - if [ ! -d ${OPT_LOCATION}/gcc ]; then - printf "Installing libstdc++\\n" - cd ${TMP_LOCATION} \ - && curl -LO https://ftp.gnu.org/gnu/gcc/gcc-7.1.0/gcc-7.1.0.tar.gz && tar -xzf gcc-7.1.0.tar.gz \ - && cd gcc-7.1.0 \ - && sed '61 s/ucontext/ucontext_t/' libgcc/config/i386/linux-unwind.h &> libgcc/config/i386/linux-unwind.h \ - && mkdir build && cd build \ - &&../configure --enable-languages=c,c++ --prefix=${OPT_LOCATION}/gcc --disable-shared --enable-linker-build-id --without-included-gettext --enable-threads=posix --enable-nls --enable-clocale=gnu --enable-libstdcxx-debug --enable-libstdcxx-time=yes --with-default-libstdcxx-abi=new --enable-gnu-unique-object --disable-vtable-verify --disable-libmpx --enable-plugin --with-system-zlib --with-target-system-zlib --disable-werror --disable-multilib --with-tune=generic --enable-checking=release --with-gmp=${OPT_LOCATION}/gmp --with-mpfr=${OPT_LOCATION}/mpfr --with-mpc=${OPT_LOCATION}/mpc --disable-libsanitizer --disable-testsuite --disable-libquadmath --disable-libitm --disable-libcc1 \ - && make -j"${JOBS}" && make install \ - && cd ../ && rm -rf ${TMP_LOCATION}/gcc-7.1.0 ${TMP_LOCATION}/gcc-7.1.0.tar.gz \ - || exit 1 - fi - +if $PIN_COMPILER; then printf "Checking Clang 8 support...\\n" if [ ! -d $CLANG8_ROOT ]; then printf "Installing Clang 8...\\n" @@ -328,6 +175,148 @@ if $BUILD_CLANG8; then if [ $? -ne 0 ]; then exit -1; fi printf "\\n" + + printf "Checking LLVM 4 installation...\\n" + if [ ! -d $OPT_LOCATION/llvm4 ]; then + printf "Installing LLVM 4...\\n" + curl -LO http://releases.llvm.org/4.0.0/llvm-4.0.0.src.tar.xz && tar -xf llvm-4.0.0.src.tar.xz \ + && cd llvm-4.0.0.src && mkdir -p build && cd build \ + && $CMAKE -DCMAKE_INSTALL_PREFIX=$OPT_LOCATION/llvm4 -DLLVM_TARGETS_TO_BUILD=host -DLLVM_BUILD_TOOLS=false -DLLVM_ENABLE_RTTI=1 -DCMAKE_BUILD_TYPE=Release -DCMAKE_TOOLCHAIN_FILE=$BUILD_DIR/pinned_toolchain.cmake .. \ + && make -j"${JOBS}" install \ + || exit -1 + printf "Installed LLVM 4 @ ${OPT_LOCATION}/llvm4" + fi + cd $SRC_LOCATION + printf "Checking zlib library (${BOOST_VERSION}) installation...\\n" + if [ ! 
-d $OPT_LOCATION/zlib ]; then + printf "Installing zlib...\\n" + curl -LO https://www.zlib.net/zlib-1.2.11.tar.gz && tar -xf zlib-1.2.11.tar.gz \ + && cd zlib-1.2.11 && mkdir build && cd build \ + && ../configure --prefix=$OPT_LOCATION/zlib \ + && make -j"${JOBS}" install \ + || exit -1 + fi + cd $SRC_LOCATION + printf "Checking Boost library (${BOOST_VERSION}) installation...\\n" + BOOSTVERSION=$( grep "#define BOOST_VERSION" "$BOOST_ROOT/include/boost/version.hpp" 2>/dev/null | tail -1 | tr -s ' ' | cut -d\ -f3 ) + if [ "${BOOSTVERSION}" != "${BOOST_VERSION_MAJOR}0${BOOST_VERSION_MINOR}0${BOOST_VERSION_PATCH}" ]; then + printf "Installing Boost library...\\n" + curl -LO https://dl.bintray.com/boostorg/release/${BOOST_VERSION_MAJOR}.${BOOST_VERSION_MINOR}.${BOOST_VERSION_PATCH}/source/boost_$BOOST_VERSION.tar.bz2 \ + && tar -xjf boost_$BOOST_VERSION.tar.bz2 \ + && cd $BOOST_ROOT \ + && ./bootstrap.sh --prefix=$BOOST_ROOT \ + && ./b2 toolset=clang cxxflags="-stdlib=libc++ -D__STRICT_ANSI__ -nostdinc++ -I${CLANG8_ROOT}/include/c++/v1" linkflags="-stdlib=libc++" link=static threading=multi --with-iostreams --with-date_time --with-filesystem --with-system --with-program_options --with-chrono --with-test -q -j"${JOBS}" -sZLIB_LIBRARY_PATH="${OPT_LOCATION}/zlib/lib" -sZLIB_INCLUDE="${OPT_LOCATION}/zlib/include" -sZLIB_SOURCE="${SRC_LOCATION}/zlib-1.2.11" install \ + && cd .. \ + && rm -f boost_$BOOST_VERSION.tar.bz2 \ + && rm -rf $BOOST_LINK_LOCATION \ + && ln -s $BOOST_ROOT $BOOST_LINK_LOCATION \ + || exit 1 + printf " - Boost library successfully installed @ ${BOOST_ROOT} (Symlinked to ${BOOST_LINK_LOCATION}).\\n" + else + printf " - Boost library found with correct version @ ${BOOST_ROOT} (Symlinked to ${BOOST_LINK_LOCATION}).\\n" + fi + if [ $? -ne 0 ]; then exit -1; fi + + printf "\\n" +else + printf "Checking Boost library (${BOOST_VERSION}) installation...\\n" + BOOSTVERSION=$( grep "#define BOOST_VERSION" "$BOOST_ROOT/include/boost/version.hpp" 2>/dev/null | tail -1 | tr -s ' ' | cut -d\ -f3 ) + if [ "${BOOSTVERSION}" != "${BOOST_VERSION_MAJOR}0${BOOST_VERSION_MINOR}0${BOOST_VERSION_PATCH}" ]; then + printf "Installing Boost library...\\n" + curl -LO https://dl.bintray.com/boostorg/release/${BOOST_VERSION_MAJOR}.${BOOST_VERSION_MINOR}.${BOOST_VERSION_PATCH}/source/boost_$BOOST_VERSION.tar.bz2 \ + && tar -xjf boost_$BOOST_VERSION.tar.bz2 \ + && cd $BOOST_ROOT \ + && ./bootstrap.sh --prefix=$BOOST_ROOT \ + && ./b2 -q -j"${JOBS}" install \ + && cd .. \ + && rm -f boost_$BOOST_VERSION.tar.bz2 \ + && rm -rf $BOOST_LINK_LOCATION \ + && ln -s $BOOST_ROOT $BOOST_LINK_LOCATION \ + || exit 1 + printf " - Boost library successfully installed @ ${BOOST_ROOT} (Symlinked to ${BOOST_LINK_LOCATION}).\\n" + else + printf " - Boost library found with correct version @ ${BOOST_ROOT} (Symlinked to ${BOOST_LINK_LOCATION}).\\n" + fi + if [ $? -ne 0 ]; then exit -1; fi + + + printf "\\n" + + + printf "Checking MongoDB installation...\\n" + if [ ! 
-d $MONGODB_ROOT ]; then + printf "Installing MongoDB into ${MONGODB_ROOT}...\\n" + curl -OL https://fastdl.mongodb.org/linux/mongodb-linux-x86_64-amazon-$MONGODB_VERSION.tgz \ + && tar -xzf mongodb-linux-x86_64-amazon-$MONGODB_VERSION.tgz \ + && mv $SRC_LOCATION/mongodb-linux-x86_64-amazon-$MONGODB_VERSION $MONGODB_ROOT \ + && touch $MONGODB_LOG_LOCATION/mongod.log \ + && rm -f mongodb-linux-x86_64-amazon-$MONGODB_VERSION.tgz \ + && cp -f $REPO_ROOT/scripts/mongod.conf $MONGODB_CONF \ + && mkdir -p $MONGODB_DATA_LOCATION \ + && rm -rf $MONGODB_LINK_LOCATION \ + && rm -rf $BIN_LOCATION/mongod \ + && ln -s $MONGODB_ROOT $MONGODB_LINK_LOCATION \ + && ln -s $MONGODB_LINK_LOCATION/bin/mongod $BIN_LOCATION/mongod \ + || exit 1 + printf " - MongoDB successfully installed @ ${MONGODB_ROOT}\\n" + else + printf " - MongoDB found with correct version @ ${MONGODB_ROOT}.\\n" + fi + if [ $? -ne 0 ]; then exit -1; fi + printf "Checking MongoDB C driver installation...\\n" + if [ ! -d $MONGO_C_DRIVER_ROOT ]; then + printf "Installing MongoDB C driver...\\n" + curl -LO https://github.com/mongodb/mongo-c-driver/releases/download/$MONGO_C_DRIVER_VERSION/mongo-c-driver-$MONGO_C_DRIVER_VERSION.tar.gz \ + && tar -xzf mongo-c-driver-$MONGO_C_DRIVER_VERSION.tar.gz \ + && cd mongo-c-driver-$MONGO_C_DRIVER_VERSION \ + && mkdir -p cmake-build \ + && cd cmake-build \ + && $CMAKE -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$PREFIX -DENABLE_BSON=ON -DENABLE_SSL=OPENSSL -DENABLE_AUTOMATIC_INIT_AND_CLEANUP=OFF -DENABLE_STATIC=ON .. \ + && make -j"${JOBS}" \ + && make install \ + && cd ../.. \ + && rm mongo-c-driver-$MONGO_C_DRIVER_VERSION.tar.gz \ + || exit 1 + printf " - MongoDB C driver successfully installed @ ${MONGO_C_DRIVER_ROOT}.\\n" + else + printf " - MongoDB C driver found with correct version @ ${MONGO_C_DRIVER_ROOT}.\\n" + fi + if [ $? -ne 0 ]; then exit -1; fi + printf "Checking MongoDB C++ driver installation...\\n" + if [ ! -d $MONGO_CXX_DRIVER_ROOT ]; then + printf "Installing MongoDB C++ driver...\\n" + curl -L https://github.com/mongodb/mongo-cxx-driver/archive/r$MONGO_CXX_DRIVER_VERSION.tar.gz -o mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION.tar.gz \ + && tar -xzf mongo-cxx-driver-r${MONGO_CXX_DRIVER_VERSION}.tar.gz \ + && cd mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION/build \ + && $CMAKE -DBUILD_SHARED_LIBS=OFF -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$PREFIX .. \ + && make -j"${JOBS}" VERBOSE=1 \ + && make install \ + && cd ../.. \ + && rm -f mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION.tar.gz \ + || exit 1 + printf " - MongoDB C++ driver successfully installed @ ${MONGO_CXX_DRIVER_ROOT}.\\n" + else + printf " - MongoDB C++ driver found with correct version @ ${MONGO_CXX_DRIVER_ROOT}.\\n" + fi + if [ $? -ne 0 ]; then exit -1; fi + + + printf "\\n" + + + printf "Checking LLVM 4 support...\\n" + if [ ! -d $LLVM_ROOT ]; then + ln -s /usr/lib64/llvm4.0 $LLVM_ROOT \ + || exit 1 + printf " - LLVM successfully linked from /usr/lib64/llvm4.0 to ${LLVM_ROOT}\\n" + else + printf " - LLVM found @ ${LLVM_ROOT}.\\n" + fi + if [ $? -ne 0 ]; then exit -1; fi + + + cd .. 
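+   # Note: each "if [ $? -ne 0 ]" above tests the exit status of the printf
+   # that precedes it, not the install chain; the "|| exit 1" at the end of
+   # each chain is what actually aborts on a failed install.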
+   printf "\\n"
 fi

 function print_instructions() {
diff --git a/scripts/eosio_build_ubuntu.sh b/scripts/eosio_build_ubuntu.sh
index 5ef0d3b73fd..c3525646ec3 100755
--- a/scripts/eosio_build_ubuntu.sh
+++ b/scripts/eosio_build_ubuntu.sh
@@ -25,6 +25,8 @@ printf "Disk install: ${DISK_INSTALL}\\n"
 printf "Disk space total: ${DISK_TOTAL%.*}G\\n"
 printf "Disk space available: ${DISK_AVAIL%.*}G\\n"

+PINNED_TOOLCHAIN=-DCMAKE_TOOLCHAIN_FILE=$BUILD_DIR/pinned_toolchain.cmake
+
 if [ "${MEM_MEG}" -lt 7000 ]; then
    printf "Your system must have 7 or more Gigabytes of physical memory installed.\\n"
    printf "Exiting now.\\n"
    exit 1
 fi
@@ -69,12 +71,18 @@ if [ "${DISK_AVAIL%.*}" -lt "${DISK_MIN}" ]; then
 fi

 # llvm-4.0 is installed into /usr/lib/llvm-4.0
-# clang is necessary for building on ubuntu
+if ! $PIN_COMPILER; then
+   llvm4_deps=(llvm-4.0 libclang-4.0-dev)
+fi
+
 DEP_ARRAY=(
-   git llvm-4.0 clang-4.0 libclang-4.0-dev make automake libbz2-dev libssl-dev doxygen graphviz \
-   libgmp3-dev autotools-dev build-essential libicu-dev python2.7 python2.7-dev python3 python3-dev \
-   autoconf libtool curl zlib1g-dev sudo ruby libusb-1.0-0-dev libcurl4-gnutls-dev pkg-config
+   git make automake libbz2-dev libssl-dev doxygen graphviz \
+   libgmp3-dev autotools-dev build-essential libicu-dev python2.7 python2.7-dev python3 python3-dev \
+   autoconf libtool curl zlib1g-dev sudo ruby libusb-1.0-0-dev libcurl4-gnutls-dev pkg-config
 )
+
+DEP_ARRAY+=( "${llvm4_deps[@]}" )
+
 COUNT=1
 DISPLAY=""
 DEP=""
@@ -155,161 +163,7 @@ if [ $? -ne 0 ]; then exit -1; fi

 printf "\\n"

-printf "Checking Boost library (${BOOST_VERSION}) installation...\\n"
-BOOSTVERSION=$( grep "#define BOOST_VERSION" "$BOOST_ROOT/include/boost/version.hpp" 2>/dev/null | tail -1 | tr -s ' ' | cut -d\ -f3 )
-if [ "${BOOSTVERSION}" != "${BOOST_VERSION_MAJOR}0${BOOST_VERSION_MINOR}0${BOOST_VERSION_PATCH}" ]; then
-   printf "Installing Boost library...\\n"
-   curl -LO https://dl.bintray.com/boostorg/release/${BOOST_VERSION_MAJOR}.${BOOST_VERSION_MINOR}.${BOOST_VERSION_PATCH}/source/boost_$BOOST_VERSION.tar.bz2 \
-   && tar -xjf boost_$BOOST_VERSION.tar.bz2 \
-   && cd $BOOST_ROOT \
-   && ./bootstrap.sh --prefix=$BOOST_ROOT \
-   && ./b2 -q -j"${JOBS}" install \
-   && cd .. \
-   && rm -f boost_$BOOST_VERSION.tar.bz2 \
-   && rm -rf $BOOST_LINK_LOCATION \
-   && ln -s $BOOST_ROOT $BOOST_LINK_LOCATION \
-   || exit 1
-   printf " - Boost library successfully installed @ ${BOOST_ROOT} (Symlinked to ${BOOST_LINK_LOCATION}).\\n"
-else
-   printf " - Boost library found with correct version @ ${BOOST_ROOT} (Symlinked to ${BOOST_LINK_LOCATION}).\\n"
-fi
-if [ $? -ne 0 ]; then exit -1; fi
-
-
-printf "\\n"
-
-
-printf "Checking MongoDB installation...\\n"
-if [ !
-d $MONGODB_ROOT ]; then - printf "Installing MongoDB into ${MONGODB_ROOT}...\\n" - curl -OL http://downloads.mongodb.org/linux/mongodb-linux-x86_64-ubuntu$OS_MAJ$OS_MIN-$MONGODB_VERSION.tgz \ - && tar -xzf mongodb-linux-x86_64-ubuntu$OS_MAJ$OS_MIN-$MONGODB_VERSION.tgz \ - && mv $SRC_LOCATION/mongodb-linux-x86_64-ubuntu$OS_MAJ$OS_MIN-$MONGODB_VERSION $MONGODB_ROOT \ - && touch $MONGODB_LOG_LOCATION/mongod.log \ - && rm -f mongodb-linux-x86_64-ubuntu$OS_MAJ$OS_MIN-$MONGODB_VERSION.tgz \ - && cp -f $REPO_ROOT/scripts/mongod.conf $MONGODB_CONF \ - && mkdir -p $MONGODB_DATA_LOCATION \ - && rm -rf $MONGODB_LINK_LOCATION \ - && rm -rf $BIN_LOCATION/mongod \ - && ln -s $MONGODB_ROOT $MONGODB_LINK_LOCATION \ - && ln -s $MONGODB_LINK_LOCATION/bin/mongod $BIN_LOCATION/mongod \ - || exit 1 - printf " - MongoDB successfully installed @ ${MONGODB_ROOT} (Symlinked to ${MONGODB_LINK_LOCATION}).\\n" -else - printf " - MongoDB found with correct version @ ${MONGODB_ROOT} (Symlinked to ${MONGODB_LINK_LOCATION}).\\n" -fi -if [ $? -ne 0 ]; then exit -1; fi -printf "Checking MongoDB C driver installation...\\n" -if [ ! -d $MONGO_C_DRIVER_ROOT ]; then - printf "Installing MongoDB C driver...\\n" - curl -LO https://github.com/mongodb/mongo-c-driver/releases/download/$MONGO_C_DRIVER_VERSION/mongo-c-driver-$MONGO_C_DRIVER_VERSION.tar.gz \ - && tar -xzf mongo-c-driver-$MONGO_C_DRIVER_VERSION.tar.gz \ - && cd mongo-c-driver-$MONGO_C_DRIVER_VERSION \ - && mkdir -p cmake-build \ - && cd cmake-build \ - && $CMAKE -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$PREFIX -DENABLE_BSON=ON -DENABLE_SSL=OPENSSL -DENABLE_AUTOMATIC_INIT_AND_CLEANUP=OFF -DENABLE_STATIC=ON .. \ - && make -j"${JOBS}" \ - && make install \ - && cd ../.. \ - && rm mongo-c-driver-$MONGO_C_DRIVER_VERSION.tar.gz \ - || exit 1 - printf " - MongoDB C driver successfully installed @ ${MONGO_C_DRIVER_ROOT}.\\n" -else - printf " - MongoDB C driver found with correct version @ ${MONGO_C_DRIVER_ROOT}.\\n" -fi -if [ $? -ne 0 ]; then exit -1; fi -printf "Checking MongoDB C++ driver installation...\\n" -if [ ! -d $MONGO_CXX_DRIVER_ROOT ]; then - printf "Installing MongoDB C++ driver...\\n" - curl -L https://github.com/mongodb/mongo-cxx-driver/archive/r$MONGO_CXX_DRIVER_VERSION.tar.gz -o mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION.tar.gz \ - && tar -xzf mongo-cxx-driver-r${MONGO_CXX_DRIVER_VERSION}.tar.gz \ - && cd mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION/build \ - && $CMAKE -DBUILD_SHARED_LIBS=OFF -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$PREFIX .. \ - && make -j"${JOBS}" VERBOSE=1 \ - && make install \ - && cd ../.. \ - && rm -f mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION.tar.gz \ - || exit 1 - printf " - MongoDB C++ driver successfully installed @ ${MONGO_CXX_DRIVER_ROOT}.\\n" -else - printf " - MongoDB C++ driver found with correct version @ ${MONGO_CXX_DRIVER_ROOT}.\\n" -fi -if [ $? -ne 0 ]; then exit -1; fi - - -printf "\\n" - - -printf "Checking LLVM 4 support...\\n" -if [ ! -d $LLVM_ROOT ]; then - ln -s /usr/lib/llvm-4.0 $LLVM_ROOT \ - || exit 1 - printf " - LLVM successfully linked from /usr/lib/llvm-4.0 to ${LLVM_ROOT}\\n" -else - printf " - LLVM found @ ${LLVM_ROOT}.\\n" -fi -if [ $? -ne 0 ]; then exit -1; fi - - -cd .. 
-printf "\\n" - -# Use current directory's tmp directory if noexec is enabled for /tmp -if (mount | grep "/tmp " | grep --quiet noexec); then - mkdir -p $REPO_ROOT/tmp - TMP_LOCATION="${REPO_ROOT}/tmp" - rm -rf $REPO_ROOT/tmp/* -else # noexec wasn't found - TMP_LOCATION="/tmp" -fi - -if $BUILD_CLANG8; then - export LD_LIBRARY_PATH=${OPT_LOCATION}/mpfr/lib:$LD_LIBRARY_PATH - export LD_LIBRARY_PATH=${OPT_LOCATION}/gcc/lib64:$LD_LIBRARY_PATH - - if [ ! -d ${OPT_LOCATION}/gmp ]; then - printf "Installing gmp...\\n" - cd ${TMP_LOCATION} \ - && curl -LO https://ftp.gnu.org/gnu/gmp/gmp-5.0.1.tar.gz && tar -xzf gmp-5.0.1.tar.gz \ - && cd gmp-5.0.1 && mkdir build && cd build \ - && ../configure --prefix=${OPT_LOCATION}/gmp \ - && make -j"${JOBS}" && make install \ - && cd ../ && rm -rf ${TMP_LOCATION}/gmp-5.0.1 ${TMP_LOCATION}/gmp-5.0.1.tar.gz \ - || exit 1 - fi - if [ ! -d ${OPT_LOCATION}/mpfr ]; then - printf "Installing mpfr...\\n" - cd ${TMP_LOCATION} \ - && curl -LO https://ftp.gnu.org/gnu/mpfr/mpfr-3.0.0.tar.gz && tar -xzf mpfr-3.0.0.tar.gz \ - && cd mpfr-3.0.0 && mkdir build && cd build \ - && ../configure --prefix=${OPT_LOCATION}/mpfr --with-gmp=${OPT_LOCATION}/gmp \ - && make -j"${JOBS}" && make install \ - && cd ../ && rm -rf ${TMP_LOCATION}/mpfr-3.0.0 ${TMP_LOCATION}/mpfr-3.0.0.tar.gz \ - || exit 1 - fi - if [ ! -d ${OPT_LOCATION}/mpc ]; then - printf "Installing mpc...\\n" - cd ${TMP_LOCATION} \ - && curl -LO https://ftp.gnu.org/gnu/mpc/mpc-1.0.1.tar.gz && tar -xzf mpc-1.0.1.tar.gz \ - && cd mpc-1.0.1 && mkdir build && cd build \ - && ../configure --prefix=${OPT_LOCATION}/mpc --with-gmp=${OPT_LOCATION}/gmp --with-mpfr=${OPT_LOCATION}/mpfr \ - && make -j"${JOBS}" && make install \ - && cd ../ && rm -rf ${TMP_LOCATION}/mpc-1.0.1 ${TMP_LOCATION}/mpc-1.0.1.tar.gz \ - || exit 1 - fi - if [ ! -d ${OPT_LOCATION}/gcc ]; then - printf "Installing libstdc++\\n" - cd ${TMP_LOCATION} \ - && curl -LO https://ftp.gnu.org/gnu/gcc/gcc-7.1.0/gcc-7.1.0.tar.gz && tar -xzf gcc-7.1.0.tar.gz \ - && cd gcc-7.1.0 \ - && sed '61 s/ucontext/ucontext_t/' libgcc/config/i386/linux-unwind.h &> libgcc/config/i386/linux-unwind.h \ - && mkdir build && cd build \ - &&../configure --enable-languages=c,c++ --prefix=${OPT_LOCATION}/gcc --disable-shared --enable-linker-build-id --without-included-gettext --enable-threads=posix --enable-nls --enable-clocale=gnu --enable-libstdcxx-debug --enable-libstdcxx-time=yes --with-default-libstdcxx-abi=new --enable-gnu-unique-object --disable-vtable-verify --disable-libmpx --enable-plugin --with-system-zlib --with-target-system-zlib --disable-werror --disable-multilib --with-tune=generic --enable-checking=release --with-gmp=${OPT_LOCATION}/gmp --with-mpfr=${OPT_LOCATION}/mpfr --with-mpc=${OPT_LOCATION}/mpc --disable-libsanitizer --disable-testsuite --disable-libquadmath --disable-libitm --disable-libcc1 \ - && make -j"${JOBS}" && make install \ - && cd ../ && rm -rf ${TMP_LOCATION}/gcc-7.1.0 ${TMP_LOCATION}/gcc-7.1.0.tar.gz \ - || exit 1 - fi - +if $PIN_COMPILER; then printf "Checking Clang 8 support...\\n" if [ ! -d $CLANG8_ROOT ]; then printf "Installing Clang 8...\\n" @@ -351,8 +205,158 @@ if $BUILD_CLANG8; then if [ $? -ne 0 ]; then exit -1; fi printf "\\n" + + printf "Checking LLVM 4 installation...\\n" + if [ ! 
-d $OPT_LOCATION/llvm4 ]; then + printf "Installing LLVM 4...\\n" + curl -LO http://releases.llvm.org/4.0.0/llvm-4.0.0.src.tar.xz && tar -xf llvm-4.0.0.src.tar.xz \ + && cd llvm-4.0.0.src && mkdir -p build && cd build \ + && $CMAKE -DCMAKE_INSTALL_PREFIX=$OPT_LOCATION/llvm4 -DLLVM_TARGETS_TO_BUILD=host -DLLVM_BUILD_TOOLS=false -DLLVM_ENABLE_RTTI=1 -DCMAKE_BUILD_TYPE=Release -DCMAKE_TOOLCHAIN_FILE=$BUILD_DIR/pinned_toolchain.cmake .. \ + && make -j"${JOBS}" install \ + || exit -1 + printf "Installed LLVM 4 @ ${OPT_LOCATION}/llvm4" + fi + + cd $SRC_LOCATION + printf "Checking zlib library (${BOOST_VERSION}) installation...\\n" + if [ ! -d $OPT_LOCATION/zlib ]; then + printf "Installing zlib...\\n" + curl -LO https://www.zlib.net/zlib-1.2.11.tar.gz && tar -xf zlib-1.2.11.tar.gz \ + && cd zlib-1.2.11 && mkdir build && cd build \ + && ../configure --prefix=$OPT_LOCATION/zlib \ + && make -j"${JOBS}" install \ + || exit -1 + fi + + cd $SRC_LOCATION + printf "Checking Boost library (${BOOST_VERSION}) installation...\\n" + BOOSTVERSION=$( grep "#define BOOST_VERSION" "$BOOST_ROOT/include/boost/version.hpp" 2>/dev/null | tail -1 | tr -s ' ' | cut -d\ -f3 ) + if [ "${BOOSTVERSION}" != "${BOOST_VERSION_MAJOR}0${BOOST_VERSION_MINOR}0${BOOST_VERSION_PATCH}" ]; then + printf "Installing Boost library...\\n" + curl -LO https://dl.bintray.com/boostorg/release/${BOOST_VERSION_MAJOR}.${BOOST_VERSION_MINOR}.${BOOST_VERSION_PATCH}/source/boost_$BOOST_VERSION.tar.bz2 \ + && tar -xjf boost_$BOOST_VERSION.tar.bz2 \ + && cd $BOOST_ROOT \ + && printf "using clang : 8.0 : ${CLANG8_ROOT}/bin/clang++ : \"-D__STRICT_ANSI__ -stdlib=libc++ -std=c++17 -nostdinc++ -I${CLANG8_ROOT}/include/c++/v1\";" &> clang8.jam \ + && ./bootstrap.sh --prefix=$BOOST_ROOT \ + && ./b2 toolset=clang cxxflags="-stdlib=libc++ -D__STRICT_ANSI__ -nostdinc++ -I${CLANG8_ROOT}/include/c++/v1" linkflags="-stdlib=libc++" link=static threading=multi --with-iostreams --with-date_time --with-filesystem --with-system --with-program_options --with-chrono --with-test -q -j"${JOBS}" -sZLIB_LIBRARY_PATH="${OPT_LOCATION}/zlib/lib" -sZLIB_INCLUDE="${OPT_LOCATION}/zlib/include" -sZLIB_SOURCE="${SRC_LOCATION}/zlib-1.2.11" install \ + && cd .. \ + && rm -f boost_$BOOST_VERSION.tar.bz2 \ + && rm -rf $BOOST_LINK_LOCATION \ + && ln -s $BOOST_ROOT $BOOST_LINK_LOCATION \ + || exit 1 + printf " - Boost library successfully installed @ ${BOOST_ROOT} (Symlinked to ${BOOST_LINK_LOCATION}).\\n" + else + printf " - Boost library found with correct version @ ${BOOST_ROOT} (Symlinked to ${BOOST_LINK_LOCATION}).\\n" + fi + if [ $? -ne 0 ]; then exit -1; fi + + printf "\\n" +else + printf "Checking Boost library (${BOOST_VERSION}) installation...\\n" + BOOSTVERSION=$( grep "#define BOOST_VERSION" "$BOOST_ROOT/include/boost/version.hpp" 2>/dev/null | tail -1 | tr -s ' ' | cut -d\ -f3 ) + if [ "${BOOSTVERSION}" != "${BOOST_VERSION_MAJOR}0${BOOST_VERSION_MINOR}0${BOOST_VERSION_PATCH}" ]; then + printf "Installing Boost library...\\n" + curl -LO https://dl.bintray.com/boostorg/release/${BOOST_VERSION_MAJOR}.${BOOST_VERSION_MINOR}.${BOOST_VERSION_PATCH}/source/boost_$BOOST_VERSION.tar.bz2 \ + && tar -xjf boost_$BOOST_VERSION.tar.bz2 \ + && cd $BOOST_ROOT \ + && ./bootstrap.sh --prefix=$BOOST_ROOT \ + && ./b2 -q -j"${JOBS}" install \ + && cd .. 
\ + && rm -f boost_$BOOST_VERSION.tar.bz2 \ + && rm -rf $BOOST_LINK_LOCATION \ + && ln -s $BOOST_ROOT $BOOST_LINK_LOCATION \ + || exit 1 + printf " - Boost library successfully installed @ ${BOOST_ROOT} (Symlinked to ${BOOST_LINK_LOCATION}).\\n" + else + printf " - Boost library found with correct version @ ${BOOST_ROOT} (Symlinked to ${BOOST_LINK_LOCATION}).\\n" + fi + if [ $? -ne 0 ]; then exit -1; fi + + printf "\\n" + + printf "Checking MongoDB installation...\\n" + if [ ! -d $MONGODB_ROOT ]; then + printf "Installing MongoDB into ${MONGODB_ROOT}...\\n" + curl -OL http://downloads.mongodb.org/linux/mongodb-linux-x86_64-ubuntu$OS_MAJ$OS_MIN-$MONGODB_VERSION.tgz \ + && tar -xzf mongodb-linux-x86_64-ubuntu$OS_MAJ$OS_MIN-$MONGODB_VERSION.tgz \ + && mv $SRC_LOCATION/mongodb-linux-x86_64-ubuntu$OS_MAJ$OS_MIN-$MONGODB_VERSION $MONGODB_ROOT \ + && touch $MONGODB_LOG_LOCATION/mongod.log \ + && rm -f mongodb-linux-x86_64-ubuntu$OS_MAJ$OS_MIN-$MONGODB_VERSION.tgz \ + && cp -f $REPO_ROOT/scripts/mongod.conf $MONGODB_CONF \ + && mkdir -p $MONGODB_DATA_LOCATION \ + && rm -rf $MONGODB_LINK_LOCATION \ + && rm -rf $BIN_LOCATION/mongod \ + && ln -s $MONGODB_ROOT $MONGODB_LINK_LOCATION \ + && ln -s $MONGODB_LINK_LOCATION/bin/mongod $BIN_LOCATION/mongod \ + || exit 1 + printf " - MongoDB successfully installed @ ${MONGODB_ROOT} (Symlinked to ${MONGODB_LINK_LOCATION}).\\n" + else + printf " - MongoDB found with correct version @ ${MONGODB_ROOT} (Symlinked to ${MONGODB_LINK_LOCATION}).\\n" + fi + if [ $? -ne 0 ]; then exit -1; fi + printf "Checking MongoDB C driver installation...\\n" + if [ ! -d $MONGO_C_DRIVER_ROOT ]; then + if [ ! $BUILD_CLANG8 ]; then + PINNED_TOOLCHAIN="" + fi + printf "Installing MongoDB C driver...\\n" + curl -LO https://github.com/mongodb/mongo-c-driver/releases/download/$MONGO_C_DRIVER_VERSION/mongo-c-driver-$MONGO_C_DRIVER_VERSION.tar.gz \ + && tar -xzf mongo-c-driver-$MONGO_C_DRIVER_VERSION.tar.gz \ + && cd mongo-c-driver-$MONGO_C_DRIVER_VERSION \ + && mkdir -p cmake-build \ + && cd cmake-build \ + && $CMAKE -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$PREFIX -DENABLE_BSON=ON -DENABLE_SSL=OPENSSL -DENABLE_AUTOMATIC_INIT_AND_CLEANUP=OFF -DENABLE_STATIC=ON $PINNED_TOOLCHAIN .. \ + && make -j"${JOBS}" \ + && make install \ + && cd ../.. \ + && rm mongo-c-driver-$MONGO_C_DRIVER_VERSION.tar.gz \ + || exit 1 + printf " - MongoDB C driver successfully installed @ ${MONGO_C_DRIVER_ROOT}.\\n" + else + printf " - MongoDB C driver found with correct version @ ${MONGO_C_DRIVER_ROOT}.\\n" + fi + + if [ $? -ne 0 ]; then exit -1; fi + printf "Checking MongoDB C++ driver installation...\\n" + if [ ! -d $MONGO_CXX_DRIVER_ROOT ]; then + if [ ! $BUILD_CLANG8 ]; then + PINNED_TOOLCHAIN="" + fi + printf "Installing MongoDB C++ driver...\\n" + curl -L https://github.com/mongodb/mongo-cxx-driver/archive/r$MONGO_CXX_DRIVER_VERSION.tar.gz -o mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION.tar.gz \ + && tar -xzf mongo-cxx-driver-r${MONGO_CXX_DRIVER_VERSION}.tar.gz \ + && cd mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION \ + && sed '111 s/count/static_cast(count)/' src/mongocxx/options/change_stream.cpp &> src/mongocxx/options/change_stream.cpp \ + && cd build \ + && $CMAKE -DBUILD_SHARED_LIBS=OFF -DCXX_CMAKE_FLAGS=-D_GLIBCXX_USE_CXX11_ABI=1 -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$PREFIX $PINNED_TOOLCHAIN .. \ + && make -j"${JOBS}" VERBOSE=1 \ + && make install \ + && cd ../.. 
\ + && rm -f mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION.tar.gz \ + || exit 1 + printf " - MongoDB C++ driver successfully installed @ ${MONGO_CXX_DRIVER_ROOT}.\\n" + else + printf " - MongoDB C++ driver found with correct version @ ${MONGO_CXX_DRIVER_ROOT}.\\n" + fi + if [ $? -ne 0 ]; then exit -1; fi + + + printf "\\n" + + printf "Checking LLVM 4 support...\\n" + if [ ! -d $LLVM_ROOT ]; then + ln -s /usr/lib/llvm-4.0 $LLVM_ROOT \ + || exit 1 + printf " - LLVM successfully linked from /usr/lib/llvm-4.0 to ${LLVM_ROOT}\\n" + else + printf " - LLVM found @ ${LLVM_ROOT}.\\n" + fi + if [ $? -ne 0 ]; then exit -1; fi fi +cd .. +printf "\\n" + function print_instructions() { return 0 } diff --git a/scripts/pinned_toolchain.cmake b/scripts/pinned_toolchain.cmake index ee1fd65a4e1..75a906111e8 100644 --- a/scripts/pinned_toolchain.cmake +++ b/scripts/pinned_toolchain.cmake @@ -3,13 +3,20 @@ set(CMAKE_C_COMPILER_WORKS 1) set(CMAKE_CXX_COMPILER_WORKS 1) set(CMAKE_C_COMPILER ${OPT_PATH}/clang8/bin/clang) set(CMAKE_CXX_COMPILER ${OPT_PATH}/clang8/bin/clang++) + #set(CMAKE_CXX_STANDARD_INCLUDE_DIRECTORIES ${OPT_PATH}/gcc/include/c++/7.1.0 ${OPT_PATH}/gcc/include/c++/7.1.0/x86_64-pc-linux-gnu) -#set(CMAKE_CXX_FLAGS_INIT "-nostdinc++") -# -#if(NOT APPLE) -# set(CMAKE_CXX_STANDARD_LIBRARIES "-Wl,-L${OPT_PATH}/gcc/lib64 -Wl,-Bstatic -lstdc++ -Wl,-Bdynamic -Wl,-nostdlib -Wl,--no-whole-archive ${OPT_PATH}/gcc/lib64/libstdc++.a -Wl,--no-whole-archive ${OPT_PATH}/gcc/lib64/libsupc++.a") -#else() -# set(CMAKE_SHARED_LINKER_FLAGS_INIT "-stdlib=libc++ -nostdlib++ -static-libstdc++ -shared-libgcc") -# set(CMAKE_MODULE_LINKER_FLAGS_INIT "-stdlib=libc++ -nostdlib++ -static-libstdc++ -shared-libgcc") -# set(CMAKE_CXX_STANDARD_LIBRARIES "${OPT_PATH}/clang8/lib/libc++.a ${OPT_PATH}/clang8/lib/libc++abi.a") -#endif() +set(CMAKE_CXX_STANDARD_INCLUDE_DIRECTORIES ${OPT_PATH}/clang8/include/c++/v1 /usr/local/include /usr/include) +set(CMAKE_CXX_FLAGS_INIT "-nostdinc++") +set(CMAKE_EXE_LINKER_FLAGS_INIT "-stdlib=libc++ -nostdlib++") + +if(NOT APPLE) + set(CMAKE_SHARED_LINKER_FLAGS_INIT "-stdlib=libc++ -nostdlib++") + set(CMAKE_MODULE_LINKER_FLAGS_INIT "-stdlib=libc++ -nostdlib++") + set(CMAKE_CXX_STANDARD_LIBRARIES "-stdlib=libc++ -nostdlib++ ${OPT_PATH}/clang8/lib/libc++.a ${OPT_PATH}/clang8/lib/libc++abi.a") + #set(CMAKE_CXX_STANDARD_LIBRARIES "${OPT_PATH}/gcc/lib64/libstdc++.a ${OPT_PATH}/gcc/lib64/libsupc++.a") + #set(CMAKE_CXX_STANDARD_LIBRARIES "-static -Wl,-L${OPT_PATH}/gcc/lib64 -Wl,-Bstatic -lstdc++ -Wl,-Bdynamic -lz -Wl,-Bdynamic, -lpthread -Wl,-Bdynamic -lm -Wl,-Bdynamic -lc -Wl,-Bdynamic -lgcc -Wl,-nostdlib -Wl,--no-whole-archive ${OPT_PATH}/gcc/lib64/libstdc++.a -Wl,--no-whole-archive ${OPT_PATH}/gcc/lib64/libsupc++.a -Wl,-Bdynamic -lpthread") +else() + set(CMAKE_SHARED_LINKER_FLAGS_INIT "-stdlib=libc++ -nostdlib++") + set(CMAKE_MODULE_LINKER_FLAGS_INIT "-stdlib=libc++ -nostdlib++") + set(CMAKE_CXX_STANDARD_LIBRARIES "${OPT_PATH}/clang8/lib/libc++.a ${OPT_PATH}/clang8/lib/libc++abi.a") +endif() From 7209b24dbb5fee0ed49887edead23e9261ed82ad Mon Sep 17 00:00:00 2001 From: Bucky Kittinger Date: Mon, 22 Apr 2019 14:24:13 -0400 Subject: [PATCH 473/680] add cd to tmp dir --- scripts/eosio_build_amazon.sh | 3 ++- scripts/eosio_build_centos.sh | 3 ++- scripts/eosio_build_fedora.sh | 10 ++++++++-- scripts/eosio_build_ubuntu.sh | 12 ++++++------ 4 files changed, 18 insertions(+), 10 deletions(-) diff --git a/scripts/eosio_build_amazon.sh b/scripts/eosio_build_amazon.sh index af5220a658b..126e86cfeab 100755 --- 
a/scripts/eosio_build_amazon.sh +++ b/scripts/eosio_build_amazon.sh @@ -188,7 +188,8 @@ if $PIN_COMPILER; then printf "Checking LLVM 4 installation...\\n" if [ ! -d $OPT_LOCATION/llvm4 ]; then printf "Installing LLVM 4...\\n" - curl -LO http://releases.llvm.org/4.0.0/llvm-4.0.0.src.tar.xz && tar -xf llvm-4.0.0.src.tar.xz \ + cd $TMP_LOCATION \ + && curl -LO http://releases.llvm.org/4.0.0/llvm-4.0.0.src.tar.xz && tar -xf llvm-4.0.0.src.tar.xz \ && cd llvm-4.0.0.src && mkdir -p build && cd build \ && $CMAKE -DCMAKE_INSTALL_PREFIX=$OPT_LOCATION/llvm4 -DLLVM_TARGETS_TO_BUILD=host -DLLVM_BUILD_TOOLS=false -DLLVM_ENABLE_RTTI=1 -DCMAKE_BUILD_TYPE=Release -DCMAKE_TOOLCHAIN_FILE=$BUILD_DIR/pinned_toolchain.cmake .. \ && make -j"${JOBS}" install \ diff --git a/scripts/eosio_build_centos.sh b/scripts/eosio_build_centos.sh index 772c553ea7f..525a6449e51 100755 --- a/scripts/eosio_build_centos.sh +++ b/scripts/eosio_build_centos.sh @@ -240,7 +240,8 @@ if $PIN_COMPILER; then printf "Checking LLVM 4 installation...\\n" if [ ! -d $OPT_LOCATION/llvm4 ]; then printf "Installing LLVM 4...\\n" - curl -LO http://releases.llvm.org/4.0.0/llvm-4.0.0.src.tar.xz && tar -xf llvm-4.0.0.src.tar.xz \ + cd $TMP_LOCATION \ + && curl -LO http://releases.llvm.org/4.0.0/llvm-4.0.0.src.tar.xz && tar -xf llvm-4.0.0.src.tar.xz \ && cd llvm-4.0.0.src && mkdir -p build && cd build \ && $CMAKE -DCMAKE_INSTALL_PREFIX=$OPT_LOCATION/llvm4 -DLLVM_TARGETS_TO_BUILD=host -DLLVM_BUILD_TOOLS=false -DLLVM_ENABLE_RTTI=1 -DCMAKE_BUILD_TYPE=Release -DCMAKE_TOOLCHAIN_FILE=$BUILD_DIR/pinned_toolchain.cmake .. \ && make -j"${JOBS}" install \ diff --git a/scripts/eosio_build_fedora.sh b/scripts/eosio_build_fedora.sh index e5b41d78390..d96d24fab6f 100755 --- a/scripts/eosio_build_fedora.sh +++ b/scripts/eosio_build_fedora.sh @@ -47,8 +47,13 @@ DEP_ARRAY=( git sudo procps-ng which gcc gcc-c++ autoconf automake libtool make \ bzip2-devel curl bzip2 compat-openssl10 graphviz doxygen \ openssl-devel gmp-devel libstdc++-devel python2 python2-devel python3 python3-devel \ - libedit ncurses-devel swig llvm4.0 llvm4.0-devel llvm4.0-libs llvm4.0-static libcurl-devel libusb-devel + libedit ncurses-devel swig libcurl-devel libusb-devel ) + +if [ ! $PIN_COMPILER ]; then + DEP_ARRAY+=(llvm4.0 llvm4.0-devel llvm4.0-libs llvm4.0-static) +fi + COUNT=1 DISPLAY="" DEP="" @@ -179,7 +184,8 @@ if $PIN_COMPILER; then printf "Checking LLVM 4 installation...\\n" if [ ! -d $OPT_LOCATION/llvm4 ]; then printf "Installing LLVM 4...\\n" - curl -LO http://releases.llvm.org/4.0.0/llvm-4.0.0.src.tar.xz && tar -xf llvm-4.0.0.src.tar.xz \ + cd TMP_LOCATION \ + && curl -LO http://releases.llvm.org/4.0.0/llvm-4.0.0.src.tar.xz && tar -xf llvm-4.0.0.src.tar.xz \ && cd llvm-4.0.0.src && mkdir -p build && cd build \ && $CMAKE -DCMAKE_INSTALL_PREFIX=$OPT_LOCATION/llvm4 -DLLVM_TARGETS_TO_BUILD=host -DLLVM_BUILD_TOOLS=false -DLLVM_ENABLE_RTTI=1 -DCMAKE_BUILD_TYPE=Release -DCMAKE_TOOLCHAIN_FILE=$BUILD_DIR/pinned_toolchain.cmake .. \ && make -j"${JOBS}" install \ diff --git a/scripts/eosio_build_ubuntu.sh b/scripts/eosio_build_ubuntu.sh index c3525646ec3..721a902e911 100755 --- a/scripts/eosio_build_ubuntu.sh +++ b/scripts/eosio_build_ubuntu.sh @@ -70,10 +70,6 @@ if [ "${DISK_AVAIL%.*}" -lt "${DISK_MIN}" ]; then exit 1 fi -# llvm-4.0 is installed into /usr/lib/llvm-4.0 -if [ ! 
PIN_COMPILER ]; then - llvm4_deps=(llvm-4.0 libclang-4.0-dev) -fi DEP_ARRAY=( git make automake libbz2-dev libssl-dev doxygen graphviz \ @@ -81,7 +77,10 @@ DEP_ARRAY=( autoconf libtool curl zlib1g-dev sudo ruby libusb-1.0-0-dev libcurl4-gnutls-dev pkg-config ) -DEP_ARRAY+=$llvm4_deps +# llvm-4.0 is installed into /usr/lib/llvm-4.0 +if [ ! PIN_COMPILER ]; then + DEP_ARRAY+=(llvm-4.0 libclang-4.0-dev) +fi COUNT=1 DISPLAY="" @@ -209,7 +208,8 @@ if $PIN_COMPILER; then printf "Checking LLVM 4 installation...\\n" if [ ! -d $OPT_LOCATION/llvm4 ]; then printf "Installing LLVM 4...\\n" - curl -LO http://releases.llvm.org/4.0.0/llvm-4.0.0.src.tar.xz && tar -xf llvm-4.0.0.src.tar.xz \ + cd $TMP_LOCATION \ + && curl -LO http://releases.llvm.org/4.0.0/llvm-4.0.0.src.tar.xz && tar -xf llvm-4.0.0.src.tar.xz \ && cd llvm-4.0.0.src && mkdir -p build && cd build \ && $CMAKE -DCMAKE_INSTALL_PREFIX=$OPT_LOCATION/llvm4 -DLLVM_TARGETS_TO_BUILD=host -DLLVM_BUILD_TOOLS=false -DLLVM_ENABLE_RTTI=1 -DCMAKE_BUILD_TYPE=Release -DCMAKE_TOOLCHAIN_FILE=$BUILD_DIR/pinned_toolchain.cmake .. \ && make -j"${JOBS}" install \ From 5f4fafc0b891d8dc5435e59371bfe15dcd72f893 Mon Sep 17 00:00:00 2001 From: Bucky Kittinger Date: Mon, 22 Apr 2019 14:28:02 -0400 Subject: [PATCH 474/680] forgot $ for PIN_COMPILER --- scripts/eosio_build_ubuntu.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/eosio_build_ubuntu.sh b/scripts/eosio_build_ubuntu.sh index 721a902e911..73957acff17 100755 --- a/scripts/eosio_build_ubuntu.sh +++ b/scripts/eosio_build_ubuntu.sh @@ -78,7 +78,7 @@ DEP_ARRAY=( ) # llvm-4.0 is installed into /usr/lib/llvm-4.0 -if [ ! PIN_COMPILER ]; then +if [ ! $PIN_COMPILER ]; then DEP_ARRAY+=(llvm-4.0 libclang-4.0-dev) fi From b793f31b9c50f2548cc04880767d5711bd9d1a1c Mon Sep 17 00:00:00 2001 From: Bucky Kittinger Date: Mon, 22 Apr 2019 15:32:14 -0400 Subject: [PATCH 475/680] remove depth 1 from cloning; missing commits --- scripts/eosio_build_amazon.sh | 18 +++++++++--------- scripts/eosio_build_centos.sh | 18 +++++++++--------- scripts/eosio_build_darwin.sh | 18 +++++++++--------- scripts/eosio_build_fedora.sh | 18 +++++++++--------- scripts/eosio_build_ubuntu.sh | 18 +++++++++--------- 5 files changed, 45 insertions(+), 45 deletions(-) diff --git a/scripts/eosio_build_amazon.sh b/scripts/eosio_build_amazon.sh index 126e86cfeab..2e1da6db1a3 100755 --- a/scripts/eosio_build_amazon.sh +++ b/scripts/eosio_build_amazon.sh @@ -147,26 +147,26 @@ if $PIN_COMPILER; then if [ ! 
-d $CLANG8_ROOT ]; then printf "Installing Clang 8...\\n" cd ${TMP_LOCATION} \ - && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/llvm.git clang8 && cd clang8 \ + && git clone --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/llvm.git clang8 && cd clang8 \ && git checkout $PINNED_COMPILER_LLVM_COMMIT \ && cd tools \ - && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/lld.git \ + && git clone --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/lld.git \ && cd lld && git checkout $PINNED_COMPILER_LLD_COMMIT && cd ../ \ - && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/polly.git \ + && git clone --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/polly.git \ && cd polly && git checkout $PINNED_COMPILER_POLLY_COMMIT && cd ../ \ - && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/clang.git clang && cd clang/tools \ + && git clone --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/clang.git clang && cd clang/tools \ && git checkout $PINNED_COMPILER_CLANG_VERSION \ && mkdir extra && cd extra \ - && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/clang-tools-extra.git \ + && git clone --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/clang-tools-extra.git \ && cd clang-tools-extra && git checkout $PINNED_COMPILER_CLANG_TOOLS_EXTRA_COMMIT && cd .. \ && cd ../../../../projects \ - && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/libcxx.git \ + && git clone --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/libcxx.git \ && cd libcxx && git checkout $PINNED_COMPILER_LIBCXX_COMMIT && cd ../ \ - && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/libcxxabi.git \ + && git clone --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/libcxxabi.git \ && cd libcxxabi && git checkout $PINNED_COMPILER_LIBCXXABI_COMMIT && cd ../ \ - && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/libunwind.git \ + && git clone --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/libunwind.git \ && cd libunwind && git checkout $PINNED_COMPILER_LIBUNWIND_COMMIT && cd ../ \ - && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/compiler-rt.git \ + && git clone --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/compiler-rt.git \ && cd compiler-rt && git checkout $PINNED_COMPILER_COMPILER_RT_COMMIT && cd ../ \ && cd ${TMP_LOCATION}/clang8 \ && mkdir build && cd build \ diff --git a/scripts/eosio_build_centos.sh b/scripts/eosio_build_centos.sh index 525a6449e51..bf0c5c99eb3 100755 --- a/scripts/eosio_build_centos.sh +++ b/scripts/eosio_build_centos.sh @@ -199,26 +199,26 @@ if $PIN_COMPILER; then if [ ! 
-d $CLANG8_ROOT ]; then printf "Installing Clang 8...\\n" cd ${TMP_LOCATION} \ - && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/llvm.git clang8 && cd clang8 \ + && git clone --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/llvm.git clang8 && cd clang8 \ && git checkout $PINNED_COMPILER_LLVM_COMMIT \ && cd tools \ - && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/lld.git \ + && git clone --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/lld.git \ && cd lld && git checkout $PINNED_COMPILER_LLD_COMMIT && cd ../ \ - && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/polly.git \ + && git clone --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/polly.git \ && cd polly && git checkout $PINNED_COMPILER_POLLY_COMMIT && cd ../ \ - && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/clang.git clang && cd clang/tools \ + && git clone --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/clang.git clang && cd clang/tools \ && git checkout $PINNED_COMPILER_CLANG_VERSION \ && mkdir extra && cd extra \ - && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/clang-tools-extra.git \ + && git clone --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/clang-tools-extra.git \ && cd clang-tools-extra && git checkout $PINNED_COMPILER_CLANG_TOOLS_EXTRA_COMMIT && cd .. \ && cd ../../../../projects \ - && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/libcxx.git \ + && git clone --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/libcxx.git \ && cd libcxx && git checkout $PINNED_COMPILER_LIBCXX_COMMIT && cd ../ \ - && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/libcxxabi.git \ + && git clone --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/libcxxabi.git \ && cd libcxxabi && git checkout $PINNED_COMPILER_LIBCXXABI_COMMIT && cd ../ \ - && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/libunwind.git \ + && git clone --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/libunwind.git \ && cd libunwind && git checkout $PINNED_COMPILER_LIBUNWIND_COMMIT && cd ../ \ - && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/compiler-rt.git \ + && git clone --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/compiler-rt.git \ && cd compiler-rt && git checkout $PINNED_COMPILER_COMPILER_RT_COMMIT && cd ../ \ && cd ${TMP_LOCATION}/clang8 \ && mkdir build && cd build \ diff --git a/scripts/eosio_build_darwin.sh b/scripts/eosio_build_darwin.sh index 4150d379f27..66883ed5267 100755 --- a/scripts/eosio_build_darwin.sh +++ b/scripts/eosio_build_darwin.sh @@ -266,26 +266,26 @@ if $PIN_COMPILER; then if [ ! 
-d $CLANG8_ROOT ]; then printf "Installing Clang 8...\\n" cd ${TMP_LOCATION} \ - && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/llvm.git clang8 \ + && git clone --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/llvm.git clang8 \ && cd clang8 && git checkout $PINNED_COMPILER_LLVM_COMMIT \ && cd tools \ - && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/lld.git \ + && git clone --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/lld.git \ && cd lld && git checkout $PINNED_COMPILER_LLD_COMMIT && cd ../ \ - && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/polly.git \ + && git clone --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/polly.git \ && cd polly && git checkout $PINNED_COMPILER_POLLY_COMMIT && cd ../ \ - && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/clang.git clang && cd clang/tools \ + && git clone --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/clang.git clang && cd clang/tools \ && git checkout $PINNED_COMPILER_CLANG_VERSION \ && mkdir extra && cd extra \ - && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/clang-tools-extra.git \ + && git clone --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/clang-tools-extra.git \ && cd clang-tools-extra && git checkout $PINNED_COMPILER_CLANG_TOOLS_EXTRA_COMMIT && cd .. \ && cd ../../../../projects \ - && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/libcxx.git \ + && git clone --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/libcxx.git \ && cd libcxx && git checkout $PINNED_COMPILER_LIBCXX_COMMIT && cd ../ \ - && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/libcxxabi.git \ + && git clone --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/libcxxabi.git \ && cd libcxxabi && git checkout $PINNED_COMPILER_LIBCXXABI_COMMIT && cd ../ \ - && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/libunwind.git \ + && git clone --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/libunwind.git \ && cd libunwind && git checkout $PINNED_COMPILER_LIBUNWIND_COMMIT && cd ../ \ - && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/compiler-rt.git \ + && git clone --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/compiler-rt.git \ && cd compiler-rt && git checkout $PINNED_COMPILER_COMPILER_RT_COMMIT && cd ../ \ && cd ${TMP_LOCATION}/clang8 \ && mkdir build && cd build \ diff --git a/scripts/eosio_build_fedora.sh b/scripts/eosio_build_fedora.sh index d96d24fab6f..7187aa2ee22 100755 --- a/scripts/eosio_build_fedora.sh +++ b/scripts/eosio_build_fedora.sh @@ -143,26 +143,26 @@ if $PIN_COMPILER; then if [ ! 
-d $CLANG8_ROOT ]; then printf "Installing Clang 8...\\n" cd ${TMP_LOCATION} \ - && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/llvm.git clang8 && cd clang8 \ + && git clone --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/llvm.git clang8 && cd clang8 \ && git checkout $PINNED_COMPILER_LLVM_COMMIT \ && cd tools \ - && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/lld.git \ + && git clone --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/lld.git \ && cd lld && git checkout $PINNED_COMPILER_LLD_COMMIT && cd ../ \ - && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/polly.git \ + && git clone --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/polly.git \ && cd polly && git checkout $PINNED_COMPILER_POLLY_COMMIT && cd ../ \ - && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/clang.git clang && cd clang/tools \ + && git clone --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/clang.git clang && cd clang/tools \ && git checkout $PINNED_COMPILER_CLANG_VERSION \ && mkdir extra && cd extra \ - && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/clang-tools-extra.git \ + && git clone --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/clang-tools-extra.git \ && cd clang-tools-extra && git checkout $PINNED_COMPILER_CLANG_TOOLS_EXTRA_COMMIT && cd .. \ && cd ../../../../projects \ - && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/libcxx.git \ + && git clone --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/libcxx.git \ && cd libcxx && git checkout $PINNED_COMPILER_LIBCXX_COMMIT && cd ../ \ - && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/libcxxabi.git \ + && git clone --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/libcxxabi.git \ && cd libcxxabi && git checkout $PINNED_COMPILER_LIBCXXABI_COMMIT && cd ../ \ - && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/libunwind.git \ + && git clone --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/libunwind.git \ && cd libunwind && git checkout $PINNED_COMPILER_LIBUNWIND_COMMIT && cd ../ \ - && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/compiler-rt.git \ + && git clone --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/compiler-rt.git \ && cd compiler-rt && git checkout $PINNED_COMPILER_COMPILER_RT_COMMIT && cd ../ \ && cd ${TMP_LOCATION}/clang8 \ && mkdir build && cd build \ diff --git a/scripts/eosio_build_ubuntu.sh b/scripts/eosio_build_ubuntu.sh index 73957acff17..e9edc6e8cba 100755 --- a/scripts/eosio_build_ubuntu.sh +++ b/scripts/eosio_build_ubuntu.sh @@ -167,26 +167,26 @@ if $PIN_COMPILER; then if [ ! 
-d $CLANG8_ROOT ]; then
      printf "Installing Clang 8...\\n"
      cd ${TMP_LOCATION} \
-      && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/llvm.git clang8 && cd clang8 \
+      && git clone --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/llvm.git clang8 && cd clang8 \
      && git checkout $PINNED_COMPILER_LLVM_COMMIT \
      && cd tools \
-      && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/lld.git \
+      && git clone --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/lld.git \
      && cd lld && git checkout $PINNED_COMPILER_LLD_COMMIT && cd ../ \
-      && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/polly.git \
+      && git clone --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/polly.git \
      && cd polly && git checkout $PINNED_COMPILER_POLLY_COMMIT && cd ../ \
-      && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/clang.git clang && cd clang/tools \
+      && git clone --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/clang.git clang && cd clang/tools \
      && git checkout $PINNED_COMPILER_CLANG_VERSION \
      && mkdir extra && cd extra \
-      && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/clang-tools-extra.git \
+      && git clone --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/clang-tools-extra.git \
      && cd clang-tools-extra && git checkout $PINNED_COMPILER_CLANG_TOOLS_EXTRA_COMMIT && cd .. \
      && cd ../../../../projects \
-      && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/libcxx.git \
+      && git clone --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/libcxx.git \
      && cd libcxx && git checkout $PINNED_COMPILER_LIBCXX_COMMIT && cd ../ \
-      && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/libcxxabi.git \
+      && git clone --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/libcxxabi.git \
      && cd libcxxabi && git checkout $PINNED_COMPILER_LIBCXXABI_COMMIT && cd ../ \
-      && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/libunwind.git \
+      && git clone --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/libunwind.git \
      && cd libunwind && git checkout $PINNED_COMPILER_LIBUNWIND_COMMIT && cd ../ \
-      && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/compiler-rt.git \
+      && git clone --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/compiler-rt.git \
      && cd compiler-rt && git checkout $PINNED_COMPILER_COMPILER_RT_COMMIT && cd ../ \
      && cd ${TMP_LOCATION}/clang8 \
      && mkdir build && cd build \
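
A sketch of the failure mode behind the patch above, using the variables these scripts already define (illustrative commands, not part of the patch series): a --depth 1 clone contains only the tip commit of the branch, so the later checkout of a pinned commit that sits deeper in the history fails with "fatal: reference is not a tree". Dropping --depth keeps the full single-branch history so the pinned commit resolves.

    git clone --depth 1 --single-branch --branch "$PINNED_COMPILER_BRANCH" \
        https://git.llvm.org/git/llvm.git clang8
    cd clang8
    git checkout "$PINNED_COMPILER_LLVM_COMMIT"   # fails unless the pinned commit is the branch tip

    cd .. && rm -rf clang8
    git clone --single-branch --branch "$PINNED_COMPILER_BRANCH" \
        https://git.llvm.org/git/llvm.git clang8
    cd clang8
    git checkout "$PINNED_COMPILER_LLVM_COMMIT"   # full branch history, so the commit resolves
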
From d967c7377546b4d78ffed22df405747954317557 Mon Sep 17 00:00:00 2001
From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com>
Date: Mon, 22 Apr 2019 16:05:32 -0400
Subject: [PATCH 476/680] more clearly document when setting up a wavm garbage
 collection pass

---
 libraries/chain/webassembly/wavm.cpp | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/libraries/chain/webassembly/wavm.cpp b/libraries/chain/webassembly/wavm.cpp
index 8d195df7720..57f6f1edee7 100644
--- a/libraries/chain/webassembly/wavm.cpp
+++ b/libraries/chain/webassembly/wavm.cpp
@@ -38,6 +38,11 @@ struct wavm_live_modules {
    void remove_live_module(live_module_ref it) {
       live_modules.erase(it);
+      run_wavm_garbage_collection();
+   }
+
+   void run_wavm_garbage_collection() {
+      //need to pass in a mutable list of root objects we want the garbage collector to retain
       std::vector<Runtime::ObjectInstance*> root;
       std::copy(live_modules.begin(), live_modules.end(), std::back_inserter(root));
       Runtime::freeUnreferencedObjects(std::move(root));

From 2734def80525d5e40f480b83f80c5b9f4aa2a1e8 Mon Sep 17 00:00:00 2001
From: Bucky Kittinger
Date: Mon, 22 Apr 2019 16:41:33 -0400
Subject: [PATCH 477/680] use SRC directory

---
 scripts/eosio_build_amazon.sh | 2 +-
 scripts/eosio_build_centos.sh | 2 +-
 scripts/eosio_build_fedora.sh | 2 +-
 scripts/eosio_build_ubuntu.sh | 2 +-
 4 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/scripts/eosio_build_amazon.sh b/scripts/eosio_build_amazon.sh
index 2e1da6db1a3..10cc4577074 100755
--- a/scripts/eosio_build_amazon.sh
+++ b/scripts/eosio_build_amazon.sh
@@ -188,7 +188,7 @@ if $PIN_COMPILER; then
    printf "Checking LLVM 4 installation...\\n"
    if [ ! -d $OPT_LOCATION/llvm4 ]; then
       printf "Installing LLVM 4...\\n"
-      cd $TMP_LOCATION \
+      cd $SRC_LOCATION \
       && curl -LO http://releases.llvm.org/4.0.0/llvm-4.0.0.src.tar.xz && tar -xf llvm-4.0.0.src.tar.xz \
       && cd llvm-4.0.0.src && mkdir -p build && cd build \
       && $CMAKE -DCMAKE_INSTALL_PREFIX=$OPT_LOCATION/llvm4 -DLLVM_TARGETS_TO_BUILD=host -DLLVM_BUILD_TOOLS=false -DLLVM_ENABLE_RTTI=1 -DCMAKE_BUILD_TYPE=Release -DCMAKE_TOOLCHAIN_FILE=$BUILD_DIR/pinned_toolchain.cmake .. \
       && make -j"${JOBS}" install \
diff --git a/scripts/eosio_build_centos.sh b/scripts/eosio_build_centos.sh
index bf0c5c99eb3..35f536abd8e 100755
--- a/scripts/eosio_build_centos.sh
+++ b/scripts/eosio_build_centos.sh
@@ -240,7 +240,7 @@ if $PIN_COMPILER; then
    printf "Checking LLVM 4 installation...\\n"
    if [ ! -d $OPT_LOCATION/llvm4 ]; then
       printf "Installing LLVM 4...\\n"
-      cd $TMP_LOCATION \
+      cd $SRC_LOCATION \
       && curl -LO http://releases.llvm.org/4.0.0/llvm-4.0.0.src.tar.xz && tar -xf llvm-4.0.0.src.tar.xz \
       && cd llvm-4.0.0.src && mkdir -p build && cd build \
       && $CMAKE -DCMAKE_INSTALL_PREFIX=$OPT_LOCATION/llvm4 -DLLVM_TARGETS_TO_BUILD=host -DLLVM_BUILD_TOOLS=false -DLLVM_ENABLE_RTTI=1 -DCMAKE_BUILD_TYPE=Release -DCMAKE_TOOLCHAIN_FILE=$BUILD_DIR/pinned_toolchain.cmake .. \
       && make -j"${JOBS}" install \
diff --git a/scripts/eosio_build_fedora.sh b/scripts/eosio_build_fedora.sh
index 7187aa2ee22..53c0c32d863 100755
--- a/scripts/eosio_build_fedora.sh
+++ b/scripts/eosio_build_fedora.sh
@@ -184,7 +184,7 @@ if $PIN_COMPILER; then
    printf "Checking LLVM 4 installation...\\n"
    if [ ! -d $OPT_LOCATION/llvm4 ]; then
       printf "Installing LLVM 4...\\n"
-      cd TMP_LOCATION \
+      cd $SRC_LOCATION \
       && curl -LO http://releases.llvm.org/4.0.0/llvm-4.0.0.src.tar.xz && tar -xf llvm-4.0.0.src.tar.xz \
       && cd llvm-4.0.0.src && mkdir -p build && cd build \
       && $CMAKE -DCMAKE_INSTALL_PREFIX=$OPT_LOCATION/llvm4 -DLLVM_TARGETS_TO_BUILD=host -DLLVM_BUILD_TOOLS=false -DLLVM_ENABLE_RTTI=1 -DCMAKE_BUILD_TYPE=Release -DCMAKE_TOOLCHAIN_FILE=$BUILD_DIR/pinned_toolchain.cmake .. \
       && make -j"${JOBS}" install \
diff --git a/scripts/eosio_build_ubuntu.sh b/scripts/eosio_build_ubuntu.sh
index e9edc6e8cba..ada49bbc7f8 100755
--- a/scripts/eosio_build_ubuntu.sh
+++ b/scripts/eosio_build_ubuntu.sh
@@ -208,7 +208,7 @@ if $PIN_COMPILER; then
    printf "Checking LLVM 4 installation...\\n"
    if [ !
-d $OPT_LOCATION/llvm4 ]; then
       printf "Installing LLVM 4...\\n"
-      cd $TMP_LOCATION \
+      cd $SRC_LOCATION \
       && curl -LO http://releases.llvm.org/4.0.0/llvm-4.0.0.src.tar.xz && tar -xf llvm-4.0.0.src.tar.xz \
       && cd llvm-4.0.0.src && mkdir -p build && cd build \
       && $CMAKE -DCMAKE_INSTALL_PREFIX=$OPT_LOCATION/llvm4 -DLLVM_TARGETS_TO_BUILD=host -DLLVM_BUILD_TOOLS=false -DLLVM_ENABLE_RTTI=1 -DCMAKE_BUILD_TYPE=Release -DCMAKE_TOOLCHAIN_FILE=$BUILD_DIR/pinned_toolchain.cmake .. \
       && make -j"${JOBS}" install \

From 62ebdb81a56d22f41272019de68b1caf0ca09ca7 Mon Sep 17 00:00:00 2001
From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com>
Date: Mon, 22 Apr 2019 16:49:21 -0400
Subject: [PATCH 478/680] replace subscript lookup in wavm support code with
 at()

this lookup should be well protected already, but no complaints about
protecting it further; it's not a hot path or anything
---
 libraries/chain/webassembly/wavm.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/libraries/chain/webassembly/wavm.cpp b/libraries/chain/webassembly/wavm.cpp
index cd731bd4f8f..4a8eeaf6c8e 100644
--- a/libraries/chain/webassembly/wavm.cpp
+++ b/libraries/chain/webassembly/wavm.cpp
@@ -31,7 +31,7 @@ class wavm_instantiated_module : public wasm_instantiated_module_interface {
       // that didn't declare "memory", getDefaultMemory() won't see it. It would also be possible
       // to say something like if(module->memories.size()) here I believe
       if(getDefaultMemory(_instance))
-         _initial_memory_config = module->memories.defs[0].type;
+         _initial_memory_config = module->memories.defs.at(0).type;
    }

    void apply(apply_context& context) override {
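
Context for the two tar fixes that follow (an illustrative shell sketch, not part of the patch series): the -j flag hard-codes bzip2 decompression, which cannot unpack the .xz archive these scripts download, while plain -xf lets a modern GNU tar detect the compression format on its own.

    # -j forces bzip2 decompression, so it cannot unpack an xz archive:
    tar -xjf llvm-4.0.0.src.tar.xz   # fails: bzip2 rejects the stream and tar aborts

    # with no compression flag, modern GNU tar sniffs the format itself:
    tar -xf llvm-4.0.0.src.tar.xz    # extracts llvm-4.0.0.src/

    # being explicit also works, since -J selects xz:
    tar -xJf llvm-4.0.0.src.tar.xz
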
From 00d665defbf705b520341a2c8d45d44e814aa6b8 Mon Sep 17 00:00:00 2001
From: Bucky Kittinger
Date: Mon, 22 Apr 2019 17:28:11 -0400
Subject: [PATCH 479/680] issues with tar

---
 scripts/eosio_build_amazon.sh | 3 ++-
 scripts/eosio_build_centos.sh | 3 ++-
 scripts/eosio_build_fedora.sh | 3 ++-
 scripts/eosio_build_ubuntu.sh | 3 ++-
 4 files changed, 8 insertions(+), 4 deletions(-)

diff --git a/scripts/eosio_build_amazon.sh b/scripts/eosio_build_amazon.sh
index 10cc4577074..1e29294ba2b 100755
--- a/scripts/eosio_build_amazon.sh
+++ b/scripts/eosio_build_amazon.sh
@@ -189,7 +189,8 @@ if $PIN_COMPILER; then
    if [ ! -d $OPT_LOCATION/llvm4 ]; then
       printf "Installing LLVM 4...\\n"
       cd $SRC_LOCATION \
-      && curl -LO http://releases.llvm.org/4.0.0/llvm-4.0.0.src.tar.xz && tar -xf llvm-4.0.0.src.tar.xz \
+      && curl -LO http://releases.llvm.org/4.0.0/llvm-4.0.0.src.tar.xz \
+      && tar -xjf llvm-4.0.0.src.tar.xz \
       && cd llvm-4.0.0.src && mkdir -p build && cd build \
       && $CMAKE -DCMAKE_INSTALL_PREFIX=$OPT_LOCATION/llvm4 -DLLVM_TARGETS_TO_BUILD=host -DLLVM_BUILD_TOOLS=false -DLLVM_ENABLE_RTTI=1 -DCMAKE_BUILD_TYPE=Release -DCMAKE_TOOLCHAIN_FILE=$BUILD_DIR/pinned_toolchain.cmake .. \
       && make -j"${JOBS}" install \
diff --git a/scripts/eosio_build_centos.sh b/scripts/eosio_build_centos.sh
index 35f536abd8e..026d56e841f 100755
--- a/scripts/eosio_build_centos.sh
+++ b/scripts/eosio_build_centos.sh
@@ -241,7 +241,8 @@ if $PIN_COMPILER; then
    if [ ! -d $OPT_LOCATION/llvm4 ]; then
       printf "Installing LLVM 4...\\n"
       cd $SRC_LOCATION \
-      && curl -LO http://releases.llvm.org/4.0.0/llvm-4.0.0.src.tar.xz && tar -xf llvm-4.0.0.src.tar.xz \
+      && curl -LO http://releases.llvm.org/4.0.0/llvm-4.0.0.src.tar.xz \
+      && tar -xjf llvm-4.0.0.src.tar.xz \
       && cd llvm-4.0.0.src && mkdir -p build && cd build \
       && $CMAKE -DCMAKE_INSTALL_PREFIX=$OPT_LOCATION/llvm4 -DLLVM_TARGETS_TO_BUILD=host -DLLVM_BUILD_TOOLS=false -DLLVM_ENABLE_RTTI=1 -DCMAKE_BUILD_TYPE=Release -DCMAKE_TOOLCHAIN_FILE=$BUILD_DIR/pinned_toolchain.cmake .. \
       && make -j"${JOBS}" install \
diff --git a/scripts/eosio_build_fedora.sh b/scripts/eosio_build_fedora.sh
index 53c0c32d863..4deb369cd08 100755
--- a/scripts/eosio_build_fedora.sh
+++ b/scripts/eosio_build_fedora.sh
@@ -185,7 +185,8 @@ if $PIN_COMPILER; then
    if [ ! -d $OPT_LOCATION/llvm4 ]; then
       printf "Installing LLVM 4...\\n"
       cd $SRC_LOCATION \
-      && curl -LO http://releases.llvm.org/4.0.0/llvm-4.0.0.src.tar.xz && tar -xf llvm-4.0.0.src.tar.xz \
+      && curl -LO http://releases.llvm.org/4.0.0/llvm-4.0.0.src.tar.xz \
+      && tar -xjf llvm-4.0.0.src.tar.xz \
       && cd llvm-4.0.0.src && mkdir -p build && cd build \
       && $CMAKE -DCMAKE_INSTALL_PREFIX=$OPT_LOCATION/llvm4 -DLLVM_TARGETS_TO_BUILD=host -DLLVM_BUILD_TOOLS=false -DLLVM_ENABLE_RTTI=1 -DCMAKE_BUILD_TYPE=Release -DCMAKE_TOOLCHAIN_FILE=$BUILD_DIR/pinned_toolchain.cmake .. \
       && make -j"${JOBS}" install \
diff --git a/scripts/eosio_build_ubuntu.sh b/scripts/eosio_build_ubuntu.sh
index ada49bbc7f8..4ef9e9105e9 100755
--- a/scripts/eosio_build_ubuntu.sh
+++ b/scripts/eosio_build_ubuntu.sh
@@ -209,7 +209,8 @@ if $PIN_COMPILER; then
    if [ ! -d $OPT_LOCATION/llvm4 ]; then
       printf "Installing LLVM 4...\\n"
       cd $SRC_LOCATION \
-      && curl -LO http://releases.llvm.org/4.0.0/llvm-4.0.0.src.tar.xz && tar -xf llvm-4.0.0.src.tar.xz \
+      && curl -LO http://releases.llvm.org/4.0.0/llvm-4.0.0.src.tar.xz \
+      && tar -xjf llvm-4.0.0.src.tar.xz \
       && cd llvm-4.0.0.src && mkdir -p build && cd build \
       && $CMAKE -DCMAKE_INSTALL_PREFIX=$OPT_LOCATION/llvm4 -DLLVM_TARGETS_TO_BUILD=host -DLLVM_BUILD_TOOLS=false -DLLVM_ENABLE_RTTI=1 -DCMAKE_BUILD_TYPE=Release -DCMAKE_TOOLCHAIN_FILE=$BUILD_DIR/pinned_toolchain.cmake .. \
       && make -j"${JOBS}" install \

From 6ebb635b20a7ba4fe9e4d0f3ba445975afe3a834 Mon Sep 17 00:00:00 2001
From: Bucky Kittinger
Date: Mon, 22 Apr 2019 18:18:00 -0400
Subject: [PATCH 480/680] remove j from tar

---
 scripts/eosio_build_amazon.sh | 2 +-
 scripts/eosio_build_centos.sh | 2 +-
 scripts/eosio_build_fedora.sh | 2 +-
 scripts/eosio_build_ubuntu.sh | 2 +-
 4 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/scripts/eosio_build_amazon.sh b/scripts/eosio_build_amazon.sh
index 1e29294ba2b..ab0e2bbd39c 100755
--- a/scripts/eosio_build_amazon.sh
+++ b/scripts/eosio_build_amazon.sh
@@ -190,7 +190,7 @@ if $PIN_COMPILER; then
       printf "Installing LLVM 4...\\n"
       cd $SRC_LOCATION \
       && curl -LO http://releases.llvm.org/4.0.0/llvm-4.0.0.src.tar.xz \
-      && tar -xjf llvm-4.0.0.src.tar.xz \
+      && tar -xf llvm-4.0.0.src.tar.xz \
       && cd llvm-4.0.0.src && mkdir -p build && cd build \
       && $CMAKE -DCMAKE_INSTALL_PREFIX=$OPT_LOCATION/llvm4 -DLLVM_TARGETS_TO_BUILD=host -DLLVM_BUILD_TOOLS=false -DLLVM_ENABLE_RTTI=1 -DCMAKE_BUILD_TYPE=Release -DCMAKE_TOOLCHAIN_FILE=$BUILD_DIR/pinned_toolchain.cmake ..
\ && make -j"${JOBS}" install \ diff --git a/scripts/eosio_build_centos.sh b/scripts/eosio_build_centos.sh index 026d56e841f..87be20870d4 100755 --- a/scripts/eosio_build_centos.sh +++ b/scripts/eosio_build_centos.sh @@ -242,7 +242,7 @@ if $PIN_COMPILER; then printf "Installing LLVM 4...\\n" cd $SRC_LOCATION \ && curl -LO http://releases.llvm.org/4.0.0/llvm-4.0.0.src.tar.xz \ - && tar -xjf llvm-4.0.0.src.tar.xz \ + && tar -xf llvm-4.0.0.src.tar.xz \ && cd llvm-4.0.0.src && mkdir -p build && cd build \ && $CMAKE -DCMAKE_INSTALL_PREFIX=$OPT_LOCATION/llvm4 -DLLVM_TARGETS_TO_BUILD=host -DLLVM_BUILD_TOOLS=false -DLLVM_ENABLE_RTTI=1 -DCMAKE_BUILD_TYPE=Release -DCMAKE_TOOLCHAIN_FILE=$BUILD_DIR/pinned_toolchain.cmake .. \ && make -j"${JOBS}" install \ diff --git a/scripts/eosio_build_fedora.sh b/scripts/eosio_build_fedora.sh index 4deb369cd08..cacfbdaa9fa 100755 --- a/scripts/eosio_build_fedora.sh +++ b/scripts/eosio_build_fedora.sh @@ -186,7 +186,7 @@ if $PIN_COMPILER; then printf "Installing LLVM 4...\\n" cd $SRC_LOCATION \ && curl -LO http://releases.llvm.org/4.0.0/llvm-4.0.0.src.tar.xz \ - && tar -xjf llvm-4.0.0.src.tar.xz \ + && tar -xf llvm-4.0.0.src.tar.xz \ && cd llvm-4.0.0.src && mkdir -p build && cd build \ && $CMAKE -DCMAKE_INSTALL_PREFIX=$OPT_LOCATION/llvm4 -DLLVM_TARGETS_TO_BUILD=host -DLLVM_BUILD_TOOLS=false -DLLVM_ENABLE_RTTI=1 -DCMAKE_BUILD_TYPE=Release -DCMAKE_TOOLCHAIN_FILE=$BUILD_DIR/pinned_toolchain.cmake .. \ && make -j"${JOBS}" install \ diff --git a/scripts/eosio_build_ubuntu.sh b/scripts/eosio_build_ubuntu.sh index 4ef9e9105e9..14ce0240719 100755 --- a/scripts/eosio_build_ubuntu.sh +++ b/scripts/eosio_build_ubuntu.sh @@ -210,7 +210,7 @@ if $PIN_COMPILER; then printf "Installing LLVM 4...\\n" cd $SRC_LOCATION \ && curl -LO http://releases.llvm.org/4.0.0/llvm-4.0.0.src.tar.xz \ - && tar -xjf llvm-4.0.0.src.tar.xz \ + && tar -xf llvm-4.0.0.src.tar.xz \ && cd llvm-4.0.0.src && mkdir -p build && cd build \ && $CMAKE -DCMAKE_INSTALL_PREFIX=$OPT_LOCATION/llvm4 -DLLVM_TARGETS_TO_BUILD=host -DLLVM_BUILD_TOOLS=false -DLLVM_ENABLE_RTTI=1 -DCMAKE_BUILD_TYPE=Release -DCMAKE_TOOLCHAIN_FILE=$BUILD_DIR/pinned_toolchain.cmake .. \ && make -j"${JOBS}" install \ From 3443d379fae8e90fb20624ff20a6cafa1a1f1969 Mon Sep 17 00:00:00 2001 From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com> Date: Mon, 22 Apr 2019 18:42:35 -0400 Subject: [PATCH 481/680] set wabt submodule back to eosio branch --- libraries/wabt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/wabt b/libraries/wabt index a136149d941..6032d829753 160000 --- a/libraries/wabt +++ b/libraries/wabt @@ -1 +1 @@ -Subproject commit a136149d941df2942a25a4b66d8865fada0a325e +Subproject commit 6032d829753b1eea7113cb1901410788ff687bdf From 88077b706eb2aae5945602bbe4b32a9987334892 Mon Sep 17 00:00:00 2001 From: Bucky Kittinger Date: Mon, 22 Apr 2019 19:34:37 -0400 Subject: [PATCH 482/680] just going to use git for llvm4 --- scripts/eosio_build_amazon.sh | 7 ++++--- scripts/eosio_build_centos.sh | 7 ++++--- scripts/eosio_build_fedora.sh | 7 ++++--- scripts/eosio_build_ubuntu.sh | 6 +++--- 4 files changed, 15 insertions(+), 12 deletions(-) diff --git a/scripts/eosio_build_amazon.sh b/scripts/eosio_build_amazon.sh index ab0e2bbd39c..97caca0fa19 100755 --- a/scripts/eosio_build_amazon.sh +++ b/scripts/eosio_build_amazon.sh @@ -189,14 +189,15 @@ if $PIN_COMPILER; then if [ ! 
-d $OPT_LOCATION/llvm4 ]; then printf "Installing LLVM 4...\\n" cd $SRC_LOCATION \ - && curl -LO http://releases.llvm.org/4.0.0/llvm-4.0.0.src.tar.xz \ - && tar -xf llvm-4.0.0.src.tar.xz \ - && cd llvm-4.0.0.src && mkdir -p build && cd build \ + && git clone https://github.com/llvm-mirror/llvm \ + && git checkout --single-branch --branch $LLVM_VERSION \ + && cd llvm && mkdir -p build && cd build \ && $CMAKE -DCMAKE_INSTALL_PREFIX=$OPT_LOCATION/llvm4 -DLLVM_TARGETS_TO_BUILD=host -DLLVM_BUILD_TOOLS=false -DLLVM_ENABLE_RTTI=1 -DCMAKE_BUILD_TYPE=Release -DCMAKE_TOOLCHAIN_FILE=$BUILD_DIR/pinned_toolchain.cmake .. \ && make -j"${JOBS}" install \ || exit -1 printf "Installed LLVM 4 @ ${OPT_LOCATION}/llvm4" fi + cd $SRC_LOCATION printf "Checking zlib library (${BOOST_VERSION}) installation...\\n" if [ ! -d $OPT_LOCATION/zlib ]; then diff --git a/scripts/eosio_build_centos.sh b/scripts/eosio_build_centos.sh index 87be20870d4..918cae3b7d9 100755 --- a/scripts/eosio_build_centos.sh +++ b/scripts/eosio_build_centos.sh @@ -241,14 +241,15 @@ if $PIN_COMPILER; then if [ ! -d $OPT_LOCATION/llvm4 ]; then printf "Installing LLVM 4...\\n" cd $SRC_LOCATION \ - && curl -LO http://releases.llvm.org/4.0.0/llvm-4.0.0.src.tar.xz \ - && tar -xf llvm-4.0.0.src.tar.xz \ - && cd llvm-4.0.0.src && mkdir -p build && cd build \ + && git clone https://github.com/llvm-mirror/llvm \ + && git checkout --single-branch --branch $LLVM_VERSION \ + && cd llvm && mkdir -p build && cd build \ && $CMAKE -DCMAKE_INSTALL_PREFIX=$OPT_LOCATION/llvm4 -DLLVM_TARGETS_TO_BUILD=host -DLLVM_BUILD_TOOLS=false -DLLVM_ENABLE_RTTI=1 -DCMAKE_BUILD_TYPE=Release -DCMAKE_TOOLCHAIN_FILE=$BUILD_DIR/pinned_toolchain.cmake .. \ && make -j"${JOBS}" install \ || exit -1 printf "Installed LLVM 4 @ ${OPT_LOCATION}/llvm4" fi + cd $SRC_LOCATION printf "Checking zlib library (${BOOST_VERSION}) installation...\\n" if [ ! -d $OPT_LOCATION/zlib ]; then diff --git a/scripts/eosio_build_fedora.sh b/scripts/eosio_build_fedora.sh index cacfbdaa9fa..660a86af445 100755 --- a/scripts/eosio_build_fedora.sh +++ b/scripts/eosio_build_fedora.sh @@ -185,14 +185,15 @@ if $PIN_COMPILER; then if [ ! -d $OPT_LOCATION/llvm4 ]; then printf "Installing LLVM 4...\\n" cd $SRC_LOCATION \ - && curl -LO http://releases.llvm.org/4.0.0/llvm-4.0.0.src.tar.xz \ - && tar -xf llvm-4.0.0.src.tar.xz \ - && cd llvm-4.0.0.src && mkdir -p build && cd build \ + && git clone https://github.com/llvm-mirror/llvm \ + && git checkout --single-branch --branch $LLVM_VERSION \ + && cd llvm && mkdir -p build && cd build \ && $CMAKE -DCMAKE_INSTALL_PREFIX=$OPT_LOCATION/llvm4 -DLLVM_TARGETS_TO_BUILD=host -DLLVM_BUILD_TOOLS=false -DLLVM_ENABLE_RTTI=1 -DCMAKE_BUILD_TYPE=Release -DCMAKE_TOOLCHAIN_FILE=$BUILD_DIR/pinned_toolchain.cmake .. \ && make -j"${JOBS}" install \ || exit -1 printf "Installed LLVM 4 @ ${OPT_LOCATION}/llvm4" fi + cd $SRC_LOCATION printf "Checking zlib library (${BOOST_VERSION}) installation...\\n" if [ ! -d $OPT_LOCATION/zlib ]; then diff --git a/scripts/eosio_build_ubuntu.sh b/scripts/eosio_build_ubuntu.sh index 14ce0240719..1e231a1888c 100755 --- a/scripts/eosio_build_ubuntu.sh +++ b/scripts/eosio_build_ubuntu.sh @@ -209,9 +209,9 @@ if $PIN_COMPILER; then if [ ! 
-d $OPT_LOCATION/llvm4 ]; then printf "Installing LLVM 4...\\n" cd $SRC_LOCATION \ - && curl -LO http://releases.llvm.org/4.0.0/llvm-4.0.0.src.tar.xz \ - && tar -xf llvm-4.0.0.src.tar.xz \ - && cd llvm-4.0.0.src && mkdir -p build && cd build \ + && git clone https://github.com/llvm-mirror/llvm \ + && git checkout --single-branch --branch $LLVM_VERSION \ + && cd llvm && mkdir -p build && cd build \ && $CMAKE -DCMAKE_INSTALL_PREFIX=$OPT_LOCATION/llvm4 -DLLVM_TARGETS_TO_BUILD=host -DLLVM_BUILD_TOOLS=false -DLLVM_ENABLE_RTTI=1 -DCMAKE_BUILD_TYPE=Release -DCMAKE_TOOLCHAIN_FILE=$BUILD_DIR/pinned_toolchain.cmake .. \ && make -j"${JOBS}" install \ || exit -1 From cdb47a59b3a6dd267d639e857ff516596b866dc8 Mon Sep 17 00:00:00 2001 From: Bucky Kittinger Date: Mon, 22 Apr 2019 19:36:11 -0400 Subject: [PATCH 483/680] mistake keystrokes --- scripts/eosio_build_ubuntu.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/eosio_build_ubuntu.sh b/scripts/eosio_build_ubuntu.sh index 1e231a1888c..9aed50942a9 100755 --- a/scripts/eosio_build_ubuntu.sh +++ b/scripts/eosio_build_ubuntu.sh @@ -64,7 +64,7 @@ case "${OS_NAME}" in ;; esac -if [ "${DISK_AVAIL%.*}" -lt "${DISK_MIN}" ]; then +if [ "${DISK_AVAIL}" -lt "${DISK_MIN}" ]; then printf "You must have at least %sGB of available storage to install EOSIO.\\n" "${DISK_MIN}" printf "Exiting now.\\n" exit 1 From b34a6bd743d41973e0cc4baf1d681c09114b94cb Mon Sep 17 00:00:00 2001 From: Bucky Kittinger Date: Mon, 22 Apr 2019 20:19:44 -0400 Subject: [PATCH 484/680] only build host for clang and fix dumb mistake --- scripts/eosio_build_amazon.sh | 6 +++--- scripts/eosio_build_centos.sh | 6 +++--- scripts/eosio_build_fedora.sh | 6 +++--- scripts/eosio_build_ubuntu.sh | 8 ++++---- 4 files changed, 13 insertions(+), 13 deletions(-) diff --git a/scripts/eosio_build_amazon.sh b/scripts/eosio_build_amazon.sh index 97caca0fa19..9c2f777543d 100755 --- a/scripts/eosio_build_amazon.sh +++ b/scripts/eosio_build_amazon.sh @@ -170,7 +170,7 @@ if $PIN_COMPILER; then && cd compiler-rt && git checkout $PINNED_COMPILER_COMPILER_RT_COMMIT && cd ../ \ && cd ${TMP_LOCATION}/clang8 \ && mkdir build && cd build \ - && $CMAKE -G "Unix Makefiles" -DCMAKE_INSTALL_PREFIX="${CLANG8_ROOT}" -DLLVM_BUILD_EXTERNAL_COMPILER_RT=ON -DLLVM_BUILD_LLVM_DYLIB=ON -DLLVM_ENABLE_LIBCXX=ON -DLLVM_ENABLE_RTTI=ON -DLLVM_INCLUDE_DOCS=OFF -DLLVM_OPTIMIZED_TABLEGEN=ON -DLLVM_TARGETS_TO_BUILD=all -DCMAKE_BUILD_TYPE=Release .. \ + && $CMAKE -G "Unix Makefiles" -DCMAKE_INSTALL_PREFIX="${CLANG8_ROOT}" -DLLVM_BUILD_EXTERNAL_COMPILER_RT=ON -DLLVM_BUILD_LLVM_DYLIB=ON -DLLVM_ENABLE_LIBCXX=ON -DLLVM_ENABLE_RTTI=ON -DLLVM_INCLUDE_DOCS=OFF -DLLVM_OPTIMIZED_TABLEGEN=ON -DLLVM_TARGETS_TO_BUILD=host -DCMAKE_BUILD_TYPE=Release .. \ && make -j"${JOBS}" \ && make install \ && rm -rf ${TMP_LOCATION}/clang8 \ @@ -189,9 +189,9 @@ if $PIN_COMPILER; then if [ ! -d $OPT_LOCATION/llvm4 ]; then printf "Installing LLVM 4...\\n" cd $SRC_LOCATION \ - && git clone https://github.com/llvm-mirror/llvm \ + && git clone https://github.com/llvm-mirror/llvm && cd llvm \ && git checkout --single-branch --branch $LLVM_VERSION \ - && cd llvm && mkdir -p build && cd build \ + && mkdir -p build && cd build \ && $CMAKE -DCMAKE_INSTALL_PREFIX=$OPT_LOCATION/llvm4 -DLLVM_TARGETS_TO_BUILD=host -DLLVM_BUILD_TOOLS=false -DLLVM_ENABLE_RTTI=1 -DCMAKE_BUILD_TYPE=Release -DCMAKE_TOOLCHAIN_FILE=$BUILD_DIR/pinned_toolchain.cmake .. 
\ && make -j"${JOBS}" install \ || exit -1 diff --git a/scripts/eosio_build_centos.sh b/scripts/eosio_build_centos.sh index 918cae3b7d9..474926b41e5 100755 --- a/scripts/eosio_build_centos.sh +++ b/scripts/eosio_build_centos.sh @@ -222,7 +222,7 @@ if $PIN_COMPILER; then && cd compiler-rt && git checkout $PINNED_COMPILER_COMPILER_RT_COMMIT && cd ../ \ && cd ${TMP_LOCATION}/clang8 \ && mkdir build && cd build \ - && $CMAKE -G "Unix Makefiles" -DCMAKE_INSTALL_PREFIX="${CLANG8_ROOT}" -DLLVM_BUILD_EXTERNAL_COMPILER_RT=ON -DLLVM_BUILD_LLVM_DYLIB=ON -DLLVM_ENABLE_LIBCXX=ON -DLLVM_ENABLE_RTTI=ON -DLLVM_INCLUDE_DOCS=OFF -DLLVM_OPTIMIZED_TABLEGEN=ON -DLLVM_TARGETS_TO_BUILD=all -DCMAKE_BUILD_TYPE=Release .. \ + && $CMAKE -G "Unix Makefiles" -DCMAKE_INSTALL_PREFIX="${CLANG8_ROOT}" -DLLVM_BUILD_EXTERNAL_COMPILER_RT=ON -DLLVM_BUILD_LLVM_DYLIB=ON -DLLVM_ENABLE_LIBCXX=ON -DLLVM_ENABLE_RTTI=ON -DLLVM_INCLUDE_DOCS=OFF -DLLVM_OPTIMIZED_TABLEGEN=ON -DLLVM_TARGETS_TO_BUILD=host -DCMAKE_BUILD_TYPE=Release .. \ && make -j"${JOBS}" \ && make install \ && rm -rf ${TMP_LOCATION}/clang8 \ @@ -241,9 +241,9 @@ if $PIN_COMPILER; then if [ ! -d $OPT_LOCATION/llvm4 ]; then printf "Installing LLVM 4...\\n" cd $SRC_LOCATION \ - && git clone https://github.com/llvm-mirror/llvm \ + && git clone https://github.com/llvm-mirror/llvm && cd llvm \ && git checkout --single-branch --branch $LLVM_VERSION \ - && cd llvm && mkdir -p build && cd build \ + && mkdir -p build && cd build \ && $CMAKE -DCMAKE_INSTALL_PREFIX=$OPT_LOCATION/llvm4 -DLLVM_TARGETS_TO_BUILD=host -DLLVM_BUILD_TOOLS=false -DLLVM_ENABLE_RTTI=1 -DCMAKE_BUILD_TYPE=Release -DCMAKE_TOOLCHAIN_FILE=$BUILD_DIR/pinned_toolchain.cmake .. \ && make -j"${JOBS}" install \ || exit -1 diff --git a/scripts/eosio_build_fedora.sh b/scripts/eosio_build_fedora.sh index 660a86af445..8cc16884f69 100755 --- a/scripts/eosio_build_fedora.sh +++ b/scripts/eosio_build_fedora.sh @@ -166,7 +166,7 @@ if $PIN_COMPILER; then && cd compiler-rt && git checkout $PINNED_COMPILER_COMPILER_RT_COMMIT && cd ../ \ && cd ${TMP_LOCATION}/clang8 \ && mkdir build && cd build \ - && $CMAKE -G "Unix Makefiles" -DCMAKE_INSTALL_PREFIX="${CLANG8_ROOT}" -DLLVM_BUILD_EXTERNAL_COMPILER_RT=ON -DLLVM_BUILD_LLVM_DYLIB=ON -DLLVM_ENABLE_LIBCXX=ON -DLLVM_ENABLE_RTTI=ON -DLLVM_INCLUDE_DOCS=OFF -DLLVM_OPTIMIZED_TABLEGEN=ON -DLLVM_TARGETS_TO_BUILD=all -DCMAKE_BUILD_TYPE=Release .. \ + && $CMAKE -G "Unix Makefiles" -DCMAKE_INSTALL_PREFIX="${CLANG8_ROOT}" -DLLVM_BUILD_EXTERNAL_COMPILER_RT=ON -DLLVM_BUILD_LLVM_DYLIB=ON -DLLVM_ENABLE_LIBCXX=ON -DLLVM_ENABLE_RTTI=ON -DLLVM_INCLUDE_DOCS=OFF -DLLVM_OPTIMIZED_TABLEGEN=ON -DLLVM_TARGETS_TO_BUILD=host -DCMAKE_BUILD_TYPE=Release .. \ && make -j"${JOBS}" \ && make install \ && rm -rf ${TMP_LOCATION}/clang8 \ @@ -185,9 +185,9 @@ if $PIN_COMPILER; then if [ ! -d $OPT_LOCATION/llvm4 ]; then printf "Installing LLVM 4...\\n" cd $SRC_LOCATION \ - && git clone https://github.com/llvm-mirror/llvm \ + && git clone https://github.com/llvm-mirror/llvm && cd llvm \ && git checkout --single-branch --branch $LLVM_VERSION \ - && cd llvm && mkdir -p build && cd build \ + && mkdir -p build && cd build \ && $CMAKE -DCMAKE_INSTALL_PREFIX=$OPT_LOCATION/llvm4 -DLLVM_TARGETS_TO_BUILD=host -DLLVM_BUILD_TOOLS=false -DLLVM_ENABLE_RTTI=1 -DCMAKE_BUILD_TYPE=Release -DCMAKE_TOOLCHAIN_FILE=$BUILD_DIR/pinned_toolchain.cmake .. 
\ && make -j"${JOBS}" install \ || exit -1 diff --git a/scripts/eosio_build_ubuntu.sh b/scripts/eosio_build_ubuntu.sh index 9aed50942a9..f932139f084 100755 --- a/scripts/eosio_build_ubuntu.sh +++ b/scripts/eosio_build_ubuntu.sh @@ -64,7 +64,7 @@ case "${OS_NAME}" in ;; esac -if [ "${DISK_AVAIL}" -lt "${DISK_MIN}" ]; then +if [ "${DISK_AVAIL%.*}" -lt "${DISK_MIN}" ]; then printf "You must have at least %sGB of available storage to install EOSIO.\\n" "${DISK_MIN}" printf "Exiting now.\\n" exit 1 @@ -190,7 +190,7 @@ if $PIN_COMPILER; then && cd compiler-rt && git checkout $PINNED_COMPILER_COMPILER_RT_COMMIT && cd ../ \ && cd ${TMP_LOCATION}/clang8 \ && mkdir build && cd build \ - && $CMAKE -G "Unix Makefiles" -DCMAKE_INSTALL_PREFIX="${CLANG8_ROOT}" -DLLVM_BUILD_EXTERNAL_COMPILER_RT=ON -DLLVM_BUILD_LLVM_DYLIB=ON -DLLVM_ENABLE_LIBCXX=ON -DLLVM_ENABLE_RTTI=ON -DLLVM_INCLUDE_DOCS=OFF -DLLVM_OPTIMIZED_TABLEGEN=ON -DLLVM_TARGETS_TO_BUILD=all -DCMAKE_BUILD_TYPE=Release .. \ + && $CMAKE -G "Unix Makefiles" -DCMAKE_INSTALL_PREFIX="${CLANG8_ROOT}" -DLLVM_BUILD_EXTERNAL_COMPILER_RT=ON -DLLVM_BUILD_LLVM_DYLIB=ON -DLLVM_ENABLE_LIBCXX=ON -DLLVM_ENABLE_RTTI=ON -DLLVM_INCLUDE_DOCS=OFF -DLLVM_OPTIMIZED_TABLEGEN=ON -DLLVM_TARGETS_TO_BUILD=host -DCMAKE_BUILD_TYPE=Release .. \ && make -j"${JOBS}" \ && make install \ && rm -rf ${TMP_LOCATION}/clang8 \ @@ -209,9 +209,9 @@ if $PIN_COMPILER; then if [ ! -d $OPT_LOCATION/llvm4 ]; then printf "Installing LLVM 4...\\n" cd $SRC_LOCATION \ - && git clone https://github.com/llvm-mirror/llvm \ + && git clone https://github.com/llvm-mirror/llvm && and cd llvm \ && git checkout --single-branch --branch $LLVM_VERSION \ - && cd llvm && mkdir -p build && cd build \ + && mkdir -p build && cd build \ && $CMAKE -DCMAKE_INSTALL_PREFIX=$OPT_LOCATION/llvm4 -DLLVM_TARGETS_TO_BUILD=host -DLLVM_BUILD_TOOLS=false -DLLVM_ENABLE_RTTI=1 -DCMAKE_BUILD_TYPE=Release -DCMAKE_TOOLCHAIN_FILE=$BUILD_DIR/pinned_toolchain.cmake .. \ && make -j"${JOBS}" install \ || exit -1 From 1729f49b6f5a3b58ffd7ff8bd8abf822f7b6d773 Mon Sep 17 00:00:00 2001 From: Bucky Kittinger Date: Mon, 22 Apr 2019 20:23:28 -0400 Subject: [PATCH 485/680] long day --- scripts/eosio_build_ubuntu.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/eosio_build_ubuntu.sh b/scripts/eosio_build_ubuntu.sh index f932139f084..233ea18ddc9 100755 --- a/scripts/eosio_build_ubuntu.sh +++ b/scripts/eosio_build_ubuntu.sh @@ -209,7 +209,7 @@ if $PIN_COMPILER; then if [ ! -d $OPT_LOCATION/llvm4 ]; then printf "Installing LLVM 4...\\n" cd $SRC_LOCATION \ - && git clone https://github.com/llvm-mirror/llvm && and cd llvm \ + && git clone https://github.com/llvm-mirror/llvm && cd llvm \ && git checkout --single-branch --branch $LLVM_VERSION \ && mkdir -p build && cd build \ && $CMAKE -DCMAKE_INSTALL_PREFIX=$OPT_LOCATION/llvm4 -DLLVM_TARGETS_TO_BUILD=host -DLLVM_BUILD_TOOLS=false -DLLVM_ENABLE_RTTI=1 -DCMAKE_BUILD_TYPE=Release -DCMAKE_TOOLCHAIN_FILE=$BUILD_DIR/pinned_toolchain.cmake .. 
\ From 920d6aa7572a7a09f69175fde4f1357a0fc35cb7 Mon Sep 17 00:00:00 2001 From: Bucky Kittinger Date: Mon, 22 Apr 2019 20:26:04 -0400 Subject: [PATCH 486/680] still a long day --- scripts/eosio_build_amazon.sh | 3 +-- scripts/eosio_build_centos.sh | 3 +-- scripts/eosio_build_fedora.sh | 3 +-- scripts/eosio_build_ubuntu.sh | 3 +-- 4 files changed, 4 insertions(+), 8 deletions(-) diff --git a/scripts/eosio_build_amazon.sh b/scripts/eosio_build_amazon.sh index 9c2f777543d..439da8893d2 100755 --- a/scripts/eosio_build_amazon.sh +++ b/scripts/eosio_build_amazon.sh @@ -189,8 +189,7 @@ if $PIN_COMPILER; then if [ ! -d $OPT_LOCATION/llvm4 ]; then printf "Installing LLVM 4...\\n" cd $SRC_LOCATION \ - && git clone https://github.com/llvm-mirror/llvm && cd llvm \ - && git checkout --single-branch --branch $LLVM_VERSION \ + && git clone https://github.com/llvm-mirror/llvm --single-branch --branch $LLVM_VERSION && cd llvm \ && mkdir -p build && cd build \ && $CMAKE -DCMAKE_INSTALL_PREFIX=$OPT_LOCATION/llvm4 -DLLVM_TARGETS_TO_BUILD=host -DLLVM_BUILD_TOOLS=false -DLLVM_ENABLE_RTTI=1 -DCMAKE_BUILD_TYPE=Release -DCMAKE_TOOLCHAIN_FILE=$BUILD_DIR/pinned_toolchain.cmake .. \ && make -j"${JOBS}" install \ diff --git a/scripts/eosio_build_centos.sh b/scripts/eosio_build_centos.sh index 474926b41e5..fb17fdbfc9b 100755 --- a/scripts/eosio_build_centos.sh +++ b/scripts/eosio_build_centos.sh @@ -241,8 +241,7 @@ if $PIN_COMPILER; then if [ ! -d $OPT_LOCATION/llvm4 ]; then printf "Installing LLVM 4...\\n" cd $SRC_LOCATION \ - && git clone https://github.com/llvm-mirror/llvm && cd llvm \ - && git checkout --single-branch --branch $LLVM_VERSION \ + && git clone https://github.com/llvm-mirror/llvm --single-branch --branch $LLVM_VERSION && cd llvm \ && mkdir -p build && cd build \ && $CMAKE -DCMAKE_INSTALL_PREFIX=$OPT_LOCATION/llvm4 -DLLVM_TARGETS_TO_BUILD=host -DLLVM_BUILD_TOOLS=false -DLLVM_ENABLE_RTTI=1 -DCMAKE_BUILD_TYPE=Release -DCMAKE_TOOLCHAIN_FILE=$BUILD_DIR/pinned_toolchain.cmake .. \ && make -j"${JOBS}" install \ diff --git a/scripts/eosio_build_fedora.sh b/scripts/eosio_build_fedora.sh index 8cc16884f69..366f56bba01 100755 --- a/scripts/eosio_build_fedora.sh +++ b/scripts/eosio_build_fedora.sh @@ -185,8 +185,7 @@ if $PIN_COMPILER; then if [ ! -d $OPT_LOCATION/llvm4 ]; then printf "Installing LLVM 4...\\n" cd $SRC_LOCATION \ - && git clone https://github.com/llvm-mirror/llvm && cd llvm \ - && git checkout --single-branch --branch $LLVM_VERSION \ + && git clone https://github.com/llvm-mirror/llvm --single-branch --branch $LLVM_VERSION && cd llvm \ && mkdir -p build && cd build \ && $CMAKE -DCMAKE_INSTALL_PREFIX=$OPT_LOCATION/llvm4 -DLLVM_TARGETS_TO_BUILD=host -DLLVM_BUILD_TOOLS=false -DLLVM_ENABLE_RTTI=1 -DCMAKE_BUILD_TYPE=Release -DCMAKE_TOOLCHAIN_FILE=$BUILD_DIR/pinned_toolchain.cmake .. \ && make -j"${JOBS}" install \ diff --git a/scripts/eosio_build_ubuntu.sh b/scripts/eosio_build_ubuntu.sh index 233ea18ddc9..f63c39dd408 100755 --- a/scripts/eosio_build_ubuntu.sh +++ b/scripts/eosio_build_ubuntu.sh @@ -209,8 +209,7 @@ if $PIN_COMPILER; then if [ ! 
-d $OPT_LOCATION/llvm4 ]; then printf "Installing LLVM 4...\\n" cd $SRC_LOCATION \ - && git clone https://github.com/llvm-mirror/llvm && cd llvm \ - && git checkout --single-branch --branch $LLVM_VERSION \ + && git clone https://github.com/llvm-mirror/llvm --single-branch --branch $LLVM_VERSION && cd llvm \ && mkdir -p build && cd build \ && $CMAKE -DCMAKE_INSTALL_PREFIX=$OPT_LOCATION/llvm4 -DLLVM_TARGETS_TO_BUILD=host -DLLVM_BUILD_TOOLS=false -DLLVM_ENABLE_RTTI=1 -DCMAKE_BUILD_TYPE=Release -DCMAKE_TOOLCHAIN_FILE=$BUILD_DIR/pinned_toolchain.cmake .. \ && make -j"${JOBS}" install \ From 033187c84c4333dc88f725aab53834439c4c4aa0 Mon Sep 17 00:00:00 2001 From: Bucky Kittinger Date: Mon, 22 Apr 2019 21:39:29 -0400 Subject: [PATCH 487/680] clean up toolchain file --- scripts/pinned_toolchain.cmake | 17 +++++------------ 1 file changed, 5 insertions(+), 12 deletions(-) diff --git a/scripts/pinned_toolchain.cmake b/scripts/pinned_toolchain.cmake index 75a906111e8..cdb517c5e1f 100644 --- a/scripts/pinned_toolchain.cmake +++ b/scripts/pinned_toolchain.cmake @@ -4,19 +4,12 @@ set(CMAKE_CXX_COMPILER_WORKS 1) set(CMAKE_C_COMPILER ${OPT_PATH}/clang8/bin/clang) set(CMAKE_CXX_COMPILER ${OPT_PATH}/clang8/bin/clang++) -#set(CMAKE_CXX_STANDARD_INCLUDE_DIRECTORIES ${OPT_PATH}/gcc/include/c++/7.1.0 ${OPT_PATH}/gcc/include/c++/7.1.0/x86_64-pc-linux-gnu) set(CMAKE_CXX_STANDARD_INCLUDE_DIRECTORIES ${OPT_PATH}/clang8/include/c++/v1 /usr/local/include /usr/include) + set(CMAKE_CXX_FLAGS_INIT "-nostdinc++") + set(CMAKE_EXE_LINKER_FLAGS_INIT "-stdlib=libc++ -nostdlib++") +set(CMAKE_SHARED_LINKER_FLAGS_INIT "-stdlib=libc++ -nostdlib++") +set(CMAKE_MODULE_LINKER_FLAGS_INIT "-stdlib=libc++ -nostdlib++") -if(NOT APPLE) - set(CMAKE_SHARED_LINKER_FLAGS_INIT "-stdlib=libc++ -nostdlib++") - set(CMAKE_MODULE_LINKER_FLAGS_INIT "-stdlib=libc++ -nostdlib++") - set(CMAKE_CXX_STANDARD_LIBRARIES "-stdlib=libc++ -nostdlib++ ${OPT_PATH}/clang8/lib/libc++.a ${OPT_PATH}/clang8/lib/libc++abi.a") - #set(CMAKE_CXX_STANDARD_LIBRARIES "${OPT_PATH}/gcc/lib64/libstdc++.a ${OPT_PATH}/gcc/lib64/libsupc++.a") - #set(CMAKE_CXX_STANDARD_LIBRARIES "-static -Wl,-L${OPT_PATH}/gcc/lib64 -Wl,-Bstatic -lstdc++ -Wl,-Bdynamic -lz -Wl,-Bdynamic, -lpthread -Wl,-Bdynamic -lm -Wl,-Bdynamic -lc -Wl,-Bdynamic -lgcc -Wl,-nostdlib -Wl,--no-whole-archive ${OPT_PATH}/gcc/lib64/libstdc++.a -Wl,--no-whole-archive ${OPT_PATH}/gcc/lib64/libsupc++.a -Wl,-Bdynamic -lpthread") -else() - set(CMAKE_SHARED_LINKER_FLAGS_INIT "-stdlib=libc++ -nostdlib++") - set(CMAKE_MODULE_LINKER_FLAGS_INIT "-stdlib=libc++ -nostdlib++") - set(CMAKE_CXX_STANDARD_LIBRARIES "${OPT_PATH}/clang8/lib/libc++.a ${OPT_PATH}/clang8/lib/libc++abi.a") -endif() +set(CMAKE_CXX_STANDARD_LIBRARIES "${OPT_PATH}/clang8/lib/libc++.a ${OPT_PATH}/clang8/lib/libc++abi.a") From 1a54110b3874fff10c997e7da4b231fe0d7b23b7 Mon Sep 17 00:00:00 2001 From: Bucky Kittinger Date: Tue, 23 Apr 2019 11:28:03 -0400 Subject: [PATCH 488/680] add force build flag and build mongo flag --- scripts/eosio_build.sh | 13 ++- scripts/eosio_build_amazon.sh | 133 ++++++++++++++++--------------- scripts/eosio_build_centos.sh | 134 ++++++++++++++++--------------- scripts/eosio_build_darwin.sh | 120 ++++++++++++++-------------- scripts/eosio_build_fedora.sh | 136 ++++++++++++++++---------------- scripts/eosio_build_ubuntu.sh | 144 +++++++++++++++++----------------- 6 files changed, 344 insertions(+), 336 deletions(-) diff --git a/scripts/eosio_build.sh b/scripts/eosio_build.sh index 322f7aa5913..3cd8ec6a7d5 100755 --- a/scripts/eosio_build.sh 
+++ b/scripts/eosio_build.sh @@ -73,12 +73,15 @@ Usage: $0 OPTION... -d Generate Doxygen -s NAME Core Symbol Name <1-7 characters> (default: SYS) -y Noninteractive mode (this script) + -P Build with pinned clang 8 and libcxx + -f Force rebuilding of dependencies + -m Build MongoDB dependencies EOT exit 1 } if [ $# -ne 0 ]; then - while getopts ":cdo:s:p:b:Phy" opt; do + while getopts ":cdo:s:p:b:mfPhy" opt; do case "${opt}" in o ) options=( "Debug" "Release" "RelWithDebInfo" "MinSizeRel" ) @@ -121,6 +124,12 @@ if [ $# -ne 0 ]; then y) NONINTERACTIVE=1 ;; + f) + FORCE_BUILD=1 + ;; + m) + BUILD_MONGO=1 + ;; \? ) printf "\\nInvalid Option: %s\\n" "-${OPTARG}" 1>&2 usage @@ -192,6 +201,8 @@ export DOXYGEN_VERSION=1_8_14 export DOXYGEN_ROOT=${SRC_LOCATION}/doxygen-${DOXYGEN_VERSION} export TINI_VERSION=0.18.0 export DISK_MIN=5 +export FORCE_BUILD=$FORCE_BUILD +export BUILD_MONGO=$BUILD_MONGO mkdir -p $BUILD_DIR sed -e "s~@~$OPT_LOCATION~g" $SCRIPT_DIR/pinned_toolchain.cmake &> $BUILD_DIR/pinned_toolchain.cmake diff --git a/scripts/eosio_build_amazon.sh b/scripts/eosio_build_amazon.sh index 439da8893d2..935764a3149 100755 --- a/scripts/eosio_build_amazon.sh +++ b/scripts/eosio_build_amazon.sh @@ -122,7 +122,7 @@ printf "\\n" printf "Checking CMAKE installation...\\n" -if [ ! -e $CMAKE ]; then +if [ ! -e $CMAKE ] || [ $FORCE_BUILD ]; then printf "Installing CMAKE...\\n" curl -LO https://cmake.org/files/v$CMAKE_VERSION_MAJOR.$CMAKE_VERSION_MINOR/cmake-$CMAKE_VERSION.tar.gz \ && tar -xzf cmake-$CMAKE_VERSION.tar.gz \ @@ -144,7 +144,7 @@ printf "\\n" if $PIN_COMPILER; then printf "Checking Clang 8 support...\\n" - if [ ! -d $CLANG8_ROOT ]; then + if [ ! -d $CLANG8_ROOT ] || [ $FORCE_BUILD ]; then printf "Installing Clang 8...\\n" cd ${TMP_LOCATION} \ && git clone --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/llvm.git clang8 && cd clang8 \ @@ -186,7 +186,7 @@ if $PIN_COMPILER; then printf "\\n" printf "Checking LLVM 4 installation...\\n" - if [ ! -d $OPT_LOCATION/llvm4 ]; then + if [ ! -d $OPT_LOCATION/llvm4 ] || [ $FORCE_BUILD ]; then printf "Installing LLVM 4...\\n" cd $SRC_LOCATION \ && git clone https://github.com/llvm-mirror/llvm --single-branch --branch $LLVM_VERSION && cd llvm \ @@ -199,7 +199,7 @@ if $PIN_COMPILER; then cd $SRC_LOCATION printf "Checking zlib library (${BOOST_VERSION}) installation...\\n" - if [ ! -d $OPT_LOCATION/zlib ]; then + if [ ! 
-d $OPT_LOCATION/zlib ] || [ $FORCE_BUILD ]; then printf "Installing zlib...\\n" curl -LO https://www.zlib.net/zlib-1.2.11.tar.gz && tar -xf zlib-1.2.11.tar.gz \ && cd zlib-1.2.11 && mkdir build && cd build \ @@ -210,7 +210,7 @@ if $PIN_COMPILER; then cd $SRC_LOCATION printf "Checking Boost library (${BOOST_VERSION}) installation...\\n" BOOSTVERSION=$( grep "#define BOOST_VERSION" "$BOOST_ROOT/include/boost/version.hpp" 2>/dev/null | tail -1 | tr -s ' ' | cut -d\ -f3 ) - if [ "${BOOSTVERSION}" != "${BOOST_VERSION_MAJOR}0${BOOST_VERSION_MINOR}0${BOOST_VERSION_PATCH}" ]; then + if [ "${BOOSTVERSION}" != "${BOOST_VERSION_MAJOR}0${BOOST_VERSION_MINOR}0${BOOST_VERSION_PATCH}" ] || [ $FORCE_BUILD ]; then printf "Installing Boost library...\\n" curl -LO https://dl.bintray.com/boostorg/release/${BOOST_VERSION_MAJOR}.${BOOST_VERSION_MINOR}.${BOOST_VERSION_PATCH}/source/boost_$BOOST_VERSION.tar.bz2 \ && tar -xjf boost_$BOOST_VERSION.tar.bz2 \ @@ -232,7 +232,7 @@ if $PIN_COMPILER; then else printf "Checking Boost library (${BOOST_VERSION}) installation...\\n" BOOSTVERSION=$( grep "#define BOOST_VERSION" "$BOOST_ROOT/include/boost/version.hpp" 2>/dev/null | tail -1 | tr -s ' ' | cut -d\ -f3 ) - if [ "${BOOSTVERSION}" != "${BOOST_VERSION_MAJOR}0${BOOST_VERSION_MINOR}0${BOOST_VERSION_PATCH}" ]; then + if [ "${BOOSTVERSION}" != "${BOOST_VERSION_MAJOR}0${BOOST_VERSION_MINOR}0${BOOST_VERSION_PATCH}" ] || [ $FORCE_BUILD ]; then printf "Installing Boost library...\\n" curl -LO https://dl.bintray.com/boostorg/release/${BOOST_VERSION_MAJOR}.${BOOST_VERSION_MINOR}.${BOOST_VERSION_PATCH}/source/boost_$BOOST_VERSION.tar.bz2 \ && tar -xjf boost_$BOOST_VERSION.tar.bz2 \ @@ -253,70 +253,69 @@ else printf "\\n" - - printf "Checking MongoDB installation...\\n" - if [ ! -d $MONGODB_ROOT ]; then - printf "Installing MongoDB into ${MONGODB_ROOT}...\\n" - curl -OL https://fastdl.mongodb.org/linux/mongodb-linux-x86_64-amazon-$MONGODB_VERSION.tgz \ - && tar -xzf mongodb-linux-x86_64-amazon-$MONGODB_VERSION.tgz \ - && mv $SRC_LOCATION/mongodb-linux-x86_64-amazon-$MONGODB_VERSION $MONGODB_ROOT \ - && touch $MONGODB_LOG_LOCATION/mongod.log \ - && rm -f mongodb-linux-x86_64-amazon-$MONGODB_VERSION.tgz \ - && cp -f $REPO_ROOT/scripts/mongod.conf $MONGODB_CONF \ - && mkdir -p $MONGODB_DATA_LOCATION \ - && rm -rf $MONGODB_LINK_LOCATION \ - && rm -rf $BIN_LOCATION/mongod \ - && ln -s $MONGODB_ROOT $MONGODB_LINK_LOCATION \ - && ln -s $MONGODB_LINK_LOCATION/bin/mongod $BIN_LOCATION/mongod \ - || exit 1 - printf " - MongoDB successfully installed @ ${MONGODB_ROOT} (Symlinked to ${MONGODB_LINK_LOCATION}).\\n" - else - printf " - MongoDB found with correct version @ ${MONGODB_ROOT} (Symlinked to ${MONGODB_LINK_LOCATION}).\\n" + if [ $BUILD_MONGO ]; then + printf "Checking MongoDB installation...\\n" + if [ ! 
-d $MONGODB_ROOT ] || [ $FORCE_BUILD ]; then + printf "Installing MongoDB into ${MONGODB_ROOT}...\\n" + curl -OL https://fastdl.mongodb.org/linux/mongodb-linux-x86_64-amazon-$MONGODB_VERSION.tgz \ + && tar -xzf mongodb-linux-x86_64-amazon-$MONGODB_VERSION.tgz \ + && mv $SRC_LOCATION/mongodb-linux-x86_64-amazon-$MONGODB_VERSION $MONGODB_ROOT \ + && touch $MONGODB_LOG_LOCATION/mongod.log \ + && rm -f mongodb-linux-x86_64-amazon-$MONGODB_VERSION.tgz \ + && cp -f $REPO_ROOT/scripts/mongod.conf $MONGODB_CONF \ + && mkdir -p $MONGODB_DATA_LOCATION \ + && rm -rf $MONGODB_LINK_LOCATION \ + && rm -rf $BIN_LOCATION/mongod \ + && ln -s $MONGODB_ROOT $MONGODB_LINK_LOCATION \ + && ln -s $MONGODB_LINK_LOCATION/bin/mongod $BIN_LOCATION/mongod \ + || exit 1 + printf " - MongoDB successfully installed @ ${MONGODB_ROOT} (Symlinked to ${MONGODB_LINK_LOCATION}).\\n" + else + printf " - MongoDB found with correct version @ ${MONGODB_ROOT} (Symlinked to ${MONGODB_LINK_LOCATION}).\\n" + fi + if [ $? -ne 0 ]; then exit -1; fi + printf "Checking MongoDB C driver installation...\\n" + if [ ! -d $MONGO_C_DRIVER_ROOT ] || [ $FORCE_BUILD ]; then + printf "Installing MongoDB C driver...\\n" + curl -LO https://github.com/mongodb/mongo-c-driver/releases/download/$MONGO_C_DRIVER_VERSION/mongo-c-driver-$MONGO_C_DRIVER_VERSION.tar.gz \ + && tar -xzf mongo-c-driver-$MONGO_C_DRIVER_VERSION.tar.gz \ + && cd mongo-c-driver-$MONGO_C_DRIVER_VERSION \ + && mkdir -p cmake-build \ + && cd cmake-build \ + && $CMAKE -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$PREFIX -DENABLE_BSON=ON -DENABLE_SSL=OPENSSL -DENABLE_AUTOMATIC_INIT_AND_CLEANUP=OFF -DENABLE_STATIC=ON .. \ + && make -j"${JOBS}" \ + && make install \ + && cd ../.. \ + && rm mongo-c-driver-$MONGO_C_DRIVER_VERSION.tar.gz \ + || exit 1 + printf " - MongoDB C driver successfully installed @ ${MONGO_C_DRIVER_ROOT}.\\n" + else + printf " - MongoDB C driver found with correct version @ ${MONGO_C_DRIVER_ROOT}.\\n" + fi + if [ $? -ne 0 ]; then exit -1; fi + printf "Checking MongoDB C++ driver installation...\\n" + if [ ! -d $MONGO_CXX_DRIVER_ROOT ] || [ $FORCE_BUILD ]; then + printf "Installing MongoDB C++ driver...\\n" + curl -L https://github.com/mongodb/mongo-cxx-driver/archive/r$MONGO_CXX_DRIVER_VERSION.tar.gz -o mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION.tar.gz \ + && tar -xzf mongo-cxx-driver-r${MONGO_CXX_DRIVER_VERSION}.tar.gz \ + && cd mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION/build \ + && $CMAKE -DBUILD_SHARED_LIBS=OFF -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$PREFIX .. \ + && make -j"${JOBS}" VERBOSE=1 \ + && make install \ + && cd ../.. \ + && rm -f mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION.tar.gz \ + || exit 1 + printf " - MongoDB C++ driver successfully installed @ ${MONGO_CXX_DRIVER_ROOT}.\\n" + else + printf " - MongoDB C++ driver found with correct version @ ${MONGO_CXX_DRIVER_ROOT}.\\n" + fi + if [ $? -ne 0 ]; then exit -1; fi + + printf "\\n" fi - if [ $? -ne 0 ]; then exit -1; fi - printf "Checking MongoDB C driver installation...\\n" - if [ ! 
-d $MONGO_C_DRIVER_ROOT ]; then - printf "Installing MongoDB C driver...\\n" - curl -LO https://github.com/mongodb/mongo-c-driver/releases/download/$MONGO_C_DRIVER_VERSION/mongo-c-driver-$MONGO_C_DRIVER_VERSION.tar.gz \ - && tar -xzf mongo-c-driver-$MONGO_C_DRIVER_VERSION.tar.gz \ - && cd mongo-c-driver-$MONGO_C_DRIVER_VERSION \ - && mkdir -p cmake-build \ - && cd cmake-build \ - && $CMAKE -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$PREFIX -DENABLE_BSON=ON -DENABLE_SSL=OPENSSL -DENABLE_AUTOMATIC_INIT_AND_CLEANUP=OFF -DENABLE_STATIC=ON .. \ - && make -j"${JOBS}" \ - && make install \ - && cd ../.. \ - && rm mongo-c-driver-$MONGO_C_DRIVER_VERSION.tar.gz \ - || exit 1 - printf " - MongoDB C driver successfully installed @ ${MONGO_C_DRIVER_ROOT}.\\n" - else - printf " - MongoDB C driver found with correct version @ ${MONGO_C_DRIVER_ROOT}.\\n" - fi - if [ $? -ne 0 ]; then exit -1; fi - printf "Checking MongoDB C++ driver installation...\\n" - if [ ! -d $MONGO_CXX_DRIVER_ROOT ]; then - printf "Installing MongoDB C++ driver...\\n" - curl -L https://github.com/mongodb/mongo-cxx-driver/archive/r$MONGO_CXX_DRIVER_VERSION.tar.gz -o mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION.tar.gz \ - && tar -xzf mongo-cxx-driver-r${MONGO_CXX_DRIVER_VERSION}.tar.gz \ - && cd mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION/build \ - && $CMAKE -DBUILD_SHARED_LIBS=OFF -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$PREFIX .. \ - && make -j"${JOBS}" VERBOSE=1 \ - && make install \ - && cd ../.. \ - && rm -f mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION.tar.gz \ - || exit 1 - printf " - MongoDB C++ driver successfully installed @ ${MONGO_CXX_DRIVER_ROOT}.\\n" - else - printf " - MongoDB C++ driver found with correct version @ ${MONGO_CXX_DRIVER_ROOT}.\\n" - fi - if [ $? -ne 0 ]; then exit -1; fi - - - printf "\\n" - printf "Checking LLVM 4 support...\\n" - if [ ! -d $LLVM_ROOT ]; then + if [ ! -d $LLVM_ROOT ] || [ $FORCE_BUILD ]; then printf "Installing LLVM 4...\\n" cd ../opt \ && git clone --depth 1 --single-branch --branch $LLVM_VERSION https://github.com/llvm-mirror/llvm.git llvm && cd llvm \ diff --git a/scripts/eosio_build_centos.sh b/scripts/eosio_build_centos.sh index fb17fdbfc9b..754b6164ad8 100755 --- a/scripts/eosio_build_centos.sh +++ b/scripts/eosio_build_centos.sh @@ -172,7 +172,7 @@ fi printf "\\n" printf "Checking CMAKE installation...\\n" -if [ ! -e $CMAKE ]; then +if [ ! -e $CMAKE ] || [ $FORCE_BUILD ]; then printf "Installing CMAKE...\\n" curl -LO https://cmake.org/files/v$CMAKE_VERSION_MAJOR.$CMAKE_VERSION_MINOR/cmake-$CMAKE_VERSION.tar.gz \ && tar -xzf cmake-$CMAKE_VERSION.tar.gz \ @@ -196,7 +196,7 @@ printf "\\n" export CPATH="${CPATH}:${PYTHON3PATH}/root/usr/include/python3.6m" # m on the end causes problems with boost finding python3 if $PIN_COMPILER; then printf "Checking Clang 8 support...\\n" - if [ ! -d $CLANG8_ROOT ]; then + if [ ! -d $CLANG8_ROOT ] || [ $FORCE_BUILD ]; then printf "Installing Clang 8...\\n" cd ${TMP_LOCATION} \ && git clone --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/llvm.git clang8 && cd clang8 \ @@ -238,7 +238,7 @@ if $PIN_COMPILER; then printf "\\n" printf "Checking LLVM 4 installation...\\n" - if [ ! -d $OPT_LOCATION/llvm4 ]; then + if [ ! 
-d $OPT_LOCATION/llvm4 ] || [ $FORCE_BUILD ]; then printf "Installing LLVM 4...\\n" cd $SRC_LOCATION \ && git clone https://github.com/llvm-mirror/llvm --single-branch --branch $LLVM_VERSION && cd llvm \ @@ -251,7 +251,7 @@ if $PIN_COMPILER; then cd $SRC_LOCATION printf "Checking zlib library (${BOOST_VERSION}) installation...\\n" - if [ ! -d $OPT_LOCATION/zlib ]; then + if [ ! -d $OPT_LOCATION/zlib ] || [ $FORCE_BUILD ]; then printf "Installing zlib...\\n" curl -LO https://www.zlib.net/zlib-1.2.11.tar.gz && tar -xf zlib-1.2.11.tar.gz \ && cd zlib-1.2.11 && mkdir build && cd build \ @@ -262,7 +262,7 @@ if $PIN_COMPILER; then cd $SRC_LOCATION printf "Checking Boost library (${BOOST_VERSION}) installation...\\n" BOOSTVERSION=$( grep "#define BOOST_VERSION" "$BOOST_ROOT/include/boost/version.hpp" 2>/dev/null | tail -1 | tr -s ' ' | cut -d\ -f3 ) - if [ "${BOOSTVERSION}" != "${BOOST_VERSION_MAJOR}0${BOOST_VERSION_MINOR}0${BOOST_VERSION_PATCH}" ]; then + if [ "${BOOSTVERSION}" != "${BOOST_VERSION_MAJOR}0${BOOST_VERSION_MINOR}0${BOOST_VERSION_PATCH}" ] || [ $FORCE_BUILD ]; then printf "Installing Boost library...\\n" curl -LO https://dl.bintray.com/boostorg/release/${BOOST_VERSION_MAJOR}.${BOOST_VERSION_MINOR}.${BOOST_VERSION_PATCH}/source/boost_$BOOST_VERSION.tar.bz2 \ && tar -xjf boost_$BOOST_VERSION.tar.bz2 \ @@ -284,7 +284,7 @@ if $PIN_COMPILER; then else printf "Checking Boost library (${BOOST_VERSION}) installation...\\n" BOOSTVERSION=$( grep "#define BOOST_VERSION" "$BOOST_ROOT/include/boost/version.hpp" 2>/dev/null | tail -1 | tr -s ' ' | cut -d\ -f3 ) - if [ "${BOOSTVERSION}" != "${BOOST_VERSION_MAJOR}0${BOOST_VERSION_MINOR}0${BOOST_VERSION_PATCH}" ]; then + if [ "${BOOSTVERSION}" != "${BOOST_VERSION_MAJOR}0${BOOST_VERSION_MINOR}0${BOOST_VERSION_PATCH}" ] || [ $FORCE_BUILD ]; then printf "Installing Boost library...\\n" curl -LO https://dl.bintray.com/boostorg/release/${BOOST_VERSION_MAJOR}.${BOOST_VERSION_MINOR}.${BOOST_VERSION_PATCH}/source/boost_$BOOST_VERSION.tar.bz2 \ && tar -xjf boost_$BOOST_VERSION.tar.bz2 \ @@ -305,70 +305,69 @@ else printf "\\n" - - printf "Checking MongoDB installation...\\n" - if [ ! -d $MONGODB_ROOT ]; then - printf "Installing MongoDB into ${MONGODB_ROOT}...\\n" - curl -OL https://fastdl.mongodb.org/linux/mongodb-linux-x86_64-amazon-$MONGODB_VERSION.tgz \ - && tar -xzf mongodb-linux-x86_64-amazon-$MONGODB_VERSION.tgz \ - && mv $SRC_LOCATION/mongodb-linux-x86_64-amazon-$MONGODB_VERSION $MONGODB_ROOT \ - && touch $MONGODB_LOG_LOCATION/mongod.log \ - && rm -f mongodb-linux-x86_64-amazon-$MONGODB_VERSION.tgz \ - && cp -f $REPO_ROOT/scripts/mongod.conf $MONGODB_CONF \ - && mkdir -p $MONGODB_DATA_LOCATION \ - && rm -rf $MONGODB_LINK_LOCATION \ - && rm -rf $BIN_LOCATION/mongod \ - && ln -s $MONGODB_ROOT $MONGODB_LINK_LOCATION \ - && ln -s $MONGODB_LINK_LOCATION/bin/mongod $BIN_LOCATION/mongod \ - || exit 1 - printf " - MongoDB successfully installed @ ${MONGODB_ROOT} (Symlinked to ${MONGODB_LINK_LOCATION}).\\n" - else - printf " - MongoDB found with correct version @ ${MONGODB_ROOT} (Symlinked to ${MONGODB_LINK_LOCATION}).\\n" - fi - if [ $? -ne 0 ]; then exit -1; fi - printf "Checking MongoDB C driver installation...\\n" - if [ ! 
-d $MONGO_C_DRIVER_ROOT ]; then - printf "Installing MongoDB C driver...\\n" - curl -LO https://github.com/mongodb/mongo-c-driver/releases/download/$MONGO_C_DRIVER_VERSION/mongo-c-driver-$MONGO_C_DRIVER_VERSION.tar.gz \ - && tar -xzf mongo-c-driver-$MONGO_C_DRIVER_VERSION.tar.gz \ - && cd mongo-c-driver-$MONGO_C_DRIVER_VERSION \ - && mkdir -p cmake-build \ - && cd cmake-build \ - && $CMAKE -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$PREFIX -DENABLE_BSON=ON -DENABLE_SSL=OPENSSL -DENABLE_AUTOMATIC_INIT_AND_CLEANUP=OFF -DENABLE_STATIC=ON .. \ - && make -j"${JOBS}" \ - && make install \ - && cd ../.. \ - && rm mongo-c-driver-$MONGO_C_DRIVER_VERSION.tar.gz \ - || exit 1 - printf " - MongoDB C driver successfully installed @ ${MONGO_C_DRIVER_ROOT}.\\n" - else - printf " - MongoDB C driver found with correct version @ ${MONGO_C_DRIVER_ROOT}.\\n" - fi - if [ $? -ne 0 ]; then exit -1; fi - printf "Checking MongoDB C++ driver installation...\\n" - if [ ! -d $MONGO_CXX_DRIVER_ROOT ]; then - printf "Installing MongoDB C++ driver...\\n" - curl -L https://github.com/mongodb/mongo-cxx-driver/archive/r$MONGO_CXX_DRIVER_VERSION.tar.gz -o mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION.tar.gz \ - && tar -xzf mongo-cxx-driver-r${MONGO_CXX_DRIVER_VERSION}.tar.gz \ - && cd mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION/build \ - && $CMAKE -DBUILD_SHARED_LIBS=OFF -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$PREFIX .. \ - && make -j"${JOBS}" VERBOSE=1 \ - && make install \ - && cd ../.. \ - && rm -f mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION.tar.gz \ - || exit 1 - printf " - MongoDB C++ driver successfully installed @ ${MONGO_CXX_DRIVER_ROOT}.\\n" - else - printf " - MongoDB C++ driver found with correct version @ ${MONGO_CXX_DRIVER_ROOT}.\\n" + if [ $BUILD_MONGO ]; then + printf "Checking MongoDB installation...\\n" + if [ ! -d $MONGODB_ROOT ] || [ $FORCE_BUILD ]; then + printf "Installing MongoDB into ${MONGODB_ROOT}...\\n" + curl -OL https://fastdl.mongodb.org/linux/mongodb-linux-x86_64-amazon-$MONGODB_VERSION.tgz \ + && tar -xzf mongodb-linux-x86_64-amazon-$MONGODB_VERSION.tgz \ + && mv $SRC_LOCATION/mongodb-linux-x86_64-amazon-$MONGODB_VERSION $MONGODB_ROOT \ + && touch $MONGODB_LOG_LOCATION/mongod.log \ + && rm -f mongodb-linux-x86_64-amazon-$MONGODB_VERSION.tgz \ + && cp -f $REPO_ROOT/scripts/mongod.conf $MONGODB_CONF \ + && mkdir -p $MONGODB_DATA_LOCATION \ + && rm -rf $MONGODB_LINK_LOCATION \ + && rm -rf $BIN_LOCATION/mongod \ + && ln -s $MONGODB_ROOT $MONGODB_LINK_LOCATION \ + && ln -s $MONGODB_LINK_LOCATION/bin/mongod $BIN_LOCATION/mongod \ + || exit 1 + printf " - MongoDB successfully installed @ ${MONGODB_ROOT} (Symlinked to ${MONGODB_LINK_LOCATION}).\\n" + else + printf " - MongoDB found with correct version @ ${MONGODB_ROOT} (Symlinked to ${MONGODB_LINK_LOCATION}).\\n" + fi + if [ $? -ne 0 ]; then exit -1; fi + printf "Checking MongoDB C driver installation...\\n" + if [ ! -d $MONGO_C_DRIVER_ROOT ] || [ $FORCE_BUILD ]; then + printf "Installing MongoDB C driver...\\n" + curl -LO https://github.com/mongodb/mongo-c-driver/releases/download/$MONGO_C_DRIVER_VERSION/mongo-c-driver-$MONGO_C_DRIVER_VERSION.tar.gz \ + && tar -xzf mongo-c-driver-$MONGO_C_DRIVER_VERSION.tar.gz \ + && cd mongo-c-driver-$MONGO_C_DRIVER_VERSION \ + && mkdir -p cmake-build \ + && cd cmake-build \ + && $CMAKE -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$PREFIX -DENABLE_BSON=ON -DENABLE_SSL=OPENSSL -DENABLE_AUTOMATIC_INIT_AND_CLEANUP=OFF -DENABLE_STATIC=ON .. 
\ + && make -j"${JOBS}" \ + && make install \ + && cd ../.. \ + && rm mongo-c-driver-$MONGO_C_DRIVER_VERSION.tar.gz \ + || exit 1 + printf " - MongoDB C driver successfully installed @ ${MONGO_C_DRIVER_ROOT}.\\n" + else + printf " - MongoDB C driver found with correct version @ ${MONGO_C_DRIVER_ROOT}.\\n" + fi + if [ $? -ne 0 ]; then exit -1; fi + printf "Checking MongoDB C++ driver installation...\\n" + if [ ! -d $MONGO_CXX_DRIVER_ROOT ] || [ $FORCE_BUILD ]; then + printf "Installing MongoDB C++ driver...\\n" + curl -L https://github.com/mongodb/mongo-cxx-driver/archive/r$MONGO_CXX_DRIVER_VERSION.tar.gz -o mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION.tar.gz \ + && tar -xzf mongo-cxx-driver-r${MONGO_CXX_DRIVER_VERSION}.tar.gz \ + && cd mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION/build \ + && $CMAKE -DBUILD_SHARED_LIBS=OFF -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$PREFIX .. \ + && make -j"${JOBS}" VERBOSE=1 \ + && make install \ + && cd ../.. \ + && rm -f mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION.tar.gz \ + || exit 1 + printf " - MongoDB C++ driver successfully installed @ ${MONGO_CXX_DRIVER_ROOT}.\\n" + else + printf " - MongoDB C++ driver found with correct version @ ${MONGO_CXX_DRIVER_ROOT}.\\n" + fi + if [ $? -ne 0 ]; then exit -1; fi + + printf "\\n" fi - if [ $? -ne 0 ]; then exit -1; fi - - - printf "\\n" - printf "Checking LLVM 4 support...\\n" - if [ ! -d $LLVM_ROOT ]; then + if [ ! -d $LLVM_ROOT ] || [ $FORCE_BUILD ]; then printf "Installing LLVM 4...\\n" cd ../opt \ && git clone --depth 1 --single-branch --branch $LLVM_VERSION https://github.com/llvm-mirror/llvm.git llvm && cd llvm \ @@ -385,7 +384,6 @@ else fi if [ $? -ne 0 ]; then exit -1; fi - cd .. printf "\\n" fi diff --git a/scripts/eosio_build_darwin.sh b/scripts/eosio_build_darwin.sh index 66883ed5267..7ea9781197f 100755 --- a/scripts/eosio_build_darwin.sh +++ b/scripts/eosio_build_darwin.sh @@ -165,7 +165,7 @@ printf "\\n" export CPATH="$(python-config --includes | awk '{print $1}' | cut -dI -f2):$CPATH" # Boost has trouble finding pyconfig.h printf "Checking Boost library (${BOOST_VERSION}) installation...\\n" BOOSTVERSION=$( grep "#define BOOST_VERSION" "$BOOST_ROOT/include/boost/version.hpp" 2>/dev/null | tail -1 | tr -s ' ' | cut -d\ -f3 ) -if [ "${BOOSTVERSION}" != "${BOOST_VERSION_MAJOR}0${BOOST_VERSION_MINOR}0${BOOST_VERSION_PATCH}" ]; then +if [ "${BOOSTVERSION}" != "${BOOST_VERSION_MAJOR}0${BOOST_VERSION_MINOR}0${BOOST_VERSION_PATCH}" ] || [ $FORCE_BUILD ]; then printf "Installing Boost library...\\n" curl -LO https://dl.bintray.com/boostorg/release/$BOOST_VERSION_MAJOR.$BOOST_VERSION_MINOR.$BOOST_VERSION_PATCH/source/boost_$BOOST_VERSION.tar.bz2 \ && tar -xjf boost_$BOOST_VERSION.tar.bz2 \ @@ -187,64 +187,66 @@ if [ $? -ne 0 ]; then exit -1; fi printf "\\n" -printf "Checking MongoDB installation...\\n" -if [ ! 
-d $MONGODB_ROOT ]; then - printf "Installing MongoDB into ${MONGODB_ROOT}...\\n" - curl -OL https://fastdl.mongodb.org/osx/mongodb-osx-ssl-x86_64-$MONGODB_VERSION.tgz \ - && tar -xzf mongodb-osx-ssl-x86_64-$MONGODB_VERSION.tgz \ - && mv $SRC_LOCATION/mongodb-osx-x86_64-$MONGODB_VERSION $MONGODB_ROOT \ - && touch $MONGODB_LOG_LOCATION/mongod.log \ - && rm -f mongodb-osx-ssl-x86_64-$MONGODB_VERSION.tgz \ - && cp -f $REPO_ROOT/scripts/mongod.conf $MONGODB_CONF \ - && mkdir -p $MONGODB_DATA_LOCATION \ - && rm -rf $MONGODB_LINK_LOCATION \ - && rm -rf $BIN_LOCATION/mongod \ - && ln -s $MONGODB_ROOT $MONGODB_LINK_LOCATION \ - && ln -s $MONGODB_LINK_LOCATION/bin/mongod $BIN_LOCATION/mongod \ - || exit 1 - printf " - MongoDB successfully installed @ ${MONGODB_ROOT}\\n" -else - printf " - MongoDB found with correct version @ ${MONGODB_ROOT}.\\n" -fi -if [ $? -ne 0 ]; then exit -1; fi -printf "Checking MongoDB C driver installation...\\n" -if [ ! -d $MONGO_C_DRIVER_ROOT ]; then - printf "Installing MongoDB C driver...\\n" - curl -LO https://github.com/mongodb/mongo-c-driver/releases/download/$MONGO_C_DRIVER_VERSION/mongo-c-driver-$MONGO_C_DRIVER_VERSION.tar.gz \ - && tar -xzf mongo-c-driver-$MONGO_C_DRIVER_VERSION.tar.gz \ - && cd mongo-c-driver-$MONGO_C_DRIVER_VERSION \ - && mkdir -p cmake-build \ - && cd cmake-build \ - && $CMAKE -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$PREFIX -DENABLE_BSON=ON -DENABLE_SSL=DARWIN -DENABLE_AUTOMATIC_INIT_AND_CLEANUP=OFF -DENABLE_STATIC=ON .. \ - && make -j"${JOBS}" \ - && make install \ - && cd ../.. \ - && rm mongo-c-driver-$MONGO_C_DRIVER_VERSION.tar.gz \ - || exit 1 - printf " - MongoDB C driver successfully installed @ ${MONGO_C_DRIVER_ROOT}.\\n" -else - printf " - MongoDB C driver found with correct version @ ${MONGO_C_DRIVER_ROOT}.\\n" -fi -if [ $? -ne 0 ]; then exit -1; fi -printf "Checking MongoDB C++ driver installation...\\n" -if [ "$(grep "Version:" $PREFIX/lib/pkgconfig/libmongocxx-static.pc 2>/dev/null | tr -s ' ' | awk '{print $2}')" != $MONGO_CXX_DRIVER_VERSION ]; then - printf "Installing MongoDB C++ driver...\\n" - curl -L https://github.com/mongodb/mongo-cxx-driver/archive/r$MONGO_CXX_DRIVER_VERSION.tar.gz -o mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION.tar.gz \ - && tar -xzf mongo-cxx-driver-r${MONGO_CXX_DRIVER_VERSION}.tar.gz \ - && cd mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION/build \ - && $CMAKE -DBUILD_SHARED_LIBS=OFF -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$PREFIX .. \ - && make -j"${JOBS}" VERBOSE=1 \ - && make install \ - && cd ../.. \ - && rm -f mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION.tar.gz \ - || exit 1 - printf " - MongoDB C++ driver successfully installed @ ${MONGO_CXX_DRIVER_ROOT}.\\n" -else - printf " - MongoDB C++ driver found with correct version @ ${MONGO_CXX_DRIVER_ROOT}.\\n" -fi -if [ $? -ne 0 ]; then exit -1; fi +if [ $BUILD_MONGO ]; then + printf "Checking MongoDB installation...\\n" + if [ ! 
-d $MONGODB_ROOT ] || [ $FORCE_BUILD ]; then + printf "Installing MongoDB into ${MONGODB_ROOT}...\\n" + curl -OL https://fastdl.mongodb.org/osx/mongodb-osx-ssl-x86_64-$MONGODB_VERSION.tgz \ + && tar -xzf mongodb-osx-ssl-x86_64-$MONGODB_VERSION.tgz \ + && mv $SRC_LOCATION/mongodb-osx-x86_64-$MONGODB_VERSION $MONGODB_ROOT \ + && touch $MONGODB_LOG_LOCATION/mongod.log \ + && rm -f mongodb-osx-ssl-x86_64-$MONGODB_VERSION.tgz \ + && cp -f $REPO_ROOT/scripts/mongod.conf $MONGODB_CONF \ + && mkdir -p $MONGODB_DATA_LOCATION \ + && rm -rf $MONGODB_LINK_LOCATION \ + && rm -rf $BIN_LOCATION/mongod \ + && ln -s $MONGODB_ROOT $MONGODB_LINK_LOCATION \ + && ln -s $MONGODB_LINK_LOCATION/bin/mongod $BIN_LOCATION/mongod \ + || exit 1 + printf " - MongoDB successfully installed @ ${MONGODB_ROOT}\\n" + else + printf " - MongoDB found with correct version @ ${MONGODB_ROOT}.\\n" + fi + if [ $? -ne 0 ]; then exit -1; fi + printf "Checking MongoDB C driver installation...\\n" + if [ ! -d $MONGO_C_DRIVER_ROOT ] || [ $FORCE_BUILD ]; then + printf "Installing MongoDB C driver...\\n" + curl -LO https://github.com/mongodb/mongo-c-driver/releases/download/$MONGO_C_DRIVER_VERSION/mongo-c-driver-$MONGO_C_DRIVER_VERSION.tar.gz \ + && tar -xzf mongo-c-driver-$MONGO_C_DRIVER_VERSION.tar.gz \ + && cd mongo-c-driver-$MONGO_C_DRIVER_VERSION \ + && mkdir -p cmake-build \ + && cd cmake-build \ + && $CMAKE -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$PREFIX -DENABLE_BSON=ON -DENABLE_SSL=DARWIN -DENABLE_AUTOMATIC_INIT_AND_CLEANUP=OFF -DENABLE_STATIC=ON .. \ + && make -j"${JOBS}" \ + && make install \ + && cd ../.. \ + && rm mongo-c-driver-$MONGO_C_DRIVER_VERSION.tar.gz \ + || exit 1 + printf " - MongoDB C driver successfully installed @ ${MONGO_C_DRIVER_ROOT}.\\n" + else + printf " - MongoDB C driver found with correct version @ ${MONGO_C_DRIVER_ROOT}.\\n" + fi + if [ $? -ne 0 ]; then exit -1; fi + printf "Checking MongoDB C++ driver installation...\\n" + if [ "$(grep "Version:" $PREFIX/lib/pkgconfig/libmongocxx-static.pc 2>/dev/null | tr -s ' ' | awk '{print $2}')" != $MONGO_CXX_DRIVER_VERSION ] || [ $FORCE_BUILD ]; then + printf "Installing MongoDB C++ driver...\\n" + curl -L https://github.com/mongodb/mongo-cxx-driver/archive/r$MONGO_CXX_DRIVER_VERSION.tar.gz -o mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION.tar.gz \ + && tar -xzf mongo-cxx-driver-r${MONGO_CXX_DRIVER_VERSION}.tar.gz \ + && cd mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION/build \ + && $CMAKE -DBUILD_SHARED_LIBS=OFF -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$PREFIX .. \ + && make -j"${JOBS}" VERBOSE=1 \ + && make install \ + && cd ../.. \ + && rm -f mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION.tar.gz \ + || exit 1 + printf " - MongoDB C++ driver successfully installed @ ${MONGO_CXX_DRIVER_ROOT}.\\n" + else + printf " - MongoDB C++ driver found with correct version @ ${MONGO_CXX_DRIVER_ROOT}.\\n" + fi + if [ $? -ne 0 ]; then exit -1; fi -printf "\\n" + printf "\\n" +fi # We install llvm into /usr/local/opt using brew install llvm@4 @@ -263,7 +265,7 @@ printf "\\n" if $PIN_COMPILER; then printf "Checking Clang 8 support...\\n" - if [ ! -d $CLANG8_ROOT ]; then + if [ ! 
-d $CLANG8_ROOT ] || [ $FORCE_BUILD ]; then printf "Installing Clang 8...\\n" cd ${TMP_LOCATION} \ && git clone --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/llvm.git clang8 \ diff --git a/scripts/eosio_build_fedora.sh b/scripts/eosio_build_fedora.sh index 366f56bba01..8acc15a9ced 100755 --- a/scripts/eosio_build_fedora.sh +++ b/scripts/eosio_build_fedora.sh @@ -118,7 +118,7 @@ printf "\\n" printf "Checking CMAKE installation...\\n" -if [ ! -e $CMAKE ]; then +if [ ! -e $CMAKE ] || [ $FORCE_BUILD ]; then printf "Installing CMAKE...\\n" curl -LO https://cmake.org/files/v$CMAKE_VERSION_MAJOR.$CMAKE_VERSION_MINOR/cmake-$CMAKE_VERSION.tar.gz \ && tar -xzf cmake-$CMAKE_VERSION.tar.gz \ @@ -135,12 +135,11 @@ else fi if [ $? -ne 0 ]; then exit -1; fi - printf "\\n" if $PIN_COMPILER; then printf "Checking Clang 8 support...\\n" - if [ ! -d $CLANG8_ROOT ]; then + if [ ! -d $CLANG8_ROOT ] || [ $FORCE_BUILD ]; then printf "Installing Clang 8...\\n" cd ${TMP_LOCATION} \ && git clone --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/llvm.git clang8 && cd clang8 \ @@ -182,7 +181,7 @@ if $PIN_COMPILER; then printf "\\n" printf "Checking LLVM 4 installation...\\n" - if [ ! -d $OPT_LOCATION/llvm4 ]; then + if [ ! -d $OPT_LOCATION/llvm4 ] || [ $FORCE_BUILD ]; then printf "Installing LLVM 4...\\n" cd $SRC_LOCATION \ && git clone https://github.com/llvm-mirror/llvm --single-branch --branch $LLVM_VERSION && cd llvm \ @@ -195,7 +194,7 @@ if $PIN_COMPILER; then cd $SRC_LOCATION printf "Checking zlib library (${BOOST_VERSION}) installation...\\n" - if [ ! -d $OPT_LOCATION/zlib ]; then + if [ ! -d $OPT_LOCATION/zlib ] || [ $FORCE_BUILD ]; then printf "Installing zlib...\\n" curl -LO https://www.zlib.net/zlib-1.2.11.tar.gz && tar -xf zlib-1.2.11.tar.gz \ && cd zlib-1.2.11 && mkdir build && cd build \ @@ -206,7 +205,7 @@ if $PIN_COMPILER; then cd $SRC_LOCATION printf "Checking Boost library (${BOOST_VERSION}) installation...\\n" BOOSTVERSION=$( grep "#define BOOST_VERSION" "$BOOST_ROOT/include/boost/version.hpp" 2>/dev/null | tail -1 | tr -s ' ' | cut -d\ -f3 ) - if [ "${BOOSTVERSION}" != "${BOOST_VERSION_MAJOR}0${BOOST_VERSION_MINOR}0${BOOST_VERSION_PATCH}" ]; then + if [ "${BOOSTVERSION}" != "${BOOST_VERSION_MAJOR}0${BOOST_VERSION_MINOR}0${BOOST_VERSION_PATCH}" ] || [ $FORCE_BUILD ]; then printf "Installing Boost library...\\n" curl -LO https://dl.bintray.com/boostorg/release/${BOOST_VERSION_MAJOR}.${BOOST_VERSION_MINOR}.${BOOST_VERSION_PATCH}/source/boost_$BOOST_VERSION.tar.bz2 \ && tar -xjf boost_$BOOST_VERSION.tar.bz2 \ @@ -228,7 +227,7 @@ if $PIN_COMPILER; then else printf "Checking Boost library (${BOOST_VERSION}) installation...\\n" BOOSTVERSION=$( grep "#define BOOST_VERSION" "$BOOST_ROOT/include/boost/version.hpp" 2>/dev/null | tail -1 | tr -s ' ' | cut -d\ -f3 ) - if [ "${BOOSTVERSION}" != "${BOOST_VERSION_MAJOR}0${BOOST_VERSION_MINOR}0${BOOST_VERSION_PATCH}" ]; then + if [ "${BOOSTVERSION}" != "${BOOST_VERSION_MAJOR}0${BOOST_VERSION_MINOR}0${BOOST_VERSION_PATCH}" ] || [ $FORCE_BUILD ]; then printf "Installing Boost library...\\n" curl -LO https://dl.bintray.com/boostorg/release/${BOOST_VERSION_MAJOR}.${BOOST_VERSION_MINOR}.${BOOST_VERSION_PATCH}/source/boost_$BOOST_VERSION.tar.bz2 \ && tar -xjf boost_$BOOST_VERSION.tar.bz2 \ @@ -249,70 +248,70 @@ else printf "\\n" - - printf "Checking MongoDB installation...\\n" - if [ ! 
-d $MONGODB_ROOT ]; then - printf "Installing MongoDB into ${MONGODB_ROOT}...\\n" - curl -OL https://fastdl.mongodb.org/linux/mongodb-linux-x86_64-amazon-$MONGODB_VERSION.tgz \ - && tar -xzf mongodb-linux-x86_64-amazon-$MONGODB_VERSION.tgz \ - && mv $SRC_LOCATION/mongodb-linux-x86_64-amazon-$MONGODB_VERSION $MONGODB_ROOT \ - && touch $MONGODB_LOG_LOCATION/mongod.log \ - && rm -f mongodb-linux-x86_64-amazon-$MONGODB_VERSION.tgz \ - && cp -f $REPO_ROOT/scripts/mongod.conf $MONGODB_CONF \ - && mkdir -p $MONGODB_DATA_LOCATION \ - && rm -rf $MONGODB_LINK_LOCATION \ - && rm -rf $BIN_LOCATION/mongod \ - && ln -s $MONGODB_ROOT $MONGODB_LINK_LOCATION \ - && ln -s $MONGODB_LINK_LOCATION/bin/mongod $BIN_LOCATION/mongod \ - || exit 1 - printf " - MongoDB successfully installed @ ${MONGODB_ROOT}\\n" - else - printf " - MongoDB found with correct version @ ${MONGODB_ROOT}.\\n" - fi - if [ $? -ne 0 ]; then exit -1; fi - printf "Checking MongoDB C driver installation...\\n" - if [ ! -d $MONGO_C_DRIVER_ROOT ]; then - printf "Installing MongoDB C driver...\\n" - curl -LO https://github.com/mongodb/mongo-c-driver/releases/download/$MONGO_C_DRIVER_VERSION/mongo-c-driver-$MONGO_C_DRIVER_VERSION.tar.gz \ - && tar -xzf mongo-c-driver-$MONGO_C_DRIVER_VERSION.tar.gz \ - && cd mongo-c-driver-$MONGO_C_DRIVER_VERSION \ - && mkdir -p cmake-build \ - && cd cmake-build \ - && $CMAKE -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$PREFIX -DENABLE_BSON=ON -DENABLE_SSL=OPENSSL -DENABLE_AUTOMATIC_INIT_AND_CLEANUP=OFF -DENABLE_STATIC=ON .. \ - && make -j"${JOBS}" \ - && make install \ - && cd ../.. \ - && rm mongo-c-driver-$MONGO_C_DRIVER_VERSION.tar.gz \ - || exit 1 - printf " - MongoDB C driver successfully installed @ ${MONGO_C_DRIVER_ROOT}.\\n" - else - printf " - MongoDB C driver found with correct version @ ${MONGO_C_DRIVER_ROOT}.\\n" + if [ $BUILD_MONGO ]; then + printf "Checking MongoDB installation...\\n" + if [ ! -d $MONGODB_ROOT ] || [ $FORCE_BUILD ]; then + printf "Installing MongoDB into ${MONGODB_ROOT}...\\n" + curl -OL https://fastdl.mongodb.org/linux/mongodb-linux-x86_64-amazon-$MONGODB_VERSION.tgz \ + && tar -xzf mongodb-linux-x86_64-amazon-$MONGODB_VERSION.tgz \ + && mv $SRC_LOCATION/mongodb-linux-x86_64-amazon-$MONGODB_VERSION $MONGODB_ROOT \ + && touch $MONGODB_LOG_LOCATION/mongod.log \ + && rm -f mongodb-linux-x86_64-amazon-$MONGODB_VERSION.tgz \ + && cp -f $REPO_ROOT/scripts/mongod.conf $MONGODB_CONF \ + && mkdir -p $MONGODB_DATA_LOCATION \ + && rm -rf $MONGODB_LINK_LOCATION \ + && rm -rf $BIN_LOCATION/mongod \ + && ln -s $MONGODB_ROOT $MONGODB_LINK_LOCATION \ + && ln -s $MONGODB_LINK_LOCATION/bin/mongod $BIN_LOCATION/mongod \ + || exit 1 + printf " - MongoDB successfully installed @ ${MONGODB_ROOT}\\n" + else + printf " - MongoDB found with correct version @ ${MONGODB_ROOT}.\\n" + fi + if [ $? -ne 0 ]; then exit -1; fi + printf "Checking MongoDB C driver installation...\\n" + if [ ! -d $MONGO_C_DRIVER_ROOT ] || [ $FORCE_BUILD ]; then + printf "Installing MongoDB C driver...\\n" + curl -LO https://github.com/mongodb/mongo-c-driver/releases/download/$MONGO_C_DRIVER_VERSION/mongo-c-driver-$MONGO_C_DRIVER_VERSION.tar.gz \ + && tar -xzf mongo-c-driver-$MONGO_C_DRIVER_VERSION.tar.gz \ + && cd mongo-c-driver-$MONGO_C_DRIVER_VERSION \ + && mkdir -p cmake-build \ + && cd cmake-build \ + && $CMAKE -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$PREFIX -DENABLE_BSON=ON -DENABLE_SSL=OPENSSL -DENABLE_AUTOMATIC_INIT_AND_CLEANUP=OFF -DENABLE_STATIC=ON .. 
\ + && make -j"${JOBS}" \ + && make install \ + && cd ../.. \ + && rm mongo-c-driver-$MONGO_C_DRIVER_VERSION.tar.gz \ + || exit 1 + printf " - MongoDB C driver successfully installed @ ${MONGO_C_DRIVER_ROOT}.\\n" + else + printf " - MongoDB C driver found with correct version @ ${MONGO_C_DRIVER_ROOT}.\\n" + fi + if [ $? -ne 0 ]; then exit -1; fi + printf "Checking MongoDB C++ driver installation...\\n" + if [ ! -d $MONGO_CXX_DRIVER_ROOT ] || [ $FORCE_BUILD ]; then + printf "Installing MongoDB C++ driver...\\n" + curl -L https://github.com/mongodb/mongo-cxx-driver/archive/r$MONGO_CXX_DRIVER_VERSION.tar.gz -o mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION.tar.gz \ + && tar -xzf mongo-cxx-driver-r${MONGO_CXX_DRIVER_VERSION}.tar.gz \ + && cd mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION/build \ + && $CMAKE -DBUILD_SHARED_LIBS=OFF -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$PREFIX .. \ + && make -j"${JOBS}" VERBOSE=1 \ + && make install \ + && cd ../.. \ + && rm -f mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION.tar.gz \ + || exit 1 + printf " - MongoDB C++ driver successfully installed @ ${MONGO_CXX_DRIVER_ROOT}.\\n" + else + printf " - MongoDB C++ driver found with correct version @ ${MONGO_CXX_DRIVER_ROOT}.\\n" + fi + if [ $? -ne 0 ]; then exit -1; fi + + + printf "\\n" fi - if [ $? -ne 0 ]; then exit -1; fi - printf "Checking MongoDB C++ driver installation...\\n" - if [ ! -d $MONGO_CXX_DRIVER_ROOT ]; then - printf "Installing MongoDB C++ driver...\\n" - curl -L https://github.com/mongodb/mongo-cxx-driver/archive/r$MONGO_CXX_DRIVER_VERSION.tar.gz -o mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION.tar.gz \ - && tar -xzf mongo-cxx-driver-r${MONGO_CXX_DRIVER_VERSION}.tar.gz \ - && cd mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION/build \ - && $CMAKE -DBUILD_SHARED_LIBS=OFF -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$PREFIX .. \ - && make -j"${JOBS}" VERBOSE=1 \ - && make install \ - && cd ../.. \ - && rm -f mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION.tar.gz \ - || exit 1 - printf " - MongoDB C++ driver successfully installed @ ${MONGO_CXX_DRIVER_ROOT}.\\n" - else - printf " - MongoDB C++ driver found with correct version @ ${MONGO_CXX_DRIVER_ROOT}.\\n" - fi - if [ $? -ne 0 ]; then exit -1; fi - - - printf "\\n" - printf "Checking LLVM 4 support...\\n" - if [ ! -d $LLVM_ROOT ]; then + if [ ! -d $LLVM_ROOT ] || [ $FORCE_BUILD ]; then ln -s /usr/lib64/llvm4.0 $LLVM_ROOT \ || exit 1 printf " - LLVM successfully linked from /usr/lib64/llvm4.0 to ${LLVM_ROOT}\\n" @@ -321,7 +320,6 @@ else fi if [ $? -ne 0 ]; then exit -1; fi - cd .. printf "\\n" fi diff --git a/scripts/eosio_build_ubuntu.sh b/scripts/eosio_build_ubuntu.sh index f63c39dd408..5091b8c380e 100755 --- a/scripts/eosio_build_ubuntu.sh +++ b/scripts/eosio_build_ubuntu.sh @@ -142,7 +142,7 @@ printf "\\n" printf "Checking CMAKE installation...\\n" -if [ ! -e $CMAKE ]; then +if [ ! -e $CMAKE ] || [ $FORCE_BUILD ]; then printf "Installing CMAKE...\\n" curl -LO https://cmake.org/files/v$CMAKE_VERSION_MAJOR.$CMAKE_VERSION_MINOR/cmake-$CMAKE_VERSION.tar.gz \ && tar -xzf cmake-$CMAKE_VERSION.tar.gz \ @@ -164,7 +164,7 @@ printf "\\n" if $PIN_COMPILER; then printf "Checking Clang 8 support...\\n" - if [ ! -d $CLANG8_ROOT ]; then + if [ ! 
-d $CLANG8_ROOT ] || [ $FORCE_BUILD ]; then printf "Installing Clang 8...\\n" cd ${TMP_LOCATION} \ && git clone --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/llvm.git clang8 && cd clang8 \ @@ -206,7 +206,7 @@ if $PIN_COMPILER; then printf "\\n" printf "Checking LLVM 4 installation...\\n" - if [ ! -d $OPT_LOCATION/llvm4 ]; then + if [ ! -d $OPT_LOCATION/llvm4 ] || [ $FORCE_BUILD ]; then printf "Installing LLVM 4...\\n" cd $SRC_LOCATION \ && git clone https://github.com/llvm-mirror/llvm --single-branch --branch $LLVM_VERSION && cd llvm \ @@ -219,7 +219,7 @@ if $PIN_COMPILER; then cd $SRC_LOCATION printf "Checking zlib library (${BOOST_VERSION}) installation...\\n" - if [ ! -d $OPT_LOCATION/zlib ]; then + if [ ! -d $OPT_LOCATION/zlib || $FORCE_BUILD ]; then printf "Installing zlib...\\n" curl -LO https://www.zlib.net/zlib-1.2.11.tar.gz && tar -xf zlib-1.2.11.tar.gz \ && cd zlib-1.2.11 && mkdir build && cd build \ @@ -231,12 +231,11 @@ if $PIN_COMPILER; then cd $SRC_LOCATION printf "Checking Boost library (${BOOST_VERSION}) installation...\\n" BOOSTVERSION=$( grep "#define BOOST_VERSION" "$BOOST_ROOT/include/boost/version.hpp" 2>/dev/null | tail -1 | tr -s ' ' | cut -d\ -f3 ) - if [ "${BOOSTVERSION}" != "${BOOST_VERSION_MAJOR}0${BOOST_VERSION_MINOR}0${BOOST_VERSION_PATCH}" ]; then + if [ "${BOOSTVERSION}" != "${BOOST_VERSION_MAJOR}0${BOOST_VERSION_MINOR}0${BOOST_VERSION_PATCH}" ] || [ $FORCE_BUILD ]; then printf "Installing Boost library...\\n" curl -LO https://dl.bintray.com/boostorg/release/${BOOST_VERSION_MAJOR}.${BOOST_VERSION_MINOR}.${BOOST_VERSION_PATCH}/source/boost_$BOOST_VERSION.tar.bz2 \ && tar -xjf boost_$BOOST_VERSION.tar.bz2 \ && cd $BOOST_ROOT \ - && printf "using clang : 8.0 : ${CLANG8_ROOT}/bin/clang++ : \"-D__STRICT_ANSI__ -stdlib=libc++ -std=c++17 -nostdinc++ -I${CLANG8_ROOT}/include/c++/v1\";" &> clang8.jam \ && ./bootstrap.sh --prefix=$BOOST_ROOT \ && ./b2 toolset=clang cxxflags="-stdlib=libc++ -D__STRICT_ANSI__ -nostdinc++ -I${CLANG8_ROOT}/include/c++/v1" linkflags="-stdlib=libc++" link=static threading=multi --with-iostreams --with-date_time --with-filesystem --with-system --with-program_options --with-chrono --with-test -q -j"${JOBS}" -sZLIB_LIBRARY_PATH="${OPT_LOCATION}/zlib/lib" -sZLIB_INCLUDE="${OPT_LOCATION}/zlib/include" -sZLIB_SOURCE="${SRC_LOCATION}/zlib-1.2.11" install \ && cd .. \ @@ -254,7 +253,7 @@ if $PIN_COMPILER; then else printf "Checking Boost library (${BOOST_VERSION}) installation...\\n" BOOSTVERSION=$( grep "#define BOOST_VERSION" "$BOOST_ROOT/include/boost/version.hpp" 2>/dev/null | tail -1 | tr -s ' ' | cut -d\ -f3 ) - if [ "${BOOSTVERSION}" != "${BOOST_VERSION_MAJOR}0${BOOST_VERSION_MINOR}0${BOOST_VERSION_PATCH}" ]; then + if [ "${BOOSTVERSION}" != "${BOOST_VERSION_MAJOR}0${BOOST_VERSION_MINOR}0${BOOST_VERSION_PATCH}" ] || [ $FORCE_BUILD ]; then printf "Installing Boost library...\\n" curl -LO https://dl.bintray.com/boostorg/release/${BOOST_VERSION_MAJOR}.${BOOST_VERSION_MINOR}.${BOOST_VERSION_PATCH}/source/boost_$BOOST_VERSION.tar.bz2 \ && tar -xjf boost_$BOOST_VERSION.tar.bz2 \ @@ -274,77 +273,78 @@ else printf "\\n" - printf "Checking MongoDB installation...\\n" - if [ ! 
-d $MONGODB_ROOT ]; then - printf "Installing MongoDB into ${MONGODB_ROOT}...\\n" - curl -OL http://downloads.mongodb.org/linux/mongodb-linux-x86_64-ubuntu$OS_MAJ$OS_MIN-$MONGODB_VERSION.tgz \ - && tar -xzf mongodb-linux-x86_64-ubuntu$OS_MAJ$OS_MIN-$MONGODB_VERSION.tgz \ - && mv $SRC_LOCATION/mongodb-linux-x86_64-ubuntu$OS_MAJ$OS_MIN-$MONGODB_VERSION $MONGODB_ROOT \ - && touch $MONGODB_LOG_LOCATION/mongod.log \ - && rm -f mongodb-linux-x86_64-ubuntu$OS_MAJ$OS_MIN-$MONGODB_VERSION.tgz \ - && cp -f $REPO_ROOT/scripts/mongod.conf $MONGODB_CONF \ - && mkdir -p $MONGODB_DATA_LOCATION \ - && rm -rf $MONGODB_LINK_LOCATION \ - && rm -rf $BIN_LOCATION/mongod \ - && ln -s $MONGODB_ROOT $MONGODB_LINK_LOCATION \ - && ln -s $MONGODB_LINK_LOCATION/bin/mongod $BIN_LOCATION/mongod \ - || exit 1 - printf " - MongoDB successfully installed @ ${MONGODB_ROOT} (Symlinked to ${MONGODB_LINK_LOCATION}).\\n" - else - printf " - MongoDB found with correct version @ ${MONGODB_ROOT} (Symlinked to ${MONGODB_LINK_LOCATION}).\\n" - fi - if [ $? -ne 0 ]; then exit -1; fi - printf "Checking MongoDB C driver installation...\\n" - if [ ! -d $MONGO_C_DRIVER_ROOT ]; then - if [ ! $BUILD_CLANG8 ]; then - PINNED_TOOLCHAIN="" + if [ $BUILD_MONGO ]; then + printf "Checking MongoDB installation...\\n" + if [ ! -d $MONGODB_ROOT ] || [ $FORCE_BUILD ]; then + printf "Installing MongoDB into ${MONGODB_ROOT}...\\n" + curl -OL http://downloads.mongodb.org/linux/mongodb-linux-x86_64-ubuntu$OS_MAJ$OS_MIN-$MONGODB_VERSION.tgz \ + && tar -xzf mongodb-linux-x86_64-ubuntu$OS_MAJ$OS_MIN-$MONGODB_VERSION.tgz \ + && mv $SRC_LOCATION/mongodb-linux-x86_64-ubuntu$OS_MAJ$OS_MIN-$MONGODB_VERSION $MONGODB_ROOT \ + && touch $MONGODB_LOG_LOCATION/mongod.log \ + && rm -f mongodb-linux-x86_64-ubuntu$OS_MAJ$OS_MIN-$MONGODB_VERSION.tgz \ + && cp -f $REPO_ROOT/scripts/mongod.conf $MONGODB_CONF \ + && mkdir -p $MONGODB_DATA_LOCATION \ + && rm -rf $MONGODB_LINK_LOCATION \ + && rm -rf $BIN_LOCATION/mongod \ + && ln -s $MONGODB_ROOT $MONGODB_LINK_LOCATION \ + && ln -s $MONGODB_LINK_LOCATION/bin/mongod $BIN_LOCATION/mongod \ + || exit 1 + printf " - MongoDB successfully installed @ ${MONGODB_ROOT} (Symlinked to ${MONGODB_LINK_LOCATION}).\\n" + else + printf " - MongoDB found with correct version @ ${MONGODB_ROOT} (Symlinked to ${MONGODB_LINK_LOCATION}).\\n" fi - printf "Installing MongoDB C driver...\\n" - curl -LO https://github.com/mongodb/mongo-c-driver/releases/download/$MONGO_C_DRIVER_VERSION/mongo-c-driver-$MONGO_C_DRIVER_VERSION.tar.gz \ - && tar -xzf mongo-c-driver-$MONGO_C_DRIVER_VERSION.tar.gz \ - && cd mongo-c-driver-$MONGO_C_DRIVER_VERSION \ - && mkdir -p cmake-build \ - && cd cmake-build \ - && $CMAKE -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$PREFIX -DENABLE_BSON=ON -DENABLE_SSL=OPENSSL -DENABLE_AUTOMATIC_INIT_AND_CLEANUP=OFF -DENABLE_STATIC=ON $PINNED_TOOLCHAIN .. \ - && make -j"${JOBS}" \ - && make install \ - && cd ../.. \ - && rm mongo-c-driver-$MONGO_C_DRIVER_VERSION.tar.gz \ - || exit 1 - printf " - MongoDB C driver successfully installed @ ${MONGO_C_DRIVER_ROOT}.\\n" - else - printf " - MongoDB C driver found with correct version @ ${MONGO_C_DRIVER_ROOT}.\\n" - fi - - if [ $? -ne 0 ]; then exit -1; fi - printf "Checking MongoDB C++ driver installation...\\n" - if [ ! -d $MONGO_CXX_DRIVER_ROOT ]; then - if [ ! $BUILD_CLANG8 ]; then - PINNED_TOOLCHAIN="" + if [ $? -ne 0 ]; then exit -1; fi + printf "Checking MongoDB C driver installation...\\n" + if [ ! -d $MONGO_C_DRIVER_ROOT ] || [ $FORCE_BUILD ]; then + if [ ! 
$BUILD_CLANG8 ]; then + PINNED_TOOLCHAIN="" + fi + printf "Installing MongoDB C driver...\\n" + curl -LO https://github.com/mongodb/mongo-c-driver/releases/download/$MONGO_C_DRIVER_VERSION/mongo-c-driver-$MONGO_C_DRIVER_VERSION.tar.gz \ + && tar -xzf mongo-c-driver-$MONGO_C_DRIVER_VERSION.tar.gz \ + && cd mongo-c-driver-$MONGO_C_DRIVER_VERSION \ + && mkdir -p cmake-build \ + && cd cmake-build \ + && $CMAKE -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$PREFIX -DENABLE_BSON=ON -DENABLE_SSL=OPENSSL -DENABLE_AUTOMATIC_INIT_AND_CLEANUP=OFF -DENABLE_STATIC=ON $PINNED_TOOLCHAIN .. \ + && make -j"${JOBS}" \ + && make install \ + && cd ../.. \ + && rm mongo-c-driver-$MONGO_C_DRIVER_VERSION.tar.gz \ + || exit 1 + printf " - MongoDB C driver successfully installed @ ${MONGO_C_DRIVER_ROOT}.\\n" + else + printf " - MongoDB C driver found with correct version @ ${MONGO_C_DRIVER_ROOT}.\\n" fi - printf "Installing MongoDB C++ driver...\\n" - curl -L https://github.com/mongodb/mongo-cxx-driver/archive/r$MONGO_CXX_DRIVER_VERSION.tar.gz -o mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION.tar.gz \ - && tar -xzf mongo-cxx-driver-r${MONGO_CXX_DRIVER_VERSION}.tar.gz \ - && cd mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION \ - && sed '111 s/count/static_cast(count)/' src/mongocxx/options/change_stream.cpp &> src/mongocxx/options/change_stream.cpp \ - && cd build \ - && $CMAKE -DBUILD_SHARED_LIBS=OFF -DCXX_CMAKE_FLAGS=-D_GLIBCXX_USE_CXX11_ABI=1 -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$PREFIX $PINNED_TOOLCHAIN .. \ - && make -j"${JOBS}" VERBOSE=1 \ - && make install \ - && cd ../.. \ - && rm -f mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION.tar.gz \ - || exit 1 - printf " - MongoDB C++ driver successfully installed @ ${MONGO_CXX_DRIVER_ROOT}.\\n" - else - printf " - MongoDB C++ driver found with correct version @ ${MONGO_CXX_DRIVER_ROOT}.\\n" - fi - if [ $? -ne 0 ]; then exit -1; fi + if [ $? -ne 0 ]; then exit -1; fi + printf "Checking MongoDB C++ driver installation...\\n" + if [ ! -d $MONGO_CXX_DRIVER_ROOT ] || [ $FORCE_BUILD ]; then + if [ ! $BUILD_CLANG8 ]; then + PINNED_TOOLCHAIN="" + fi + printf "Installing MongoDB C++ driver...\\n" + curl -L https://github.com/mongodb/mongo-cxx-driver/archive/r$MONGO_CXX_DRIVER_VERSION.tar.gz -o mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION.tar.gz \ + && tar -xzf mongo-cxx-driver-r${MONGO_CXX_DRIVER_VERSION}.tar.gz \ + && cd mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION \ + && sed '111 s/count/static_cast(count)/' src/mongocxx/options/change_stream.cpp &> src/mongocxx/options/change_stream.cpp \ + && cd build \ + && $CMAKE -DBUILD_SHARED_LIBS=OFF -DCXX_CMAKE_FLAGS=-D_GLIBCXX_USE_CXX11_ABI=1 -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$PREFIX $PINNED_TOOLCHAIN .. \ + && make -j"${JOBS}" VERBOSE=1 \ + && make install \ + && cd ../.. \ + && rm -f mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION.tar.gz \ + || exit 1 + printf " - MongoDB C++ driver successfully installed @ ${MONGO_CXX_DRIVER_ROOT}.\\n" + else + printf " - MongoDB C++ driver found with correct version @ ${MONGO_CXX_DRIVER_ROOT}.\\n" + fi + if [ $? -ne 0 ]; then exit -1; fi - printf "\\n" + printf "\\n" + fi printf "Checking LLVM 4 support...\\n" - if [ ! -d $LLVM_ROOT ]; then + if [ ! 
-d $LLVM_ROOT ] || [ $FORCE_BUILD ]; then ln -s /usr/lib/llvm-4.0 $LLVM_ROOT \ || exit 1 printf " - LLVM successfully linked from /usr/lib/llvm-4.0 to ${LLVM_ROOT}\\n" From 9e81631073e3f319f36755b5fe2efa27b4665023 Mon Sep 17 00:00:00 2001 From: Bucky Kittinger Date: Tue, 23 Apr 2019 11:29:16 -0400 Subject: [PATCH 489/680] add force flag temporarily --- .buildkite/pipeline.yml | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml index 41e29d8c19a..57bcdbf6d92 100644 --- a/.buildkite/pipeline.yml +++ b/.buildkite/pipeline.yml @@ -1,7 +1,7 @@ steps: - command: | # Amazon Linux 2 Build echo "+++ :hammer: Building" - ./scripts/eosio_build.sh -y -P + ./scripts/eosio_build.sh -y -P -f echo "--- :compression: Compressing build directory" tar -pczf build.tar.gz build if [[ ! -f build.tar.gz ]]; then echo 'ERROR: No build.tar.gz artifact found!' && exit 1; fi @@ -24,7 +24,7 @@ steps: - command: | # CentOS 7 Build echo "+++ :hammer: Building" - ./scripts/eosio_build.sh -y -P + ./scripts/eosio_build.sh -y -P -f echo "--- :compression: Compressing build directory" tar -pczf build.tar.gz build if [[ ! -f build.tar.gz ]]; then echo 'ERROR: No build.tar.gz artifact found!' && exit 1; fi @@ -47,7 +47,7 @@ steps: - command: | # Ubuntu 16.04 Build echo "+++ :hammer: Building" - ./scripts/eosio_build.sh -y -P + ./scripts/eosio_build.sh -y -P -f echo "--- :compression: Compressing build directory" tar -pczf build.tar.gz build if [[ ! -f build.tar.gz ]]; then echo 'ERROR: No build.tar.gz artifact found!' && exit 1; fi @@ -70,7 +70,7 @@ steps: - command: | # Ubuntu 18.04 Build echo "+++ :hammer: Building" - ./scripts/eosio_build.sh -y -P + ./scripts/eosio_build.sh -y -P -f echo "--- :compression: Compressing build directory" tar -pczf build.tar.gz build if [[ ! -f build.tar.gz ]]; then echo 'ERROR: No build.tar.gz artifact found!' && exit 1; fi @@ -97,7 +97,7 @@ steps: ln -s "$(pwd)" /data/job cd /data/job echo "+++ Building :hammer:" - ./scripts/eosio_build.sh -y -P + ./scripts/eosio_build.sh -y -P -f echo "--- Compressing build directory :compression:" tar -pczf build.tar.gz build if [[ ! -f build.tar.gz ]]; then echo 'ERROR: No build.tar.gz artifact found!' && exit 1; fi @@ -435,4 +435,4 @@ steps: label: "Git Submodule Regression Check" agents: queue: "automation-large-builder-fleet" - timeout: 5 \ No newline at end of file + timeout: 5 From 77a68c50adfe5f6918c56a462f291b8f275496b9 Mon Sep 17 00:00:00 2001 From: Bucky Kittinger Date: Tue, 23 Apr 2019 13:26:02 -0400 Subject: [PATCH 490/680] add clang8 bin to path for boost builds --- scripts/eosio_build_amazon.sh | 3 ++- scripts/eosio_build_centos.sh | 3 ++- scripts/eosio_build_fedora.sh | 3 ++- scripts/eosio_build_ubuntu.sh | 3 ++- 4 files changed, 8 insertions(+), 4 deletions(-) diff --git a/scripts/eosio_build_amazon.sh b/scripts/eosio_build_amazon.sh index 935764a3149..ae1a8a8fd24 100755 --- a/scripts/eosio_build_amazon.sh +++ b/scripts/eosio_build_amazon.sh @@ -198,7 +198,7 @@ if $PIN_COMPILER; then fi cd $SRC_LOCATION - printf "Checking zlib library (${BOOST_VERSION}) installation...\\n" + printf "Checking zlib library installation...\\n" if [ ! 
-d $OPT_LOCATION/zlib ] || [ $FORCE_BUILD ]; then printf "Installing zlib...\\n" curl -LO https://www.zlib.net/zlib-1.2.11.tar.gz && tar -xf zlib-1.2.11.tar.gz \ @@ -208,6 +208,7 @@ if $PIN_COMPILER; then || exit -1 fi cd $SRC_LOCATION + export PATH=$OPT_LOCATION/clang8/bin:$PATH printf "Checking Boost library (${BOOST_VERSION}) installation...\\n" BOOSTVERSION=$( grep "#define BOOST_VERSION" "$BOOST_ROOT/include/boost/version.hpp" 2>/dev/null | tail -1 | tr -s ' ' | cut -d\ -f3 ) if [ "${BOOSTVERSION}" != "${BOOST_VERSION_MAJOR}0${BOOST_VERSION_MINOR}0${BOOST_VERSION_PATCH}" ] || [ $FORCE_BUILD ]; then diff --git a/scripts/eosio_build_centos.sh b/scripts/eosio_build_centos.sh index 754b6164ad8..07964458b57 100755 --- a/scripts/eosio_build_centos.sh +++ b/scripts/eosio_build_centos.sh @@ -250,7 +250,7 @@ if $PIN_COMPILER; then fi cd $SRC_LOCATION - printf "Checking zlib library (${BOOST_VERSION}) installation...\\n" + printf "Checking zlib library installation...\\n" if [ ! -d $OPT_LOCATION/zlib ] || [ $FORCE_BUILD ]; then printf "Installing zlib...\\n" curl -LO https://www.zlib.net/zlib-1.2.11.tar.gz && tar -xf zlib-1.2.11.tar.gz \ @@ -260,6 +260,7 @@ if $PIN_COMPILER; then || exit -1 fi cd $SRC_LOCATION + export PATH=$OPT_LOCATION/clang8/bin:$PATH printf "Checking Boost library (${BOOST_VERSION}) installation...\\n" BOOSTVERSION=$( grep "#define BOOST_VERSION" "$BOOST_ROOT/include/boost/version.hpp" 2>/dev/null | tail -1 | tr -s ' ' | cut -d\ -f3 ) if [ "${BOOSTVERSION}" != "${BOOST_VERSION_MAJOR}0${BOOST_VERSION_MINOR}0${BOOST_VERSION_PATCH}" ] || [ $FORCE_BUILD ]; then diff --git a/scripts/eosio_build_fedora.sh b/scripts/eosio_build_fedora.sh index 8acc15a9ced..2cf077538c7 100755 --- a/scripts/eosio_build_fedora.sh +++ b/scripts/eosio_build_fedora.sh @@ -193,7 +193,7 @@ if $PIN_COMPILER; then fi cd $SRC_LOCATION - printf "Checking zlib library (${BOOST_VERSION}) installation...\\n" + printf "Checking zlib library installation...\\n" if [ ! -d $OPT_LOCATION/zlib ] || [ $FORCE_BUILD ]; then printf "Installing zlib...\\n" curl -LO https://www.zlib.net/zlib-1.2.11.tar.gz && tar -xf zlib-1.2.11.tar.gz \ @@ -203,6 +203,7 @@ if $PIN_COMPILER; then || exit -1 fi cd $SRC_LOCATION + export PATH=$OPT_LOCATION/clang8/bin:$PATH printf "Checking Boost library (${BOOST_VERSION}) installation...\\n" BOOSTVERSION=$( grep "#define BOOST_VERSION" "$BOOST_ROOT/include/boost/version.hpp" 2>/dev/null | tail -1 | tr -s ' ' | cut -d\ -f3 ) if [ "${BOOSTVERSION}" != "${BOOST_VERSION_MAJOR}0${BOOST_VERSION_MINOR}0${BOOST_VERSION_PATCH}" ] || [ $FORCE_BUILD ]; then diff --git a/scripts/eosio_build_ubuntu.sh b/scripts/eosio_build_ubuntu.sh index 5091b8c380e..93a995d009b 100755 --- a/scripts/eosio_build_ubuntu.sh +++ b/scripts/eosio_build_ubuntu.sh @@ -218,7 +218,7 @@ if $PIN_COMPILER; then fi cd $SRC_LOCATION - printf "Checking zlib library (${BOOST_VERSION}) installation...\\n" + printf "Checking zlib library installation...\\n" if [ ! 
-d $OPT_LOCATION/zlib ] || [ $FORCE_BUILD ]; then
 		printf "Installing zlib...\\n"
 		curl -LO https://www.zlib.net/zlib-1.2.11.tar.gz && tar -xf zlib-1.2.11.tar.gz \
@@ -229,6 +229,7 @@ if $PIN_COMPILER; then
 	fi
 
 	cd $SRC_LOCATION
+	export PATH=$OPT_LOCATION/clang8/bin:$PATH
 	printf "Checking Boost library (${BOOST_VERSION}) installation...\\n"
 	BOOSTVERSION=$( grep "#define BOOST_VERSION" "$BOOST_ROOT/include/boost/version.hpp" 2>/dev/null | tail -1 | tr -s ' ' | cut -d\ -f3 )
 	if [ "${BOOSTVERSION}" != "${BOOST_VERSION_MAJOR}0${BOOST_VERSION_MINOR}0${BOOST_VERSION_PATCH}" ] || [ $FORCE_BUILD ]; then
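A note on [PATCH 490/680]: Boost's bootstrap/b2 resolves the C++ compiler through PATH rather than through the CC/CXX variables the rest of the build uses, so a pinned-compiler build has to put the freshly built Clang 8 ahead of the system toolchain before the Boost check runs. A minimal sketch of the pattern these scripts rely on, assuming the `$OPT_LOCATION/clang8` layout used above:

```bash
# Sketch only; assumes the pinned Clang 8 was installed under $OPT_LOCATION/clang8
# by the eosio_build_*.sh scripts above.
export PATH=$OPT_LOCATION/clang8/bin:$PATH

# b2 should now resolve the pinned compiler instead of the system one:
command -v clang++   # expected: $OPT_LOCATION/clang8/bin/clang++
```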
From 19ea05cbdf55e25855e763acf7278de833d2c67e Mon Sep 17 00:00:00 2001
From: arhag
Date: Tue, 23 Apr 2019 13:47:12 -0400
Subject: [PATCH 491/680] add get_account_ram_correction RPC to producer API

---
 .../producer_api_plugin.cpp                   |  2 +
 .../eosio/producer_plugin/producer_plugin.hpp | 16 +++++++
 plugins/producer_plugin/producer_plugin.cpp   | 44 +++++++++++++++++++
 3 files changed, 62 insertions(+)

diff --git a/plugins/producer_api_plugin/producer_api_plugin.cpp b/plugins/producer_api_plugin/producer_api_plugin.cpp
index b0f06cabea4..b513ae6a442 100644
--- a/plugins/producer_api_plugin/producer_api_plugin.cpp
+++ b/plugins/producer_api_plugin/producer_api_plugin.cpp
@@ -126,6 +126,8 @@ void producer_api_plugin::plugin_startup() {
        CALL(producer, producer, get_supported_protocol_features,
             INVOKE_R_R(producer, get_supported_protocol_features, producer_plugin::get_supported_protocol_features_params), 201),
+       CALL(producer, producer, get_account_ram_corrections,
+            INVOKE_R_R(producer, get_account_ram_corrections, producer_plugin::get_account_ram_corrections_params), 201),
    });
 }
 
diff --git a/plugins/producer_plugin/include/eosio/producer_plugin/producer_plugin.hpp b/plugins/producer_plugin/include/eosio/producer_plugin/producer_plugin.hpp
index eee97eee1f9..5d1335407ca 100644
--- a/plugins/producer_plugin/include/eosio/producer_plugin/producer_plugin.hpp
+++ b/plugins/producer_plugin/include/eosio/producer_plugin/producer_plugin.hpp
@@ -60,6 +60,18 @@ class producer_plugin : public appbase::plugin<producer_plugin> {
          bool exclude_unactivatable = false;
       };
 
+      struct get_account_ram_corrections_params {
+         optional<account_name>  lower_bound;
+         optional<account_name>  upper_bound;
+         uint32_t                limit = 10;
+         bool                    reverse = false;
+      };
+
+      struct get_account_ram_corrections_result {
+         std::vector<fc::variant> rows;
+         optional<account_name>   more;
+      };
+
       template<typename T>
       using next_function = std::function<void(const fc::static_variant<fc::exception_ptr, T>&)>;
 
@@ -100,6 +112,8 @@ class producer_plugin : public appbase::plugin<producer_plugin> {
 
       fc::variants get_supported_protocol_features( const get_supported_protocol_features_params& params ) const;
 
+      get_account_ram_corrections_result get_account_ram_corrections( const get_account_ram_corrections_params& params ) const;
+
       signal<void(const signed_block_ptr&)> confirmed_block;
    private:
       std::shared_ptr<class producer_plugin_impl> my;
@@ -114,3 +128,5 @@ FC_REFLECT(eosio::producer_plugin::integrity_hash_information, (head_block_id)(i
 FC_REFLECT(eosio::producer_plugin::snapshot_information, (head_block_id)(snapshot_name))
 FC_REFLECT(eosio::producer_plugin::scheduled_protocol_feature_activations, (protocol_features_to_activate))
 FC_REFLECT(eosio::producer_plugin::get_supported_protocol_features_params, (exclude_disabled)(exclude_unactivatable))
+FC_REFLECT(eosio::producer_plugin::get_account_ram_corrections_params, (lower_bound)(upper_bound)(limit)(reverse))
+FC_REFLECT(eosio::producer_plugin::get_account_ram_corrections_result, (rows)(more))
diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp
index cb6eb48504e..e2586db8fb7 100644
--- a/plugins/producer_plugin/producer_plugin.cpp
+++ b/plugins/producer_plugin/producer_plugin.cpp
@@ -1167,6 +1167,50 @@ fc::variants producer_plugin::get_supported_protocol_features( const get_support
    return results;
 }
 
+producer_plugin::get_account_ram_corrections_result
+producer_plugin::get_account_ram_corrections( const get_account_ram_corrections_params& params ) const {
+   get_account_ram_corrections_result result;
+   const auto& db = my->chain_plug->chain().db();
+
+   const auto& idx = db.get_index<chain::account_ram_correction_index, chain::by_name>();
+   account_name lower_bound_value = std::numeric_limits<uint64_t>::lowest();
+   account_name upper_bound_value = std::numeric_limits<uint64_t>::max();
+
+   if( params.lower_bound ) {
+      lower_bound_value = *params.lower_bound;
+   }
+
+   if( params.upper_bound ) {
+      upper_bound_value = *params.upper_bound;
+   }
+
+   if( upper_bound_value < lower_bound_value )
+      return result;
+
+   auto walk_range = [&]( auto itr, auto end_itr ) {
+      for( unsigned int count = 0;
+           count < params.limit && itr != end_itr;
+           ++itr )
+      {
+         result.rows.push_back( fc::variant( *itr ) );
+         ++count;
+      }
+      if( itr != end_itr ) {
+         result.more = itr->name;
+      }
+   };
+
+   auto lower = idx.lower_bound( lower_bound_value );
+   auto upper = idx.upper_bound( upper_bound_value );
+   if( params.reverse ) {
+      walk_range( boost::make_reverse_iterator(upper), boost::make_reverse_iterator(lower) );
+   } else {
+      walk_range( lower, upper );
+   }
+
+   return result;
+}
+
 optional<fc::time_point> producer_plugin_impl::calculate_next_block_time(const account_name& producer_name, const block_timestamp_type& current_block_time) const {
    chain::controller& chain = chain_plug->chain();
    const auto& hbs = chain.head_block_state();
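A note on [PATCH 491/680]: the new RPC pages through account RAM corrections using an optional [lower_bound, upper_bound] account-name window, a row `limit`, and a `reverse` flag; when rows remain past the limit, `more` carries the next account name to resume from. A hedged usage sketch follows; the endpoint path is inferred from the `CALL(producer, ...)` registration above, and it assumes a local nodeos with the producer API plugin enabled:

```bash
# Sketch only: page forward through account RAM corrections, 10 rows at a time.
# The URL follows producer_api_plugin's usual /v1/producer/<method> convention.
curl -s http://127.0.0.1:8888/v1/producer/get_account_ram_corrections \
     -d '{"limit": 10}'

# Resume from the cursor returned in "more" ("someaccount1" is hypothetical):
curl -s http://127.0.0.1:8888/v1/producer/get_account_ram_corrections \
     -d '{"lower_bound": "someaccount1", "limit": 10}'
```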
From 9826e640dca4154e97e1d9bdf6bd5e1f975b845f Mon Sep 17 00:00:00 2001
From: Kevin Heifner
Date: Tue, 23 Apr 2019 12:51:38 -0500
Subject: [PATCH 492/680] Keep block log open to minimize open/close of file

---
 libraries/chain/block_log.cpp | 115 ++++++++++++++--------------------
 1 file changed, 46 insertions(+), 69 deletions(-)

diff --git a/libraries/chain/block_log.cpp b/libraries/chain/block_log.cpp
index aa1f65cc1cd..e90019f0975 100644
--- a/libraries/chain/block_log.cpp
+++ b/libraries/chain/block_log.cpp
@@ -9,6 +9,7 @@
 
 #define LOG_READ  (std::ios::in | std::ios::binary)
 #define LOG_WRITE (std::ios::out | std::ios::binary | std::ios::app)
+#define LOG_RW ( std::ios::in | std::ios::out | std::ios::binary )
 
 namespace eosio { namespace chain {
 
@@ -31,47 +32,42 @@ namespace eosio { namespace chain {
             std::fstream            index_stream;
             fc::path                block_file;
             fc::path                index_file;
-            bool                    block_write;
-            bool                    index_write;
+            bool                    open_files = false;
             bool                    genesis_written_to_block_log = false;
             uint32_t                version = 0;
             uint32_t                first_block_num = 0;
 
-            inline void check_block_read() {
-               if (block_write) {
-                  block_stream.close();
-                  block_stream.open(block_file.generic_string().c_str(), LOG_READ);
-                  block_write = false;
+            inline void check_open_files() {
+               if( !open_files ) {
+                  reopen();
                }
             }
+            void reopen();
 
-            inline void check_block_write() {
-               if (!block_write) {
+            void close() {
+               if( block_stream.is_open() )
                   block_stream.close();
-                  block_stream.open(block_file.generic_string().c_str(), LOG_WRITE);
-                  block_write = true;
-               }
-            }
-
-            inline void check_index_read() {
-               try {
-                  if (index_write) {
-                     index_stream.close();
-                     index_stream.open(index_file.generic_string().c_str(), LOG_READ);
-                     index_write = false;
-                  }
-               }
-               FC_LOG_AND_RETHROW()
-            }
-
-            inline void check_index_write() {
-               if (!index_write) {
+               if( index_stream.is_open() )
                   index_stream.close();
-                  index_stream.open(index_file.generic_string().c_str(), LOG_WRITE);
-                  index_write = true;
-               }
+               open_files = false;
             }
       };
+
+      void block_log_impl::reopen() {
+         close();
+
+         // open to create files if they don't exist
+         //ilog("Opening block log at ${path}", ("path", my->block_file.generic_string()));
+         block_stream.open(block_file.generic_string().c_str(), LOG_WRITE);
+         index_stream.open(index_file.generic_string().c_str(), LOG_WRITE);
+
+         close();
+
+         block_stream.open(block_file.generic_string().c_str(), LOG_RW);
+         index_stream.open(index_file.generic_string().c_str(), LOG_RW);
+
+         open_files = true;
+      }
    }
 
    block_log::block_log(const fc::path& data_dir)
@@ -88,26 +84,21 @@ namespace eosio { namespace chain {
   block_log::~block_log() {
      if (my) {
         flush();
+        my->close();
         my.reset();
      }
   }
 
   void block_log::open(const fc::path& data_dir) {
-      if (my->block_stream.is_open())
-         my->block_stream.close();
-      if (my->index_stream.is_open())
-         my->index_stream.close();
+      my->close();
 
      if (!fc::is_directory(data_dir))
         fc::create_directories(data_dir);
+
      my->block_file = data_dir / "blocks.log";
      my->index_file = data_dir / "blocks.index";
 
-      //ilog("Opening block log at ${path}", ("path", my->block_file.generic_string()));
-      my->block_stream.open(my->block_file.generic_string().c_str(), LOG_WRITE);
-      my->index_stream.open(my->index_file.generic_string().c_str(), LOG_WRITE);
-      my->block_write = true;
-      my->index_write = true;
+      my->reopen();
 
      /* On startup of the block log, there are several states the log file and the index file can be
       * in relation to each other.
@@ -132,7 +123,6 @@ namespace eosio { namespace chain {
 
      if (log_size) {
         ilog("Log is nonempty");
-         my->check_block_read();
         my->block_stream.seekg( 0 );
         my->version = 0;
         my->block_stream.read( (char*)&my->version, sizeof(my->version) );
@@ -155,9 +145,6 @@ namespace eosio { namespace chain {
         my->head_id = my->head->id();
 
         if (index_size) {
-            my->check_block_read();
-            my->check_index_read();
-
            ilog("Index is nonempty");
            uint64_t block_pos;
            my->block_stream.seekg(-sizeof(uint64_t), std::ios::end);
@@ -180,10 +167,9 @@ namespace eosio { namespace chain {
         }
      } else if (index_size) {
         ilog("Index is nonempty, remove and recreate it");
-         my->index_stream.close();
+         my->close();
         fc::remove_all(my->index_file);
-         my->index_stream.open(my->index_file.generic_string().c_str(), LOG_WRITE);
-         my->index_write = true;
+         my->reopen();
      }
   }
 
@@ -191,9 +177,10 @@ namespace eosio { namespace chain {
      try {
         EOS_ASSERT( my->genesis_written_to_block_log, block_log_append_fail, "Cannot append to block log until the genesis is first written" );
 
-         my->check_block_write();
-         my->check_index_write();
+         my->check_open_files();
 
+         my->block_stream.seekp(0, std::ios::end);
+         my->index_stream.seekp(0, std::ios::end);
         uint64_t pos = my->block_stream.tellp();
         EOS_ASSERT(my->index_stream.tellp() == sizeof(uint64_t) * (b->block_num() - my->first_block_num),
                   block_log_append_fail,
@@ -220,22 +207,17 @@ namespace eosio { namespace chain {
   }
 
   void block_log::reset( const genesis_state& gs, const signed_block_ptr& first_block, uint32_t first_block_num ) {
-      if (my->block_stream.is_open())
-         my->block_stream.close();
-      if (my->index_stream.is_open())
-         my->index_stream.close();
+      my->close();
 
      fc::remove_all(my->block_file);
      fc::remove_all(my->index_file);
 
-      my->block_stream.open(my->block_file.generic_string().c_str(), LOG_WRITE);
-      my->index_stream.open(my->index_file.generic_string().c_str(), LOG_WRITE);
-      my->block_write = true;
-      my->index_write = true;
+      my->reopen();
 
      auto data = fc::raw::pack(gs);
      my->version = 0; // version of 0 is invalid; it indicates that the genesis was not properly written to the block log
      my->first_block_num = first_block_num;
+     my->block_stream.seekp(0, std::ios::end);
      my->block_stream.write((char*)&my->version, sizeof(my->version));
      my->block_stream.write((char*)&my->first_block_num, sizeof(my->first_block_num));
      my->block_stream.write(data.data(), data.size());
@@ -251,22 +233,16 @@ namespace eosio { namespace chain {
 
      auto pos = my->block_stream.tellp();
 
-      my->block_stream.close();
-      my->block_stream.open(my->block_file.generic_string().c_str(), std::ios::in | std::ios::out | std::ios::binary ); // Bypass append-only writing just once
-
      static_assert( block_log::max_supported_version > 0, "a version number of zero is not supported" );
      my->version = block_log::max_supported_version;
      my->block_stream.seekp( 0 );
      my->block_stream.write( (char*)&my->version, sizeof(my->version) );
      my->block_stream.seekp( pos );
      flush();
-
-      my->block_write = false;
-      my->check_block_write(); // Reset to append-only writing.
   }
 
   std::pair<signed_block_ptr, uint64_t> block_log::read_block(uint64_t pos)const {
-      my->check_block_read();
+      my->check_open_files();
 
      my->block_stream.seekg(pos);
      std::pair<signed_block_ptr,uint64_t> result;
@@ -290,7 +266,7 @@ namespace eosio { namespace chain {
   }
 
   uint64_t block_log::get_block_pos(uint32_t block_num) const {
-      my->check_index_read();
+      my->check_open_files();
      if (!(my->head && block_num <= block_header::num_from_id(my->head_id) && block_num >= my->first_block_num))
         return npos;
      my->index_stream.seekg(sizeof(uint64_t) * (block_num - my->first_block_num));
@@ -300,7 +276,7 @@ namespace eosio { namespace chain {
   }
 
   signed_block_ptr block_log::read_head()const {
-      my->check_block_read();
+      my->check_open_files();
 
      uint64_t pos;
 
@@ -328,13 +304,13 @@ namespace eosio { namespace chain {
 
   void block_log::construct_index() {
      ilog("Reconstructing Block Log Index...");
-      my->index_stream.close();
+      my->close();
+
      fc::remove_all(my->index_file);
-      my->index_stream.open(my->index_file.generic_string().c_str(), LOG_WRITE);
-      my->index_write = true;
+
+      my->reopen();
 
      uint64_t end_pos;
-      my->check_block_read();
 
      my->block_stream.seekg(-sizeof( uint64_t), std::ios::end);
      my->block_stream.read((char*)&end_pos, sizeof(end_pos));
@@ -357,6 +333,7 @@ namespace eosio { namespace chain {
         my->block_stream.read((char*) &totem, sizeof(totem));
      }
 
+     my->index_stream.seekp(0, std::ios::end);
      while( pos < end_pos ) {
        fc::raw::unpack(my->block_stream, tmp);
        my->block_stream.read((char*)&pos, sizeof(pos));
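A note on [PATCH 492/680]: the refactor replaces the four mode-flipping check_*_read/write helpers with a single lazily opened read/write stream. reopen() first opens with LOG_WRITE so missing files get created, then reopens in LOG_RW; because the stream is no longer append-only, each writer must seekp(0, std::ios::end) itself. A minimal standalone sketch of the same lazy open-once pattern, using illustrative types only and not the actual block_log API:

```cpp
// Sketch of the lazy "open once, read/write anywhere" pattern used above.
// rw_log is a hypothetical single-file log, not the real block_log class.
#include <fstream>
#include <string>
#include <utility>

class rw_log {
   std::fstream stream_;
   std::string  path_;
   bool         open_ = false;

   void reopen() {
      if( stream_.is_open() ) stream_.close();
      // First open in append mode so the file is created if missing...
      stream_.open( path_, std::ios::out | std::ios::binary | std::ios::app );
      stream_.close();
      // ...then reopen read/write; this mode would fail on a nonexistent file.
      stream_.open( path_, std::ios::in | std::ios::out | std::ios::binary );
      open_ = true;
   }

public:
   explicit rw_log( std::string path ) : path_( std::move(path) ) {}

   void append( const char* data, std::streamsize n ) {
      if( !open_ ) reopen();
      stream_.seekp( 0, std::ios::end );  // RW streams are not append-only
      stream_.write( data, n );
   }

   void read_at( std::streamoff pos, char* out, std::streamsize n ) {
      if( !open_ ) reopen();
      stream_.seekg( pos );
      stream_.read( out, n );
   }
};
```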
From 5b342dcb98d15ac021d6a9abf4dd92050a897dc6 Mon Sep 17 00:00:00 2001
From: Bucky Kittinger
Date: Tue, 23 Apr 2019 15:36:08 -0400
Subject: [PATCH 493/680] modified .pipelinebranch

---
 .pipelinebranch | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.pipelinebranch b/.pipelinebranch
index 089fa42ccc2..e763e7d7fdc 100644
--- a/.pipelinebranch
+++ b/.pipelinebranch
@@ -1 +1 @@
-use-protocol-features-sync-nodes
+protocol-features-sync-nodes
From 80f4551c2cef2d660c4ac8940a2fd292ab76d998 Mon Sep 17 00:00:00 2001
From: arhag
Date: Tue, 23 Apr 2019 16:50:42 -0400
Subject: [PATCH 494/680] allow opening block log with no blocks (fixes
 undefined behavior bug); construct_index should leave index file empty if
 block log contains no blocks

---
 libraries/chain/block_log.cpp | 12 +++++++++++-
 1 file changed, 11 insertions(+), 1 deletion(-)

diff --git a/libraries/chain/block_log.cpp b/libraries/chain/block_log.cpp
index 827cab3d298..5a490bd9432 100644
--- a/libraries/chain/block_log.cpp
+++ 
b/libraries/chain/block_log.cpp @@ -152,7 +152,11 @@ namespace eosio { namespace chain { } my->head = read_head(); - my->head_id = my->head->id(); + if( my->head ) { + my->head_id = my->head->id(); + } else { + my->head_id = {}; + } if (index_size) { my->check_block_read(); @@ -341,6 +345,12 @@ namespace eosio { namespace chain { my->block_stream.seekg(-sizeof( uint64_t), std::ios::end); my->block_stream.read((char*)&end_pos, sizeof(end_pos)); + + if( end_pos == npos ) { + ilog( "Block log contains no blocks. No need to construct index." ); + return; + } + signed_block tmp; uint64_t pos = 0; From f123138e7627d85aa06564a2bf30e3206579bbbd Mon Sep 17 00:00:00 2001 From: Bucky Kittinger Date: Tue, 23 Apr 2019 17:30:08 -0400 Subject: [PATCH 495/680] addressed changes --- CMakeLists.txt | 4 ---- scripts/eosio_build.sh | 8 ++++---- 2 files changed, 4 insertions(+), 8 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 7ff89762398..4d708ac5d44 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -112,11 +112,7 @@ FIND_PACKAGE(Boost 1.67 REQUIRED COMPONENTS unit_test_framework iostreams) -# Some new stdlibc++s will #error on ; a problem for boost pre-1.69 add_definitions(-DBOOST_ASIO_DISABLE_STD_EXPERIMENTAL_STRING_VIEW) -if( APPLE AND UNIX ) - add_definitions(-DBOOST_ASIO_DISABLE_STD_EXPERIMENTAL_STRING_VIEW) -endif() set(CMAKE_THREAD_PREFER_PTHREAD TRUE) set(THREADS_PREFER_PTHREAD_FLAG TRUE) diff --git a/scripts/eosio_build.sh b/scripts/eosio_build.sh index 3cd8ec6a7d5..05a3903ce20 100755 --- a/scripts/eosio_build.sh +++ b/scripts/eosio_build.sh @@ -247,7 +247,7 @@ else fi fi -if $PIN_COMPILER; then +if [ $PIN_COMPILER ]; then BUILD_CLANG8=true CPP_COMP=${OPT_LOCATION}/clang8/bin/clang++ CC_COMP=${OPT_LOCATION}/clang8/bin/clang @@ -367,17 +367,17 @@ printf "## ENABLE_COVERAGE_TESTING=%s\\n" "${ENABLE_COVERAGE_TESTING}" cd $BUILD_DIR -if $PIN_COMPILER; then +if [ $PIN_COMPILER ]; then $CMAKE -DCMAKE_BUILD_TYPE="${CMAKE_BUILD_TYPE}" -DCMAKE_TOOLCHAIN_FILE=$BUILD_DIR/pinned_toolchain.cmake \ -DCORE_SYMBOL_NAME="${CORE_SYMBOL_NAME}" \ - -DOPENSSL_ROOT_DIR="${OPENSSL_ROOT_DIR}" -DBUILD_MONGO_DB_PLUGIN=false \ + -DOPENSSL_ROOT_DIR="${OPENSSL_ROOT_DIR}" -DBUILD_MONGO_DB_PLUGIN=$ENABLE_MONGO \ -DENABLE_COVERAGE_TESTING="${ENABLE_COVERAGE_TESTING}" -DBUILD_DOXYGEN="${DOXYGEN}" \ -DCMAKE_PREFIX_PATH=$PREFIX -DCMAKE_PREFIX_PATH=$OPT_LOCATION/llvm4\ -DCMAKE_INSTALL_PREFIX=$OPT_LOCATION/eosio $LOCAL_CMAKE_FLAGS -DEOSIO_PIN_COMPILER=1 "${REPO_ROOT}" else $CMAKE -DCMAKE_BUILD_TYPE="${CMAKE_BUILD_TYPE}" -DCMAKE_CXX_COMPILER="${CXX}" \ -DCMAKE_C_COMPILER="${CC}" -DCORE_SYMBOL_NAME="${CORE_SYMBOL_NAME}" \ - -DOPENSSL_ROOT_DIR="${OPENSSL_ROOT_DIR}" -DBUILD_MONGO_DB_PLUGIN=true \ + -DOPENSSL_ROOT_DIR="${OPENSSL_ROOT_DIR}" -DBUILD_MONGO_DB_PLUGIN=$ENABLE_MONGO \ -DENABLE_COVERAGE_TESTING="${ENABLE_COVERAGE_TESTING}" -DBUILD_DOXYGEN="${DOXYGEN}" \ -DCMAKE_PREFIX_PATH=$PREFIX \ -DCMAKE_INSTALL_PREFIX=$OPT_LOCATION/eosio $LOCAL_CMAKE_FLAGS $PIN_COMPILER_CMAKE "${REPO_ROOT}" From caf409e31166ec40dcec0fbd1cbefce2b5d33909 Mon Sep 17 00:00:00 2001 From: Nathan Pierce Date: Tue, 23 Apr 2019 19:39:10 -0400 Subject: [PATCH 496/680] pipeline changes to use new images --- .buildkite/pipeline.yml | 30 +++++++++++++++--------------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml index 57bcdbf6d92..f0404f90182 100644 --- a/.buildkite/pipeline.yml +++ b/.buildkite/pipeline.yml @@ -17,7 +17,7 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: 
"436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-2" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-3" propagate-environment: true workdir: /data/job timeout: 120 @@ -40,7 +40,7 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-2" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-3" propagate-environment: true workdir: /data/job timeout: 120 @@ -63,7 +63,7 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu16_2-2" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu16_2-3" propagate-environment: true workdir: /data/job timeout: 120 @@ -86,7 +86,7 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-2" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-3" propagate-environment: true workdir: /data/job timeout: 120 @@ -127,7 +127,7 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-2" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-3" propagate-environment: true workdir: /data/job timeout: 60 @@ -148,7 +148,7 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-2" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-3" propagate-environment: true workdir: /data/job timeout: 60 @@ -170,7 +170,7 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-2" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-3" propagate-environment: true workdir: /data/job timeout: 60 @@ -191,7 +191,7 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-2" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-3" propagate-environment: true workdir: /data/job timeout: 60 @@ -213,7 +213,7 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu16_2-2" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu16_2-3" propagate-environment: true workdir: /data/job timeout: 60 @@ -234,7 +234,7 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu16_2-2" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu16_2-3" propagate-environment: true workdir: /data/job timeout: 60 @@ -256,7 +256,7 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-2" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-3" propagate-environment: true workdir: /data/job timeout: 60 @@ -277,7 +277,7 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-2" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-3" propagate-environment: true workdir: /data/job timeout: 60 @@ -336,7 +336,7 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-2" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-3" propagate-environment: true workdir: /data/job env: @@ -364,7 +364,7 @@ steps: 
region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu16_2-2" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu16_2-3" propagate-environment: true workdir: /data/job env: @@ -392,7 +392,7 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-2" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-3" propagate-environment: true workdir: /data/job env: From c520f37608da715a826f6c67a3f33276bb7fa39c Mon Sep 17 00:00:00 2001 From: Bucky Kittinger Date: Wed, 24 Apr 2019 10:40:17 -0400 Subject: [PATCH 497/680] addressed comments --- scripts/eosio_build.sh | 2 ++ scripts/eosio_build_amazon.sh | 14 +++++++++++++- scripts/eosio_build_centos.sh | 14 +++++++++++++- scripts/eosio_build_darwin.sh | 10 ++++++++++ scripts/eosio_build_fedora.sh | 13 ++++++++++++- scripts/eosio_build_ubuntu.sh | 13 ++++++++++++- 6 files changed, 62 insertions(+), 4 deletions(-) diff --git a/scripts/eosio_build.sh b/scripts/eosio_build.sh index 05a3903ce20..717e723ed03 100755 --- a/scripts/eosio_build.sh +++ b/scripts/eosio_build.sh @@ -48,6 +48,7 @@ txtrst=$(tput sgr0) SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" REPO_ROOT="${SCRIPT_DIR}/.." BUILD_DIR="${REPO_ROOT}/build" +ENABLE_MONGO=false export BUILD_DIR=$BUILD_DIR @@ -129,6 +130,7 @@ if [ $# -ne 0 ]; then ;; m) BUILD_MONGO=1 + ENABLE_MONGO=1 ;; \? ) printf "\\nInvalid Option: %s\\n" "-${OPTARG}" 1>&2 diff --git a/scripts/eosio_build_amazon.sh b/scripts/eosio_build_amazon.sh index ae1a8a8fd24..36aaef767c0 100755 --- a/scripts/eosio_build_amazon.sh +++ b/scripts/eosio_build_amazon.sh @@ -120,6 +120,17 @@ fi printf "\\n" +### clean up force build before starting +if [ $FORCE_BUILD ];then + rm -rf \ + ${SRC_LOCATION}/cmake-$CMAKE_VERSION \ + ${SRC_LOCATION}/llvm ${OPT_LOCATION}/llvm4 \ + ${TMP_LOCATION}/clang8 ${OPT_LOCATION}/clang8 \ + ${SRC_LOCATION}/zlib ${OPT_LOCATION}/zlib \ + ${SRC_LOCATION}/boost \ + ${SRC_LOCATION}/mongodb-linux-x86_64-amazon-$MONGODB_VERSION \ + ${SRC_LOCATION}/mongo-c-driver-$MONGO_C_DRIVER_VERSION \ + ${SRC_LOCATION}/mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION printf "Checking CMAKE installation...\\n" if [ ! -e $CMAKE ] || [ $FORCE_BUILD ]; then @@ -142,7 +153,8 @@ if [ $? -ne 0 ]; then exit -1; fi printf "\\n" -if $PIN_COMPILER; then + +if [ $PIN_COMPILER ]; then printf "Checking Clang 8 support...\\n" if [ ! -d $CLANG8_ROOT ] || [ $FORCE_BUILD ]; then printf "Installing Clang 8...\\n" diff --git a/scripts/eosio_build_centos.sh b/scripts/eosio_build_centos.sh index 07964458b57..f8ae55dc9a3 100755 --- a/scripts/eosio_build_centos.sh +++ b/scripts/eosio_build_centos.sh @@ -171,6 +171,18 @@ fi printf "\\n" +### clean up force build before starting +if [ $FORCE_BUILD ];then + rm -rf \ + ${SRC_LOCATION}/cmake-$CMAKE_VERSION \ + ${SRC_LOCATION}/llvm ${OPT_LOCATION}/llvm4 \ + ${TMP_LOCATION}/clang8 ${OPT_LOCATION}/clang8 \ + ${SRC_LOCATION}/zlib ${OPT_LOCATION}/zlib \ + ${SRC_LOCATION}/boost ${BOOST_ROOT} \ + ${SRC_LOCATION}/mongodb-linux-x86_64-amazon-$MONGODB_VERSION \ + ${SRC_LOCATION}/mongo-c-driver-$MONGO_C_DRIVER_VERSION \ + ${SRC_LOCATION}/mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION + printf "Checking CMAKE installation...\\n" if [ ! 
-e $CMAKE ] || [ $FORCE_BUILD ]; then printf "Installing CMAKE...\\n" @@ -194,7 +206,7 @@ printf "\\n" export CPATH="${CPATH}:${PYTHON3PATH}/root/usr/include/python3.6m" # m on the end causes problems with boost finding python3 -if $PIN_COMPILER; then +if [ $PIN_COMPILER ]; then printf "Checking Clang 8 support...\\n" if [ ! -d $CLANG8_ROOT ] || [ $FORCE_BUILD ]; then printf "Installing Clang 8...\\n" diff --git a/scripts/eosio_build_darwin.sh b/scripts/eosio_build_darwin.sh index 7ea9781197f..41352f4bcbf 100755 --- a/scripts/eosio_build_darwin.sh +++ b/scripts/eosio_build_darwin.sh @@ -161,6 +161,16 @@ fi printf "\\n" +### clean up force build before starting +if [ $FORCE_BUILD ];then + rm -rf \ + ${SRC_LOCATION}/llvm ${OPT_LOCATION}/llvm4 \ + ${TMP_LOCATION}/clang8 ${OPT_LOCATION}/clang8 \ + ${SRC_LOCATION}/zlib ${OPT_LOCATION}/zlib \ + ${SRC_LOCATION}/boost \ + ${SRC_LOCATION}/mongodb-linux-x86_64-amazon-$MONGODB_VERSION \ + ${SRC_LOCATION}/mongo-c-driver-$MONGO_C_DRIVER_VERSION \ + ${SRC_LOCATION}/mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION export CPATH="$(python-config --includes | awk '{print $1}' | cut -dI -f2):$CPATH" # Boost has trouble finding pyconfig.h printf "Checking Boost library (${BOOST_VERSION}) installation...\\n" diff --git a/scripts/eosio_build_fedora.sh b/scripts/eosio_build_fedora.sh index 2cf077538c7..44bec05fdb4 100755 --- a/scripts/eosio_build_fedora.sh +++ b/scripts/eosio_build_fedora.sh @@ -116,6 +116,17 @@ fi printf "\\n" +### clean up force build before starting +if [ $FORCE_BUILD ];then + rm -rf \ + ${SRC_LOCATION}/cmake-$CMAKE_VERSION \ + ${SRC_LOCATION}/llvm ${OPT_LOCATION}/llvm4 \ + ${TMP_LOCATION}/clang8 ${OPT_LOCATION}/clang8 \ + ${SRC_LOCATION}/zlib ${OPT_LOCATION}/zlib \ + ${SRC_LOCATION}/boost \ + ${SRC_LOCATION}/mongodb-linux-x86_64-amazon-$MONGODB_VERSION \ + ${SRC_LOCATION}/mongo-c-driver-$MONGO_C_DRIVER_VERSION \ + ${SRC_LOCATION}/mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION printf "Checking CMAKE installation...\\n" if [ ! -e $CMAKE ] || [ $FORCE_BUILD ]; then @@ -137,7 +148,7 @@ if [ $? -ne 0 ]; then exit -1; fi printf "\\n" -if $PIN_COMPILER; then +if [ $PIN_COMPILER ]; then printf "Checking Clang 8 support...\\n" if [ ! -d $CLANG8_ROOT ] || [ $FORCE_BUILD ]; then printf "Installing Clang 8...\\n" diff --git a/scripts/eosio_build_ubuntu.sh b/scripts/eosio_build_ubuntu.sh index 93a995d009b..b7cefd88e83 100755 --- a/scripts/eosio_build_ubuntu.sh +++ b/scripts/eosio_build_ubuntu.sh @@ -140,6 +140,17 @@ fi printf "\\n" +### clean up force build before starting +if [ $FORCE_BUILD ];then + rm -rf \ + ${SRC_LOCATION}/cmake-$CMAKE_VERSION \ + ${SRC_LOCATION}/llvm ${OPT_LOCATION}/llvm4 \ + ${TMP_LOCATION}/clang8 ${OPT_LOCATION}/clang8 \ + ${SRC_LOCATION}/zlib ${OPT_LOCATION}/zlib \ + ${SRC_LOCATION}/boost \ + ${SRC_LOCATION}/mongodb-linux-x86_64-amazon-$MONGODB_VERSION \ + ${SRC_LOCATION}/mongo-c-driver-$MONGO_C_DRIVER_VERSION \ + ${SRC_LOCATION}/mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION \ printf "Checking CMAKE installation...\\n" if [ ! -e $CMAKE ] || [ $FORCE_BUILD ]; then @@ -162,7 +173,7 @@ if [ $? -ne 0 ]; then exit -1; fi printf "\\n" -if $PIN_COMPILER; then +if [ $PIN_COMPILER ]; then printf "Checking Clang 8 support...\\n" if [ ! 
-d $CLANG8_ROOT ] || [ $FORCE_BUILD ]; then printf "Installing Clang 8...\\n" From 4228e97e3358c5b6b97ec65b42afc48c477b4323 Mon Sep 17 00:00:00 2001 From: Bucky Kittinger Date: Wed, 24 Apr 2019 10:44:41 -0400 Subject: [PATCH 498/680] forgot fi --- scripts/eosio_build_amazon.sh | 1 + scripts/eosio_build_centos.sh | 1 + scripts/eosio_build_darwin.sh | 1 + scripts/eosio_build_fedora.sh | 1 + scripts/eosio_build_ubuntu.sh | 1 + 5 files changed, 5 insertions(+) diff --git a/scripts/eosio_build_amazon.sh b/scripts/eosio_build_amazon.sh index 36aaef767c0..5d0438ca314 100755 --- a/scripts/eosio_build_amazon.sh +++ b/scripts/eosio_build_amazon.sh @@ -131,6 +131,7 @@ if [ $FORCE_BUILD ];then ${SRC_LOCATION}/mongodb-linux-x86_64-amazon-$MONGODB_VERSION \ ${SRC_LOCATION}/mongo-c-driver-$MONGO_C_DRIVER_VERSION \ ${SRC_LOCATION}/mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION +fi printf "Checking CMAKE installation...\\n" if [ ! -e $CMAKE ] || [ $FORCE_BUILD ]; then diff --git a/scripts/eosio_build_centos.sh b/scripts/eosio_build_centos.sh index f8ae55dc9a3..140705f4d6d 100755 --- a/scripts/eosio_build_centos.sh +++ b/scripts/eosio_build_centos.sh @@ -182,6 +182,7 @@ if [ $FORCE_BUILD ];then ${SRC_LOCATION}/mongodb-linux-x86_64-amazon-$MONGODB_VERSION \ ${SRC_LOCATION}/mongo-c-driver-$MONGO_C_DRIVER_VERSION \ ${SRC_LOCATION}/mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION +fi printf "Checking CMAKE installation...\\n" if [ ! -e $CMAKE ] || [ $FORCE_BUILD ]; then diff --git a/scripts/eosio_build_darwin.sh b/scripts/eosio_build_darwin.sh index 41352f4bcbf..64b6222e34f 100755 --- a/scripts/eosio_build_darwin.sh +++ b/scripts/eosio_build_darwin.sh @@ -171,6 +171,7 @@ if [ $FORCE_BUILD ];then ${SRC_LOCATION}/mongodb-linux-x86_64-amazon-$MONGODB_VERSION \ ${SRC_LOCATION}/mongo-c-driver-$MONGO_C_DRIVER_VERSION \ ${SRC_LOCATION}/mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION +fi export CPATH="$(python-config --includes | awk '{print $1}' | cut -dI -f2):$CPATH" # Boost has trouble finding pyconfig.h printf "Checking Boost library (${BOOST_VERSION}) installation...\\n" diff --git a/scripts/eosio_build_fedora.sh b/scripts/eosio_build_fedora.sh index 44bec05fdb4..46b5c4c041f 100755 --- a/scripts/eosio_build_fedora.sh +++ b/scripts/eosio_build_fedora.sh @@ -127,6 +127,7 @@ if [ $FORCE_BUILD ];then ${SRC_LOCATION}/mongodb-linux-x86_64-amazon-$MONGODB_VERSION \ ${SRC_LOCATION}/mongo-c-driver-$MONGO_C_DRIVER_VERSION \ ${SRC_LOCATION}/mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION +fi printf "Checking CMAKE installation...\\n" if [ ! -e $CMAKE ] || [ $FORCE_BUILD ]; then diff --git a/scripts/eosio_build_ubuntu.sh b/scripts/eosio_build_ubuntu.sh index b7cefd88e83..55b3793a2df 100755 --- a/scripts/eosio_build_ubuntu.sh +++ b/scripts/eosio_build_ubuntu.sh @@ -151,6 +151,7 @@ if [ $FORCE_BUILD ];then ${SRC_LOCATION}/mongodb-linux-x86_64-amazon-$MONGODB_VERSION \ ${SRC_LOCATION}/mongo-c-driver-$MONGO_C_DRIVER_VERSION \ ${SRC_LOCATION}/mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION \ +fi printf "Checking CMAKE installation...\\n" if [ ! 
-e $CMAKE ] || [ $FORCE_BUILD ]; then From ccd0be4c4e6b12627a834d02616e08fdd125d283 Mon Sep 17 00:00:00 2001 From: Bucky Kittinger Date: Wed, 24 Apr 2019 10:55:27 -0400 Subject: [PATCH 499/680] left over / --- scripts/eosio_build_ubuntu.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/eosio_build_ubuntu.sh b/scripts/eosio_build_ubuntu.sh index 55b3793a2df..6284d8e5a5a 100755 --- a/scripts/eosio_build_ubuntu.sh +++ b/scripts/eosio_build_ubuntu.sh @@ -150,7 +150,7 @@ if [ $FORCE_BUILD ];then ${SRC_LOCATION}/boost \ ${SRC_LOCATION}/mongodb-linux-x86_64-amazon-$MONGODB_VERSION \ ${SRC_LOCATION}/mongo-c-driver-$MONGO_C_DRIVER_VERSION \ - ${SRC_LOCATION}/mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION \ + ${SRC_LOCATION}/mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION fi printf "Checking CMAKE installation...\\n" From bd97221c332ae08f334175e3da048e3c29d06817 Mon Sep 17 00:00:00 2001 From: Bucky Kittinger Date: Wed, 24 Apr 2019 10:58:40 -0400 Subject: [PATCH 500/680] remove force flag from pipeline.yml --- .buildkite/pipeline.yml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml index f0404f90182..6be9df0a12a 100644 --- a/.buildkite/pipeline.yml +++ b/.buildkite/pipeline.yml @@ -1,7 +1,7 @@ steps: - command: | # Amazon Linux 2 Build echo "+++ :hammer: Building" - ./scripts/eosio_build.sh -y -P -f + ./scripts/eosio_build.sh -y -P echo "--- :compression: Compressing build directory" tar -pczf build.tar.gz build if [[ ! -f build.tar.gz ]]; then echo 'ERROR: No build.tar.gz artifact found!' && exit 1; fi @@ -24,7 +24,7 @@ steps: - command: | # CentOS 7 Build echo "+++ :hammer: Building" - ./scripts/eosio_build.sh -y -P -f + ./scripts/eosio_build.sh -y -P echo "--- :compression: Compressing build directory" tar -pczf build.tar.gz build if [[ ! -f build.tar.gz ]]; then echo 'ERROR: No build.tar.gz artifact found!' && exit 1; fi @@ -47,7 +47,7 @@ steps: - command: | # Ubuntu 16.04 Build echo "+++ :hammer: Building" - ./scripts/eosio_build.sh -y -P -f + ./scripts/eosio_build.sh -y -P echo "--- :compression: Compressing build directory" tar -pczf build.tar.gz build if [[ ! -f build.tar.gz ]]; then echo 'ERROR: No build.tar.gz artifact found!' && exit 1; fi @@ -70,7 +70,7 @@ steps: - command: | # Ubuntu 18.04 Build echo "+++ :hammer: Building" - ./scripts/eosio_build.sh -y -P -f + ./scripts/eosio_build.sh -y -P echo "--- :compression: Compressing build directory" tar -pczf build.tar.gz build if [[ ! -f build.tar.gz ]]; then echo 'ERROR: No build.tar.gz artifact found!' && exit 1; fi @@ -97,7 +97,7 @@ steps: ln -s "$(pwd)" /data/job cd /data/job echo "+++ Building :hammer:" - ./scripts/eosio_build.sh -y -P -f + ./scripts/eosio_build.sh -y -P echo "--- Compressing build directory :compression:" tar -pczf build.tar.gz build if [[ ! -f build.tar.gz ]]; then echo 'ERROR: No build.tar.gz artifact found!' 
&& exit 1; fi From e09efb8345587f6b0dfad200110c3e0544e99b9f Mon Sep 17 00:00:00 2001 From: arhag Date: Wed, 24 Apr 2019 11:02:02 -0400 Subject: [PATCH 501/680] update fc submodule --- libraries/fc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/fc b/libraries/fc index 469bdf3298f..62612633136 160000 --- a/libraries/fc +++ b/libraries/fc @@ -1 +1 @@ -Subproject commit 469bdf3298fff96cb138a901ee1231eccb155471 +Subproject commit 6261263313673fbefe24e2cf48035f3f9796768e From 745b0bbf518872e11ca35bcc57be81de9885aa44 Mon Sep 17 00:00:00 2001 From: Bucky Kittinger Date: Wed, 24 Apr 2019 11:09:46 -0400 Subject: [PATCH 502/680] adding back -f until base images are built correctly --- .buildkite/pipeline.yml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml index 6be9df0a12a..f0404f90182 100644 --- a/.buildkite/pipeline.yml +++ b/.buildkite/pipeline.yml @@ -1,7 +1,7 @@ steps: - command: | # Amazon Linux 2 Build echo "+++ :hammer: Building" - ./scripts/eosio_build.sh -y -P + ./scripts/eosio_build.sh -y -P -f echo "--- :compression: Compressing build directory" tar -pczf build.tar.gz build if [[ ! -f build.tar.gz ]]; then echo 'ERROR: No build.tar.gz artifact found!' && exit 1; fi @@ -24,7 +24,7 @@ steps: - command: | # CentOS 7 Build echo "+++ :hammer: Building" - ./scripts/eosio_build.sh -y -P + ./scripts/eosio_build.sh -y -P -f echo "--- :compression: Compressing build directory" tar -pczf build.tar.gz build if [[ ! -f build.tar.gz ]]; then echo 'ERROR: No build.tar.gz artifact found!' && exit 1; fi @@ -47,7 +47,7 @@ steps: - command: | # Ubuntu 16.04 Build echo "+++ :hammer: Building" - ./scripts/eosio_build.sh -y -P + ./scripts/eosio_build.sh -y -P -f echo "--- :compression: Compressing build directory" tar -pczf build.tar.gz build if [[ ! -f build.tar.gz ]]; then echo 'ERROR: No build.tar.gz artifact found!' && exit 1; fi @@ -70,7 +70,7 @@ steps: - command: | # Ubuntu 18.04 Build echo "+++ :hammer: Building" - ./scripts/eosio_build.sh -y -P + ./scripts/eosio_build.sh -y -P -f echo "--- :compression: Compressing build directory" tar -pczf build.tar.gz build if [[ ! -f build.tar.gz ]]; then echo 'ERROR: No build.tar.gz artifact found!' && exit 1; fi @@ -97,7 +97,7 @@ steps: ln -s "$(pwd)" /data/job cd /data/job echo "+++ Building :hammer:" - ./scripts/eosio_build.sh -y -P + ./scripts/eosio_build.sh -y -P -f echo "--- Compressing build directory :compression:" tar -pczf build.tar.gz build if [[ ! -f build.tar.gz ]]; then echo 'ERROR: No build.tar.gz artifact found!' 
&& exit 1; fi From 2b3b579cff04254b4f3ab39a137395c2fc93d1fc Mon Sep 17 00:00:00 2001 From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com> Date: Wed, 24 Apr 2019 12:16:41 -0400 Subject: [PATCH 503/680] clean up compile error from merge --- .../chain/include/eosio/chain/wasm_interface_private.hpp | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/libraries/chain/include/eosio/chain/wasm_interface_private.hpp b/libraries/chain/include/eosio/chain/wasm_interface_private.hpp index b545155bd04..45f70460b02 100644 --- a/libraries/chain/include/eosio/chain/wasm_interface_private.hpp +++ b/libraries/chain/include/eosio/chain/wasm_interface_private.hpp @@ -48,7 +48,10 @@ namespace eosio { namespace chain { ~wasm_interface_impl() { if(is_shutting_down) - std::for_each(instantiation_cache.begin(), instantiation_cache.end(), [](auto& i) {i.second.release();}); + for(wasm_cache_index::iterator it = wasm_instantiation_cache.begin(); it != wasm_instantiation_cache.end(); ++it) + wasm_instantiation_cache.modify(it, [](wasm_cache_entry& e) { + e.module.release(); + }); } std::vector parse_initial_memory(const Module& module) { From 690966505dc03a97342c446483baa2d0e42dff29 Mon Sep 17 00:00:00 2001 From: Bucky Kittinger Date: Wed, 24 Apr 2019 14:39:55 -0400 Subject: [PATCH 504/680] disable mongo tests --- scripts/parallel-test.sh | 5 +---- scripts/serial-test.sh | 8 ++++---- 2 files changed, 5 insertions(+), 8 deletions(-) diff --git a/scripts/parallel-test.sh b/scripts/parallel-test.sh index fd53ca55198..0153b95c87e 100755 --- a/scripts/parallel-test.sh +++ b/scripts/parallel-test.sh @@ -4,8 +4,6 @@ set -e # exit on failure of any "simple" command (excludes &&, ||, or | chains) PATH=$PATH:~/opt/mongodb/bin echo "Extracting build directory..." tar -zxf build.tar.gz -echo "Starting MongoDB..." -~/bin/mongod --fork --dbpath ~/data/mongodb -f ~/etc/mongod.conf --logpath "$(pwd)"/mongod.log cd /data/job/build # run tests echo "Running tests..." @@ -26,7 +24,6 @@ mv $(pwd)/Testing/$(ls $(pwd)/Testing/ | grep '20' | tail -n 1)/Test.xml $XML_FI buildkite-agent artifact upload config.ini buildkite-agent artifact upload genesis.json cd .. -buildkite-agent artifact upload mongod.log cd build buildkite-agent artifact upload $XML_FILENAME echo "Done uploading artifacts." @@ -35,4 +32,4 @@ if [[ "$EXIT_STATUS" != 0 ]]; then echo "Failing due to non-zero exit status from ctest: $EXIT_STATUS" echo ' ^^^ scroll up for more information ^^^' exit $EXIT_STATUS -fi \ No newline at end of file +fi diff --git a/scripts/serial-test.sh b/scripts/serial-test.sh index 1d36e081712..c1f83ae9bfa 100755 --- a/scripts/serial-test.sh +++ b/scripts/serial-test.sh @@ -4,8 +4,8 @@ set -e # exit on failure of any "simple" command (excludes &&, ||, or | chains) PATH=$PATH:~/opt/mongodb/bin echo "Extracting build directory..." tar -zxf build.tar.gz -echo "Starting MongoDB..." -~/bin/mongod --fork --dbpath ~/data/mongodb -f ~/etc/mongod.conf --logpath "$(pwd)"/mongod.log +#echo "Starting MongoDB..." +#~/bin/mongod --fork --dbpath ~/data/mongodb -f ~/etc/mongod.conf --logpath "$(pwd)"/mongod.log cd /data/job/build # run tests echo "Running tests..." @@ -24,7 +24,7 @@ mv $(pwd)/Testing/$(ls $(pwd)/Testing/ | grep '20' | tail -n 1)/Test.xml $XML_FI buildkite-agent artifact upload config.ini buildkite-agent artifact upload genesis.json cd .. -buildkite-agent artifact upload mongod.log +#buildkite-agent artifact upload mongod.log cd build buildkite-agent artifact upload $XML_FILENAME echo "Done uploading artifacts." 
@@ -33,4 +33,4 @@ if [[ "$EXIT_STATUS" != 0 ]]; then echo "Failing due to non-zero exit status from ctest: $EXIT_STATUS" echo ' ^^^ scroll up for more information ^^^' exit $EXIT_STATUS -fi \ No newline at end of file +fi From c9f325aaed00965167f5a26290d07f59dca7fadb Mon Sep 17 00:00:00 2001 From: arhag Date: Wed, 24 Apr 2019 15:17:17 -0400 Subject: [PATCH 505/680] remove unsupported platforms from README and build scripts --- README.md | 21 +-- scripts/eosio_build.sh | 20 +-- scripts/eosio_build_fedora.sh | 236 ---------------------------------- scripts/eosio_build_ubuntu.sh | 7 - 4 files changed, 6 insertions(+), 278 deletions(-) delete mode 100755 scripts/eosio_build_fedora.sh diff --git a/README.md b/README.md index e4ec6e0b69d..3c14af2bbb7 100644 --- a/README.md +++ b/README.md @@ -65,15 +65,6 @@ $ sudo yum install ./eosio-1.7.0-rc1.el7.x86_64.rpm ```sh $ sudo yum remove eosio ``` -#### Fedora RPM Package Install -```sh -$ wget https://github.com/eosio/eos/releases/download/v1.6.0/eosio-1.7.0-rc1.fc27.x86_64.rpm -$ sudo yum install ./eosio-1.7.0-rc1.fc27.x86_64.rpm -``` -#### Fedora RPM Package Uninstall -```sh -$ sudo yum remove eosio.cdt -``` #### Build Script Uninstall @@ -84,13 +75,11 @@ If you have previously installed EOSIO using build scripts, you have two options ## Supported Operating Systems EOSIO currently supports the following operating systems: -1. Amazon 2017.09 and higher -2. Centos 7 -3. Fedora 25 and higher (Fedora 27 recommended) -4. Mint 18 -5. Ubuntu 16.04 (Ubuntu 16.10 recommended) -6. Ubuntu 18.04 -7. MacOS Darwin 10.12 and higher (MacOS 10.13.x recommended) +1. Amazon Linux 2 +2. CentOS 7 +3. Ubuntu 16.04 +4. Ubuntu 18.04 +5. MacOS 10.14 (Mojave) ## Resources 1. [Website](https://eos.io) diff --git a/scripts/eosio_build.sh b/scripts/eosio_build.sh index 32395919051..7b0f43a04f8 100755 --- a/scripts/eosio_build.sh +++ b/scripts/eosio_build.sh @@ -212,12 +212,10 @@ if [ "$ARCH" == "Linux" ]; then export OS_NAME=$( cat /etc/os-release | grep ^NAME | cut -d'=' -f2 | sed 's/\"//gI' ) OPENSSL_ROOT_DIR=/usr/include/openssl if [ ! -e /etc/os-release ]; then - printf "\\nEOSIO currently supports Amazon, Centos, Fedora, Mint & Ubuntu Linux only.\\n" + printf "\\nEOSIO currently supports Amazon, Centos, and Ubuntu Linux only.\\n" printf "Please install on the latest version of one of these Linux distributions.\\n" printf "https://aws.amazon.com/amazon-linux-ami/\\n" printf "https://www.centos.org/\\n" - printf "https://start.fedoraproject.org/\\n" - printf "https://linuxmint.com/\\n" printf "https://www.ubuntu.com/\\n" printf "Exiting now.\\n" exit 1 @@ -233,22 +231,6 @@ if [ "$ARCH" == "Linux" ]; then CXX_COMPILER=g++ C_COMPILER=gcc ;; - "elementary OS") - FILE="${REPO_ROOT}/scripts/eosio_build_ubuntu.sh" - CXX_COMPILER=clang++-4.0 - C_COMPILER=clang-4.0 - ;; - "Fedora") - export CPATH=/usr/include/llvm4.0:$CPATH # llvm4.0 for fedora package path inclusion - FILE="${REPO_ROOT}/scripts/eosio_build_fedora.sh" - CXX_COMPILER=g++ - C_COMPILER=gcc - ;; - "Linux Mint") - FILE="${REPO_ROOT}/scripts/eosio_build_ubuntu.sh" - CXX_COMPILER=clang++-4.0 - C_COMPILER=clang-4.0 - ;; "Ubuntu") FILE="${REPO_ROOT}/scripts/eosio_build_ubuntu.sh" CXX_COMPILER=clang++-4.0 diff --git a/scripts/eosio_build_fedora.sh b/scripts/eosio_build_fedora.sh deleted file mode 100755 index b1429b01931..00000000000 --- a/scripts/eosio_build_fedora.sh +++ /dev/null @@ -1,236 +0,0 @@ -if [ $1 == 1 ]; then ANSWER=1; else ANSWER=0; fi - -CPU_SPEED=$( lscpu | grep "MHz" | tr -s ' ' | cut -d\ -f3 | cut -d'.' 
-f1 ) -CPU_CORE=$( nproc ) - -OS_VER=$( grep VERSION_ID /etc/os-release | cut -d'=' -f2 | sed 's/[^0-9\.]//gI' ) -if [ "${OS_VER}" -lt 25 ]; then - printf "You must be running Fedora 25 or higher to install EOSIO.\\n" - printf "Exiting now.\\n" - exit 1; -fi - -# procps-ng includes free command -if [[ -z "$( rpm -qi "procps-ng" 2>/dev/null | grep Name )" ]]; then yum install -y procps-ng; fi -MEM_MEG=$( free -m | sed -n 2p | tr -s ' ' | cut -d\ -f2 ) -if [ "${MEM_MEG}" -lt 7000 ]; then - printf "Your system must have 7 or more Gigabytes of physical memory installed.\\n" - printf "Exiting now.\\n" - exit 1; -fi -MEM_GIG=$(( ((MEM_MEG / 1000) / 2) )) -export JOBS=$(( MEM_GIG > CPU_CORE ? CPU_CORE : MEM_GIG )) - -DISK_INSTALL=$( df -h . | tail -1 | tr -s ' ' | cut -d\\ -f1 ) -DISK_TOTAL_KB=$( df . | tail -1 | awk '{print $2}' ) -DISK_AVAIL_KB=$( df . | tail -1 | awk '{print $4}' ) -DISK_TOTAL=$(( DISK_TOTAL_KB / 1048576 )) -DISK_AVAIL=$(( DISK_AVAIL_KB / 1048576 )) -if [ "${DISK_AVAIL%.*}" -lt "${DISK_MIN}" ]; then - printf "You must have at least %sGB of available storage to install EOSIO.\\n" "${DISK_MIN}" - printf "Exiting now.\\n" - exit 1; -fi - -printf "\\nOS name: ${OS_NAME}\\n" -printf "OS Version: ${OS_VER}\\n" -printf "CPU speed: ${CPU_SPEED}Mhz\\n" -printf "CPU cores: ${CPU_CORE}\\n" -printf "Physical Memory: ${MEM_MEG} Mgb\\n" -printf "Disk space total: ${DISK_TOTAL%.*}G\\n" -printf "Disk space available: ${DISK_AVAIL%.*}G\\n" - -# llvm is symlinked from /usr/lib64/llvm4.0 into user's home -DEP_ARRAY=( - git sudo procps-ng which gcc gcc-c++ autoconf automake libtool make \ - bzip2-devel wget bzip2 compat-openssl10 graphviz doxygen \ - openssl-devel gmp-devel libstdc++-devel python2 python2-devel python3 python3-devel \ - libedit ncurses-devel swig llvm4.0 llvm4.0-devel llvm4.0-libs llvm4.0-static libcurl-devel libusb-devel -) -COUNT=1 -DISPLAY="" -DEP="" - -printf "\\nChecking Yum installation...\\n" -if ! YUM=$( command -v yum 2>/dev/null ); then - printf "!! Yum must be installed to compile EOS.IO !!\\n" - printf "Exiting now.\\n" - exit 1; -fi -printf " - Yum installation found at %s.\\n" "${YUM}" - - -if [ $ANSWER != 1 ]; then read -p "Do you wish to update YUM repositories? (y/n) " ANSWER; fi -case $ANSWER in - 1 | [Yy]* ) - if ! sudo $YUM -y update; then - printf " - YUM update failed.\\n" - exit 1; - else - printf " - YUM update complete.\\n" - fi - ;; - [Nn]* ) echo " - Proceeding without update!";; - * ) echo "Please type 'y' for yes or 'n' for no."; exit;; -esac - -printf "Checking RPM for installed dependencies...\\n" -for (( i=0; i<${#DEP_ARRAY[@]}; i++ )); do - pkg=$( rpm -qi "${DEP_ARRAY[$i]}" 2>/dev/null | grep Name ) - if [[ -z $pkg ]]; then - DEP=$DEP" ${DEP_ARRAY[$i]} " - DISPLAY="${DISPLAY}${COUNT}. ${DEP_ARRAY[$i]}\\n" - printf " - Package %s ${bldred} NOT ${txtrst} found!\\n" "${DEP_ARRAY[$i]}" - (( COUNT++ )) - else - printf " - Package %s found.\\n" "${DEP_ARRAY[$i]}" - continue - fi -done -if [ "${COUNT}" -gt 1 ]; then - printf "\\nThe following dependencies are required to install EOSIO:\\n" - printf "${DISPLAY}\\n\\n" - if [ $ANSWER != 1 ]; then read -p "Do you wish to install these dependencies? (y/n) " ANSWER; fi - case $ANSWER in - 1 | [Yy]* ) - if ! 
sudo $YUM -y install ${DEP}; then - printf " - YUM dependency installation failed!\\n" - exit 1; - else - printf " - YUM dependencies installed successfully.\\n" - fi - ;; - [Nn]* ) echo "User aborting installation of required dependencies, Exiting now."; exit;; - * ) echo "Please type 'y' for yes or 'n' for no."; exit;; - esac -else - printf " - No required YUM dependencies to install.\\n" -fi - -printf "\\n" - - -printf "Checking CMAKE installation...\\n" -if [ ! -e $CMAKE ]; then - printf "Installing CMAKE...\\n" - curl -LO https://cmake.org/files/v$CMAKE_VERSION_MAJOR.$CMAKE_VERSION_MINOR/cmake-$CMAKE_VERSION.tar.gz \ - && tar -xzf cmake-$CMAKE_VERSION.tar.gz \ - && cd cmake-$CMAKE_VERSION \ - && ./bootstrap --prefix=$PREFIX \ - && make -j"${JOBS}" \ - && make install \ - && cd .. \ - && rm -f cmake-$CMAKE_VERSION.tar.gz \ - || exit 1 - printf " - CMAKE successfully installed @ ${CMAKE} \\n" -else - printf " - CMAKE found @ ${CMAKE}.\\n" -fi -if [ $? -ne 0 ]; then exit -1; fi - - -printf "\\n" - - -printf "Checking Boost library (${BOOST_VERSION}) installation...\\n" -BOOSTVERSION=$( grep "#define BOOST_VERSION" "$BOOST_ROOT/include/boost/version.hpp" 2>/dev/null | tail -1 | tr -s ' ' | cut -d\ -f3 ) -if [ "${BOOSTVERSION}" != "${BOOST_VERSION_MAJOR}0${BOOST_VERSION_MINOR}0${BOOST_VERSION_PATCH}" ]; then - printf "Installing Boost library...\\n" - curl -LO https://dl.bintray.com/boostorg/release/${BOOST_VERSION_MAJOR}.${BOOST_VERSION_MINOR}.${BOOST_VERSION_PATCH}/source/boost_$BOOST_VERSION.tar.bz2 \ - && tar -xjf boost_$BOOST_VERSION.tar.bz2 \ - && cd $BOOST_ROOT \ - && ./bootstrap.sh --prefix=$BOOST_ROOT \ - && ./b2 -q -j"${JOBS}" install \ - && cd .. \ - && rm -f boost_$BOOST_VERSION.tar.bz2 \ - && rm -rf $BOOST_LINK_LOCATION \ - && ln -s $BOOST_ROOT $BOOST_LINK_LOCATION \ - || exit 1 - printf " - Boost library successfully installed @ ${BOOST_ROOT} (Symlinked to ${BOOST_LINK_LOCATION}).\\n" -else - printf " - Boost library found with correct version @ ${BOOST_ROOT} (Symlinked to ${BOOST_LINK_LOCATION}).\\n" -fi -if [ $? -ne 0 ]; then exit -1; fi - - -printf "\\n" - - -printf "Checking MongoDB installation...\\n" -if [ ! -d $MONGODB_ROOT ]; then - printf "Installing MongoDB into ${MONGODB_ROOT}...\\n" - curl -OL https://fastdl.mongodb.org/linux/mongodb-linux-x86_64-amazon-$MONGODB_VERSION.tgz \ - && tar -xzf mongodb-linux-x86_64-amazon-$MONGODB_VERSION.tgz \ - && mv $SRC_LOCATION/mongodb-linux-x86_64-amazon-$MONGODB_VERSION $MONGODB_ROOT \ - && touch $MONGODB_LOG_LOCATION/mongod.log \ - && rm -f mongodb-linux-x86_64-amazon-$MONGODB_VERSION.tgz \ - && cp -f $REPO_ROOT/scripts/mongod.conf $MONGODB_CONF \ - && mkdir -p $MONGODB_DATA_LOCATION \ - && rm -rf $MONGODB_LINK_LOCATION \ - && rm -rf $BIN_LOCATION/mongod \ - && ln -s $MONGODB_ROOT $MONGODB_LINK_LOCATION \ - && ln -s $MONGODB_LINK_LOCATION/bin/mongod $BIN_LOCATION/mongod \ - || exit 1 - printf " - MongoDB successfully installed @ ${MONGODB_ROOT}\\n" -else - printf " - MongoDB found with correct version @ ${MONGODB_ROOT}.\\n" -fi -if [ $? -ne 0 ]; then exit -1; fi -printf "Checking MongoDB C driver installation...\\n" -if [ ! 
-d $MONGO_C_DRIVER_ROOT ]; then - printf "Installing MongoDB C driver...\\n" - curl -LO https://github.com/mongodb/mongo-c-driver/releases/download/$MONGO_C_DRIVER_VERSION/mongo-c-driver-$MONGO_C_DRIVER_VERSION.tar.gz \ - && tar -xzf mongo-c-driver-$MONGO_C_DRIVER_VERSION.tar.gz \ - && cd mongo-c-driver-$MONGO_C_DRIVER_VERSION \ - && mkdir -p cmake-build \ - && cd cmake-build \ - && $CMAKE -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$PREFIX -DENABLE_BSON=ON -DENABLE_SSL=OPENSSL -DENABLE_AUTOMATIC_INIT_AND_CLEANUP=OFF -DENABLE_STATIC=ON .. \ - && make -j"${JOBS}" \ - && make install \ - && cd ../.. \ - && rm mongo-c-driver-$MONGO_C_DRIVER_VERSION.tar.gz \ - || exit 1 - printf " - MongoDB C driver successfully installed @ ${MONGO_C_DRIVER_ROOT}.\\n" -else - printf " - MongoDB C driver found with correct version @ ${MONGO_C_DRIVER_ROOT}.\\n" -fi -if [ $? -ne 0 ]; then exit -1; fi -printf "Checking MongoDB C++ driver installation...\\n" -if [ ! -d $MONGO_CXX_DRIVER_ROOT ]; then - printf "Installing MongoDB C++ driver...\\n" - curl -L https://github.com/mongodb/mongo-cxx-driver/archive/r$MONGO_CXX_DRIVER_VERSION.tar.gz -o mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION.tar.gz \ - && tar -xzf mongo-cxx-driver-r${MONGO_CXX_DRIVER_VERSION}.tar.gz \ - && cd mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION/build \ - && $CMAKE -DBUILD_SHARED_LIBS=OFF -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$PREFIX .. \ - && make -j"${JOBS}" VERBOSE=1 \ - && make install \ - && cd ../.. \ - && rm -f mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION.tar.gz \ - || exit 1 - printf " - MongoDB C++ driver successfully installed @ ${MONGO_CXX_DRIVER_ROOT}.\\n" -else - printf " - MongoDB C++ driver found with correct version @ ${MONGO_CXX_DRIVER_ROOT}.\\n" -fi -if [ $? -ne 0 ]; then exit -1; fi - - -printf "\\n" - - -printf "Checking LLVM 4 support...\\n" -if [ ! -d $LLVM_ROOT ]; then - ln -s /usr/lib64/llvm4.0 $LLVM_ROOT \ - || exit 1 - printf " - LLVM successfully linked from /usr/lib64/llvm4.0 to ${LLVM_ROOT}\\n" -else - printf " - LLVM found @ ${LLVM_ROOT}.\\n" -fi -if [ $? -ne 0 ]; then exit -1; fi - - -cd .. -printf "\\n" - -function print_instructions() { - return 0 -} diff --git a/scripts/eosio_build_ubuntu.sh b/scripts/eosio_build_ubuntu.sh index 6b78c58f92e..116811fa23e 100755 --- a/scripts/eosio_build_ubuntu.sh +++ b/scripts/eosio_build_ubuntu.sh @@ -32,13 +32,6 @@ if [ "${MEM_MEG}" -lt 7000 ]; then fi case "${OS_NAME}" in - "Linux Mint") - if [ "${OS_MAJ}" -lt 18 ]; then - printf "You must be running Linux Mint 18.x or higher to install EOSIO.\\n" - printf "Exiting now.\\n" - exit 1 - fi - ;; "Ubuntu") . 
/etc/lsb-release if [ "${DISTRIB_CODENAME}" != "xenial" -a "${DISTRIB_CODENAME}" != "bionic" ]; then From c3180bd2ed1c06ac8c593127d325755c14150765 Mon Sep 17 00:00:00 2001 From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com> Date: Wed, 24 Apr 2019 15:44:05 -0400 Subject: [PATCH 506/680] Remove Debian from ubuntu build script --- scripts/eosio_build_ubuntu.sh | 7 ------- 1 file changed, 7 deletions(-) diff --git a/scripts/eosio_build_ubuntu.sh b/scripts/eosio_build_ubuntu.sh index 116811fa23e..bf8184e0010 100755 --- a/scripts/eosio_build_ubuntu.sh +++ b/scripts/eosio_build_ubuntu.sh @@ -46,13 +46,6 @@ case "${OS_NAME}" in # We have to re-set this with the new version export MONGODB_ROOT=${OPT_LOCATION}/mongodb-${MONGODB_VERSION} ;; - "Debian") - if [ $OS_MAJ -lt 10 ]; then - printf "You must be running Debian 10 to install EOSIO, and resolve missing dependencies from unstable (sid).\n" - printf "Exiting now.\n" - exit 1 - fi - ;; esac if [ "${DISK_AVAIL%.*}" -lt "${DISK_MIN}" ]; then From 55f34c4a8608dfbf8df275fe7e4d46f2797f3f82 Mon Sep 17 00:00:00 2001 From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com> Date: Wed, 24 Apr 2019 15:47:46 -0400 Subject: [PATCH 507/680] Remove amazon linux "1" support from build script --- scripts/eosio_build_amazon.sh | 22 +++++++--------------- 1 file changed, 7 insertions(+), 15 deletions(-) diff --git a/scripts/eosio_build_amazon.sh b/scripts/eosio_build_amazon.sh index 1fb2af1ebc9..a698b17eebc 100755 --- a/scripts/eosio_build_amazon.sh +++ b/scripts/eosio_build_amazon.sh @@ -8,26 +8,18 @@ DISK_AVAIL_KB=$( df . | tail -1 | awk '{print $4}' ) DISK_TOTAL=$(( DISK_TOTAL_KB / 1048576 )) DISK_AVAIL=$(( DISK_AVAIL_KB / 1048576 )) -if [[ "${OS_NAME}" == "Amazon Linux AMI" ]]; then # Amazonlinux1 - DEP_ARRAY=( - sudo procps util-linux which gcc72 gcc72-c++ autoconf automake libtool make doxygen graphviz \ - bzip2 bzip2-devel openssl-devel gmp gmp-devel libstdc++72 python27 python27-devel python36 python36-devel \ - libedit-devel ncurses-devel swig wget file libcurl-devel libusb1-devel - ) -else # Amazonlinux2 - DEP_ARRAY=( - git procps-ng util-linux gcc gcc-c++ autoconf automake libtool make bzip2 \ - bzip2-devel openssl-devel gmp-devel libstdc++ libcurl-devel libusbx-devel \ - python3 python3-devel python-devel libedit-devel doxygen graphviz - ) -fi +DEP_ARRAY=( + git procps-ng util-linux gcc gcc-c++ autoconf automake libtool make bzip2 \ + bzip2-devel openssl-devel gmp-devel libstdc++ libcurl-devel libusbx-devel \ + python3 python3-devel python-devel libedit-devel doxygen graphviz +) COUNT=1 DISPLAY="" DEP="" -if [[ "${OS_NAME}" == "Amazon Linux AMI" && "${OS_VER}" -lt 2017 ]]; then - printf "You must be running Amazon Linux 2017.09 or higher to install EOSIO.\\n" +if ! (. 
/etc/os-release; [ "$VERSION_ID" = "2" ]); then + printf "Amazon Linux 2 is the only version of Amazon Linux supported by EOSIO build scripts.\\n" printf "exiting now.\\n" exit 1 fi From 654cbeea6d4dcdef20019a43dad1ba8ee1d3a319 Mon Sep 17 00:00:00 2001 From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com> Date: Wed, 24 Apr 2019 15:49:23 -0400 Subject: [PATCH 508/680] Limit centos build script to version 7 only; too hard to predict future --- scripts/eosio_build_centos.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/eosio_build_centos.sh b/scripts/eosio_build_centos.sh index 3988aef50a4..7fdc621192e 100755 --- a/scripts/eosio_build_centos.sh +++ b/scripts/eosio_build_centos.sh @@ -31,8 +31,8 @@ if [ "${MEM_MEG}" -lt 7000 ]; then exit 1; fi -if [ "${OS_VER}" -lt 7 ]; then - printf "\\nYou must be running Centos 7 or higher to install EOSIO.\\n" +if ! (. /etc/os-release; [ "$VERSION_ID" = "7" ]); then + printf "\\nCentos 7 is the only version of Centos supported by EOSIO build scripts.\\n" printf "Exiting now.\\n\\n" exit 1; fi From 2f3cd5d05750427a25582afa649ee12e6d7dbcad Mon Sep 17 00:00:00 2001 From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com> Date: Wed, 24 Apr 2019 16:01:07 -0400 Subject: [PATCH 509/680] Remove debian from root eosio_build script; cleanup error message --- scripts/eosio_build.sh | 26 ++++++++++++-------------- 1 file changed, 12 insertions(+), 14 deletions(-) diff --git a/scripts/eosio_build.sh b/scripts/eosio_build.sh index 7b0f43a04f8..9035b17fa83 100755 --- a/scripts/eosio_build.sh +++ b/scripts/eosio_build.sh @@ -206,19 +206,23 @@ printf "\\nARCHITECTURE: %s\\n" "${ARCH}" # Find and use existing CMAKE export CMAKE=$(command -v cmake 2>/dev/null) +print_supported_linux_distros_and_exit() { + printf "\\nOn Linux the EOSIO build script only supports Amazon, Centos, and Ubuntu.\\n" + printf "Please install on a supported version of one of these Linux distributions.\\n" + printf "https://aws.amazon.com/amazon-linux-ami/\\n" + printf "https://www.centos.org/\\n" + printf "https://www.ubuntu.com/\\n" + printf "Exiting now.\\n" + exit 1 +} + if [ "$ARCH" == "Linux" ]; then # Check if cmake is already installed or not and use source install location if [ -z $CMAKE ]; then export CMAKE=$PREFIX/bin/cmake; fi export OS_NAME=$( cat /etc/os-release | grep ^NAME | cut -d'=' -f2 | sed 's/\"//gI' ) OPENSSL_ROOT_DIR=/usr/include/openssl if [ ! -e /etc/os-release ]; then - printf "\\nEOSIO currently supports Amazon, Centos, and Ubuntu Linux only.\\n" - printf "Please install on the latest version of one of these Linux distributions.\\n" - printf "https://aws.amazon.com/amazon-linux-ami/\\n" - printf "https://www.centos.org/\\n" - printf "https://www.ubuntu.com/\\n" - printf "Exiting now.\\n" - exit 1 + print_supported_linux_distros_and_exit fi case "$OS_NAME" in "Amazon Linux AMI"|"Amazon Linux") @@ -236,14 +240,8 @@ if [ "$ARCH" == "Linux" ]; then CXX_COMPILER=clang++-4.0 C_COMPILER=clang-4.0 ;; - "Debian GNU/Linux") - FILE="${REPO_ROOT}/scripts/eosio_build_ubuntu.sh" - CXX_COMPILER=clang++-4.0 - C_COMPILER=clang-4.0 - ;; *) - printf "\\nUnsupported Linux Distribution. 
Exiting now.\\n\\n" - exit 1 + print_supported_linux_distros_and_exit esac fi From eaa1c81bced695fb3a4c7ae8ac446217251a5523 Mon Sep 17 00:00:00 2001 From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com> Date: Wed, 24 Apr 2019 18:14:03 -0400 Subject: [PATCH 510/680] Remove dockerfiles --- Docker/Dockerfile | 23 ----- Docker/README.md | 162 ------------------------------- Docker/builder/Dockerfile | 58 ----------- Docker/cleos.sh | 17 ---- Docker/config.ini | 145 --------------------------- Docker/dev/Dockerfile | 17 ---- Docker/docker-compose-latest.yml | 33 ------- Docker/docker-compose.yml | 40 -------- Docker/nodeosd.sh | 37 ------- 9 files changed, 532 deletions(-) delete mode 100644 Docker/Dockerfile delete mode 100644 Docker/README.md delete mode 100644 Docker/builder/Dockerfile delete mode 100755 Docker/cleos.sh delete mode 100644 Docker/config.ini delete mode 100644 Docker/dev/Dockerfile delete mode 100644 Docker/docker-compose-latest.yml delete mode 100755 Docker/docker-compose.yml delete mode 100755 Docker/nodeosd.sh diff --git a/Docker/Dockerfile b/Docker/Dockerfile deleted file mode 100644 index 74da4edf1ec..00000000000 --- a/Docker/Dockerfile +++ /dev/null @@ -1,23 +0,0 @@ -FROM eosio/builder as builder -ARG branch=master -ARG symbol=SYS - -RUN git clone -b $branch https://github.com/EOSIO/eos.git --recursive \ - && cd eos && echo "$branch:$(git rev-parse HEAD)" > /etc/eosio-version \ - && cmake -H. -B"/tmp/build" -GNinja -DCMAKE_BUILD_TYPE=Release -DWASM_ROOT=/opt/wasm -DCMAKE_CXX_COMPILER=clang++ \ - -DCMAKE_C_COMPILER=clang -DCMAKE_INSTALL_PREFIX=/tmp/build -DBUILD_MONGO_DB_PLUGIN=true -DCORE_SYMBOL_NAME=$symbol \ - && cmake --build /tmp/build --target install - - -FROM ubuntu:18.04 - -RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get -y install openssl ca-certificates libusb-1.0 libcurl3-gnutls && rm -rf /var/lib/apt/lists/* -COPY --from=builder /usr/local/lib/* /usr/local/lib/ -COPY --from=builder /tmp/build/bin /opt/eosio/bin -COPY --from=builder /eos/Docker/config.ini / -COPY --from=builder /etc/eosio-version /etc -COPY --from=builder /eos/Docker/nodeosd.sh /opt/eosio/bin/nodeosd.sh -ENV EOSIO_ROOT=/opt/eosio -RUN chmod +x /opt/eosio/bin/nodeosd.sh -ENV LD_LIBRARY_PATH /usr/local/lib -ENV PATH /opt/eosio/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin diff --git a/Docker/README.md b/Docker/README.md deleted file mode 100644 index 6eade280f9b..00000000000 --- a/Docker/README.md +++ /dev/null @@ -1,162 +0,0 @@ -# Run in docker - -Simple and fast setup of EOS.IO on Docker is also available. - -## Install Dependencies - -- [Docker](https://docs.docker.com) Docker 17.05 or higher is required -- [docker-compose](https://docs.docker.com/compose/) version >= 1.10.0 - -## Docker Requirement - -- At least 7GB RAM (Docker -> Preferences -> Advanced -> Memory -> 7GB or above) -- If the build below fails, make sure you've adjusted Docker Memory settings and try again. - -## Build eos image - -```bash -git clone https://github.com/EOSIO/eos.git --recursive --depth 1 -cd eos/Docker -docker build . -t eosio/eos -``` - -The above will build off the most recent commit to the master branch by default. If you would like to target a specific branch/tag, you may use a build argument. For example, if you wished to generate a docker image based off of the v1.7.0-rc1 tag, you could do the following: - -```bash -docker build -t eosio/eos:v1.7.0-rc1 --build-arg branch=v1.7.0-rc1 . -``` - -By default, the symbol in eosio.system is set to SYS. 
You can override this using the symbol argument while building the docker image.
-
-```bash
-docker build -t eosio/eos --build-arg symbol=<symbol> .
-```
-
-## Start nodeos docker container only
-
-```bash
-docker run --name nodeos -p 8888:8888 -p 9876:9876 -t eosio/eos nodeosd.sh -e --http-alias=nodeos:8888 --http-alias=127.0.0.1:8888 --http-alias=localhost:8888 arg1 arg2
-```
-
-By default, all data is persisted in a docker volume. It can be deleted if the data is outdated or corrupted:
-
-```bash
-$ docker inspect --format '{{ range .Mounts }}{{ .Name }} {{ end }}' nodeos
-fdc265730a4f697346fa8b078c176e315b959e79365fc9cbd11f090ea0cb5cbc
-$ docker volume rm fdc265730a4f697346fa8b078c176e315b959e79365fc9cbd11f090ea0cb5cbc
-```
-
-Alternatively, you can directly mount a host directory into the container:
-
-```bash
-docker run --name nodeos -v /path-to-data-dir:/opt/eosio/bin/data-dir -p 8888:8888 -p 9876:9876 -t eosio/eos nodeosd.sh -e --http-alias=nodeos:8888 --http-alias=127.0.0.1:8888 --http-alias=localhost:8888 arg1 arg2
-```
-
-## Get chain info
-
-```bash
-curl http://127.0.0.1:8888/v1/chain/get_info
-```
-
-## Start both nodeos and keosd containers
-
-```bash
-docker volume create --name=nodeos-data-volume
-docker volume create --name=keosd-data-volume
-docker-compose up -d
-```
-
-After `docker-compose up -d`, two services named `nodeosd` and `keosd` will be started. The nodeosd service exposes ports 8888 and 9876 to the host. The keosd service does not expose any port to the host; it is only accessible to cleos when cleos is run inside the keosd container, as described in the "Execute cleos commands" section.
-
-### Execute cleos commands
-
-You can run the `cleos` commands via a bash alias.
-
-```bash
-alias cleos='docker-compose exec keosd /opt/eosio/bin/cleos -u http://nodeosd:8888 --wallet-url http://localhost:8900'
-cleos get info
-cleos get account inita
-```
-
-Upload the sample exchange contract
-
-```bash
-cleos set contract exchange contracts/exchange/
-```
-
-If you don't need keosd afterwards, you can stop the keosd service using
-
-```bash
-docker-compose stop keosd
-```
-
-### Develop/Build custom contracts
-
-Because the eosio/eos image does not contain the required dependencies for contract development (by design, to keep the image size small), you will need to use the eosio/eos-dev image. This image contains both the required binaries and dependencies to build contracts using eosiocpp.
-
-You can either use the image available on [Docker Hub](https://hub.docker.com/r/eosio/eos-dev/) or navigate into the dev folder and build the image manually.
-
-```bash
-cd dev
-docker build -t eosio/eos-dev .
-```
-
-### Change default configuration
-
-You can use a docker-compose override file to change the default configuration. For example, create an alternate config file `config2.ini` and a `docker-compose.override.yml` with the following content.
-
-```yaml
-version: "2"
-
-services:
-  nodeos:
-    volumes:
-      - nodeos-data-volume:/opt/eosio/bin/data-dir
-      - ./config2.ini:/opt/eosio/bin/data-dir/config.ini
-```
-
-Then restart your docker containers as follows:
-
-```bash
-docker-compose down
-docker-compose up
-```
-
-### Clear data-dir
-
-The data volume created by docker-compose can be deleted as follows:
-
-```bash
-docker volume rm nodeos-data-volume
-docker volume rm keosd-data-volume
-```
-
-### Docker Hub
-
-Docker Hub images are now deprecated. New build images were discontinued on January 1st, 2019. The existing old images will be removed on June 1st, 2019.
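-
-With the hosted images going away, the `eosio/eos:latest` tag referenced by the compose files can be built locally instead. A minimal sketch, reusing the build arguments shown earlier (the release tag here is illustrative):
-
-```bash
-docker build -t eosio/eos:latest --build-arg branch=v1.7.0-rc1 .
-```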
- -### EOSIO Testnet - -We can easily set up a EOSIO local testnet using docker images. Just run the following commands: - -Note: if you want to use the mongo db plugin, you have to enable it in your `data-dir/config.ini` first. - -``` -# create volume -docker volume create --name=nodeos-data-volume -docker volume create --name=keosd-data-volume -# pull images and start containers -docker-compose -f docker-compose-eosio-latest.yaml up -d -# get chain info -curl http://127.0.0.1:8888/v1/chain/get_info -# get logs -docker-compose logs -f nodeosd -# stop containers -docker-compose -f docker-compose-eosio-latest.yaml down -``` - -The `blocks` data are stored under `--data-dir` by default, and the wallet files are stored under `--wallet-dir` by default, of course you can change these as you want. - -### About MongoDB Plugin - -Currently, the mongodb plugin is disabled in `config.ini` by default, you have to change it manually in `config.ini` or you can mount a `config.ini` file to `/opt/eosio/bin/data-dir/config.ini` in the docker-compose file. diff --git a/Docker/builder/Dockerfile b/Docker/builder/Dockerfile deleted file mode 100644 index 11493039a10..00000000000 --- a/Docker/builder/Dockerfile +++ /dev/null @@ -1,58 +0,0 @@ -FROM ubuntu:18.04 - -LABEL author="xiaobo " maintainer="Xiaobo Huang-Ming Huang " version="0.1.1" \ - description="This is a base image for building eosio/eos" - -RUN echo 'APT::Install-Recommends 0;' >> /etc/apt/apt.conf.d/01norecommends \ - && echo 'APT::Install-Suggests 0;' >> /etc/apt/apt.conf.d/01norecommends \ - && apt-get update \ - && DEBIAN_FRONTEND=noninteractive apt-get install -y sudo wget curl net-tools ca-certificates unzip gnupg - -RUN echo "deb http://apt.llvm.org/bionic/ llvm-toolchain-bionic main" >> /etc/apt/sources.list.d/llvm.list \ - && wget -O - https://apt.llvm.org/llvm-snapshot.gpg.key|sudo apt-key add - \ - && apt-get update \ - && DEBIAN_FRONTEND=noninteractive apt-get install -y git-core automake autoconf libtool build-essential pkg-config libtool \ - mpi-default-dev libicu-dev python-dev python3-dev libbz2-dev zlib1g-dev libssl-dev libgmp-dev \ - clang-4.0 lldb-4.0 lld-4.0 llvm-4.0-dev libclang-4.0-dev ninja-build libusb-1.0-0-dev libcurl4-gnutls-dev pkg-config \ - && rm -rf /var/lib/apt/lists/* - -RUN update-alternatives --install /usr/bin/clang clang /usr/lib/llvm-4.0/bin/clang 400 \ - && update-alternatives --install /usr/bin/clang++ clang++ /usr/lib/llvm-4.0/bin/clang++ 400 - -RUN wget https://cmake.org/files/v3.9/cmake-3.9.6-Linux-x86_64.sh \ - && bash cmake-3.9.6-Linux-x86_64.sh --prefix=/usr/local --exclude-subdir --skip-license \ - && rm cmake-3.9.6-Linux-x86_64.sh - -ENV CC clang -ENV CXX clang++ - -RUN wget https://dl.bintray.com/boostorg/release/1.67.0/source/boost_1_67_0.tar.bz2 -O - | tar -xj \ - && cd boost_1_67_0 \ - && ./bootstrap.sh --prefix=/usr/local \ - && echo 'using clang : 4.0 : clang++-4.0 ;' >> project-config.jam \ - && ./b2 -d0 -j$(nproc) --with-thread --with-date_time --with-system --with-filesystem --with-program_options \ - --with-signals --with-serialization --with-chrono --with-test --with-context --with-locale --with-coroutine --with-iostreams toolset=clang link=static install \ - && cd .. 
&& rm -rf boost_1_67_0 - -RUN wget https://github.com/mongodb/mongo-c-driver/releases/download/1.10.2/mongo-c-driver-1.10.2.tar.gz -O - | tar -xz \ - && cd mongo-c-driver-1.10.2 \ - && mkdir cmake-build && cd cmake-build \ - && cmake -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=/usr/local -DENABLE_BSON=ON \ - -DENABLE_SSL=OPENSSL -DENABLE_AUTOMATIC_INIT_AND_CLEANUP=OFF -DENABLE_STATIC=ON .. \ - && make -j$(nproc) \ - && make install \ - && cd ../../ && rm -rf mongo-c-driver-1.10.2 - -RUN git clone --depth 1 --single-branch --branch release_40 https://github.com/llvm-mirror/llvm.git \ - && git clone --depth 1 --single-branch --branch release_40 https://github.com/llvm-mirror/clang.git llvm/tools/clang \ - && cd llvm \ - && cmake -H. -Bbuild -GNinja -DCMAKE_INSTALL_PREFIX=/opt/wasm -DLLVM_TARGETS_TO_BUILD= -DLLVM_EXPERIMENTAL_TARGETS_TO_BUILD=WebAssembly -DCMAKE_BUILD_TYPE=Release \ - && cmake --build build --target install \ - && cd .. && rm -rf llvm - -RUN git clone --depth 1 -b releases/v3.3 https://github.com/mongodb/mongo-cxx-driver \ - && cd mongo-cxx-driver/build \ - && cmake -DBUILD_SHARED_LIBS=OFF -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=/usr/local .. \ - && make -j$(nproc) \ - && make install \ - && cd ../../ && rm -rf mongo-cxx-driver diff --git a/Docker/cleos.sh b/Docker/cleos.sh deleted file mode 100755 index 36246d27545..00000000000 --- a/Docker/cleos.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/bash - -# Usage: -# Go into cmd loop: sudo ./cleos.sh -# Run single cmd: sudo ./cleos.sh - -PREFIX="docker-compose exec nodeosd cleos" -if [ -z $1 ] ; then - while : - do - read -e -p "cleos " cmd - history -s "$cmd" - $PREFIX $cmd - done -else - $PREFIX "$@" -fi diff --git a/Docker/config.ini b/Docker/config.ini deleted file mode 100644 index a85918d236b..00000000000 --- a/Docker/config.ini +++ /dev/null @@ -1,145 +0,0 @@ -# the location of the blocks directory (absolute path or relative to application data dir) (eosio::chain_plugin) -blocks-dir = "blocks" - -# Pairs of [BLOCK_NUM,BLOCK_ID] that should be enforced as checkpoints. (eosio::chain_plugin) -# checkpoint = - -# Override default WASM runtime (eosio::chain_plugin) -# wasm-runtime = - -# Maximum size (in MB) of the chain state database (eosio::chain_plugin) -chain-state-db-size-mb = 8192 - -# Maximum size (in MB) of the reversible blocks database (eosio::chain_plugin) -reversible-blocks-db-size-mb = 340 - -# print contract's output to console (eosio::chain_plugin) -contracts-console = false - -# Account added to actor whitelist (may specify multiple times) (eosio::chain_plugin) -# actor-whitelist = - -# Account added to actor blacklist (may specify multiple times) (eosio::chain_plugin) -# actor-blacklist = - -# Contract account added to contract whitelist (may specify multiple times) (eosio::chain_plugin) -# contract-whitelist = - -# Contract account added to contract blacklist (may specify multiple times) (eosio::chain_plugin) -# contract-blacklist = - -# Track actions which match receiver:action:actor. Actor may be blank to include all. Receiver and Action may not be blank. (eosio::history_plugin) -# filter-on = - -# PEM encoded trusted root certificate (or path to file containing one) used to validate any TLS connections made. 
(may specify multiple times) -# (eosio::http_client_plugin) -# https-client-root-cert = - -# true: validate that the peer certificates are valid and trusted, false: ignore cert errors (eosio::http_client_plugin) -https-client-validate-peers = 1 - -# The local IP and port to listen for incoming http connections; set blank to disable. (eosio::http_plugin) -http-server-address = 0.0.0.0:8888 - -# The local IP and port to listen for incoming https connections; leave blank to disable. (eosio::http_plugin) -# https-server-address = - -# Filename with the certificate chain to present on https connections. PEM format. Required for https. (eosio::http_plugin) -# https-certificate-chain-file = - -# Filename with https private key in PEM format. Required for https (eosio::http_plugin) -# https-private-key-file = - -# Specify the Access-Control-Allow-Origin to be returned on each request. (eosio::http_plugin) -# access-control-allow-origin = - -# Specify the Access-Control-Allow-Headers to be returned on each request. (eosio::http_plugin) -# access-control-allow-headers = - -# Specify the Access-Control-Max-Age to be returned on each request. (eosio::http_plugin) -# access-control-max-age = - -# Specify if Access-Control-Allow-Credentials: true should be returned on each request. (eosio::http_plugin) -access-control-allow-credentials = false - -# The actual host:port used to listen for incoming p2p connections. (eosio::net_plugin) -p2p-listen-endpoint = 0.0.0.0:9876 - -# An externally accessible host:port for identifying this node. Defaults to p2p-listen-endpoint. (eosio::net_plugin) -# p2p-server-address = - -# The public endpoint of a peer node to connect to. Use multiple p2p-peer-address options as needed to compose a network. (eosio::net_plugin) -# p2p-peer-address = - -# Maximum number of client0nodes from any single IP address (eosio::net_plugin) -p2p-max-nodes-per-host = 1 - -# The name supplied to identify this node amongst the peers. (eosio::net_plugin) -agent-name = "EOS Test Agent" - -# Can be 'any' or 'producers' or 'specified' or 'none'. If 'specified', peer-key must be specified at least once. If only 'producers', peer-key is not required. 'producers' and 'specified' may be combined. (eosio::net_plugin) -allowed-connection = any - -# Optional public key of peer allowed to connect. May be used multiple times. (eosio::net_plugin) -# peer-key = - -# Tuple of [PublicKey, WIF private key] (may specify multiple times) (eosio::net_plugin) -# peer-private-key = - -# Maximum number of clients from which connections are accepted, use 0 for no limit (eosio::net_plugin) -max-clients = 25 - -# number of seconds to wait before cleaning up dead connections (eosio::net_plugin) -connection-cleanup-period = 30 - -# True to require exact match of peer network version. (eosio::net_plugin) -network-version-match = 0 - -# number of blocks to retrieve in a chunk from any individual peer during synchronization (eosio::net_plugin) -sync-fetch-span = 100 - -# Enable block production, even if the chain is stale. 
(eosio::producer_plugin) -enable-stale-production = false - -# Start this node in a state where production is paused (eosio::producer_plugin) -pause-on-startup = false - -# Limits the maximum time (in milliseconds) that is allowed a pushed transaction's code to execute before being considered invalid (eosio::producer_plugin) -max-transaction-time = 30 - -# Limits the maximum age (in seconds) of the DPOS Irreversible Block for a chain this node will produce blocks on (use negative value to indicate unlimited) (eosio::producer_plugin) -max-irreversible-block-age = -1 - -# ID of producer controlled by this node (e.g. inita; may specify multiple times) (eosio::producer_plugin) -# producer-name = -producer-name = eosio - -# (DEPRECATED - Use signature-provider instead) Tuple of [public key, WIF private key] (may specify multiple times) (eosio::producer_plugin) -# private-key = - -# Key=Value pairs in the form = -# Where: -# is a string form of a vaild EOSIO public key -# -# is a string in the form : -# -# is KEY, or KEOSD -# -# KEY: is a string form of a valid EOSIO private key which maps to the provided public key -# -# KEOSD: is the URL where keosd is available and the approptiate wallet(s) are unlocked (eosio::producer_plugin) -signature-provider = EOS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV=KEY:5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3 - -# Limits the maximum time (in milliseconds) that is allowd for sending blocks to a keosd provider for signing (eosio::producer_plugin) -keosd-provider-timeout = 5 - -# Lag in number of blocks from the head block when selecting the reference block for transactions (-1 means Last Irreversible Block) (eosio::txn_test_gen_plugin) -txn-reference-block-lag = 0 - -# eosio key that will be imported automatically when a wallet is created. (eosio::wallet_plugin) -# eosio-key = - -# Plugin(s) to enable, may be specified multiple times -# plugin = -plugin = eosio::chain_api_plugin -plugin = eosio::history_api_plugin diff --git a/Docker/dev/Dockerfile b/Docker/dev/Dockerfile deleted file mode 100644 index f2dea74ac6c..00000000000 --- a/Docker/dev/Dockerfile +++ /dev/null @@ -1,17 +0,0 @@ -FROM eosio/builder -ARG branch=master -ARG symbol=SYS - -RUN git clone -b $branch https://github.com/EOSIO/eos.git --recursive \ - && cd eos && echo "$branch:$(git rev-parse HEAD)" > /etc/eosio-version \ - && cmake -H. 
-B"/opt/eosio" -GNinja -DCMAKE_BUILD_TYPE=Release -DWASM_ROOT=/opt/wasm -DCMAKE_CXX_COMPILER=clang++ \ - -DCMAKE_C_COMPILER=clang -DCMAKE_INSTALL_PREFIX=/opt/eosio -DBUILD_MONGO_DB_PLUGIN=true -DCORE_SYMBOL_NAME=$symbol \ - && cmake --build /opt/eosio --target install \ - && cp /eos/Docker/config.ini / && ln -s /opt/eosio/contracts /contracts && cp /eos/Docker/nodeosd.sh /opt/eosio/bin/nodeosd.sh && ln -s /eos/tutorials /tutorials - -RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get -y install openssl ca-certificates vim psmisc python3-pip && rm -rf /var/lib/apt/lists/* -RUN pip3 install numpy -ENV EOSIO_ROOT=/opt/eosio -RUN chmod +x /opt/eosio/bin/nodeosd.sh -ENV LD_LIBRARY_PATH /usr/local/lib -ENV PATH /opt/eosio/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin diff --git a/Docker/docker-compose-latest.yml b/Docker/docker-compose-latest.yml deleted file mode 100644 index 7384e230cb8..00000000000 --- a/Docker/docker-compose-latest.yml +++ /dev/null @@ -1,33 +0,0 @@ -version: "3" - -services: - nodeosd: - image: eosio/eos:latest - command: /opt/eosio/bin/nodeosd.sh --data-dir /opt/eosio/bin/data-dir -e --http-alias=nodeosd:8888 --http-alias=127.0.0.1:8888 --http-alias=localhost:8888 - hostname: nodeosd - ports: - - 8888:8888 - - 9876:9876 - expose: - - "8888" - volumes: - - nodeos-data-volume:/opt/eosio/bin/data-dir - cap_add: - - IPC_LOCK - stop_grace_period: 10m - - keosd: - image: eosio/eos:latest - command: /opt/eosio/bin/keosd --wallet-dir /opt/eosio/bin/data-dir --http-server-address=127.0.0.1:8900 --http-alias=keosd:8900 --http-alias=localhost:8900 - hostname: keosd - links: - - nodeosd - volumes: - - keosd-data-volume:/opt/eosio/bin/data-dir - stop_grace_period: 10m - -volumes: - nodeos-data-volume: - external: true - keosd-data-volume: - external: true diff --git a/Docker/docker-compose.yml b/Docker/docker-compose.yml deleted file mode 100755 index a00ffffa4e4..00000000000 --- a/Docker/docker-compose.yml +++ /dev/null @@ -1,40 +0,0 @@ -version: "3" - -services: - builder: - build: - context: builder - image: eosio/builder - - nodeosd: - build: - context: . - image: eosio/eos - command: /opt/eosio/bin/nodeosd.sh --data-dir /opt/eosio/bin/data-dir -e --http-alias=nodeosd:8888 --http-alias=127.0.0.1:8888 --http-alias=localhost:8888 - hostname: nodeosd - ports: - - 8888:8888 - - 9876:9876 - expose: - - "8888" - volumes: - - nodeos-data-volume:/opt/eosio/bin/data-dir - cap_add: - - IPC_LOCK - stop_grace_period: 10m - - keosd: - image: eosio/eos - command: /opt/eosio/bin/keosd --wallet-dir /opt/eosio/bin/data-dir --http-server-address=127.0.0.1:8900 --http-alias=keosd:8900 --http-alias=localhost:8900 - hostname: keosd - links: - - nodeosd - volumes: - - keosd-data-volume:/opt/eosio/bin/data-dir - stop_grace_period: 10m - -volumes: - nodeos-data-volume: - external: true - keosd-data-volume: - external: true diff --git a/Docker/nodeosd.sh b/Docker/nodeosd.sh deleted file mode 100755 index 870548d6b6b..00000000000 --- a/Docker/nodeosd.sh +++ /dev/null @@ -1,37 +0,0 @@ -#!/bin/sh -cd /opt/eosio/bin - -if [ ! -d "/opt/eosio/bin/data-dir" ]; then - mkdir /opt/eosio/bin/data-dir -fi - -if [ -f '/opt/eosio/bin/data-dir/config.ini' ]; then - echo - else - cp /config.ini /opt/eosio/bin/data-dir -fi - -if [ -d '/opt/eosio/bin/data-dir/contracts' ]; then - echo - else - cp -r /contracts /opt/eosio/bin/data-dir -fi - -while :; do - case $1 in - --config-dir=?*) - CONFIG_DIR=${1#*=} - ;; - *) - break - esac - shift -done - -if [ ! 
"$CONFIG_DIR" ]; then - CONFIG_DIR="--config-dir=/opt/eosio/bin/data-dir" -else - CONFIG_DIR="" -fi - -exec /opt/eosio/bin/nodeos $CONFIG_DIR "$@" From c996927a0fd9860aa8e649b5e44f7d5764c80eea Mon Sep 17 00:00:00 2001 From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com> Date: Wed, 24 Apr 2019 18:17:03 -0400 Subject: [PATCH 511/680] remove old testnet.md file that isn't relevant --- testnet.md | 201 ----------------------------------------------------- 1 file changed, 201 deletions(-) delete mode 100644 testnet.md diff --git a/testnet.md b/testnet.md deleted file mode 100644 index b3cc58d8471..00000000000 --- a/testnet.md +++ /dev/null @@ -1,201 +0,0 @@ -# EOS Testnet -To date, all work done to experiment with the EOS blockchain has been performed using a single instance of eosd hosting all 21 block producers. While this is a perfectly valid solution for validating features of the blockchain, developing new contracts, or whatever, it does not scale. Nor does it expose the sort of issues raised when contract and block data must be shared across multiple instances. Providing the ability to scale involves deploying multiple eosd nodes across many hosts and lining then into a peer-to-peer (p2p) network. Composing this network involves tailoring and distributing configuration files, coordinating starts and stops and other tasks. - -Doing this manually is a tedious task and easily error prone. Fortunately a solution is provided, in the form of the Launcher application, described below. - -## Testnet nodes, networks, and topology -Before getting into the details of the EOS testnet, lets clarify some terms. In this document I use the terms "host" and "machine" fairly interchangeably. A host generally boils down to a single IP address, although in practice it could have more. - -The next term is "node." A node is an instance of the eosd executable configured to serve as 0 or more producers. There is not a one-to-one mapping between nodes and hosts, a host may serve more than one node, but one node cannot span more than one host. - -I use "local network" to refer to any group of nodes, whether on a single host or several, are all close in that access does not have to leave a secure network environment. - -Finally there is the idea of distributed networks that involve remote hosts. These may be hosts on which you may not have direct access for starting and stopping eosd instances, but with whom you may wish to collaborate for setting up a decentralized testnet. - -### Localhost networks -Running a testnet on a single machine is the quickest way to get started. As you will see below, this is the default mode for the Launcher application. You can set up a localhost network immediately by simply telling the launcher how many producing or non-producing nodes to activate, and perhaps what type of network topology to use. - -The downside is that you need a lot of hardware when running many nodes on a single host. Also the multiple nodes will contend with each other in terms of CPU cycles, limiting true concurrency, and also localhost network performance is much different from inter-host performance, even with very high speed lans. - -### Distributed networks -The most representative model of the live net is to spread the eosd nodes across many hosts. The Launcher app is able to start distributed nodes by the use of bash scripts pushed through ssh. 
In this case additional configuration is required to replace configured references to "localhost" or "127.0.0.1" with the actual host names or IP addresses of the various peer machines.
-
-Launching a distributed testnet requires the operator to have ssh access to all the remote machines, configured to authenticate without the need for a user-entered password. This configuration is described in detail below.
-
-In cases where a testnet spans multiple remote networks, a common launcher-defined configuration file may be shared externally between distributed operators, each being responsible for launching his or her own local network.
-
-Note that the Launcher will not push instances of eosd to the remote hosts; you must prepare the various test network hosts separately.
-
-### Network Topology
-Network topology or "shape" describes how the nodes are connected in order to share transaction and block data, and requests for the same. The idea behind varying network topology is that there is a trade-off between the number of times a node must send a message reporting a new transaction or block, and the number of times that message must be repeated to ensure all nodes know of it.
-
-The Launcher has definitions of two basic network "shapes" based on inter-nodal connections, which can be selected by a command line option. If you wish to create your own custom network topology, you can do so by supplying a JSON-formatted file. This file is typically an edited version of the template created by the launcher in "output" mode.
-
-#### Star network
-![](https://github.com/EOSIO/eos/raw/master/star.png)
-A "star" is intended to support a larger number of nodes in the testnet. In this case the number of peers connected to a node and the distribution of those nodes varies based on the number of nodes in the network.
-
-#### Mesh network
-![](https://github.com/EOSIO/eos/raw/master/mesh.png)
-In a "mesh" network, each node is connected to as many peer nodes as possible.
-
-#### Custom network shape
-![](custom.png)
-This is an example of a custom deployment where clusters of nodes are isolated except through a single crosslink.
-
-# The Launcher Application
-To address the complexity implied by distributing multiple eosd nodes across a LAN or a wider network, the launcher application was created.
-
-Based on a handful of command line arguments, the Launcher is able to compose per-node configuration files, distribute these files securely amongst the peer hosts, then start up the multiple instances of eosd.
-
-Eosd instances started this way have their output logged in individual text files. Finally, the launcher application is also able to shut down some or all of the test network.
-
-## Running the Launcher application
-
-The launcher program is used to configure and deploy producing and non-producing eosd nodes that talk to each other using configured routes. The configuration for each node is stored in separate directories, permitting multiple nodes to be active on the same host, assuming the machine has sufficient memory and disk space for multiple eosd instances. The launcher makes use of multiple configuration sources in order to deploy a testnet. A handful of command line arguments can be used to set up simple local networks.
-
-To support deploying distributed networks, the launcher will read more detailed configuration from a JSON file. You can use the launcher to create a default JSON file based on the command line options you supply.
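-
-For example, a minimal sketch of generating such a template (the option values are illustrative; this is the same invocation used for the fragment shown later in this document):
-
-```bash
-programs/launcher/launcher -p6 -s mesh -o testnet.json
-```
-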
Edit that file to substitute actual hostnames and other details as needed, then rerun the launcher supplying this file.
-
-For the moment the launcher only activates platform-native nodes; dockerized nodes will be added later. It should be straightforward to use the generated configuration files with dockerized nodes.
-
-## Launcher command line arguments
-Here is the current list of command line arguments recognized by the launcher.
-
-```
-launcher command line arguments:
-  -n [ --nodes ] arg (=1)               total number of nodes to configure and
-                                        launch
-  -p [ --pnodes ] arg (=1)              number of nodes that are producers
-  -d [ --delay ] arg (=0)               number of seconds to wait before
-                                        starting the next node. Used to
-                                        simulate a person keying in a series of
-                                        individual eosd startup command lines.
-  -s [ --shape ] arg (=star)            network topology, use "star"
-                                        "mesh" or give a filename for custom
-  -g [ --genesis ] arg (="./genesis.json")
-                                        set the path to genesis.json
-  -o [ --output ] arg                   save a copy of the generated topology
-                                        in this file
-  --skip-signature                      EOSD does not require transaction
-                                        signatures.
-  -i [ --timestamp ] arg                set the timestamp for the first block.
-                                        Use "now" to indicate the current time
-  -l [ --launch ] arg                   select a subset of nodes to launch.
-                                        Currently may be "all", "none", or
-                                        "local". If not set, the default is to
-                                        launch all unless an output file is
-                                        named, in which case it starts none.
-  -k [ --kill ] arg                     The launcher retrieves the previously
-                                        started process IDs and signals each
-                                        with the specified signum. Use 15 for a
-                                        sigterm and 9 for sigkill.
-  -h [ --help ]                         print this list
-```
-Note that if a testnet.json file is supplied as the `--shape` argument, then the `--nodes`, `--pnodes`, and `--genesis` arguments are all ignored.
-
-## The Generated Multihost Testnet Configuration File
-This is the file generated by running the following command:
-
-   `launcher --output [other options]`
-
-In this mode, the launcher does not activate any eosd instances; it produces a file with the given filename. This file is a JSON-formatted template that provides an easy means of customizing the network definition before launching it.
-
-The object described in this file is composed of a helper for using ssh, and a collection of testnet node descriptors. The node descriptors are listed as name/value pairs. Note that the names serve a dual purpose, acting as both the key in a map of node descriptors and as an alias for the node in the peer lists. For example:
-
-```
-{
-  "ssh_helper": {
-    "ssh_cmd": "/usr/bin/ssh",
-    "scp_cmd": "/usr/bin/scp",
-    "ssh_identity": "phil",
-    "ssh_args": "-i ~phil/.ssh/id-sample"
-  },
-```
-The ssh helper fields are paths to ssh and scp, an identity if necessary, and any optional arguments.
-
-```
-  "nodes": [[
-    "testnet_0",{
-      "genesis": "./genesis.json",
-      "remote": true,
-      "ssh_identity": "",
-      "ssh_args": "",
-      "eos_root_dir": "/home/phil/blockchain/eos",
-      "data_dir": "tn_data_0",
-      "hostname": "remoteserv",
-      "public_name": "remoteserv",
-      "p2p_port": 9876,
-      "http_port": 8888,
-      "filesize": 8192,
-      "keys": [{
-          "public_key": "EOS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV",
-          "wif_private_key": "5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3"
-        }
-      ],
-      "peers": [
-        "testnet_1",
-        "testnet_2",
-        "testnet_3",
-        "testnet_4",
-        "testnet_5"
-      ],
-      "producers": [
-        "inita",
-        "initg",
-        "initm",
-        "inits"
-      ]
-    }
-  ],[
-    "testnet_1",{
-
-```
-
-The rest of the testnet.json file is the collection of node descriptors.
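-
-Once the aliases, hostnames, and ports have been adjusted, the edited file can be handed back to the launcher through its `--shape` option; a sketch, assuming each operator starts only the nodes local to their own network:
-
-```bash
-programs/launcher/launcher -s testnet.json -l local
-```
-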
The fragment shown above was created with the command line `programs/launcher/launcher -p6 -s mesh -o testnet.json` and then edited to refer to a remote host named "remoteserv."
-
-### Elements Of The JSON File
-This table describes all of the key/value pairs used in the testnet.json file.
-
-|Value | Description
-| :------------ | :-----------
-| ssh_helper | a set of values used to facilitate the use of SSH and SCP
-| nodes | a collection of descriptors defining the eosd instances used to assemble this testnet. The names used as keys in this collection are also aliases used within as placeholders for peer nodes.
-
-|ssh_helper elements | Description
-| :---------- | :------------
-| ssh_cmd | path to the local ssh command
-| scp_cmd | path to the local scp command
-| ssh_args | any additional command line arguments needed to successfully connect to remote peers
-| ssh_identity | the user name to use when accessing the remote hosts
-
-|node elements | Description
-| :-------- | :----------
-| genesis | path to the genesis.json file. This should be the same file for all members of the testnet.
-| remote | specifies whether this node is in the local network or not. This flag ties in with the launch mode command line option (-l) to determine if the local launcher instance will attempt to start this node.
-| ssh_identity | a per-node override of the general ssh_identity defined above.
-| ssh_args | a per-node override of the general ssh_args
-| eos_root_dir | specifies the directory in which all eosd artifacts are located. This is required for any hosts that are not the local host.
-| data_dir | the root for the remaining node-specific settings below.
-| hostname | the domain name for the server, or its IP address.
-| public_name | possibly different from the hostname, this name will get substituted for the aliases when creating the per-node config.ini file's peer list.
-| p2p_port | combined with the public name to identify the endpoint listened on for peer connections. When multiple nodes share a host, the p2p_port is automatically incremented for each node.
-| http_port | defines the listen endpoint for the client API services
-| filesize | sets the capacity in megabytes for the size of the blockchain backing store file.
-| keys | specify the authentication tokens for this node.
-| peers | this list indicates the other nodes in the network to which this one actively connects. Since this file may be edited to alter the hostname, public name, or p2p port values, the peers list here holds aliases for the actual endpoints eventually written to the individual config.ini files.
-| producers | this list identifies which of the producers from the genesis.json file are held by this node. Note that the launcher uses a round-robin algorithm to spread the producer instances across the producing nodes.
-
-### Provisioning Distributed Servers
-The ssh_helper section of the testnet.json file contains the ssh elements necessary to connect and issue commands to other servers. In addition to the ssh_helper section, which provides access to global configuration settings, the per-node configuration may provide overriding identity and connection arguments.
-
-It is also necessary to provision the server by at least copying the eosd executable and the genesis.json file to their appropriate locations relative to some named EOS root directory. For example, I defined the EOS root to be `/home/phil/blockchain/eos`.
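-
-A minimal provisioning sketch under that assumption (the user name, host name, key file, and eosd path are taken from or implied by the examples above and are purely illustrative):
-
-```bash
-# one-time password-less ssh setup from the launching host
-ssh-keygen -t rsa -N '' -f ~/.ssh/id-sample
-ssh-copy-id -i ~/.ssh/id-sample phil@remoteserv
-
-# stage the executable and genesis file under the configured EOS root
-ssh phil@remoteserv 'mkdir -p /home/phil/blockchain/eos/programs/eosd'
-scp programs/eosd/eosd phil@remoteserv:/home/phil/blockchain/eos/programs/eosd/
-scp genesis.json phil@remoteserv:/home/phil/blockchain/eos/
-```
-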
When run, the launcher will run through a variety of shell commands using ssh and finally using scp to copy a config.ini file to the appropriate data directory on the remote. - -## Runtime Artifacts -The launcher app creates a separate date and configuration directory for each node instance. This directory is named `tn_data_` with n ranging from 0 to the number of nodes being launched. - -| Per-Node File | Description -| :------------ | :---------- -| config.ini | The eosd configuration file. -| eosd.pid | The process ID of the running eosd instance. -| blockchain/* | The blockchain backing store -| blocks/* | The blockchain log store -| stderr.txt | The cerr output from eosd. -| stdout.txt | The cout output from eosd. - -A file called "last_run.json" contains hints for a later instance of the launcher to be able to kill local and remote nodes when run with -k 15. From 8951d34de6638a3d7d550b0da54d3e873c31c342 Mon Sep 17 00:00:00 2001 From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com> Date: Wed, 24 Apr 2019 18:17:54 -0400 Subject: [PATCH 512/680] Remove stale license template --- HEADER | 23 ----------------------- 1 file changed, 23 deletions(-) delete mode 100644 HEADER diff --git a/HEADER b/HEADER deleted file mode 100644 index ff55f97b69f..00000000000 --- a/HEADER +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright (c) 2017, Respective Authors. - * - * The MIT License - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - * THE SOFTWARE. - */ From b8fee492303d0a12c48600a6af2fa5fccd29077d Mon Sep 17 00:00:00 2001 From: Bucky Kittinger Date: Wed, 24 Apr 2019 19:22:50 -0400 Subject: [PATCH 513/680] remove force flag and use new mac builder --- .buildkite/pipeline.yml | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml index f0404f90182..a4eb89b6d64 100644 --- a/.buildkite/pipeline.yml +++ b/.buildkite/pipeline.yml @@ -1,7 +1,7 @@ steps: - command: | # Amazon Linux 2 Build echo "+++ :hammer: Building" - ./scripts/eosio_build.sh -y -P -f + ./scripts/eosio_build.sh -y -P echo "--- :compression: Compressing build directory" tar -pczf build.tar.gz build if [[ ! -f build.tar.gz ]]; then echo 'ERROR: No build.tar.gz artifact found!' && exit 1; fi @@ -24,7 +24,7 @@ steps: - command: | # CentOS 7 Build echo "+++ :hammer: Building" - ./scripts/eosio_build.sh -y -P -f + ./scripts/eosio_build.sh -y -P echo "--- :compression: Compressing build directory" tar -pczf build.tar.gz build if [[ ! 
-f build.tar.gz ]]; then echo 'ERROR: No build.tar.gz artifact found!' && exit 1; fi @@ -47,7 +47,7 @@ steps: - command: | # Ubuntu 16.04 Build echo "+++ :hammer: Building" - ./scripts/eosio_build.sh -y -P -f + ./scripts/eosio_build.sh -y -P echo "--- :compression: Compressing build directory" tar -pczf build.tar.gz build if [[ ! -f build.tar.gz ]]; then echo 'ERROR: No build.tar.gz artifact found!' && exit 1; fi @@ -70,7 +70,7 @@ steps: - command: | # Ubuntu 18.04 Build echo "+++ :hammer: Building" - ./scripts/eosio_build.sh -y -P -f + ./scripts/eosio_build.sh -y -P echo "--- :compression: Compressing build directory" tar -pczf build.tar.gz build if [[ ! -f build.tar.gz ]]; then echo 'ERROR: No build.tar.gz artifact found!' && exit 1; fi @@ -97,13 +97,13 @@ steps: ln -s "$(pwd)" /data/job cd /data/job echo "+++ Building :hammer:" - ./scripts/eosio_build.sh -y -P -f + ./scripts/eosio_build.sh -y -P echo "--- Compressing build directory :compression:" tar -pczf build.tar.gz build if [[ ! -f build.tar.gz ]]; then echo 'ERROR: No build.tar.gz artifact found!' && exit 1; fi label: ":darwin: Mojave Build" agents: - - "role=builder-v2-1" + - "role=builder-v2-2" - "os=mojave" artifact_paths: "build.tar.gz" timeout: 120 From 3d7cc2a23ac9d3b5f295ef8a212e81b9b7aa4301 Mon Sep 17 00:00:00 2001 From: Bucky Kittinger Date: Wed, 24 Apr 2019 19:43:25 -0400 Subject: [PATCH 514/680] point tests to new mac builder and remove remnant flags to cmake --- .buildkite/pipeline.yml | 6 +++--- scripts/eosio_build.sh | 5 ++--- 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml index a4eb89b6d64..408c8f19677 100644 --- a/.buildkite/pipeline.yml +++ b/.buildkite/pipeline.yml @@ -291,7 +291,7 @@ steps: ./scripts/parallel-test.sh label: ":darwin: Mojave Tests" agents: - - "role=tester-v2-1" + - "role=tester-v2-2" - "os=mojave" timeout: 60 @@ -303,7 +303,7 @@ steps: ./scripts/serial-test.sh label: ":darwin: Mojave NP Tests" agents: - - "role=tester-v2-1" + - "role=tester-v2-2" - "os=mojave" timeout: 60 @@ -410,7 +410,7 @@ steps: bash generate_package.sh brew label: ":darwin: Mojave Package Builder" agents: - - "role=builder-v2-1" + - "role=builder-v2-2" - "os=mojave" artifact_paths: - "build/packages/*.tar.gz" diff --git a/scripts/eosio_build.sh b/scripts/eosio_build.sh index 717e723ed03..5c09ea6bae1 100755 --- a/scripts/eosio_build.sh +++ b/scripts/eosio_build.sh @@ -253,7 +253,6 @@ if [ $PIN_COMPILER ]; then BUILD_CLANG8=true CPP_COMP=${OPT_LOCATION}/clang8/bin/clang++ CC_COMP=${OPT_LOCATION}/clang8/bin/clang - PIN_COMPILER_CMAKE="-DEOSIO_PIN_COMPILER=1 -DLIBSTDCPP_DIR=${OPT_LOCATION}/gcc -DLIBCPP_DIR=${OPT_LOCATION}/clang8/" elif $NO_CPP17; then if [ $NONINTERACTIVE -eq 0 ]; then BUILD_CLANG8=true @@ -375,14 +374,14 @@ if [ $PIN_COMPILER ]; then -DOPENSSL_ROOT_DIR="${OPENSSL_ROOT_DIR}" -DBUILD_MONGO_DB_PLUGIN=$ENABLE_MONGO \ -DENABLE_COVERAGE_TESTING="${ENABLE_COVERAGE_TESTING}" -DBUILD_DOXYGEN="${DOXYGEN}" \ -DCMAKE_PREFIX_PATH=$PREFIX -DCMAKE_PREFIX_PATH=$OPT_LOCATION/llvm4\ - -DCMAKE_INSTALL_PREFIX=$OPT_LOCATION/eosio $LOCAL_CMAKE_FLAGS -DEOSIO_PIN_COMPILER=1 "${REPO_ROOT}" + -DCMAKE_INSTALL_PREFIX=$OPT_LOCATION/eosio $LOCAL_CMAKE_FLAGS "${REPO_ROOT}" else $CMAKE -DCMAKE_BUILD_TYPE="${CMAKE_BUILD_TYPE}" -DCMAKE_CXX_COMPILER="${CXX}" \ -DCMAKE_C_COMPILER="${CC}" -DCORE_SYMBOL_NAME="${CORE_SYMBOL_NAME}" \ -DOPENSSL_ROOT_DIR="${OPENSSL_ROOT_DIR}" -DBUILD_MONGO_DB_PLUGIN=$ENABLE_MONGO \ -DENABLE_COVERAGE_TESTING="${ENABLE_COVERAGE_TESTING}" 
-DBUILD_DOXYGEN="${DOXYGEN}" \ -DCMAKE_PREFIX_PATH=$PREFIX \ - -DCMAKE_INSTALL_PREFIX=$OPT_LOCATION/eosio $LOCAL_CMAKE_FLAGS $PIN_COMPILER_CMAKE "${REPO_ROOT}" + -DCMAKE_INSTALL_PREFIX=$OPT_LOCATION/eosio $LOCAL_CMAKE_FLAGS "${REPO_ROOT}" fi if [ $? -ne 0 ]; then exit -1; fi From d3d7416816d194b9112e010e73d79823f06708d7 Mon Sep 17 00:00:00 2001 From: Nathan Pierce Date: Wed, 24 Apr 2019 21:45:12 -0400 Subject: [PATCH 515/680] testing anka --- .buildkite/pipeline.yml | 62 ++++++++++++++++++++++++++++++----------- 1 file changed, 45 insertions(+), 17 deletions(-) diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml index 408c8f19677..41545db9ca6 100644 --- a/.buildkite/pipeline.yml +++ b/.buildkite/pipeline.yml @@ -1,4 +1,49 @@ steps: + +env: + ANKA_WORKDIR: "/data/job" + ANKA_MOJAVE_TEMPLATE: "10.14.4_6C_14G_23G" + ANKA_TEMPLATE_TAG: "clean::cicd::git-ssh::nas::brew" + CHECKSUMABLE: "scripts/eosio_build*" + MAC_TAG: "eosio_2-3" + +steps: + + - trigger: "mac-anka-fleet" + label: ":anka: Ensure Mojave Anka Template Tag Exists" + branches: "mac-anka-fleet" + async: false + build: + branch: "master" + env: + REPO: "${BUILDKITE_REPO}" + REPO_BRANCH: "${BUILDKITE_BRANCH}" + CHECKSUMABLE: "${CHECKSUMABLE}" + TEMPLATE: "${ANKA_MOJAVE_TEMPLATE}" + TEMPLATE_TAG: "${ANKA_TEMPLATE_TAG}" + TAG_COMMANDS: "CLONED_REPO_DIR/scripts/eosio_build.sh -y" # CLONED_REPO_DIR is where the repo is always cloned into + PROJECT_TAG: "${MAC_TAG}" + + - wait + + - command: | # macOS Mojave Build + echo "--- Creating symbolic link to job directory :file_folder:" + sleep 5 + ln -s "$(pwd)" /data/job + cd /data/job + echo "+++ Building :hammer:" + ./scripts/eosio_build.sh -y -P + echo "--- Compressing build directory :compression:" + tar -pczf build.tar.gz build + if [[ ! -f build.tar.gz ]]; then echo 'ERROR: No build.tar.gz artifact found!' && exit 1; fi + label: ":darwin: Mojave Build" + agents: + - "role=builder-v2-2" + - "os=mojave" + artifact_paths: "build.tar.gz" + timeout: 120 + + - command: | # Amazon Linux 2 Build echo "+++ :hammer: Building" ./scripts/eosio_build.sh -y -P @@ -91,23 +136,6 @@ steps: workdir: /data/job timeout: 120 - - command: | # macOS Mojave Build - echo "--- Creating symbolic link to job directory :file_folder:" - sleep 5 - ln -s "$(pwd)" /data/job - cd /data/job - echo "+++ Building :hammer:" - ./scripts/eosio_build.sh -y -P - echo "--- Compressing build directory :compression:" - tar -pczf build.tar.gz build - if [[ ! -f build.tar.gz ]]; then echo 'ERROR: No build.tar.gz artifact found!' 
&& exit 1; fi - label: ":darwin: Mojave Build" - agents: - - "role=builder-v2-2" - - "os=mojave" - artifact_paths: "build.tar.gz" - timeout: 120 - - wait # Amazon Linux 2 Tests From fcdd3470ee6f61fe13043f9a04e4ae4c3482385e Mon Sep 17 00:00:00 2001 From: Nathan Pierce Date: Wed, 24 Apr 2019 21:47:00 -0400 Subject: [PATCH 516/680] testing anka --- .buildkite/pipeline.yml | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml index 41545db9ca6..4baf2de95bc 100644 --- a/.buildkite/pipeline.yml +++ b/.buildkite/pipeline.yml @@ -1,5 +1,3 @@ -steps: - env: ANKA_WORKDIR: "/data/job" ANKA_MOJAVE_TEMPLATE: "10.14.4_6C_14G_23G" @@ -43,7 +41,7 @@ steps: artifact_paths: "build.tar.gz" timeout: 120 - + - command: | # Amazon Linux 2 Build echo "+++ :hammer: Building" ./scripts/eosio_build.sh -y -P From 04fc1d6039f7c278ddb13c47e340354cedeec68c Mon Sep 17 00:00:00 2001 From: Nathan Pierce Date: Wed, 24 Apr 2019 21:49:35 -0400 Subject: [PATCH 517/680] testing anka --- .buildkite/pipeline.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml index 4baf2de95bc..f6041cf4649 100644 --- a/.buildkite/pipeline.yml +++ b/.buildkite/pipeline.yml @@ -12,14 +12,14 @@ steps: branches: "mac-anka-fleet" async: false build: - branch: "master" + branch: "${BUILDKITE_BRANCH}" env: REPO: "${BUILDKITE_REPO}" REPO_BRANCH: "${BUILDKITE_BRANCH}" CHECKSUMABLE: "${CHECKSUMABLE}" TEMPLATE: "${ANKA_MOJAVE_TEMPLATE}" TEMPLATE_TAG: "${ANKA_TEMPLATE_TAG}" - TAG_COMMANDS: "CLONED_REPO_DIR/scripts/eosio_build.sh -y" # CLONED_REPO_DIR is where the repo is always cloned into + TAG_COMMANDS: "CLONED_REPO_DIR/scripts/eosio_build.sh -y -P -f" # CLONED_REPO_DIR is where the repo is always cloned into PROJECT_TAG: "${MAC_TAG}" - wait From 061079fa4e05f53a4b235bc6f595db5556d08725 Mon Sep 17 00:00:00 2001 From: Nathan Pierce Date: Wed, 24 Apr 2019 22:05:22 -0400 Subject: [PATCH 518/680] testing anka --- .buildkite/pipeline.yml | 834 ++++++++++++++++++++-------------------- 1 file changed, 417 insertions(+), 417 deletions(-) diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml index f6041cf4649..d6a6d0a059d 100644 --- a/.buildkite/pipeline.yml +++ b/.buildkite/pipeline.yml @@ -1,6 +1,6 @@ env: ANKA_WORKDIR: "/data/job" - ANKA_MOJAVE_TEMPLATE: "10.14.4_6C_14G_23G" + ANKA_MOJAVE_TEMPLATE: "10.14.4_6C_14G_40G" ANKA_TEMPLATE_TAG: "clean::cicd::git-ssh::nas::brew" CHECKSUMABLE: "scripts/eosio_build*" MAC_TAG: "eosio_2-3" @@ -19,446 +19,446 @@ steps: CHECKSUMABLE: "${CHECKSUMABLE}" TEMPLATE: "${ANKA_MOJAVE_TEMPLATE}" TEMPLATE_TAG: "${ANKA_TEMPLATE_TAG}" - TAG_COMMANDS: "CLONED_REPO_DIR/scripts/eosio_build.sh -y -P -f" # CLONED_REPO_DIR is where the repo is always cloned into + TAG_COMMANDS: "CLONED_REPO_DIR/scripts/eosio_build.sh -y -P -f" # CLONED_REPO_DIR IS REQUIRED and is where the repo is always cloned into PROJECT_TAG: "${MAC_TAG}" - - wait + # - wait - - command: | # macOS Mojave Build - echo "--- Creating symbolic link to job directory :file_folder:" - sleep 5 - ln -s "$(pwd)" /data/job - cd /data/job - echo "+++ Building :hammer:" - ./scripts/eosio_build.sh -y -P - echo "--- Compressing build directory :compression:" - tar -pczf build.tar.gz build - if [[ ! -f build.tar.gz ]]; then echo 'ERROR: No build.tar.gz artifact found!' 
&& exit 1; fi - label: ":darwin: Mojave Build" - agents: - - "role=builder-v2-2" - - "os=mojave" - artifact_paths: "build.tar.gz" - timeout: 120 + # - command: | # macOS Mojave Build + # echo "--- Creating symbolic link to job directory :file_folder:" + # sleep 5 + # ln -s "$(pwd)" /data/job + # cd /data/job + # echo "+++ Building :hammer:" + # ./scripts/eosio_build.sh -y -P + # echo "--- Compressing build directory :compression:" + # tar -pczf build.tar.gz build + # if [[ ! -f build.tar.gz ]]; then echo 'ERROR: No build.tar.gz artifact found!' && exit 1; fi + # label: ":darwin: Mojave Build" + # agents: + # - "role=builder-v2-2" + # - "os=mojave" + # artifact_paths: "build.tar.gz" + # timeout: 120 - - command: | # Amazon Linux 2 Build - echo "+++ :hammer: Building" - ./scripts/eosio_build.sh -y -P - echo "--- :compression: Compressing build directory" - tar -pczf build.tar.gz build - if [[ ! -f build.tar.gz ]]; then echo 'ERROR: No build.tar.gz artifact found!' && exit 1; fi - label: ":aws: Amazon Linux 2 Build" - agents: - queue: "automation-large-builder-fleet" - artifact_paths: "build.tar.gz" - plugins: - ecr#v1.1.4: - login: true - account_ids: "436617320021" - no-include-email: true - region: "us-west-2" - docker#v2.1.0: - debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-3" - propagate-environment: true - workdir: /data/job - timeout: 120 + # - command: | # Amazon Linux 2 Build + # echo "+++ :hammer: Building" + # ./scripts/eosio_build.sh -y -P + # echo "--- :compression: Compressing build directory" + # tar -pczf build.tar.gz build + # if [[ ! -f build.tar.gz ]]; then echo 'ERROR: No build.tar.gz artifact found!' && exit 1; fi + # label: ":aws: Amazon Linux 2 Build" + # agents: + # queue: "automation-large-builder-fleet" + # artifact_paths: "build.tar.gz" + # plugins: + # ecr#v1.1.4: + # login: true + # account_ids: "436617320021" + # no-include-email: true + # region: "us-west-2" + # docker#v2.1.0: + # debug: true + # image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-3" + # propagate-environment: true + # workdir: /data/job + # timeout: 120 - - command: | # CentOS 7 Build - echo "+++ :hammer: Building" - ./scripts/eosio_build.sh -y -P - echo "--- :compression: Compressing build directory" - tar -pczf build.tar.gz build - if [[ ! -f build.tar.gz ]]; then echo 'ERROR: No build.tar.gz artifact found!' && exit 1; fi - label: ":centos: CentOS 7 Build" - agents: - queue: "automation-large-builder-fleet" - artifact_paths: "build.tar.gz" - plugins: - ecr#v1.1.4: - login: true - account_ids: "436617320021" - no-include-email: true - region: "us-west-2" - docker#v2.1.0: - debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-3" - propagate-environment: true - workdir: /data/job - timeout: 120 + # - command: | # CentOS 7 Build + # echo "+++ :hammer: Building" + # ./scripts/eosio_build.sh -y -P + # echo "--- :compression: Compressing build directory" + # tar -pczf build.tar.gz build + # if [[ ! -f build.tar.gz ]]; then echo 'ERROR: No build.tar.gz artifact found!' 
&& exit 1; fi + # label: ":centos: CentOS 7 Build" + # agents: + # queue: "automation-large-builder-fleet" + # artifact_paths: "build.tar.gz" + # plugins: + # ecr#v1.1.4: + # login: true + # account_ids: "436617320021" + # no-include-email: true + # region: "us-west-2" + # docker#v2.1.0: + # debug: true + # image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-3" + # propagate-environment: true + # workdir: /data/job + # timeout: 120 - - command: | # Ubuntu 16.04 Build - echo "+++ :hammer: Building" - ./scripts/eosio_build.sh -y -P - echo "--- :compression: Compressing build directory" - tar -pczf build.tar.gz build - if [[ ! -f build.tar.gz ]]; then echo 'ERROR: No build.tar.gz artifact found!' && exit 1; fi - label: ":ubuntu: Ubuntu 16.04 Build" - agents: - queue: "automation-large-builder-fleet" - artifact_paths: "build.tar.gz" - plugins: - ecr#v1.1.4: - login: true - account_ids: "436617320021" - no-include-email: true - region: "us-west-2" - docker#v2.1.0: - debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu16_2-3" - propagate-environment: true - workdir: /data/job - timeout: 120 + # - command: | # Ubuntu 16.04 Build + # echo "+++ :hammer: Building" + # ./scripts/eosio_build.sh -y -P + # echo "--- :compression: Compressing build directory" + # tar -pczf build.tar.gz build + # if [[ ! -f build.tar.gz ]]; then echo 'ERROR: No build.tar.gz artifact found!' && exit 1; fi + # label: ":ubuntu: Ubuntu 16.04 Build" + # agents: + # queue: "automation-large-builder-fleet" + # artifact_paths: "build.tar.gz" + # plugins: + # ecr#v1.1.4: + # login: true + # account_ids: "436617320021" + # no-include-email: true + # region: "us-west-2" + # docker#v2.1.0: + # debug: true + # image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu16_2-3" + # propagate-environment: true + # workdir: /data/job + # timeout: 120 - - command: | # Ubuntu 18.04 Build - echo "+++ :hammer: Building" - ./scripts/eosio_build.sh -y -P - echo "--- :compression: Compressing build directory" - tar -pczf build.tar.gz build - if [[ ! -f build.tar.gz ]]; then echo 'ERROR: No build.tar.gz artifact found!' && exit 1; fi - label: ":ubuntu: Ubuntu 18.04 Build" - agents: - queue: "automation-large-builder-fleet" - artifact_paths: "build.tar.gz" - plugins: - ecr#v1.1.4: - login: true - account_ids: "436617320021" - no-include-email: true - region: "us-west-2" - docker#v2.1.0: - debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-3" - propagate-environment: true - workdir: /data/job - timeout: 120 + # - command: | # Ubuntu 18.04 Build + # echo "+++ :hammer: Building" + # ./scripts/eosio_build.sh -y -P + # echo "--- :compression: Compressing build directory" + # tar -pczf build.tar.gz build + # if [[ ! -f build.tar.gz ]]; then echo 'ERROR: No build.tar.gz artifact found!' && exit 1; fi + # label: ":ubuntu: Ubuntu 18.04 Build" + # agents: + # queue: "automation-large-builder-fleet" + # artifact_paths: "build.tar.gz" + # plugins: + # ecr#v1.1.4: + # login: true + # account_ids: "436617320021" + # no-include-email: true + # region: "us-west-2" + # docker#v2.1.0: + # debug: true + # image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-3" + # propagate-environment: true + # workdir: /data/job + # timeout: 120 - - wait + # - wait - # Amazon Linux 2 Tests - - command: | - echo "--- :arrow_down: Downloading Build Directory" - buildkite-agent artifact download "build.tar.gz" . 
--step ":aws: Amazon Linux 2 Build" - echo "+++ :microscope: Running Tests" - ./scripts/parallel-test.sh - label: ":aws: Amazon Linux 2 Tests" - agents: - queue: "automation-large-builder-fleet" - plugins: - ecr#v1.1.4: - login: true - account_ids: "436617320021" - no-include-email: true - region: "us-west-2" - docker#v2.1.0: - debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-3" - propagate-environment: true - workdir: /data/job - timeout: 60 + # # Amazon Linux 2 Tests + # - command: | + # echo "--- :arrow_down: Downloading Build Directory" + # buildkite-agent artifact download "build.tar.gz" . --step ":aws: Amazon Linux 2 Build" + # echo "+++ :microscope: Running Tests" + # ./scripts/parallel-test.sh + # label: ":aws: Amazon Linux 2 Tests" + # agents: + # queue: "automation-large-builder-fleet" + # plugins: + # ecr#v1.1.4: + # login: true + # account_ids: "436617320021" + # no-include-email: true + # region: "us-west-2" + # docker#v2.1.0: + # debug: true + # image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-3" + # propagate-environment: true + # workdir: /data/job + # timeout: 60 - - command: | - echo "--- :arrow_down: Downloading Build Directory" - buildkite-agent artifact download "build.tar.gz" . --step ":aws: Amazon Linux 2 Build" - echo "+++ :microscope: Running Tests" - ./scripts/serial-test.sh - label: ":aws: Amazon Linux 2 NP Tests" - agents: - queue: "automation-large-builder-fleet" - plugins: - ecr#v1.1.4: - login: true - account_ids: "436617320021" - no-include-email: true - region: "us-west-2" - docker#v2.1.0: - debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-3" - propagate-environment: true - workdir: /data/job - timeout: 60 + # - command: | + # echo "--- :arrow_down: Downloading Build Directory" + # buildkite-agent artifact download "build.tar.gz" . --step ":aws: Amazon Linux 2 Build" + # echo "+++ :microscope: Running Tests" + # ./scripts/serial-test.sh + # label: ":aws: Amazon Linux 2 NP Tests" + # agents: + # queue: "automation-large-builder-fleet" + # plugins: + # ecr#v1.1.4: + # login: true + # account_ids: "436617320021" + # no-include-email: true + # region: "us-west-2" + # docker#v2.1.0: + # debug: true + # image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-3" + # propagate-environment: true + # workdir: /data/job + # timeout: 60 - # centOS 7 Tests - - command: | - echo "--- :arrow_down: Downloading Build Directory" - buildkite-agent artifact download "build.tar.gz" . --step ":centos: CentOS 7 Build" - echo "+++ :microscope: Running Tests" - ./scripts/parallel-test.sh - label: ":centos: CentOS 7 Tests" - agents: - queue: "automation-large-builder-fleet" - plugins: - ecr#v1.1.4: - login: true - account_ids: "436617320021" - no-include-email: true - region: "us-west-2" - docker#v2.1.0: - debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-3" - propagate-environment: true - workdir: /data/job - timeout: 60 + # # centOS 7 Tests + # - command: | + # echo "--- :arrow_down: Downloading Build Directory" + # buildkite-agent artifact download "build.tar.gz" . 
--step ":centos: CentOS 7 Build" + # echo "+++ :microscope: Running Tests" + # ./scripts/parallel-test.sh + # label: ":centos: CentOS 7 Tests" + # agents: + # queue: "automation-large-builder-fleet" + # plugins: + # ecr#v1.1.4: + # login: true + # account_ids: "436617320021" + # no-include-email: true + # region: "us-west-2" + # docker#v2.1.0: + # debug: true + # image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-3" + # propagate-environment: true + # workdir: /data/job + # timeout: 60 - - command: | - echo "--- :arrow_down: Downloading Build Directory" - buildkite-agent artifact download "build.tar.gz" . --step ":centos: CentOS 7 Build" - echo "+++ :microscope: Running Tests" - ./scripts/serial-test.sh - label: ":centos: CentOS 7 NP Tests" - agents: - queue: "automation-large-builder-fleet" - plugins: - ecr#v1.1.4: - login: true - account_ids: "436617320021" - no-include-email: true - region: "us-west-2" - docker#v2.1.0: - debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-3" - propagate-environment: true - workdir: /data/job - timeout: 60 + # - command: | + # echo "--- :arrow_down: Downloading Build Directory" + # buildkite-agent artifact download "build.tar.gz" . --step ":centos: CentOS 7 Build" + # echo "+++ :microscope: Running Tests" + # ./scripts/serial-test.sh + # label: ":centos: CentOS 7 NP Tests" + # agents: + # queue: "automation-large-builder-fleet" + # plugins: + # ecr#v1.1.4: + # login: true + # account_ids: "436617320021" + # no-include-email: true + # region: "us-west-2" + # docker#v2.1.0: + # debug: true + # image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-3" + # propagate-environment: true + # workdir: /data/job + # timeout: 60 - # Ubuntu 16.04 Tests - - command: | - echo "--- :arrow_down: Downloading Build Directory" - buildkite-agent artifact download "build.tar.gz" . --step ":ubuntu: Ubuntu 16.04 Build" - echo "+++ :microscope: Running Tests" - ./scripts/parallel-test.sh - label: ":ubuntu: Ubuntu 16.04 Tests" - agents: - queue: "automation-large-builder-fleet" - plugins: - ecr#v1.1.4: - login: true - account_ids: "436617320021" - no-include-email: true - region: "us-west-2" - docker#v2.1.0: - debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu16_2-3" - propagate-environment: true - workdir: /data/job - timeout: 60 + # # Ubuntu 16.04 Tests + # - command: | + # echo "--- :arrow_down: Downloading Build Directory" + # buildkite-agent artifact download "build.tar.gz" . --step ":ubuntu: Ubuntu 16.04 Build" + # echo "+++ :microscope: Running Tests" + # ./scripts/parallel-test.sh + # label: ":ubuntu: Ubuntu 16.04 Tests" + # agents: + # queue: "automation-large-builder-fleet" + # plugins: + # ecr#v1.1.4: + # login: true + # account_ids: "436617320021" + # no-include-email: true + # region: "us-west-2" + # docker#v2.1.0: + # debug: true + # image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu16_2-3" + # propagate-environment: true + # workdir: /data/job + # timeout: 60 - - command: | - echo "--- :arrow_down: Downloading Build Directory" - buildkite-agent artifact download "build.tar.gz" . 
--step ":ubuntu: Ubuntu 16.04 Build" - echo "+++ :microscope: Running Tests" - ./scripts/serial-test.sh - label: ":ubuntu: Ubuntu 16.04 NP Tests" - agents: - queue: "automation-large-builder-fleet" - plugins: - ecr#v1.1.4: - login: true - account_ids: "436617320021" - no-include-email: true - region: "us-west-2" - docker#v2.1.0: - debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu16_2-3" - propagate-environment: true - workdir: /data/job - timeout: 60 + # - command: | + # echo "--- :arrow_down: Downloading Build Directory" + # buildkite-agent artifact download "build.tar.gz" . --step ":ubuntu: Ubuntu 16.04 Build" + # echo "+++ :microscope: Running Tests" + # ./scripts/serial-test.sh + # label: ":ubuntu: Ubuntu 16.04 NP Tests" + # agents: + # queue: "automation-large-builder-fleet" + # plugins: + # ecr#v1.1.4: + # login: true + # account_ids: "436617320021" + # no-include-email: true + # region: "us-west-2" + # docker#v2.1.0: + # debug: true + # image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu16_2-3" + # propagate-environment: true + # workdir: /data/job + # timeout: 60 - # Ubuntu 18.04 Tests - - command: | - echo "--- :arrow_down: Downloading Build Directory" - buildkite-agent artifact download "build.tar.gz" . --step ":ubuntu: Ubuntu 18.04 Build" - echo "+++ :microscope: Running Tests" - ./scripts/parallel-test.sh - label: ":ubuntu: Ubuntu 18.04 Tests" - agents: - queue: "automation-large-builder-fleet" - plugins: - ecr#v1.1.4: - login: true - account_ids: "436617320021" - no-include-email: true - region: "us-west-2" - docker#v2.1.0: - debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-3" - propagate-environment: true - workdir: /data/job - timeout: 60 + # # Ubuntu 18.04 Tests + # - command: | + # echo "--- :arrow_down: Downloading Build Directory" + # buildkite-agent artifact download "build.tar.gz" . --step ":ubuntu: Ubuntu 18.04 Build" + # echo "+++ :microscope: Running Tests" + # ./scripts/parallel-test.sh + # label: ":ubuntu: Ubuntu 18.04 Tests" + # agents: + # queue: "automation-large-builder-fleet" + # plugins: + # ecr#v1.1.4: + # login: true + # account_ids: "436617320021" + # no-include-email: true + # region: "us-west-2" + # docker#v2.1.0: + # debug: true + # image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-3" + # propagate-environment: true + # workdir: /data/job + # timeout: 60 - - command: | - echo "--- :arrow_down: Downloading Build Directory" - buildkite-agent artifact download "build.tar.gz" . --step ":ubuntu: Ubuntu 18.04 Build" - echo "+++ :microscope: Running Tests" - ./scripts/serial-test.sh - label: ":ubuntu: Ubuntu 18.04 NP Tests" - agents: - queue: "automation-large-builder-fleet" - plugins: - ecr#v1.1.4: - login: true - account_ids: "436617320021" - no-include-email: true - region: "us-west-2" - docker#v2.1.0: - debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-3" - propagate-environment: true - workdir: /data/job - timeout: 60 + # - command: | + # echo "--- :arrow_down: Downloading Build Directory" + # buildkite-agent artifact download "build.tar.gz" . 
--step ":ubuntu: Ubuntu 18.04 Build" + # echo "+++ :microscope: Running Tests" + # ./scripts/serial-test.sh + # label: ":ubuntu: Ubuntu 18.04 NP Tests" + # agents: + # queue: "automation-large-builder-fleet" + # plugins: + # ecr#v1.1.4: + # login: true + # account_ids: "436617320021" + # no-include-email: true + # region: "us-west-2" + # docker#v2.1.0: + # debug: true + # image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-3" + # propagate-environment: true + # workdir: /data/job + # timeout: 60 - # Mojave Tests - - command: | - echo "--- :arrow_down: Downloading Build Directory" - buildkite-agent artifact download "build.tar.gz" . --step ":darwin: Mojave Build" - echo "+++ :microscope: Running Tests" - ln -s "$(pwd)" /data/job - ./scripts/parallel-test.sh - label: ":darwin: Mojave Tests" - agents: - - "role=tester-v2-2" - - "os=mojave" - timeout: 60 + # # Mojave Tests + # - command: | + # echo "--- :arrow_down: Downloading Build Directory" + # buildkite-agent artifact download "build.tar.gz" . --step ":darwin: Mojave Build" + # echo "+++ :microscope: Running Tests" + # ln -s "$(pwd)" /data/job + # ./scripts/parallel-test.sh + # label: ":darwin: Mojave Tests" + # agents: + # - "role=tester-v2-2" + # - "os=mojave" + # timeout: 60 - - command: | - echo "--- :arrow_down: Downloading Build Directory" - buildkite-agent artifact download "build.tar.gz" . --step ":darwin: Mojave Build" - echo "+++ :microscope: Running Tests" - ln -s "$(pwd)" /data/job - ./scripts/serial-test.sh - label: ":darwin: Mojave NP Tests" - agents: - - "role=tester-v2-2" - - "os=mojave" - timeout: 60 + # - command: | + # echo "--- :arrow_down: Downloading Build Directory" + # buildkite-agent artifact download "build.tar.gz" . --step ":darwin: Mojave Build" + # echo "+++ :microscope: Running Tests" + # ln -s "$(pwd)" /data/job + # ./scripts/serial-test.sh + # label: ":darwin: Mojave NP Tests" + # agents: + # - "role=tester-v2-2" + # - "os=mojave" + # timeout: 60 - - wait + # - wait - - command: | # CentOS 7 Package Builder - echo "--- :arrow_down: Downloading build directory" - buildkite-agent artifact download "build.tar.gz" . --step ":centos: CentOS 7 Build" - tar -zxf build.tar.gz - echo "+++ :microscope: Starting package build" - yum install -y rpm-build - mkdir -p /root/rpmbuild/BUILD - mkdir -p /root/rpmbuild/BUILDROOT - mkdir -p /root/rpmbuild/RPMS - mkdir -p /root/rpmbuild/SOURCES - mkdir -p /root/rpmbuild/SPECS - mkdir -p /root/rpmbuild/SRPMS - cd /data/job/build/packages - bash generate_package.sh rpm - label: ":centos: CentOS 7 Package Builder" - agents: - queue: "automation-large-builder-fleet" - artifact_paths: - - "build/packages/*.rpm" - plugins: - ecr#v1.1.4: - login: true - account_ids: "436617320021" - no-include-email: true - region: "us-west-2" - docker#v2.1.0: - debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-3" - propagate-environment: true - workdir: /data/job - env: - OS: "el7" - PKGTYPE: "rpm" - timeout: 60 + # - command: | # CentOS 7 Package Builder + # echo "--- :arrow_down: Downloading build directory" + # buildkite-agent artifact download "build.tar.gz" . 
--step ":centos: CentOS 7 Build" + # tar -zxf build.tar.gz + # echo "+++ :microscope: Starting package build" + # yum install -y rpm-build + # mkdir -p /root/rpmbuild/BUILD + # mkdir -p /root/rpmbuild/BUILDROOT + # mkdir -p /root/rpmbuild/RPMS + # mkdir -p /root/rpmbuild/SOURCES + # mkdir -p /root/rpmbuild/SPECS + # mkdir -p /root/rpmbuild/SRPMS + # cd /data/job/build/packages + # bash generate_package.sh rpm + # label: ":centos: CentOS 7 Package Builder" + # agents: + # queue: "automation-large-builder-fleet" + # artifact_paths: + # - "build/packages/*.rpm" + # plugins: + # ecr#v1.1.4: + # login: true + # account_ids: "436617320021" + # no-include-email: true + # region: "us-west-2" + # docker#v2.1.0: + # debug: true + # image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-3" + # propagate-environment: true + # workdir: /data/job + # env: + # OS: "el7" + # PKGTYPE: "rpm" + # timeout: 60 - - command: | # Ubuntu 16.04 Package Builder - echo "--- :arrow_down: Downloading build directory" - buildkite-agent artifact download "build.tar.gz" . --step ":ubuntu: Ubuntu 16.04 Build" - tar -zxf build.tar.gz - echo "+++ :microscope: Starting package build" - cd /data/job/build/packages - bash generate_package.sh deb - label: ":ubuntu: Ubuntu 16.04 Package Builder" - agents: - queue: "automation-large-builder-fleet" - artifact_paths: - - "build/packages/*.deb" - plugins: - ecr#v1.1.4: - login: true - account_ids: "436617320021" - no-include-email: true - region: "us-west-2" - docker#v2.1.0: - debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu16_2-3" - propagate-environment: true - workdir: /data/job - env: - OS: "ubuntu-16.04" - PKGTYPE: "deb" - timeout: 60 + # - command: | # Ubuntu 16.04 Package Builder + # echo "--- :arrow_down: Downloading build directory" + # buildkite-agent artifact download "build.tar.gz" . --step ":ubuntu: Ubuntu 16.04 Build" + # tar -zxf build.tar.gz + # echo "+++ :microscope: Starting package build" + # cd /data/job/build/packages + # bash generate_package.sh deb + # label: ":ubuntu: Ubuntu 16.04 Package Builder" + # agents: + # queue: "automation-large-builder-fleet" + # artifact_paths: + # - "build/packages/*.deb" + # plugins: + # ecr#v1.1.4: + # login: true + # account_ids: "436617320021" + # no-include-email: true + # region: "us-west-2" + # docker#v2.1.0: + # debug: true + # image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu16_2-3" + # propagate-environment: true + # workdir: /data/job + # env: + # OS: "ubuntu-16.04" + # PKGTYPE: "deb" + # timeout: 60 - - command: | # Ubuntu 18.04 Package Builder - echo "--- :arrow_down: Downloading build directory" - buildkite-agent artifact download "build.tar.gz" . --step ":ubuntu: Ubuntu 18.04 Build" - tar -zxf build.tar.gz - echo "+++ :microscope: Starting package build" - cd /data/job/build/packages - bash generate_package.sh deb - label: ":ubuntu: Ubuntu 18.04 Package Builder" - agents: - queue: "automation-large-builder-fleet" - artifact_paths: - - "build/packages/*.deb" - plugins: - ecr#v1.1.4: - login: true - account_ids: "436617320021" - no-include-email: true - region: "us-west-2" - docker#v2.1.0: - debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-3" - propagate-environment: true - workdir: /data/job - env: - OS: "ubuntu-18.04" - PKGTYPE: "deb" - timeout: 60 + # - command: | # Ubuntu 18.04 Package Builder + # echo "--- :arrow_down: Downloading build directory" + # buildkite-agent artifact download "build.tar.gz" . 
--step ":ubuntu: Ubuntu 18.04 Build" + # tar -zxf build.tar.gz + # echo "+++ :microscope: Starting package build" + # cd /data/job/build/packages + # bash generate_package.sh deb + # label: ":ubuntu: Ubuntu 18.04 Package Builder" + # agents: + # queue: "automation-large-builder-fleet" + # artifact_paths: + # - "build/packages/*.deb" + # plugins: + # ecr#v1.1.4: + # login: true + # account_ids: "436617320021" + # no-include-email: true + # region: "us-west-2" + # docker#v2.1.0: + # debug: true + # image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-3" + # propagate-environment: true + # workdir: /data/job + # env: + # OS: "ubuntu-18.04" + # PKGTYPE: "deb" + # timeout: 60 - - command: | # macOS Mojave Package Builder - echo "--- :arrow_down: Downloading build directory" - buildkite-agent artifact download "build.tar.gz" . --step ":darwin: Mojave Build" - tar -zxf build.tar.gz - echo "+++ :microscope: Starting package build" - ln -s "$(pwd)" /data/job - cd /data/job/build/packages - bash generate_package.sh brew - label: ":darwin: Mojave Package Builder" - agents: - - "role=builder-v2-2" - - "os=mojave" - artifact_paths: - - "build/packages/*.tar.gz" - - "build/packages/*.rb" - timeout: 60 + # - command: | # macOS Mojave Package Builder + # echo "--- :arrow_down: Downloading build directory" + # buildkite-agent artifact download "build.tar.gz" . --step ":darwin: Mojave Build" + # tar -zxf build.tar.gz + # echo "+++ :microscope: Starting package build" + # ln -s "$(pwd)" /data/job + # cd /data/job/build/packages + # bash generate_package.sh brew + # label: ":darwin: Mojave Package Builder" + # agents: + # - "role=builder-v2-2" + # - "os=mojave" + # artifact_paths: + # - "build/packages/*.tar.gz" + # - "build/packages/*.rb" + # timeout: 60 - - wait + # - wait - - command: | # Brew Updater - echo "--- :arrow_down: Downloading brew files" - buildkite-agent artifact download "build/packages/eosio.rb" . --step ":darwin: Mojave Package Builder" - label: ":darwin: Brew Updater" - agents: - queue: "automation-large-builder-fleet" - artifact_paths: - - "build/packages/eosio.rb" - timeout: 5 + # - command: | # Brew Updater + # echo "--- :arrow_down: Downloading brew files" + # buildkite-agent artifact download "build/packages/eosio.rb" . 
--step ":darwin: Mojave Package Builder" + # label: ":darwin: Brew Updater" + # agents: + # queue: "automation-large-builder-fleet" + # artifact_paths: + # - "build/packages/eosio.rb" + # timeout: 5 - - command: | # Git Submodule Regression Check - echo "+++ :microscope: Running git submodule regression check" && \ - ./scripts/submodule_check.sh - label: "Git Submodule Regression Check" - agents: - queue: "automation-large-builder-fleet" - timeout: 5 + # - command: | # Git Submodule Regression Check + # echo "+++ :microscope: Running git submodule regression check" && \ + # ./scripts/submodule_check.sh + # label: "Git Submodule Regression Check" + # agents: + # queue: "automation-large-builder-fleet" + # timeout: 5 From a0194afec48333ee583c662e2a2d88b56dd7ae58 Mon Sep 17 00:00:00 2001 From: Nathan Pierce Date: Wed, 24 Apr 2019 22:06:21 -0400 Subject: [PATCH 519/680] testing anka --- .buildkite/pipeline.yml | 8 -------- 1 file changed, 8 deletions(-) diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml index d6a6d0a059d..6ab14ed02a9 100644 --- a/.buildkite/pipeline.yml +++ b/.buildkite/pipeline.yml @@ -13,14 +13,6 @@ steps: async: false build: branch: "${BUILDKITE_BRANCH}" - env: - REPO: "${BUILDKITE_REPO}" - REPO_BRANCH: "${BUILDKITE_BRANCH}" - CHECKSUMABLE: "${CHECKSUMABLE}" - TEMPLATE: "${ANKA_MOJAVE_TEMPLATE}" - TEMPLATE_TAG: "${ANKA_TEMPLATE_TAG}" - TAG_COMMANDS: "CLONED_REPO_DIR/scripts/eosio_build.sh -y -P -f" # CLONED_REPO_DIR IS REQUIRED and is where the repo is always cloned into - PROJECT_TAG: "${MAC_TAG}" # - wait From cf150490a1f21de73aaaad8c8ed0539ce4f2cba6 Mon Sep 17 00:00:00 2001 From: Nathan Pierce Date: Wed, 24 Apr 2019 22:18:07 -0400 Subject: [PATCH 520/680] testing anka --- .buildkite/pipeline.yml | 111 +++++++++++++++++++++++++--------------- 1 file changed, 69 insertions(+), 42 deletions(-) diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml index 6ab14ed02a9..45667c37565 100644 --- a/.buildkite/pipeline.yml +++ b/.buildkite/pipeline.yml @@ -9,30 +9,36 @@ steps: - trigger: "mac-anka-fleet" label: ":anka: Ensure Mojave Anka Template Tag Exists" - branches: "mac-anka-fleet" + branches: "*" async: false build: branch: "${BUILDKITE_BRANCH}" + env: + REPO: "${BUILDKITE_REPO}" + REPO_BRANCH: "${BUILDKITE_BRANCH}" + CHECKSUMABLE: "${CHECKSUMABLE}" + TEMPLATE: "${ANKA_MOJAVE_TEMPLATE}" + TEMPLATE_TAG: "${ANKA_TEMPLATE_TAG}" + TAG_COMMANDS: "CLONED_REPO_DIR/scripts/eosio_build.sh -y -P -f" # CLONED_REPO_DIR IS REQUIRED and is where the repo is always cloned into + PROJECT_TAG: "${MAC_TAG}" - # - wait - - # - command: | # macOS Mojave Build - # echo "--- Creating symbolic link to job directory :file_folder:" - # sleep 5 - # ln -s "$(pwd)" /data/job - # cd /data/job - # echo "+++ Building :hammer:" - # ./scripts/eosio_build.sh -y -P - # echo "--- Compressing build directory :compression:" - # tar -pczf build.tar.gz build - # if [[ ! -f build.tar.gz ]]; then echo 'ERROR: No build.tar.gz artifact found!' 
&& exit 1; fi - # label: ":darwin: Mojave Build" - # agents: - # - "role=builder-v2-2" - # - "os=mojave" - # artifact_paths: "build.tar.gz" - # timeout: 120 + - wait + - label: ":darwin: [Darwin] Mojave Build" + command: + - "./scripts/eosio_build.sh -y -P" + - "tar -pczf /Network/NAS/MAC_FLEET/BUILDKITE/artifacts/${ANKA_MOJAVE_TEMPLATE}-${BUILDKITE_PIPELINE_SLUG}-${BUILDKITE_BUILD_ID}.tar.gz build" + plugins: + chef/anka#v0.4.3: + vm-name: $ANKA_MOJAVE_TEMPLATE + vm-registry-tag: "${ANKA_TEMPLATE_TAG}::${MAC_TAG}" + workdir: $ANKA_WORKDIR + always-pull: "true" + debug: true + wait-network: true + agents: + - "queue=mac-anka-node-fleet" + timeout: 60 # - command: | # Amazon Linux 2 Build # echo "+++ :hammer: Building" @@ -300,30 +306,51 @@ steps: # workdir: /data/job # timeout: 60 - # # Mojave Tests - # - command: | - # echo "--- :arrow_down: Downloading Build Directory" - # buildkite-agent artifact download "build.tar.gz" . --step ":darwin: Mojave Build" - # echo "+++ :microscope: Running Tests" - # ln -s "$(pwd)" /data/job - # ./scripts/parallel-test.sh - # label: ":darwin: Mojave Tests" - # agents: - # - "role=tester-v2-2" - # - "os=mojave" - # timeout: 60 + # Mojave - # - command: | - # echo "--- :arrow_down: Downloading Build Directory" - # buildkite-agent artifact download "build.tar.gz" . --step ":darwin: Mojave Build" - # echo "+++ :microscope: Running Tests" - # ln -s "$(pwd)" /data/job - # ./scripts/serial-test.sh - # label: ":darwin: Mojave NP Tests" - # agents: - # - "role=tester-v2-2" - # - "os=mojave" - # timeout: 60 + - label: ":darwin: [Darwin] Mojave Tests" + command: + - "tar -pxzf /Network/NAS/MAC_FLEET/BUILDKITE/artifacts/${ANKA_MOJAVE_TEMPLATE}-${BUILDKITE_PIPELINE_SLUG}-${BUILDKITE_BUILD_ID}.tar.gz" + - "cd ./build && ./scripts/parallel-test.sh" + artifact_paths: + - "build/mongod.log" + - "build/genesis.json" + - "build/config.ini" + agents: + - "queue=mac-anka-node-fleet" + plugins: + chef/anka#v0.4.3: + vm-name: $ANKA_MOJAVE_TEMPLATE + vm-registry-tag: "${ANKA_TEMPLATE_TAG}::${MAC_TAG}" + workdir: $ANKA_WORKDIR + # Tests don't need the host "working directory" mounted + no-volume: true + always-pull: "true" + debug: true + wait-network: true + timeout: 60 + + - label: ":darwin: [Darwin] Mojave Tests" + command: + - "tar -pxzf /Network/NAS/MAC_FLEET/BUILDKITE/artifacts/${ANKA_MOJAVE_TEMPLATE}-${BUILDKITE_PIPELINE_SLUG}-${BUILDKITE_BUILD_ID}.tar.gz" + - "cd ./build && ./scripts/serial-test.sh" + artifact_paths: + - "build/mongod.log" + - "build/genesis.json" + - "build/config.ini" + agents: + - "queue=mac-anka-node-fleet" + plugins: + chef/anka#v0.4.3: + vm-name: $ANKA_MOJAVE_TEMPLATE + vm-registry-tag: "${ANKA_TEMPLATE_TAG}::${MAC_TAG}" + workdir: $ANKA_WORKDIR + # Tests don't need the host "working directory" mounted + no-volume: true + always-pull: "true" + debug: true + wait-network: true + timeout: 60 # - wait From 9ca743d85ce5c240915d360defd8a4899b72a705 Mon Sep 17 00:00:00 2001 From: Nathan Pierce Date: Wed, 24 Apr 2019 22:21:21 -0400 Subject: [PATCH 521/680] testing anka --- .buildkite/pipeline.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml index 45667c37565..2fe99630336 100644 --- a/.buildkite/pipeline.yml +++ b/.buildkite/pipeline.yml @@ -12,7 +12,7 @@ steps: branches: "*" async: false build: - branch: "${BUILDKITE_BRANCH}" + branch: "master" env: REPO: "${BUILDKITE_REPO}" REPO_BRANCH: "${BUILDKITE_BRANCH}" From dee9f106f30ab6cc2f6077eb29c969ceaf2aaed8 Mon Sep 17 00:00:00 2001 From: 
Nathan Pierce Date: Wed, 24 Apr 2019 23:49:40 -0400 Subject: [PATCH 522/680] Anka finalization --- .buildkite/pipeline.yml | 762 ++++++++++++++++++++-------------------- 1 file changed, 380 insertions(+), 382 deletions(-) diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml index 2fe99630336..a042d4134ff 100644 --- a/.buildkite/pipeline.yml +++ b/.buildkite/pipeline.yml @@ -33,280 +33,278 @@ steps: vm-name: $ANKA_MOJAVE_TEMPLATE vm-registry-tag: "${ANKA_TEMPLATE_TAG}::${MAC_TAG}" workdir: $ANKA_WORKDIR - always-pull: "true" + always-pull: true debug: true wait-network: true agents: - "queue=mac-anka-node-fleet" timeout: 60 - # - command: | # Amazon Linux 2 Build - # echo "+++ :hammer: Building" - # ./scripts/eosio_build.sh -y -P - # echo "--- :compression: Compressing build directory" - # tar -pczf build.tar.gz build - # if [[ ! -f build.tar.gz ]]; then echo 'ERROR: No build.tar.gz artifact found!' && exit 1; fi - # label: ":aws: Amazon Linux 2 Build" - # agents: - # queue: "automation-large-builder-fleet" - # artifact_paths: "build.tar.gz" - # plugins: - # ecr#v1.1.4: - # login: true - # account_ids: "436617320021" - # no-include-email: true - # region: "us-west-2" - # docker#v2.1.0: - # debug: true - # image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-3" - # propagate-environment: true - # workdir: /data/job - # timeout: 120 - - # - command: | # CentOS 7 Build - # echo "+++ :hammer: Building" - # ./scripts/eosio_build.sh -y -P - # echo "--- :compression: Compressing build directory" - # tar -pczf build.tar.gz build - # if [[ ! -f build.tar.gz ]]; then echo 'ERROR: No build.tar.gz artifact found!' && exit 1; fi - # label: ":centos: CentOS 7 Build" - # agents: - # queue: "automation-large-builder-fleet" - # artifact_paths: "build.tar.gz" - # plugins: - # ecr#v1.1.4: - # login: true - # account_ids: "436617320021" - # no-include-email: true - # region: "us-west-2" - # docker#v2.1.0: - # debug: true - # image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-3" - # propagate-environment: true - # workdir: /data/job - # timeout: 120 + - command: | # Amazon Linux 2 Build + echo "+++ :hammer: Building" + ./scripts/eosio_build.sh -y -P + echo "--- :compression: Compressing build directory" + tar -pczf build.tar.gz build + if [[ ! -f build.tar.gz ]]; then echo 'ERROR: No build.tar.gz artifact found!' && exit 1; fi + label: ":aws: Amazon Linux 2 Build" + agents: + queue: "automation-large-builder-fleet" + artifact_paths: "build.tar.gz" + plugins: + ecr#v1.1.4: + login: true + account_ids: "436617320021" + no-include-email: true + region: "us-west-2" + docker#v2.1.0: + debug: true + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-3" + propagate-environment: true + workdir: /data/job + timeout: 120 - # - command: | # Ubuntu 16.04 Build - # echo "+++ :hammer: Building" - # ./scripts/eosio_build.sh -y -P - # echo "--- :compression: Compressing build directory" - # tar -pczf build.tar.gz build - # if [[ ! -f build.tar.gz ]]; then echo 'ERROR: No build.tar.gz artifact found!' 
&& exit 1; fi - # label: ":ubuntu: Ubuntu 16.04 Build" - # agents: - # queue: "automation-large-builder-fleet" - # artifact_paths: "build.tar.gz" - # plugins: - # ecr#v1.1.4: - # login: true - # account_ids: "436617320021" - # no-include-email: true - # region: "us-west-2" - # docker#v2.1.0: - # debug: true - # image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu16_2-3" - # propagate-environment: true - # workdir: /data/job - # timeout: 120 + - command: | # CentOS 7 Build + echo "+++ :hammer: Building" + ./scripts/eosio_build.sh -y -P + echo "--- :compression: Compressing build directory" + tar -pczf build.tar.gz build + if [[ ! -f build.tar.gz ]]; then echo 'ERROR: No build.tar.gz artifact found!' && exit 1; fi + label: ":centos: CentOS 7 Build" + agents: + queue: "automation-large-builder-fleet" + artifact_paths: "build.tar.gz" + plugins: + ecr#v1.1.4: + login: true + account_ids: "436617320021" + no-include-email: true + region: "us-west-2" + docker#v2.1.0: + debug: true + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-3" + propagate-environment: true + workdir: /data/job + timeout: 120 - # - command: | # Ubuntu 18.04 Build - # echo "+++ :hammer: Building" - # ./scripts/eosio_build.sh -y -P - # echo "--- :compression: Compressing build directory" - # tar -pczf build.tar.gz build - # if [[ ! -f build.tar.gz ]]; then echo 'ERROR: No build.tar.gz artifact found!' && exit 1; fi - # label: ":ubuntu: Ubuntu 18.04 Build" - # agents: - # queue: "automation-large-builder-fleet" - # artifact_paths: "build.tar.gz" - # plugins: - # ecr#v1.1.4: - # login: true - # account_ids: "436617320021" - # no-include-email: true - # region: "us-west-2" - # docker#v2.1.0: - # debug: true - # image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-3" - # propagate-environment: true - # workdir: /data/job - # timeout: 120 + - command: | # Ubuntu 16.04 Build + echo "+++ :hammer: Building" + ./scripts/eosio_build.sh -y -P + echo "--- :compression: Compressing build directory" + tar -pczf build.tar.gz build + if [[ ! -f build.tar.gz ]]; then echo 'ERROR: No build.tar.gz artifact found!' && exit 1; fi + label: ":ubuntu: Ubuntu 16.04 Build" + agents: + queue: "automation-large-builder-fleet" + artifact_paths: "build.tar.gz" + plugins: + ecr#v1.1.4: + login: true + account_ids: "436617320021" + no-include-email: true + region: "us-west-2" + docker#v2.1.0: + debug: true + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu16_2-3" + propagate-environment: true + workdir: /data/job + timeout: 120 - # - wait + - command: | # Ubuntu 18.04 Build + echo "+++ :hammer: Building" + ./scripts/eosio_build.sh -y -P + echo "--- :compression: Compressing build directory" + tar -pczf build.tar.gz build + if [[ ! -f build.tar.gz ]]; then echo 'ERROR: No build.tar.gz artifact found!' && exit 1; fi + label: ":ubuntu: Ubuntu 18.04 Build" + agents: + queue: "automation-large-builder-fleet" + artifact_paths: "build.tar.gz" + plugins: + ecr#v1.1.4: + login: true + account_ids: "436617320021" + no-include-email: true + region: "us-west-2" + docker#v2.1.0: + debug: true + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-3" + propagate-environment: true + workdir: /data/job + timeout: 120 - # # Amazon Linux 2 Tests - # - command: | - # echo "--- :arrow_down: Downloading Build Directory" - # buildkite-agent artifact download "build.tar.gz" . 
--step ":aws: Amazon Linux 2 Build" - # echo "+++ :microscope: Running Tests" - # ./scripts/parallel-test.sh - # label: ":aws: Amazon Linux 2 Tests" - # agents: - # queue: "automation-large-builder-fleet" - # plugins: - # ecr#v1.1.4: - # login: true - # account_ids: "436617320021" - # no-include-email: true - # region: "us-west-2" - # docker#v2.1.0: - # debug: true - # image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-3" - # propagate-environment: true - # workdir: /data/job - # timeout: 60 + - wait - # - command: | - # echo "--- :arrow_down: Downloading Build Directory" - # buildkite-agent artifact download "build.tar.gz" . --step ":aws: Amazon Linux 2 Build" - # echo "+++ :microscope: Running Tests" - # ./scripts/serial-test.sh - # label: ":aws: Amazon Linux 2 NP Tests" - # agents: - # queue: "automation-large-builder-fleet" - # plugins: - # ecr#v1.1.4: - # login: true - # account_ids: "436617320021" - # no-include-email: true - # region: "us-west-2" - # docker#v2.1.0: - # debug: true - # image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-3" - # propagate-environment: true - # workdir: /data/job - # timeout: 60 + # Amazon Linux 2 Tests + - command: | + echo "--- :arrow_down: Downloading Build Directory" + buildkite-agent artifact download "build.tar.gz" . --step ":aws: Amazon Linux 2 Build" + echo "+++ :microscope: Running Tests" + ./scripts/parallel-test.sh + label: ":aws: Amazon Linux 2 Tests" + agents: + queue: "automation-large-builder-fleet" + plugins: + ecr#v1.1.4: + login: true + account_ids: "436617320021" + no-include-email: true + region: "us-west-2" + docker#v2.1.0: + debug: true + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-3" + propagate-environment: true + workdir: /data/job + timeout: 60 - # # centOS 7 Tests - # - command: | - # echo "--- :arrow_down: Downloading Build Directory" - # buildkite-agent artifact download "build.tar.gz" . --step ":centos: CentOS 7 Build" - # echo "+++ :microscope: Running Tests" - # ./scripts/parallel-test.sh - # label: ":centos: CentOS 7 Tests" - # agents: - # queue: "automation-large-builder-fleet" - # plugins: - # ecr#v1.1.4: - # login: true - # account_ids: "436617320021" - # no-include-email: true - # region: "us-west-2" - # docker#v2.1.0: - # debug: true - # image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-3" - # propagate-environment: true - # workdir: /data/job - # timeout: 60 + - command: | + echo "--- :arrow_down: Downloading Build Directory" + buildkite-agent artifact download "build.tar.gz" . --step ":aws: Amazon Linux 2 Build" + echo "+++ :microscope: Running Tests" + ./scripts/serial-test.sh + label: ":aws: Amazon Linux 2 NP Tests" + agents: + queue: "automation-large-builder-fleet" + plugins: + ecr#v1.1.4: + login: true + account_ids: "436617320021" + no-include-email: true + region: "us-west-2" + docker#v2.1.0: + debug: true + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-3" + propagate-environment: true + workdir: /data/job + timeout: 60 - # - command: | - # echo "--- :arrow_down: Downloading Build Directory" - # buildkite-agent artifact download "build.tar.gz" . 
--step ":centos: CentOS 7 Build" - # echo "+++ :microscope: Running Tests" - # ./scripts/serial-test.sh - # label: ":centos: CentOS 7 NP Tests" - # agents: - # queue: "automation-large-builder-fleet" - # plugins: - # ecr#v1.1.4: - # login: true - # account_ids: "436617320021" - # no-include-email: true - # region: "us-west-2" - # docker#v2.1.0: - # debug: true - # image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-3" - # propagate-environment: true - # workdir: /data/job - # timeout: 60 + # centOS 7 Tests + - command: | + echo "--- :arrow_down: Downloading Build Directory" + buildkite-agent artifact download "build.tar.gz" . --step ":centos: CentOS 7 Build" + echo "+++ :microscope: Running Tests" + ./scripts/parallel-test.sh + label: ":centos: CentOS 7 Tests" + agents: + queue: "automation-large-builder-fleet" + plugins: + ecr#v1.1.4: + login: true + account_ids: "436617320021" + no-include-email: true + region: "us-west-2" + docker#v2.1.0: + debug: true + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-3" + propagate-environment: true + workdir: /data/job + timeout: 60 - # # Ubuntu 16.04 Tests - # - command: | - # echo "--- :arrow_down: Downloading Build Directory" - # buildkite-agent artifact download "build.tar.gz" . --step ":ubuntu: Ubuntu 16.04 Build" - # echo "+++ :microscope: Running Tests" - # ./scripts/parallel-test.sh - # label: ":ubuntu: Ubuntu 16.04 Tests" - # agents: - # queue: "automation-large-builder-fleet" - # plugins: - # ecr#v1.1.4: - # login: true - # account_ids: "436617320021" - # no-include-email: true - # region: "us-west-2" - # docker#v2.1.0: - # debug: true - # image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu16_2-3" - # propagate-environment: true - # workdir: /data/job - # timeout: 60 + - command: | + echo "--- :arrow_down: Downloading Build Directory" + buildkite-agent artifact download "build.tar.gz" . --step ":centos: CentOS 7 Build" + echo "+++ :microscope: Running Tests" + ./scripts/serial-test.sh + label: ":centos: CentOS 7 NP Tests" + agents: + queue: "automation-large-builder-fleet" + plugins: + ecr#v1.1.4: + login: true + account_ids: "436617320021" + no-include-email: true + region: "us-west-2" + docker#v2.1.0: + debug: true + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-3" + propagate-environment: true + workdir: /data/job + timeout: 60 - # - command: | - # echo "--- :arrow_down: Downloading Build Directory" - # buildkite-agent artifact download "build.tar.gz" . --step ":ubuntu: Ubuntu 16.04 Build" - # echo "+++ :microscope: Running Tests" - # ./scripts/serial-test.sh - # label: ":ubuntu: Ubuntu 16.04 NP Tests" - # agents: - # queue: "automation-large-builder-fleet" - # plugins: - # ecr#v1.1.4: - # login: true - # account_ids: "436617320021" - # no-include-email: true - # region: "us-west-2" - # docker#v2.1.0: - # debug: true - # image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu16_2-3" - # propagate-environment: true - # workdir: /data/job - # timeout: 60 + # Ubuntu 16.04 Tests + - command: | + echo "--- :arrow_down: Downloading Build Directory" + buildkite-agent artifact download "build.tar.gz" . 
--step ":ubuntu: Ubuntu 16.04 Build" + echo "+++ :microscope: Running Tests" + ./scripts/parallel-test.sh + label: ":ubuntu: Ubuntu 16.04 Tests" + agents: + queue: "automation-large-builder-fleet" + plugins: + ecr#v1.1.4: + login: true + account_ids: "436617320021" + no-include-email: true + region: "us-west-2" + docker#v2.1.0: + debug: true + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu16_2-3" + propagate-environment: true + workdir: /data/job + timeout: 60 - # # Ubuntu 18.04 Tests - # - command: | - # echo "--- :arrow_down: Downloading Build Directory" - # buildkite-agent artifact download "build.tar.gz" . --step ":ubuntu: Ubuntu 18.04 Build" - # echo "+++ :microscope: Running Tests" - # ./scripts/parallel-test.sh - # label: ":ubuntu: Ubuntu 18.04 Tests" - # agents: - # queue: "automation-large-builder-fleet" - # plugins: - # ecr#v1.1.4: - # login: true - # account_ids: "436617320021" - # no-include-email: true - # region: "us-west-2" - # docker#v2.1.0: - # debug: true - # image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-3" - # propagate-environment: true - # workdir: /data/job - # timeout: 60 + - command: | + echo "--- :arrow_down: Downloading Build Directory" + buildkite-agent artifact download "build.tar.gz" . --step ":ubuntu: Ubuntu 16.04 Build" + echo "+++ :microscope: Running Tests" + ./scripts/serial-test.sh + label: ":ubuntu: Ubuntu 16.04 NP Tests" + agents: + queue: "automation-large-builder-fleet" + plugins: + ecr#v1.1.4: + login: true + account_ids: "436617320021" + no-include-email: true + region: "us-west-2" + docker#v2.1.0: + debug: true + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu16_2-3" + propagate-environment: true + workdir: /data/job + timeout: 60 - # - command: | - # echo "--- :arrow_down: Downloading Build Directory" - # buildkite-agent artifact download "build.tar.gz" . --step ":ubuntu: Ubuntu 18.04 Build" - # echo "+++ :microscope: Running Tests" - # ./scripts/serial-test.sh - # label: ":ubuntu: Ubuntu 18.04 NP Tests" - # agents: - # queue: "automation-large-builder-fleet" - # plugins: - # ecr#v1.1.4: - # login: true - # account_ids: "436617320021" - # no-include-email: true - # region: "us-west-2" - # docker#v2.1.0: - # debug: true - # image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-3" - # propagate-environment: true - # workdir: /data/job - # timeout: 60 + # Ubuntu 18.04 Tests + - command: | + echo "--- :arrow_down: Downloading Build Directory" + buildkite-agent artifact download "build.tar.gz" . --step ":ubuntu: Ubuntu 18.04 Build" + echo "+++ :microscope: Running Tests" + ./scripts/parallel-test.sh + label: ":ubuntu: Ubuntu 18.04 Tests" + agents: + queue: "automation-large-builder-fleet" + plugins: + ecr#v1.1.4: + login: true + account_ids: "436617320021" + no-include-email: true + region: "us-west-2" + docker#v2.1.0: + debug: true + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-3" + propagate-environment: true + workdir: /data/job + timeout: 60 - # Mojave + - command: | + echo "--- :arrow_down: Downloading Build Directory" + buildkite-agent artifact download "build.tar.gz" . 
--step ":ubuntu: Ubuntu 18.04 Build" + echo "+++ :microscope: Running Tests" + ./scripts/serial-test.sh + label: ":ubuntu: Ubuntu 18.04 NP Tests" + agents: + queue: "automation-large-builder-fleet" + plugins: + ecr#v1.1.4: + login: true + account_ids: "436617320021" + no-include-email: true + region: "us-west-2" + docker#v2.1.0: + debug: true + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-3" + propagate-environment: true + workdir: /data/job + timeout: 60 - label: ":darwin: [Darwin] Mojave Tests" command: @@ -323,9 +321,9 @@ steps: vm-name: $ANKA_MOJAVE_TEMPLATE vm-registry-tag: "${ANKA_TEMPLATE_TAG}::${MAC_TAG}" workdir: $ANKA_WORKDIR - # Tests don't need the host "working directory" mounted + # Tests can't have the host "working directory" mounted no-volume: true - always-pull: "true" + always-pull: true debug: true wait-network: true timeout: 60 @@ -345,139 +343,139 @@ steps: vm-name: $ANKA_MOJAVE_TEMPLATE vm-registry-tag: "${ANKA_TEMPLATE_TAG}::${MAC_TAG}" workdir: $ANKA_WORKDIR - # Tests don't need the host "working directory" mounted + # Tests can't have the host "working directory" mounted no-volume: true - always-pull: "true" + always-pull: true debug: true wait-network: true timeout: 60 - # - wait + - wait - # - command: | # CentOS 7 Package Builder - # echo "--- :arrow_down: Downloading build directory" - # buildkite-agent artifact download "build.tar.gz" . --step ":centos: CentOS 7 Build" - # tar -zxf build.tar.gz - # echo "+++ :microscope: Starting package build" - # yum install -y rpm-build - # mkdir -p /root/rpmbuild/BUILD - # mkdir -p /root/rpmbuild/BUILDROOT - # mkdir -p /root/rpmbuild/RPMS - # mkdir -p /root/rpmbuild/SOURCES - # mkdir -p /root/rpmbuild/SPECS - # mkdir -p /root/rpmbuild/SRPMS - # cd /data/job/build/packages - # bash generate_package.sh rpm - # label: ":centos: CentOS 7 Package Builder" - # agents: - # queue: "automation-large-builder-fleet" - # artifact_paths: - # - "build/packages/*.rpm" - # plugins: - # ecr#v1.1.4: - # login: true - # account_ids: "436617320021" - # no-include-email: true - # region: "us-west-2" - # docker#v2.1.0: - # debug: true - # image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-3" - # propagate-environment: true - # workdir: /data/job - # env: - # OS: "el7" - # PKGTYPE: "rpm" - # timeout: 60 + - command: | # CentOS 7 Package Builder + echo "--- :arrow_down: Downloading build directory" + buildkite-agent artifact download "build.tar.gz" . --step ":centos: CentOS 7 Build" + tar -zxf build.tar.gz + echo "+++ :microscope: Starting package build" + yum install -y rpm-build + mkdir -p /root/rpmbuild/BUILD + mkdir -p /root/rpmbuild/BUILDROOT + mkdir -p /root/rpmbuild/RPMS + mkdir -p /root/rpmbuild/SOURCES + mkdir -p /root/rpmbuild/SPECS + mkdir -p /root/rpmbuild/SRPMS + cd /data/job/build/packages + bash generate_package.sh rpm + label: ":centos: CentOS 7 Package Builder" + agents: + queue: "automation-large-builder-fleet" + artifact_paths: + - "build/packages/*.rpm" + plugins: + ecr#v1.1.4: + login: true + account_ids: "436617320021" + no-include-email: true + region: "us-west-2" + docker#v2.1.0: + debug: true + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-3" + propagate-environment: true + workdir: /data/job + env: + OS: "el7" + PKGTYPE: "rpm" + timeout: 60 - # - command: | # Ubuntu 16.04 Package Builder - # echo "--- :arrow_down: Downloading build directory" - # buildkite-agent artifact download "build.tar.gz" . 
--step ":ubuntu: Ubuntu 16.04 Build" - # tar -zxf build.tar.gz - # echo "+++ :microscope: Starting package build" - # cd /data/job/build/packages - # bash generate_package.sh deb - # label: ":ubuntu: Ubuntu 16.04 Package Builder" - # agents: - # queue: "automation-large-builder-fleet" - # artifact_paths: - # - "build/packages/*.deb" - # plugins: - # ecr#v1.1.4: - # login: true - # account_ids: "436617320021" - # no-include-email: true - # region: "us-west-2" - # docker#v2.1.0: - # debug: true - # image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu16_2-3" - # propagate-environment: true - # workdir: /data/job - # env: - # OS: "ubuntu-16.04" - # PKGTYPE: "deb" - # timeout: 60 + - command: | # Ubuntu 16.04 Package Builder + echo "--- :arrow_down: Downloading build directory" + buildkite-agent artifact download "build.tar.gz" . --step ":ubuntu: Ubuntu 16.04 Build" + tar -zxf build.tar.gz + echo "+++ :microscope: Starting package build" + cd /data/job/build/packages + bash generate_package.sh deb + label: ":ubuntu: Ubuntu 16.04 Package Builder" + agents: + queue: "automation-large-builder-fleet" + artifact_paths: + - "build/packages/*.deb" + plugins: + ecr#v1.1.4: + login: true + account_ids: "436617320021" + no-include-email: true + region: "us-west-2" + docker#v2.1.0: + debug: true + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu16_2-3" + propagate-environment: true + workdir: /data/job + env: + OS: "ubuntu-16.04" + PKGTYPE: "deb" + timeout: 60 - # - command: | # Ubuntu 18.04 Package Builder - # echo "--- :arrow_down: Downloading build directory" - # buildkite-agent artifact download "build.tar.gz" . --step ":ubuntu: Ubuntu 18.04 Build" - # tar -zxf build.tar.gz - # echo "+++ :microscope: Starting package build" - # cd /data/job/build/packages - # bash generate_package.sh deb - # label: ":ubuntu: Ubuntu 18.04 Package Builder" - # agents: - # queue: "automation-large-builder-fleet" - # artifact_paths: - # - "build/packages/*.deb" - # plugins: - # ecr#v1.1.4: - # login: true - # account_ids: "436617320021" - # no-include-email: true - # region: "us-west-2" - # docker#v2.1.0: - # debug: true - # image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-3" - # propagate-environment: true - # workdir: /data/job - # env: - # OS: "ubuntu-18.04" - # PKGTYPE: "deb" - # timeout: 60 + - command: | # Ubuntu 18.04 Package Builder + echo "--- :arrow_down: Downloading build directory" + buildkite-agent artifact download "build.tar.gz" . --step ":ubuntu: Ubuntu 18.04 Build" + tar -zxf build.tar.gz + echo "+++ :microscope: Starting package build" + cd /data/job/build/packages + bash generate_package.sh deb + label: ":ubuntu: Ubuntu 18.04 Package Builder" + agents: + queue: "automation-large-builder-fleet" + artifact_paths: + - "build/packages/*.deb" + plugins: + ecr#v1.1.4: + login: true + account_ids: "436617320021" + no-include-email: true + region: "us-west-2" + docker#v2.1.0: + debug: true + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-3" + propagate-environment: true + workdir: /data/job + env: + OS: "ubuntu-18.04" + PKGTYPE: "deb" + timeout: 60 - # - command: | # macOS Mojave Package Builder - # echo "--- :arrow_down: Downloading build directory" - # buildkite-agent artifact download "build.tar.gz" . 
--step ":darwin: Mojave Build" - # tar -zxf build.tar.gz - # echo "+++ :microscope: Starting package build" - # ln -s "$(pwd)" /data/job - # cd /data/job/build/packages - # bash generate_package.sh brew - # label: ":darwin: Mojave Package Builder" - # agents: - # - "role=builder-v2-2" - # - "os=mojave" - # artifact_paths: - # - "build/packages/*.tar.gz" - # - "build/packages/*.rb" - # timeout: 60 + - command: | # macOS Mojave Package Builder + echo "--- :arrow_down: Downloading build directory" + buildkite-agent artifact download "build.tar.gz" . --step ":darwin: Mojave Build" + tar -zxf build.tar.gz + echo "+++ :microscope: Starting package build" + ln -s "$(pwd)" /data/job + cd /data/job/build/packages + bash generate_package.sh brew + label: ":darwin: Mojave Package Builder" + agents: + - "role=builder-v2-2" + - "os=mojave" + artifact_paths: + - "build/packages/*.tar.gz" + - "build/packages/*.rb" + timeout: 60 - # - wait + - wait - # - command: | # Brew Updater - # echo "--- :arrow_down: Downloading brew files" - # buildkite-agent artifact download "build/packages/eosio.rb" . --step ":darwin: Mojave Package Builder" - # label: ":darwin: Brew Updater" - # agents: - # queue: "automation-large-builder-fleet" - # artifact_paths: - # - "build/packages/eosio.rb" - # timeout: 5 + - command: | # Brew Updater + echo "--- :arrow_down: Downloading brew files" + buildkite-agent artifact download "build/packages/eosio.rb" . --step ":darwin: Mojave Package Builder" + label: ":darwin: Brew Updater" + agents: + queue: "automation-large-builder-fleet" + artifact_paths: + - "build/packages/eosio.rb" + timeout: 5 - # - command: | # Git Submodule Regression Check - # echo "+++ :microscope: Running git submodule regression check" && \ - # ./scripts/submodule_check.sh - # label: "Git Submodule Regression Check" - # agents: - # queue: "automation-large-builder-fleet" - # timeout: 5 + - command: | # Git Submodule Regression Check + echo "+++ :microscope: Running git submodule regression check" && \ + ./scripts/submodule_check.sh + label: "Git Submodule Regression Check" + agents: + queue: "automation-large-builder-fleet" + timeout: 5 From 1f2266d588b397023cbeba842a79df519cc9c6e3 Mon Sep 17 00:00:00 2001 From: Nathan Pierce Date: Wed, 24 Apr 2019 23:58:08 -0400 Subject: [PATCH 523/680] Anka finalization --- .buildkite/pipeline.yml | 791 +++++++++++++++++++++------------------- 1 file changed, 417 insertions(+), 374 deletions(-) diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml index a042d4134ff..e1eb823c32a 100644 --- a/.buildkite/pipeline.yml +++ b/.buildkite/pipeline.yml @@ -40,271 +40,289 @@ steps: - "queue=mac-anka-node-fleet" timeout: 60 - - command: | # Amazon Linux 2 Build - echo "+++ :hammer: Building" - ./scripts/eosio_build.sh -y -P - echo "--- :compression: Compressing build directory" - tar -pczf build.tar.gz build - if [[ ! -f build.tar.gz ]]; then echo 'ERROR: No build.tar.gz artifact found!' 
&& exit 1; fi - label: ":aws: Amazon Linux 2 Build" - agents: - queue: "automation-large-builder-fleet" - artifact_paths: "build.tar.gz" - plugins: - ecr#v1.1.4: - login: true - account_ids: "436617320021" - no-include-email: true - region: "us-west-2" - docker#v2.1.0: - debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-3" - propagate-environment: true - workdir: /data/job - timeout: 120 - - command: | # CentOS 7 Build - echo "+++ :hammer: Building" - ./scripts/eosio_build.sh -y -P - echo "--- :compression: Compressing build directory" - tar -pczf build.tar.gz build - if [[ ! -f build.tar.gz ]]; then echo 'ERROR: No build.tar.gz artifact found!' && exit 1; fi - label: ":centos: CentOS 7 Build" - agents: - queue: "automation-large-builder-fleet" - artifact_paths: "build.tar.gz" - plugins: - ecr#v1.1.4: - login: true - account_ids: "436617320021" - no-include-email: true - region: "us-west-2" - docker#v2.1.0: - debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-3" - propagate-environment: true - workdir: /data/job - timeout: 120 + # - command: | # macOS Mojave Build + # echo "--- Creating symbolic link to job directory :file_folder:" + # sleep 5 + # ln -s "$(pwd)" /data/job + # cd /data/job + # echo "+++ Building :hammer:" + # ./scripts/eosio_build.sh -y -P + # echo "--- Compressing build directory :compression:" + # tar -pczf build.tar.gz build + # if [[ ! -f build.tar.gz ]]; then echo 'ERROR: No build.tar.gz artifact found!' && exit 1; fi + # label: ":darwin: Mojave Build" + # agents: + # - "role=builder-v2-2" + # - "os=mojave" + # artifact_paths: "build.tar.gz" + # timeout: 120 - - command: | # Ubuntu 16.04 Build - echo "+++ :hammer: Building" - ./scripts/eosio_build.sh -y -P - echo "--- :compression: Compressing build directory" - tar -pczf build.tar.gz build - if [[ ! -f build.tar.gz ]]; then echo 'ERROR: No build.tar.gz artifact found!' && exit 1; fi - label: ":ubuntu: Ubuntu 16.04 Build" - agents: - queue: "automation-large-builder-fleet" - artifact_paths: "build.tar.gz" - plugins: - ecr#v1.1.4: - login: true - account_ids: "436617320021" - no-include-email: true - region: "us-west-2" - docker#v2.1.0: - debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu16_2-3" - propagate-environment: true - workdir: /data/job - timeout: 120 + # - command: | # Amazon Linux 2 Build + # echo "+++ :hammer: Building" + # ./scripts/eosio_build.sh -y -P + # echo "--- :compression: Compressing build directory" + # tar -pczf build.tar.gz build + # if [[ ! -f build.tar.gz ]]; then echo 'ERROR: No build.tar.gz artifact found!' && exit 1; fi + # label: ":aws: Amazon Linux 2 Build" + # agents: + # queue: "automation-large-builder-fleet" + # artifact_paths: "build.tar.gz" + # plugins: + # ecr#v1.1.4: + # login: true + # account_ids: "436617320021" + # no-include-email: true + # region: "us-west-2" + # docker#v2.1.0: + # debug: true + # image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-3" + # propagate-environment: true + # workdir: /data/job + # timeout: 120 - - command: | # Ubuntu 18.04 Build - echo "+++ :hammer: Building" - ./scripts/eosio_build.sh -y -P - echo "--- :compression: Compressing build directory" - tar -pczf build.tar.gz build - if [[ ! -f build.tar.gz ]]; then echo 'ERROR: No build.tar.gz artifact found!' 
&& exit 1; fi - label: ":ubuntu: Ubuntu 18.04 Build" - agents: - queue: "automation-large-builder-fleet" - artifact_paths: "build.tar.gz" - plugins: - ecr#v1.1.4: - login: true - account_ids: "436617320021" - no-include-email: true - region: "us-west-2" - docker#v2.1.0: - debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-3" - propagate-environment: true - workdir: /data/job - timeout: 120 + # - command: | # CentOS 7 Build + # echo "+++ :hammer: Building" + # ./scripts/eosio_build.sh -y -P + # echo "--- :compression: Compressing build directory" + # tar -pczf build.tar.gz build + # if [[ ! -f build.tar.gz ]]; then echo 'ERROR: No build.tar.gz artifact found!' && exit 1; fi + # label: ":centos: CentOS 7 Build" + # agents: + # queue: "automation-large-builder-fleet" + # artifact_paths: "build.tar.gz" + # plugins: + # ecr#v1.1.4: + # login: true + # account_ids: "436617320021" + # no-include-email: true + # region: "us-west-2" + # docker#v2.1.0: + # debug: true + # image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-3" + # propagate-environment: true + # workdir: /data/job + # timeout: 120 + + # - command: | # Ubuntu 16.04 Build + # echo "+++ :hammer: Building" + # ./scripts/eosio_build.sh -y -P + # echo "--- :compression: Compressing build directory" + # tar -pczf build.tar.gz build + # if [[ ! -f build.tar.gz ]]; then echo 'ERROR: No build.tar.gz artifact found!' && exit 1; fi + # label: ":ubuntu: Ubuntu 16.04 Build" + # agents: + # queue: "automation-large-builder-fleet" + # artifact_paths: "build.tar.gz" + # plugins: + # ecr#v1.1.4: + # login: true + # account_ids: "436617320021" + # no-include-email: true + # region: "us-west-2" + # docker#v2.1.0: + # debug: true + # image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu16_2-3" + # propagate-environment: true + # workdir: /data/job + # timeout: 120 + + # - command: | # Ubuntu 18.04 Build + # echo "+++ :hammer: Building" + # ./scripts/eosio_build.sh -y -P + # echo "--- :compression: Compressing build directory" + # tar -pczf build.tar.gz build + # if [[ ! -f build.tar.gz ]]; then echo 'ERROR: No build.tar.gz artifact found!' && exit 1; fi + # label: ":ubuntu: Ubuntu 18.04 Build" + # agents: + # queue: "automation-large-builder-fleet" + # artifact_paths: "build.tar.gz" + # plugins: + # ecr#v1.1.4: + # login: true + # account_ids: "436617320021" + # no-include-email: true + # region: "us-west-2" + # docker#v2.1.0: + # debug: true + # image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-3" + # propagate-environment: true + # workdir: /data/job + # timeout: 120 - wait - # Amazon Linux 2 Tests - - command: | - echo "--- :arrow_down: Downloading Build Directory" - buildkite-agent artifact download "build.tar.gz" . --step ":aws: Amazon Linux 2 Build" - echo "+++ :microscope: Running Tests" - ./scripts/parallel-test.sh - label: ":aws: Amazon Linux 2 Tests" - agents: - queue: "automation-large-builder-fleet" - plugins: - ecr#v1.1.4: - login: true - account_ids: "436617320021" - no-include-email: true - region: "us-west-2" - docker#v2.1.0: - debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-3" - propagate-environment: true - workdir: /data/job - timeout: 60 + # # Amazon Linux 2 Tests + # - command: | + # echo "--- :arrow_down: Downloading Build Directory" + # buildkite-agent artifact download "build.tar.gz" . 
--step ":aws: Amazon Linux 2 Build" + # echo "+++ :microscope: Running Tests" + # ./scripts/parallel-test.sh + # label: ":aws: Amazon Linux 2 Tests" + # agents: + # queue: "automation-large-builder-fleet" + # plugins: + # ecr#v1.1.4: + # login: true + # account_ids: "436617320021" + # no-include-email: true + # region: "us-west-2" + # docker#v2.1.0: + # debug: true + # image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-3" + # propagate-environment: true + # workdir: /data/job + # timeout: 60 - - command: | - echo "--- :arrow_down: Downloading Build Directory" - buildkite-agent artifact download "build.tar.gz" . --step ":aws: Amazon Linux 2 Build" - echo "+++ :microscope: Running Tests" - ./scripts/serial-test.sh - label: ":aws: Amazon Linux 2 NP Tests" - agents: - queue: "automation-large-builder-fleet" - plugins: - ecr#v1.1.4: - login: true - account_ids: "436617320021" - no-include-email: true - region: "us-west-2" - docker#v2.1.0: - debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-3" - propagate-environment: true - workdir: /data/job - timeout: 60 + # - command: | + # echo "--- :arrow_down: Downloading Build Directory" + # buildkite-agent artifact download "build.tar.gz" . --step ":aws: Amazon Linux 2 Build" + # echo "+++ :microscope: Running Tests" + # ./scripts/serial-test.sh + # label: ":aws: Amazon Linux 2 NP Tests" + # agents: + # queue: "automation-large-builder-fleet" + # plugins: + # ecr#v1.1.4: + # login: true + # account_ids: "436617320021" + # no-include-email: true + # region: "us-west-2" + # docker#v2.1.0: + # debug: true + # image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-3" + # propagate-environment: true + # workdir: /data/job + # timeout: 60 - # centOS 7 Tests - - command: | - echo "--- :arrow_down: Downloading Build Directory" - buildkite-agent artifact download "build.tar.gz" . --step ":centos: CentOS 7 Build" - echo "+++ :microscope: Running Tests" - ./scripts/parallel-test.sh - label: ":centos: CentOS 7 Tests" - agents: - queue: "automation-large-builder-fleet" - plugins: - ecr#v1.1.4: - login: true - account_ids: "436617320021" - no-include-email: true - region: "us-west-2" - docker#v2.1.0: - debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-3" - propagate-environment: true - workdir: /data/job - timeout: 60 + # # centOS 7 Tests + # - command: | + # echo "--- :arrow_down: Downloading Build Directory" + # buildkite-agent artifact download "build.tar.gz" . --step ":centos: CentOS 7 Build" + # echo "+++ :microscope: Running Tests" + # ./scripts/parallel-test.sh + # label: ":centos: CentOS 7 Tests" + # agents: + # queue: "automation-large-builder-fleet" + # plugins: + # ecr#v1.1.4: + # login: true + # account_ids: "436617320021" + # no-include-email: true + # region: "us-west-2" + # docker#v2.1.0: + # debug: true + # image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-3" + # propagate-environment: true + # workdir: /data/job + # timeout: 60 - - command: | - echo "--- :arrow_down: Downloading Build Directory" - buildkite-agent artifact download "build.tar.gz" . 
--step ":centos: CentOS 7 Build" - echo "+++ :microscope: Running Tests" - ./scripts/serial-test.sh - label: ":centos: CentOS 7 NP Tests" - agents: - queue: "automation-large-builder-fleet" - plugins: - ecr#v1.1.4: - login: true - account_ids: "436617320021" - no-include-email: true - region: "us-west-2" - docker#v2.1.0: - debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-3" - propagate-environment: true - workdir: /data/job - timeout: 60 + # - command: | + # echo "--- :arrow_down: Downloading Build Directory" + # buildkite-agent artifact download "build.tar.gz" . --step ":centos: CentOS 7 Build" + # echo "+++ :microscope: Running Tests" + # ./scripts/serial-test.sh + # label: ":centos: CentOS 7 NP Tests" + # agents: + # queue: "automation-large-builder-fleet" + # plugins: + # ecr#v1.1.4: + # login: true + # account_ids: "436617320021" + # no-include-email: true + # region: "us-west-2" + # docker#v2.1.0: + # debug: true + # image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-3" + # propagate-environment: true + # workdir: /data/job + # timeout: 60 - # Ubuntu 16.04 Tests - - command: | - echo "--- :arrow_down: Downloading Build Directory" - buildkite-agent artifact download "build.tar.gz" . --step ":ubuntu: Ubuntu 16.04 Build" - echo "+++ :microscope: Running Tests" - ./scripts/parallel-test.sh - label: ":ubuntu: Ubuntu 16.04 Tests" - agents: - queue: "automation-large-builder-fleet" - plugins: - ecr#v1.1.4: - login: true - account_ids: "436617320021" - no-include-email: true - region: "us-west-2" - docker#v2.1.0: - debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu16_2-3" - propagate-environment: true - workdir: /data/job - timeout: 60 + # # Ubuntu 16.04 Tests + # - command: | + # echo "--- :arrow_down: Downloading Build Directory" + # buildkite-agent artifact download "build.tar.gz" . --step ":ubuntu: Ubuntu 16.04 Build" + # echo "+++ :microscope: Running Tests" + # ./scripts/parallel-test.sh + # label: ":ubuntu: Ubuntu 16.04 Tests" + # agents: + # queue: "automation-large-builder-fleet" + # plugins: + # ecr#v1.1.4: + # login: true + # account_ids: "436617320021" + # no-include-email: true + # region: "us-west-2" + # docker#v2.1.0: + # debug: true + # image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu16_2-3" + # propagate-environment: true + # workdir: /data/job + # timeout: 60 - - command: | - echo "--- :arrow_down: Downloading Build Directory" - buildkite-agent artifact download "build.tar.gz" . --step ":ubuntu: Ubuntu 16.04 Build" - echo "+++ :microscope: Running Tests" - ./scripts/serial-test.sh - label: ":ubuntu: Ubuntu 16.04 NP Tests" - agents: - queue: "automation-large-builder-fleet" - plugins: - ecr#v1.1.4: - login: true - account_ids: "436617320021" - no-include-email: true - region: "us-west-2" - docker#v2.1.0: - debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu16_2-3" - propagate-environment: true - workdir: /data/job - timeout: 60 + # - command: | + # echo "--- :arrow_down: Downloading Build Directory" + # buildkite-agent artifact download "build.tar.gz" . 
--step ":ubuntu: Ubuntu 16.04 Build" + # echo "+++ :microscope: Running Tests" + # ./scripts/serial-test.sh + # label: ":ubuntu: Ubuntu 16.04 NP Tests" + # agents: + # queue: "automation-large-builder-fleet" + # plugins: + # ecr#v1.1.4: + # login: true + # account_ids: "436617320021" + # no-include-email: true + # region: "us-west-2" + # docker#v2.1.0: + # debug: true + # image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu16_2-3" + # propagate-environment: true + # workdir: /data/job + # timeout: 60 - # Ubuntu 18.04 Tests - - command: | - echo "--- :arrow_down: Downloading Build Directory" - buildkite-agent artifact download "build.tar.gz" . --step ":ubuntu: Ubuntu 18.04 Build" - echo "+++ :microscope: Running Tests" - ./scripts/parallel-test.sh - label: ":ubuntu: Ubuntu 18.04 Tests" - agents: - queue: "automation-large-builder-fleet" - plugins: - ecr#v1.1.4: - login: true - account_ids: "436617320021" - no-include-email: true - region: "us-west-2" - docker#v2.1.0: - debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-3" - propagate-environment: true - workdir: /data/job - timeout: 60 + # # Ubuntu 18.04 Tests + # - command: | + # echo "--- :arrow_down: Downloading Build Directory" + # buildkite-agent artifact download "build.tar.gz" . --step ":ubuntu: Ubuntu 18.04 Build" + # echo "+++ :microscope: Running Tests" + # ./scripts/parallel-test.sh + # label: ":ubuntu: Ubuntu 18.04 Tests" + # agents: + # queue: "automation-large-builder-fleet" + # plugins: + # ecr#v1.1.4: + # login: true + # account_ids: "436617320021" + # no-include-email: true + # region: "us-west-2" + # docker#v2.1.0: + # debug: true + # image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-3" + # propagate-environment: true + # workdir: /data/job + # timeout: 60 - - command: | - echo "--- :arrow_down: Downloading Build Directory" - buildkite-agent artifact download "build.tar.gz" . --step ":ubuntu: Ubuntu 18.04 Build" - echo "+++ :microscope: Running Tests" - ./scripts/serial-test.sh - label: ":ubuntu: Ubuntu 18.04 NP Tests" - agents: - queue: "automation-large-builder-fleet" - plugins: - ecr#v1.1.4: - login: true - account_ids: "436617320021" - no-include-email: true - region: "us-west-2" - docker#v2.1.0: - debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-3" - propagate-environment: true - workdir: /data/job - timeout: 60 + # - command: | + # echo "--- :arrow_down: Downloading Build Directory" + # buildkite-agent artifact download "build.tar.gz" . --step ":ubuntu: Ubuntu 18.04 Build" + # echo "+++ :microscope: Running Tests" + # ./scripts/serial-test.sh + # label: ":ubuntu: Ubuntu 18.04 NP Tests" + # agents: + # queue: "automation-large-builder-fleet" + # plugins: + # ecr#v1.1.4: + # login: true + # account_ids: "436617320021" + # no-include-email: true + # region: "us-west-2" + # docker#v2.1.0: + # debug: true + # image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-3" + # propagate-environment: true + # workdir: /data/job + # timeout: 60 - label: ":darwin: [Darwin] Mojave Tests" command: @@ -350,132 +368,157 @@ steps: wait-network: true timeout: 60 - - wait + # # Mojave Tests + # - command: | + # echo "--- :arrow_down: Downloading Build Directory" + # buildkite-agent artifact download "build.tar.gz" . 
--step ":darwin: Mojave Build" + # echo "+++ :microscope: Running Tests" + # ln -s "$(pwd)" /data/job + # ./scripts/parallel-test.sh + # label: ":darwin: Mojave Tests" + # agents: + # - "role=tester-v2-2" + # - "os=mojave" + # timeout: 60 + # - command: | + # echo "--- :arrow_down: Downloading Build Directory" + # buildkite-agent artifact download "build.tar.gz" . --step ":darwin: Mojave Build" + # echo "+++ :microscope: Running Tests" + # ln -s "$(pwd)" /data/job + # ./scripts/serial-test.sh + # label: ":darwin: Mojave NP Tests" + # agents: + # - "role=tester-v2-2" + # - "os=mojave" + # timeout: 60 - - command: | # CentOS 7 Package Builder - echo "--- :arrow_down: Downloading build directory" - buildkite-agent artifact download "build.tar.gz" . --step ":centos: CentOS 7 Build" - tar -zxf build.tar.gz - echo "+++ :microscope: Starting package build" - yum install -y rpm-build - mkdir -p /root/rpmbuild/BUILD - mkdir -p /root/rpmbuild/BUILDROOT - mkdir -p /root/rpmbuild/RPMS - mkdir -p /root/rpmbuild/SOURCES - mkdir -p /root/rpmbuild/SPECS - mkdir -p /root/rpmbuild/SRPMS - cd /data/job/build/packages - bash generate_package.sh rpm - label: ":centos: CentOS 7 Package Builder" - agents: - queue: "automation-large-builder-fleet" - artifact_paths: - - "build/packages/*.rpm" - plugins: - ecr#v1.1.4: - login: true - account_ids: "436617320021" - no-include-email: true - region: "us-west-2" - docker#v2.1.0: - debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-3" - propagate-environment: true - workdir: /data/job - env: - OS: "el7" - PKGTYPE: "rpm" - timeout: 60 - - command: | # Ubuntu 16.04 Package Builder - echo "--- :arrow_down: Downloading build directory" - buildkite-agent artifact download "build.tar.gz" . --step ":ubuntu: Ubuntu 16.04 Build" - tar -zxf build.tar.gz - echo "+++ :microscope: Starting package build" - cd /data/job/build/packages - bash generate_package.sh deb - label: ":ubuntu: Ubuntu 16.04 Package Builder" - agents: - queue: "automation-large-builder-fleet" - artifact_paths: - - "build/packages/*.deb" - plugins: - ecr#v1.1.4: - login: true - account_ids: "436617320021" - no-include-email: true - region: "us-west-2" - docker#v2.1.0: - debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu16_2-3" - propagate-environment: true - workdir: /data/job - env: - OS: "ubuntu-16.04" - PKGTYPE: "deb" - timeout: 60 + # - wait - - command: | # Ubuntu 18.04 Package Builder - echo "--- :arrow_down: Downloading build directory" - buildkite-agent artifact download "build.tar.gz" . --step ":ubuntu: Ubuntu 18.04 Build" - tar -zxf build.tar.gz - echo "+++ :microscope: Starting package build" - cd /data/job/build/packages - bash generate_package.sh deb - label: ":ubuntu: Ubuntu 18.04 Package Builder" - agents: - queue: "automation-large-builder-fleet" - artifact_paths: - - "build/packages/*.deb" - plugins: - ecr#v1.1.4: - login: true - account_ids: "436617320021" - no-include-email: true - region: "us-west-2" - docker#v2.1.0: - debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-3" - propagate-environment: true - workdir: /data/job - env: - OS: "ubuntu-18.04" - PKGTYPE: "deb" - timeout: 60 + # - command: | # CentOS 7 Package Builder + # echo "--- :arrow_down: Downloading build directory" + # buildkite-agent artifact download "build.tar.gz" . 
--step ":centos: CentOS 7 Build" + # tar -zxf build.tar.gz + # echo "+++ :microscope: Starting package build" + # yum install -y rpm-build + # mkdir -p /root/rpmbuild/BUILD + # mkdir -p /root/rpmbuild/BUILDROOT + # mkdir -p /root/rpmbuild/RPMS + # mkdir -p /root/rpmbuild/SOURCES + # mkdir -p /root/rpmbuild/SPECS + # mkdir -p /root/rpmbuild/SRPMS + # cd /data/job/build/packages + # bash generate_package.sh rpm + # label: ":centos: CentOS 7 Package Builder" + # agents: + # queue: "automation-large-builder-fleet" + # artifact_paths: + # - "build/packages/*.rpm" + # plugins: + # ecr#v1.1.4: + # login: true + # account_ids: "436617320021" + # no-include-email: true + # region: "us-west-2" + # docker#v2.1.0: + # debug: true + # image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-3" + # propagate-environment: true + # workdir: /data/job + # env: + # OS: "el7" + # PKGTYPE: "rpm" + # timeout: 60 - - command: | # macOS Mojave Package Builder - echo "--- :arrow_down: Downloading build directory" - buildkite-agent artifact download "build.tar.gz" . --step ":darwin: Mojave Build" - tar -zxf build.tar.gz - echo "+++ :microscope: Starting package build" - ln -s "$(pwd)" /data/job - cd /data/job/build/packages - bash generate_package.sh brew - label: ":darwin: Mojave Package Builder" - agents: - - "role=builder-v2-2" - - "os=mojave" - artifact_paths: - - "build/packages/*.tar.gz" - - "build/packages/*.rb" - timeout: 60 + # - command: | # Ubuntu 16.04 Package Builder + # echo "--- :arrow_down: Downloading build directory" + # buildkite-agent artifact download "build.tar.gz" . --step ":ubuntu: Ubuntu 16.04 Build" + # tar -zxf build.tar.gz + # echo "+++ :microscope: Starting package build" + # cd /data/job/build/packages + # bash generate_package.sh deb + # label: ":ubuntu: Ubuntu 16.04 Package Builder" + # agents: + # queue: "automation-large-builder-fleet" + # artifact_paths: + # - "build/packages/*.deb" + # plugins: + # ecr#v1.1.4: + # login: true + # account_ids: "436617320021" + # no-include-email: true + # region: "us-west-2" + # docker#v2.1.0: + # debug: true + # image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu16_2-3" + # propagate-environment: true + # workdir: /data/job + # env: + # OS: "ubuntu-16.04" + # PKGTYPE: "deb" + # timeout: 60 - - wait + # - command: | # Ubuntu 18.04 Package Builder + # echo "--- :arrow_down: Downloading build directory" + # buildkite-agent artifact download "build.tar.gz" . --step ":ubuntu: Ubuntu 18.04 Build" + # tar -zxf build.tar.gz + # echo "+++ :microscope: Starting package build" + # cd /data/job/build/packages + # bash generate_package.sh deb + # label: ":ubuntu: Ubuntu 18.04 Package Builder" + # agents: + # queue: "automation-large-builder-fleet" + # artifact_paths: + # - "build/packages/*.deb" + # plugins: + # ecr#v1.1.4: + # login: true + # account_ids: "436617320021" + # no-include-email: true + # region: "us-west-2" + # docker#v2.1.0: + # debug: true + # image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-3" + # propagate-environment: true + # workdir: /data/job + # env: + # OS: "ubuntu-18.04" + # PKGTYPE: "deb" + # timeout: 60 - - command: | # Brew Updater - echo "--- :arrow_down: Downloading brew files" - buildkite-agent artifact download "build/packages/eosio.rb" . 
--step ":darwin: Mojave Package Builder" - label: ":darwin: Brew Updater" - agents: - queue: "automation-large-builder-fleet" - artifact_paths: - - "build/packages/eosio.rb" - timeout: 5 + # - command: | # macOS Mojave Package Builder + # echo "--- :arrow_down: Downloading build directory" + # buildkite-agent artifact download "build.tar.gz" . --step ":darwin: Mojave Build" + # tar -zxf build.tar.gz + # echo "+++ :microscope: Starting package build" + # ln -s "$(pwd)" /data/job + # cd /data/job/build/packages + # bash generate_package.sh brew + # label: ":darwin: Mojave Package Builder" + # agents: + # - "role=builder-v2-2" + # - "os=mojave" + # artifact_paths: + # - "build/packages/*.tar.gz" + # - "build/packages/*.rb" + # timeout: 60 - - command: | # Git Submodule Regression Check - echo "+++ :microscope: Running git submodule regression check" && \ - ./scripts/submodule_check.sh - label: "Git Submodule Regression Check" - agents: - queue: "automation-large-builder-fleet" - timeout: 5 + # - wait + + # - command: | # Brew Updater + # echo "--- :arrow_down: Downloading brew files" + # buildkite-agent artifact download "build/packages/eosio.rb" . --step ":darwin: Mojave Package Builder" + # label: ":darwin: Brew Updater" + # agents: + # queue: "automation-large-builder-fleet" + # artifact_paths: + # - "build/packages/eosio.rb" + # timeout: 5 + + # - command: | # Git Submodule Regression Check + # echo "+++ :microscope: Running git submodule regression check" && \ + # ./scripts/submodule_check.sh + # label: "Git Submodule Regression Check" + # agents: + # queue: "automation-large-builder-fleet" + # timeout: 5 From 75f69a049658828964c7391f34130ccd1a851308 Mon Sep 17 00:00:00 2001 From: Nathan Pierce Date: Wed, 24 Apr 2019 23:59:01 -0400 Subject: [PATCH 524/680] Anka finalization --- .buildkite/pipeline.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml index e1eb823c32a..4534fb366c6 100644 --- a/.buildkite/pipeline.yml +++ b/.buildkite/pipeline.yml @@ -38,7 +38,7 @@ steps: wait-network: true agents: - "queue=mac-anka-node-fleet" - timeout: 60 + timeout: 120 # - command: | # macOS Mojave Build @@ -344,7 +344,7 @@ steps: always-pull: true debug: true wait-network: true - timeout: 60 + timeout: 120 - label: ":darwin: [Darwin] Mojave Tests" command: @@ -366,7 +366,7 @@ steps: always-pull: true debug: true wait-network: true - timeout: 60 + timeout: 120 # # Mojave Tests # - command: | From d32626363678adaf7821ed900ea5d0c536b52b5c Mon Sep 17 00:00:00 2001 From: Nathan Pierce Date: Thu, 25 Apr 2019 00:18:48 -0400 Subject: [PATCH 525/680] Anka finalization --- .buildkite/pipeline.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml index 4534fb366c6..f41402ebd5c 100644 --- a/.buildkite/pipeline.yml +++ b/.buildkite/pipeline.yml @@ -327,7 +327,7 @@ steps: - label: ":darwin: [Darwin] Mojave Tests" command: - "tar -pxzf /Network/NAS/MAC_FLEET/BUILDKITE/artifacts/${ANKA_MOJAVE_TEMPLATE}-${BUILDKITE_PIPELINE_SLUG}-${BUILDKITE_BUILD_ID}.tar.gz" - - "cd ./build && ./scripts/parallel-test.sh" + - "./scripts/parallel-test.sh" artifact_paths: - "build/mongod.log" - "build/genesis.json" @@ -349,7 +349,7 @@ steps: - label: ":darwin: [Darwin] Mojave Tests" command: - "tar -pxzf /Network/NAS/MAC_FLEET/BUILDKITE/artifacts/${ANKA_MOJAVE_TEMPLATE}-${BUILDKITE_PIPELINE_SLUG}-${BUILDKITE_BUILD_ID}.tar.gz" - - "cd ./build && ./scripts/serial-test.sh" + - 
"./scripts/serial-test.sh" artifact_paths: - "build/mongod.log" - "build/genesis.json" From 0cbead98fc06cf1433834cef635cff3845ea955d Mon Sep 17 00:00:00 2001 From: Nathan Pierce Date: Thu, 25 Apr 2019 00:37:38 -0400 Subject: [PATCH 526/680] Anka finalization --- .buildkite/pipeline.yml | 10 ++-------- scripts/parallel-test.sh | 2 +- scripts/serial-test.sh | 2 +- 3 files changed, 4 insertions(+), 10 deletions(-) diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml index f41402ebd5c..ec9a02e3fca 100644 --- a/.buildkite/pipeline.yml +++ b/.buildkite/pipeline.yml @@ -326,8 +326,7 @@ steps: - label: ":darwin: [Darwin] Mojave Tests" command: - - "tar -pxzf /Network/NAS/MAC_FLEET/BUILDKITE/artifacts/${ANKA_MOJAVE_TEMPLATE}-${BUILDKITE_PIPELINE_SLUG}-${BUILDKITE_BUILD_ID}.tar.gz" - - "./scripts/parallel-test.sh" + - "./scripts/parallel-test.sh /Network/NAS/MAC_FLEET/BUILDKITE/artifacts/${ANKA_MOJAVE_TEMPLATE}-${BUILDKITE_PIPELINE_SLUG}-${BUILDKITE_BUILD_ID}.tar.gz" artifact_paths: - "build/mongod.log" - "build/genesis.json" @@ -339,8 +338,6 @@ steps: vm-name: $ANKA_MOJAVE_TEMPLATE vm-registry-tag: "${ANKA_TEMPLATE_TAG}::${MAC_TAG}" workdir: $ANKA_WORKDIR - # Tests can't have the host "working directory" mounted - no-volume: true always-pull: true debug: true wait-network: true @@ -348,8 +345,7 @@ steps: - label: ":darwin: [Darwin] Mojave Tests" command: - - "tar -pxzf /Network/NAS/MAC_FLEET/BUILDKITE/artifacts/${ANKA_MOJAVE_TEMPLATE}-${BUILDKITE_PIPELINE_SLUG}-${BUILDKITE_BUILD_ID}.tar.gz" - - "./scripts/serial-test.sh" + - "./scripts/serial-test.sh /Network/NAS/MAC_FLEET/BUILDKITE/artifacts/${ANKA_MOJAVE_TEMPLATE}-${BUILDKITE_PIPELINE_SLUG}-${BUILDKITE_BUILD_ID}.tar.gz" artifact_paths: - "build/mongod.log" - "build/genesis.json" @@ -361,8 +357,6 @@ steps: vm-name: $ANKA_MOJAVE_TEMPLATE vm-registry-tag: "${ANKA_TEMPLATE_TAG}::${MAC_TAG}" workdir: $ANKA_WORKDIR - # Tests can't have the host "working directory" mounted - no-volume: true always-pull: true debug: true wait-network: true diff --git a/scripts/parallel-test.sh b/scripts/parallel-test.sh index 0153b95c87e..cf555637437 100755 --- a/scripts/parallel-test.sh +++ b/scripts/parallel-test.sh @@ -3,7 +3,7 @@ set -e # exit on failure of any "simple" command (excludes &&, ||, or | chains) # prepare environment PATH=$PATH:~/opt/mongodb/bin echo "Extracting build directory..." -tar -zxf build.tar.gz +[[ -z "${1}" ]] && tar -zxf build.tar.gz || tar -xzf $1 cd /data/job/build # run tests echo "Running tests..." diff --git a/scripts/serial-test.sh b/scripts/serial-test.sh index c1f83ae9bfa..109efb6e9d1 100755 --- a/scripts/serial-test.sh +++ b/scripts/serial-test.sh @@ -3,7 +3,7 @@ set -e # exit on failure of any "simple" command (excludes &&, ||, or | chains) # prepare environment PATH=$PATH:~/opt/mongodb/bin echo "Extracting build directory..." -tar -zxf build.tar.gz +[[ -z "${1}" ]] && tar -zxf build.tar.gz || tar -xzf $1 #echo "Starting MongoDB..." 
#~/bin/mongod --fork --dbpath ~/data/mongodb -f ~/etc/mongod.conf --logpath "$(pwd)"/mongod.log cd /data/job/build From 45e52eab05675f020cb716776caf818d40f15af0 Mon Sep 17 00:00:00 2001 From: Nathan Pierce Date: Thu, 25 Apr 2019 00:56:32 -0400 Subject: [PATCH 527/680] Anka revert --- .buildkite/pipeline.yml | 210 ++++++++++++++++++++-------------------- 1 file changed, 105 insertions(+), 105 deletions(-) diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml index ec9a02e3fca..02bc8c91507 100644 --- a/.buildkite/pipeline.yml +++ b/.buildkite/pipeline.yml @@ -7,57 +7,57 @@ env: steps: - - trigger: "mac-anka-fleet" - label: ":anka: Ensure Mojave Anka Template Tag Exists" - branches: "*" - async: false - build: - branch: "master" - env: - REPO: "${BUILDKITE_REPO}" - REPO_BRANCH: "${BUILDKITE_BRANCH}" - CHECKSUMABLE: "${CHECKSUMABLE}" - TEMPLATE: "${ANKA_MOJAVE_TEMPLATE}" - TEMPLATE_TAG: "${ANKA_TEMPLATE_TAG}" - TAG_COMMANDS: "CLONED_REPO_DIR/scripts/eosio_build.sh -y -P -f" # CLONED_REPO_DIR IS REQUIRED and is where the repo is always cloned into - PROJECT_TAG: "${MAC_TAG}" - - - wait - - - label: ":darwin: [Darwin] Mojave Build" - command: - - "./scripts/eosio_build.sh -y -P" - - "tar -pczf /Network/NAS/MAC_FLEET/BUILDKITE/artifacts/${ANKA_MOJAVE_TEMPLATE}-${BUILDKITE_PIPELINE_SLUG}-${BUILDKITE_BUILD_ID}.tar.gz build" - plugins: - chef/anka#v0.4.3: - vm-name: $ANKA_MOJAVE_TEMPLATE - vm-registry-tag: "${ANKA_TEMPLATE_TAG}::${MAC_TAG}" - workdir: $ANKA_WORKDIR - always-pull: true - debug: true - wait-network: true - agents: - - "queue=mac-anka-node-fleet" - timeout: 120 + # - trigger: "mac-anka-fleet" + # label: ":anka: Ensure Mojave Anka Template Tag Exists" + # branches: "*" + # async: false + # build: + # branch: "master" + # env: + # REPO: "${BUILDKITE_REPO}" + # REPO_BRANCH: "${BUILDKITE_BRANCH}" + # CHECKSUMABLE: "${CHECKSUMABLE}" + # TEMPLATE: "${ANKA_MOJAVE_TEMPLATE}" + # TEMPLATE_TAG: "${ANKA_TEMPLATE_TAG}" + # TAG_COMMANDS: "CLONED_REPO_DIR/scripts/eosio_build.sh -y -P -f" # CLONED_REPO_DIR IS REQUIRED and is where the repo is always cloned into + # PROJECT_TAG: "${MAC_TAG}" + # - wait - # - command: | # macOS Mojave Build - # echo "--- Creating symbolic link to job directory :file_folder:" - # sleep 5 - # ln -s "$(pwd)" /data/job - # cd /data/job - # echo "+++ Building :hammer:" - # ./scripts/eosio_build.sh -y -P - # echo "--- Compressing build directory :compression:" - # tar -pczf build.tar.gz build - # if [[ ! -f build.tar.gz ]]; then echo 'ERROR: No build.tar.gz artifact found!' && exit 1; fi - # label: ":darwin: Mojave Build" + # - label: ":darwin: [Darwin] Mojave Build" + # command: + # - "./scripts/eosio_build.sh -y -P" + # - "tar -pczf /Network/NAS/MAC_FLEET/BUILDKITE/artifacts/${ANKA_MOJAVE_TEMPLATE}-${BUILDKITE_PIPELINE_SLUG}-${BUILDKITE_BUILD_ID}.tar.gz build" + # plugins: + # chef/anka#v0.4.3: + # vm-name: $ANKA_MOJAVE_TEMPLATE + # vm-registry-tag: "${ANKA_TEMPLATE_TAG}::${MAC_TAG}" + # workdir: $ANKA_WORKDIR + # always-pull: true + # debug: true + # wait-network: true # agents: - # - "role=builder-v2-2" - # - "os=mojave" - # artifact_paths: "build.tar.gz" + # - "queue=mac-anka-node-fleet" # timeout: 120 + + - command: | # macOS Mojave Build + echo "--- Creating symbolic link to job directory :file_folder:" + sleep 5 + ln -s "$(pwd)" /data/job + cd /data/job + echo "+++ Building :hammer:" + ./scripts/eosio_build.sh -y -P + echo "--- Compressing build directory :compression:" + tar -pczf build.tar.gz build + if [[ ! 
-f build.tar.gz ]]; then echo 'ERROR: No build.tar.gz artifact found!' && exit 1; fi + label: ":darwin: Mojave Build" + agents: + - "role=builder-v2-2" + - "os=mojave" + artifact_paths: "build.tar.gz" + timeout: 120 + # - command: | # Amazon Linux 2 Build # echo "+++ :hammer: Building" # ./scripts/eosio_build.sh -y -P @@ -324,67 +324,67 @@ steps: # workdir: /data/job # timeout: 60 - - label: ":darwin: [Darwin] Mojave Tests" - command: - - "./scripts/parallel-test.sh /Network/NAS/MAC_FLEET/BUILDKITE/artifacts/${ANKA_MOJAVE_TEMPLATE}-${BUILDKITE_PIPELINE_SLUG}-${BUILDKITE_BUILD_ID}.tar.gz" - artifact_paths: - - "build/mongod.log" - - "build/genesis.json" - - "build/config.ini" - agents: - - "queue=mac-anka-node-fleet" - plugins: - chef/anka#v0.4.3: - vm-name: $ANKA_MOJAVE_TEMPLATE - vm-registry-tag: "${ANKA_TEMPLATE_TAG}::${MAC_TAG}" - workdir: $ANKA_WORKDIR - always-pull: true - debug: true - wait-network: true - timeout: 120 - - - label: ":darwin: [Darwin] Mojave Tests" - command: - - "./scripts/serial-test.sh /Network/NAS/MAC_FLEET/BUILDKITE/artifacts/${ANKA_MOJAVE_TEMPLATE}-${BUILDKITE_PIPELINE_SLUG}-${BUILDKITE_BUILD_ID}.tar.gz" - artifact_paths: - - "build/mongod.log" - - "build/genesis.json" - - "build/config.ini" - agents: - - "queue=mac-anka-node-fleet" - plugins: - chef/anka#v0.4.3: - vm-name: $ANKA_MOJAVE_TEMPLATE - vm-registry-tag: "${ANKA_TEMPLATE_TAG}::${MAC_TAG}" - workdir: $ANKA_WORKDIR - always-pull: true - debug: true - wait-network: true - timeout: 120 - - # # Mojave Tests - # - command: | - # echo "--- :arrow_down: Downloading Build Directory" - # buildkite-agent artifact download "build.tar.gz" . --step ":darwin: Mojave Build" - # echo "+++ :microscope: Running Tests" - # ln -s "$(pwd)" /data/job - # ./scripts/parallel-test.sh - # label: ":darwin: Mojave Tests" + # - label: ":darwin: [Darwin] Mojave Tests" + # command: + # - "./scripts/parallel-test.sh /Network/NAS/MAC_FLEET/BUILDKITE/artifacts/${ANKA_MOJAVE_TEMPLATE}-${BUILDKITE_PIPELINE_SLUG}-${BUILDKITE_BUILD_ID}.tar.gz" + # artifact_paths: + # - "build/mongod.log" + # - "build/genesis.json" + # - "build/config.ini" # agents: - # - "role=tester-v2-2" - # - "os=mojave" - # timeout: 60 - # - command: | - # echo "--- :arrow_down: Downloading Build Directory" - # buildkite-agent artifact download "build.tar.gz" . --step ":darwin: Mojave Build" - # echo "+++ :microscope: Running Tests" - # ln -s "$(pwd)" /data/job - # ./scripts/serial-test.sh - # label: ":darwin: Mojave NP Tests" + # - "queue=mac-anka-node-fleet" + # plugins: + # chef/anka#v0.4.3: + # vm-name: $ANKA_MOJAVE_TEMPLATE + # vm-registry-tag: "${ANKA_TEMPLATE_TAG}::${MAC_TAG}" + # workdir: $ANKA_WORKDIR + # always-pull: true + # debug: true + # wait-network: true + # timeout: 120 + + # - label: ":darwin: [Darwin] Mojave Tests" + # command: + # - "./scripts/serial-test.sh /Network/NAS/MAC_FLEET/BUILDKITE/artifacts/${ANKA_MOJAVE_TEMPLATE}-${BUILDKITE_PIPELINE_SLUG}-${BUILDKITE_BUILD_ID}.tar.gz" + # artifact_paths: + # - "build/mongod.log" + # - "build/genesis.json" + # - "build/config.ini" # agents: - # - "role=tester-v2-2" - # - "os=mojave" - # timeout: 60 + # - "queue=mac-anka-node-fleet" + # plugins: + # chef/anka#v0.4.3: + # vm-name: $ANKA_MOJAVE_TEMPLATE + # vm-registry-tag: "${ANKA_TEMPLATE_TAG}::${MAC_TAG}" + # workdir: $ANKA_WORKDIR + # always-pull: true + # debug: true + # wait-network: true + # timeout: 120 + + # Mojave Tests + - command: | + echo "--- :arrow_down: Downloading Build Directory" + buildkite-agent artifact download "build.tar.gz" . 
--step ":darwin: Mojave Build" + echo "+++ :microscope: Running Tests" + ln -s "$(pwd)" /data/job + ./scripts/parallel-test.sh + label: ":darwin: Mojave Tests" + agents: + - "role=tester-v2-2" + - "os=mojave" + timeout: 60 + - command: | + echo "--- :arrow_down: Downloading Build Directory" + buildkite-agent artifact download "build.tar.gz" . --step ":darwin: Mojave Build" + echo "+++ :microscope: Running Tests" + ln -s "$(pwd)" /data/job + ./scripts/serial-test.sh + label: ":darwin: Mojave NP Tests" + agents: + - "role=tester-v2-2" + - "os=mojave" + timeout: 60 # - wait From 22db39698813adf3af069388306a0c75d9e40898 Mon Sep 17 00:00:00 2001 From: Nathan Pierce Date: Thu, 25 Apr 2019 01:36:39 -0400 Subject: [PATCH 528/680] Anka reverted --- .buildkite/pipeline.yml | 760 ++++++++++++++++++++-------------------- 1 file changed, 380 insertions(+), 380 deletions(-) diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml index 02bc8c91507..2a188f66eca 100644 --- a/.buildkite/pipeline.yml +++ b/.buildkite/pipeline.yml @@ -1,9 +1,9 @@ -env: - ANKA_WORKDIR: "/data/job" - ANKA_MOJAVE_TEMPLATE: "10.14.4_6C_14G_40G" - ANKA_TEMPLATE_TAG: "clean::cicd::git-ssh::nas::brew" - CHECKSUMABLE: "scripts/eosio_build*" - MAC_TAG: "eosio_2-3" +# env: +# ANKA_WORKDIR: "/data/job" +# ANKA_MOJAVE_TEMPLATE: "10.14.4_6C_14G_40G" +# ANKA_TEMPLATE_TAG: "clean::cicd::git-ssh::nas::brew" +# CHECKSUMABLE: "scripts/eosio_build*" +# MAC_TAG: "eosio_2-3" steps: @@ -58,271 +58,271 @@ steps: artifact_paths: "build.tar.gz" timeout: 120 - # - command: | # Amazon Linux 2 Build - # echo "+++ :hammer: Building" - # ./scripts/eosio_build.sh -y -P - # echo "--- :compression: Compressing build directory" - # tar -pczf build.tar.gz build - # if [[ ! -f build.tar.gz ]]; then echo 'ERROR: No build.tar.gz artifact found!' && exit 1; fi - # label: ":aws: Amazon Linux 2 Build" - # agents: - # queue: "automation-large-builder-fleet" - # artifact_paths: "build.tar.gz" - # plugins: - # ecr#v1.1.4: - # login: true - # account_ids: "436617320021" - # no-include-email: true - # region: "us-west-2" - # docker#v2.1.0: - # debug: true - # image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-3" - # propagate-environment: true - # workdir: /data/job - # timeout: 120 + - command: | # Amazon Linux 2 Build + echo "+++ :hammer: Building" + ./scripts/eosio_build.sh -y -P + echo "--- :compression: Compressing build directory" + tar -pczf build.tar.gz build + if [[ ! -f build.tar.gz ]]; then echo 'ERROR: No build.tar.gz artifact found!' && exit 1; fi + label: ":aws: Amazon Linux 2 Build" + agents: + queue: "automation-large-builder-fleet" + artifact_paths: "build.tar.gz" + plugins: + ecr#v1.1.4: + login: true + account_ids: "436617320021" + no-include-email: true + region: "us-west-2" + docker#v2.1.0: + debug: true + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-3" + propagate-environment: true + workdir: /data/job + timeout: 120 - # - command: | # CentOS 7 Build - # echo "+++ :hammer: Building" - # ./scripts/eosio_build.sh -y -P - # echo "--- :compression: Compressing build directory" - # tar -pczf build.tar.gz build - # if [[ ! -f build.tar.gz ]]; then echo 'ERROR: No build.tar.gz artifact found!' 
&& exit 1; fi - # label: ":centos: CentOS 7 Build" - # agents: - # queue: "automation-large-builder-fleet" - # artifact_paths: "build.tar.gz" - # plugins: - # ecr#v1.1.4: - # login: true - # account_ids: "436617320021" - # no-include-email: true - # region: "us-west-2" - # docker#v2.1.0: - # debug: true - # image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-3" - # propagate-environment: true - # workdir: /data/job - # timeout: 120 + - command: | # CentOS 7 Build + echo "+++ :hammer: Building" + ./scripts/eosio_build.sh -y -P + echo "--- :compression: Compressing build directory" + tar -pczf build.tar.gz build + if [[ ! -f build.tar.gz ]]; then echo 'ERROR: No build.tar.gz artifact found!' && exit 1; fi + label: ":centos: CentOS 7 Build" + agents: + queue: "automation-large-builder-fleet" + artifact_paths: "build.tar.gz" + plugins: + ecr#v1.1.4: + login: true + account_ids: "436617320021" + no-include-email: true + region: "us-west-2" + docker#v2.1.0: + debug: true + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-3" + propagate-environment: true + workdir: /data/job + timeout: 120 - # - command: | # Ubuntu 16.04 Build - # echo "+++ :hammer: Building" - # ./scripts/eosio_build.sh -y -P - # echo "--- :compression: Compressing build directory" - # tar -pczf build.tar.gz build - # if [[ ! -f build.tar.gz ]]; then echo 'ERROR: No build.tar.gz artifact found!' && exit 1; fi - # label: ":ubuntu: Ubuntu 16.04 Build" - # agents: - # queue: "automation-large-builder-fleet" - # artifact_paths: "build.tar.gz" - # plugins: - # ecr#v1.1.4: - # login: true - # account_ids: "436617320021" - # no-include-email: true - # region: "us-west-2" - # docker#v2.1.0: - # debug: true - # image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu16_2-3" - # propagate-environment: true - # workdir: /data/job - # timeout: 120 + - command: | # Ubuntu 16.04 Build + echo "+++ :hammer: Building" + ./scripts/eosio_build.sh -y -P + echo "--- :compression: Compressing build directory" + tar -pczf build.tar.gz build + if [[ ! -f build.tar.gz ]]; then echo 'ERROR: No build.tar.gz artifact found!' && exit 1; fi + label: ":ubuntu: Ubuntu 16.04 Build" + agents: + queue: "automation-large-builder-fleet" + artifact_paths: "build.tar.gz" + plugins: + ecr#v1.1.4: + login: true + account_ids: "436617320021" + no-include-email: true + region: "us-west-2" + docker#v2.1.0: + debug: true + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu16_2-3" + propagate-environment: true + workdir: /data/job + timeout: 120 - # - command: | # Ubuntu 18.04 Build - # echo "+++ :hammer: Building" - # ./scripts/eosio_build.sh -y -P - # echo "--- :compression: Compressing build directory" - # tar -pczf build.tar.gz build - # if [[ ! -f build.tar.gz ]]; then echo 'ERROR: No build.tar.gz artifact found!' && exit 1; fi - # label: ":ubuntu: Ubuntu 18.04 Build" - # agents: - # queue: "automation-large-builder-fleet" - # artifact_paths: "build.tar.gz" - # plugins: - # ecr#v1.1.4: - # login: true - # account_ids: "436617320021" - # no-include-email: true - # region: "us-west-2" - # docker#v2.1.0: - # debug: true - # image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-3" - # propagate-environment: true - # workdir: /data/job - # timeout: 120 + - command: | # Ubuntu 18.04 Build + echo "+++ :hammer: Building" + ./scripts/eosio_build.sh -y -P + echo "--- :compression: Compressing build directory" + tar -pczf build.tar.gz build + if [[ ! 
-f build.tar.gz ]]; then echo 'ERROR: No build.tar.gz artifact found!' && exit 1; fi + label: ":ubuntu: Ubuntu 18.04 Build" + agents: + queue: "automation-large-builder-fleet" + artifact_paths: "build.tar.gz" + plugins: + ecr#v1.1.4: + login: true + account_ids: "436617320021" + no-include-email: true + region: "us-west-2" + docker#v2.1.0: + debug: true + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-3" + propagate-environment: true + workdir: /data/job + timeout: 120 - wait - # # Amazon Linux 2 Tests - # - command: | - # echo "--- :arrow_down: Downloading Build Directory" - # buildkite-agent artifact download "build.tar.gz" . --step ":aws: Amazon Linux 2 Build" - # echo "+++ :microscope: Running Tests" - # ./scripts/parallel-test.sh - # label: ":aws: Amazon Linux 2 Tests" - # agents: - # queue: "automation-large-builder-fleet" - # plugins: - # ecr#v1.1.4: - # login: true - # account_ids: "436617320021" - # no-include-email: true - # region: "us-west-2" - # docker#v2.1.0: - # debug: true - # image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-3" - # propagate-environment: true - # workdir: /data/job - # timeout: 60 + # Amazon Linux 2 Tests + - command: | + echo "--- :arrow_down: Downloading Build Directory" + buildkite-agent artifact download "build.tar.gz" . --step ":aws: Amazon Linux 2 Build" + echo "+++ :microscope: Running Tests" + ./scripts/parallel-test.sh + label: ":aws: Amazon Linux 2 Tests" + agents: + queue: "automation-large-builder-fleet" + plugins: + ecr#v1.1.4: + login: true + account_ids: "436617320021" + no-include-email: true + region: "us-west-2" + docker#v2.1.0: + debug: true + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-3" + propagate-environment: true + workdir: /data/job + timeout: 60 - # - command: | - # echo "--- :arrow_down: Downloading Build Directory" - # buildkite-agent artifact download "build.tar.gz" . --step ":aws: Amazon Linux 2 Build" - # echo "+++ :microscope: Running Tests" - # ./scripts/serial-test.sh - # label: ":aws: Amazon Linux 2 NP Tests" - # agents: - # queue: "automation-large-builder-fleet" - # plugins: - # ecr#v1.1.4: - # login: true - # account_ids: "436617320021" - # no-include-email: true - # region: "us-west-2" - # docker#v2.1.0: - # debug: true - # image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-3" - # propagate-environment: true - # workdir: /data/job - # timeout: 60 + - command: | + echo "--- :arrow_down: Downloading Build Directory" + buildkite-agent artifact download "build.tar.gz" . --step ":aws: Amazon Linux 2 Build" + echo "+++ :microscope: Running Tests" + ./scripts/serial-test.sh + label: ":aws: Amazon Linux 2 NP Tests" + agents: + queue: "automation-large-builder-fleet" + plugins: + ecr#v1.1.4: + login: true + account_ids: "436617320021" + no-include-email: true + region: "us-west-2" + docker#v2.1.0: + debug: true + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-3" + propagate-environment: true + workdir: /data/job + timeout: 60 - # # centOS 7 Tests - # - command: | - # echo "--- :arrow_down: Downloading Build Directory" - # buildkite-agent artifact download "build.tar.gz" . 
--step ":centos: CentOS 7 Build" - # echo "+++ :microscope: Running Tests" - # ./scripts/parallel-test.sh - # label: ":centos: CentOS 7 Tests" - # agents: - # queue: "automation-large-builder-fleet" - # plugins: - # ecr#v1.1.4: - # login: true - # account_ids: "436617320021" - # no-include-email: true - # region: "us-west-2" - # docker#v2.1.0: - # debug: true - # image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-3" - # propagate-environment: true - # workdir: /data/job - # timeout: 60 + # centOS 7 Tests + - command: | + echo "--- :arrow_down: Downloading Build Directory" + buildkite-agent artifact download "build.tar.gz" . --step ":centos: CentOS 7 Build" + echo "+++ :microscope: Running Tests" + ./scripts/parallel-test.sh + label: ":centos: CentOS 7 Tests" + agents: + queue: "automation-large-builder-fleet" + plugins: + ecr#v1.1.4: + login: true + account_ids: "436617320021" + no-include-email: true + region: "us-west-2" + docker#v2.1.0: + debug: true + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-3" + propagate-environment: true + workdir: /data/job + timeout: 60 - # - command: | - # echo "--- :arrow_down: Downloading Build Directory" - # buildkite-agent artifact download "build.tar.gz" . --step ":centos: CentOS 7 Build" - # echo "+++ :microscope: Running Tests" - # ./scripts/serial-test.sh - # label: ":centos: CentOS 7 NP Tests" - # agents: - # queue: "automation-large-builder-fleet" - # plugins: - # ecr#v1.1.4: - # login: true - # account_ids: "436617320021" - # no-include-email: true - # region: "us-west-2" - # docker#v2.1.0: - # debug: true - # image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-3" - # propagate-environment: true - # workdir: /data/job - # timeout: 60 + - command: | + echo "--- :arrow_down: Downloading Build Directory" + buildkite-agent artifact download "build.tar.gz" . --step ":centos: CentOS 7 Build" + echo "+++ :microscope: Running Tests" + ./scripts/serial-test.sh + label: ":centos: CentOS 7 NP Tests" + agents: + queue: "automation-large-builder-fleet" + plugins: + ecr#v1.1.4: + login: true + account_ids: "436617320021" + no-include-email: true + region: "us-west-2" + docker#v2.1.0: + debug: true + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-3" + propagate-environment: true + workdir: /data/job + timeout: 60 - # # Ubuntu 16.04 Tests - # - command: | - # echo "--- :arrow_down: Downloading Build Directory" - # buildkite-agent artifact download "build.tar.gz" . --step ":ubuntu: Ubuntu 16.04 Build" - # echo "+++ :microscope: Running Tests" - # ./scripts/parallel-test.sh - # label: ":ubuntu: Ubuntu 16.04 Tests" - # agents: - # queue: "automation-large-builder-fleet" - # plugins: - # ecr#v1.1.4: - # login: true - # account_ids: "436617320021" - # no-include-email: true - # region: "us-west-2" - # docker#v2.1.0: - # debug: true - # image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu16_2-3" - # propagate-environment: true - # workdir: /data/job - # timeout: 60 + # Ubuntu 16.04 Tests + - command: | + echo "--- :arrow_down: Downloading Build Directory" + buildkite-agent artifact download "build.tar.gz" . 
--step ":ubuntu: Ubuntu 16.04 Build" + echo "+++ :microscope: Running Tests" + ./scripts/parallel-test.sh + label: ":ubuntu: Ubuntu 16.04 Tests" + agents: + queue: "automation-large-builder-fleet" + plugins: + ecr#v1.1.4: + login: true + account_ids: "436617320021" + no-include-email: true + region: "us-west-2" + docker#v2.1.0: + debug: true + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu16_2-3" + propagate-environment: true + workdir: /data/job + timeout: 60 - # - command: | - # echo "--- :arrow_down: Downloading Build Directory" - # buildkite-agent artifact download "build.tar.gz" . --step ":ubuntu: Ubuntu 16.04 Build" - # echo "+++ :microscope: Running Tests" - # ./scripts/serial-test.sh - # label: ":ubuntu: Ubuntu 16.04 NP Tests" - # agents: - # queue: "automation-large-builder-fleet" - # plugins: - # ecr#v1.1.4: - # login: true - # account_ids: "436617320021" - # no-include-email: true - # region: "us-west-2" - # docker#v2.1.0: - # debug: true - # image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu16_2-3" - # propagate-environment: true - # workdir: /data/job - # timeout: 60 + - command: | + echo "--- :arrow_down: Downloading Build Directory" + buildkite-agent artifact download "build.tar.gz" . --step ":ubuntu: Ubuntu 16.04 Build" + echo "+++ :microscope: Running Tests" + ./scripts/serial-test.sh + label: ":ubuntu: Ubuntu 16.04 NP Tests" + agents: + queue: "automation-large-builder-fleet" + plugins: + ecr#v1.1.4: + login: true + account_ids: "436617320021" + no-include-email: true + region: "us-west-2" + docker#v2.1.0: + debug: true + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu16_2-3" + propagate-environment: true + workdir: /data/job + timeout: 60 - # # Ubuntu 18.04 Tests - # - command: | - # echo "--- :arrow_down: Downloading Build Directory" - # buildkite-agent artifact download "build.tar.gz" . --step ":ubuntu: Ubuntu 18.04 Build" - # echo "+++ :microscope: Running Tests" - # ./scripts/parallel-test.sh - # label: ":ubuntu: Ubuntu 18.04 Tests" - # agents: - # queue: "automation-large-builder-fleet" - # plugins: - # ecr#v1.1.4: - # login: true - # account_ids: "436617320021" - # no-include-email: true - # region: "us-west-2" - # docker#v2.1.0: - # debug: true - # image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-3" - # propagate-environment: true - # workdir: /data/job - # timeout: 60 + # Ubuntu 18.04 Tests + - command: | + echo "--- :arrow_down: Downloading Build Directory" + buildkite-agent artifact download "build.tar.gz" . --step ":ubuntu: Ubuntu 18.04 Build" + echo "+++ :microscope: Running Tests" + ./scripts/parallel-test.sh + label: ":ubuntu: Ubuntu 18.04 Tests" + agents: + queue: "automation-large-builder-fleet" + plugins: + ecr#v1.1.4: + login: true + account_ids: "436617320021" + no-include-email: true + region: "us-west-2" + docker#v2.1.0: + debug: true + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-3" + propagate-environment: true + workdir: /data/job + timeout: 60 - # - command: | - # echo "--- :arrow_down: Downloading Build Directory" - # buildkite-agent artifact download "build.tar.gz" . 
--step ":ubuntu: Ubuntu 18.04 Build" - # echo "+++ :microscope: Running Tests" - # ./scripts/serial-test.sh - # label: ":ubuntu: Ubuntu 18.04 NP Tests" - # agents: - # queue: "automation-large-builder-fleet" - # plugins: - # ecr#v1.1.4: - # login: true - # account_ids: "436617320021" - # no-include-email: true - # region: "us-west-2" - # docker#v2.1.0: - # debug: true - # image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-3" - # propagate-environment: true - # workdir: /data/job - # timeout: 60 + - command: | + echo "--- :arrow_down: Downloading Build Directory" + buildkite-agent artifact download "build.tar.gz" . --step ":ubuntu: Ubuntu 18.04 Build" + echo "+++ :microscope: Running Tests" + ./scripts/serial-test.sh + label: ":ubuntu: Ubuntu 18.04 NP Tests" + agents: + queue: "automation-large-builder-fleet" + plugins: + ecr#v1.1.4: + login: true + account_ids: "436617320021" + no-include-email: true + region: "us-west-2" + docker#v2.1.0: + debug: true + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-3" + propagate-environment: true + workdir: /data/job + timeout: 60 # - label: ":darwin: [Darwin] Mojave Tests" # command: @@ -387,132 +387,132 @@ steps: timeout: 60 - # - wait + - wait - # - command: | # CentOS 7 Package Builder - # echo "--- :arrow_down: Downloading build directory" - # buildkite-agent artifact download "build.tar.gz" . --step ":centos: CentOS 7 Build" - # tar -zxf build.tar.gz - # echo "+++ :microscope: Starting package build" - # yum install -y rpm-build - # mkdir -p /root/rpmbuild/BUILD - # mkdir -p /root/rpmbuild/BUILDROOT - # mkdir -p /root/rpmbuild/RPMS - # mkdir -p /root/rpmbuild/SOURCES - # mkdir -p /root/rpmbuild/SPECS - # mkdir -p /root/rpmbuild/SRPMS - # cd /data/job/build/packages - # bash generate_package.sh rpm - # label: ":centos: CentOS 7 Package Builder" - # agents: - # queue: "automation-large-builder-fleet" - # artifact_paths: - # - "build/packages/*.rpm" - # plugins: - # ecr#v1.1.4: - # login: true - # account_ids: "436617320021" - # no-include-email: true - # region: "us-west-2" - # docker#v2.1.0: - # debug: true - # image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-3" - # propagate-environment: true - # workdir: /data/job - # env: - # OS: "el7" - # PKGTYPE: "rpm" - # timeout: 60 + - command: | # CentOS 7 Package Builder + echo "--- :arrow_down: Downloading build directory" + buildkite-agent artifact download "build.tar.gz" . --step ":centos: CentOS 7 Build" + tar -zxf build.tar.gz + echo "+++ :microscope: Starting package build" + yum install -y rpm-build + mkdir -p /root/rpmbuild/BUILD + mkdir -p /root/rpmbuild/BUILDROOT + mkdir -p /root/rpmbuild/RPMS + mkdir -p /root/rpmbuild/SOURCES + mkdir -p /root/rpmbuild/SPECS + mkdir -p /root/rpmbuild/SRPMS + cd /data/job/build/packages + bash generate_package.sh rpm + label: ":centos: CentOS 7 Package Builder" + agents: + queue: "automation-large-builder-fleet" + artifact_paths: + - "build/packages/*.rpm" + plugins: + ecr#v1.1.4: + login: true + account_ids: "436617320021" + no-include-email: true + region: "us-west-2" + docker#v2.1.0: + debug: true + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-3" + propagate-environment: true + workdir: /data/job + env: + OS: "el7" + PKGTYPE: "rpm" + timeout: 60 - # - command: | # Ubuntu 16.04 Package Builder - # echo "--- :arrow_down: Downloading build directory" - # buildkite-agent artifact download "build.tar.gz" . 
--step ":ubuntu: Ubuntu 16.04 Build" - # tar -zxf build.tar.gz - # echo "+++ :microscope: Starting package build" - # cd /data/job/build/packages - # bash generate_package.sh deb - # label: ":ubuntu: Ubuntu 16.04 Package Builder" - # agents: - # queue: "automation-large-builder-fleet" - # artifact_paths: - # - "build/packages/*.deb" - # plugins: - # ecr#v1.1.4: - # login: true - # account_ids: "436617320021" - # no-include-email: true - # region: "us-west-2" - # docker#v2.1.0: - # debug: true - # image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu16_2-3" - # propagate-environment: true - # workdir: /data/job - # env: - # OS: "ubuntu-16.04" - # PKGTYPE: "deb" - # timeout: 60 + - command: | # Ubuntu 16.04 Package Builder + echo "--- :arrow_down: Downloading build directory" + buildkite-agent artifact download "build.tar.gz" . --step ":ubuntu: Ubuntu 16.04 Build" + tar -zxf build.tar.gz + echo "+++ :microscope: Starting package build" + cd /data/job/build/packages + bash generate_package.sh deb + label: ":ubuntu: Ubuntu 16.04 Package Builder" + agents: + queue: "automation-large-builder-fleet" + artifact_paths: + - "build/packages/*.deb" + plugins: + ecr#v1.1.4: + login: true + account_ids: "436617320021" + no-include-email: true + region: "us-west-2" + docker#v2.1.0: + debug: true + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu16_2-3" + propagate-environment: true + workdir: /data/job + env: + OS: "ubuntu-16.04" + PKGTYPE: "deb" + timeout: 60 - # - command: | # Ubuntu 18.04 Package Builder - # echo "--- :arrow_down: Downloading build directory" - # buildkite-agent artifact download "build.tar.gz" . --step ":ubuntu: Ubuntu 18.04 Build" - # tar -zxf build.tar.gz - # echo "+++ :microscope: Starting package build" - # cd /data/job/build/packages - # bash generate_package.sh deb - # label: ":ubuntu: Ubuntu 18.04 Package Builder" - # agents: - # queue: "automation-large-builder-fleet" - # artifact_paths: - # - "build/packages/*.deb" - # plugins: - # ecr#v1.1.4: - # login: true - # account_ids: "436617320021" - # no-include-email: true - # region: "us-west-2" - # docker#v2.1.0: - # debug: true - # image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-3" - # propagate-environment: true - # workdir: /data/job - # env: - # OS: "ubuntu-18.04" - # PKGTYPE: "deb" - # timeout: 60 + - command: | # Ubuntu 18.04 Package Builder + echo "--- :arrow_down: Downloading build directory" + buildkite-agent artifact download "build.tar.gz" . --step ":ubuntu: Ubuntu 18.04 Build" + tar -zxf build.tar.gz + echo "+++ :microscope: Starting package build" + cd /data/job/build/packages + bash generate_package.sh deb + label: ":ubuntu: Ubuntu 18.04 Package Builder" + agents: + queue: "automation-large-builder-fleet" + artifact_paths: + - "build/packages/*.deb" + plugins: + ecr#v1.1.4: + login: true + account_ids: "436617320021" + no-include-email: true + region: "us-west-2" + docker#v2.1.0: + debug: true + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-3" + propagate-environment: true + workdir: /data/job + env: + OS: "ubuntu-18.04" + PKGTYPE: "deb" + timeout: 60 - # - command: | # macOS Mojave Package Builder - # echo "--- :arrow_down: Downloading build directory" - # buildkite-agent artifact download "build.tar.gz" . 
--step ":darwin: Mojave Build" - # tar -zxf build.tar.gz - # echo "+++ :microscope: Starting package build" - # ln -s "$(pwd)" /data/job - # cd /data/job/build/packages - # bash generate_package.sh brew - # label: ":darwin: Mojave Package Builder" - # agents: - # - "role=builder-v2-2" - # - "os=mojave" - # artifact_paths: - # - "build/packages/*.tar.gz" - # - "build/packages/*.rb" - # timeout: 60 + - command: | # macOS Mojave Package Builder + echo "--- :arrow_down: Downloading build directory" + buildkite-agent artifact download "build.tar.gz" . --step ":darwin: Mojave Build" + tar -zxf build.tar.gz + echo "+++ :microscope: Starting package build" + ln -s "$(pwd)" /data/job + cd /data/job/build/packages + bash generate_package.sh brew + label: ":darwin: Mojave Package Builder" + agents: + - "role=builder-v2-2" + - "os=mojave" + artifact_paths: + - "build/packages/*.tar.gz" + - "build/packages/*.rb" + timeout: 60 - # - wait + - wait - # - command: | # Brew Updater - # echo "--- :arrow_down: Downloading brew files" - # buildkite-agent artifact download "build/packages/eosio.rb" . --step ":darwin: Mojave Package Builder" - # label: ":darwin: Brew Updater" - # agents: - # queue: "automation-large-builder-fleet" - # artifact_paths: - # - "build/packages/eosio.rb" - # timeout: 5 + - command: | # Brew Updater + echo "--- :arrow_down: Downloading brew files" + buildkite-agent artifact download "build/packages/eosio.rb" . --step ":darwin: Mojave Package Builder" + label: ":darwin: Brew Updater" + agents: + queue: "automation-large-builder-fleet" + artifact_paths: + - "build/packages/eosio.rb" + timeout: 5 - # - command: | # Git Submodule Regression Check - # echo "+++ :microscope: Running git submodule regression check" && \ - # ./scripts/submodule_check.sh - # label: "Git Submodule Regression Check" - # agents: - # queue: "automation-large-builder-fleet" - # timeout: 5 + - command: | # Git Submodule Regression Check + echo "+++ :microscope: Running git submodule regression check" && \ + ./scripts/submodule_check.sh + label: "Git Submodule Regression Check" + agents: + queue: "automation-large-builder-fleet" + timeout: 5 From 68a7c53a333a38118253722c8c021764e876f5d8 Mon Sep 17 00:00:00 2001 From: Andrianto Lie Date: Thu, 25 Apr 2019 15:40:48 +0800 Subject: [PATCH 529/680] Cleanup cluster.py and also increase retry time for bios node check pulse --- tests/Cluster.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/tests/Cluster.py b/tests/Cluster.py index efd6dbccfdc..3ed13404e27 100644 --- a/tests/Cluster.py +++ b/tests/Cluster.py @@ -403,8 +403,8 @@ def connectGroup(group, producerNodes, bridgeNodes) : if unstartedNodes > 0: self.unstartedNodes=self.discoverUnstartedLocalNodes(unstartedNodes, totalNodes) - biosNode=self.discoverBiosNode() - if not biosNode or not biosNode.checkPulse(): + biosNode=self.discoverBiosNode(timeout=Utils.systemWaitTimeout) + if not biosNode or not Utils.waitForBool(biosNode.checkPulse, Utils.systemWaitTimeout): Utils.Print("ERROR: Bios node doesn't appear to be running...") return False @@ -1332,7 +1332,7 @@ def discoverLocalNodes(self, totalNodes, timeout=None): psOutDisplay=psOut[:6660]+"..." 
if Utils.Debug: Utils.Print("pgrep output: \"%s\"" % psOutDisplay)
         for i in range(0, totalNodes):
-            instance=self.discoverLocalNode(i, psOut)
+            instance=self.discoverLocalNode(i, psOut, timeout)
             if instance is None:
                 break
             nodes.append(instance)
@@ -1341,12 +1341,12 @@ def discoverLocalNodes(self, totalNodes, timeout=None):
         return nodes
 
     # Populate a node matched to actual running instance
-    def discoverLocalNode(self, nodeNum, psOut=None):
+    def discoverLocalNode(self, nodeNum, psOut=None, timeout=None):
         if psOut is None:
             psOut=Cluster.pgrepEosServers(timeout)
         if psOut is None:
             Utils.Print("ERROR: No nodes discovered.")
-            return nodes
+            return None
         pattern=Cluster.pgrepEosServerPattern(nodeNum)
         m=re.search(pattern, psOut, re.MULTILINE)
         if m is None:

From 0679e563c534cf31fc452cfcc368e2475ebbb971 Mon Sep 17 00:00:00 2001
From: Bart Wyatt
Date: Thu, 25 Apr 2019 09:55:50 -0400
Subject: [PATCH 530/680] When replay optimizations are disabled we still
 create database sessions; however, we were never committing those database
 sessions, leaving lots of stale undo states around until the very end of the
 replay

---
 libraries/chain/controller.cpp | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp
index 1b1f914db2d..1b08ca335a5 100644
--- a/libraries/chain/controller.cpp
+++ b/libraries/chain/controller.cpp
@@ -1846,6 +1846,11 @@ struct controller_impl {
          // On replay, log_irreversible is not called and so no irreversible_block signal is emitted.
          // So emit it explicitly here.
          emit( self.irreversible_block, bsp );
+
+         if (!self.skip_db_sessions(s)) {
+            db.commit(bsp->block_num);
+         }
+
       } else {
          EOS_ASSERT( read_mode != db_read_mode::IRREVERSIBLE, block_validate_exception,
                      "invariant failure: cannot replay reversible blocks while in irreversible mode" );

From 966d9fd7453e247113301287cf93e1dc3ba90a81 Mon Sep 17 00:00:00 2001
From: arhag
Date: Thu, 25 Apr 2019 11:54:37 -0400
Subject: [PATCH 531/680] Properly shut down from database_guard_exception
 during irreversible blocks replay

---
 libraries/chain/controller.cpp | 33 ++++++++++++++++++++++-----------
 1 file changed, 22 insertions(+), 11 deletions(-)

diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp
index 1b1f914db2d..5f75d8205cc 100644
--- a/libraries/chain/controller.cpp
+++ b/libraries/chain/controller.cpp
@@ -453,17 +453,22 @@ struct controller_impl {
             auto start_block_num = head->block_num + 1;
             auto start = fc::time_point::now();
 
+            std::exception_ptr except_ptr;
+
             if( start_block_num <= blog_head->block_num() ) {
                ilog( "existing block log, attempting to replay from ${s} to ${n} blocks",
                      ("s", start_block_num)("n", blog_head->block_num()) );
-               while( auto next = blog.read_block_by_num( head->block_num + 1 ) ) {
-                  replay_push_block( next, controller::block_status::irreversible );
-                  if( next->block_num() % 500 == 0 ) {
-                     ilog( "${n} of ${head}", ("n", next->block_num())("head", blog_head->block_num()) );
-                     if( shutdown() ) break;
+               try {
+                  while( auto next = blog.read_block_by_num( head->block_num + 1 ) ) {
+                     replay_push_block( next, controller::block_status::irreversible );
+                     if( next->block_num() % 500 == 0 ) {
+                        ilog( "${n} of ${head}", ("n", next->block_num())("head", blog_head->block_num()) );
+                        if( shutdown() ) break;
+                     }
                   }
+               } catch( const database_guard_exception& e ) {
+                  except_ptr = std::current_exception();
                }
-               std::cerr<< "\n";
                ilog( "${n} irreversible blocks replayed", ("n", 1 + head->block_num - start_block_num) );
 
               auto pending_head =
fork_db.pending_head(); @@ -488,18 +493,24 @@ struct controller_impl { ilog( "no irreversible blocks need to be replayed" ); } - int rev = 0; - while( auto obj = reversible_blocks.find(head->block_num+1) ) { - ++rev; - replay_push_block( obj->get_block(), controller::block_status::validated ); + if( !except_ptr && !shutdown() ) { + int rev = 0; + while( auto obj = reversible_blocks.find(head->block_num+1) ) { + ++rev; + replay_push_block( obj->get_block(), controller::block_status::validated ); + } + ilog( "${n} reversible blocks replayed", ("n",rev) ); } - ilog( "${n} reversible blocks replayed", ("n",rev) ); auto end = fc::time_point::now(); ilog( "replayed ${n} blocks in ${duration} seconds, ${mspb} ms/block", ("n", head->block_num + 1 - start_block_num)("duration", (end-start).count()/1000000) ("mspb", ((end-start).count()/1000.0)/(head->block_num-start_block_num)) ); replay_head_time.reset(); + + if( except_ptr ) { + std::rethrow_exception( except_ptr ); + } } void init(std::function shutdown, const snapshot_reader_ptr& snapshot) { From ab8440f7b100a7bec7ee7119f33e89e9b2a5a530 Mon Sep 17 00:00:00 2001 From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com> Date: Thu, 25 Apr 2019 13:08:58 -0400 Subject: [PATCH 532/680] remove references to "Debian" when talking about "Ubuntu" --- README.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index 3c14af2bbb7..6230df33628 100644 --- a/README.md +++ b/README.md @@ -42,17 +42,17 @@ $ brew install eosio $ brew remove eosio ``` -#### Ubuntu 18.04 Debian Package Install +#### Ubuntu 18.04 Package Install ```sh $ wget https://github.com/eosio/eos/releases/download/v1.6.0/eosio_1.7.0-rc1-ubuntu-18.04_amd64.deb $ sudo apt install ./eosio_1.7.0-rc1-ubuntu-18.04_amd64.deb ``` -#### Ubuntu 16.04 Debian Package Install +#### Ubuntu 16.04 Package Install ```sh $ wget https://github.com/eosio/eos/releases/download/v1.6.0/eosio_1.7.0-rc1-ubuntu-16.04_amd64.deb $ sudo apt install ./eosio_1.7.0-rc1-ubuntu-16.04_amd64.deb ``` -#### Debian Package Uninstall +#### Ubuntu Package Uninstall ```sh $ sudo apt remove eosio ``` From 968ab7984c682abc3f1d72a748c563a5edeff7f5 Mon Sep 17 00:00:00 2001 From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com> Date: Thu, 25 Apr 2019 14:01:15 -0400 Subject: [PATCH 533/680] reenable building of mongo c/cxx drivers --- .buildkite/pipeline.yml | 10 +- plugins/mongo_db_plugin/CMakeLists.txt | 42 ++------ scripts/eosio_build.sh | 2 +- scripts/eosio_build_amazon.sh | 144 ++++++++++++------------- scripts/eosio_build_centos.sh | 129 +++++++++++----------- scripts/eosio_build_darwin.sh | 7 +- scripts/eosio_build_ubuntu.sh | 139 ++++++++++++------------ 7 files changed, 224 insertions(+), 249 deletions(-) diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml index 2a188f66eca..cc62b7e41a0 100644 --- a/.buildkite/pipeline.yml +++ b/.buildkite/pipeline.yml @@ -47,7 +47,7 @@ steps: ln -s "$(pwd)" /data/job cd /data/job echo "+++ Building :hammer:" - ./scripts/eosio_build.sh -y -P + ./scripts/eosio_build.sh -y -P -m echo "--- Compressing build directory :compression:" tar -pczf build.tar.gz build if [[ ! -f build.tar.gz ]]; then echo 'ERROR: No build.tar.gz artifact found!' && exit 1; fi @@ -60,7 +60,7 @@ steps: - command: | # Amazon Linux 2 Build echo "+++ :hammer: Building" - ./scripts/eosio_build.sh -y -P + ./scripts/eosio_build.sh -y -P -m echo "--- :compression: Compressing build directory" tar -pczf build.tar.gz build if [[ ! 
-f build.tar.gz ]]; then echo 'ERROR: No build.tar.gz artifact found!' && exit 1; fi @@ -83,7 +83,7 @@ steps: - command: | # CentOS 7 Build echo "+++ :hammer: Building" - ./scripts/eosio_build.sh -y -P + ./scripts/eosio_build.sh -y -P -m echo "--- :compression: Compressing build directory" tar -pczf build.tar.gz build if [[ ! -f build.tar.gz ]]; then echo 'ERROR: No build.tar.gz artifact found!' && exit 1; fi @@ -106,7 +106,7 @@ steps: - command: | # Ubuntu 16.04 Build echo "+++ :hammer: Building" - ./scripts/eosio_build.sh -y -P + ./scripts/eosio_build.sh -y -P -m echo "--- :compression: Compressing build directory" tar -pczf build.tar.gz build if [[ ! -f build.tar.gz ]]; then echo 'ERROR: No build.tar.gz artifact found!' && exit 1; fi @@ -129,7 +129,7 @@ steps: - command: | # Ubuntu 18.04 Build echo "+++ :hammer: Building" - ./scripts/eosio_build.sh -y -P + ./scripts/eosio_build.sh -y -P -m echo "--- :compression: Compressing build directory" tar -pczf build.tar.gz build if [[ ! -f build.tar.gz ]]; then echo 'ERROR: No build.tar.gz artifact found!' && exit 1; fi diff --git a/plugins/mongo_db_plugin/CMakeLists.txt b/plugins/mongo_db_plugin/CMakeLists.txt index dc76525f3a2..923e1f11c08 100644 --- a/plugins/mongo_db_plugin/CMakeLists.txt +++ b/plugins/mongo_db_plugin/CMakeLists.txt @@ -4,44 +4,22 @@ if(BUILD_MONGO_DB_PLUGIN) if (libmongoc-1.0_FOUND) - # EOS has no direct dependencies on libmongoc but its shared libraries - # will need to be present at runtime for the C++ libraries we use: - # libbsoncxx & libmongocxx (both from github.com/mongodb/mongo-cxx-driver) - - # The *.cmake package files provided by mongo-cxx-driver don't give us the - # absolute path to the libraries, which is needed whenever they are not - # installed in system-known locations. CMake requires the absolute paths - # in target_link_libraries() since we are builiding an archive and the - # link step for all executables using this archive must include the - # mongo-cxx-driver libraries libmongocxx and libbsoncxx. - find_package(libbsoncxx-static REQUIRED) - message(STATUS "Found bsoncxx headers: ${LIBBSONCXX_STATIC_INCLUDE_DIRS}") + find_library(EOS_LIBBSONCXX "libbsoncxx-static${CMAKE_STATIC_LIBRARY_SUFFIX}" PATHS ${LIBBSONCXX_STATIC_LIBRARY_DIRS}) - # mongo-cxx-driver 3.2 release altered LIBBSONCXX_LIBRARIES semantics. Instead of library names, - # it now hold library paths. - if((LIBBSONCXX_STATIC_VERSION_MAJOR LESS 3) OR ((LIBBSONCXX_STATIC_VERSION_MAJOR EQUAL 3) AND (LIBBSONCXX_STATIC_VERSION_MINOR LESS 2))) - find_library(EOS_LIBBSONCXX ${LIBBSONCXX_STATIC_LIBRARIES} - PATHS ${LIBBSONCXX_STATIC_LIBRARY_DIRS} NO_DEFAULT_PATH) - else() - set(EOS_LIBBSONCXX ${LIBBSONCXX_STATIC_LIBRARIES}) - endif() + find_package(libmongocxx-static REQUIRED) + find_library(EOS_LIBMONGOCXX "libmongocxx-static${CMAKE_STATIC_LIBRARY_SUFFIX}" PATHS ${LIBMONGOCXX_STATIC_LIBRARY_DIRS}) - message(STATUS "Found bsoncxx library: ${EOS_LIBBSONCXX}") + find_package(libmongoc-static-1.0 REQUIRED) + find_library(EOS_LIBMONGOC "libmongoc-static-1.0${CMAKE_STATIC_LIBRARY_SUFFIX}" PATHS ${LIBMONGOC_STATIC_LIBRARY_DIRS}) - find_package(libmongocxx-static REQUIRED) - message(STATUS "Found mongocxx headers: ${LIBMONGOCXX_STATIC_INCLUDE_DIRS}") + find_package(libbson-static-1.0 REQUIRED) + find_library(EOS_LIBBSONC "libbson-static-1.0${CMAKE_STATIC_LIBRARY_SUFFIX}" PATHS ${LIBBSON_STATIC_LIBRARY_DIRS}) - # mongo-cxx-driver 3.2 release altered LIBBSONCXX_LIBRARIES semantics. Instead of library names, - # it now hold library paths. 
- if((LIBMONGOCXX_STATIC_VERSION_MAJOR LESS 3) OR ((LIBMONGOCXX_STATIC_VERSION_MAJOR EQUAL 3) AND (LIBMONGOCXX_STATIC_VERSION_MINOR LESS 2))) - find_library(EOS_LIBMONGOCXX ${LIBMONGOCXX_STATIC_LIBRARIES} - PATHS ${LIBMONGOCXX_STATIC_LIBRARY_DIRS} NO_DEFAULT_PATH) - else() - set(EOS_LIBMONGOCXX ${LIBMONGOCXX_STATIC_LIBRARIES}) + if(NOT EOS_LIBBSONCXX OR NOT EOS_LIBMONGOCXX OR NOT EOS_LIBMONGOC OR NOT EOS_LIBBSONC) + message(FATAL_ERROR "Could not find one or more mongo driver static libraries") endif() - message(STATUS "Found mongocxx library: ${EOS_LIBMONGOCXX}") else() message("Could NOT find MongoDB. mongo_db_plugin with MongoDB support will not be included.") return() @@ -64,7 +42,7 @@ if(BUILD_MONGO_DB_PLUGIN) target_link_libraries(mongo_db_plugin PUBLIC chain_plugin eosio_chain appbase - ${EOS_LIBMONGOCXX} ${EOS_LIBBSONCXX} + ${EOS_LIBMONGOCXX} ${EOS_LIBBSONCXX} ${EOS_LIBMONGOC} ${EOS_LIBBSONC} resolv ) else() diff --git a/scripts/eosio_build.sh b/scripts/eosio_build.sh index 5c09ea6bae1..878243b0cd5 100755 --- a/scripts/eosio_build.sh +++ b/scripts/eosio_build.sh @@ -347,7 +347,7 @@ if [ "$ARCH" == "Darwin" ]; then export OS_NAME=MacOSX # opt/gettext: cleos requires Intl, which requires gettext; it's keg only though and we don't want to force linking: https://github.com/EOSIO/eos/issues/2240#issuecomment-396309884 # HOME/lib/cmake: mongo_db_plugin.cpp:25:10: fatal error: 'bsoncxx/builder/basic/kvp.hpp' file not found - LOCAL_CMAKE_FLAGS="-DCMAKE_PREFIX_PATH=/usr/local/opt/gettext;$HOME/lib/cmake ${LOCAL_CMAKE_FLAGS}" + LOCAL_CMAKE_FLAGS="-DCMAKE_PREFIX_PATH=/usr/local/opt/gettext;$HOME ${LOCAL_CMAKE_FLAGS}" FILE="${REPO_ROOT}/scripts/eosio_build_darwin.sh" OPENSSL_ROOT_DIR=/usr/local/opt/openssl fi diff --git a/scripts/eosio_build_amazon.sh b/scripts/eosio_build_amazon.sh index 5d0438ca314..df620ab475c 100755 --- a/scripts/eosio_build_amazon.sh +++ b/scripts/eosio_build_amazon.sh @@ -8,7 +8,9 @@ DISK_AVAIL_KB=$( df . | tail -1 | awk '{print $4}' ) DISK_TOTAL=$(( DISK_TOTAL_KB / 1048576 )) DISK_AVAIL=$(( DISK_AVAIL_KB / 1048576 )) -PINNED_TOOLCHAIN=-DCMAKE_TOOLCHAIN_FILE=$BUILD_DIR/pinned_toolchain.cmake +if [ $BUILD_CLANG8 ]; then + PINNED_TOOLCHAIN=-DCMAKE_TOOLCHAIN_FILE=$BUILD_DIR/pinned_toolchain.cmake +fi if [[ "${OS_NAME}" == "Amazon Linux AMI" ]]; then # Amazonlinux1 DEP_ARRAY=( @@ -120,18 +122,6 @@ fi printf "\\n" -### clean up force build before starting -if [ $FORCE_BUILD ];then - rm -rf \ - ${SRC_LOCATION}/cmake-$CMAKE_VERSION \ - ${SRC_LOCATION}/llvm ${OPT_LOCATION}/llvm4 \ - ${TMP_LOCATION}/clang8 ${OPT_LOCATION}/clang8 \ - ${SRC_LOCATION}/zlib ${OPT_LOCATION}/zlib \ - ${SRC_LOCATION}/boost \ - ${SRC_LOCATION}/mongodb-linux-x86_64-amazon-$MONGODB_VERSION \ - ${SRC_LOCATION}/mongo-c-driver-$MONGO_C_DRIVER_VERSION \ - ${SRC_LOCATION}/mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION -fi printf "Checking CMAKE installation...\\n" if [ ! -e $CMAKE ] || [ $FORCE_BUILD ]; then @@ -154,8 +144,7 @@ if [ $? -ne 0 ]; then exit -1; fi printf "\\n" - -if [ $PIN_COMPILER ]; then +if $PIN_COMPILER; then printf "Checking Clang 8 support...\\n" if [ ! -d $CLANG8_ROOT ] || [ $FORCE_BUILD ]; then printf "Installing Clang 8...\\n" @@ -267,67 +256,6 @@ else printf "\\n" - if [ $BUILD_MONGO ]; then - printf "Checking MongoDB installation...\\n" - if [ ! 
-d $MONGODB_ROOT ] || [ $FORCE_BUILD ]; then - printf "Installing MongoDB into ${MONGODB_ROOT}...\\n" - curl -OL https://fastdl.mongodb.org/linux/mongodb-linux-x86_64-amazon-$MONGODB_VERSION.tgz \ - && tar -xzf mongodb-linux-x86_64-amazon-$MONGODB_VERSION.tgz \ - && mv $SRC_LOCATION/mongodb-linux-x86_64-amazon-$MONGODB_VERSION $MONGODB_ROOT \ - && touch $MONGODB_LOG_LOCATION/mongod.log \ - && rm -f mongodb-linux-x86_64-amazon-$MONGODB_VERSION.tgz \ - && cp -f $REPO_ROOT/scripts/mongod.conf $MONGODB_CONF \ - && mkdir -p $MONGODB_DATA_LOCATION \ - && rm -rf $MONGODB_LINK_LOCATION \ - && rm -rf $BIN_LOCATION/mongod \ - && ln -s $MONGODB_ROOT $MONGODB_LINK_LOCATION \ - && ln -s $MONGODB_LINK_LOCATION/bin/mongod $BIN_LOCATION/mongod \ - || exit 1 - printf " - MongoDB successfully installed @ ${MONGODB_ROOT} (Symlinked to ${MONGODB_LINK_LOCATION}).\\n" - else - printf " - MongoDB found with correct version @ ${MONGODB_ROOT} (Symlinked to ${MONGODB_LINK_LOCATION}).\\n" - fi - if [ $? -ne 0 ]; then exit -1; fi - printf "Checking MongoDB C driver installation...\\n" - if [ ! -d $MONGO_C_DRIVER_ROOT ] || [ $FORCE_BUILD ]; then - printf "Installing MongoDB C driver...\\n" - curl -LO https://github.com/mongodb/mongo-c-driver/releases/download/$MONGO_C_DRIVER_VERSION/mongo-c-driver-$MONGO_C_DRIVER_VERSION.tar.gz \ - && tar -xzf mongo-c-driver-$MONGO_C_DRIVER_VERSION.tar.gz \ - && cd mongo-c-driver-$MONGO_C_DRIVER_VERSION \ - && mkdir -p cmake-build \ - && cd cmake-build \ - && $CMAKE -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$PREFIX -DENABLE_BSON=ON -DENABLE_SSL=OPENSSL -DENABLE_AUTOMATIC_INIT_AND_CLEANUP=OFF -DENABLE_STATIC=ON .. \ - && make -j"${JOBS}" \ - && make install \ - && cd ../.. \ - && rm mongo-c-driver-$MONGO_C_DRIVER_VERSION.tar.gz \ - || exit 1 - printf " - MongoDB C driver successfully installed @ ${MONGO_C_DRIVER_ROOT}.\\n" - else - printf " - MongoDB C driver found with correct version @ ${MONGO_C_DRIVER_ROOT}.\\n" - fi - if [ $? -ne 0 ]; then exit -1; fi - printf "Checking MongoDB C++ driver installation...\\n" - if [ ! -d $MONGO_CXX_DRIVER_ROOT ] || [ $FORCE_BUILD ]; then - printf "Installing MongoDB C++ driver...\\n" - curl -L https://github.com/mongodb/mongo-cxx-driver/archive/r$MONGO_CXX_DRIVER_VERSION.tar.gz -o mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION.tar.gz \ - && tar -xzf mongo-cxx-driver-r${MONGO_CXX_DRIVER_VERSION}.tar.gz \ - && cd mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION/build \ - && $CMAKE -DBUILD_SHARED_LIBS=OFF -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$PREFIX .. \ - && make -j"${JOBS}" VERBOSE=1 \ - && make install \ - && cd ../.. \ - && rm -f mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION.tar.gz \ - || exit 1 - printf " - MongoDB C++ driver successfully installed @ ${MONGO_CXX_DRIVER_ROOT}.\\n" - else - printf " - MongoDB C++ driver found with correct version @ ${MONGO_CXX_DRIVER_ROOT}.\\n" - fi - if [ $? -ne 0 ]; then exit -1; fi - - printf "\\n" - fi - printf "Checking LLVM 4 support...\\n" if [ ! -d $LLVM_ROOT ] || [ $FORCE_BUILD ]; then printf "Installing LLVM 4...\\n" @@ -351,6 +279,70 @@ else printf "\\n" fi +if [ $BUILD_MONGO ]; then + printf "Checking MongoDB installation...\\n" + if [ ! 
-d $MONGODB_ROOT ] || [ $FORCE_BUILD ]; then + printf "Installing MongoDB into ${MONGODB_ROOT}...\\n" + curl -OL https://fastdl.mongodb.org/linux/mongodb-linux-x86_64-amazon-$MONGODB_VERSION.tgz \ + && tar -xzf mongodb-linux-x86_64-amazon-$MONGODB_VERSION.tgz \ + && mv $SRC_LOCATION/mongodb-linux-x86_64-amazon-$MONGODB_VERSION $MONGODB_ROOT \ + && touch $MONGODB_LOG_LOCATION/mongod.log \ + && rm -f mongodb-linux-x86_64-amazon-$MONGODB_VERSION.tgz \ + && cp -f $REPO_ROOT/scripts/mongod.conf $MONGODB_CONF \ + && mkdir -p $MONGODB_DATA_LOCATION \ + && rm -rf $MONGODB_LINK_LOCATION \ + && rm -rf $BIN_LOCATION/mongod \ + && ln -s $MONGODB_ROOT $MONGODB_LINK_LOCATION \ + && ln -s $MONGODB_LINK_LOCATION/bin/mongod $BIN_LOCATION/mongod \ + || exit 1 + printf " - MongoDB successfully installed @ ${MONGODB_ROOT} (Symlinked to ${MONGODB_LINK_LOCATION}).\\n" + else + printf " - MongoDB found with correct version @ ${MONGODB_ROOT} (Symlinked to ${MONGODB_LINK_LOCATION}).\\n" + fi + if [ $? -ne 0 ]; then exit -1; fi + printf "Checking MongoDB C driver installation...\\n" + if [ ! -d $MONGO_C_DRIVER_ROOT ] || [ $FORCE_BUILD ]; then + printf "Installing MongoDB C driver...\\n" + curl -LO https://github.com/mongodb/mongo-c-driver/releases/download/$MONGO_C_DRIVER_VERSION/mongo-c-driver-$MONGO_C_DRIVER_VERSION.tar.gz \ + && tar -xzf mongo-c-driver-$MONGO_C_DRIVER_VERSION.tar.gz \ + && cd mongo-c-driver-$MONGO_C_DRIVER_VERSION \ + && mkdir -p cmake-build \ + && cd cmake-build \ + && $CMAKE -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$PREFIX -DENABLE_BSON=ON -DENABLE_SSL=OPENSSL -DENABLE_AUTOMATIC_INIT_AND_CLEANUP=OFF -DENABLE_STATIC=ON -DENABLE_ICU=OFF $PINNED_TOOLCHAIN .. \ + && make -j"${JOBS}" \ + && make install \ + && cd ../.. \ + && rm mongo-c-driver-$MONGO_C_DRIVER_VERSION.tar.gz \ + || exit 1 + printf " - MongoDB C driver successfully installed @ ${MONGO_C_DRIVER_ROOT}.\\n" + else + printf " - MongoDB C driver found with correct version @ ${MONGO_C_DRIVER_ROOT}.\\n" + fi + if [ $? -ne 0 ]; then exit -1; fi + printf "Checking MongoDB C++ driver installation...\\n" + if [ ! -d $MONGO_CXX_DRIVER_ROOT ] || [ $FORCE_BUILD ]; then + printf "Installing MongoDB C++ driver...\\n" + curl -L https://github.com/mongodb/mongo-cxx-driver/archive/r$MONGO_CXX_DRIVER_VERSION.tar.gz -o mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION.tar.gz \ + && tar -xzf mongo-cxx-driver-r${MONGO_CXX_DRIVER_VERSION}.tar.gz \ + && cd mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION \ + && sed -i 's/"maxAwaitTimeMS", count/"maxAwaitTimeMS", static_cast(count)/' src/mongocxx/options/change_stream.cpp \ + && sed -i 's/add_subdirectory(test)//' src/mongocxx/CMakeLists.txt src/bsoncxx/CMakeLists.txt \ + && cd build \ + && $CMAKE -DBUILD_SHARED_LIBS=OFF -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$PREFIX $PINNED_TOOLCHAIN .. \ + && make -j"${JOBS}" VERBOSE=1 \ + && make install \ + && cd ../.. \ + && rm -f mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION.tar.gz \ + || exit 1 + printf " - MongoDB C++ driver successfully installed @ ${MONGO_CXX_DRIVER_ROOT}.\\n" + else + printf " - MongoDB C++ driver found with correct version @ ${MONGO_CXX_DRIVER_ROOT}.\\n" + fi + if [ $? 
-ne 0 ]; then exit -1; fi + + printf "\\n" +fi + function print_instructions() { return 0 } diff --git a/scripts/eosio_build_centos.sh b/scripts/eosio_build_centos.sh index 140705f4d6d..2e5688473bf 100755 --- a/scripts/eosio_build_centos.sh +++ b/scripts/eosio_build_centos.sh @@ -25,7 +25,9 @@ printf "Disk space total: ${DISK_TOTAL%.*}G\\n" printf "Disk space available: ${DISK_AVAIL%.*}G\\n" printf "Concurrent Jobs (make -j): ${JOBS}\\n" -PINNED_TOOLCHAIN=-DCMAKE_TOOLCHAIN_FILE=$BUILD_DIR/pinned_toolchain.cmake +if [ $BUILD_CLANG8 ]; then + PINNED_TOOLCHAIN=-DCMAKE_TOOLCHAIN_FILE=$BUILD_DIR/pinned_toolchain.cmake +fi if [ "${MEM_MEG}" -lt 7000 ]; then printf "\\nYour system must have 7 or more Gigabytes of physical memory installed.\\n" @@ -319,67 +321,6 @@ else printf "\\n" - if [ $BUILD_MONGO ]; then - printf "Checking MongoDB installation...\\n" - if [ ! -d $MONGODB_ROOT ] || [ $FORCE_BUILD ]; then - printf "Installing MongoDB into ${MONGODB_ROOT}...\\n" - curl -OL https://fastdl.mongodb.org/linux/mongodb-linux-x86_64-amazon-$MONGODB_VERSION.tgz \ - && tar -xzf mongodb-linux-x86_64-amazon-$MONGODB_VERSION.tgz \ - && mv $SRC_LOCATION/mongodb-linux-x86_64-amazon-$MONGODB_VERSION $MONGODB_ROOT \ - && touch $MONGODB_LOG_LOCATION/mongod.log \ - && rm -f mongodb-linux-x86_64-amazon-$MONGODB_VERSION.tgz \ - && cp -f $REPO_ROOT/scripts/mongod.conf $MONGODB_CONF \ - && mkdir -p $MONGODB_DATA_LOCATION \ - && rm -rf $MONGODB_LINK_LOCATION \ - && rm -rf $BIN_LOCATION/mongod \ - && ln -s $MONGODB_ROOT $MONGODB_LINK_LOCATION \ - && ln -s $MONGODB_LINK_LOCATION/bin/mongod $BIN_LOCATION/mongod \ - || exit 1 - printf " - MongoDB successfully installed @ ${MONGODB_ROOT} (Symlinked to ${MONGODB_LINK_LOCATION}).\\n" - else - printf " - MongoDB found with correct version @ ${MONGODB_ROOT} (Symlinked to ${MONGODB_LINK_LOCATION}).\\n" - fi - if [ $? -ne 0 ]; then exit -1; fi - printf "Checking MongoDB C driver installation...\\n" - if [ ! -d $MONGO_C_DRIVER_ROOT ] || [ $FORCE_BUILD ]; then - printf "Installing MongoDB C driver...\\n" - curl -LO https://github.com/mongodb/mongo-c-driver/releases/download/$MONGO_C_DRIVER_VERSION/mongo-c-driver-$MONGO_C_DRIVER_VERSION.tar.gz \ - && tar -xzf mongo-c-driver-$MONGO_C_DRIVER_VERSION.tar.gz \ - && cd mongo-c-driver-$MONGO_C_DRIVER_VERSION \ - && mkdir -p cmake-build \ - && cd cmake-build \ - && $CMAKE -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$PREFIX -DENABLE_BSON=ON -DENABLE_SSL=OPENSSL -DENABLE_AUTOMATIC_INIT_AND_CLEANUP=OFF -DENABLE_STATIC=ON .. \ - && make -j"${JOBS}" \ - && make install \ - && cd ../.. \ - && rm mongo-c-driver-$MONGO_C_DRIVER_VERSION.tar.gz \ - || exit 1 - printf " - MongoDB C driver successfully installed @ ${MONGO_C_DRIVER_ROOT}.\\n" - else - printf " - MongoDB C driver found with correct version @ ${MONGO_C_DRIVER_ROOT}.\\n" - fi - if [ $? -ne 0 ]; then exit -1; fi - printf "Checking MongoDB C++ driver installation...\\n" - if [ ! -d $MONGO_CXX_DRIVER_ROOT ] || [ $FORCE_BUILD ]; then - printf "Installing MongoDB C++ driver...\\n" - curl -L https://github.com/mongodb/mongo-cxx-driver/archive/r$MONGO_CXX_DRIVER_VERSION.tar.gz -o mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION.tar.gz \ - && tar -xzf mongo-cxx-driver-r${MONGO_CXX_DRIVER_VERSION}.tar.gz \ - && cd mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION/build \ - && $CMAKE -DBUILD_SHARED_LIBS=OFF -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$PREFIX .. \ - && make -j"${JOBS}" VERBOSE=1 \ - && make install \ - && cd ../.. 
\ - && rm -f mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION.tar.gz \ - || exit 1 - printf " - MongoDB C++ driver successfully installed @ ${MONGO_CXX_DRIVER_ROOT}.\\n" - else - printf " - MongoDB C++ driver found with correct version @ ${MONGO_CXX_DRIVER_ROOT}.\\n" - fi - if [ $? -ne 0 ]; then exit -1; fi - - printf "\\n" - fi - printf "Checking LLVM 4 support...\\n" if [ ! -d $LLVM_ROOT ] || [ $FORCE_BUILD ]; then printf "Installing LLVM 4...\\n" @@ -402,6 +343,70 @@ else printf "\\n" fi +if [ $BUILD_MONGO ]; then + printf "Checking MongoDB installation...\\n" + if [ ! -d $MONGODB_ROOT ] || [ $FORCE_BUILD ]; then + printf "Installing MongoDB into ${MONGODB_ROOT}...\\n" + curl -OL https://fastdl.mongodb.org/linux/mongodb-linux-x86_64-amazon-$MONGODB_VERSION.tgz \ + && tar -xzf mongodb-linux-x86_64-amazon-$MONGODB_VERSION.tgz \ + && mv $SRC_LOCATION/mongodb-linux-x86_64-amazon-$MONGODB_VERSION $MONGODB_ROOT \ + && touch $MONGODB_LOG_LOCATION/mongod.log \ + && rm -f mongodb-linux-x86_64-amazon-$MONGODB_VERSION.tgz \ + && cp -f $REPO_ROOT/scripts/mongod.conf $MONGODB_CONF \ + && mkdir -p $MONGODB_DATA_LOCATION \ + && rm -rf $MONGODB_LINK_LOCATION \ + && rm -rf $BIN_LOCATION/mongod \ + && ln -s $MONGODB_ROOT $MONGODB_LINK_LOCATION \ + && ln -s $MONGODB_LINK_LOCATION/bin/mongod $BIN_LOCATION/mongod \ + || exit 1 + printf " - MongoDB successfully installed @ ${MONGODB_ROOT} (Symlinked to ${MONGODB_LINK_LOCATION}).\\n" + else + printf " - MongoDB found with correct version @ ${MONGODB_ROOT} (Symlinked to ${MONGODB_LINK_LOCATION}).\\n" + fi + if [ $? -ne 0 ]; then exit -1; fi + printf "Checking MongoDB C driver installation...\\n" + if [ ! -d $MONGO_C_DRIVER_ROOT ] || [ $FORCE_BUILD ]; then + printf "Installing MongoDB C driver...\\n" + curl -LO https://github.com/mongodb/mongo-c-driver/releases/download/$MONGO_C_DRIVER_VERSION/mongo-c-driver-$MONGO_C_DRIVER_VERSION.tar.gz \ + && tar -xzf mongo-c-driver-$MONGO_C_DRIVER_VERSION.tar.gz \ + && cd mongo-c-driver-$MONGO_C_DRIVER_VERSION \ + && mkdir -p cmake-build \ + && cd cmake-build \ + && $CMAKE -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$PREFIX -DENABLE_BSON=ON -DENABLE_SSL=OPENSSL -DENABLE_AUTOMATIC_INIT_AND_CLEANUP=OFF -DENABLE_STATIC=ON -DENABLE_ICU=OFF $PINNED_TOOLCHAIN .. \ + && make -j"${JOBS}" \ + && make install \ + && cd ../.. \ + && rm mongo-c-driver-$MONGO_C_DRIVER_VERSION.tar.gz \ + || exit 1 + printf " - MongoDB C driver successfully installed @ ${MONGO_C_DRIVER_ROOT}.\\n" + else + printf " - MongoDB C driver found with correct version @ ${MONGO_C_DRIVER_ROOT}.\\n" + fi + if [ $? -ne 0 ]; then exit -1; fi + printf "Checking MongoDB C++ driver installation...\\n" + if [ ! -d $MONGO_CXX_DRIVER_ROOT ] || [ $FORCE_BUILD ]; then + printf "Installing MongoDB C++ driver...\\n" + curl -L https://github.com/mongodb/mongo-cxx-driver/archive/r$MONGO_CXX_DRIVER_VERSION.tar.gz -o mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION.tar.gz \ + && tar -xzf mongo-cxx-driver-r${MONGO_CXX_DRIVER_VERSION}.tar.gz \ + && cd mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION \ + && sed -i 's/"maxAwaitTimeMS", count/"maxAwaitTimeMS", static_cast(count)/' src/mongocxx/options/change_stream.cpp \ + && sed -i 's/add_subdirectory(test)//' src/mongocxx/CMakeLists.txt src/bsoncxx/CMakeLists.txt \ + && cd build \ + && $CMAKE -DBUILD_SHARED_LIBS=OFF -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$PREFIX $PINNED_TOOLCHAIN .. \ + && make -j"${JOBS}" VERBOSE=1 \ + && make install \ + && cd ../.. 
\ + && rm -f mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION.tar.gz \ + || exit 1 + printf " - MongoDB C++ driver successfully installed @ ${MONGO_CXX_DRIVER_ROOT}.\\n" + else + printf " - MongoDB C++ driver found with correct version @ ${MONGO_CXX_DRIVER_ROOT}.\\n" + fi + if [ $? -ne 0 ]; then exit -1; fi + + printf "\\n" +fi + function print_instructions() { printf "source ${PYTHON3PATH}/enable\\n" printf "source /opt/rh/devtoolset-7/enable\\n" diff --git a/scripts/eosio_build_darwin.sh b/scripts/eosio_build_darwin.sh index 64b6222e34f..8579a3f0370 100755 --- a/scripts/eosio_build_darwin.sh +++ b/scripts/eosio_build_darwin.sh @@ -227,7 +227,7 @@ if [ $BUILD_MONGO ]; then && cd mongo-c-driver-$MONGO_C_DRIVER_VERSION \ && mkdir -p cmake-build \ && cd cmake-build \ - && $CMAKE -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$PREFIX -DENABLE_BSON=ON -DENABLE_SSL=DARWIN -DENABLE_AUTOMATIC_INIT_AND_CLEANUP=OFF -DENABLE_STATIC=ON .. \ + && $CMAKE -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$PREFIX -DENABLE_BSON=ON -DENABLE_SSL=DARWIN -DENABLE_AUTOMATIC_INIT_AND_CLEANUP=OFF -DENABLE_STATIC=ON -DENABLE_ICU=OFF -DENABLE_SASL=OFF .. \ && make -j"${JOBS}" \ && make install \ && cd ../.. \ @@ -243,7 +243,10 @@ if [ $BUILD_MONGO ]; then printf "Installing MongoDB C++ driver...\\n" curl -L https://github.com/mongodb/mongo-cxx-driver/archive/r$MONGO_CXX_DRIVER_VERSION.tar.gz -o mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION.tar.gz \ && tar -xzf mongo-cxx-driver-r${MONGO_CXX_DRIVER_VERSION}.tar.gz \ - && cd mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION/build \ + && cd mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION \ + && sed -i '' 's/"maxAwaitTimeMS", count/"maxAwaitTimeMS", static_cast(count)/' src/mongocxx/options/change_stream.cpp \ + && sed -i '' 's/add_subdirectory(test)//' src/mongocxx/CMakeLists.txt src/bsoncxx/CMakeLists.txt \ + && cd build \ && $CMAKE -DBUILD_SHARED_LIBS=OFF -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$PREFIX .. \ && make -j"${JOBS}" VERBOSE=1 \ && make install \ diff --git a/scripts/eosio_build_ubuntu.sh b/scripts/eosio_build_ubuntu.sh index 6284d8e5a5a..d6d40828759 100755 --- a/scripts/eosio_build_ubuntu.sh +++ b/scripts/eosio_build_ubuntu.sh @@ -25,7 +25,9 @@ printf "Disk install: ${DISK_INSTALL}\\n" printf "Disk space total: ${DISK_TOTAL%.*}G\\n" printf "Disk space available: ${DISK_AVAIL%.*}G\\n" -PINNED_TOOLCHAIN=-DCMAKE_TOOLCHAIN_FILE=$BUILD_DIR/pinned_toolchain.cmake +if [ $BUILD_CLANG8 ]; then + PINNED_TOOLCHAIN=-DCMAKE_TOOLCHAIN_FILE=$BUILD_DIR/pinned_toolchain.cmake +fi if [ "${MEM_MEG}" -lt 7000 ]; then printf "Your system must have 7 or more Gigabytes of physical memory installed.\\n" @@ -286,76 +288,6 @@ else printf "\\n" - if [ $BUILD_MONGO ]; then - printf "Checking MongoDB installation...\\n" - if [ ! 
-d $MONGODB_ROOT ] || [ $FORCE_BUILD ]; then - printf "Installing MongoDB into ${MONGODB_ROOT}...\\n" - curl -OL http://downloads.mongodb.org/linux/mongodb-linux-x86_64-ubuntu$OS_MAJ$OS_MIN-$MONGODB_VERSION.tgz \ - && tar -xzf mongodb-linux-x86_64-ubuntu$OS_MAJ$OS_MIN-$MONGODB_VERSION.tgz \ - && mv $SRC_LOCATION/mongodb-linux-x86_64-ubuntu$OS_MAJ$OS_MIN-$MONGODB_VERSION $MONGODB_ROOT \ - && touch $MONGODB_LOG_LOCATION/mongod.log \ - && rm -f mongodb-linux-x86_64-ubuntu$OS_MAJ$OS_MIN-$MONGODB_VERSION.tgz \ - && cp -f $REPO_ROOT/scripts/mongod.conf $MONGODB_CONF \ - && mkdir -p $MONGODB_DATA_LOCATION \ - && rm -rf $MONGODB_LINK_LOCATION \ - && rm -rf $BIN_LOCATION/mongod \ - && ln -s $MONGODB_ROOT $MONGODB_LINK_LOCATION \ - && ln -s $MONGODB_LINK_LOCATION/bin/mongod $BIN_LOCATION/mongod \ - || exit 1 - printf " - MongoDB successfully installed @ ${MONGODB_ROOT} (Symlinked to ${MONGODB_LINK_LOCATION}).\\n" - else - printf " - MongoDB found with correct version @ ${MONGODB_ROOT} (Symlinked to ${MONGODB_LINK_LOCATION}).\\n" - fi - if [ $? -ne 0 ]; then exit -1; fi - printf "Checking MongoDB C driver installation...\\n" - if [ ! -d $MONGO_C_DRIVER_ROOT ] || [ $FORCE_BUILD ]; then - if [ ! $BUILD_CLANG8 ]; then - PINNED_TOOLCHAIN="" - fi - printf "Installing MongoDB C driver...\\n" - curl -LO https://github.com/mongodb/mongo-c-driver/releases/download/$MONGO_C_DRIVER_VERSION/mongo-c-driver-$MONGO_C_DRIVER_VERSION.tar.gz \ - && tar -xzf mongo-c-driver-$MONGO_C_DRIVER_VERSION.tar.gz \ - && cd mongo-c-driver-$MONGO_C_DRIVER_VERSION \ - && mkdir -p cmake-build \ - && cd cmake-build \ - && $CMAKE -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$PREFIX -DENABLE_BSON=ON -DENABLE_SSL=OPENSSL -DENABLE_AUTOMATIC_INIT_AND_CLEANUP=OFF -DENABLE_STATIC=ON $PINNED_TOOLCHAIN .. \ - && make -j"${JOBS}" \ - && make install \ - && cd ../.. \ - && rm mongo-c-driver-$MONGO_C_DRIVER_VERSION.tar.gz \ - || exit 1 - printf " - MongoDB C driver successfully installed @ ${MONGO_C_DRIVER_ROOT}.\\n" - else - printf " - MongoDB C driver found with correct version @ ${MONGO_C_DRIVER_ROOT}.\\n" - fi - - if [ $? -ne 0 ]; then exit -1; fi - printf "Checking MongoDB C++ driver installation...\\n" - if [ ! -d $MONGO_CXX_DRIVER_ROOT ] || [ $FORCE_BUILD ]; then - if [ ! $BUILD_CLANG8 ]; then - PINNED_TOOLCHAIN="" - fi - printf "Installing MongoDB C++ driver...\\n" - curl -L https://github.com/mongodb/mongo-cxx-driver/archive/r$MONGO_CXX_DRIVER_VERSION.tar.gz -o mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION.tar.gz \ - && tar -xzf mongo-cxx-driver-r${MONGO_CXX_DRIVER_VERSION}.tar.gz \ - && cd mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION \ - && sed '111 s/count/static_cast(count)/' src/mongocxx/options/change_stream.cpp &> src/mongocxx/options/change_stream.cpp \ - && cd build \ - && $CMAKE -DBUILD_SHARED_LIBS=OFF -DCXX_CMAKE_FLAGS=-D_GLIBCXX_USE_CXX11_ABI=1 -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$PREFIX $PINNED_TOOLCHAIN .. \ - && make -j"${JOBS}" VERBOSE=1 \ - && make install \ - && cd ../.. \ - && rm -f mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION.tar.gz \ - || exit 1 - printf " - MongoDB C++ driver successfully installed @ ${MONGO_CXX_DRIVER_ROOT}.\\n" - else - printf " - MongoDB C++ driver found with correct version @ ${MONGO_CXX_DRIVER_ROOT}.\\n" - fi - if [ $? -ne 0 ]; then exit -1; fi - - printf "\\n" - fi - printf "Checking LLVM 4 support...\\n" if [ ! -d $LLVM_ROOT ] || [ $FORCE_BUILD ]; then ln -s /usr/lib/llvm-4.0 $LLVM_ROOT \ @@ -367,6 +299,71 @@ else if [ $? 
-ne 0 ]; then exit -1; fi fi +if [ $BUILD_MONGO ]; then + printf "Checking MongoDB installation...\\n" + if [ ! -d $MONGODB_ROOT ] || [ $FORCE_BUILD ]; then + printf "Installing MongoDB into ${MONGODB_ROOT}...\\n" + curl -OL http://downloads.mongodb.org/linux/mongodb-linux-x86_64-ubuntu$OS_MAJ$OS_MIN-$MONGODB_VERSION.tgz \ + && tar -xzf mongodb-linux-x86_64-ubuntu$OS_MAJ$OS_MIN-$MONGODB_VERSION.tgz \ + && mv $SRC_LOCATION/mongodb-linux-x86_64-ubuntu$OS_MAJ$OS_MIN-$MONGODB_VERSION $MONGODB_ROOT \ + && touch $MONGODB_LOG_LOCATION/mongod.log \ + && rm -f mongodb-linux-x86_64-ubuntu$OS_MAJ$OS_MIN-$MONGODB_VERSION.tgz \ + && cp -f $REPO_ROOT/scripts/mongod.conf $MONGODB_CONF \ + && mkdir -p $MONGODB_DATA_LOCATION \ + && rm -rf $MONGODB_LINK_LOCATION \ + && rm -rf $BIN_LOCATION/mongod \ + && ln -s $MONGODB_ROOT $MONGODB_LINK_LOCATION \ + && ln -s $MONGODB_LINK_LOCATION/bin/mongod $BIN_LOCATION/mongod \ + || exit 1 + printf " - MongoDB successfully installed @ ${MONGODB_ROOT} (Symlinked to ${MONGODB_LINK_LOCATION}).\\n" + else + printf " - MongoDB found with correct version @ ${MONGODB_ROOT} (Symlinked to ${MONGODB_LINK_LOCATION}).\\n" + fi + if [ $? -ne 0 ]; then exit -1; fi + printf "Checking MongoDB C driver installation...\\n" + if [ ! -d $MONGO_C_DRIVER_ROOT ] || [ $FORCE_BUILD ]; then + printf "Installing MongoDB C driver...\\n" + curl -LO https://github.com/mongodb/mongo-c-driver/releases/download/$MONGO_C_DRIVER_VERSION/mongo-c-driver-$MONGO_C_DRIVER_VERSION.tar.gz \ + && tar -xzf mongo-c-driver-$MONGO_C_DRIVER_VERSION.tar.gz \ + && cd mongo-c-driver-$MONGO_C_DRIVER_VERSION \ + && mkdir -p cmake-build \ + && cd cmake-build \ + && $CMAKE -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$PREFIX -DENABLE_BSON=ON -DENABLE_SSL=OPENSSL -DENABLE_AUTOMATIC_INIT_AND_CLEANUP=OFF -DENABLE_STATIC=ON -DENABLE_ICU=OFF $PINNED_TOOLCHAIN .. \ + && make -j"${JOBS}" \ + && make install \ + && cd ../.. \ + && rm mongo-c-driver-$MONGO_C_DRIVER_VERSION.tar.gz \ + || exit 1 + printf " - MongoDB C driver successfully installed @ ${MONGO_C_DRIVER_ROOT}.\\n" + else + printf " - MongoDB C driver found with correct version @ ${MONGO_C_DRIVER_ROOT}.\\n" + fi + + if [ $? -ne 0 ]; then exit -1; fi + printf "Checking MongoDB C++ driver installation...\\n" + if [ ! -d $MONGO_CXX_DRIVER_ROOT ] || [ $FORCE_BUILD ]; then + printf "Installing MongoDB C++ driver...\\n" + curl -L https://github.com/mongodb/mongo-cxx-driver/archive/r$MONGO_CXX_DRIVER_VERSION.tar.gz -o mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION.tar.gz \ + && tar -xzf mongo-cxx-driver-r${MONGO_CXX_DRIVER_VERSION}.tar.gz \ + && cd mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION \ + && sed -i 's/"maxAwaitTimeMS", count/"maxAwaitTimeMS", static_cast(count)/' src/mongocxx/options/change_stream.cpp \ + && sed -i 's/add_subdirectory(test)//' src/mongocxx/CMakeLists.txt src/bsoncxx/CMakeLists.txt \ + && cd build \ + && $CMAKE -DBUILD_SHARED_LIBS=OFF -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$PREFIX $PINNED_TOOLCHAIN .. \ + && make -j"${JOBS}" VERBOSE=1 \ + && make install \ + && cd ../.. \ + && rm -f mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION.tar.gz \ + || exit 1 + printf " - MongoDB C++ driver successfully installed @ ${MONGO_CXX_DRIVER_ROOT}.\\n" + else + printf " - MongoDB C++ driver found with correct version @ ${MONGO_CXX_DRIVER_ROOT}.\\n" + fi + if [ $? -ne 0 ]; then exit -1; fi + + printf "\\n" +fi + cd .. 
printf "\\n" From 57fc7c347291815ac679131cc5f44c0059449c7d Mon Sep 17 00:00:00 2001 From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com> Date: Thu, 25 Apr 2019 16:36:22 -0400 Subject: [PATCH 534/680] Reenable mongod during buildkite serialized tests & revert VM --- .buildkite/pipeline.yml | 10 +++++----- scripts/serial-test.sh | 6 +++--- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml index cc62b7e41a0..bf0db2dc12e 100644 --- a/.buildkite/pipeline.yml +++ b/.buildkite/pipeline.yml @@ -190,7 +190,7 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-3" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-2" propagate-environment: true workdir: /data/job timeout: 60 @@ -233,7 +233,7 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-3" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-2" propagate-environment: true workdir: /data/job timeout: 60 @@ -276,7 +276,7 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu16_2-3" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu16_2-2" propagate-environment: true workdir: /data/job timeout: 60 @@ -319,7 +319,7 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-3" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-2" propagate-environment: true workdir: /data/job timeout: 60 @@ -382,7 +382,7 @@ steps: ./scripts/serial-test.sh label: ":darwin: Mojave NP Tests" agents: - - "role=tester-v2-2" + - "role=tester-v2-1" - "os=mojave" timeout: 60 diff --git a/scripts/serial-test.sh b/scripts/serial-test.sh index 109efb6e9d1..4e2b935e09c 100755 --- a/scripts/serial-test.sh +++ b/scripts/serial-test.sh @@ -4,8 +4,8 @@ set -e # exit on failure of any "simple" command (excludes &&, ||, or | chains) PATH=$PATH:~/opt/mongodb/bin echo "Extracting build directory..." [[ -z "${1}" ]] && tar -zxf build.tar.gz || tar -xzf $1 -#echo "Starting MongoDB..." -#~/bin/mongod --fork --dbpath ~/data/mongodb -f ~/etc/mongod.conf --logpath "$(pwd)"/mongod.log +echo "Starting MongoDB..." +mongod --fork --dbpath ~/data/mongodb -f ~/etc/mongod.conf --logpath "$(pwd)"/mongod.log cd /data/job/build # run tests echo "Running tests..." @@ -24,7 +24,7 @@ mv $(pwd)/Testing/$(ls $(pwd)/Testing/ | grep '20' | tail -n 1)/Test.xml $XML_FI buildkite-agent artifact upload config.ini buildkite-agent artifact upload genesis.json cd .. -#buildkite-agent artifact upload mongod.log +buildkite-agent artifact upload mongod.log cd build buildkite-agent artifact upload $XML_FILENAME echo "Done uploading artifacts." 
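The mongod lifecycle that scripts/serial-test.sh manages around the serialized test run reduces to the following sketch (the ctest invocation and the explicit --shutdown step are assumptions for illustration; the script itself leaves the exact test command and final cleanup to the surrounding CI harness):

    # start mongod in the background; with --fork the command exits non-zero if
    # startup fails, which aborts the script early under set -e
    mongod --fork --dbpath ~/data/mongodb -f ~/etc/mongod.conf --logpath "$(pwd)/mongod.log"
    # run the serialized tests against the running instance (assumed ctest label)
    ctest -L nonparallelizable_tests --output-on-failure
    # stop mongod cleanly so mongod.log can be uploaded as a build artifact (assumed step)
    mongod --shutdown --dbpath ~/data/mongodb
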
From 3042142088070dab1c139a0c74eb5827c2af728d Mon Sep 17 00:00:00 2001
From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com>
Date: Thu, 25 Apr 2019 19:56:48 -0400
Subject: [PATCH 535/680] on macos, have cmake search through $PREFIX, not $HOME, as $HOME may no longer be where the script installed deps

---
 scripts/eosio_build.sh | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/scripts/eosio_build.sh b/scripts/eosio_build.sh
index 9987c42a2ac..f08a50dfa03 100755
--- a/scripts/eosio_build.sh
+++ b/scripts/eosio_build.sh
@@ -335,7 +335,7 @@ if [ "$ARCH" == "Darwin" ]; then
    export OS_NAME=MacOSX
    # opt/gettext: cleos requires Intl, which requires gettext; it's keg only though and we don't want to force linking: https://github.com/EOSIO/eos/issues/2240#issuecomment-396309884
    # HOME/lib/cmake: mongo_db_plugin.cpp:25:10: fatal error: 'bsoncxx/builder/basic/kvp.hpp' file not found
-   LOCAL_CMAKE_FLAGS="-DCMAKE_PREFIX_PATH=/usr/local/opt/gettext;$HOME ${LOCAL_CMAKE_FLAGS}"
+   LOCAL_CMAKE_FLAGS="-DCMAKE_PREFIX_PATH=/usr/local/opt/gettext;$PREFIX ${LOCAL_CMAKE_FLAGS}"
    FILE="${REPO_ROOT}/scripts/eosio_build_darwin.sh"
    OPENSSL_ROOT_DIR=/usr/local/opt/openssl
 fi

From 9ba29521a04cad0e980e9c7d4f2322f67dd1dd20 Mon Sep 17 00:00:00 2001
From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com>
Date: Thu, 25 Apr 2019 22:07:15 -0400
Subject: [PATCH 536/680] restore a few pieces mistakenly dropped from amazon script during merge

---
 scripts/eosio_build_amazon.sh | 15 ++++++++++++++-
 1 file changed, 14 insertions(+), 1 deletion(-)

diff --git a/scripts/eosio_build_amazon.sh b/scripts/eosio_build_amazon.sh
index 2ae1bea3f98..141a3af592a 100755
--- a/scripts/eosio_build_amazon.sh
+++ b/scripts/eosio_build_amazon.sh
@@ -114,6 +114,18 @@ fi
 
 printf "\\n"
 
+### clean up force build before starting
+if [ $FORCE_BUILD ];then
+   rm -rf \
+      ${SRC_LOCATION}/cmake-$CMAKE_VERSION \
+      ${SRC_LOCATION}/llvm ${OPT_LOCATION}/llvm4 \
+      ${TMP_LOCATION}/clang8 ${OPT_LOCATION}/clang8 \
+      ${SRC_LOCATION}/zlib ${OPT_LOCATION}/zlib \
+      ${SRC_LOCATION}/boost \
+      ${SRC_LOCATION}/mongodb-linux-x86_64-amazon-$MONGODB_VERSION \
+      ${SRC_LOCATION}/mongo-c-driver-$MONGO_C_DRIVER_VERSION \
+      ${SRC_LOCATION}/mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION
+fi
 
 printf "Checking CMAKE installation...\\n"
 if [ ! -e $CMAKE ] || [ $FORCE_BUILD ]; then
@@ -134,9 +146,10 @@ fi
 
 if [ $? -ne 0 ]; then exit -1; fi
 
+
 printf "\\n"
 
-if $PIN_COMPILER; then
+if [ $PIN_COMPILER ]; then
    printf "Checking Clang 8 support...\\n"
    if [ ! -d $CLANG8_ROOT ] || [ $FORCE_BUILD ]; then
      printf "Installing Clang 8...\\n"

From 8abb4cabc6d39abba4a45ddbfa55030db6dd6ec3 Mon Sep 17 00:00:00 2001
From: Kayan
Date: Fri, 26 Apr 2019 16:51:06 +0800
Subject: [PATCH 537/680] try to fix relaunch error in nodeos_irreversible_mode_lr_test

---
 tests/Node.py                          | 2 +-
 tests/nodeos_irreversible_mode_test.py | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/tests/Node.py b/tests/Node.py
index fc519a41569..343308be2c6 100644
--- a/tests/Node.py
+++ b/tests/Node.py
@@ -1393,7 +1393,7 @@ def isNodeAlive():
                 pass
             return False
 
-        isAlive=Utils.waitForBool(isNodeAlive, timeout)
+        isAlive=Utils.waitForBool(isNodeAlive, timeout, sleepTime=1)
         if isAlive:
             Utils.Print("Node relaunch was successfull.")
         else:

diff --git a/tests/nodeos_irreversible_mode_test.py b/tests/nodeos_irreversible_mode_test.py
index 934c5fcdaaa..dd7c836e98f 100755
--- a/tests/nodeos_irreversible_mode_test.py
+++ b/tests/nodeos_irreversible_mode_test.py
@@ -27,7 +27,7 @@
 Print = Utils.Print
 errorExit = Utils.errorExit
 cmdError = Utils.cmdError
-relaunchTimeout = 5
+relaunchTimeout = 10
 numOfProducers = 4
 totalNodes = 10

From ed20ca533e389188057e3e2ac9e90d09f337422c Mon Sep 17 00:00:00 2001
From: Kevin Heifner
Date: Fri, 26 Apr 2019 12:26:12 -0500
Subject: [PATCH 538/680] Update to fc with gcc fix

---
 libraries/fc | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/libraries/fc b/libraries/fc
index 62612633136..b06f25475a3 160000
--- a/libraries/fc
+++ b/libraries/fc
@@ -1 +1 @@
-Subproject commit 6261263313673fbefe24e2cf48035f3f9796768e
+Subproject commit b06f25475a3bb327bfd94b17ea25c2b1864d89e0

From 5d8ef4153f11c05b5cf94ceea1c87ec8d048f2c1 Mon Sep 17 00:00:00 2001
From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com>
Date: Fri, 26 Apr 2019 10:34:38 -0400
Subject: [PATCH 539/680] add NO_DEFAULT_PATH to mongo find_libraries to ensure we don't mistakenly pick up a static library from elsewhere

---
 plugins/mongo_db_plugin/CMakeLists.txt | 15 +++------------
 1 file changed, 3 insertions(+), 12 deletions(-)

diff --git a/plugins/mongo_db_plugin/CMakeLists.txt b/plugins/mongo_db_plugin/CMakeLists.txt
index 923e1f11c08..f4f65778ca8 100644
--- a/plugins/mongo_db_plugin/CMakeLists.txt
+++ b/plugins/mongo_db_plugin/CMakeLists.txt
@@ -5,20 +5,9 @@ if(BUILD_MONGO_DB_PLUGIN)
 
   if (libmongoc-1.0_FOUND)
     find_package(libbsoncxx-static REQUIRED)
-    find_library(EOS_LIBBSONCXX "libbsoncxx-static${CMAKE_STATIC_LIBRARY_SUFFIX}" PATHS ${LIBBSONCXX_STATIC_LIBRARY_DIRS})
     find_package(libmongocxx-static REQUIRED)
-    find_library(EOS_LIBMONGOCXX "libmongocxx-static${CMAKE_STATIC_LIBRARY_SUFFIX}" PATHS ${LIBMONGOCXX_STATIC_LIBRARY_DIRS})
     find_package(libmongoc-static-1.0 REQUIRED)
-    find_library(EOS_LIBMONGOC "libmongoc-static-1.0${CMAKE_STATIC_LIBRARY_SUFFIX}" PATHS ${LIBMONGOC_STATIC_LIBRARY_DIRS})
     find_package(libbson-static-1.0 REQUIRED)
-    find_library(EOS_LIBBSONC "libbson-static-1.0${CMAKE_STATIC_LIBRARY_SUFFIX}" PATHS ${LIBBSON_STATIC_LIBRARY_DIRS})
-
-    if(NOT EOS_LIBBSONCXX OR NOT EOS_LIBMONGOCXX OR NOT EOS_LIBMONGOC OR NOT EOS_LIBBSONC)
-      message(FATAL_ERROR "Could not find one or more mongo driver static libraries")
-    endif()
   else()
     message("Could NOT find MongoDB. 
mongo_db_plugin with MongoDB support will not be included.") @@ -42,7 +31,9 @@ if(BUILD_MONGO_DB_PLUGIN) target_link_libraries(mongo_db_plugin PUBLIC chain_plugin eosio_chain appbase - ${EOS_LIBMONGOCXX} ${EOS_LIBBSONCXX} ${EOS_LIBMONGOC} ${EOS_LIBBSONC} resolv + ${LIBMONGOCXX_STATIC_LIBRARY_PATH} ${LIBBSONCXX_STATIC_LIBRARY_PATH} + ${MONGOC_STATIC_LIBRARY} ${BSON_STATIC_LIBRARY} + resolv ) else() From 29e0ead7dc0d4a5433cb9e308dfa2b838e546b4c Mon Sep 17 00:00:00 2001 From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com> Date: Fri, 26 Apr 2019 13:58:42 -0400 Subject: [PATCH 540/680] fix the incorrect check on $BUILD_CLANG8 --- scripts/eosio_build_amazon.sh | 2 +- scripts/eosio_build_centos.sh | 2 +- scripts/eosio_build_ubuntu.sh | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/scripts/eosio_build_amazon.sh b/scripts/eosio_build_amazon.sh index 141a3af592a..3131cbfc755 100755 --- a/scripts/eosio_build_amazon.sh +++ b/scripts/eosio_build_amazon.sh @@ -8,7 +8,7 @@ DISK_AVAIL_KB=$( df . | tail -1 | awk '{print $4}' ) DISK_TOTAL=$(( DISK_TOTAL_KB / 1048576 )) DISK_AVAIL=$(( DISK_AVAIL_KB / 1048576 )) -if [ $BUILD_CLANG8 ]; then +if [ "$BUILD_CLANG8" = "true" ]; then PINNED_TOOLCHAIN=-DCMAKE_TOOLCHAIN_FILE=$BUILD_DIR/pinned_toolchain.cmake fi diff --git a/scripts/eosio_build_centos.sh b/scripts/eosio_build_centos.sh index 2479e8df282..0c1f7ce86ad 100755 --- a/scripts/eosio_build_centos.sh +++ b/scripts/eosio_build_centos.sh @@ -25,7 +25,7 @@ printf "Disk space total: ${DISK_TOTAL%.*}G\\n" printf "Disk space available: ${DISK_AVAIL%.*}G\\n" printf "Concurrent Jobs (make -j): ${JOBS}\\n" -if [ $BUILD_CLANG8 ]; then +if [ "$BUILD_CLANG8" = "true" ]; then PINNED_TOOLCHAIN=-DCMAKE_TOOLCHAIN_FILE=$BUILD_DIR/pinned_toolchain.cmake fi diff --git a/scripts/eosio_build_ubuntu.sh b/scripts/eosio_build_ubuntu.sh index 489266d28d7..acb25a16f31 100755 --- a/scripts/eosio_build_ubuntu.sh +++ b/scripts/eosio_build_ubuntu.sh @@ -25,7 +25,7 @@ printf "Disk install: ${DISK_INSTALL}\\n" printf "Disk space total: ${DISK_TOTAL%.*}G\\n" printf "Disk space available: ${DISK_AVAIL%.*}G\\n" -if [ $BUILD_CLANG8 ]; then +if [ "$BUILD_CLANG8" = "true" ]; then PINNED_TOOLCHAIN=-DCMAKE_TOOLCHAIN_FILE=$BUILD_DIR/pinned_toolchain.cmake fi From f7cefd8bea468584acd87b9071da9579682d8f9e Mon Sep 17 00:00:00 2001 From: arhag Date: Fri, 26 Apr 2019 14:38:26 -0400 Subject: [PATCH 541/680] update wabt submodule that removes -Werror --- libraries/wabt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/wabt b/libraries/wabt index 6032d829753..fc2fd07ee2f 160000 --- a/libraries/wabt +++ b/libraries/wabt @@ -1 +1 @@ -Subproject commit 6032d829753b1eea7113cb1901410788ff687bdf +Subproject commit fc2fd07ee2f62fe97c2f90d27b7837ad8ca42fb9 From f2208a97f1b4ac2d8773071a5ef5ce4a442ed80c Mon Sep 17 00:00:00 2001 From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com> Date: Fri, 26 Apr 2019 14:41:09 -0400 Subject: [PATCH 542/680] when building mongo cxx driver, point cmake toward built c driver --- scripts/eosio_build_amazon.sh | 2 +- scripts/eosio_build_centos.sh | 2 +- scripts/eosio_build_darwin.sh | 2 +- scripts/eosio_build_ubuntu.sh | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/scripts/eosio_build_amazon.sh b/scripts/eosio_build_amazon.sh index 3131cbfc755..71953c48b25 100755 --- a/scripts/eosio_build_amazon.sh +++ b/scripts/eosio_build_amazon.sh @@ -333,7 +333,7 @@ if [ $BUILD_MONGO ]; then && sed -i 's/"maxAwaitTimeMS", count/"maxAwaitTimeMS", 
static_cast(count)/' src/mongocxx/options/change_stream.cpp \ && sed -i 's/add_subdirectory(test)//' src/mongocxx/CMakeLists.txt src/bsoncxx/CMakeLists.txt \ && cd build \ - && $CMAKE -DBUILD_SHARED_LIBS=OFF -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$PREFIX $PINNED_TOOLCHAIN .. \ + && $CMAKE -DBUILD_SHARED_LIBS=OFF -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$PREFIX -DCMAKE_PREFIX_PATH=$PREFIX $PINNED_TOOLCHAIN .. \ && make -j"${JOBS}" VERBOSE=1 \ && make install \ && cd ../.. \ diff --git a/scripts/eosio_build_centos.sh b/scripts/eosio_build_centos.sh index 0c1f7ce86ad..b48b5ea45f4 100755 --- a/scripts/eosio_build_centos.sh +++ b/scripts/eosio_build_centos.sh @@ -392,7 +392,7 @@ if [ $BUILD_MONGO ]; then && sed -i 's/"maxAwaitTimeMS", count/"maxAwaitTimeMS", static_cast(count)/' src/mongocxx/options/change_stream.cpp \ && sed -i 's/add_subdirectory(test)//' src/mongocxx/CMakeLists.txt src/bsoncxx/CMakeLists.txt \ && cd build \ - && $CMAKE -DBUILD_SHARED_LIBS=OFF -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$PREFIX $PINNED_TOOLCHAIN .. \ + && $CMAKE -DBUILD_SHARED_LIBS=OFF -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$PREFIX -DCMAKE_PREFIX_PATH=$PREFIX $PINNED_TOOLCHAIN .. \ && make -j"${JOBS}" VERBOSE=1 \ && make install \ && cd ../.. \ diff --git a/scripts/eosio_build_darwin.sh b/scripts/eosio_build_darwin.sh index 8579a3f0370..b6ddf93236c 100755 --- a/scripts/eosio_build_darwin.sh +++ b/scripts/eosio_build_darwin.sh @@ -247,7 +247,7 @@ if [ $BUILD_MONGO ]; then && sed -i '' 's/"maxAwaitTimeMS", count/"maxAwaitTimeMS", static_cast(count)/' src/mongocxx/options/change_stream.cpp \ && sed -i '' 's/add_subdirectory(test)//' src/mongocxx/CMakeLists.txt src/bsoncxx/CMakeLists.txt \ && cd build \ - && $CMAKE -DBUILD_SHARED_LIBS=OFF -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$PREFIX .. \ + && $CMAKE -DBUILD_SHARED_LIBS=OFF -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$PREFIX -DCMAKE_PREFIX_PATH=$PREFIX .. \ && make -j"${JOBS}" VERBOSE=1 \ && make install \ && cd ../.. \ diff --git a/scripts/eosio_build_ubuntu.sh b/scripts/eosio_build_ubuntu.sh index acb25a16f31..66c1d1203cc 100755 --- a/scripts/eosio_build_ubuntu.sh +++ b/scripts/eosio_build_ubuntu.sh @@ -335,7 +335,7 @@ if [ $BUILD_MONGO ]; then && sed -i 's/"maxAwaitTimeMS", count/"maxAwaitTimeMS", static_cast(count)/' src/mongocxx/options/change_stream.cpp \ && sed -i 's/add_subdirectory(test)//' src/mongocxx/CMakeLists.txt src/bsoncxx/CMakeLists.txt \ && cd build \ - && $CMAKE -DBUILD_SHARED_LIBS=OFF -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$PREFIX $PINNED_TOOLCHAIN .. \ + && $CMAKE -DBUILD_SHARED_LIBS=OFF -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$PREFIX -DCMAKE_PREFIX_PATH=$PREFIX $PINNED_TOOLCHAIN .. \ && make -j"${JOBS}" VERBOSE=1 \ && make install \ && cd ../.. 
\

From 09722192db9c28e9100d739ad06a6c6135334301 Mon Sep 17 00:00:00 2001
From: arhag
Date: Fri, 26 Apr 2019 14:55:25 -0400
Subject: [PATCH 543/680] update wabt submodule again

---
 libraries/wabt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/libraries/wabt b/libraries/wabt
index fc2fd07ee2f..ce5c90e456f 160000
--- a/libraries/wabt
+++ b/libraries/wabt
@@ -1 +1 @@
-Subproject commit fc2fd07ee2f62fe97c2f90d27b7837ad8ca42fb9
+Subproject commit ce5c90e456f004558e123d606a6d1601587f00bc

From 884714f3fd05ce813f3f198b85a254efb168ff7a Mon Sep 17 00:00:00 2001
From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com>
Date: Fri, 26 Apr 2019 17:18:07 -0400
Subject: [PATCH 544/680] link to icu or sasl2 when appropriate for mongo

mongo-c-driver could require linkage to libicu or libsasl2 depending on how it was configured when built. We can't just use MONGOC_STATIC_LIBRARIES variables from its cmake file because these static variants may try to static link against libraries we don't want (like a system libc/c++). But we need to know if mongo c driver was built with ICU or SASL2 support so that we can continue to link to those. This certainly is a bit on the fragile side but try to parse what is included in MONGOC_STATIC_LIBRARIES to see what we should link to
---
 plugins/mongo_db_plugin/CMakeLists.txt | 19 ++++++++++++++++++-
 1 file changed, 18 insertions(+), 1 deletion(-)

diff --git a/plugins/mongo_db_plugin/CMakeLists.txt b/plugins/mongo_db_plugin/CMakeLists.txt
index f4f65778ca8..1a8003af44c 100644
--- a/plugins/mongo_db_plugin/CMakeLists.txt
+++ b/plugins/mongo_db_plugin/CMakeLists.txt
@@ -29,11 +29,28 @@ if(BUILD_MONGO_DB_PLUGIN)
                           PRIVATE ${LIBMONGOCXX_STATIC_DEFINITIONS} ${LIBBSONCXX_STATIC_DEFINITIONS}
                           )
 
+    # We can't just use *_STATIC_LIBRARIES variables to link against because the static
+    # variants of these may try to static link against libraries we don't want (like a system
+    # libc/c++). But we need to know if mongo c driver was built with ICU or SASL2 support so
+    # that we can continue to link to those. This certainly is a bit on the fragile side but
+    # try to parse what is included in MONGOC_STATIC_LIBRARIES to see what we should link to
+    foreach(MONGO_S_LIB ${MONGOC_STATIC_LIBRARIES})
+      string(REGEX MATCH "libsasl2\\${CMAKE_SHARED_LIBRARY_SUFFIX}$" REGOUT ${MONGO_S_LIB})
+      if(REGOUT)
+        set(LINK_SASL "sasl2")
+      endif()
+
+      string(REGEX MATCH "libicuuc\\${CMAKE_SHARED_LIBRARY_SUFFIX}$" REGOUT ${MONGO_S_LIB})
+      if(REGOUT)
+        set(LINK_ICU "icuuc")
+      endif()
+    endforeach()
+
     target_link_libraries(mongo_db_plugin
                           PUBLIC chain_plugin eosio_chain appbase
                           ${LIBMONGOCXX_STATIC_LIBRARY_PATH} ${LIBBSONCXX_STATIC_LIBRARY_PATH}
                           ${MONGOC_STATIC_LIBRARY} ${BSON_STATIC_LIBRARY}
-                          resolv
+                          resolv ${LINK_SASL} ${LINK_ICU}
                           )
 
 else()

From f4f58794fa07c5753267cf5ef61b7f487b266048 Mon Sep 17 00:00:00 2001
From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com>
Date: Fri, 26 Apr 2019 18:34:15 -0400
Subject: [PATCH 545/680] build clang8 only when $BUILD_CLANG8 is "true"

---
 scripts/eosio_build_amazon.sh | 2 +-
 scripts/eosio_build_centos.sh | 2 +-
 scripts/eosio_build_darwin.sh | 2 +-
 scripts/eosio_build_ubuntu.sh | 2 +-
 4 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/scripts/eosio_build_amazon.sh b/scripts/eosio_build_amazon.sh
index 141a3af592a..3131cbfc755 100755
--- a/scripts/eosio_build_amazon.sh
+++ b/scripts/eosio_build_amazon.sh
@@ -149,7 +149,7 @@ if [ $? 
-ne 0 ]; then exit -1; fi printf "\\n" -if [ $PIN_COMPILER ]; then +if [ "$BUILD_CLANG8" = "true" ]; then printf "Checking Clang 8 support...\\n" if [ ! -d $CLANG8_ROOT ] || [ $FORCE_BUILD ]; then printf "Installing Clang 8...\\n" diff --git a/scripts/eosio_build_centos.sh b/scripts/eosio_build_centos.sh index b48b5ea45f4..003e90783d3 100755 --- a/scripts/eosio_build_centos.sh +++ b/scripts/eosio_build_centos.sh @@ -209,7 +209,7 @@ printf "\\n" export CPATH="${CPATH}:${PYTHON3PATH}/root/usr/include/python3.6m" # m on the end causes problems with boost finding python3 -if [ $PIN_COMPILER ]; then +if [ "$BUILD_CLANG8" = "true" ]; then printf "Checking Clang 8 support...\\n" if [ ! -d $CLANG8_ROOT ] || [ $FORCE_BUILD ]; then printf "Installing Clang 8...\\n" diff --git a/scripts/eosio_build_darwin.sh b/scripts/eosio_build_darwin.sh index b6ddf93236c..f9170f4b8c2 100755 --- a/scripts/eosio_build_darwin.sh +++ b/scripts/eosio_build_darwin.sh @@ -277,7 +277,7 @@ fi cd .. printf "\\n" -if $PIN_COMPILER; then +if [ "$BUILD_CLANG8" = "true" ]; then printf "Checking Clang 8 support...\\n" if [ ! -d $CLANG8_ROOT ] || [ $FORCE_BUILD ]; then printf "Installing Clang 8...\\n" diff --git a/scripts/eosio_build_ubuntu.sh b/scripts/eosio_build_ubuntu.sh index 66c1d1203cc..97221823a88 100755 --- a/scripts/eosio_build_ubuntu.sh +++ b/scripts/eosio_build_ubuntu.sh @@ -162,7 +162,7 @@ if [ $? -ne 0 ]; then exit -1; fi printf "\\n" -if [ $PIN_COMPILER ]; then +if [ "$BUILD_CLANG8" = "true" ]; then printf "Checking Clang 8 support...\\n" if [ ! -d $CLANG8_ROOT ] || [ $FORCE_BUILD ]; then printf "Installing Clang 8...\\n" From a6a172b0305c423943269ec977deb83b94bafd5d Mon Sep 17 00:00:00 2001 From: arhag Date: Fri, 26 Apr 2019 19:57:49 -0400 Subject: [PATCH 546/680] link to snappy when appropriate for mongo --- plugins/mongo_db_plugin/CMakeLists.txt | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/plugins/mongo_db_plugin/CMakeLists.txt b/plugins/mongo_db_plugin/CMakeLists.txt index 1a8003af44c..0882550c947 100644 --- a/plugins/mongo_db_plugin/CMakeLists.txt +++ b/plugins/mongo_db_plugin/CMakeLists.txt @@ -31,8 +31,8 @@ if(BUILD_MONGO_DB_PLUGIN) # We can't just use *_STATIC_LIBRARIES variables to link against because the static # variants of these may try to static link against libraries we don't want (like a system - # libc/c++). But we need to know if mongo c driver was built with ICU or SASL2 support so - # that we can continue to link to those. This certainly is a bit on the fragile side but + # libc/c++). But we need to know if mongo c driver was built with ICU, SASL2, or snappy support + # so that we can continue to link to those. 
This certainly is a bit on the fragile side but
+    # try to parse what is included in MONGOC_STATIC_LIBRARIES to see what we should link to
     foreach(MONGO_S_LIB ${MONGOC_STATIC_LIBRARIES})
       string(REGEX MATCH "libsasl2\\${CMAKE_SHARED_LIBRARY_SUFFIX}$" REGOUT ${MONGO_S_LIB})
@@ -44,15 +44,20 @@ if(BUILD_MONGO_DB_PLUGIN)
       if(REGOUT)
         set(LINK_ICU "icuuc")
       endif()
+
+      string(REGEX MATCH "libsnappy\\${CMAKE_SHARED_LIBRARY_SUFFIX}$" REGOUT ${MONGO_S_LIB})
+      if(REGOUT)
+        set(LINK_SNAPPY "snappy")
+      endif()
     endforeach()
 
     target_link_libraries(mongo_db_plugin
                           PUBLIC chain_plugin eosio_chain appbase
                           ${LIBMONGOCXX_STATIC_LIBRARY_PATH} ${LIBBSONCXX_STATIC_LIBRARY_PATH}
                           ${MONGOC_STATIC_LIBRARY} ${BSON_STATIC_LIBRARY}
-                          resolv ${LINK_SASL} ${LINK_ICU}
+                          resolv ${LINK_SASL} ${LINK_ICU} ${LINK_SNAPPY}
                           )
-
+
 else()
     message("mongo_db_plugin not selected and will be omitted.")
 endif()

From 84f817b6eb9c3181cc5223c30f80c7274c88ce10 Mon Sep 17 00:00:00 2001
From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com>
Date: Fri, 26 Apr 2019 20:20:56 -0400
Subject: [PATCH 547/680] disable snappy support for mongo-c-driver

if mongo-c-driver links to snappy then snappy's dynamic lib may pull in the system's libstdc++, which could cause problems in pinning build mode
---
 scripts/eosio_build_amazon.sh | 2 +-
 scripts/eosio_build_centos.sh | 2 +-
 scripts/eosio_build_darwin.sh | 2 +-
 scripts/eosio_build_ubuntu.sh | 2 +-
 4 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/scripts/eosio_build_amazon.sh b/scripts/eosio_build_amazon.sh
index 1c04d8c4b1c..c379e3ed96a 100755
--- a/scripts/eosio_build_amazon.sh
+++ b/scripts/eosio_build_amazon.sh
@@ -313,7 +313,7 @@ if [ $BUILD_MONGO ]; then
       && cd mongo-c-driver-$MONGO_C_DRIVER_VERSION \
       && mkdir -p cmake-build \
       && cd cmake-build \
-      && $CMAKE -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$PREFIX -DENABLE_BSON=ON -DENABLE_SSL=OPENSSL -DENABLE_AUTOMATIC_INIT_AND_CLEANUP=OFF -DENABLE_STATIC=ON -DENABLE_ICU=OFF $PINNED_TOOLCHAIN .. \
+      && $CMAKE -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$PREFIX -DENABLE_BSON=ON -DENABLE_SSL=OPENSSL -DENABLE_AUTOMATIC_INIT_AND_CLEANUP=OFF -DENABLE_STATIC=ON -DENABLE_ICU=OFF -DENABLE_SNAPPY=OFF $PINNED_TOOLCHAIN .. \
       && make -j"${JOBS}" \
       && make install \
       && cd ../.. \

diff --git a/scripts/eosio_build_centos.sh b/scripts/eosio_build_centos.sh
index 003e90783d3..d5fea6cccce 100755
--- a/scripts/eosio_build_centos.sh
+++ b/scripts/eosio_build_centos.sh
@@ -372,7 +372,7 @@ if [ $BUILD_MONGO ]; then
       && cd mongo-c-driver-$MONGO_C_DRIVER_VERSION \
       && mkdir -p cmake-build \
       && cd cmake-build \
-      && $CMAKE -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$PREFIX -DENABLE_BSON=ON -DENABLE_SSL=OPENSSL -DENABLE_AUTOMATIC_INIT_AND_CLEANUP=OFF -DENABLE_STATIC=ON -DENABLE_ICU=OFF $PINNED_TOOLCHAIN .. \
+      && $CMAKE -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$PREFIX -DENABLE_BSON=ON -DENABLE_SSL=OPENSSL -DENABLE_AUTOMATIC_INIT_AND_CLEANUP=OFF -DENABLE_STATIC=ON -DENABLE_ICU=OFF -DENABLE_SNAPPY=OFF $PINNED_TOOLCHAIN .. \
       && make -j"${JOBS}" \
       && make install \
       && cd ../..
\ diff --git a/scripts/eosio_build_darwin.sh b/scripts/eosio_build_darwin.sh index f9170f4b8c2..8fb9b895766 100755 --- a/scripts/eosio_build_darwin.sh +++ b/scripts/eosio_build_darwin.sh @@ -227,7 +227,7 @@ if [ $BUILD_MONGO ]; then && cd mongo-c-driver-$MONGO_C_DRIVER_VERSION \ && mkdir -p cmake-build \ && cd cmake-build \ - && $CMAKE -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$PREFIX -DENABLE_BSON=ON -DENABLE_SSL=DARWIN -DENABLE_AUTOMATIC_INIT_AND_CLEANUP=OFF -DENABLE_STATIC=ON -DENABLE_ICU=OFF -DENABLE_SASL=OFF .. \ + && $CMAKE -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$PREFIX -DENABLE_BSON=ON -DENABLE_SSL=DARWIN -DENABLE_AUTOMATIC_INIT_AND_CLEANUP=OFF -DENABLE_STATIC=ON -DENABLE_ICU=OFF -DENABLE_SASL=OFF -DENABLE_SNAPPY=OFF .. \ && make -j"${JOBS}" \ && make install \ && cd ../.. \ diff --git a/scripts/eosio_build_ubuntu.sh b/scripts/eosio_build_ubuntu.sh index 97221823a88..a5b1563af8d 100755 --- a/scripts/eosio_build_ubuntu.sh +++ b/scripts/eosio_build_ubuntu.sh @@ -314,7 +314,7 @@ if [ $BUILD_MONGO ]; then && cd mongo-c-driver-$MONGO_C_DRIVER_VERSION \ && mkdir -p cmake-build \ && cd cmake-build \ - && $CMAKE -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$PREFIX -DENABLE_BSON=ON -DENABLE_SSL=OPENSSL -DENABLE_AUTOMATIC_INIT_AND_CLEANUP=OFF -DENABLE_STATIC=ON -DENABLE_ICU=OFF $PINNED_TOOLCHAIN .. \ + && $CMAKE -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$PREFIX -DENABLE_BSON=ON -DENABLE_SSL=OPENSSL -DENABLE_AUTOMATIC_INIT_AND_CLEANUP=OFF -DENABLE_STATIC=ON -DENABLE_ICU=OFF -DENABLE_SNAPPY=OFF $PINNED_TOOLCHAIN .. \ && make -j"${JOBS}" \ && make install \ && cd ../.. \ From cf63106cd420b69a8a6c19eb0f51fbabe96f27ec Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Thu, 18 Apr 2019 08:01:12 -0500 Subject: [PATCH 548/680] Fix shutdown of catchup node --- tests/nodeos_startup_catchup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/nodeos_startup_catchup.py b/tests/nodeos_startup_catchup.py index 6d811fbce67..5934750d3a9 100755 --- a/tests/nodeos_startup_catchup.py +++ b/tests/nodeos_startup_catchup.py @@ -165,7 +165,7 @@ def waitForNodeStarted(node): catchupNode.interruptAndVerifyExitStatus(60) Print("Restart catchup node") - catchupNode.relaunch(catchupNodeNum) + catchupNode.relaunch(catchupNodeNum, cachePopen=True) waitForNodeStarted(catchupNode) lastCatchupLibNum=lib(catchupNode) From 2e2fbbc0efa5777b282ff3812d634cc6e7360e96 Mon Sep 17 00:00:00 2001 From: arhag Date: Mon, 29 Apr 2019 10:54:50 -0400 Subject: [PATCH 549/680] bump version to 1.8.0-rc1 --- CMakeLists.txt | 2 +- README.md | 12 ++++++------ 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 4d708ac5d44..abba4ccde42 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -32,7 +32,7 @@ set( CXX_STANDARD_REQUIRED ON) set(VERSION_MAJOR 1) set(VERSION_MINOR 8) set(VERSION_PATCH 0) -set(VERSION_SUFFIX develop) +set(VERSION_SUFFIX rc1) if(VERSION_SUFFIX) set(VERSION_FULL "${VERSION_MAJOR}.${VERSION_MINOR}.${VERSION_PATCH}-${VERSION_SUFFIX}") diff --git a/README.md b/README.md index 6230df33628..59f4d9c612a 100644 --- a/README.md +++ b/README.md @@ -44,13 +44,13 @@ $ brew remove eosio #### Ubuntu 18.04 Package Install ```sh -$ wget https://github.com/eosio/eos/releases/download/v1.6.0/eosio_1.7.0-rc1-ubuntu-18.04_amd64.deb -$ sudo apt install ./eosio_1.7.0-rc1-ubuntu-18.04_amd64.deb +$ wget https://github.com/eosio/eos/releases/download/v1.8.0-rc1/eosio_1.8.0-rc1-ubuntu-18.04_amd64.deb +$ sudo apt install 
./eosio_1.8.0-rc1-ubuntu-18.04_amd64.deb ``` #### Ubuntu 16.04 Package Install ```sh -$ wget https://github.com/eosio/eos/releases/download/v1.6.0/eosio_1.7.0-rc1-ubuntu-16.04_amd64.deb -$ sudo apt install ./eosio_1.7.0-rc1-ubuntu-16.04_amd64.deb +$ wget https://github.com/eosio/eos/releases/download/v1.8.0-rc1/eosio_1.8.0-rc1-ubuntu-16.04_amd64.deb +$ sudo apt install ./eosio_1.8.0-rc1-ubuntu-16.04_amd64.deb ``` #### Ubuntu Package Uninstall ```sh @@ -58,8 +58,8 @@ $ sudo apt remove eosio ``` #### Centos RPM Package Install ```sh -$ wget https://github.com/eosio/eos/releases/download/v1.6.0/eosio-1.7.0-rc1.el7.x86_64.rpm -$ sudo yum install ./eosio-1.7.0-rc1.el7.x86_64.rpm +$ wget https://github.com/eosio/eos/releases/download/v1.8.0-rc1/eosio-1.8.0-rc1.el7.x86_64.rpm +$ sudo yum install ./eosio-1.8.0-rc1.el7.x86_64.rpm ``` #### Centos RPM Package Uninstall ```sh From 881ac415443857e95f299b1797346302027cbdf1 Mon Sep 17 00:00:00 2001 From: Zach Butler Date: Tue, 30 Apr 2019 12:45:16 -0400 Subject: [PATCH 550/680] Created universal pipeline configuration file --- .pipelinebranch | 1 - pipeline.jsonc | 18 ++++++++++++++++++ tests/multiversion.conf | 2 -- 3 files changed, 18 insertions(+), 3 deletions(-) delete mode 100644 .pipelinebranch create mode 100644 pipeline.jsonc delete mode 100644 tests/multiversion.conf diff --git a/.pipelinebranch b/.pipelinebranch deleted file mode 100644 index e763e7d7fdc..00000000000 --- a/.pipelinebranch +++ /dev/null @@ -1 +0,0 @@ -protocol-features-sync-nodes diff --git a/pipeline.jsonc b/pipeline.jsonc new file mode 100644 index 00000000000..80edbf13da2 --- /dev/null +++ b/pipeline.jsonc @@ -0,0 +1,18 @@ +{ + "eosio": + { + "pipeline-branch": "master" + }, + "eos-multiversion-tests": + { + "pipeline-branch": "protocol-features-sync-nodes", + "configuration": + [ + "170=v1.7.0" + ] + }, + "eosio-sync-tests": + { + "pipeline-branch": "protocol-features-sync-nodes" + } +} \ No newline at end of file diff --git a/tests/multiversion.conf b/tests/multiversion.conf deleted file mode 100644 index 544263173a9..00000000000 --- a/tests/multiversion.conf +++ /dev/null @@ -1,2 +0,0 @@ -[eosio] -170=v1.7.0 From 9ddc493da452c0aadc7e16d12f4965231eb493c4 Mon Sep 17 00:00:00 2001 From: Zach Butler Date: Tue, 30 Apr 2019 14:14:13 -0400 Subject: [PATCH 551/680] Added eosio-build-unpinned pipeline --- pipeline.json | 1 + 1 file changed, 1 insertion(+) create mode 100644 pipeline.json diff --git a/pipeline.json b/pipeline.json new file mode 100644 index 00000000000..19765bd501b --- /dev/null +++ b/pipeline.json @@ -0,0 +1 @@ +null From f502fe4e4b5083c25b2bf9abfecbdf420e66bafb Mon Sep 17 00:00:00 2001 From: Zach Butler Date: Tue, 30 Apr 2019 14:17:32 -0400 Subject: [PATCH 552/680] Accidentally added intermediate file --- pipeline.json | 1 - pipeline.jsonc | 4 ++-- 2 files changed, 2 insertions(+), 3 deletions(-) delete mode 100644 pipeline.json diff --git a/pipeline.json b/pipeline.json deleted file mode 100644 index 19765bd501b..00000000000 --- a/pipeline.json +++ /dev/null @@ -1 +0,0 @@ -null diff --git a/pipeline.jsonc b/pipeline.jsonc index 80edbf13da2..cf98935463b 100644 --- a/pipeline.jsonc +++ b/pipeline.jsonc @@ -1,7 +1,7 @@ { - "eosio": + "eosio-build-unpinned": { - "pipeline-branch": "master" + "pipeline-branch": "protocol-features-sync-nodes" }, "eos-multiversion-tests": { From af08a29bd539141a261a9c683af6ad89db13cef8 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Wed, 1 May 2019 12:01:58 -0500 Subject: [PATCH 553/680] Change default log level from debug to info. 
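Operators who still want verbose output can opt back in through their logging
config rather than relying on the compiled-in default. As a minimal sketch of
such an override (assuming the usual logging.json layout that nodeos reads from
its config directory, with field names taken from fc's logging config; the
"stderr" appender is assumed to be defined elsewhere in the same file):

```jsonc
{
  // illustrative only -- strip the comments for a real logging.json,
  // which is parsed as strict JSON
  "loggers": [
    {
      "name": "default",
      "level": "debug",   // restores the old default; "info" is the new one
      "enabled": true,
      "additivity": false,
      "appenders": [ "stderr" ]
    }
  ]
}
```

If no logging config is present, the info default from the updated fc
submodule below applies.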
--- libraries/fc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/fc b/libraries/fc index b06f25475a3..4e30824aba2 160000 --- a/libraries/fc +++ b/libraries/fc @@ -1 +1 @@ -Subproject commit b06f25475a3bb327bfd94b17ea25c2b1864d89e0 +Subproject commit 4e30824aba228c96dae57dfea84b489b81df9537 From 07f7c9970305ee289c3dd808139416b1ee0cfce5 Mon Sep 17 00:00:00 2001 From: Zach Butler Date: Thu, 2 May 2019 21:33:25 -0400 Subject: [PATCH 554/680] Removed commented-out code --- .buildkite/pipeline.yml | 82 +---------------------------------------- 1 file changed, 1 insertion(+), 81 deletions(-) diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml index bf0db2dc12e..ad160602773 100644 --- a/.buildkite/pipeline.yml +++ b/.buildkite/pipeline.yml @@ -1,46 +1,4 @@ -# env: -# ANKA_WORKDIR: "/data/job" -# ANKA_MOJAVE_TEMPLATE: "10.14.4_6C_14G_40G" -# ANKA_TEMPLATE_TAG: "clean::cicd::git-ssh::nas::brew" -# CHECKSUMABLE: "scripts/eosio_build*" -# MAC_TAG: "eosio_2-3" - steps: - - # - trigger: "mac-anka-fleet" - # label: ":anka: Ensure Mojave Anka Template Tag Exists" - # branches: "*" - # async: false - # build: - # branch: "master" - # env: - # REPO: "${BUILDKITE_REPO}" - # REPO_BRANCH: "${BUILDKITE_BRANCH}" - # CHECKSUMABLE: "${CHECKSUMABLE}" - # TEMPLATE: "${ANKA_MOJAVE_TEMPLATE}" - # TEMPLATE_TAG: "${ANKA_TEMPLATE_TAG}" - # TAG_COMMANDS: "CLONED_REPO_DIR/scripts/eosio_build.sh -y -P -f" # CLONED_REPO_DIR IS REQUIRED and is where the repo is always cloned into - # PROJECT_TAG: "${MAC_TAG}" - - # - wait - - # - label: ":darwin: [Darwin] Mojave Build" - # command: - # - "./scripts/eosio_build.sh -y -P" - # - "tar -pczf /Network/NAS/MAC_FLEET/BUILDKITE/artifacts/${ANKA_MOJAVE_TEMPLATE}-${BUILDKITE_PIPELINE_SLUG}-${BUILDKITE_BUILD_ID}.tar.gz build" - # plugins: - # chef/anka#v0.4.3: - # vm-name: $ANKA_MOJAVE_TEMPLATE - # vm-registry-tag: "${ANKA_TEMPLATE_TAG}::${MAC_TAG}" - # workdir: $ANKA_WORKDIR - # always-pull: true - # debug: true - # wait-network: true - # agents: - # - "queue=mac-anka-node-fleet" - # timeout: 120 - - - command: | # macOS Mojave Build echo "--- Creating symbolic link to job directory :file_folder:" sleep 5 @@ -324,44 +282,6 @@ steps: workdir: /data/job timeout: 60 - # - label: ":darwin: [Darwin] Mojave Tests" - # command: - # - "./scripts/parallel-test.sh /Network/NAS/MAC_FLEET/BUILDKITE/artifacts/${ANKA_MOJAVE_TEMPLATE}-${BUILDKITE_PIPELINE_SLUG}-${BUILDKITE_BUILD_ID}.tar.gz" - # artifact_paths: - # - "build/mongod.log" - # - "build/genesis.json" - # - "build/config.ini" - # agents: - # - "queue=mac-anka-node-fleet" - # plugins: - # chef/anka#v0.4.3: - # vm-name: $ANKA_MOJAVE_TEMPLATE - # vm-registry-tag: "${ANKA_TEMPLATE_TAG}::${MAC_TAG}" - # workdir: $ANKA_WORKDIR - # always-pull: true - # debug: true - # wait-network: true - # timeout: 120 - - # - label: ":darwin: [Darwin] Mojave Tests" - # command: - # - "./scripts/serial-test.sh /Network/NAS/MAC_FLEET/BUILDKITE/artifacts/${ANKA_MOJAVE_TEMPLATE}-${BUILDKITE_PIPELINE_SLUG}-${BUILDKITE_BUILD_ID}.tar.gz" - # artifact_paths: - # - "build/mongod.log" - # - "build/genesis.json" - # - "build/config.ini" - # agents: - # - "queue=mac-anka-node-fleet" - # plugins: - # chef/anka#v0.4.3: - # vm-name: $ANKA_MOJAVE_TEMPLATE - # vm-registry-tag: "${ANKA_TEMPLATE_TAG}::${MAC_TAG}" - # workdir: $ANKA_WORKDIR - # always-pull: true - # debug: true - # wait-network: true - # timeout: 120 - # Mojave Tests - command: | echo "--- :arrow_down: Downloading Build Directory" @@ -515,4 +435,4 @@ steps: label: "Git 
Submodule Regression Check" agents: queue: "automation-large-builder-fleet" - timeout: 5 + timeout: 5 \ No newline at end of file From 288b97f9039d2ee95811cfce473186f102120859 Mon Sep 17 00:00:00 2001 From: Zach Butler Date: Thu, 2 May 2019 21:35:18 -0400 Subject: [PATCH 555/680] Created test metrics Buildkite job --- .buildkite/pipeline.yml | 10 + .../node_modules/node-fetch/CHANGELOG.md | 260 +++ .../node_modules/node-fetch/LICENSE.md | 22 + .../metrics/node_modules/node-fetch/README.md | 538 ++++++ .../node_modules/node-fetch/browser.js | 23 + .../node_modules/node-fetch/lib/index.es.js | 1631 ++++++++++++++++ .../node_modules/node-fetch/lib/index.js | 1640 +++++++++++++++++ .../node_modules/node-fetch/lib/index.mjs | 1629 ++++++++++++++++ .../node_modules/node-fetch/package.json | 94 + scripts/metrics/node_modules/sax/LICENSE | 41 + scripts/metrics/node_modules/sax/README.md | 225 +++ scripts/metrics/node_modules/sax/lib/sax.js | 1565 ++++++++++++++++ scripts/metrics/node_modules/sax/package.json | 61 + scripts/metrics/node_modules/xml2js/LICENSE | 19 + scripts/metrics/node_modules/xml2js/README.md | 406 ++++ .../metrics/node_modules/xml2js/lib/bom.js | 12 + .../node_modules/xml2js/lib/builder.js | 127 ++ .../node_modules/xml2js/lib/defaults.js | 72 + .../metrics/node_modules/xml2js/lib/parser.js | 357 ++++ .../node_modules/xml2js/lib/processors.js | 34 + .../metrics/node_modules/xml2js/lib/xml2js.js | 37 + .../metrics/node_modules/xml2js/package.json | 280 +++ .../node_modules/xmlbuilder/.npmignore | 5 + .../node_modules/xmlbuilder/CHANGELOG.md | 423 +++++ .../metrics/node_modules/xmlbuilder/LICENSE | 21 + .../metrics/node_modules/xmlbuilder/README.md | 85 + .../node_modules/xmlbuilder/lib/Utility.js | 73 + .../xmlbuilder/lib/XMLAttribute.js | 31 + .../node_modules/xmlbuilder/lib/XMLCData.js | 32 + .../node_modules/xmlbuilder/lib/XMLComment.js | 32 + .../xmlbuilder/lib/XMLDTDAttList.js | 50 + .../xmlbuilder/lib/XMLDTDElement.js | 35 + .../xmlbuilder/lib/XMLDTDEntity.js | 56 + .../xmlbuilder/lib/XMLDTDNotation.js | 37 + .../xmlbuilder/lib/XMLDeclaration.js | 40 + .../node_modules/xmlbuilder/lib/XMLDocType.js | 107 ++ .../xmlbuilder/lib/XMLDocument.js | 48 + .../xmlbuilder/lib/XMLDocumentCB.js | 402 ++++ .../node_modules/xmlbuilder/lib/XMLElement.js | 111 ++ .../node_modules/xmlbuilder/lib/XMLNode.js | 432 +++++ .../lib/XMLProcessingInstruction.js | 35 + .../node_modules/xmlbuilder/lib/XMLRaw.js | 32 + .../xmlbuilder/lib/XMLStreamWriter.js | 279 +++ .../xmlbuilder/lib/XMLStringWriter.js | 334 ++++ .../xmlbuilder/lib/XMLStringifier.js | 163 ++ .../node_modules/xmlbuilder/lib/XMLText.js | 32 + .../xmlbuilder/lib/XMLWriterBase.js | 90 + .../node_modules/xmlbuilder/lib/index.js | 53 + .../node_modules/xmlbuilder/package.json | 65 + scripts/metrics/package-lock.json | 30 + scripts/metrics/test-metrics.js | 415 +++++ scripts/metrics/test-metrics.json | 1 + 52 files changed, 12632 insertions(+) create mode 100644 scripts/metrics/node_modules/node-fetch/CHANGELOG.md create mode 100644 scripts/metrics/node_modules/node-fetch/LICENSE.md create mode 100644 scripts/metrics/node_modules/node-fetch/README.md create mode 100644 scripts/metrics/node_modules/node-fetch/browser.js create mode 100644 scripts/metrics/node_modules/node-fetch/lib/index.es.js create mode 100644 scripts/metrics/node_modules/node-fetch/lib/index.js create mode 100644 scripts/metrics/node_modules/node-fetch/lib/index.mjs create mode 100644 scripts/metrics/node_modules/node-fetch/package.json create mode 100644 
scripts/metrics/node_modules/sax/LICENSE create mode 100644 scripts/metrics/node_modules/sax/README.md create mode 100644 scripts/metrics/node_modules/sax/lib/sax.js create mode 100644 scripts/metrics/node_modules/sax/package.json create mode 100644 scripts/metrics/node_modules/xml2js/LICENSE create mode 100644 scripts/metrics/node_modules/xml2js/README.md create mode 100644 scripts/metrics/node_modules/xml2js/lib/bom.js create mode 100644 scripts/metrics/node_modules/xml2js/lib/builder.js create mode 100644 scripts/metrics/node_modules/xml2js/lib/defaults.js create mode 100644 scripts/metrics/node_modules/xml2js/lib/parser.js create mode 100644 scripts/metrics/node_modules/xml2js/lib/processors.js create mode 100644 scripts/metrics/node_modules/xml2js/lib/xml2js.js create mode 100644 scripts/metrics/node_modules/xml2js/package.json create mode 100644 scripts/metrics/node_modules/xmlbuilder/.npmignore create mode 100644 scripts/metrics/node_modules/xmlbuilder/CHANGELOG.md create mode 100644 scripts/metrics/node_modules/xmlbuilder/LICENSE create mode 100644 scripts/metrics/node_modules/xmlbuilder/README.md create mode 100644 scripts/metrics/node_modules/xmlbuilder/lib/Utility.js create mode 100644 scripts/metrics/node_modules/xmlbuilder/lib/XMLAttribute.js create mode 100644 scripts/metrics/node_modules/xmlbuilder/lib/XMLCData.js create mode 100644 scripts/metrics/node_modules/xmlbuilder/lib/XMLComment.js create mode 100644 scripts/metrics/node_modules/xmlbuilder/lib/XMLDTDAttList.js create mode 100644 scripts/metrics/node_modules/xmlbuilder/lib/XMLDTDElement.js create mode 100644 scripts/metrics/node_modules/xmlbuilder/lib/XMLDTDEntity.js create mode 100644 scripts/metrics/node_modules/xmlbuilder/lib/XMLDTDNotation.js create mode 100644 scripts/metrics/node_modules/xmlbuilder/lib/XMLDeclaration.js create mode 100644 scripts/metrics/node_modules/xmlbuilder/lib/XMLDocType.js create mode 100644 scripts/metrics/node_modules/xmlbuilder/lib/XMLDocument.js create mode 100644 scripts/metrics/node_modules/xmlbuilder/lib/XMLDocumentCB.js create mode 100644 scripts/metrics/node_modules/xmlbuilder/lib/XMLElement.js create mode 100644 scripts/metrics/node_modules/xmlbuilder/lib/XMLNode.js create mode 100644 scripts/metrics/node_modules/xmlbuilder/lib/XMLProcessingInstruction.js create mode 100644 scripts/metrics/node_modules/xmlbuilder/lib/XMLRaw.js create mode 100644 scripts/metrics/node_modules/xmlbuilder/lib/XMLStreamWriter.js create mode 100644 scripts/metrics/node_modules/xmlbuilder/lib/XMLStringWriter.js create mode 100644 scripts/metrics/node_modules/xmlbuilder/lib/XMLStringifier.js create mode 100644 scripts/metrics/node_modules/xmlbuilder/lib/XMLText.js create mode 100644 scripts/metrics/node_modules/xmlbuilder/lib/XMLWriterBase.js create mode 100644 scripts/metrics/node_modules/xmlbuilder/lib/index.js create mode 100644 scripts/metrics/node_modules/xmlbuilder/package.json create mode 100644 scripts/metrics/package-lock.json create mode 100755 scripts/metrics/test-metrics.js create mode 100644 scripts/metrics/test-metrics.json diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml index ad160602773..379d02015cb 100644 --- a/.buildkite/pipeline.yml +++ b/.buildkite/pipeline.yml @@ -306,6 +306,16 @@ steps: - "os=mojave" timeout: 60 + - wait: + continue_on_failure: true + + - command: | + cd scripts/metrics + node --max-old-space-size=4096 test-metrics.js + label: ":bar_chart: Test Metrics" + agents: + queue: "automation-apps-builder-fleet" + timeout: 10 - wait diff --git 
a/scripts/metrics/node_modules/node-fetch/CHANGELOG.md b/scripts/metrics/node_modules/node-fetch/CHANGELOG.md
new file mode 100644
index 00000000000..941b6a8d8b7
--- /dev/null
+++ b/scripts/metrics/node_modules/node-fetch/CHANGELOG.md
@@ -0,0 +1,260 @@
+
+Changelog
+=========
+
+
+# 2.x release
+
+## v2.5.0
+
+- Enhance: `Response` object now includes `redirected` property.
+- Enhance: `fetch()` now accepts third-party `Blob` implementation as body.
+- Other: disable `package-lock.json` generation as we never commit them.
+- Other: dev dependency update.
+- Other: readme update.
+
+## v2.4.1
+
+- Fix: `Blob` import rule for node < 10, as `Readable` isn't a named export.
+
+## v2.4.0
+
+- Enhance: added `Brotli` compression support (using node's zlib).
+- Enhance: updated `Blob` implementation per spec.
+- Fix: set content type automatically for `URLSearchParams`.
+- Fix: `Headers` now reject empty header names.
+- Fix: test cases, as node 12+ no longer accepts invalid header response.
+
+## v2.3.0
+
+- Enhance: added `AbortSignal` support, with README example.
+- Enhance: handle invalid `Location` header during redirect by rejecting them explicitly with `FetchError`.
+- Fix: update `browser.js` to support react-native environment, where `self` isn't available globally.
+
+## v2.2.1
+
+- Fix: `compress` flag shouldn't overwrite existing `Accept-Encoding` header.
+- Fix: multiple `import` rules, where `PassThrough` etc. doesn't have a named export when using node <10 and `--experimental-modules` flag.
+- Other: Better README.
+
+## v2.2.0
+
+- Enhance: Support all `ArrayBuffer` view types
+- Enhance: Support Web Workers
+- Enhance: Support Node.js' `--experimental-modules` mode; deprecate `.es.js` file
+- Fix: Add `__esModule` property to the exports object
+- Other: Better example in README for writing response to a file
+- Other: More tests for Agent
+
+## v2.1.2
+
+- Fix: allow `Body` methods to work on `ArrayBuffer`-backed `Body` objects
+- Fix: reject promise returned by `Body` methods when the accumulated `Buffer` exceeds the maximum size
+- Fix: support custom `Host` headers with any casing
+- Fix: support importing `fetch()` from TypeScript in `browser.js`
+- Fix: handle the redirect response body properly
+
+## v2.1.1
+
+Fix packaging errors in v2.1.0.
+
+## v2.1.0
+
+- Enhance: allow using ArrayBuffer as the `body` of a `fetch()` or `Request`
+- Fix: store HTTP headers of a `Headers` object internally with the given case, for compatibility with older servers that incorrectly treated header names in a case-sensitive manner
+- Fix: silently ignore invalid HTTP headers
+- Fix: handle HTTP redirect responses without a `Location` header just like non-redirect responses
+- Fix: include bodies when following a redirection when appropriate
+
+## v2.0.0
+
+This is a major release. Check [our upgrade guide](https://github.com/bitinn/node-fetch/blob/master/UPGRADE-GUIDE.md) for an overview on some key differences between v1 and v2.
+
+### General changes
+
+- Major: Node.js 0.10.x and 0.12.x support is dropped
+- Major: `require('node-fetch/lib/response')` etc.
is now unsupported; use `require('node-fetch').Response` or ES6 module imports
+- Enhance: start testing on Node.js v4.x, v6.x, v8.x LTS, as well as v9.x stable
+- Enhance: use Rollup to produce a distributed bundle (less memory overhead and faster startup)
+- Enhance: make `Object.prototype.toString()` on Headers, Requests, and Responses return correct class strings
+- Other: rewrite in ES2015 using Babel
+- Other: use Codecov for code coverage tracking
+- Other: update package.json script for npm 5
+- Other: `encoding` module is now optional (alpha.7)
+- Other: expose browser.js through package.json, avoid bundling mishaps (alpha.9)
+- Other: allow TypeScript to `import` node-fetch by exposing default (alpha.9)
+
+### HTTP requests
+
+- Major: overwrite user's `Content-Length` if we can be sure our information is correct (per spec)
+- Fix: errors in a response are caught before the body is accessed
+- Fix: support WHATWG URL objects, created by `whatwg-url` package or `require('url').URL` in Node.js 7+
+
+### Response and Request classes
+
+- Major: `response.text()` no longer attempts to detect encoding, instead always opting for UTF-8 (per spec); use `response.textConverted()` for the v1 behavior
+- Major: make `response.json()` throw error instead of returning an empty object on 204 no-content response (per spec; reverts behavior changed in v1.6.2)
+- Major: internal methods are no longer exposed
+- Major: throw error when a `GET` or `HEAD` Request is constructed with a non-null body (per spec)
+- Enhance: add `response.arrayBuffer()` (also applies to Requests)
+- Enhance: add experimental `response.blob()` (also applies to Requests)
+- Enhance: `URLSearchParams` is now accepted as a body
+- Enhance: wrap `response.json()` json parsing error as `FetchError`
+- Fix: fix Request and Response with `null` body
+
+### Headers class
+
+- Major: remove `headers.getAll()`; make `get()` return all headers delimited by commas (per spec)
+- Enhance: make Headers iterable
+- Enhance: make Headers constructor accept an array of tuples
+- Enhance: make sure header names and values are valid in HTTP
+- Fix: coerce Headers prototype function parameters to strings, where applicable
+
+### Documentation
+
+- Enhance: more comprehensive API docs
+- Enhance: add a list of default headers in README
+
+
+# 1.x release
+
+## backport releases (v1.7.0 and beyond)
+
+See [changelog on 1.x branch](https://github.com/bitinn/node-fetch/blob/1.x/CHANGELOG.md) for details.
+ +## v1.6.3 + +- Enhance: error handling document to explain `FetchError` design +- Fix: support `form-data` 2.x releases (requires `form-data` >= 2.1.0) + +## v1.6.2 + +- Enhance: minor document update +- Fix: response.json() returns empty object on 204 no-content response instead of throwing a syntax error + +## v1.6.1 + +- Fix: if `res.body` is a non-stream non-formdata object, we will call `body.toString` and send it as a string +- Fix: `counter` value is incorrectly set to `follow` value when wrapping Request instance +- Fix: documentation update + +## v1.6.0 + +- Enhance: added `res.buffer()` api for convenience, it returns body as a Node.js buffer +- Enhance: better old server support by handling raw deflate response +- Enhance: skip encoding detection for non-HTML/XML response +- Enhance: minor document update +- Fix: HEAD request doesn't need decompression, as body is empty +- Fix: `req.body` now accepts a Node.js buffer + +## v1.5.3 + +- Fix: handle 204 and 304 responses when body is empty but content-encoding is gzip/deflate +- Fix: allow resolving response and cloned response in any order +- Fix: avoid setting `content-length` when `form-data` body use streams +- Fix: send DELETE request with content-length when body is present +- Fix: allow any url when calling new Request, but still reject non-http(s) url in fetch + +## v1.5.2 + +- Fix: allow node.js core to handle keep-alive connection pool when passing a custom agent + +## v1.5.1 + +- Fix: redirect mode `manual` should work even when there is no redirection or broken redirection + +## v1.5.0 + +- Enhance: rejected promise now use custom `Error` (thx to @pekeler) +- Enhance: `FetchError` contains `err.type` and `err.code`, allows for better error handling (thx to @pekeler) +- Enhance: basic support for redirect mode `manual` and `error`, allows for location header extraction (thx to @jimmywarting for the initial PR) + +## v1.4.1 + +- Fix: wrapping Request instance with FormData body again should preserve the body as-is + +## v1.4.0 + +- Enhance: Request and Response now have `clone` method (thx to @kirill-konshin for the initial PR) +- Enhance: Request and Response now have proper string and buffer body support (thx to @kirill-konshin) +- Enhance: Body constructor has been refactored out (thx to @kirill-konshin) +- Enhance: Headers now has `forEach` method (thx to @tricoder42) +- Enhance: back to 100% code coverage +- Fix: better form-data support (thx to @item4) +- Fix: better character encoding detection under chunked encoding (thx to @dsuket for the initial PR) + +## v1.3.3 + +- Fix: make sure `Content-Length` header is set when body is string for POST/PUT/PATCH requests +- Fix: handle body stream error, for cases such as incorrect `Content-Encoding` header +- Fix: when following certain redirects, use `GET` on subsequent request per Fetch Spec +- Fix: `Request` and `Response` constructors now parse headers input using `Headers` + +## v1.3.2 + +- Enhance: allow auto detect of form-data input (no `FormData` spec on node.js, this is form-data specific feature) + +## v1.3.1 + +- Enhance: allow custom host header to be set (server-side only feature, as it's a forbidden header on client-side) + +## v1.3.0 + +- Enhance: now `fetch.Request` is exposed as well + +## v1.2.1 + +- Enhance: `Headers` now normalized `Number` value to `String`, prevent common mistakes + +## v1.2.0 + +- Enhance: now fetch.Headers and fetch.Response are exposed, making testing easier + +## v1.1.2 + +- Fix: `Headers` should only support `String` and 
`Array` properties, and ignore others + +## v1.1.1 + +- Enhance: now req.headers accept both plain object and `Headers` instance + +## v1.1.0 + +- Enhance: timeout now also applies to response body (in case of slow response) +- Fix: timeout is now cleared properly when fetch is done/has failed + +## v1.0.6 + +- Fix: less greedy content-type charset matching + +## v1.0.5 + +- Fix: when `follow = 0`, fetch should not follow redirect +- Enhance: update tests for better coverage +- Enhance: code formatting +- Enhance: clean up doc + +## v1.0.4 + +- Enhance: test iojs support +- Enhance: timeout attached to socket event only fire once per redirect + +## v1.0.3 + +- Fix: response size limit should reject large chunk +- Enhance: added character encoding detection for xml, such as rss/atom feed (encoding in DTD) + +## v1.0.2 + +- Fix: added res.ok per spec change + +## v1.0.0 + +- Enhance: better test coverage and doc + + +# 0.x release + +## v0.1 + +- Major: initial public release diff --git a/scripts/metrics/node_modules/node-fetch/LICENSE.md b/scripts/metrics/node_modules/node-fetch/LICENSE.md new file mode 100644 index 00000000000..660ffecb58b --- /dev/null +++ b/scripts/metrics/node_modules/node-fetch/LICENSE.md @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2016 David Frank + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
+ diff --git a/scripts/metrics/node_modules/node-fetch/README.md b/scripts/metrics/node_modules/node-fetch/README.md new file mode 100644 index 00000000000..48f4215e4e7 --- /dev/null +++ b/scripts/metrics/node_modules/node-fetch/README.md @@ -0,0 +1,538 @@ +node-fetch +========== + +[![npm version][npm-image]][npm-url] +[![build status][travis-image]][travis-url] +[![coverage status][codecov-image]][codecov-url] +[![install size][install-size-image]][install-size-url] + +A light-weight module that brings `window.fetch` to Node.js + +(We are looking for [v2 maintainers and collaborators](https://github.com/bitinn/node-fetch/issues/567)) + + + +- [Motivation](#motivation) +- [Features](#features) +- [Difference from client-side fetch](#difference-from-client-side-fetch) +- [Installation](#installation) +- [Loading and configuring the module](#loading-and-configuring-the-module) +- [Common Usage](#common-usage) + - [Plain text or HTML](#plain-text-or-html) + - [JSON](#json) + - [Simple Post](#simple-post) + - [Post with JSON](#post-with-json) + - [Post with form parameters](#post-with-form-parameters) + - [Handling exceptions](#handling-exceptions) + - [Handling client and server errors](#handling-client-and-server-errors) +- [Advanced Usage](#advanced-usage) + - [Streams](#streams) + - [Buffer](#buffer) + - [Accessing Headers and other Meta data](#accessing-headers-and-other-meta-data) + - [Post data using a file stream](#post-data-using-a-file-stream) + - [Post with form-data (detect multipart)](#post-with-form-data-detect-multipart) + - [Request cancellation with AbortSignal](#request-cancellation-with-abortsignal) +- [API](#api) + - [fetch(url[, options])](#fetchurl-options) + - [Options](#options) + - [Class: Request](#class-request) + - [Class: Response](#class-response) + - [Class: Headers](#class-headers) + - [Interface: Body](#interface-body) + - [Class: FetchError](#class-fetcherror) +- [License](#license) +- [Acknowledgement](#acknowledgement) + + + +## Motivation + +Instead of implementing `XMLHttpRequest` in Node.js to run browser-specific [Fetch polyfill](https://github.com/github/fetch), why not go from native `http` to `fetch` API directly? Hence `node-fetch`, minimal code for a `window.fetch` compatible API on Node.js runtime. + +See Matt Andrews' [isomorphic-fetch](https://github.com/matthew-andrews/isomorphic-fetch) or Leonardo Quixada's [cross-fetch](https://github.com/lquixada/cross-fetch) for isomorphic usage (exports `node-fetch` for server-side, `whatwg-fetch` for client-side). + +## Features + +- Stay consistent with `window.fetch` API. +- Make conscious trade-off when following [WHATWG fetch spec][whatwg-fetch] and [stream spec](https://streams.spec.whatwg.org/) implementation details, document known differences. +- Use native promise, but allow substituting it with [insert your favorite promise library]. +- Use native Node streams for body, on both request and response. +- Decode content encoding (gzip/deflate) properly, and convert string output (such as `res.text()` and `res.json()`) to UTF-8 automatically. +- Useful extensions such as timeout, redirect limit, response size limit, [explicit errors](ERROR-HANDLING.md) for troubleshooting. + +## Difference from client-side fetch + +- See [Known Differences](LIMITS.md) for details. +- If you happen to use a missing feature that `window.fetch` offers, feel free to open an issue. +- Pull requests are welcomed too! 
+ +## Installation + +Current stable release (`2.x`) + +```sh +$ npm install node-fetch --save +``` + +## Loading and configuring the module +We suggest you load the module via `require`, pending the stabalizing of es modules in node: +```js +const fetch = require('node-fetch'); +``` + +If you are using a Promise library other than native, set it through fetch.Promise: +```js +const Bluebird = require('bluebird'); + +fetch.Promise = Bluebird; +``` + +## Common Usage + +NOTE: The documentation below is up-to-date with `2.x` releases, [see `1.x` readme](https://github.com/bitinn/node-fetch/blob/1.x/README.md), [changelog](https://github.com/bitinn/node-fetch/blob/1.x/CHANGELOG.md) and [2.x upgrade guide](UPGRADE-GUIDE.md) for the differences. + +#### Plain text or HTML +```js +fetch('https://github.com/') + .then(res => res.text()) + .then(body => console.log(body)); +``` + +#### JSON + +```js + +fetch('https://api.github.com/users/github') + .then(res => res.json()) + .then(json => console.log(json)); +``` + +#### Simple Post +```js +fetch('https://httpbin.org/post', { method: 'POST', body: 'a=1' }) + .then(res => res.json()) // expecting a json response + .then(json => console.log(json)); +``` + +#### Post with JSON + +```js +const body = { a: 1 }; + +fetch('https://httpbin.org/post', { + method: 'post', + body: JSON.stringify(body), + headers: { 'Content-Type': 'application/json' }, + }) + .then(res => res.json()) + .then(json => console.log(json)); +``` + +#### Post with form parameters +`URLSearchParams` is available in Node.js as of v7.5.0. See [official documentation](https://nodejs.org/api/url.html#url_class_urlsearchparams) for more usage methods. + +NOTE: The `Content-Type` header is only set automatically to `x-www-form-urlencoded` when an instance of `URLSearchParams` is given as such: + +```js +const { URLSearchParams } = require('url'); + +const params = new URLSearchParams(); +params.append('a', 1); + +fetch('https://httpbin.org/post', { method: 'POST', body: params }) + .then(res => res.json()) + .then(json => console.log(json)); +``` + +#### Handling exceptions +NOTE: 3xx-5xx responses are *NOT* exceptions, and should be handled in `then()`, see the next section. + +Adding a catch to the fetch promise chain will catch *all* exceptions, such as errors originating from node core libraries, like network errors, and operational errors which are instances of FetchError. See the [error handling document](ERROR-HANDLING.md) for more details. + +```js +fetch('https://domain.invalid/') + .catch(err => console.error(err)); +``` + +#### Handling client and server errors +It is common to create a helper function to check that the response contains no client (4xx) or server (5xx) error responses: + +```js +function checkStatus(res) { + if (res.ok) { // res.status >= 200 && res.status < 300 + return res; + } else { + throw MyCustomError(res.statusText); + } +} + +fetch('https://httpbin.org/status/400') + .then(checkStatus) + .then(res => console.log('will not get here...')) +``` + +## Advanced Usage + +#### Streams +The "Node.js way" is to use streams when possible: + +```js +fetch('https://assets-cdn.github.com/images/modules/logos_page/Octocat.png') + .then(res => { + const dest = fs.createWriteStream('./octocat.png'); + res.body.pipe(dest); + }); +``` + +#### Buffer +If you prefer to cache binary data in full, use buffer(). 
(NOTE: buffer() is a `node-fetch` only API) + +```js +const fileType = require('file-type'); + +fetch('https://assets-cdn.github.com/images/modules/logos_page/Octocat.png') + .then(res => res.buffer()) + .then(buffer => fileType(buffer)) + .then(type => { /* ... */ }); +``` + +#### Accessing Headers and other Meta data +```js +fetch('https://github.com/') + .then(res => { + console.log(res.ok); + console.log(res.status); + console.log(res.statusText); + console.log(res.headers.raw()); + console.log(res.headers.get('content-type')); + }); +``` + +#### Post data using a file stream + +```js +const { createReadStream } = require('fs'); + +const stream = createReadStream('input.txt'); + +fetch('https://httpbin.org/post', { method: 'POST', body: stream }) + .then(res => res.json()) + .then(json => console.log(json)); +``` + +#### Post with form-data (detect multipart) + +```js +const FormData = require('form-data'); + +const form = new FormData(); +form.append('a', 1); + +fetch('https://httpbin.org/post', { method: 'POST', body: form }) + .then(res => res.json()) + .then(json => console.log(json)); + +// OR, using custom headers +// NOTE: getHeaders() is non-standard API + +const form = new FormData(); +form.append('a', 1); + +const options = { + method: 'POST', + body: form, + headers: form.getHeaders() +} + +fetch('https://httpbin.org/post', options) + .then(res => res.json()) + .then(json => console.log(json)); +``` + +#### Request cancellation with AbortSignal + +> NOTE: You may only cancel streamed requests on Node >= v8.0.0 + +You may cancel requests with `AbortController`. A suggested implementation is [`abort-controller`](https://www.npmjs.com/package/abort-controller). + +An example of timing out a request after 150ms could be achieved as follows: + +```js +import AbortController from 'abort-controller'; + +const controller = new AbortController(); +const timeout = setTimeout( + () => { controller.abort(); }, + 150, +); + +fetch(url, { signal: controller.signal }) + .then(res => res.json()) + .then( + data => { + useData(data) + }, + err => { + if (err.name === 'AbortError') { + // request was aborted + } + }, + ) + .finally(() => { + clearTimeout(timeout); + }); +``` + +See [test cases](https://github.com/bitinn/node-fetch/blob/master/test/test.js) for more examples. + + +## API + +### fetch(url[, options]) + +- `url` A string representing the URL for fetching +- `options` [Options](#fetch-options) for the HTTP(S) request +- Returns: Promise<[Response](#class-response)> + +Perform an HTTP(S) fetch. + +`url` should be an absolute url, such as `https://example.com/`. A path-relative URL (`/file/under/root`) or protocol-relative URL (`//can-be-http-or-https.com/`) will result in a rejected promise. + + +### Options + +The default values are shown after each option key. + +```js +{ + // These properties are part of the Fetch Standard + method: 'GET', + headers: {}, // request headers. format is the identical to that accepted by the Headers constructor (see below) + body: null, // request body. can be null, a string, a Buffer, a Blob, or a Node.js Readable stream + redirect: 'follow', // set to `manual` to extract redirect headers, `error` to reject redirect + signal: null, // pass an instance of AbortSignal to optionally abort requests + + // The following properties are node-fetch extensions + follow: 20, // maximum redirect count. 0 to not follow redirect + timeout: 0, // req/res timeout in ms, it resets on redirect. 0 to disable (OS limit applies). Signal is recommended instead. 
+ compress: true, // support gzip/deflate content encoding. false to disable + size: 0, // maximum response body size in bytes. 0 to disable + agent: null // http(s).Agent instance, allows custom proxy, certificate, dns lookup etc. +} +``` + +##### Default Headers + +If no values are set, the following request headers will be sent automatically: + +Header | Value +------------------- | -------------------------------------------------------- +`Accept-Encoding` | `gzip,deflate` _(when `options.compress === true`)_ +`Accept` | `*/*` +`Connection` | `close` _(when no `options.agent` is present)_ +`Content-Length` | _(automatically calculated, if possible)_ +`Transfer-Encoding` | `chunked` _(when `req.body` is a stream)_ +`User-Agent` | `node-fetch/1.0 (+https://github.com/bitinn/node-fetch)` + + +### Class: Request + +An HTTP(S) request containing information about URL, method, headers, and the body. This class implements the [Body](#iface-body) interface. + +Due to the nature of Node.js, the following properties are not implemented at this moment: + +- `type` +- `destination` +- `referrer` +- `referrerPolicy` +- `mode` +- `credentials` +- `cache` +- `integrity` +- `keepalive` + +The following node-fetch extension properties are provided: + +- `follow` +- `compress` +- `counter` +- `agent` + +See [options](#fetch-options) for exact meaning of these extensions. + +#### new Request(input[, options]) + +*(spec-compliant)* + +- `input` A string representing a URL, or another `Request` (which will be cloned) +- `options` [Options][#fetch-options] for the HTTP(S) request + +Constructs a new `Request` object. The constructor is identical to that in the [browser](https://developer.mozilla.org/en-US/docs/Web/API/Request/Request). + +In most cases, directly `fetch(url, options)` is simpler than creating a `Request` object. + + +### Class: Response + +An HTTP(S) response. This class implements the [Body](#iface-body) interface. + +The following properties are not implemented in node-fetch at this moment: + +- `Response.error()` +- `Response.redirect()` +- `type` +- `trailer` + +#### new Response([body[, options]]) + +*(spec-compliant)* + +- `body` A string or [Readable stream][node-readable] +- `options` A [`ResponseInit`][response-init] options dictionary + +Constructs a new `Response` object. The constructor is identical to that in the [browser](https://developer.mozilla.org/en-US/docs/Web/API/Response/Response). + +Because Node.js does not implement service workers (for which this class was designed), one rarely has to construct a `Response` directly. + +#### response.ok + +*(spec-compliant)* + +Convenience property representing if the request ended normally. Will evaluate to true if the response status was greater than or equal to 200 but smaller than 300. + +#### response.redirected + +*(spec-compliant)* + +Convenience property representing if the request has been redirected at least once. Will evaluate to true if the internal redirect counter is greater than 0. + + +### Class: Headers + +This class allows manipulating and iterating over a set of HTTP headers. All methods specified in the [Fetch Standard][whatwg-fetch] are implemented. + +#### new Headers([init]) + +*(spec-compliant)* + +- `init` Optional argument to pre-fill the `Headers` object + +Construct a new `Headers` object. `init` can be either `null`, a `Headers` object, an key-value map object, or any iterable object. 
+ +```js +// Example adapted from https://fetch.spec.whatwg.org/#example-headers-class + +const meta = { + 'Content-Type': 'text/xml', + 'Breaking-Bad': '<3' +}; +const headers = new Headers(meta); + +// The above is equivalent to +const meta = [ + [ 'Content-Type', 'text/xml' ], + [ 'Breaking-Bad', '<3' ] +]; +const headers = new Headers(meta); + +// You can in fact use any iterable objects, like a Map or even another Headers +const meta = new Map(); +meta.set('Content-Type', 'text/xml'); +meta.set('Breaking-Bad', '<3'); +const headers = new Headers(meta); +const copyOfHeaders = new Headers(headers); +``` + + +### Interface: Body + +`Body` is an abstract interface with methods that are applicable to both `Request` and `Response` classes. + +The following methods are not yet implemented in node-fetch at this moment: + +- `formData()` + +#### body.body + +*(deviation from spec)* + +* Node.js [`Readable` stream][node-readable] + +The data encapsulated in the `Body` object. Note that while the [Fetch Standard][whatwg-fetch] requires the property to always be a WHATWG `ReadableStream`, in node-fetch it is a Node.js [`Readable` stream][node-readable]. + +#### body.bodyUsed + +*(spec-compliant)* + +* `Boolean` + +A boolean property for if this body has been consumed. Per spec, a consumed body cannot be used again. + +#### body.arrayBuffer() +#### body.blob() +#### body.json() +#### body.text() + +*(spec-compliant)* + +* Returns: Promise + +Consume the body and return a promise that will resolve to one of these formats. + +#### body.buffer() + +*(node-fetch extension)* + +* Returns: Promise<Buffer> + +Consume the body and return a promise that will resolve to a Buffer. + +#### body.textConverted() + +*(node-fetch extension)* + +* Returns: Promise<String> + +Identical to `body.text()`, except instead of always converting to UTF-8, encoding sniffing will be performed and text converted to UTF-8, if possible. + +(This API requires an optional dependency on npm package [encoding](https://www.npmjs.com/package/encoding), which you need to install manually. `webpack` users may see [a warning message](https://github.com/bitinn/node-fetch/issues/412#issuecomment-379007792) due to this optional dependency.) + + +### Class: FetchError + +*(node-fetch extension)* + +An operational error in the fetching process. See [ERROR-HANDLING.md][] for more info. + + +### Class: AbortError + +*(node-fetch extension)* + +An Error thrown when the request is aborted in response to an `AbortSignal`'s `abort` event. It has a `name` property of `AbortError`. See [ERROR-HANDLING.MD][] for more info. + +## Acknowledgement + +Thanks to [github/fetch](https://github.com/github/fetch) for providing a solid implementation reference. + +`node-fetch` v1 was maintained by [@bitinn](https://github.com/bitinn); v2 was maintained by [@TimothyGu](https://github.com/timothygu), [@bitinn](https://github.com/bitinn) and [@jimmywarting](https://github.com/jimmywarting); v2 readme is written by [@jkantr](https://github.com/jkantr). 
+ +## License + +MIT + +[npm-image]: https://flat.badgen.net/npm/v/node-fetch +[npm-url]: https://www.npmjs.com/package/node-fetch +[travis-image]: https://flat.badgen.net/travis/bitinn/node-fetch +[travis-url]: https://travis-ci.org/bitinn/node-fetch +[codecov-image]: https://flat.badgen.net/codecov/c/github/bitinn/node-fetch/master +[codecov-url]: https://codecov.io/gh/bitinn/node-fetch +[install-size-image]: https://flat.badgen.net/packagephobia/install/node-fetch +[install-size-url]: https://packagephobia.now.sh/result?p=node-fetch +[whatwg-fetch]: https://fetch.spec.whatwg.org/ +[response-init]: https://fetch.spec.whatwg.org/#responseinit +[node-readable]: https://nodejs.org/api/stream.html#stream_readable_streams +[mdn-headers]: https://developer.mozilla.org/en-US/docs/Web/API/Headers +[LIMITS.md]: https://github.com/bitinn/node-fetch/blob/master/LIMITS.md +[ERROR-HANDLING.md]: https://github.com/bitinn/node-fetch/blob/master/ERROR-HANDLING.md +[UPGRADE-GUIDE.md]: https://github.com/bitinn/node-fetch/blob/master/UPGRADE-GUIDE.md diff --git a/scripts/metrics/node_modules/node-fetch/browser.js b/scripts/metrics/node_modules/node-fetch/browser.js new file mode 100644 index 00000000000..0ad5de004c4 --- /dev/null +++ b/scripts/metrics/node_modules/node-fetch/browser.js @@ -0,0 +1,23 @@ +"use strict"; + +// ref: https://github.com/tc39/proposal-global +var getGlobal = function () { + // the only reliable means to get the global object is + // `Function('return this')()` + // However, this causes CSP violations in Chrome apps. + if (typeof self !== 'undefined') { return self; } + if (typeof window !== 'undefined') { return window; } + if (typeof global !== 'undefined') { return global; } + throw new Error('unable to locate global object'); +} + +var global = getGlobal(); + +module.exports = exports = global.fetch; + +// Needed for TypeScript and Webpack. +exports.default = global.fetch.bind(global); + +exports.Headers = global.Headers; +exports.Request = global.Request; +exports.Response = global.Response; \ No newline at end of file diff --git a/scripts/metrics/node_modules/node-fetch/lib/index.es.js b/scripts/metrics/node_modules/node-fetch/lib/index.es.js new file mode 100644 index 00000000000..20ab807872f --- /dev/null +++ b/scripts/metrics/node_modules/node-fetch/lib/index.es.js @@ -0,0 +1,1631 @@ +process.emitWarning("The .es.js file is deprecated. Use .mjs instead."); + +import Stream from 'stream'; +import http from 'http'; +import Url from 'url'; +import https from 'https'; +import zlib from 'zlib'; + +// Based on https://github.com/tmpvar/jsdom/blob/aa85b2abf07766ff7bf5c1f6daafb3726f2f2db5/lib/jsdom/living/blob.js + +// fix for "Readable" isn't a named export issue +const Readable = Stream.Readable; + +const BUFFER = Symbol('buffer'); +const TYPE = Symbol('type'); + +class Blob { + constructor() { + this[TYPE] = ''; + + const blobParts = arguments[0]; + const options = arguments[1]; + + const buffers = []; + let size = 0; + + if (blobParts) { + const a = blobParts; + const length = Number(a.length); + for (let i = 0; i < length; i++) { + const element = a[i]; + let buffer; + if (element instanceof Buffer) { + buffer = element; + } else if (ArrayBuffer.isView(element)) { + buffer = Buffer.from(element.buffer, element.byteOffset, element.byteLength); + } else if (element instanceof ArrayBuffer) { + buffer = Buffer.from(element); + } else if (element instanceof Blob) { + buffer = element[BUFFER]; + } else { + buffer = Buffer.from(typeof element === 'string' ? 
element : String(element)); + } + size += buffer.length; + buffers.push(buffer); + } + } + + this[BUFFER] = Buffer.concat(buffers); + + let type = options && options.type !== undefined && String(options.type).toLowerCase(); + if (type && !/[^\u0020-\u007E]/.test(type)) { + this[TYPE] = type; + } + } + get size() { + return this[BUFFER].length; + } + get type() { + return this[TYPE]; + } + text() { + return Promise.resolve(this[BUFFER].toString()); + } + arrayBuffer() { + const buf = this[BUFFER]; + const ab = buf.buffer.slice(buf.byteOffset, buf.byteOffset + buf.byteLength); + return Promise.resolve(ab); + } + stream() { + const readable = new Readable(); + readable._read = function () {}; + readable.push(this[BUFFER]); + readable.push(null); + return readable; + } + toString() { + return '[object Blob]'; + } + slice() { + const size = this.size; + + const start = arguments[0]; + const end = arguments[1]; + let relativeStart, relativeEnd; + if (start === undefined) { + relativeStart = 0; + } else if (start < 0) { + relativeStart = Math.max(size + start, 0); + } else { + relativeStart = Math.min(start, size); + } + if (end === undefined) { + relativeEnd = size; + } else if (end < 0) { + relativeEnd = Math.max(size + end, 0); + } else { + relativeEnd = Math.min(end, size); + } + const span = Math.max(relativeEnd - relativeStart, 0); + + const buffer = this[BUFFER]; + const slicedBuffer = buffer.slice(relativeStart, relativeStart + span); + const blob = new Blob([], { type: arguments[2] }); + blob[BUFFER] = slicedBuffer; + return blob; + } +} + +Object.defineProperties(Blob.prototype, { + size: { enumerable: true }, + type: { enumerable: true }, + slice: { enumerable: true } +}); + +Object.defineProperty(Blob.prototype, Symbol.toStringTag, { + value: 'Blob', + writable: false, + enumerable: false, + configurable: true +}); + +/** + * fetch-error.js + * + * FetchError interface for operational errors + */ + +/** + * Create FetchError instance + * + * @param String message Error message for human + * @param String type Error type for machine + * @param String systemError For Node.js system error + * @return FetchError + */ +function FetchError(message, type, systemError) { + Error.call(this, message); + + this.message = message; + this.type = type; + + // when err.type is `system`, err.code contains system error code + if (systemError) { + this.code = this.errno = systemError.code; + } + + // hide custom error implementation details from end-users + Error.captureStackTrace(this, this.constructor); +} + +FetchError.prototype = Object.create(Error.prototype); +FetchError.prototype.constructor = FetchError; +FetchError.prototype.name = 'FetchError'; + +let convert; +try { + convert = require('encoding').convert; +} catch (e) {} + +const INTERNALS = Symbol('Body internals'); + +// fix an issue where "PassThrough" isn't a named export for node <10 +const PassThrough = Stream.PassThrough; + +/** + * Body mixin + * + * Ref: https://fetch.spec.whatwg.org/#body + * + * @param Stream body Readable stream + * @param Object opts Response options + * @return Void + */ +function Body(body) { + var _this = this; + + var _ref = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : {}, + _ref$size = _ref.size; + + let size = _ref$size === undefined ? 0 : _ref$size; + var _ref$timeout = _ref.timeout; + let timeout = _ref$timeout === undefined ? 
0 : _ref$timeout; + + if (body == null) { + // body is undefined or null + body = null; + } else if (isURLSearchParams(body)) { + // body is a URLSearchParams + body = Buffer.from(body.toString()); + } else if (isBlob(body)) ; else if (Buffer.isBuffer(body)) ; else if (Object.prototype.toString.call(body) === '[object ArrayBuffer]') { + // body is ArrayBuffer + body = Buffer.from(body); + } else if (ArrayBuffer.isView(body)) { + // body is ArrayBufferView + body = Buffer.from(body.buffer, body.byteOffset, body.byteLength); + } else if (body instanceof Stream) ; else { + // none of the above + // coerce to string then buffer + body = Buffer.from(String(body)); + } + this[INTERNALS] = { + body, + disturbed: false, + error: null + }; + this.size = size; + this.timeout = timeout; + + if (body instanceof Stream) { + body.on('error', function (err) { + const error = err.name === 'AbortError' ? err : new FetchError(`Invalid response body while trying to fetch ${_this.url}: ${err.message}`, 'system', err); + _this[INTERNALS].error = error; + }); + } +} + +Body.prototype = { + get body() { + return this[INTERNALS].body; + }, + + get bodyUsed() { + return this[INTERNALS].disturbed; + }, + + /** + * Decode response as ArrayBuffer + * + * @return Promise + */ + arrayBuffer() { + return consumeBody.call(this).then(function (buf) { + return buf.buffer.slice(buf.byteOffset, buf.byteOffset + buf.byteLength); + }); + }, + + /** + * Return raw response as Blob + * + * @return Promise + */ + blob() { + let ct = this.headers && this.headers.get('content-type') || ''; + return consumeBody.call(this).then(function (buf) { + return Object.assign( + // Prevent copying + new Blob([], { + type: ct.toLowerCase() + }), { + [BUFFER]: buf + }); + }); + }, + + /** + * Decode response as json + * + * @return Promise + */ + json() { + var _this2 = this; + + return consumeBody.call(this).then(function (buffer) { + try { + return JSON.parse(buffer.toString()); + } catch (err) { + return Body.Promise.reject(new FetchError(`invalid json response body at ${_this2.url} reason: ${err.message}`, 'invalid-json')); + } + }); + }, + + /** + * Decode response as text + * + * @return Promise + */ + text() { + return consumeBody.call(this).then(function (buffer) { + return buffer.toString(); + }); + }, + + /** + * Decode response as buffer (non-spec api) + * + * @return Promise + */ + buffer() { + return consumeBody.call(this); + }, + + /** + * Decode response as text, while automatically detecting the encoding and + * trying to decode to UTF-8 (non-spec api) + * + * @return Promise + */ + textConverted() { + var _this3 = this; + + return consumeBody.call(this).then(function (buffer) { + return convertBody(buffer, _this3.headers); + }); + } +}; + +// In browsers, all properties are enumerable. +Object.defineProperties(Body.prototype, { + body: { enumerable: true }, + bodyUsed: { enumerable: true }, + arrayBuffer: { enumerable: true }, + blob: { enumerable: true }, + json: { enumerable: true }, + text: { enumerable: true } +}); + +Body.mixIn = function (proto) { + for (const name of Object.getOwnPropertyNames(Body.prototype)) { + // istanbul ignore else: future proof + if (!(name in proto)) { + const desc = Object.getOwnPropertyDescriptor(Body.prototype, name); + Object.defineProperty(proto, name, desc); + } + } +}; + +/** + * Consume and convert an entire Body to a Buffer. 
+ * + * Ref: https://fetch.spec.whatwg.org/#concept-body-consume-body + * + * @return Promise + */ +function consumeBody() { + var _this4 = this; + + if (this[INTERNALS].disturbed) { + return Body.Promise.reject(new TypeError(`body used already for: ${this.url}`)); + } + + this[INTERNALS].disturbed = true; + + if (this[INTERNALS].error) { + return Body.Promise.reject(this[INTERNALS].error); + } + + let body = this.body; + + // body is null + if (body === null) { + return Body.Promise.resolve(Buffer.alloc(0)); + } + + // body is blob + if (isBlob(body)) { + body = body.stream(); + } + + // body is buffer + if (Buffer.isBuffer(body)) { + return Body.Promise.resolve(body); + } + + // istanbul ignore if: should never happen + if (!(body instanceof Stream)) { + return Body.Promise.resolve(Buffer.alloc(0)); + } + + // body is stream + // get ready to actually consume the body + let accum = []; + let accumBytes = 0; + let abort = false; + + return new Body.Promise(function (resolve, reject) { + let resTimeout; + + // allow timeout on slow response body + if (_this4.timeout) { + resTimeout = setTimeout(function () { + abort = true; + reject(new FetchError(`Response timeout while trying to fetch ${_this4.url} (over ${_this4.timeout}ms)`, 'body-timeout')); + }, _this4.timeout); + } + + // handle stream errors + body.on('error', function (err) { + if (err.name === 'AbortError') { + // if the request was aborted, reject with this Error + abort = true; + reject(err); + } else { + // other errors, such as incorrect content-encoding + reject(new FetchError(`Invalid response body while trying to fetch ${_this4.url}: ${err.message}`, 'system', err)); + } + }); + + body.on('data', function (chunk) { + if (abort || chunk === null) { + return; + } + + if (_this4.size && accumBytes + chunk.length > _this4.size) { + abort = true; + reject(new FetchError(`content size at ${_this4.url} over limit: ${_this4.size}`, 'max-size')); + return; + } + + accumBytes += chunk.length; + accum.push(chunk); + }); + + body.on('end', function () { + if (abort) { + return; + } + + clearTimeout(resTimeout); + + try { + resolve(Buffer.concat(accum, accumBytes)); + } catch (err) { + // handle streams that have accumulated too much data (issue #414) + reject(new FetchError(`Could not create Buffer from response body for ${_this4.url}: ${err.message}`, 'system', err)); + } + }); + }); +} + +/** + * Detect buffer encoding and convert to target encoding + * ref: http://www.w3.org/TR/2011/WD-html5-20110113/parsing.html#determining-the-character-encoding + * + * @param Buffer buffer Incoming buffer + * @param String encoding Target encoding + * @return String + */ +function convertBody(buffer, headers) { + if (typeof convert !== 'function') { + throw new Error('The package `encoding` must be installed to use the textConverted() function'); + } + + const ct = headers.get('content-type'); + let charset = 'utf-8'; + let res, str; + + // header + if (ct) { + res = /charset=([^;]*)/i.exec(ct); + } + + // no charset in content type, peek at response body for at most 1024 bytes + str = buffer.slice(0, 1024).toString(); + + // html5 + if (!res && str) { + res = / 0 && arguments[0] !== undefined ? 
arguments[0] : undefined; + + this[MAP] = Object.create(null); + + if (init instanceof Headers) { + const rawHeaders = init.raw(); + const headerNames = Object.keys(rawHeaders); + + for (const headerName of headerNames) { + for (const value of rawHeaders[headerName]) { + this.append(headerName, value); + } + } + + return; + } + + // We don't worry about converting prop to ByteString here as append() + // will handle it. + if (init == null) ; else if (typeof init === 'object') { + const method = init[Symbol.iterator]; + if (method != null) { + if (typeof method !== 'function') { + throw new TypeError('Header pairs must be iterable'); + } + + // sequence> + // Note: per spec we have to first exhaust the lists then process them + const pairs = []; + for (const pair of init) { + if (typeof pair !== 'object' || typeof pair[Symbol.iterator] !== 'function') { + throw new TypeError('Each header pair must be iterable'); + } + pairs.push(Array.from(pair)); + } + + for (const pair of pairs) { + if (pair.length !== 2) { + throw new TypeError('Each header pair must be a name/value tuple'); + } + this.append(pair[0], pair[1]); + } + } else { + // record + for (const key of Object.keys(init)) { + const value = init[key]; + this.append(key, value); + } + } + } else { + throw new TypeError('Provided initializer must be an object'); + } + } + + /** + * Return combined header value given name + * + * @param String name Header name + * @return Mixed + */ + get(name) { + name = `${name}`; + validateName(name); + const key = find(this[MAP], name); + if (key === undefined) { + return null; + } + + return this[MAP][key].join(', '); + } + + /** + * Iterate over all headers + * + * @param Function callback Executed for each item with parameters (value, name, thisArg) + * @param Boolean thisArg `this` context for callback function + * @return Void + */ + forEach(callback) { + let thisArg = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : undefined; + + let pairs = getHeaders(this); + let i = 0; + while (i < pairs.length) { + var _pairs$i = pairs[i]; + const name = _pairs$i[0], + value = _pairs$i[1]; + + callback.call(thisArg, value, name, this); + pairs = getHeaders(this); + i++; + } + } + + /** + * Overwrite header values given name + * + * @param String name Header name + * @param String value Header value + * @return Void + */ + set(name, value) { + name = `${name}`; + value = `${value}`; + validateName(name); + validateValue(value); + const key = find(this[MAP], name); + this[MAP][key !== undefined ? 
key : name] = [value]; + } + + /** + * Append a value onto existing header + * + * @param String name Header name + * @param String value Header value + * @return Void + */ + append(name, value) { + name = `${name}`; + value = `${value}`; + validateName(name); + validateValue(value); + const key = find(this[MAP], name); + if (key !== undefined) { + this[MAP][key].push(value); + } else { + this[MAP][name] = [value]; + } + } + + /** + * Check for header name existence + * + * @param String name Header name + * @return Boolean + */ + has(name) { + name = `${name}`; + validateName(name); + return find(this[MAP], name) !== undefined; + } + + /** + * Delete all header values given name + * + * @param String name Header name + * @return Void + */ + delete(name) { + name = `${name}`; + validateName(name); + const key = find(this[MAP], name); + if (key !== undefined) { + delete this[MAP][key]; + } + } + + /** + * Return raw headers (non-spec api) + * + * @return Object + */ + raw() { + return this[MAP]; + } + + /** + * Get an iterator on keys. + * + * @return Iterator + */ + keys() { + return createHeadersIterator(this, 'key'); + } + + /** + * Get an iterator on values. + * + * @return Iterator + */ + values() { + return createHeadersIterator(this, 'value'); + } + + /** + * Get an iterator on entries. + * + * This is the default iterator of the Headers object. + * + * @return Iterator + */ + [Symbol.iterator]() { + return createHeadersIterator(this, 'key+value'); + } +} +Headers.prototype.entries = Headers.prototype[Symbol.iterator]; + +Object.defineProperty(Headers.prototype, Symbol.toStringTag, { + value: 'Headers', + writable: false, + enumerable: false, + configurable: true +}); + +Object.defineProperties(Headers.prototype, { + get: { enumerable: true }, + forEach: { enumerable: true }, + set: { enumerable: true }, + append: { enumerable: true }, + has: { enumerable: true }, + delete: { enumerable: true }, + keys: { enumerable: true }, + values: { enumerable: true }, + entries: { enumerable: true } +}); + +function getHeaders(headers) { + let kind = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : 'key+value'; + + const keys = Object.keys(headers[MAP]).sort(); + return keys.map(kind === 'key' ? function (k) { + return k.toLowerCase(); + } : kind === 'value' ? 
function (k) { + return headers[MAP][k].join(', '); + } : function (k) { + return [k.toLowerCase(), headers[MAP][k].join(', ')]; + }); +} + +const INTERNAL = Symbol('internal'); + +function createHeadersIterator(target, kind) { + const iterator = Object.create(HeadersIteratorPrototype); + iterator[INTERNAL] = { + target, + kind, + index: 0 + }; + return iterator; +} + +const HeadersIteratorPrototype = Object.setPrototypeOf({ + next() { + // istanbul ignore if + if (!this || Object.getPrototypeOf(this) !== HeadersIteratorPrototype) { + throw new TypeError('Value of `this` is not a HeadersIterator'); + } + + var _INTERNAL = this[INTERNAL]; + const target = _INTERNAL.target, + kind = _INTERNAL.kind, + index = _INTERNAL.index; + + const values = getHeaders(target, kind); + const len = values.length; + if (index >= len) { + return { + value: undefined, + done: true + }; + } + + this[INTERNAL].index = index + 1; + + return { + value: values[index], + done: false + }; + } +}, Object.getPrototypeOf(Object.getPrototypeOf([][Symbol.iterator]()))); + +Object.defineProperty(HeadersIteratorPrototype, Symbol.toStringTag, { + value: 'HeadersIterator', + writable: false, + enumerable: false, + configurable: true +}); + +/** + * Export the Headers object in a form that Node.js can consume. + * + * @param Headers headers + * @return Object + */ +function exportNodeCompatibleHeaders(headers) { + const obj = Object.assign({ __proto__: null }, headers[MAP]); + + // http.request() only supports string as Host header. This hack makes + // specifying custom Host header possible. + const hostHeaderKey = find(headers[MAP], 'Host'); + if (hostHeaderKey !== undefined) { + obj[hostHeaderKey] = obj[hostHeaderKey][0]; + } + + return obj; +} + +/** + * Create a Headers object from an object of headers, ignoring those that do + * not conform to HTTP grammar productions. + * + * @param Object obj Object of headers + * @return Headers + */ +function createHeadersLenient(obj) { + const headers = new Headers(); + for (const name of Object.keys(obj)) { + if (invalidTokenRegex.test(name)) { + continue; + } + if (Array.isArray(obj[name])) { + for (const val of obj[name]) { + if (invalidHeaderCharRegex.test(val)) { + continue; + } + if (headers[MAP][name] === undefined) { + headers[MAP][name] = [val]; + } else { + headers[MAP][name].push(val); + } + } + } else if (!invalidHeaderCharRegex.test(obj[name])) { + headers[MAP][name] = [obj[name]]; + } + } + return headers; +} + +const INTERNALS$1 = Symbol('Response internals'); + +// fix an issue where "STATUS_CODES" aren't a named export for node <10 +const STATUS_CODES = http.STATUS_CODES; + +/** + * Response class + * + * @param Stream body Readable stream + * @param Object opts Response options + * @return Void + */ +class Response { + constructor() { + let body = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : null; + let opts = arguments.length > 1 && arguments[1] !== undefined ? 
arguments[1] : {}; + + Body.call(this, body, opts); + + const status = opts.status || 200; + const headers = new Headers(opts.headers); + + if (body != null && !headers.has('Content-Type')) { + const contentType = extractContentType(body); + if (contentType) { + headers.append('Content-Type', contentType); + } + } + + this[INTERNALS$1] = { + url: opts.url, + status, + statusText: opts.statusText || STATUS_CODES[status], + headers, + counter: opts.counter + }; + } + + get url() { + return this[INTERNALS$1].url; + } + + get status() { + return this[INTERNALS$1].status; + } + + /** + * Convenience property representing if the request ended normally + */ + get ok() { + return this[INTERNALS$1].status >= 200 && this[INTERNALS$1].status < 300; + } + + get redirected() { + return this[INTERNALS$1].counter > 0; + } + + get statusText() { + return this[INTERNALS$1].statusText; + } + + get headers() { + return this[INTERNALS$1].headers; + } + + /** + * Clone this response + * + * @return Response + */ + clone() { + return new Response(clone(this), { + url: this.url, + status: this.status, + statusText: this.statusText, + headers: this.headers, + ok: this.ok, + redirected: this.redirected + }); + } +} + +Body.mixIn(Response.prototype); + +Object.defineProperties(Response.prototype, { + url: { enumerable: true }, + status: { enumerable: true }, + ok: { enumerable: true }, + redirected: { enumerable: true }, + statusText: { enumerable: true }, + headers: { enumerable: true }, + clone: { enumerable: true } +}); + +Object.defineProperty(Response.prototype, Symbol.toStringTag, { + value: 'Response', + writable: false, + enumerable: false, + configurable: true +}); + +const INTERNALS$2 = Symbol('Request internals'); + +// fix an issue where "format", "parse" aren't a named export for node <10 +const parse_url = Url.parse; +const format_url = Url.format; + +const streamDestructionSupported = 'destroy' in Stream.Readable.prototype; + +/** + * Check if a value is an instance of Request. + * + * @param Mixed input + * @return Boolean + */ +function isRequest(input) { + return typeof input === 'object' && typeof input[INTERNALS$2] === 'object'; +} + +function isAbortSignal(signal) { + const proto = signal && typeof signal === 'object' && Object.getPrototypeOf(signal); + return !!(proto && proto.constructor.name === 'AbortSignal'); +} + +/** + * Request class + * + * @param Mixed input Url or Request instance + * @param Object init Custom options + * @return Void + */ +class Request { + constructor(input) { + let init = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : {}; + + let parsedURL; + + // normalize input + if (!isRequest(input)) { + if (input && input.href) { + // in order to support Node.js' Url objects; though WHATWG's URL objects + // will fall into this branch also (since their `toString()` will return + // `href` property anyway) + parsedURL = parse_url(input.href); + } else { + // coerce input to a string before attempting to parse + parsedURL = parse_url(`${input}`); + } + input = {}; + } else { + parsedURL = parse_url(input.url); + } + + let method = init.method || input.method || 'GET'; + method = method.toUpperCase(); + + if ((init.body != null || isRequest(input) && input.body !== null) && (method === 'GET' || method === 'HEAD')) { + throw new TypeError('Request with GET/HEAD method cannot have body'); + } + + let inputBody = init.body != null ? init.body : isRequest(input) && input.body !== null ? 
clone(input) : null; + + Body.call(this, inputBody, { + timeout: init.timeout || input.timeout || 0, + size: init.size || input.size || 0 + }); + + const headers = new Headers(init.headers || input.headers || {}); + + if (inputBody != null && !headers.has('Content-Type')) { + const contentType = extractContentType(inputBody); + if (contentType) { + headers.append('Content-Type', contentType); + } + } + + let signal = isRequest(input) ? input.signal : null; + if ('signal' in init) signal = init.signal; + + if (signal != null && !isAbortSignal(signal)) { + throw new TypeError('Expected signal to be an instanceof AbortSignal'); + } + + this[INTERNALS$2] = { + method, + redirect: init.redirect || input.redirect || 'follow', + headers, + parsedURL, + signal + }; + + // node-fetch-only options + this.follow = init.follow !== undefined ? init.follow : input.follow !== undefined ? input.follow : 20; + this.compress = init.compress !== undefined ? init.compress : input.compress !== undefined ? input.compress : true; + this.counter = init.counter || input.counter || 0; + this.agent = init.agent || input.agent; + } + + get method() { + return this[INTERNALS$2].method; + } + + get url() { + return format_url(this[INTERNALS$2].parsedURL); + } + + get headers() { + return this[INTERNALS$2].headers; + } + + get redirect() { + return this[INTERNALS$2].redirect; + } + + get signal() { + return this[INTERNALS$2].signal; + } + + /** + * Clone this request + * + * @return Request + */ + clone() { + return new Request(this); + } +} + +Body.mixIn(Request.prototype); + +Object.defineProperty(Request.prototype, Symbol.toStringTag, { + value: 'Request', + writable: false, + enumerable: false, + configurable: true +}); + +Object.defineProperties(Request.prototype, { + method: { enumerable: true }, + url: { enumerable: true }, + headers: { enumerable: true }, + redirect: { enumerable: true }, + clone: { enumerable: true }, + signal: { enumerable: true } +}); + +/** + * Convert a Request to Node.js http request options. 
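+ * The returned object is handed straight to http.request()/https.request():
+ * the parsed URL fields merged with the request method, the serialized
+ * headers, and the agent, with spec-mandated header defaults filled in below.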
+ * + * @param Request A Request instance + * @return Object The options object to be passed to http.request + */ +function getNodeRequestOptions(request) { + const parsedURL = request[INTERNALS$2].parsedURL; + const headers = new Headers(request[INTERNALS$2].headers); + + // fetch step 1.3 + if (!headers.has('Accept')) { + headers.set('Accept', '*/*'); + } + + // Basic fetch + if (!parsedURL.protocol || !parsedURL.hostname) { + throw new TypeError('Only absolute URLs are supported'); + } + + if (!/^https?:$/.test(parsedURL.protocol)) { + throw new TypeError('Only HTTP(S) protocols are supported'); + } + + if (request.signal && request.body instanceof Stream.Readable && !streamDestructionSupported) { + throw new Error('Cancellation of streamed requests with AbortSignal is not supported in node < 8'); + } + + // HTTP-network-or-cache fetch steps 2.4-2.7 + let contentLengthValue = null; + if (request.body == null && /^(POST|PUT)$/i.test(request.method)) { + contentLengthValue = '0'; + } + if (request.body != null) { + const totalBytes = getTotalBytes(request); + if (typeof totalBytes === 'number') { + contentLengthValue = String(totalBytes); + } + } + if (contentLengthValue) { + headers.set('Content-Length', contentLengthValue); + } + + // HTTP-network-or-cache fetch step 2.11 + if (!headers.has('User-Agent')) { + headers.set('User-Agent', 'node-fetch/1.0 (+https://github.com/bitinn/node-fetch)'); + } + + // HTTP-network-or-cache fetch step 2.15 + if (request.compress && !headers.has('Accept-Encoding')) { + headers.set('Accept-Encoding', 'gzip,deflate'); + } + + if (!headers.has('Connection') && !request.agent) { + headers.set('Connection', 'close'); + } + + // HTTP-network fetch step 4.2 + // chunked encoding is handled by Node.js + + return Object.assign({}, parsedURL, { + method: request.method, + headers: exportNodeCompatibleHeaders(headers), + agent: request.agent + }); +} + +/** + * abort-error.js + * + * AbortError interface for cancelled requests + */ + +/** + * Create AbortError instance + * + * @param String message Error message for human + * @return AbortError + */ +function AbortError(message) { + Error.call(this, message); + + this.type = 'aborted'; + this.message = message; + + // hide custom error implementation details from end-users + Error.captureStackTrace(this, this.constructor); +} + +AbortError.prototype = Object.create(Error.prototype); +AbortError.prototype.constructor = AbortError; +AbortError.prototype.name = 'AbortError'; + +// fix an issue where "PassThrough", "resolve" aren't a named export for node <10 +const PassThrough$1 = Stream.PassThrough; +const resolve_url = Url.resolve; + +/** + * Fetch function + * + * @param Mixed url Absolute url or Request instance + * @param Object opts Fetch options + * @return Promise + */ +function fetch(url, opts) { + + // allow custom promise + if (!fetch.Promise) { + throw new Error('native promise missing, set fetch.Promise to your favorite alternative'); + } + + Body.Promise = fetch.Promise; + + // wrap http.request into fetch + return new fetch.Promise(function (resolve, reject) { + // build request object + const request = new Request(url, opts); + const options = getNodeRequestOptions(request); + + const send = (options.protocol === 'https:' ? 
https : http).request; + const signal = request.signal; + + let response = null; + + const abort = function abort() { + let error = new AbortError('The user aborted a request.'); + reject(error); + if (request.body && request.body instanceof Stream.Readable) { + request.body.destroy(error); + } + if (!response || !response.body) return; + response.body.emit('error', error); + }; + + if (signal && signal.aborted) { + abort(); + return; + } + + const abortAndFinalize = function abortAndFinalize() { + abort(); + finalize(); + }; + + // send request + const req = send(options); + let reqTimeout; + + if (signal) { + signal.addEventListener('abort', abortAndFinalize); + } + + function finalize() { + req.abort(); + if (signal) signal.removeEventListener('abort', abortAndFinalize); + clearTimeout(reqTimeout); + } + + if (request.timeout) { + req.once('socket', function (socket) { + reqTimeout = setTimeout(function () { + reject(new FetchError(`network timeout at: ${request.url}`, 'request-timeout')); + finalize(); + }, request.timeout); + }); + } + + req.on('error', function (err) { + reject(new FetchError(`request to ${request.url} failed, reason: ${err.message}`, 'system', err)); + finalize(); + }); + + req.on('response', function (res) { + clearTimeout(reqTimeout); + + const headers = createHeadersLenient(res.headers); + + // HTTP fetch step 5 + if (fetch.isRedirect(res.statusCode)) { + // HTTP fetch step 5.2 + const location = headers.get('Location'); + + // HTTP fetch step 5.3 + const locationURL = location === null ? null : resolve_url(request.url, location); + + // HTTP fetch step 5.5 + switch (request.redirect) { + case 'error': + reject(new FetchError(`redirect mode is set to error: ${request.url}`, 'no-redirect')); + finalize(); + return; + case 'manual': + // node-fetch-specific step: make manual redirect a bit easier to use by setting the Location header value to the resolved URL. + if (locationURL !== null) { + // handle corrupted header + try { + headers.set('Location', locationURL); + } catch (err) { + // istanbul ignore next: nodejs server prevent invalid response headers, we can't test this through normal request + reject(err); + } + } + break; + case 'follow': + // HTTP-redirect fetch step 2 + if (locationURL === null) { + break; + } + + // HTTP-redirect fetch step 5 + if (request.counter >= request.follow) { + reject(new FetchError(`maximum redirect reached at: ${request.url}`, 'max-redirect')); + finalize(); + return; + } + + // HTTP-redirect fetch step 6 (counter increment) + // Create a new Request object. 
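+ // The follow-up request reuses the original request's options verbatim,
+ // with the redirect counter bumped so the `follow` limit above still
+ // applies; fetch() is then re-entered on the resolved Location URL.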
+ const requestOpts = { + headers: new Headers(request.headers), + follow: request.follow, + counter: request.counter + 1, + agent: request.agent, + compress: request.compress, + method: request.method, + body: request.body, + signal: request.signal, + timeout: request.timeout + }; + + // HTTP-redirect fetch step 9 + if (res.statusCode !== 303 && request.body && getTotalBytes(request) === null) { + reject(new FetchError('Cannot follow redirect with body being a readable stream', 'unsupported-redirect')); + finalize(); + return; + } + + // HTTP-redirect fetch step 11 + if (res.statusCode === 303 || (res.statusCode === 301 || res.statusCode === 302) && request.method === 'POST') { + requestOpts.method = 'GET'; + requestOpts.body = undefined; + requestOpts.headers.delete('content-length'); + } + + // HTTP-redirect fetch step 15 + resolve(fetch(new Request(locationURL, requestOpts))); + finalize(); + return; + } + } + + // prepare response + res.once('end', function () { + if (signal) signal.removeEventListener('abort', abortAndFinalize); + }); + let body = res.pipe(new PassThrough$1()); + + const response_options = { + url: request.url, + status: res.statusCode, + statusText: res.statusMessage, + headers: headers, + size: request.size, + timeout: request.timeout, + counter: request.counter + }; + + // HTTP-network fetch step 12.1.1.3 + const codings = headers.get('Content-Encoding'); + + // HTTP-network fetch step 12.1.1.4: handle content codings + + // in following scenarios we ignore compression support + // 1. compression support is disabled + // 2. HEAD request + // 3. no Content-Encoding header + // 4. no content response (204) + // 5. content not modified response (304) + if (!request.compress || request.method === 'HEAD' || codings === null || res.statusCode === 204 || res.statusCode === 304) { + response = new Response(body, response_options); + resolve(response); + return; + } + + // For Node v6+ + // Be less strict when decoding compressed responses, since sometimes + // servers send slightly invalid responses that are still accepted + // by common browsers. + // Always using Z_SYNC_FLUSH is what cURL does. 
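+ // (Using Z_SYNC_FLUSH as finishFlush also keeps zlib from raising
+ // "unexpected end of file" when a server truncates the compressed body.)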
+ const zlibOptions = { + flush: zlib.Z_SYNC_FLUSH, + finishFlush: zlib.Z_SYNC_FLUSH + }; + + // for gzip + if (codings == 'gzip' || codings == 'x-gzip') { + body = body.pipe(zlib.createGunzip(zlibOptions)); + response = new Response(body, response_options); + resolve(response); + return; + } + + // for deflate + if (codings == 'deflate' || codings == 'x-deflate') { + // handle the infamous raw deflate response from old servers + // a hack for old IIS and Apache servers + const raw = res.pipe(new PassThrough$1()); + raw.once('data', function (chunk) { + // see http://stackoverflow.com/questions/37519828 + if ((chunk[0] & 0x0F) === 0x08) { + body = body.pipe(zlib.createInflate()); + } else { + body = body.pipe(zlib.createInflateRaw()); + } + response = new Response(body, response_options); + resolve(response); + }); + return; + } + + // for br + if (codings == 'br' && typeof zlib.createBrotliDecompress === 'function') { + body = body.pipe(zlib.createBrotliDecompress()); + response = new Response(body, response_options); + resolve(response); + return; + } + + // otherwise, use response as-is + response = new Response(body, response_options); + resolve(response); + }); + + writeToStream(req, request); + }); +} +/** + * Redirect code matching + * + * @param Number code Status code + * @return Boolean + */ +fetch.isRedirect = function (code) { + return code === 301 || code === 302 || code === 303 || code === 307 || code === 308; +}; + +// expose Promise +fetch.Promise = global.Promise; + +export default fetch; +export { Headers, Request, Response, FetchError }; diff --git a/scripts/metrics/node_modules/node-fetch/lib/index.js b/scripts/metrics/node_modules/node-fetch/lib/index.js new file mode 100644 index 00000000000..86c7c031229 --- /dev/null +++ b/scripts/metrics/node_modules/node-fetch/lib/index.js @@ -0,0 +1,1640 @@ +'use strict'; + +Object.defineProperty(exports, '__esModule', { value: true }); + +function _interopDefault (ex) { return (ex && (typeof ex === 'object') && 'default' in ex) ? ex['default'] : ex; } + +var Stream = _interopDefault(require('stream')); +var http = _interopDefault(require('http')); +var Url = _interopDefault(require('url')); +var https = _interopDefault(require('https')); +var zlib = _interopDefault(require('zlib')); + +// Based on https://github.com/tmpvar/jsdom/blob/aa85b2abf07766ff7bf5c1f6daafb3726f2f2db5/lib/jsdom/living/blob.js + +// fix for "Readable" isn't a named export issue +const Readable = Stream.Readable; + +const BUFFER = Symbol('buffer'); +const TYPE = Symbol('type'); + +class Blob { + constructor() { + this[TYPE] = ''; + + const blobParts = arguments[0]; + const options = arguments[1]; + + const buffers = []; + let size = 0; + + if (blobParts) { + const a = blobParts; + const length = Number(a.length); + for (let i = 0; i < length; i++) { + const element = a[i]; + let buffer; + if (element instanceof Buffer) { + buffer = element; + } else if (ArrayBuffer.isView(element)) { + buffer = Buffer.from(element.buffer, element.byteOffset, element.byteLength); + } else if (element instanceof ArrayBuffer) { + buffer = Buffer.from(element); + } else if (element instanceof Blob) { + buffer = element[BUFFER]; + } else { + buffer = Buffer.from(typeof element === 'string' ? 
element : String(element)); + } + size += buffer.length; + buffers.push(buffer); + } + } + + this[BUFFER] = Buffer.concat(buffers); + + let type = options && options.type !== undefined && String(options.type).toLowerCase(); + if (type && !/[^\u0020-\u007E]/.test(type)) { + this[TYPE] = type; + } + } + get size() { + return this[BUFFER].length; + } + get type() { + return this[TYPE]; + } + text() { + return Promise.resolve(this[BUFFER].toString()); + } + arrayBuffer() { + const buf = this[BUFFER]; + const ab = buf.buffer.slice(buf.byteOffset, buf.byteOffset + buf.byteLength); + return Promise.resolve(ab); + } + stream() { + const readable = new Readable(); + readable._read = function () {}; + readable.push(this[BUFFER]); + readable.push(null); + return readable; + } + toString() { + return '[object Blob]'; + } + slice() { + const size = this.size; + + const start = arguments[0]; + const end = arguments[1]; + let relativeStart, relativeEnd; + if (start === undefined) { + relativeStart = 0; + } else if (start < 0) { + relativeStart = Math.max(size + start, 0); + } else { + relativeStart = Math.min(start, size); + } + if (end === undefined) { + relativeEnd = size; + } else if (end < 0) { + relativeEnd = Math.max(size + end, 0); + } else { + relativeEnd = Math.min(end, size); + } + const span = Math.max(relativeEnd - relativeStart, 0); + + const buffer = this[BUFFER]; + const slicedBuffer = buffer.slice(relativeStart, relativeStart + span); + const blob = new Blob([], { type: arguments[2] }); + blob[BUFFER] = slicedBuffer; + return blob; + } +} + +Object.defineProperties(Blob.prototype, { + size: { enumerable: true }, + type: { enumerable: true }, + slice: { enumerable: true } +}); + +Object.defineProperty(Blob.prototype, Symbol.toStringTag, { + value: 'Blob', + writable: false, + enumerable: false, + configurable: true +}); + +/** + * fetch-error.js + * + * FetchError interface for operational errors + */ + +/** + * Create FetchError instance + * + * @param String message Error message for human + * @param String type Error type for machine + * @param String systemError For Node.js system error + * @return FetchError + */ +function FetchError(message, type, systemError) { + Error.call(this, message); + + this.message = message; + this.type = type; + + // when err.type is `system`, err.code contains system error code + if (systemError) { + this.code = this.errno = systemError.code; + } + + // hide custom error implementation details from end-users + Error.captureStackTrace(this, this.constructor); +} + +FetchError.prototype = Object.create(Error.prototype); +FetchError.prototype.constructor = FetchError; +FetchError.prototype.name = 'FetchError'; + +let convert; +try { + convert = require('encoding').convert; +} catch (e) {} + +const INTERNALS = Symbol('Body internals'); + +// fix an issue where "PassThrough" isn't a named export for node <10 +const PassThrough = Stream.PassThrough; + +/** + * Body mixin + * + * Ref: https://fetch.spec.whatwg.org/#body + * + * @param Stream body Readable stream + * @param Object opts Response options + * @return Void + */ +function Body(body) { + var _this = this; + + var _ref = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : {}, + _ref$size = _ref.size; + + let size = _ref$size === undefined ? 0 : _ref$size; + var _ref$timeout = _ref.timeout; + let timeout = _ref$timeout === undefined ? 
0 : _ref$timeout; + + if (body == null) { + // body is undefined or null + body = null; + } else if (isURLSearchParams(body)) { + // body is a URLSearchParams + body = Buffer.from(body.toString()); + } else if (isBlob(body)) ; else if (Buffer.isBuffer(body)) ; else if (Object.prototype.toString.call(body) === '[object ArrayBuffer]') { + // body is ArrayBuffer + body = Buffer.from(body); + } else if (ArrayBuffer.isView(body)) { + // body is ArrayBufferView + body = Buffer.from(body.buffer, body.byteOffset, body.byteLength); + } else if (body instanceof Stream) ; else { + // none of the above + // coerce to string then buffer + body = Buffer.from(String(body)); + } + this[INTERNALS] = { + body, + disturbed: false, + error: null + }; + this.size = size; + this.timeout = timeout; + + if (body instanceof Stream) { + body.on('error', function (err) { + const error = err.name === 'AbortError' ? err : new FetchError(`Invalid response body while trying to fetch ${_this.url}: ${err.message}`, 'system', err); + _this[INTERNALS].error = error; + }); + } +} + +Body.prototype = { + get body() { + return this[INTERNALS].body; + }, + + get bodyUsed() { + return this[INTERNALS].disturbed; + }, + + /** + * Decode response as ArrayBuffer + * + * @return Promise + */ + arrayBuffer() { + return consumeBody.call(this).then(function (buf) { + return buf.buffer.slice(buf.byteOffset, buf.byteOffset + buf.byteLength); + }); + }, + + /** + * Return raw response as Blob + * + * @return Promise + */ + blob() { + let ct = this.headers && this.headers.get('content-type') || ''; + return consumeBody.call(this).then(function (buf) { + return Object.assign( + // Prevent copying + new Blob([], { + type: ct.toLowerCase() + }), { + [BUFFER]: buf + }); + }); + }, + + /** + * Decode response as json + * + * @return Promise + */ + json() { + var _this2 = this; + + return consumeBody.call(this).then(function (buffer) { + try { + return JSON.parse(buffer.toString()); + } catch (err) { + return Body.Promise.reject(new FetchError(`invalid json response body at ${_this2.url} reason: ${err.message}`, 'invalid-json')); + } + }); + }, + + /** + * Decode response as text + * + * @return Promise + */ + text() { + return consumeBody.call(this).then(function (buffer) { + return buffer.toString(); + }); + }, + + /** + * Decode response as buffer (non-spec api) + * + * @return Promise + */ + buffer() { + return consumeBody.call(this); + }, + + /** + * Decode response as text, while automatically detecting the encoding and + * trying to decode to UTF-8 (non-spec api) + * + * @return Promise + */ + textConverted() { + var _this3 = this; + + return consumeBody.call(this).then(function (buffer) { + return convertBody(buffer, _this3.headers); + }); + } +}; + +// In browsers, all properties are enumerable. +Object.defineProperties(Body.prototype, { + body: { enumerable: true }, + bodyUsed: { enumerable: true }, + arrayBuffer: { enumerable: true }, + blob: { enumerable: true }, + json: { enumerable: true }, + text: { enumerable: true } +}); + +Body.mixIn = function (proto) { + for (const name of Object.getOwnPropertyNames(Body.prototype)) { + // istanbul ignore else: future proof + if (!(name in proto)) { + const desc = Object.getOwnPropertyDescriptor(Body.prototype, name); + Object.defineProperty(proto, name, desc); + } + } +}; + +/** + * Consume and convert an entire Body to a Buffer. 
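+ * Marks the body as disturbed, normalizes Blob/Buffer/Stream inputs, then
+ * accumulates stream chunks (enforcing the optional size limit and body
+ * timeout) and resolves with a single concatenated Buffer.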
+ *
+ * Ref: https://fetch.spec.whatwg.org/#concept-body-consume-body
+ *
+ * @return Promise
+ */
+function consumeBody() {
+ var _this4 = this;
+
+ if (this[INTERNALS].disturbed) {
+  return Body.Promise.reject(new TypeError(`body used already for: ${this.url}`));
+ }
+
+ this[INTERNALS].disturbed = true;
+
+ if (this[INTERNALS].error) {
+  return Body.Promise.reject(this[INTERNALS].error);
+ }
+
+ let body = this.body;
+
+ // body is null
+ if (body === null) {
+  return Body.Promise.resolve(Buffer.alloc(0));
+ }
+
+ // body is blob
+ if (isBlob(body)) {
+  body = body.stream();
+ }
+
+ // body is buffer
+ if (Buffer.isBuffer(body)) {
+  return Body.Promise.resolve(body);
+ }
+
+ // istanbul ignore if: should never happen
+ if (!(body instanceof Stream)) {
+  return Body.Promise.resolve(Buffer.alloc(0));
+ }
+
+ // body is stream
+ // get ready to actually consume the body
+ let accum = [];
+ let accumBytes = 0;
+ let abort = false;
+
+ return new Body.Promise(function (resolve, reject) {
+  let resTimeout;
+
+  // allow timeout on slow response body
+  if (_this4.timeout) {
+   resTimeout = setTimeout(function () {
+    abort = true;
+    reject(new FetchError(`Response timeout while trying to fetch ${_this4.url} (over ${_this4.timeout}ms)`, 'body-timeout'));
+   }, _this4.timeout);
+  }
+
+  // handle stream errors
+  body.on('error', function (err) {
+   if (err.name === 'AbortError') {
+    // if the request was aborted, reject with this Error
+    abort = true;
+    reject(err);
+   } else {
+    // other errors, such as incorrect content-encoding
+    reject(new FetchError(`Invalid response body while trying to fetch ${_this4.url}: ${err.message}`, 'system', err));
+   }
+  });
+
+  body.on('data', function (chunk) {
+   if (abort || chunk === null) {
+    return;
+   }
+
+   if (_this4.size && accumBytes + chunk.length > _this4.size) {
+    abort = true;
+    reject(new FetchError(`content size at ${_this4.url} over limit: ${_this4.size}`, 'max-size'));
+    return;
+   }
+
+   accumBytes += chunk.length;
+   accum.push(chunk);
+  });
+
+  body.on('end', function () {
+   if (abort) {
+    return;
+   }
+
+   clearTimeout(resTimeout);
+
+   try {
+    resolve(Buffer.concat(accum, accumBytes));
+   } catch (err) {
+    // handle streams that have accumulated too much data (issue #414)
+    reject(new FetchError(`Could not create Buffer from response body for ${_this4.url}: ${err.message}`, 'system', err));
+   }
+  });
+ });
+}
+
+/**
+ * Detect buffer encoding and convert to target encoding
+ * ref: http://www.w3.org/TR/2011/WD-html5-20110113/parsing.html#determining-the-character-encoding
+ *
+ * @param Buffer buffer Incoming buffer
+ * @param Headers headers Response headers (read for a content-type charset)
+ * @return String
+ */
+function convertBody(buffer, headers) {
+ if (typeof convert !== 'function') {
+  throw new Error('The package `encoding` must be installed to use the textConverted() function');
+ }
+
+ const ct = headers.get('content-type');
+ let charset = 'utf-8';
+ let res, str;
+
+ // header
+ if (ct) {
+  res = /charset=([^;]*)/i.exec(ct);
+ }
+
+ // no charset in content type, peek at response body for at most 1024 bytes
+ str = buffer.slice(0, 1024).toString();
+
+ // html5
+ if (!res && str) {
+  res = /<meta.+?charset=(['"])(.+?)\1/i.exec(str);
+ }
+
+ // html4
+ if (!res && str) {
+  res = /<meta[\s]+?http-equiv=(['"])content-type\1[\s]+?content=(['"])(.+?)\2/i.exec(str);
+
+  if (res) {
+   res = /charset=(.*)/i.exec(res.pop());
+  }
+ }
+
+ // xml
+ if (!res && str) {
+  res = /<\?xml.+?encoding=(['"])(.+?)\1/.exec(str);
+ }
+
+ // found charset
+ if (res) {
+  charset = res.pop();
+
+  // prevent decode issues when sites use incorrect encoding
+  if (charset === 'gb2312' || charset === 'gbk') {
+   charset = 'gb18030';
+  }
+ }
+
+ // turn raw buffers into a single utf-8 buffer
+ return convert(buffer, 'UTF-8', charset).toString();
+}
+
+// [elided in this hunk: the body helpers (isURLSearchParams, isBlob, clone,
+// extractContentType, getTotalBytes, writeToStream) and the header
+// name/value validators]
+
+const MAP = Symbol('map');
+class Headers {
+ /**
+  * Headers class
+  *
+  * @param Object headers Response headers
+  * @return Void
+  */
+ constructor() {
+  let init = arguments.length > 0 && arguments[0] !== undefined ?
arguments[0] : undefined; + + this[MAP] = Object.create(null); + + if (init instanceof Headers) { + const rawHeaders = init.raw(); + const headerNames = Object.keys(rawHeaders); + + for (const headerName of headerNames) { + for (const value of rawHeaders[headerName]) { + this.append(headerName, value); + } + } + + return; + } + + // We don't worry about converting prop to ByteString here as append() + // will handle it. + if (init == null) ; else if (typeof init === 'object') { + const method = init[Symbol.iterator]; + if (method != null) { + if (typeof method !== 'function') { + throw new TypeError('Header pairs must be iterable'); + } + + // sequence> + // Note: per spec we have to first exhaust the lists then process them + const pairs = []; + for (const pair of init) { + if (typeof pair !== 'object' || typeof pair[Symbol.iterator] !== 'function') { + throw new TypeError('Each header pair must be iterable'); + } + pairs.push(Array.from(pair)); + } + + for (const pair of pairs) { + if (pair.length !== 2) { + throw new TypeError('Each header pair must be a name/value tuple'); + } + this.append(pair[0], pair[1]); + } + } else { + // record + for (const key of Object.keys(init)) { + const value = init[key]; + this.append(key, value); + } + } + } else { + throw new TypeError('Provided initializer must be an object'); + } + } + + /** + * Return combined header value given name + * + * @param String name Header name + * @return Mixed + */ + get(name) { + name = `${name}`; + validateName(name); + const key = find(this[MAP], name); + if (key === undefined) { + return null; + } + + return this[MAP][key].join(', '); + } + + /** + * Iterate over all headers + * + * @param Function callback Executed for each item with parameters (value, name, thisArg) + * @param Boolean thisArg `this` context for callback function + * @return Void + */ + forEach(callback) { + let thisArg = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : undefined; + + let pairs = getHeaders(this); + let i = 0; + while (i < pairs.length) { + var _pairs$i = pairs[i]; + const name = _pairs$i[0], + value = _pairs$i[1]; + + callback.call(thisArg, value, name, this); + pairs = getHeaders(this); + i++; + } + } + + /** + * Overwrite header values given name + * + * @param String name Header name + * @param String value Header value + * @return Void + */ + set(name, value) { + name = `${name}`; + value = `${value}`; + validateName(name); + validateValue(value); + const key = find(this[MAP], name); + this[MAP][key !== undefined ? 
key : name] = [value]; + } + + /** + * Append a value onto existing header + * + * @param String name Header name + * @param String value Header value + * @return Void + */ + append(name, value) { + name = `${name}`; + value = `${value}`; + validateName(name); + validateValue(value); + const key = find(this[MAP], name); + if (key !== undefined) { + this[MAP][key].push(value); + } else { + this[MAP][name] = [value]; + } + } + + /** + * Check for header name existence + * + * @param String name Header name + * @return Boolean + */ + has(name) { + name = `${name}`; + validateName(name); + return find(this[MAP], name) !== undefined; + } + + /** + * Delete all header values given name + * + * @param String name Header name + * @return Void + */ + delete(name) { + name = `${name}`; + validateName(name); + const key = find(this[MAP], name); + if (key !== undefined) { + delete this[MAP][key]; + } + } + + /** + * Return raw headers (non-spec api) + * + * @return Object + */ + raw() { + return this[MAP]; + } + + /** + * Get an iterator on keys. + * + * @return Iterator + */ + keys() { + return createHeadersIterator(this, 'key'); + } + + /** + * Get an iterator on values. + * + * @return Iterator + */ + values() { + return createHeadersIterator(this, 'value'); + } + + /** + * Get an iterator on entries. + * + * This is the default iterator of the Headers object. + * + * @return Iterator + */ + [Symbol.iterator]() { + return createHeadersIterator(this, 'key+value'); + } +} +Headers.prototype.entries = Headers.prototype[Symbol.iterator]; + +Object.defineProperty(Headers.prototype, Symbol.toStringTag, { + value: 'Headers', + writable: false, + enumerable: false, + configurable: true +}); + +Object.defineProperties(Headers.prototype, { + get: { enumerable: true }, + forEach: { enumerable: true }, + set: { enumerable: true }, + append: { enumerable: true }, + has: { enumerable: true }, + delete: { enumerable: true }, + keys: { enumerable: true }, + values: { enumerable: true }, + entries: { enumerable: true } +}); + +function getHeaders(headers) { + let kind = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : 'key+value'; + + const keys = Object.keys(headers[MAP]).sort(); + return keys.map(kind === 'key' ? function (k) { + return k.toLowerCase(); + } : kind === 'value' ? 
function (k) { + return headers[MAP][k].join(', '); + } : function (k) { + return [k.toLowerCase(), headers[MAP][k].join(', ')]; + }); +} + +const INTERNAL = Symbol('internal'); + +function createHeadersIterator(target, kind) { + const iterator = Object.create(HeadersIteratorPrototype); + iterator[INTERNAL] = { + target, + kind, + index: 0 + }; + return iterator; +} + +const HeadersIteratorPrototype = Object.setPrototypeOf({ + next() { + // istanbul ignore if + if (!this || Object.getPrototypeOf(this) !== HeadersIteratorPrototype) { + throw new TypeError('Value of `this` is not a HeadersIterator'); + } + + var _INTERNAL = this[INTERNAL]; + const target = _INTERNAL.target, + kind = _INTERNAL.kind, + index = _INTERNAL.index; + + const values = getHeaders(target, kind); + const len = values.length; + if (index >= len) { + return { + value: undefined, + done: true + }; + } + + this[INTERNAL].index = index + 1; + + return { + value: values[index], + done: false + }; + } +}, Object.getPrototypeOf(Object.getPrototypeOf([][Symbol.iterator]()))); + +Object.defineProperty(HeadersIteratorPrototype, Symbol.toStringTag, { + value: 'HeadersIterator', + writable: false, + enumerable: false, + configurable: true +}); + +/** + * Export the Headers object in a form that Node.js can consume. + * + * @param Headers headers + * @return Object + */ +function exportNodeCompatibleHeaders(headers) { + const obj = Object.assign({ __proto__: null }, headers[MAP]); + + // http.request() only supports string as Host header. This hack makes + // specifying custom Host header possible. + const hostHeaderKey = find(headers[MAP], 'Host'); + if (hostHeaderKey !== undefined) { + obj[hostHeaderKey] = obj[hostHeaderKey][0]; + } + + return obj; +} + +/** + * Create a Headers object from an object of headers, ignoring those that do + * not conform to HTTP grammar productions. + * + * @param Object obj Object of headers + * @return Headers + */ +function createHeadersLenient(obj) { + const headers = new Headers(); + for (const name of Object.keys(obj)) { + if (invalidTokenRegex.test(name)) { + continue; + } + if (Array.isArray(obj[name])) { + for (const val of obj[name]) { + if (invalidHeaderCharRegex.test(val)) { + continue; + } + if (headers[MAP][name] === undefined) { + headers[MAP][name] = [val]; + } else { + headers[MAP][name].push(val); + } + } + } else if (!invalidHeaderCharRegex.test(obj[name])) { + headers[MAP][name] = [obj[name]]; + } + } + return headers; +} + +const INTERNALS$1 = Symbol('Response internals'); + +// fix an issue where "STATUS_CODES" aren't a named export for node <10 +const STATUS_CODES = http.STATUS_CODES; + +/** + * Response class + * + * @param Stream body Readable stream + * @param Object opts Response options + * @return Void + */ +class Response { + constructor() { + let body = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : null; + let opts = arguments.length > 1 && arguments[1] !== undefined ? 
arguments[1] : {}; + + Body.call(this, body, opts); + + const status = opts.status || 200; + const headers = new Headers(opts.headers); + + if (body != null && !headers.has('Content-Type')) { + const contentType = extractContentType(body); + if (contentType) { + headers.append('Content-Type', contentType); + } + } + + this[INTERNALS$1] = { + url: opts.url, + status, + statusText: opts.statusText || STATUS_CODES[status], + headers, + counter: opts.counter + }; + } + + get url() { + return this[INTERNALS$1].url; + } + + get status() { + return this[INTERNALS$1].status; + } + + /** + * Convenience property representing if the request ended normally + */ + get ok() { + return this[INTERNALS$1].status >= 200 && this[INTERNALS$1].status < 300; + } + + get redirected() { + return this[INTERNALS$1].counter > 0; + } + + get statusText() { + return this[INTERNALS$1].statusText; + } + + get headers() { + return this[INTERNALS$1].headers; + } + + /** + * Clone this response + * + * @return Response + */ + clone() { + return new Response(clone(this), { + url: this.url, + status: this.status, + statusText: this.statusText, + headers: this.headers, + ok: this.ok, + redirected: this.redirected + }); + } +} + +Body.mixIn(Response.prototype); + +Object.defineProperties(Response.prototype, { + url: { enumerable: true }, + status: { enumerable: true }, + ok: { enumerable: true }, + redirected: { enumerable: true }, + statusText: { enumerable: true }, + headers: { enumerable: true }, + clone: { enumerable: true } +}); + +Object.defineProperty(Response.prototype, Symbol.toStringTag, { + value: 'Response', + writable: false, + enumerable: false, + configurable: true +}); + +const INTERNALS$2 = Symbol('Request internals'); + +// fix an issue where "format", "parse" aren't a named export for node <10 +const parse_url = Url.parse; +const format_url = Url.format; + +const streamDestructionSupported = 'destroy' in Stream.Readable.prototype; + +/** + * Check if a value is an instance of Request. + * + * @param Mixed input + * @return Boolean + */ +function isRequest(input) { + return typeof input === 'object' && typeof input[INTERNALS$2] === 'object'; +} + +function isAbortSignal(signal) { + const proto = signal && typeof signal === 'object' && Object.getPrototypeOf(signal); + return !!(proto && proto.constructor.name === 'AbortSignal'); +} + +/** + * Request class + * + * @param Mixed input Url or Request instance + * @param Object init Custom options + * @return Void + */ +class Request { + constructor(input) { + let init = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : {}; + + let parsedURL; + + // normalize input + if (!isRequest(input)) { + if (input && input.href) { + // in order to support Node.js' Url objects; though WHATWG's URL objects + // will fall into this branch also (since their `toString()` will return + // `href` property anyway) + parsedURL = parse_url(input.href); + } else { + // coerce input to a string before attempting to parse + parsedURL = parse_url(`${input}`); + } + input = {}; + } else { + parsedURL = parse_url(input.url); + } + + let method = init.method || input.method || 'GET'; + method = method.toUpperCase(); + + if ((init.body != null || isRequest(input) && input.body !== null) && (method === 'GET' || method === 'HEAD')) { + throw new TypeError('Request with GET/HEAD method cannot have body'); + } + + let inputBody = init.body != null ? init.body : isRequest(input) && input.body !== null ? 
clone(input) : null; + + Body.call(this, inputBody, { + timeout: init.timeout || input.timeout || 0, + size: init.size || input.size || 0 + }); + + const headers = new Headers(init.headers || input.headers || {}); + + if (inputBody != null && !headers.has('Content-Type')) { + const contentType = extractContentType(inputBody); + if (contentType) { + headers.append('Content-Type', contentType); + } + } + + let signal = isRequest(input) ? input.signal : null; + if ('signal' in init) signal = init.signal; + + if (signal != null && !isAbortSignal(signal)) { + throw new TypeError('Expected signal to be an instanceof AbortSignal'); + } + + this[INTERNALS$2] = { + method, + redirect: init.redirect || input.redirect || 'follow', + headers, + parsedURL, + signal + }; + + // node-fetch-only options + this.follow = init.follow !== undefined ? init.follow : input.follow !== undefined ? input.follow : 20; + this.compress = init.compress !== undefined ? init.compress : input.compress !== undefined ? input.compress : true; + this.counter = init.counter || input.counter || 0; + this.agent = init.agent || input.agent; + } + + get method() { + return this[INTERNALS$2].method; + } + + get url() { + return format_url(this[INTERNALS$2].parsedURL); + } + + get headers() { + return this[INTERNALS$2].headers; + } + + get redirect() { + return this[INTERNALS$2].redirect; + } + + get signal() { + return this[INTERNALS$2].signal; + } + + /** + * Clone this request + * + * @return Request + */ + clone() { + return new Request(this); + } +} + +Body.mixIn(Request.prototype); + +Object.defineProperty(Request.prototype, Symbol.toStringTag, { + value: 'Request', + writable: false, + enumerable: false, + configurable: true +}); + +Object.defineProperties(Request.prototype, { + method: { enumerable: true }, + url: { enumerable: true }, + headers: { enumerable: true }, + redirect: { enumerable: true }, + clone: { enumerable: true }, + signal: { enumerable: true } +}); + +/** + * Convert a Request to Node.js http request options. 
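+ * Besides translating the parsed URL, this fills in the defaults the Fetch
+ * spec requires: Accept, Content-Length for fixed-size bodies, User-Agent,
+ * Accept-Encoding (when compression is enabled) and Connection.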
+ * + * @param Request A Request instance + * @return Object The options object to be passed to http.request + */ +function getNodeRequestOptions(request) { + const parsedURL = request[INTERNALS$2].parsedURL; + const headers = new Headers(request[INTERNALS$2].headers); + + // fetch step 1.3 + if (!headers.has('Accept')) { + headers.set('Accept', '*/*'); + } + + // Basic fetch + if (!parsedURL.protocol || !parsedURL.hostname) { + throw new TypeError('Only absolute URLs are supported'); + } + + if (!/^https?:$/.test(parsedURL.protocol)) { + throw new TypeError('Only HTTP(S) protocols are supported'); + } + + if (request.signal && request.body instanceof Stream.Readable && !streamDestructionSupported) { + throw new Error('Cancellation of streamed requests with AbortSignal is not supported in node < 8'); + } + + // HTTP-network-or-cache fetch steps 2.4-2.7 + let contentLengthValue = null; + if (request.body == null && /^(POST|PUT)$/i.test(request.method)) { + contentLengthValue = '0'; + } + if (request.body != null) { + const totalBytes = getTotalBytes(request); + if (typeof totalBytes === 'number') { + contentLengthValue = String(totalBytes); + } + } + if (contentLengthValue) { + headers.set('Content-Length', contentLengthValue); + } + + // HTTP-network-or-cache fetch step 2.11 + if (!headers.has('User-Agent')) { + headers.set('User-Agent', 'node-fetch/1.0 (+https://github.com/bitinn/node-fetch)'); + } + + // HTTP-network-or-cache fetch step 2.15 + if (request.compress && !headers.has('Accept-Encoding')) { + headers.set('Accept-Encoding', 'gzip,deflate'); + } + + if (!headers.has('Connection') && !request.agent) { + headers.set('Connection', 'close'); + } + + // HTTP-network fetch step 4.2 + // chunked encoding is handled by Node.js + + return Object.assign({}, parsedURL, { + method: request.method, + headers: exportNodeCompatibleHeaders(headers), + agent: request.agent + }); +} + +/** + * abort-error.js + * + * AbortError interface for cancelled requests + */ + +/** + * Create AbortError instance + * + * @param String message Error message for human + * @return AbortError + */ +function AbortError(message) { + Error.call(this, message); + + this.type = 'aborted'; + this.message = message; + + // hide custom error implementation details from end-users + Error.captureStackTrace(this, this.constructor); +} + +AbortError.prototype = Object.create(Error.prototype); +AbortError.prototype.constructor = AbortError; +AbortError.prototype.name = 'AbortError'; + +// fix an issue where "PassThrough", "resolve" aren't a named export for node <10 +const PassThrough$1 = Stream.PassThrough; +const resolve_url = Url.resolve; + +/** + * Fetch function + * + * @param Mixed url Absolute url or Request instance + * @param Object opts Fetch options + * @return Promise + */ +function fetch(url, opts) { + + // allow custom promise + if (!fetch.Promise) { + throw new Error('native promise missing, set fetch.Promise to your favorite alternative'); + } + + Body.Promise = fetch.Promise; + + // wrap http.request into fetch + return new fetch.Promise(function (resolve, reject) { + // build request object + const request = new Request(url, opts); + const options = getNodeRequestOptions(request); + + const send = (options.protocol === 'https:' ? 
https : http).request; + const signal = request.signal; + + let response = null; + + const abort = function abort() { + let error = new AbortError('The user aborted a request.'); + reject(error); + if (request.body && request.body instanceof Stream.Readable) { + request.body.destroy(error); + } + if (!response || !response.body) return; + response.body.emit('error', error); + }; + + if (signal && signal.aborted) { + abort(); + return; + } + + const abortAndFinalize = function abortAndFinalize() { + abort(); + finalize(); + }; + + // send request + const req = send(options); + let reqTimeout; + + if (signal) { + signal.addEventListener('abort', abortAndFinalize); + } + + function finalize() { + req.abort(); + if (signal) signal.removeEventListener('abort', abortAndFinalize); + clearTimeout(reqTimeout); + } + + if (request.timeout) { + req.once('socket', function (socket) { + reqTimeout = setTimeout(function () { + reject(new FetchError(`network timeout at: ${request.url}`, 'request-timeout')); + finalize(); + }, request.timeout); + }); + } + + req.on('error', function (err) { + reject(new FetchError(`request to ${request.url} failed, reason: ${err.message}`, 'system', err)); + finalize(); + }); + + req.on('response', function (res) { + clearTimeout(reqTimeout); + + const headers = createHeadersLenient(res.headers); + + // HTTP fetch step 5 + if (fetch.isRedirect(res.statusCode)) { + // HTTP fetch step 5.2 + const location = headers.get('Location'); + + // HTTP fetch step 5.3 + const locationURL = location === null ? null : resolve_url(request.url, location); + + // HTTP fetch step 5.5 + switch (request.redirect) { + case 'error': + reject(new FetchError(`redirect mode is set to error: ${request.url}`, 'no-redirect')); + finalize(); + return; + case 'manual': + // node-fetch-specific step: make manual redirect a bit easier to use by setting the Location header value to the resolved URL. + if (locationURL !== null) { + // handle corrupted header + try { + headers.set('Location', locationURL); + } catch (err) { + // istanbul ignore next: nodejs server prevent invalid response headers, we can't test this through normal request + reject(err); + } + } + break; + case 'follow': + // HTTP-redirect fetch step 2 + if (locationURL === null) { + break; + } + + // HTTP-redirect fetch step 5 + if (request.counter >= request.follow) { + reject(new FetchError(`maximum redirect reached at: ${request.url}`, 'max-redirect')); + finalize(); + return; + } + + // HTTP-redirect fetch step 6 (counter increment) + // Create a new Request object. 
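+ // A stream body cannot be replayed on a redirect, which is why non-303
+ // redirects with an unknown-length body are rejected just below with
+ // 'unsupported-redirect'.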
+ const requestOpts = { + headers: new Headers(request.headers), + follow: request.follow, + counter: request.counter + 1, + agent: request.agent, + compress: request.compress, + method: request.method, + body: request.body, + signal: request.signal, + timeout: request.timeout + }; + + // HTTP-redirect fetch step 9 + if (res.statusCode !== 303 && request.body && getTotalBytes(request) === null) { + reject(new FetchError('Cannot follow redirect with body being a readable stream', 'unsupported-redirect')); + finalize(); + return; + } + + // HTTP-redirect fetch step 11 + if (res.statusCode === 303 || (res.statusCode === 301 || res.statusCode === 302) && request.method === 'POST') { + requestOpts.method = 'GET'; + requestOpts.body = undefined; + requestOpts.headers.delete('content-length'); + } + + // HTTP-redirect fetch step 15 + resolve(fetch(new Request(locationURL, requestOpts))); + finalize(); + return; + } + } + + // prepare response + res.once('end', function () { + if (signal) signal.removeEventListener('abort', abortAndFinalize); + }); + let body = res.pipe(new PassThrough$1()); + + const response_options = { + url: request.url, + status: res.statusCode, + statusText: res.statusMessage, + headers: headers, + size: request.size, + timeout: request.timeout, + counter: request.counter + }; + + // HTTP-network fetch step 12.1.1.3 + const codings = headers.get('Content-Encoding'); + + // HTTP-network fetch step 12.1.1.4: handle content codings + + // in following scenarios we ignore compression support + // 1. compression support is disabled + // 2. HEAD request + // 3. no Content-Encoding header + // 4. no content response (204) + // 5. content not modified response (304) + if (!request.compress || request.method === 'HEAD' || codings === null || res.statusCode === 204 || res.statusCode === 304) { + response = new Response(body, response_options); + resolve(response); + return; + } + + // For Node v6+ + // Be less strict when decoding compressed responses, since sometimes + // servers send slightly invalid responses that are still accepted + // by common browsers. + // Always using Z_SYNC_FLUSH is what cURL does. 
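+ // gzip, deflate and br are each handled below; deflate additionally
+ // sniffs the first chunk to tell zlib-wrapped data from raw deflate
+ // emitted by some old IIS/Apache servers.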
+ const zlibOptions = { + flush: zlib.Z_SYNC_FLUSH, + finishFlush: zlib.Z_SYNC_FLUSH + }; + + // for gzip + if (codings == 'gzip' || codings == 'x-gzip') { + body = body.pipe(zlib.createGunzip(zlibOptions)); + response = new Response(body, response_options); + resolve(response); + return; + } + + // for deflate + if (codings == 'deflate' || codings == 'x-deflate') { + // handle the infamous raw deflate response from old servers + // a hack for old IIS and Apache servers + const raw = res.pipe(new PassThrough$1()); + raw.once('data', function (chunk) { + // see http://stackoverflow.com/questions/37519828 + if ((chunk[0] & 0x0F) === 0x08) { + body = body.pipe(zlib.createInflate()); + } else { + body = body.pipe(zlib.createInflateRaw()); + } + response = new Response(body, response_options); + resolve(response); + }); + return; + } + + // for br + if (codings == 'br' && typeof zlib.createBrotliDecompress === 'function') { + body = body.pipe(zlib.createBrotliDecompress()); + response = new Response(body, response_options); + resolve(response); + return; + } + + // otherwise, use response as-is + response = new Response(body, response_options); + resolve(response); + }); + + writeToStream(req, request); + }); +} +/** + * Redirect code matching + * + * @param Number code Status code + * @return Boolean + */ +fetch.isRedirect = function (code) { + return code === 301 || code === 302 || code === 303 || code === 307 || code === 308; +}; + +// expose Promise +fetch.Promise = global.Promise; + +module.exports = exports = fetch; +Object.defineProperty(exports, "__esModule", { value: true }); +exports.default = exports; +exports.Headers = Headers; +exports.Request = Request; +exports.Response = Response; +exports.FetchError = FetchError; diff --git a/scripts/metrics/node_modules/node-fetch/lib/index.mjs b/scripts/metrics/node_modules/node-fetch/lib/index.mjs new file mode 100644 index 00000000000..dca525658b4 --- /dev/null +++ b/scripts/metrics/node_modules/node-fetch/lib/index.mjs @@ -0,0 +1,1629 @@ +import Stream from 'stream'; +import http from 'http'; +import Url from 'url'; +import https from 'https'; +import zlib from 'zlib'; + +// Based on https://github.com/tmpvar/jsdom/blob/aa85b2abf07766ff7bf5c1f6daafb3726f2f2db5/lib/jsdom/living/blob.js + +// fix for "Readable" isn't a named export issue +const Readable = Stream.Readable; + +const BUFFER = Symbol('buffer'); +const TYPE = Symbol('type'); + +class Blob { + constructor() { + this[TYPE] = ''; + + const blobParts = arguments[0]; + const options = arguments[1]; + + const buffers = []; + let size = 0; + + if (blobParts) { + const a = blobParts; + const length = Number(a.length); + for (let i = 0; i < length; i++) { + const element = a[i]; + let buffer; + if (element instanceof Buffer) { + buffer = element; + } else if (ArrayBuffer.isView(element)) { + buffer = Buffer.from(element.buffer, element.byteOffset, element.byteLength); + } else if (element instanceof ArrayBuffer) { + buffer = Buffer.from(element); + } else if (element instanceof Blob) { + buffer = element[BUFFER]; + } else { + buffer = Buffer.from(typeof element === 'string' ? 
element : String(element)); + } + size += buffer.length; + buffers.push(buffer); + } + } + + this[BUFFER] = Buffer.concat(buffers); + + let type = options && options.type !== undefined && String(options.type).toLowerCase(); + if (type && !/[^\u0020-\u007E]/.test(type)) { + this[TYPE] = type; + } + } + get size() { + return this[BUFFER].length; + } + get type() { + return this[TYPE]; + } + text() { + return Promise.resolve(this[BUFFER].toString()); + } + arrayBuffer() { + const buf = this[BUFFER]; + const ab = buf.buffer.slice(buf.byteOffset, buf.byteOffset + buf.byteLength); + return Promise.resolve(ab); + } + stream() { + const readable = new Readable(); + readable._read = function () {}; + readable.push(this[BUFFER]); + readable.push(null); + return readable; + } + toString() { + return '[object Blob]'; + } + slice() { + const size = this.size; + + const start = arguments[0]; + const end = arguments[1]; + let relativeStart, relativeEnd; + if (start === undefined) { + relativeStart = 0; + } else if (start < 0) { + relativeStart = Math.max(size + start, 0); + } else { + relativeStart = Math.min(start, size); + } + if (end === undefined) { + relativeEnd = size; + } else if (end < 0) { + relativeEnd = Math.max(size + end, 0); + } else { + relativeEnd = Math.min(end, size); + } + const span = Math.max(relativeEnd - relativeStart, 0); + + const buffer = this[BUFFER]; + const slicedBuffer = buffer.slice(relativeStart, relativeStart + span); + const blob = new Blob([], { type: arguments[2] }); + blob[BUFFER] = slicedBuffer; + return blob; + } +} + +Object.defineProperties(Blob.prototype, { + size: { enumerable: true }, + type: { enumerable: true }, + slice: { enumerable: true } +}); + +Object.defineProperty(Blob.prototype, Symbol.toStringTag, { + value: 'Blob', + writable: false, + enumerable: false, + configurable: true +}); + +/** + * fetch-error.js + * + * FetchError interface for operational errors + */ + +/** + * Create FetchError instance + * + * @param String message Error message for human + * @param String type Error type for machine + * @param String systemError For Node.js system error + * @return FetchError + */ +function FetchError(message, type, systemError) { + Error.call(this, message); + + this.message = message; + this.type = type; + + // when err.type is `system`, err.code contains system error code + if (systemError) { + this.code = this.errno = systemError.code; + } + + // hide custom error implementation details from end-users + Error.captureStackTrace(this, this.constructor); +} + +FetchError.prototype = Object.create(Error.prototype); +FetchError.prototype.constructor = FetchError; +FetchError.prototype.name = 'FetchError'; + +let convert; +try { + convert = require('encoding').convert; +} catch (e) {} + +const INTERNALS = Symbol('Body internals'); + +// fix an issue where "PassThrough" isn't a named export for node <10 +const PassThrough = Stream.PassThrough; + +/** + * Body mixin + * + * Ref: https://fetch.spec.whatwg.org/#body + * + * @param Stream body Readable stream + * @param Object opts Response options + * @return Void + */ +function Body(body) { + var _this = this; + + var _ref = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : {}, + _ref$size = _ref.size; + + let size = _ref$size === undefined ? 0 : _ref$size; + var _ref$timeout = _ref.timeout; + let timeout = _ref$timeout === undefined ? 
0 : _ref$timeout; + + if (body == null) { + // body is undefined or null + body = null; + } else if (isURLSearchParams(body)) { + // body is a URLSearchParams + body = Buffer.from(body.toString()); + } else if (isBlob(body)) ; else if (Buffer.isBuffer(body)) ; else if (Object.prototype.toString.call(body) === '[object ArrayBuffer]') { + // body is ArrayBuffer + body = Buffer.from(body); + } else if (ArrayBuffer.isView(body)) { + // body is ArrayBufferView + body = Buffer.from(body.buffer, body.byteOffset, body.byteLength); + } else if (body instanceof Stream) ; else { + // none of the above + // coerce to string then buffer + body = Buffer.from(String(body)); + } + this[INTERNALS] = { + body, + disturbed: false, + error: null + }; + this.size = size; + this.timeout = timeout; + + if (body instanceof Stream) { + body.on('error', function (err) { + const error = err.name === 'AbortError' ? err : new FetchError(`Invalid response body while trying to fetch ${_this.url}: ${err.message}`, 'system', err); + _this[INTERNALS].error = error; + }); + } +} + +Body.prototype = { + get body() { + return this[INTERNALS].body; + }, + + get bodyUsed() { + return this[INTERNALS].disturbed; + }, + + /** + * Decode response as ArrayBuffer + * + * @return Promise + */ + arrayBuffer() { + return consumeBody.call(this).then(function (buf) { + return buf.buffer.slice(buf.byteOffset, buf.byteOffset + buf.byteLength); + }); + }, + + /** + * Return raw response as Blob + * + * @return Promise + */ + blob() { + let ct = this.headers && this.headers.get('content-type') || ''; + return consumeBody.call(this).then(function (buf) { + return Object.assign( + // Prevent copying + new Blob([], { + type: ct.toLowerCase() + }), { + [BUFFER]: buf + }); + }); + }, + + /** + * Decode response as json + * + * @return Promise + */ + json() { + var _this2 = this; + + return consumeBody.call(this).then(function (buffer) { + try { + return JSON.parse(buffer.toString()); + } catch (err) { + return Body.Promise.reject(new FetchError(`invalid json response body at ${_this2.url} reason: ${err.message}`, 'invalid-json')); + } + }); + }, + + /** + * Decode response as text + * + * @return Promise + */ + text() { + return consumeBody.call(this).then(function (buffer) { + return buffer.toString(); + }); + }, + + /** + * Decode response as buffer (non-spec api) + * + * @return Promise + */ + buffer() { + return consumeBody.call(this); + }, + + /** + * Decode response as text, while automatically detecting the encoding and + * trying to decode to UTF-8 (non-spec api) + * + * @return Promise + */ + textConverted() { + var _this3 = this; + + return consumeBody.call(this).then(function (buffer) { + return convertBody(buffer, _this3.headers); + }); + } +}; + +// In browsers, all properties are enumerable. +Object.defineProperties(Body.prototype, { + body: { enumerable: true }, + bodyUsed: { enumerable: true }, + arrayBuffer: { enumerable: true }, + blob: { enumerable: true }, + json: { enumerable: true }, + text: { enumerable: true } +}); + +Body.mixIn = function (proto) { + for (const name of Object.getOwnPropertyNames(Body.prototype)) { + // istanbul ignore else: future proof + if (!(name in proto)) { + const desc = Object.getOwnPropertyDescriptor(Body.prototype, name); + Object.defineProperty(proto, name, desc); + } + } +}; + +/** + * Consume and convert an entire Body to a Buffer. 
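+ *
+ * Usage sketch (hypothetical `res` obtained from fetch(); a non-authoritative
+ * illustration of how this function surfaces to callers):
+ *
+ *   res.text().then(function (str) { console.log(str); }); // delegates here
+ *   res.text(); // rejects: the body has already been marked disturbed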
+ * + * Ref: https://fetch.spec.whatwg.org/#concept-body-consume-body + * + * @return Promise + */ +function consumeBody() { + var _this4 = this; + + if (this[INTERNALS].disturbed) { + return Body.Promise.reject(new TypeError(`body used already for: ${this.url}`)); + } + + this[INTERNALS].disturbed = true; + + if (this[INTERNALS].error) { + return Body.Promise.reject(this[INTERNALS].error); + } + + let body = this.body; + + // body is null + if (body === null) { + return Body.Promise.resolve(Buffer.alloc(0)); + } + + // body is blob + if (isBlob(body)) { + body = body.stream(); + } + + // body is buffer + if (Buffer.isBuffer(body)) { + return Body.Promise.resolve(body); + } + + // istanbul ignore if: should never happen + if (!(body instanceof Stream)) { + return Body.Promise.resolve(Buffer.alloc(0)); + } + + // body is stream + // get ready to actually consume the body + let accum = []; + let accumBytes = 0; + let abort = false; + + return new Body.Promise(function (resolve, reject) { + let resTimeout; + + // allow timeout on slow response body + if (_this4.timeout) { + resTimeout = setTimeout(function () { + abort = true; + reject(new FetchError(`Response timeout while trying to fetch ${_this4.url} (over ${_this4.timeout}ms)`, 'body-timeout')); + }, _this4.timeout); + } + + // handle stream errors + body.on('error', function (err) { + if (err.name === 'AbortError') { + // if the request was aborted, reject with this Error + abort = true; + reject(err); + } else { + // other errors, such as incorrect content-encoding + reject(new FetchError(`Invalid response body while trying to fetch ${_this4.url}: ${err.message}`, 'system', err)); + } + }); + + body.on('data', function (chunk) { + if (abort || chunk === null) { + return; + } + + if (_this4.size && accumBytes + chunk.length > _this4.size) { + abort = true; + reject(new FetchError(`content size at ${_this4.url} over limit: ${_this4.size}`, 'max-size')); + return; + } + + accumBytes += chunk.length; + accum.push(chunk); + }); + + body.on('end', function () { + if (abort) { + return; + } + + clearTimeout(resTimeout); + + try { + resolve(Buffer.concat(accum, accumBytes)); + } catch (err) { + // handle streams that have accumulated too much data (issue #414) + reject(new FetchError(`Could not create Buffer from response body for ${_this4.url}: ${err.message}`, 'system', err)); + } + }); + }); +} + +/** + * Detect buffer encoding and convert to target encoding + * ref: http://www.w3.org/TR/2011/WD-html5-20110113/parsing.html#determining-the-character-encoding + * + * @param Buffer buffer Incoming buffer + * @param String encoding Target encoding + * @return String + */ +function convertBody(buffer, headers) { + if (typeof convert !== 'function') { + throw new Error('The package `encoding` must be installed to use the textConverted() function'); + } + + const ct = headers.get('content-type'); + let charset = 'utf-8'; + let res, str; + + // header + if (ct) { + res = /charset=([^;]*)/i.exec(ct); + } + + // no charset in content type, peek at response body for at most 1024 bytes + str = buffer.slice(0, 1024).toString(); + + // html5 + if (!res && str) { + res = / 0 && arguments[0] !== undefined ? 
arguments[0] : undefined; + + this[MAP] = Object.create(null); + + if (init instanceof Headers) { + const rawHeaders = init.raw(); + const headerNames = Object.keys(rawHeaders); + + for (const headerName of headerNames) { + for (const value of rawHeaders[headerName]) { + this.append(headerName, value); + } + } + + return; + } + + // We don't worry about converting prop to ByteString here as append() + // will handle it. + if (init == null) ; else if (typeof init === 'object') { + const method = init[Symbol.iterator]; + if (method != null) { + if (typeof method !== 'function') { + throw new TypeError('Header pairs must be iterable'); + } + + // sequence> + // Note: per spec we have to first exhaust the lists then process them + const pairs = []; + for (const pair of init) { + if (typeof pair !== 'object' || typeof pair[Symbol.iterator] !== 'function') { + throw new TypeError('Each header pair must be iterable'); + } + pairs.push(Array.from(pair)); + } + + for (const pair of pairs) { + if (pair.length !== 2) { + throw new TypeError('Each header pair must be a name/value tuple'); + } + this.append(pair[0], pair[1]); + } + } else { + // record + for (const key of Object.keys(init)) { + const value = init[key]; + this.append(key, value); + } + } + } else { + throw new TypeError('Provided initializer must be an object'); + } + } + + /** + * Return combined header value given name + * + * @param String name Header name + * @return Mixed + */ + get(name) { + name = `${name}`; + validateName(name); + const key = find(this[MAP], name); + if (key === undefined) { + return null; + } + + return this[MAP][key].join(', '); + } + + /** + * Iterate over all headers + * + * @param Function callback Executed for each item with parameters (value, name, thisArg) + * @param Boolean thisArg `this` context for callback function + * @return Void + */ + forEach(callback) { + let thisArg = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : undefined; + + let pairs = getHeaders(this); + let i = 0; + while (i < pairs.length) { + var _pairs$i = pairs[i]; + const name = _pairs$i[0], + value = _pairs$i[1]; + + callback.call(thisArg, value, name, this); + pairs = getHeaders(this); + i++; + } + } + + /** + * Overwrite header values given name + * + * @param String name Header name + * @param String value Header value + * @return Void + */ + set(name, value) { + name = `${name}`; + value = `${value}`; + validateName(name); + validateValue(value); + const key = find(this[MAP], name); + this[MAP][key !== undefined ? 
key : name] = [value]; + } + + /** + * Append a value onto existing header + * + * @param String name Header name + * @param String value Header value + * @return Void + */ + append(name, value) { + name = `${name}`; + value = `${value}`; + validateName(name); + validateValue(value); + const key = find(this[MAP], name); + if (key !== undefined) { + this[MAP][key].push(value); + } else { + this[MAP][name] = [value]; + } + } + + /** + * Check for header name existence + * + * @param String name Header name + * @return Boolean + */ + has(name) { + name = `${name}`; + validateName(name); + return find(this[MAP], name) !== undefined; + } + + /** + * Delete all header values given name + * + * @param String name Header name + * @return Void + */ + delete(name) { + name = `${name}`; + validateName(name); + const key = find(this[MAP], name); + if (key !== undefined) { + delete this[MAP][key]; + } + } + + /** + * Return raw headers (non-spec api) + * + * @return Object + */ + raw() { + return this[MAP]; + } + + /** + * Get an iterator on keys. + * + * @return Iterator + */ + keys() { + return createHeadersIterator(this, 'key'); + } + + /** + * Get an iterator on values. + * + * @return Iterator + */ + values() { + return createHeadersIterator(this, 'value'); + } + + /** + * Get an iterator on entries. + * + * This is the default iterator of the Headers object. + * + * @return Iterator + */ + [Symbol.iterator]() { + return createHeadersIterator(this, 'key+value'); + } +} +Headers.prototype.entries = Headers.prototype[Symbol.iterator]; + +Object.defineProperty(Headers.prototype, Symbol.toStringTag, { + value: 'Headers', + writable: false, + enumerable: false, + configurable: true +}); + +Object.defineProperties(Headers.prototype, { + get: { enumerable: true }, + forEach: { enumerable: true }, + set: { enumerable: true }, + append: { enumerable: true }, + has: { enumerable: true }, + delete: { enumerable: true }, + keys: { enumerable: true }, + values: { enumerable: true }, + entries: { enumerable: true } +}); + +function getHeaders(headers) { + let kind = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : 'key+value'; + + const keys = Object.keys(headers[MAP]).sort(); + return keys.map(kind === 'key' ? function (k) { + return k.toLowerCase(); + } : kind === 'value' ? 
function (k) { + return headers[MAP][k].join(', '); + } : function (k) { + return [k.toLowerCase(), headers[MAP][k].join(', ')]; + }); +} + +const INTERNAL = Symbol('internal'); + +function createHeadersIterator(target, kind) { + const iterator = Object.create(HeadersIteratorPrototype); + iterator[INTERNAL] = { + target, + kind, + index: 0 + }; + return iterator; +} + +const HeadersIteratorPrototype = Object.setPrototypeOf({ + next() { + // istanbul ignore if + if (!this || Object.getPrototypeOf(this) !== HeadersIteratorPrototype) { + throw new TypeError('Value of `this` is not a HeadersIterator'); + } + + var _INTERNAL = this[INTERNAL]; + const target = _INTERNAL.target, + kind = _INTERNAL.kind, + index = _INTERNAL.index; + + const values = getHeaders(target, kind); + const len = values.length; + if (index >= len) { + return { + value: undefined, + done: true + }; + } + + this[INTERNAL].index = index + 1; + + return { + value: values[index], + done: false + }; + } +}, Object.getPrototypeOf(Object.getPrototypeOf([][Symbol.iterator]()))); + +Object.defineProperty(HeadersIteratorPrototype, Symbol.toStringTag, { + value: 'HeadersIterator', + writable: false, + enumerable: false, + configurable: true +}); + +/** + * Export the Headers object in a form that Node.js can consume. + * + * @param Headers headers + * @return Object + */ +function exportNodeCompatibleHeaders(headers) { + const obj = Object.assign({ __proto__: null }, headers[MAP]); + + // http.request() only supports string as Host header. This hack makes + // specifying custom Host header possible. + const hostHeaderKey = find(headers[MAP], 'Host'); + if (hostHeaderKey !== undefined) { + obj[hostHeaderKey] = obj[hostHeaderKey][0]; + } + + return obj; +} + +/** + * Create a Headers object from an object of headers, ignoring those that do + * not conform to HTTP grammar productions. + * + * @param Object obj Object of headers + * @return Headers + */ +function createHeadersLenient(obj) { + const headers = new Headers(); + for (const name of Object.keys(obj)) { + if (invalidTokenRegex.test(name)) { + continue; + } + if (Array.isArray(obj[name])) { + for (const val of obj[name]) { + if (invalidHeaderCharRegex.test(val)) { + continue; + } + if (headers[MAP][name] === undefined) { + headers[MAP][name] = [val]; + } else { + headers[MAP][name].push(val); + } + } + } else if (!invalidHeaderCharRegex.test(obj[name])) { + headers[MAP][name] = [obj[name]]; + } + } + return headers; +} + +const INTERNALS$1 = Symbol('Response internals'); + +// fix an issue where "STATUS_CODES" aren't a named export for node <10 +const STATUS_CODES = http.STATUS_CODES; + +/** + * Response class + * + * @param Stream body Readable stream + * @param Object opts Response options + * @return Void + */ +class Response { + constructor() { + let body = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : null; + let opts = arguments.length > 1 && arguments[1] !== undefined ? 
arguments[1] : {}; + + Body.call(this, body, opts); + + const status = opts.status || 200; + const headers = new Headers(opts.headers); + + if (body != null && !headers.has('Content-Type')) { + const contentType = extractContentType(body); + if (contentType) { + headers.append('Content-Type', contentType); + } + } + + this[INTERNALS$1] = { + url: opts.url, + status, + statusText: opts.statusText || STATUS_CODES[status], + headers, + counter: opts.counter + }; + } + + get url() { + return this[INTERNALS$1].url; + } + + get status() { + return this[INTERNALS$1].status; + } + + /** + * Convenience property representing if the request ended normally + */ + get ok() { + return this[INTERNALS$1].status >= 200 && this[INTERNALS$1].status < 300; + } + + get redirected() { + return this[INTERNALS$1].counter > 0; + } + + get statusText() { + return this[INTERNALS$1].statusText; + } + + get headers() { + return this[INTERNALS$1].headers; + } + + /** + * Clone this response + * + * @return Response + */ + clone() { + return new Response(clone(this), { + url: this.url, + status: this.status, + statusText: this.statusText, + headers: this.headers, + ok: this.ok, + redirected: this.redirected + }); + } +} + +Body.mixIn(Response.prototype); + +Object.defineProperties(Response.prototype, { + url: { enumerable: true }, + status: { enumerable: true }, + ok: { enumerable: true }, + redirected: { enumerable: true }, + statusText: { enumerable: true }, + headers: { enumerable: true }, + clone: { enumerable: true } +}); + +Object.defineProperty(Response.prototype, Symbol.toStringTag, { + value: 'Response', + writable: false, + enumerable: false, + configurable: true +}); + +const INTERNALS$2 = Symbol('Request internals'); + +// fix an issue where "format", "parse" aren't a named export for node <10 +const parse_url = Url.parse; +const format_url = Url.format; + +const streamDestructionSupported = 'destroy' in Stream.Readable.prototype; + +/** + * Check if a value is an instance of Request. + * + * @param Mixed input + * @return Boolean + */ +function isRequest(input) { + return typeof input === 'object' && typeof input[INTERNALS$2] === 'object'; +} + +function isAbortSignal(signal) { + const proto = signal && typeof signal === 'object' && Object.getPrototypeOf(signal); + return !!(proto && proto.constructor.name === 'AbortSignal'); +} + +/** + * Request class + * + * @param Mixed input Url or Request instance + * @param Object init Custom options + * @return Void + */ +class Request { + constructor(input) { + let init = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : {}; + + let parsedURL; + + // normalize input + if (!isRequest(input)) { + if (input && input.href) { + // in order to support Node.js' Url objects; though WHATWG's URL objects + // will fall into this branch also (since their `toString()` will return + // `href` property anyway) + parsedURL = parse_url(input.href); + } else { + // coerce input to a string before attempting to parse + parsedURL = parse_url(`${input}`); + } + input = {}; + } else { + parsedURL = parse_url(input.url); + } + + let method = init.method || input.method || 'GET'; + method = method.toUpperCase(); + + if ((init.body != null || isRequest(input) && input.body !== null) && (method === 'GET' || method === 'HEAD')) { + throw new TypeError('Request with GET/HEAD method cannot have body'); + } + + let inputBody = init.body != null ? init.body : isRequest(input) && input.body !== null ? 
clone(input) : null; + + Body.call(this, inputBody, { + timeout: init.timeout || input.timeout || 0, + size: init.size || input.size || 0 + }); + + const headers = new Headers(init.headers || input.headers || {}); + + if (inputBody != null && !headers.has('Content-Type')) { + const contentType = extractContentType(inputBody); + if (contentType) { + headers.append('Content-Type', contentType); + } + } + + let signal = isRequest(input) ? input.signal : null; + if ('signal' in init) signal = init.signal; + + if (signal != null && !isAbortSignal(signal)) { + throw new TypeError('Expected signal to be an instanceof AbortSignal'); + } + + this[INTERNALS$2] = { + method, + redirect: init.redirect || input.redirect || 'follow', + headers, + parsedURL, + signal + }; + + // node-fetch-only options + this.follow = init.follow !== undefined ? init.follow : input.follow !== undefined ? input.follow : 20; + this.compress = init.compress !== undefined ? init.compress : input.compress !== undefined ? input.compress : true; + this.counter = init.counter || input.counter || 0; + this.agent = init.agent || input.agent; + } + + get method() { + return this[INTERNALS$2].method; + } + + get url() { + return format_url(this[INTERNALS$2].parsedURL); + } + + get headers() { + return this[INTERNALS$2].headers; + } + + get redirect() { + return this[INTERNALS$2].redirect; + } + + get signal() { + return this[INTERNALS$2].signal; + } + + /** + * Clone this request + * + * @return Request + */ + clone() { + return new Request(this); + } +} + +Body.mixIn(Request.prototype); + +Object.defineProperty(Request.prototype, Symbol.toStringTag, { + value: 'Request', + writable: false, + enumerable: false, + configurable: true +}); + +Object.defineProperties(Request.prototype, { + method: { enumerable: true }, + url: { enumerable: true }, + headers: { enumerable: true }, + redirect: { enumerable: true }, + clone: { enumerable: true }, + signal: { enumerable: true } +}); + +/** + * Convert a Request to Node.js http request options. 
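+ *
+ * Roughly: the result merges the parsed URL with the request method, the
+ * flattened headers, and the agent, e.g. (hypothetical values):
+ *
+ *   { protocol: 'https:', hostname: 'example.com', path: '/',
+ *     method: 'GET', headers: { ... }, agent: undefined }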
+ * + * @param Request A Request instance + * @return Object The options object to be passed to http.request + */ +function getNodeRequestOptions(request) { + const parsedURL = request[INTERNALS$2].parsedURL; + const headers = new Headers(request[INTERNALS$2].headers); + + // fetch step 1.3 + if (!headers.has('Accept')) { + headers.set('Accept', '*/*'); + } + + // Basic fetch + if (!parsedURL.protocol || !parsedURL.hostname) { + throw new TypeError('Only absolute URLs are supported'); + } + + if (!/^https?:$/.test(parsedURL.protocol)) { + throw new TypeError('Only HTTP(S) protocols are supported'); + } + + if (request.signal && request.body instanceof Stream.Readable && !streamDestructionSupported) { + throw new Error('Cancellation of streamed requests with AbortSignal is not supported in node < 8'); + } + + // HTTP-network-or-cache fetch steps 2.4-2.7 + let contentLengthValue = null; + if (request.body == null && /^(POST|PUT)$/i.test(request.method)) { + contentLengthValue = '0'; + } + if (request.body != null) { + const totalBytes = getTotalBytes(request); + if (typeof totalBytes === 'number') { + contentLengthValue = String(totalBytes); + } + } + if (contentLengthValue) { + headers.set('Content-Length', contentLengthValue); + } + + // HTTP-network-or-cache fetch step 2.11 + if (!headers.has('User-Agent')) { + headers.set('User-Agent', 'node-fetch/1.0 (+https://github.com/bitinn/node-fetch)'); + } + + // HTTP-network-or-cache fetch step 2.15 + if (request.compress && !headers.has('Accept-Encoding')) { + headers.set('Accept-Encoding', 'gzip,deflate'); + } + + if (!headers.has('Connection') && !request.agent) { + headers.set('Connection', 'close'); + } + + // HTTP-network fetch step 4.2 + // chunked encoding is handled by Node.js + + return Object.assign({}, parsedURL, { + method: request.method, + headers: exportNodeCompatibleHeaders(headers), + agent: request.agent + }); +} + +/** + * abort-error.js + * + * AbortError interface for cancelled requests + */ + +/** + * Create AbortError instance + * + * @param String message Error message for human + * @return AbortError + */ +function AbortError(message) { + Error.call(this, message); + + this.type = 'aborted'; + this.message = message; + + // hide custom error implementation details from end-users + Error.captureStackTrace(this, this.constructor); +} + +AbortError.prototype = Object.create(Error.prototype); +AbortError.prototype.constructor = AbortError; +AbortError.prototype.name = 'AbortError'; + +// fix an issue where "PassThrough", "resolve" aren't a named export for node <10 +const PassThrough$1 = Stream.PassThrough; +const resolve_url = Url.resolve; + +/** + * Fetch function + * + * @param Mixed url Absolute url or Request instance + * @param Object opts Fetch options + * @return Promise + */ +function fetch(url, opts) { + + // allow custom promise + if (!fetch.Promise) { + throw new Error('native promise missing, set fetch.Promise to your favorite alternative'); + } + + Body.Promise = fetch.Promise; + + // wrap http.request into fetch + return new fetch.Promise(function (resolve, reject) { + // build request object + const request = new Request(url, opts); + const options = getNodeRequestOptions(request); + + const send = (options.protocol === 'https:' ? 
https : http).request; + const signal = request.signal; + + let response = null; + + const abort = function abort() { + let error = new AbortError('The user aborted a request.'); + reject(error); + if (request.body && request.body instanceof Stream.Readable) { + request.body.destroy(error); + } + if (!response || !response.body) return; + response.body.emit('error', error); + }; + + if (signal && signal.aborted) { + abort(); + return; + } + + const abortAndFinalize = function abortAndFinalize() { + abort(); + finalize(); + }; + + // send request + const req = send(options); + let reqTimeout; + + if (signal) { + signal.addEventListener('abort', abortAndFinalize); + } + + function finalize() { + req.abort(); + if (signal) signal.removeEventListener('abort', abortAndFinalize); + clearTimeout(reqTimeout); + } + + if (request.timeout) { + req.once('socket', function (socket) { + reqTimeout = setTimeout(function () { + reject(new FetchError(`network timeout at: ${request.url}`, 'request-timeout')); + finalize(); + }, request.timeout); + }); + } + + req.on('error', function (err) { + reject(new FetchError(`request to ${request.url} failed, reason: ${err.message}`, 'system', err)); + finalize(); + }); + + req.on('response', function (res) { + clearTimeout(reqTimeout); + + const headers = createHeadersLenient(res.headers); + + // HTTP fetch step 5 + if (fetch.isRedirect(res.statusCode)) { + // HTTP fetch step 5.2 + const location = headers.get('Location'); + + // HTTP fetch step 5.3 + const locationURL = location === null ? null : resolve_url(request.url, location); + + // HTTP fetch step 5.5 + switch (request.redirect) { + case 'error': + reject(new FetchError(`redirect mode is set to error: ${request.url}`, 'no-redirect')); + finalize(); + return; + case 'manual': + // node-fetch-specific step: make manual redirect a bit easier to use by setting the Location header value to the resolved URL. + if (locationURL !== null) { + // handle corrupted header + try { + headers.set('Location', locationURL); + } catch (err) { + // istanbul ignore next: nodejs server prevent invalid response headers, we can't test this through normal request + reject(err); + } + } + break; + case 'follow': + // HTTP-redirect fetch step 2 + if (locationURL === null) { + break; + } + + // HTTP-redirect fetch step 5 + if (request.counter >= request.follow) { + reject(new FetchError(`maximum redirect reached at: ${request.url}`, 'max-redirect')); + finalize(); + return; + } + + // HTTP-redirect fetch step 6 (counter increment) + // Create a new Request object. 
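+					// (All request options are copied onto the follow-up request; the
+					// incremented `counter` is what the `request.counter >= request.follow`
+					// guard above compares on the next hop, capping the redirect chain.)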
+ const requestOpts = { + headers: new Headers(request.headers), + follow: request.follow, + counter: request.counter + 1, + agent: request.agent, + compress: request.compress, + method: request.method, + body: request.body, + signal: request.signal, + timeout: request.timeout + }; + + // HTTP-redirect fetch step 9 + if (res.statusCode !== 303 && request.body && getTotalBytes(request) === null) { + reject(new FetchError('Cannot follow redirect with body being a readable stream', 'unsupported-redirect')); + finalize(); + return; + } + + // HTTP-redirect fetch step 11 + if (res.statusCode === 303 || (res.statusCode === 301 || res.statusCode === 302) && request.method === 'POST') { + requestOpts.method = 'GET'; + requestOpts.body = undefined; + requestOpts.headers.delete('content-length'); + } + + // HTTP-redirect fetch step 15 + resolve(fetch(new Request(locationURL, requestOpts))); + finalize(); + return; + } + } + + // prepare response + res.once('end', function () { + if (signal) signal.removeEventListener('abort', abortAndFinalize); + }); + let body = res.pipe(new PassThrough$1()); + + const response_options = { + url: request.url, + status: res.statusCode, + statusText: res.statusMessage, + headers: headers, + size: request.size, + timeout: request.timeout, + counter: request.counter + }; + + // HTTP-network fetch step 12.1.1.3 + const codings = headers.get('Content-Encoding'); + + // HTTP-network fetch step 12.1.1.4: handle content codings + + // in following scenarios we ignore compression support + // 1. compression support is disabled + // 2. HEAD request + // 3. no Content-Encoding header + // 4. no content response (204) + // 5. content not modified response (304) + if (!request.compress || request.method === 'HEAD' || codings === null || res.statusCode === 204 || res.statusCode === 304) { + response = new Response(body, response_options); + resolve(response); + return; + } + + // For Node v6+ + // Be less strict when decoding compressed responses, since sometimes + // servers send slightly invalid responses that are still accepted + // by common browsers. + // Always using Z_SYNC_FLUSH is what cURL does. 
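+			// (Z_SYNC_FLUSH as `flush` makes zlib emit output per chunk; as
+			// `finishFlush` it accepts bodies that end without a final deflate
+			// block instead of raising "unexpected end of file".)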
+ const zlibOptions = { + flush: zlib.Z_SYNC_FLUSH, + finishFlush: zlib.Z_SYNC_FLUSH + }; + + // for gzip + if (codings == 'gzip' || codings == 'x-gzip') { + body = body.pipe(zlib.createGunzip(zlibOptions)); + response = new Response(body, response_options); + resolve(response); + return; + } + + // for deflate + if (codings == 'deflate' || codings == 'x-deflate') { + // handle the infamous raw deflate response from old servers + // a hack for old IIS and Apache servers + const raw = res.pipe(new PassThrough$1()); + raw.once('data', function (chunk) { + // see http://stackoverflow.com/questions/37519828 + if ((chunk[0] & 0x0F) === 0x08) { + body = body.pipe(zlib.createInflate()); + } else { + body = body.pipe(zlib.createInflateRaw()); + } + response = new Response(body, response_options); + resolve(response); + }); + return; + } + + // for br + if (codings == 'br' && typeof zlib.createBrotliDecompress === 'function') { + body = body.pipe(zlib.createBrotliDecompress()); + response = new Response(body, response_options); + resolve(response); + return; + } + + // otherwise, use response as-is + response = new Response(body, response_options); + resolve(response); + }); + + writeToStream(req, request); + }); +} +/** + * Redirect code matching + * + * @param Number code Status code + * @return Boolean + */ +fetch.isRedirect = function (code) { + return code === 301 || code === 302 || code === 303 || code === 307 || code === 308; +}; + +// expose Promise +fetch.Promise = global.Promise; + +export default fetch; +export { Headers, Request, Response, FetchError }; diff --git a/scripts/metrics/node_modules/node-fetch/package.json b/scripts/metrics/node_modules/node-fetch/package.json new file mode 100644 index 00000000000..e93129c801f --- /dev/null +++ b/scripts/metrics/node_modules/node-fetch/package.json @@ -0,0 +1,94 @@ +{ + "_from": "node-fetch", + "_id": "node-fetch@2.5.0", + "_inBundle": false, + "_integrity": "sha512-YuZKluhWGJwCcUu4RlZstdAxr8bFfOVHakc1mplwHkk8J+tqM1Y5yraYvIUpeX8aY7+crCwiELJq7Vl0o0LWXw==", + "_location": "/node-fetch", + "_phantomChildren": {}, + "_requested": { + "type": "tag", + "registry": true, + "raw": "node-fetch", + "name": "node-fetch", + "escapedName": "node-fetch", + "rawSpec": "", + "saveSpec": null, + "fetchSpec": "latest" + }, + "_requiredBy": [ + "#USER", + "/" + ], + "_resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.5.0.tgz", + "_shasum": "8028c49fc1191bba56a07adc6e2a954644a48501", + "_spec": "node-fetch", + "_where": "/Users/zachary.butler/Work/auto-buildkite-pipelines/scripts/metrics", + "author": { + "name": "David Frank" + }, + "browser": "./browser.js", + "bugs": { + "url": "https://github.com/bitinn/node-fetch/issues" + }, + "bundleDependencies": false, + "dependencies": {}, + "deprecated": false, + "description": "A light-weight module that brings window.fetch to node.js", + "devDependencies": { + "@ungap/url-search-params": "^0.1.2", + "abort-controller": "^1.1.0", + "abortcontroller-polyfill": "^1.3.0", + "babel-core": "^6.26.3", + "babel-plugin-istanbul": "^4.1.6", + "babel-preset-env": "^1.6.1", + "babel-register": "^6.16.3", + "chai": "^3.5.0", + "chai-as-promised": "^7.1.1", + "chai-iterator": "^1.1.1", + "chai-string": "~1.3.0", + "codecov": "^3.3.0", + "cross-env": "^5.2.0", + "form-data": "^2.3.3", + "is-builtin-module": "^1.0.0", + "mocha": "^5.0.0", + "nyc": "11.9.0", + "parted": "^0.1.1", + "promise": "^8.0.3", + "resumer": "0.0.0", + "rollup": "^0.63.4", + "rollup-plugin-babel": "^3.0.7", + "string-to-arraybuffer": 
"^1.0.2", + "whatwg-url": "^5.0.0" + }, + "engines": { + "node": "4.x || >=6.0.0" + }, + "files": [ + "lib/index.js", + "lib/index.mjs", + "lib/index.es.js", + "browser.js" + ], + "homepage": "https://github.com/bitinn/node-fetch", + "keywords": [ + "fetch", + "http", + "promise" + ], + "license": "MIT", + "main": "lib/index", + "module": "lib/index.mjs", + "name": "node-fetch", + "repository": { + "type": "git", + "url": "git+https://github.com/bitinn/node-fetch.git" + }, + "scripts": { + "build": "cross-env BABEL_ENV=rollup rollup -c", + "coverage": "cross-env BABEL_ENV=coverage nyc --reporter json --reporter text mocha -R spec test/test.js && codecov -f coverage/coverage-final.json", + "prepare": "npm run build", + "report": "cross-env BABEL_ENV=coverage nyc --reporter lcov --reporter text mocha -R spec test/test.js", + "test": "cross-env BABEL_ENV=test mocha --require babel-register --throw-deprecation test/test.js" + }, + "version": "2.5.0" +} diff --git a/scripts/metrics/node_modules/sax/LICENSE b/scripts/metrics/node_modules/sax/LICENSE new file mode 100644 index 00000000000..ccffa082c99 --- /dev/null +++ b/scripts/metrics/node_modules/sax/LICENSE @@ -0,0 +1,41 @@ +The ISC License + +Copyright (c) Isaac Z. Schlueter and Contributors + +Permission to use, copy, modify, and/or distribute this software for any +purpose with or without fee is hereby granted, provided that the above +copyright notice and this permission notice appear in all copies. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR +IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +==== + +`String.fromCodePoint` by Mathias Bynens used according to terms of MIT +License, as follows: + + Copyright Mathias Bynens + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + "Software"), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE + LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/scripts/metrics/node_modules/sax/README.md b/scripts/metrics/node_modules/sax/README.md new file mode 100644 index 00000000000..afcd3f3dd65 --- /dev/null +++ b/scripts/metrics/node_modules/sax/README.md @@ -0,0 +1,225 @@ +# sax js + +A sax-style parser for XML and HTML. 
+
+Designed with [node](http://nodejs.org/) in mind, but should work fine in
+the browser or other CommonJS implementations.
+
+## What This Is
+
+* A very simple tool to parse through an XML string.
+* A stepping stone to a streaming HTML parser.
+* A handy way to deal with RSS and other mostly-ok-but-kinda-broken XML
+  docs.
+
+## What This Is (probably) Not
+
+* An HTML Parser - That's a fine goal, but this isn't it. It's just
+  XML.
+* A DOM Builder - You can use it to build an object model out of XML,
+  but it doesn't do that out of the box.
+* XSLT - No DOM = no querying.
+* 100% Compliant with (some other SAX implementation) - Most SAX
+  implementations are in Java and do a lot more than this does.
+* An XML Validator - It does a little validation when in strict mode, but
+  not much.
+* A Schema-Aware XSD Thing - Schemas are an exercise in fetishistic
+  masochism.
+* A DTD-aware Thing - Fetching DTDs is a much bigger job.
+
+## Regarding `<!DOCTYPE`s and `<!ENTITY`s
+
+The parser will handle the basic XML entities in text nodes and attribute
+values: `&amp; &lt; &gt; &apos; &quot;`. It's possible to define additional
+entities in XML by putting them in the DTD. This parser doesn't do anything
+with that. If you want to listen to the `ondoctype` event, and then fetch
+the doctypes, and read the entities and add them to `parser.ENTITIES`, then
+be my guest.
+
+Unknown entities will fail in strict mode, and in loose mode, will pass
+through unmolested.
+
+## Usage
+
+```javascript
+var sax = require("./lib/sax"),
+  strict = true, // set to false for html-mode
+  parser = sax.parser(strict);
+
+parser.onerror = function (e) {
+  // an error happened.
+};
+parser.ontext = function (t) {
+  // got some text.  t is the string of text.
+};
+parser.onopentag = function (node) {
+  // opened a tag.  node has "name" and "attributes"
+};
+parser.onattribute = function (attr) {
+  // an attribute.  attr has "name" and "value"
+};
+parser.onend = function () {
+  // parser stream is done, and ready to have more stuff written to it.
+};
+
+parser.write('<xml>Hello, <who name="world">world</who>!</xml>').close();
+
+// stream usage
+// takes the same options as the parser
+var saxStream = require("sax").createStream(strict, options)
+saxStream.on("error", function (e) {
+  // unhandled errors will throw, since this is a proper node
+  // event emitter.
+  console.error("error!", e)
+  // clear the error
+  this._parser.error = null
+  this._parser.resume()
+})
+saxStream.on("opentag", function (node) {
+  // same object as above
+})
+// pipe is supported, and it's readable/writable
+// same chunks coming in also go out.
+fs.createReadStream("file.xml")
+  .pipe(saxStream)
+  .pipe(fs.createWriteStream("file-copy.xml"))
+```
+
+
+## Arguments
+
+Pass the following arguments to the parser function. All are optional.
+
+`strict` - Boolean. Whether or not to be a jerk. Default: `false`.
+
+`opt` - Object bag of settings regarding string formatting. All default to `false`.
+
+Settings supported:
+
+* `trim` - Boolean. Whether or not to trim text and comment nodes.
+* `normalize` - Boolean. If true, then turn any whitespace into a single
+  space.
+* `lowercase` - Boolean. If true, then lowercase tag names and attribute names
+  in loose mode, rather than uppercasing them.
+* `xmlns` - Boolean. If true, then namespaces are supported.
+* `position` - Boolean. If false, then don't track line/col/position.
+* `strictEntities` - Boolean. If true, only parse [predefined XML
+  entities](http://www.w3.org/TR/REC-xml/#sec-predefined-ent)
+  (`&amp;`, `&apos;`, `&gt;`, `&lt;`, and `&quot;`)
+
+## Methods
+
+`write` - Write bytes onto the stream. You don't have to do this all at
+once. You can keep writing as much as you want.
+
+`close` - Close the stream. Once closed, no more data may be written until
+it is done processing the buffer, which is signaled by the `end` event.
+
+`resume` - To gracefully handle errors, assign a listener to the `error`
+event. Then, when the error is taken care of, you can call `resume` to
+continue parsing. Otherwise, the parser will not continue while in an error
+state.
+
+## Members
+
+At all times, the parser object will have the following members:
+
+`line`, `column`, `position` - Indications of the position in the XML
+document where the parser currently is looking.
+
+`startTagPosition` - Indicates the position where the current tag starts.
+
+`closed` - Boolean indicating whether or not the parser can be written to.
+If it's `true`, then wait for the `ready` event to write again.
+
+`strict` - Boolean indicating whether or not the parser is a jerk.
+
+`opt` - Any options passed into the constructor.
+
+`tag` - The current tag being dealt with.
+
+And a bunch of other stuff that you probably shouldn't touch.
+
+## Events
+
+All events emit with a single argument. To listen to an event, assign a
+function to `on<eventname>`. Functions get executed in the this-context of
+the parser object. The list of supported events is also in the exported
+`EVENTS` array.
+
+When using the stream interface, assign handlers using the EventEmitter
+`on` function in the normal fashion.
+
+`error` - Indication that something bad happened. The error will be hanging
+out on `parser.error`, and must be deleted before parsing can continue. By
+listening to this event, you can keep an eye on that kind of stuff. Note:
+this happens *much* more in strict mode. Argument: instance of `Error`.
+
+`text` - Text node. Argument: string of text.
+
+`doctype` - The `<!DOCTYPE` declaration. Argument: doctype string.
+
+`processinginstruction` - Stuff like `<?xml foo="blerg" ?>`. Argument:
+object with `name` and `body` members. Attributes are not parsed, as
+processing instructions have implementation dependent semantics.
+
+`sgmldeclaration` - Random SGML declarations. Stuff like `<!ENTITY p>`
+would trigger this kind of event. This is a weird thing to support, so it
+might go away at some point. SAX isn't intended to be used to parse SGML,
+after all.
+
+`opentagstart` - Emitted immediately when the tag name is available,
+but before any attributes are encountered. Argument: object with a
+`name` field and an empty `attributes` set. Note that this is the
+same object that will later be emitted in the `opentag` event.
+
+`opentag` - An opening tag. Argument: object with `name` and `attributes`.
+In non-strict mode, tag names are uppercased, unless the `lowercase`
+option is set. If the `xmlns` option is set, then it will contain
+namespace binding information on the `ns` member, and will have a
+`local`, `prefix`, and `uri` member.
+
+`closetag` - A closing tag. In loose mode, tags are auto-closed if their
+parent closes. In strict mode, well-formedness is enforced. Note that
+self-closing tags will have `closeTag` emitted immediately after `openTag`.
+Argument: tag name.
+
+`attribute` - An attribute node. Argument: object with `name` and `value`.
+In non-strict mode, attribute names are uppercased, unless the `lowercase`
+option is set. If the `xmlns` option is set, it will also contain namespace
+information.
+
+`comment` - A comment node. Argument: the string of the comment.
+
+`opencdata` - The opening tag of a `<![CDATA[` block.
+
+`cdata` - The text of a `<![CDATA[` block. Since `<![CDATA[` blocks can get
+quite large, this event may fire multiple times for a single block, if it
+is broken up into multiple `write()`s. Argument: the string of random
+character data.
+
+`closecdata` - The closing tag (`]]>`) of a `<![CDATA[` block.
+
+`opennamespace` - If the `xmlns` option is set, then this event will
+signal the start of a new namespace binding.
+
+`closenamespace` - If the `xmlns` option is set, then this event will
+signal the end of a namespace binding.
+
+`end` - Indication that the closed stream has ended.
+
+`ready` - Indication that the stream has reset, and is ready to be written
+to.
+
+`noscript` - In non-strict mode, `<script>` tags trigger a `"script"`
+event, and their contents are not checked for special xml characters.
+If you pass `noscript: true`, then this behavior is suppressed.
+
+## Reporting Problems
+
+It's best to write a failing test if you find an issue. I will always
+accept pull requests with failing tests if they demonstrate intended
+behavior, but it is very hard to figure out what issue you're describing
+without a test. Writing a test is also the best way for you yourself
+to figure out if you really understand the issue you think you have with
+sax-js.
diff --git a/scripts/metrics/node_modules/sax/lib/sax.js b/scripts/metrics/node_modules/sax/lib/sax.js
new file mode 100644
index 00000000000..795d607ef63
--- /dev/null
+++ b/scripts/metrics/node_modules/sax/lib/sax.js
@@ -0,0 +1,1565 @@
+;(function (sax) { // wrapper for non-node envs
+  sax.parser = function (strict, opt) { return new SAXParser(strict, opt) }
+  sax.SAXParser = SAXParser
+  sax.SAXStream = SAXStream
+  sax.createStream = createStream
+
+  // When we pass the MAX_BUFFER_LENGTH position, start checking for buffer overruns.
+ // When we check, schedule the next check for MAX_BUFFER_LENGTH - (max(buffer lengths)), + // since that's the earliest that a buffer overrun could occur. This way, checks are + // as rare as required, but as often as necessary to ensure never crossing this bound. + // Furthermore, buffers are only tested at most once per write(), so passing a very + // large string into write() might have undesirable effects, but this is manageable by + // the caller, so it is assumed to be safe. Thus, a call to write() may, in the extreme + // edge case, result in creating at most one complete copy of the string passed in. + // Set to Infinity to have unlimited buffers. + sax.MAX_BUFFER_LENGTH = 64 * 1024 + + var buffers = [ + 'comment', 'sgmlDecl', 'textNode', 'tagName', 'doctype', + 'procInstName', 'procInstBody', 'entity', 'attribName', + 'attribValue', 'cdata', 'script' + ] + + sax.EVENTS = [ + 'text', + 'processinginstruction', + 'sgmldeclaration', + 'doctype', + 'comment', + 'opentagstart', + 'attribute', + 'opentag', + 'closetag', + 'opencdata', + 'cdata', + 'closecdata', + 'error', + 'end', + 'ready', + 'script', + 'opennamespace', + 'closenamespace' + ] + + function SAXParser (strict, opt) { + if (!(this instanceof SAXParser)) { + return new SAXParser(strict, opt) + } + + var parser = this + clearBuffers(parser) + parser.q = parser.c = '' + parser.bufferCheckPosition = sax.MAX_BUFFER_LENGTH + parser.opt = opt || {} + parser.opt.lowercase = parser.opt.lowercase || parser.opt.lowercasetags + parser.looseCase = parser.opt.lowercase ? 'toLowerCase' : 'toUpperCase' + parser.tags = [] + parser.closed = parser.closedRoot = parser.sawRoot = false + parser.tag = parser.error = null + parser.strict = !!strict + parser.noscript = !!(strict || parser.opt.noscript) + parser.state = S.BEGIN + parser.strictEntities = parser.opt.strictEntities + parser.ENTITIES = parser.strictEntities ? Object.create(sax.XML_ENTITIES) : Object.create(sax.ENTITIES) + parser.attribList = [] + + // namespaces form a prototype chain. + // it always points at the current tag, + // which protos to its parent tag. + if (parser.opt.xmlns) { + parser.ns = Object.create(rootNS) + } + + // mostly just for error reporting + parser.trackPosition = parser.opt.position !== false + if (parser.trackPosition) { + parser.position = parser.line = parser.column = 0 + } + emit(parser, 'onready') + } + + if (!Object.create) { + Object.create = function (o) { + function F () {} + F.prototype = o + var newf = new F() + return newf + } + } + + if (!Object.keys) { + Object.keys = function (o) { + var a = [] + for (var i in o) if (o.hasOwnProperty(i)) a.push(i) + return a + } + } + + function checkBufferLength (parser) { + var maxAllowed = Math.max(sax.MAX_BUFFER_LENGTH, 10) + var maxActual = 0 + for (var i = 0, l = buffers.length; i < l; i++) { + var len = parser[buffers[i]].length + if (len > maxAllowed) { + // Text/cdata nodes can get big, and since they're buffered, + // we can get here under normal conditions. + // Avoid issues by emitting the text node now, + // so at least it won't get any bigger. + switch (buffers[i]) { + case 'textNode': + closeText(parser) + break + + case 'cdata': + emitNode(parser, 'oncdata', parser.cdata) + parser.cdata = '' + break + + case 'script': + emitNode(parser, 'onscript', parser.script) + parser.script = '' + break + + default: + error(parser, 'Max buffer length exceeded: ' + buffers[i]) + } + } + maxActual = Math.max(maxActual, len) + } + // schedule the next check for the earliest possible buffer overrun. 
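+    // (example: with MAX_BUFFER_LENGTH = 65536 and a largest buffer of 1024
+    // characters, the next check is scheduled 64512 characters further on)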
+ var m = sax.MAX_BUFFER_LENGTH - maxActual + parser.bufferCheckPosition = m + parser.position + } + + function clearBuffers (parser) { + for (var i = 0, l = buffers.length; i < l; i++) { + parser[buffers[i]] = '' + } + } + + function flushBuffers (parser) { + closeText(parser) + if (parser.cdata !== '') { + emitNode(parser, 'oncdata', parser.cdata) + parser.cdata = '' + } + if (parser.script !== '') { + emitNode(parser, 'onscript', parser.script) + parser.script = '' + } + } + + SAXParser.prototype = { + end: function () { end(this) }, + write: write, + resume: function () { this.error = null; return this }, + close: function () { return this.write(null) }, + flush: function () { flushBuffers(this) } + } + + var Stream + try { + Stream = require('stream').Stream + } catch (ex) { + Stream = function () {} + } + + var streamWraps = sax.EVENTS.filter(function (ev) { + return ev !== 'error' && ev !== 'end' + }) + + function createStream (strict, opt) { + return new SAXStream(strict, opt) + } + + function SAXStream (strict, opt) { + if (!(this instanceof SAXStream)) { + return new SAXStream(strict, opt) + } + + Stream.apply(this) + + this._parser = new SAXParser(strict, opt) + this.writable = true + this.readable = true + + var me = this + + this._parser.onend = function () { + me.emit('end') + } + + this._parser.onerror = function (er) { + me.emit('error', er) + + // if didn't throw, then means error was handled. + // go ahead and clear error, so we can write again. + me._parser.error = null + } + + this._decoder = null + + streamWraps.forEach(function (ev) { + Object.defineProperty(me, 'on' + ev, { + get: function () { + return me._parser['on' + ev] + }, + set: function (h) { + if (!h) { + me.removeAllListeners(ev) + me._parser['on' + ev] = h + return h + } + me.on(ev, h) + }, + enumerable: true, + configurable: false + }) + }) + } + + SAXStream.prototype = Object.create(Stream.prototype, { + constructor: { + value: SAXStream + } + }) + + SAXStream.prototype.write = function (data) { + if (typeof Buffer === 'function' && + typeof Buffer.isBuffer === 'function' && + Buffer.isBuffer(data)) { + if (!this._decoder) { + var SD = require('string_decoder').StringDecoder + this._decoder = new SD('utf8') + } + data = this._decoder.write(data) + } + + this._parser.write(data.toString()) + this.emit('data', data) + return true + } + + SAXStream.prototype.end = function (chunk) { + if (chunk && chunk.length) { + this.write(chunk) + } + this._parser.end() + return true + } + + SAXStream.prototype.on = function (ev, handler) { + var me = this + if (!me._parser['on' + ev] && streamWraps.indexOf(ev) !== -1) { + me._parser['on' + ev] = function () { + var args = arguments.length === 1 ? [arguments[0]] : Array.apply(null, arguments) + args.splice(0, 0, ev) + me.emit.apply(me, args) + } + } + + return Stream.prototype.on.call(me, ev, handler) + } + + // this really needs to be replaced with character classes. + // XML allows all manner of ridiculous numbers and digits. + var CDATA = '[CDATA[' + var DOCTYPE = 'DOCTYPE' + var XML_NAMESPACE = 'http://www.w3.org/XML/1998/namespace' + var XMLNS_NAMESPACE = 'http://www.w3.org/2000/xmlns/' + var rootNS = { xml: XML_NAMESPACE, xmlns: XMLNS_NAMESPACE } + + // http://www.w3.org/TR/REC-xml/#NT-NameStartChar + // This implementation works on strings, a single character at a time + // as such, it cannot ever support astral-plane characters (10000-EFFFF) + // without a significant breaking change to either this parser, or the + // JavaScript language. 
Implementation of an emoji-capable xml parser
+  // is left as an exercise for the reader.
+  var nameStart = /[:_A-Za-z\u00C0-\u00D6\u00D8-\u00F6\u00F8-\u02FF\u0370-\u037D\u037F-\u1FFF\u200C-\u200D\u2070-\u218F\u2C00-\u2FEF\u3001-\uD7FF\uF900-\uFDCF\uFDF0-\uFFFD]/
+
+  var nameBody = /[:_A-Za-z\u00C0-\u00D6\u00D8-\u00F6\u00F8-\u02FF\u0370-\u037D\u037F-\u1FFF\u200C-\u200D\u2070-\u218F\u2C00-\u2FEF\u3001-\uD7FF\uF900-\uFDCF\uFDF0-\uFFFD\u00B7\u0300-\u036F\u203F-\u2040.\d-]/
+
+  var entityStart = /[#:_A-Za-z\u00C0-\u00D6\u00D8-\u00F6\u00F8-\u02FF\u0370-\u037D\u037F-\u1FFF\u200C-\u200D\u2070-\u218F\u2C00-\u2FEF\u3001-\uD7FF\uF900-\uFDCF\uFDF0-\uFFFD]/
+  var entityBody = /[#:_A-Za-z\u00C0-\u00D6\u00D8-\u00F6\u00F8-\u02FF\u0370-\u037D\u037F-\u1FFF\u200C-\u200D\u2070-\u218F\u2C00-\u2FEF\u3001-\uD7FF\uF900-\uFDCF\uFDF0-\uFFFD\u00B7\u0300-\u036F\u203F-\u2040.\d-]/
+
+  function isWhitespace (c) {
+    return c === ' ' || c === '\n' || c === '\r' || c === '\t'
+  }
+
+  function isQuote (c) {
+    return c === '"' || c === '\''
+  }
+
+  function isAttribEnd (c) {
+    return c === '>' || isWhitespace(c)
+  }
+
+  function isMatch (regex, c) {
+    return regex.test(c)
+  }
+
+  function notMatch (regex, c) {
+    return !isMatch(regex, c)
+  }
+
+  var S = 0
+  sax.STATE = {
+    BEGIN: S++, // leading byte order mark or whitespace
+    BEGIN_WHITESPACE: S++, // leading whitespace
+    TEXT: S++, // general stuff
+    TEXT_ENTITY: S++, // &amp and such.
+    OPEN_WAKA: S++, // <
+    SGML_DECL: S++, // <!BLARG
+    SGML_DECL_QUOTED: S++, // <!BLARG foo "bar
+    DOCTYPE: S++, // <!DOCTYPE
+    DOCTYPE_QUOTED: S++, // <!DOCTYPE "//blah
+    DOCTYPE_DTD: S++, // <!DOCTYPE "//blah" [ ...
+    DOCTYPE_DTD_QUOTED: S++, // <!DOCTYPE "//blah" [ "foo
+    COMMENT_STARTING: S++, // <!-
+    COMMENT: S++, // <!--
+    COMMENT_ENDING: S++, // <!-- blah -
+    COMMENT_ENDED: S++, // <!-- blah --
+    CDATA: S++, // <![CDATA[ something
+    CDATA_ENDING: S++, // ]
+    CDATA_ENDING_2: S++, // ]]
+    PROC_INST: S++, // <?hi
+    PROC_INST_BODY: S++, // <?hi there
+    PROC_INST_ENDING: S++, // <?hi "there" ?
+    OPEN_TAG: S++, // <strong
+    OPEN_TAG_SLASH: S++, // <strong /
+    ATTRIB: S++, // <a
+    ATTRIB_NAME: S++, // <a foo
+    ATTRIB_NAME_SAW_WHITE: S++, // <a foo _
+    ATTRIB_VALUE: S++, // <a foo=
+    ATTRIB_VALUE_QUOTED: S++, // <a foo="bar
+    ATTRIB_VALUE_CLOSED: S++, // <a foo="bar"
+    ATTRIB_VALUE_UNQUOTED: S++, // <a foo=bar
+    ATTRIB_VALUE_ENTITY_Q: S++, // <foo bar="&quot;"
+    ATTRIB_VALUE_ENTITY_U: S++, // <foo bar=&quot
+    CLOSE_TAG: S++, // </a
+    CLOSE_TAG_SAW_WHITE: S++, // </a   >
+    SCRIPT: S++, // <script> ...
+    SCRIPT_ENDING: S++ // <script> ... <
+  }
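+
+  // (The tokenizer steps through these states one character at a time:
+  // "<" in TEXT enters OPEN_WAKA, a following "!" selects SGML_DECL, and
+  // so on, returning to TEXT as each construct closes.)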

z+aTPhYy3XR77;ck8&^82$QIEtCR;?um~0Up`zM=HaDLM-%-1~3AeSd`(+?li>C|rzOi~AbFpA4=35_t zaQFWe2zOUPxJ)tVfIrcLF5K0>(Awv3yYJRN-*N2ehp9(!{f*yz^p#hC{`gtekN3dy zr$4Z^=iSpeOXJMr?Aw0uqgOOLjqV5IWNT$ybuA$Y zzWU7bmz;IaTFm+g`;2?9Kly11t8?z_ubgt~b2g0q5RGeJ-*AEEhbxI*vd1y}Q@?lG zu>7hE>6fSf_`a=564r1*M9{e<0=(+#`_FovsdG!5^PRQ%pc(1L>dk! z6m*k_;0N*|Hk?#3&D6Pc$#qYCgajtZ2 z&kJ|FC9F9QHzf8^YPh`UllEw{yIw!#q1V2${)(S}0q(D$i$1Ua4WAbbz~}Dy`487U z{GHXoA%~eqxov#e2HC+FY&j$3d1o{(w1Y9Pwx-M#{(=d zTR$ZKp_+ZR?nl>^6YNsMB0vOwgRMxiUyT64Z?Nm^8?nw{kH5$Ds5f$@Uew!s%`UeU(EK9d0QGPj zKUvM^cakv5;9XUHKn>FZ=DGm33+6C9el2qX+&Bsba6gaDBTPPm8*T7_8q)DlB=d0} z5t0>iCc|L580x1`#UHbvPJ$kB+BxaWb_xC`%rqys3H6)Ja?krI$?YF5_ZrkY9-6#MPvU^_IQ4Mg<;21shI0t; z*iDjp1ZQBhy#e9@TZ6D%uG<B9q?tu|Y#_0*hY# z;6ZbFsz#swz*9P)FQ$lcwF@{xo?F8uqIlRe!{^qp_ma=8Vbuz12Jb>*$t%nrg57h3 ztM3>3(K{O4{egH?fNd*S!zN}Jo$?JM5p)M;e2fHBS8yet#){Blrb-4@JJ)DYB`}iZ z_y{ApeZC?Oz~Kr7eE_Z+BgpgOt;tIOAWhjrbr?86p2(sp#7(aD$KyV!%6aurg|D5J z@f0B%ipph!Ui1)^vvNO-UP2)YV%%Z>3m!J&4>nQgKGxn(yZqeMWU`a& z%;c?~RZqJB4xcdOu2u9(wDlmmxJ520m0M!uo`AgUUX+VlaFYT$QMX~rzA~TNFjIRB z^FSDoWgDi{&f{-9H;08-K^a7kACQq(xukdHNgnnRUh$3iRNi<-D*M*HSP4FmrCrI6 z#pxt3nUv*wWtRa6Izz6120tU(0vS=5T>%e8(kZ_7w|(d`=}Vkb-0qp4pV_{|&cy!v zl4lvCzC<74;vem-)XrMz(p~8+%>8UVTH z4>xK8Z<3wwOjZCfBKx*}AMV1T&0cSJ@fas)1r~_Ta_SRadhS!M%=VCnk2=ia8yFnW z{tD0p`$F|i2fkuU7Y*(}gX+Pfr`^11&EtZOk1c-qWD|87&Lg?eS0KB*`Kp&kvBdU6 zbf7=F*+eLGpn(6mQ9RTmUBM9KV|B)fZvNvTlhrK*3fw2e) z!dJhzF*SKg0#}fd1VA}bWq4~*mG;6oN6qd>&c1e?O$I;9qpjxcF)>^I(Tdg zRl!KGyLa)32Hsmvw%JnHRb53as0-Wd@KM`5hziU^JS1#xAN{goRHQ@%Jbvg~#fFY~ z`y>}cgo<;E$aZbM!G%wCU*XP(w-F}VJ-sFS0S!ZW$v%o0AQJ$%_If~rSk4J0q zdJVc8{KatG-NODV?~wS z0|Gn=jJphlJ^QX55agj>+{a^WK%h8Q-T=*FaQl*)xKklaKZ!PCb}9; z+6^^^NKjZCRsa7?qozxvV2blnIrLON1P0eoi%3uDzC|IguqRD_?!cmzvQrd>QpkpS zL7;+#^eX-!vUc@uF5V>#K8?-ZXrCz!T@D6vdbMGy*?nlJ-g&vsl4SS99<^37vd3Fk zEodBl?*$#w1$AP?!trP6rYH7V4aEldoWA|ZFhc2;-g~&Ck zKKho8I?&k*nLdo-zk+xKGxky-_5tj)0^zL^6ua9gg+4UG4)gX_AfjQ#x(P0J@e3n! 
z9efC|u4(RYd!=5+kg0x}cg^3S_Js`u-lTAor@YC$%14%N$zUQ2i=N5pa8f;kk+0N% z;iFX0D@;Ow!wDWh!3$tj4-`L@aZ5XXy%j>}KgkHp_c8*FbD#@53C-a2onLi!KF=1> z26|6(FyG4@giaE35c}}0Iarezox4yUP6`T~TA2=teZ>n{Ds`&Ljyq~Zz@Bvo>>^wC({J`5`uXCRfg&}fJSd&Q_s3~_B7f#ObF;T~G6 zJYh zv!|Ci=Kz!w4S7&CK0V5bM)l)ybS=fFN4W(#>F8)qQ6OXaWUPQ+XnqbZqv$J)zi^hc zPiDi^9VAN4J+K0ApQ7WugC|olCj+B#j?Z@$`kCECweCdyunz=wP0V}^V7=ND>&cD9 zoD#XBp6}V8865yVg8RdLy!iQwNm=7)A$(4$6Vel!Y zV`L@p0(m^TvDmUF>AA!6t+ekx6P<4*`tVNn!?^ZTD<*`(!jzcuzH*9pQ%ox^6FJk7 znuW`a#md_1Sh4pS_degcBi26QZ{xn8x{V!CFv(#iUr@>MIY-V$v+a70Qx68mjwl#Q z)Y9@YCCvO>{K_zdBYG8m1}?THlh=;^17Ppd=2<#yAxT8osf|L7tws|JeD*c&^XuQC z;BBW%yefx6=F|2~aEw+v&cU#rI25+Ikw?Mp)1!d0;!*JW^e8AZQknEUmc94(BOr_Q zBj79TN7w;u(AlueU(b7Rncv^L>wU{qT%pdfStV2bK4mJ$C}b*nSYBX_EoIxaSgobokhZ38sR?H-qD^y7)`o@*=WmqlrJ-r83W?Pqo_fE}^V~0Hu!w*&-Moq)zsQGA%!QqpS|Y~uI5_z$ps<&U z^$-t+FwTxESeR%K)z+)#0tOGA@_scTtDL8&mt9?DQUxZ?mgE*1il7CUbAmObWjO4# zz4rp`W2dH0nF~fTr>9UE5o3f0U)h)u77?L=gC2r2gex!*74QUrwIsn+uK>uPS5m;Y zX6cKI`jx&`%{~q90B>zwnHj9ps|NK0!>hMP z$fo?tEoL3MIB#%q$XELXWxz-fm>gZ+Y02P%xmWT+!(i_;i6ncXLocHe>Zo zU8GgT*@#y>Hy=+yfFW`5W_zAmDRn3>Qp5QUceM2o+P9q73yUs09NGZs`aF4#$Z+Q{bqM8B?irU85N&NH*nj80y0|@4Cyw z!b%$tiHLce5R$zo&gAH1xIS&iu?>nu#3#<$AsjZLuia_cN0}&xjos(tZ2_ma8~XX0 z#%<9b=gZd?$plyH>&?r5r8iet3c8`wW&GtFExz`Oiy+O$k5PZyTq2}!oF8ivTw2o= zCt=P?hAf1Iw>dbFBm^UR(w`6joc;hB?nwbOqK#`4Lq3#{wSxzB!pO(yk|Ty-f*dsz z1RZ-^>N3ZEX-#3VOt)zBz(C4%RZv40z-{}>+n;M&ZhDYq)pCx|$Wc$7#-UAS&(KQE zV3AkEgxpD>ji74>xyYVA8v(31zqXAa_EH)eEu#{KouCC2wK>C!jUunS*&%IG!V_qg zG9N=@Ef@kCMo%Q}Q3(bI6w~qw9%DtF(WDyvoL^2npt;cTtCq9pSHU$(-A>Ej z#bge3=S8-0{=#8MTvXb5yfg>CN5BX*4{WW_S4l`^5_%kf_+N55$PUN~(-Ea8FvYfZ z+9^MwA@Gv@`M@!_!!6#PaqFnxRKHMEY&2XA!wajx%WS+3o7)2vV=uz)AO{l}znxeN zSE8dh%55^Bh^NvW56drZGd&ym?lDrqt$?)#!3tLp5yGanjbg5$5`m6&G)OjoHc`<@ zJf>dkDB9=v1!xr31xOkttCm|L5G~M#S;LGrIF-v%m26j{B^7LTX$`^_dJ+L7t#@0Y?oQMXn8bKcWa`8RKJiZZoY!JoPA)uM7Dy8RHqtP1z7+coI~N6U(S;T zXsypnQ;T68i}L?Z42LsD$-)*ZOwPgJDB#;15EEKndy@4*%CS1a^FpdKn#Y*oVJL9T z3PYUambLde!*w$Q_5+CrCd8mX-E5Ios#K=WqWGgMPTwm#u#w(uo{Mm_#iIS z08q$K)rbT*=qf-$VNw{KQ-}AUWZ()oivONBDSnZorw5Fd1slkDZ3MyP%#~k`II1@C zB}j<(B)qsaB?k@g(+%+10IinVLRcR>tNmvup4j#Q0l=klc89hqi&hobs%%(bt3pIT zN=GfiK}Pfk_Ec$uAX7!UNR!Bt5!cC;OrQ@W5EPmR^8;DPud>b{uhr7(#UxF8S9!&J zp4TE}+ob>U;>QJpAfs?Vieqp`9R>zildfK}hT7g}C`SgkfHi?eEUg1wrUEc+#EfAn zBx_H=LRZW+{Z=a$h95{S#1#@ou_9?&kmMZR_#+SZ*iV~6!{Au%1KWRR? 
z8#Ev0TBu*P;8!wy+f;B5EH1+ljNU_yaM?@Z^gUFzLf<*%m%ek#FMa2fU;54|zAmVs znLu%fR7hlTRGd}gYcN3~!vW4d632Xd$N6v{fz8L~Ss@++I_Blc)SmaC{q88*KVH?e zr^Tf0t(C8+uax#UQDJFMGAZrZ9Hl9nqcpY6*{ME?X>VVmjqs({OzB8^D;@d%ESe8) z3D7Z0fl;uQFz4Jzdq+7hxW~0*0^yLr8@jH{!kxf9iQ-PEco)zh9f+W3S(IPV^Sz#B zQN}5Ya!5yy>_Rf4cknzM;*`+<)H1Nbg}$|Leff%>@m07)PT@kfShz6k@fAG=tZ-#= zf(tny$C_e)oOdj}LuQqe{xFlcW)slnak^#Q7nS09@ z(cv4xiVj&1?`1>o2gnzz{HT1f%9;3baUjYf3thjzq!HO^^2L&JR9hHmN?dX>z7*L*vwx&`dK=|qqwyC*FGI3!soG$IyR9iqUhQvhJjdb0r`fIT1|f&&DZSTcS$<~6}cps7Ahs3UL{ zXX@ZvIy<#KdXgtoeDz0wVIMvL27gcj0KkV!fG?-?9PdY^sE=7-#D`CTjWol#F8;oG zfDk@38sN~!dvL+tA3hEK{Tc-_n~rX2UM5)=EM2l}aZ7g#G6|?=V1vpwT;~)Ar7N2<8MQg) z*N7jOj^B^5_anBFj_s?t%(xX9M+p(8Oq*~>GbwZEk^lOy|K4Q&>4;B!`cqAZ9yZ4u z`7cK_8RR|t`gGo-c9^#jc{f$%U9iKv|Af5HBJXaWZRxgT3>P)Gf-p#U8wtZ(I~FYM z>_n%8%UU{@w03o&A47DQbiwI2IK^Ro=ICf%(gG0ckzwqz<^}E2_2I&$=*5nu-Qkkv z?gdAOsY*#86}nqibxR*NEo|;?23rv@ z`|NY*R&Y+o(vAr>TS!(Q)<7OmF5QIg0oA8Wo7g|e?@BWEPw&@~jQtb)+c$z@Y%F*Z zjJKqfvWkQLn-FQt%9f-j24!(~RdE>e zSqX5!0ovnDi<%dAk+pDhUvaO#GC&wJ9hdC^z|!vK#VOIgqP3%Y#`LCcVu$yj4c!S~|U1==HwYhW(i3s$V}J{qEV1td;KcZkNew1O3T$qi)19dN}V-Ak4c zmd!1&1M;Te6}FR!6fsdR>+E&rr}c5-Whfu4NfN>OFLS^E+{0;29eUe zR61wTF+kz0-#Ep=5leSXT-@^icXsW;QB`L=O2Q)t5Cd3RL~nFTA_J1$&88usk}Bg^ zTOVyQ{Nc#X%|5c1>?`-~vPs%fJCvb1EjX#A1X=YHnoG50gQwa8Vv)( zW6&tJom7fd>F+z|Y;t##vzswOqJ}NupljK6VRquM_9Iz!0fr2_ zvV%}AfZhSrJ2*9k}#H7-(f6d1ITsx+crgV{l% zUN}sT%@uE?7Sz#U_A!r7gS&x1c@nD3sOsH0FjTM80u!==WA{H{{+9#e1ZK-y{4rAM zajtTas#kKtDySt-<%X35_Fuq?J)SI?I-FkvuB|6Gt`fL|z@bs~Y>m;E14E551PtCO zr5@QB&%B)r#v9{zxna~8CjdiZWXV)C#(&0vqsG{LJk@CCm@k)emC$!!9JmVTI|UpX z1)ZhRSHb%;49A47zU)m#X5 z6h~U+PgsP)5iW#!ilbcQPooGmnq1guS`>$$xco^V-<_xIC877^MM!Q4`z=eahuLn_ zqsoxJnuenX><}c=&?-)M#c;dyX2_^!XEJ1z$A=j*()Ywa^4Ri$U%PkyOm8`Oq}qC5 zpH}TCy7CO!#P~69#XA+W7DC5j>UyjKm6|C#>u;HTG<^L3cg(0QL^ovz+-?=0k(+KiFxu?R#nekDs3N$h^|+59D1r@0#0o`mevgx9W}F zJ1*}Zm|wGM^M%JYJow%eG*X6YYNUUX>KWhvQ{}I=w>R$E>j@P+*81pc-wyuek2js{ zE^mE*Mdz2>T~p5bYG1fz^NkyydiwF&UmVaX2YzdIY?+@Qz8Ki>>er`#4^P%R>80j_ z17tHyBjykL92I55?R0dsnCdt%TFg6fw3u|`6bY>PhK>Hw@7tDl?T;KyYr7z^KT0IS zv0!ZZTkFoc*M9U$*S~5?A2}I*V$w@mV(%XuUH4bF{BTdDv*l_@|J`UrsXFyIkm}DM%cwnxae5Y;X&)+vsev7cDX5D^ zLqP4Jdl>sPlRc07iDdaxppyc)chE4sAAv~}!$Vy%%0=0{o{Hk3!!n9bQ9Y?B9_phg zzEDM~0gCcPv5=r8$axA0atNV8grMUuz#h)+l0Ws-;W z7;bSQ8Oz5ghTDS#i6=x-oVvrR1-BK8QmZ&C!C8RrhwA$JQ1U^#*tQO${&^s9r4!a#F|33@ZjyRw5!=iUP#v z>ZYFvL7PUrZYV8IC}Ll=ku`qyFWGc&9_LU%nMF9Ua|ptGSTKmkNzUwgBJPgR4H;C&=!RyG9=f4X z(9w4+ud$;Tv2*+pdgknl$r zAzYJeYHlG-z>~0*gbv#u33rgtZTlnPJ`(D-KN7x1!WP>f2@gX!ZACIrdtcGf8(%2e z_h=kz0>~y)(||bxNB0hS; z;a5e4kZeA@tFQ$r-c>NY8M)c1R;WCH1SKLtiKV3xv9|mH;L|Xiu&{+W(Ic-WGJucu zF`92Y&kK&6W;3yjz(y0^cYl-Lo4`&_A#~!@Yp|KNmZBXb+FJ-<6N2_da3&`!&jf%S z_xiDKvQSF~4N&0HY@&4F_EJgV!Hzk2cY6c6q+3C=8lfDWjnn8@A-y6zTa*Fk&cT=< z0v`b!aL3A$;3`Mhcm4oG{7Ef<;VFTkQcFZaSi+#$OwtSaHf`bXL}HO% zcs5~07#WE(=G%irr_v}5S0`*{icL<7f%6j)<8s>4kR=XxC|ud72>3@~v$+{cT=1|q zZj{1j3Y#TtvCJsAQDH^guC>P?BluOp0iI-@@pm3Y=4FQSD^|>z#HuPUn%o2C*lG^f zMMI?}j*>g5Y8s95aE_(KIVFxvWH`@K;#?9(^8uV|DRJczN9_aWTS}Z;;%M@Lb1o%L zmpHQS;k-+UtCKjQ2jSdHiStSv^*Nk>DRDlDTLK)MgDG)-f#bXgoQElK0g0pjfO9b= zu3qA(Kj3^!i3>^`^#`1jDRG9xQGdXBnG&~J;;291+)RmUkT~iOI6qV3LJ~*)0q1B+ zTv+0$Kj1t~iHk@a^#`1*DREJOa}o&!=W9w_Oya0N;G9i~Ym_+Z4>)gA;!KI7{(y5g zCC-vK>JK=7Q{v(hNBsfka7tW4;;291JWh#QBXQIpa4x6BB_)pf1J37^xF(6C{(y5j zC9YZEIJXGrbxK@|#8H31HysXCj)<=_#PmQ@COn|ADVWhx8|-ja!>01j8iqwU9g|I~ zjWk4vYpfCW@iEB0l<^V3h>djtvH6IFe*C94D z1VT~R48+!V6sD;BRD`Tpcv)s;V|-hHPgH~kKK-WII~J1t^b;5kwp@i-fy`51EsVrj wezG@AUMx!5jifiw(#Pl5>Y6oL{z2LU>GiZ}4^OjLb%@I_8d;kCd diff --git a/unittests/test-contracts/test_api/test_compiler_builtins.cpp 
b/unittests/test-contracts/test_api/test_compiler_builtins.cpp deleted file mode 100644 index 7cf07efb172..00000000000 --- a/unittests/test-contracts/test_api/test_compiler_builtins.cpp +++ /dev/null @@ -1,395 +0,0 @@ -/** - * @file test_compiler_builtins.cpp - * @copyright defined in eos/LICENSE - */ -#include -#include -#include - -#include "test_api.hpp" - -unsigned __int128 operator "" _ULLL( const char* lit ) { - __int128 ret = 0; - size_t i = 0; - bool sign = false; - - if (lit[i] == '-') { - ++i; - sign = true; - } - - if (lit[i] == '+') - ++i; - - for (; lit[i] != '\0' ; ++i) { - const char c = lit[i]; - ret *= 10; - ret += c - '0'; - } - - if (sign) - ret *= -1; - - return (unsigned __int128)ret; -} - -__int128 operator "" _LLL( const char* lit ) { - __int128 ret = 0; - size_t i = 0; - bool sign = false; - - if (lit[i] == '-') { - ++i; - sign = true; - } - - if (lit[i] == '+') - ++i; - - for (; lit[i] != '\0' ; ++i) { - const char c = lit[i]; - ret *= 10; - ret += c - '0'; - } - - if (sign) - ret *= -1; - - return ret; -} - -void test_compiler_builtins::test_multi3() { - /* - * tests for negative values - */ - __int128 res = 0; - __int128 lhs_a = -30; - __int128 rhs_a = 100; - __int128 lhs_b = 100; - __int128 rhs_b = -30; - - __multi3( res, uint64_t(lhs_a), uint64_t( lhs_a >> 64 ), uint64_t(rhs_a), uint64_t( rhs_a >> 64 ) ); - eosio_assert( res == -3000, "__multi3 result should be -3000" ); - - __multi3( res, uint64_t(lhs_b), uint64_t( lhs_b >> 64 ), uint64_t(rhs_b), uint64_t( rhs_b >> 64 ) ); - eosio_assert( res == -3000, "__multi3 result should be -3000" ); - - __multi3( res, uint64_t(lhs_a), uint64_t( lhs_a >> 64 ), uint64_t(rhs_b), uint64_t( rhs_b >> 64 ) ); - eosio_assert( res == 900, "__multi3 result should be 900" ); - - /* - * test for positive values - */ - __multi3( res, uint64_t(lhs_b), uint64_t( lhs_b >> 64 ), uint64_t(rhs_a), uint64_t( rhs_a >> 64 ) ); - eosio_assert( res == 10000, "__multi3 result should be 10000" ); - - /* - * test identity - */ - __multi3( res, 1, 0, uint64_t(rhs_a), uint64_t(rhs_a >> 64) ); - eosio_assert( res == 100, "__multi3 result should be 100" ); - - __multi3( res, 1, 0, uint64_t(rhs_b), uint64_t(rhs_b >> 64) ); - eosio_assert( res == -30, "__multi3 result should be -30" ); -} - -void test_compiler_builtins::test_divti3() { - /* - * test for negative values - */ - __int128 res = 0; - __int128 lhs_a = -30; - __int128 rhs_a = 100; - __int128 lhs_b = 100; - __int128 rhs_b = -30; - - __divti3( res, uint64_t(lhs_a), uint64_t( lhs_a >> 64 ), uint64_t(rhs_a), uint64_t( rhs_a >> 64 ) ); - eosio_assert( res == 0, "__divti3 result should be 0" ); - - __divti3( res, uint64_t(lhs_b), uint64_t( lhs_b >> 64 ), uint64_t(rhs_b), uint64_t( rhs_b >> 64 ) ); - eosio_assert( res == -3, "__divti3 result should be -3" ); - - __divti3( res, uint64_t(lhs_a), uint64_t( lhs_a >> 64 ), uint64_t(rhs_b), uint64_t( rhs_b >> 64 ) ); - eosio_assert( res == 1, "__divti3 result should be 1" ); - - /* - * test for positive values - */ - __int128 lhs_c = 3333; - __int128 rhs_c = 3333; - - __divti3( res, uint64_t(lhs_b), uint64_t( lhs_b >> 64 ), uint64_t(rhs_a), uint64_t( rhs_a >> 64 ) ); - eosio_assert( res == 1, "__divti3 result should be 1" ); - - __divti3( res, uint64_t(lhs_b), uint64_t( lhs_b >> 64 ), uint64_t(rhs_c), uint64_t( rhs_c >> 64 ) ); - eosio_assert( res == 0, "__divti3 result should be 0" ); - - __divti3( res, uint64_t(lhs_c), uint64_t( lhs_c >> 64 ), uint64_t(rhs_a), uint64_t( rhs_a >> 64 ) ); - eosio_assert( res == 33, "__divti3 result should be 33" ); - - /* 
- * test identity - */ - __divti3( res, uint64_t(lhs_b), uint64_t( lhs_b >> 64 ), 1, 0 ); - eosio_assert( res == 100, "__divti3 result should be 100" ); - - __divti3( res, uint64_t(lhs_a), uint64_t( lhs_a >> 64 ), 1, 0 ); - eosio_assert( res == -30, "__divti3 result should be -30" ); -} - -void test_compiler_builtins::test_divti3_by_0() { - __int128 res = 0; - - __divti3( res, 100, 0, 0, 0 ); - eosio_assert( false, "Should have eosio_asserted" ); -} - -void test_compiler_builtins::test_udivti3() { - /* - * test for negative values - */ - unsigned __int128 res = 0; - unsigned __int128 lhs_a = (unsigned __int128)-30; - unsigned __int128 rhs_a = 100; - unsigned __int128 lhs_b = 100; - unsigned __int128 rhs_b = (unsigned __int128)-30; - - __udivti3( res, uint64_t(lhs_a), uint64_t( lhs_a >> 64 ), uint64_t(rhs_a), uint64_t( rhs_a >> 64 ) ); - eosio_assert( res == 3402823669209384634633746074317682114_ULLL, "__udivti3 result should be 0" ); - - __udivti3( res, uint64_t(lhs_b), uint64_t( lhs_b >> 64 ), uint64_t(rhs_b), uint64_t( rhs_b >> 64 ) ); - eosio_assert( res == 0, "__udivti3 result should be 0" ); - - __udivti3( res, uint64_t(lhs_a), uint64_t( lhs_a >> 64 ), uint64_t(rhs_b), uint64_t( rhs_b >> 64 ) ); - eosio_assert( res == 1, "__udivti3 result should be 1" ); - - /* - * test for positive values - */ - __int128 lhs_c = 3333; - __int128 rhs_c = 3333; - - __udivti3( res, uint64_t(lhs_b), uint64_t( lhs_b >> 64 ), uint64_t(rhs_a), uint64_t( rhs_a >> 64 ) ); - eosio_assert( res == 1, "__divti3 result should be 1" ); - - __udivti3( res, uint64_t(lhs_b), uint64_t( lhs_b >> 64 ), uint64_t(rhs_c), uint64_t( rhs_c >> 64 ) ); - eosio_assert( res == 0, "__divti3 result should be 0" ); - - __udivti3( res, uint64_t(lhs_c), uint64_t( lhs_c >> 64 ), uint64_t(rhs_a), uint64_t( rhs_a >> 64 ) ); - eosio_assert( res == 33, "__divti3 result should be 33" ); - - /* - * test identity - */ - __udivti3( res, uint64_t(lhs_b), uint64_t( lhs_b >> 64 ), 1, 0 ); - eosio_assert( res == 100, "__divti3 result should be 100" ); - - __udivti3( res, uint64_t(lhs_a), uint64_t( lhs_a >> 64 ), 1, 0 ); - eosio_assert( res == (unsigned __int128)-30, "__divti3 result should be -30" ); -} - -void test_compiler_builtins::test_udivti3_by_0() { - unsigned __int128 res = 0; - - __udivti3( res, 100, 0, 0, 0 ); - eosio_assert( false, "Should have eosio_asserted" ); -} - - -void test_compiler_builtins::test_lshlti3() { - __int128 res = 0; - __int128 val = 1; - __int128 test_res = 0; - - test_res = 0x8000000000000000; - test_res <<= 1; - - - __lshlti3( res, uint64_t(val), uint64_t(val >> 64), 0 ); - eosio_assert( res == 1, "__lshlti3 result should be 1" ); - - - __lshlti3( res, uint64_t(val), uint64_t(val >> 64), 1 ); - eosio_assert( res == ( 1 << 1 ), "__lshlti3 result should be 2" ); - - __lshlti3( res, uint64_t(val), uint64_t( val >> 64 ), 31 ); - eosio_assert( (unsigned __int128)res == 2147483648_ULLL, "__lshlti3 result should be 2^31" ); - - __lshlti3( res, uint64_t(val), uint64_t( val >> 64 ), 63 ); - eosio_assert( (unsigned __int128)res == 9223372036854775808_ULLL, "__lshlti3 result should be 2^63" ); - - __lshlti3( res, uint64_t(val), uint64_t( val >> 64 ), 64 ); - eosio_assert( res == test_res, "__lshlti3 result should be 2^64" ); - - __lshlti3( res, uint64_t(val), uint64_t( val >> 64 ), 127 ); - test_res <<= 63; - eosio_assert( res == test_res, "__lshlti3 result should be 2^127" ); - - __lshlti3( res, uint64_t(val), uint64_t( val >> 64 ), 128 ); - test_res <<= 1; - //should rollover - eosio_assert( res == test_res, "__lshlti3 
result should be 2^128" ); -} - -void test_compiler_builtins::test_ashlti3() { - __int128 res = 0; - __int128 val = 1; - __int128 test_res = 0; - - test_res = 0x8000000000000000; - test_res <<= 1; - - __ashlti3( res, uint64_t(val), uint64_t(val >> 64), 0 ); - eosio_assert( res == 1, "__ashlti3 result should be 1" ); - - - __ashlti3( res, uint64_t(val), uint64_t(val >> 64), 1 ); - eosio_assert( res == (1 << 1), "__ashlti3 result should be 2" ); - - __ashlti3( res, uint64_t(val), uint64_t(val >> 64), 31 ); - eosio_assert( res == (__int128)2147483648_ULLL, "__ashlti3 result should be 2^31" ); - - __ashlti3( res, uint64_t(val), uint64_t(val >> 64), 63 ); - eosio_assert( res == (__int128)9223372036854775808_ULLL, "__ashlti3 result should be 2^63" ); - - __ashlti3( res, uint64_t(val), uint64_t(val >> 64), 64 ); - eosio_assert( res == test_res, "__ashlti3 result should be 2^64" ); - - __ashlti3( res, uint64_t(val), uint64_t(val >> 64), 127 ); - test_res <<= 63; - eosio_assert( res == test_res, "__ashlti3 result should be 2^127" ); - - __ashlti3( res, uint64_t(val), uint64_t(val >> 64), 128 ); - test_res <<= 1; - //should rollover - eosio_assert( res == test_res, "__ashlti3 result should be 2^128" ); -} - - -void test_compiler_builtins::test_lshrti3() { - __int128 res = 0; - __int128 val = 0x8000000000000000; - __int128 test_res = 0x8000000000000000; - - val <<= 64; - test_res <<= 64; - - __lshrti3( res, uint64_t(val), uint64_t(val >> 64), 0 ); - eosio_assert( res == test_res, "__lshrti3 result should be 2^127" ); - - __lshrti3( res, uint64_t(val), uint64_t(val >> 64), 1 ); - eosio_assert( res == (__int128)85070591730234615865843651857942052864_ULLL, "__lshrti3 result should be 2^126" ); - - __lshrti3( res, uint64_t(val), uint64_t(val >> 64), 63 ); - eosio_assert( res == (__int128)18446744073709551616_ULLL, "__lshrti3 result should be 2^64" ); - - __lshrti3( res, uint64_t(val), uint64_t(val >> 64), 64 ); - eosio_assert( res == (__int128)9223372036854775808_ULLL, "__lshrti3 result should be 2^63" ); - - __lshrti3( res, uint64_t(val), uint64_t(val >> 64), 96 ); - eosio_assert( res == (__int128)2147483648_ULLL, "__lshrti3 result should be 2^31" ); - - __lshrti3( res, uint64_t(val), uint64_t(val >> 64), 127 ); - eosio_assert( res == 0x1, "__lshrti3 result should be 2^0" ); -} - -void test_compiler_builtins::test_ashrti3() { - __int128 res = 0; - __int128 test = 1; - __int128 val = -170141183460469231731687303715884105728_LLL; - - test <<= 127; - - __ashrti3( res, uint64_t(val), uint64_t(val >> 64), 0 ); - eosio_assert( res == -170141183460469231731687303715884105728_LLL, "__ashrti3 result should be -2^127" ); - - __ashrti3(res, uint64_t(val), uint64_t(val >> 64), 1 ); - eosio_assert( res == -85070591730234615865843651857942052864_LLL, "__ashrti3 result should be -2^126" ); - - __ashrti3(res, uint64_t(val), uint64_t(val >> 64), 2 ); - eosio_assert( res == test >> 2, "__ashrti3 result should be -2^125" ); - - __ashrti3( res, uint64_t(val), uint64_t(val >> 64), 64 ); - eosio_assert( res == test >> 64, "__ashrti3 result should be -2^63" ); - - __ashrti3( res, uint64_t(val), uint64_t(val >> 64), 95 ); - eosio_assert( res == test >> 95, "__ashrti3 result should be -2^31" ); - - __ashrti3( res, uint64_t(val), uint64_t(val >> 64), 127 ); - eosio_assert( res == test >> 127, "__ashrti3 result should be -2^0" ); -} - - -void test_compiler_builtins::test_modti3() { - __int128 res = 0; - __int128 lhs_a = -30; - __int128 rhs_a = 100; - __int128 lhs_b = 30; - __int128 rhs_b = -100; - - __modti3( res, 
uint64_t(lhs_a), uint64_t(lhs_a >> 64), uint64_t(rhs_a), uint64_t(rhs_a >> 64) ); - eosio_assert( res == -30, "__modti3 result should be -30" ); - - __modti3( res, uint64_t(lhs_b), uint64_t(lhs_b >> 64), uint64_t(rhs_b), uint64_t(rhs_b >> 64) ); - eosio_assert( res == 30, "__modti3 result should be 30" ); - - __modti3( res, uint64_t(lhs_a), uint64_t(lhs_a >> 64), uint64_t(rhs_b), uint64_t(rhs_b >> 64) ); - eosio_assert( res == -30, "__modti3 result should be -30" ); - - __modti3( res, uint64_t(rhs_a), uint64_t(rhs_a >> 64), uint64_t(lhs_b), uint64_t(lhs_b >> 64) ); - eosio_assert( res == 10, "__modti3 result should be 10" ); - - __modti3( res, uint64_t(rhs_a), uint64_t(rhs_a >> 64), uint64_t(rhs_b), uint64_t(rhs_b >> 64) ); - eosio_assert( res == 0, "__modti3 result should be 0" ); - - __modti3( res, uint64_t(rhs_a), uint64_t(rhs_a >> 64), uint64_t(rhs_a), uint64_t(rhs_a >> 64) ); - eosio_assert( res == 0, "__modti3 result should be 0" ); - - __modti3( res, 0, 0, uint64_t(rhs_a), uint64_t(rhs_a >> 64) ); - eosio_assert( res == 0, "__modti3 result should be 0" ); -} - -void test_compiler_builtins::test_modti3_by_0() { - __int128 res = 0; - __int128 lhs = 100; - - __modti3( res, uint64_t(lhs), uint64_t(lhs >> 64), 0, 0 ); - eosio_assert( false, "should have thrown an error" ); -} - -void test_compiler_builtins::test_umodti3() { - unsigned __int128 res = 0; - unsigned __int128 lhs_a = (unsigned __int128)-30; - unsigned __int128 rhs_a = 100; - unsigned __int128 lhs_b = 30; - unsigned __int128 rhs_b = (unsigned __int128)-100; - - __umodti3( res, uint64_t(lhs_a), uint64_t(lhs_a >> 64), uint64_t(rhs_a), uint64_t(rhs_a >> 64) ); - eosio_assert( res == (unsigned __int128)-30, "__modti3 result should be -30" ); - - __umodti3( res, uint64_t(lhs_b), uint64_t(lhs_b >> 64), uint64_t(rhs_b), uint64_t(rhs_b >> 64) ); - eosio_assert( res == 30, "__modti3 result should be 30" ); - - __umodti3( res, uint64_t(lhs_a), uint64_t(lhs_a >> 64), uint64_t(rhs_b), uint64_t(rhs_b >> 64) ); - eosio_assert( res == (unsigned __int128)-30, "__modti3 result should be -30" ); - - __umodti3( res, uint64_t(rhs_a), uint64_t(rhs_a >> 64), uint64_t(lhs_b), uint64_t(lhs_b >> 64) ); - eosio_assert( res == 10, "__modti3 result should be 10" ); - - __umodti3( res, uint64_t(rhs_a), uint64_t(rhs_a >> 64), uint64_t(rhs_b), uint64_t(rhs_b >> 64) ); - eosio_assert( res == 0, "__modti3 result should be 0" ); - - __umodti3( res, uint64_t(rhs_a), uint64_t(rhs_a >> 64), uint64_t(rhs_a), uint64_t(rhs_a >> 64) ); - eosio_assert( res == 0, "__modti3 result should be 0" ); - - __umodti3( res, 0, 0, uint64_t(rhs_a), uint64_t(rhs_a >> 64) ); - eosio_assert( res == 0, "__modti3 result should be 0" ); -} - -void test_compiler_builtins::test_umodti3_by_0() { - unsigned __int128 res = 0; - unsigned __int128 lhs = 100; - - __umodti3( res, uint64_t(lhs), uint64_t(lhs >> 64), 0, 0 ); - eosio_assert( false, "should have thrown an error" ); -} diff --git a/unittests/test-contracts/test_api/test_fixedpoint.cpp b/unittests/test-contracts/test_api/test_fixedpoint.cpp deleted file mode 100644 index cca3a9d42f2..00000000000 --- a/unittests/test-contracts/test_api/test_fixedpoint.cpp +++ /dev/null @@ -1,158 +0,0 @@ -/** - * @file action_test.cpp - * @copyright defined in eos/LICENSE.txt - */ -#include -#include - -#include "test_api.hpp" - -void test_fixedpoint::create_instances() -{ - { - // Various ways to create fixed_point128 - eosio::fixed_point128<18> a(12345667); - eosio::fixed_point128<18> b(12345667); - eosio::fixed_point128<16> c(12345667); - 
eosio_assert( b == a, "fixed_point128 instances comparison with same number of decimals" ); - eosio_assert( c == a, "fixed_point128 instances with different number of decimals" ); - } - - { - // Various ways to create fixed_point64 - eosio::fixed_point64<5> a(12345667); - eosio::fixed_point64<5> b(12345667); - eosio::fixed_point64<5> c(12345667); - eosio_assert( b == a, "fixed_point64 instances comparison with same number of decimals" ); - eosio_assert( c == a, "fixed_point64 instances with different number of decimals" ); - } - - { - // Various ways to create fixed_point32 - eosio::fixed_point32<18> a(12345667); - eosio::fixed_point32<18> b(12345667); - eosio::fixed_point32<16> c(12345667); - eosio_assert( b == a, "fixed_point32 instances comparison with same number of decimals" ); - eosio_assert( c == a, "fixed_point32 instances with different number of decimals" ); - } -} - -void test_fixedpoint::test_addition() -{ - { - // Various ways to create fixed_point32 - eosio::fixed_point32<0> a(100); - eosio::fixed_point32<0> b(100); - eosio::fixed_point32<0> c = a + b; - eosio::fixed_point32<0> d = 200; - eosio_assert( c == d, "fixed_point32 instances addition with zero decmimals" ); - } - { - // Various ways to create fixed_point64 - eosio::fixed_point64<0> a(100); - eosio::fixed_point64<0> b(100); - eosio::fixed_point64<0> c = a + b; - eosio::fixed_point64<0> d = 200; - eosio_assert( c == d, "fixed_point64 instances addition with zero decmimals" ); - } -}; - -void test_fixedpoint::test_subtraction() -{ - { - // Various ways to create fixed_point64 - eosio::fixed_point64<0> a(100); - eosio::fixed_point64<0> b(100); - eosio::fixed_point64<0> c = a - b; - eosio::fixed_point64<0> d = 0; - eosio_assert( c == d, "fixed_point64 instances subtraction with zero decmimals" ); - - eosio::fixed_point64<0> a1(0); - eosio::fixed_point64<0> c1 = a1 - b; - eosio::fixed_point64<0> d1 = -100; - eosio_assert( c1 == d1, "fixed_point64 instances subtraction with zero decmimals" ); - } - { - // Various ways to create fixed_point32 - eosio::fixed_point32<0> a(100); - eosio::fixed_point32<0> b(100); - eosio::fixed_point32<0> c = a - b; - eosio::fixed_point32<0> d = 0; - eosio_assert( c == d, "fixed_point32 instances subtraction with zero decmimals" ); - - // Various ways to create fixed_point32 - eosio::fixed_point32<0> a1(0); - eosio::fixed_point32<0> c1 = a1 - b; - eosio::fixed_point32<0> d1 = -100; - eosio_assert( c1 == d1, "fixed_point32 instances subtraction with zero decmimals" ); - - } -}; - -void test_fixedpoint::test_multiplication() -{ - { - // Various ways to create fixed_point64 - eosio::fixed_point64<0> a(100); - eosio::fixed_point64<0> b(200); - eosio::fixed_point128<0> c = a * b; - eosio::fixed_point128<0> d(200*100); - eosio_assert( c == d, "fixed_point64 instances multiplication result in fixed_point128" ); - } - - { - // Various ways to create fixed_point32 - eosio::fixed_point32<0> a(100); - eosio::fixed_point32<0> b(200); - eosio::fixed_point64<0> c = a * b; - eosio::fixed_point64<0> d(200*100); - eosio_assert( c == d, "fixed_point32 instances multiplication result in fixed_point64" ); - } -} - -void test_fixedpoint::test_division() -{ - { - uint64_t lhs = 10000000; - uint64_t rhs = 333; - - eosio::fixed_point64<0> a((int64_t)lhs); - eosio::fixed_point64<0> b((int64_t)rhs); - eosio::fixed_point128<5> c = a / b; - - eosio::fixed_point128<5> e = eosio::fixed_divide<5>( lhs, rhs ); - print(e); - eosio_assert( c == e, "fixed_point64 instances division result from operator and function and compare 
in fixed_point128" ); - - } - - { - uint32_t lhs = 100000; - uint32_t rhs = 33; - - eosio::fixed_point32<0> a((int32_t)lhs); - eosio::fixed_point32<0> b((int32_t)rhs); - eosio::fixed_point64<5> c = a / b; - - eosio::fixed_point64<5> e = eosio::fixed_divide<5>( lhs, rhs ); - eosio_assert( c == e, "fixed_point64 instances division result from operator and function and compare in fixed_point128" ); - - } -} - -void test_fixedpoint::test_division_by_0() -{ - { - uint64_t lhs = 10000000; - uint64_t rhs = 0; - - eosio::fixed_point64<0> a((int64_t)lhs); - eosio::fixed_point64<0> b((int64_t)rhs); - - eosio::fixed_point128<5> e = eosio::fixed_divide<5>( lhs, rhs ); - // in order to get rid of unused parameter warning - e = 0; - eosio_assert( false, "should've thrown an error" ); - - } - } diff --git a/unittests/test-contracts/test_api/test_permission.cpp b/unittests/test-contracts/test_api/test_permission.cpp index df638812fed..7650d8f40cd 100644 --- a/unittests/test-contracts/test_api/test_permission.cpp +++ b/unittests/test-contracts/test_api/test_permission.cpp @@ -5,7 +5,6 @@ #include #include -#include #include #include #include diff --git a/unittests/test-contracts/test_api/test_print.cpp b/unittests/test-contracts/test_api/test_print.cpp index 203a3b65997..c0b5ab07e00 100644 --- a/unittests/test-contracts/test_api/test_print.cpp +++ b/unittests/test-contracts/test_api/test_print.cpp @@ -17,11 +17,8 @@ void test_print::test_prints_l() { void test_print::test_prints() { prints("ab"); - prints(nullptr); prints("c\0test_prints"); - prints(0); prints("efg"); - prints(0); } void test_print::test_printi() { @@ -68,14 +65,14 @@ void test_print::test_printn() { printn(eosio::name{"5"}.value); printn(eosio::name{"a"}.value); printn(eosio::name{"z"}.value); - + printn(eosio::name{"abc"}.value); printn(eosio::name{"123"}.value); - + printn(eosio::name{"abc.123"}.value); printn(eosio::name{"123.abc"}.value); - printn(eosio::name{"12345abcdefgj"}.value); + printn(eosio::name{"12345abcdefgj"}.value); printn(eosio::name{"ijklmnopqrstj"}.value); printn(eosio::name{"vwxyz.12345aj"}.value); diff --git a/unittests/test-contracts/test_api/test_transaction.cpp b/unittests/test-contracts/test_api/test_transaction.cpp index 857daa36b63..065828ea307 100644 --- a/unittests/test-contracts/test_api/test_transaction.cpp +++ b/unittests/test-contracts/test_api/test_transaction.cpp @@ -98,7 +98,7 @@ void test_transaction::send_action_large() { std::vector permissions = { {"testapi"_n, "active"_n} }; action act( permissions, name{"testapi"}, name{WASM_TEST_ACTION("test_action", "read_action_normal")}, test_action ); - + act.send(); eosio_assert( false, "send_message_large() should've thrown an error" ); } @@ -175,7 +175,7 @@ void test_transaction::send_transaction(uint64_t receiver, uint64_t, uint64_t) { auto trx = transaction(); std::vector permissions = { {"testapi"_n, "active"_n} }; - + trx.actions.emplace_back(permissions, name{"testapi"}, name{WASM_TEST_ACTION( "test_action", "read_action_normal" )}, test_action); trx.send( 0, name{receiver} ); } @@ -187,7 +187,7 @@ void test_transaction::send_action_sender( uint64_t receiver, uint64_t, uint64_t auto trx = transaction(); std::vector permissions = { {"testapi"_n, "active"_n} }; - + trx.actions.emplace_back(permissions, name{"testapi"}, name{WASM_TEST_ACTION( "test_action", "test_current_sender" )}, &cur_send); trx.send( 0, name{receiver} ); } @@ -203,10 +203,10 @@ void test_transaction::send_transaction_empty( uint64_t receiver, uint64_t, uint void 
test_transaction::send_transaction_trigger_error_handler( uint64_t receiver, uint64_t, uint64_t ) { using namespace eosio; test_action_action<"testapi"_n.value, WASM_TEST_ACTION( "test_action", "assert_false" )> test_action; - + auto trx = transaction(); std::vector permissions = { {"testapi"_n, "active"_n} }; - + trx.actions.emplace_back( permissions, name{"testapi"}, name{WASM_TEST_ACTION("test_action", "assert_false")}, test_action ); trx.send(0, name{receiver}); } @@ -252,7 +252,7 @@ void test_transaction::send_deferred_transaction( uint64_t receiver, uint64_t, u auto trx = transaction(); std::vector permissions = { {"testapi"_n, "active"_n} }; - + trx.actions.emplace_back( permissions, name{"testapi"}, name{ WASM_TEST_ACTION("test_transaction", "deferred_print" )}, test_action ); trx.delay_sec = 2; trx.send( 0xffffffffffffffff, name{receiver} ); @@ -264,7 +264,7 @@ void test_transaction::send_deferred_transaction_replace( uint64_t receiver, uin auto trx = transaction(); std::vector permissions = { {"testapi"_n, "active"_n} }; - + trx.actions.emplace_back( permissions, name{"testapi"}, name{WASM_TEST_ACTION( "test_transaction", "deferred_print" )}, test_action ); trx.delay_sec = 2; trx.send( 0xffffffffffffffff, name{receiver}, true ); @@ -323,16 +323,6 @@ void test_transaction::context_free_api() { get_context_free_data( 0, buf, sizeof(buf) ); } -extern "C" { int is_feature_active(int64_t); } -void test_transaction::new_feature() { - eosio_assert( false == is_feature_active("newfeature"_n.value), "we should not have new features unless hardfork" ); -} - -extern "C" { void activate_feature(int64_t);} -void test_transaction::active_new_feature() { - activate_feature("newfeature"_n.value); -} - void test_transaction::repeat_deferred_transaction( uint64_t receiver, uint64_t code, uint64_t action ) { using namespace eosio; @@ -350,7 +340,7 @@ void test_transaction::repeat_deferred_transaction( uint64_t receiver, uint64_t --payload; transaction trx; std::vector permissions = { {name{receiver}, "active"_n} }; - + trx.actions.emplace_back( permissions, name{code}, name{action}, payload ); trx.send( sender_id, eosio::name{receiver} ); } diff --git a/unittests/test-contracts/test_api_db/CMakeLists.txt b/unittests/test-contracts/test_api_db/CMakeLists.txt index 1cc38e3c1b7..0986dc0cb6c 100644 --- a/unittests/test-contracts/test_api_db/CMakeLists.txt +++ b/unittests/test-contracts/test_api_db/CMakeLists.txt @@ -1,5 +1,6 @@ -if( ${eosio.cdt_FOUND} ) - add_executable( test_api_db test_api_db.cpp ) +if( EOSIO_COMPILE_TEST_CONTRACTS ) + add_contract( test_api_db test_api_db test_api_db.cpp ) else() - configure_file( ${CMAKE_CURRENT_SOURCE_DIR}/test_api_db.wasm ${CMAKE_CURRENT_BINARY_DIR}/test_api_db.wasm COPYONLY ) + configure_file( ${CMAKE_CURRENT_SOURCE_DIR}/test_api_db.wasm ${CMAKE_CURRENT_BINARY_DIR}/test_api_db.wasm COPYONLY ) + configure_file( ${CMAKE_CURRENT_SOURCE_DIR}/test_api_db.abi ${CMAKE_CURRENT_BINARY_DIR}/test_api_db.abi COPYONLY ) endif() diff --git a/unittests/test-contracts/test_api_db/test_api_db.abi b/unittests/test-contracts/test_api_db/test_api_db.abi new file mode 100644 index 00000000000..582978a47bf --- /dev/null +++ b/unittests/test-contracts/test_api_db/test_api_db.abi @@ -0,0 +1,144 @@ +{ + "____comment": "This file was generated with eosio-abigen. 
DO NOT EDIT ", + "version": "eosio::abi/1.1", + "types": [], + "structs": [ + { + "name": "idx64_general", + "base": "", + "fields": [] + }, + { + "name": "idx64_lowerbound", + "base": "", + "fields": [] + }, + { + "name": "idx64_upperbound", + "base": "", + "fields": [] + }, + { + "name": "idx_double_nan_create_fail", + "base": "", + "fields": [] + }, + { + "name": "idx_double_nan_lookup_fail", + "base": "", + "fields": [ + { + "name": "lookup_type", + "type": "uint32" + } + ] + }, + { + "name": "idx_double_nan_modify_fail", + "base": "", + "fields": [] + }, + { + "name": "misaligned_secondary_key256_tests", + "base": "", + "fields": [] + }, + { + "name": "primary_i64_general", + "base": "", + "fields": [] + }, + { + "name": "primary_i64_lowerbound", + "base": "", + "fields": [] + }, + { + "name": "primary_i64_upperbound", + "base": "", + "fields": [] + }, + { + "name": "test_invalid_access", + "base": "", + "fields": [ + { + "name": "code", + "type": "name" + }, + { + "name": "val", + "type": "uint64" + }, + { + "name": "index", + "type": "uint32" + }, + { + "name": "store", + "type": "bool" + } + ] + } + ], + "actions": [ + { + "name": "pg", + "type": "primary_i64_general", + "ricardian_contract": "" + }, + { + "name": "pl", + "type": "primary_i64_lowerbound", + "ricardian_contract": "" + }, + { + "name": "pu", + "type": "primary_i64_upperbound", + "ricardian_contract": "" + }, + { + "name": "s1g", + "type": "idx64_general", + "ricardian_contract": "" + }, + { + "name": "s1l", + "type": "idx64_lowerbound", + "ricardian_contract": "" + }, + { + "name": "s1u", + "type": "idx64_upperbound", + "ricardian_contract": "" + }, + { + "name": "sdnancreate", + "type": "idx_double_nan_create_fail", + "ricardian_contract": "" + }, + { + "name": "sdnanlookup", + "type": "idx_double_nan_lookup_fail", + "ricardian_contract": "" + }, + { + "name": "sdnanmodify", + "type": "idx_double_nan_modify_fail", + "ricardian_contract": "" + }, + { + "name": "sk32align", + "type": "misaligned_secondary_key256_tests", + "ricardian_contract": "" + }, + { + "name": "tia", + "type": "test_invalid_access", + "ricardian_contract": "" + } + ], + "tables": [], + "ricardian_clauses": [], + "variants": [] +} \ No newline at end of file diff --git a/unittests/test-contracts/test_api_db/test_api_db.cpp b/unittests/test-contracts/test_api_db/test_api_db.cpp index b50a05e71cb..7ab5ddf9ce5 100644 --- a/unittests/test-contracts/test_api_db/test_api_db.cpp +++ b/unittests/test-contracts/test_api_db/test_api_db.cpp @@ -2,27 +2,476 @@ * @file * @copyright defined in eos/LICENSE */ -#include - -#include "../test_api/test_api.hpp" -#include "test_db.cpp" - -extern "C" { - void apply( uint64_t receiver, uint64_t code, uint64_t action ) { - require_auth(code); - WASM_TEST_HANDLER_EX( test_db, primary_i64_general ); - WASM_TEST_HANDLER_EX( test_db, primary_i64_lowerbound ); - WASM_TEST_HANDLER_EX( test_db, primary_i64_upperbound ); - WASM_TEST_HANDLER_EX( test_db, idx64_general ); - WASM_TEST_HANDLER_EX( test_db, idx64_lowerbound ); - WASM_TEST_HANDLER_EX( test_db, idx64_upperbound ); - WASM_TEST_HANDLER_EX( test_db, test_invalid_access ); - WASM_TEST_HANDLER_EX( test_db, idx_double_nan_create_fail ); - WASM_TEST_HANDLER_EX( test_db, idx_double_nan_modify_fail ); - WASM_TEST_HANDLER_EX( test_db, idx_double_nan_lookup_fail ); - WASM_TEST_HANDLER_EX( test_db, misaligned_secondary_key256_tests ); - - //unhandled test call - eosio_assert( false, "Unknown Test" ); +#include "test_api_db.hpp" + +using namespace eosio; + +using namespace 
eosio::internal_use_do_not_use;
+
+void test_api_db::primary_i64_general()
+{
+   uint64_t receiver = get_self().value;
+   auto table1 = "table1"_n.value;
+
+   int alice_itr = db_store_i64( receiver, table1, receiver, "alice"_n.value, "alice's info", strlen("alice's info") );
+   db_store_i64( receiver, table1, receiver, "bob"_n.value, "bob's info", strlen("bob's info") );
+   db_store_i64( receiver, table1, receiver, "charlie"_n.value, "charlie's info", strlen("charlie's info") );
+   db_store_i64( receiver, table1, receiver, "allyson"_n.value, "allyson's info", strlen("allyson's info") );
+
+
+   // find
+   {
+      uint64_t prim = 0;
+      int itr_next = db_next_i64( alice_itr, &prim );
+      int itr_next_expected = db_find_i64( receiver, receiver, table1, "allyson"_n.value );
+      eosio_assert( itr_next == itr_next_expected && prim == "allyson"_n.value, "primary_i64_general - db_find_i64" );
+      itr_next = db_next_i64( itr_next, &prim );
+      itr_next_expected = db_find_i64( receiver, receiver, table1, "bob"_n.value );
+      eosio_assert( itr_next == itr_next_expected && prim == "bob"_n.value, "primary_i64_general - db_next_i64" );
+   }
+
+   // next
+   {
+      int charlie_itr = db_find_i64( receiver, receiver, table1, "charlie"_n.value );
+      // nothing after charlie
+      uint64_t prim = 0;
+      int end_itr = db_next_i64( charlie_itr, &prim );
+      eosio_assert( end_itr < 0, "primary_i64_general - db_next_i64" );
+      // prim didn't change
+      eosio_assert( prim == 0, "primary_i64_general - db_next_i64" );
+   }
+
+   // previous
+   {
+      int charlie_itr = db_find_i64( receiver, receiver, table1, "charlie"_n.value );
+      uint64_t prim = 0;
+      int itr_prev = db_previous_i64( charlie_itr, &prim );
+      int itr_prev_expected = db_find_i64( receiver, receiver, table1, "bob"_n.value );
+      eosio_assert( itr_prev == itr_prev_expected && prim == "bob"_n.value, "primary_i64_general - db_previous_i64" );
+
+      itr_prev = db_previous_i64( itr_prev, &prim );
+      itr_prev_expected = db_find_i64( receiver, receiver, table1, "allyson"_n.value );
+      eosio_assert( itr_prev == itr_prev_expected && prim == "allyson"_n.value, "primary_i64_general - db_previous_i64" );
+
+      itr_prev = db_previous_i64( itr_prev, &prim );
+      itr_prev_expected = db_find_i64( receiver, receiver, table1, "alice"_n.value );
+      eosio_assert( itr_prev == itr_prev_expected && prim == "alice"_n.value, "primary_i64_general - db_previous_i64" );
+
+      itr_prev = db_previous_i64( itr_prev, &prim );
+      eosio_assert( itr_prev < 0 && prim == "alice"_n.value, "primary_i64_general - db_previous_i64" );
+   }
+
+   // remove
+   {
+      int itr = db_find_i64( receiver, receiver, table1, "alice"_n.value );
+      eosio_assert( itr >= 0, "primary_i64_general - db_find_i64" );
+      db_remove_i64( itr );
+      itr = db_find_i64( receiver, receiver, table1, "alice"_n.value );
+      eosio_assert( itr < 0, "primary_i64_general - db_find_i64" );
+   }
+
+   // get
+   {
+      int itr = db_find_i64( receiver, receiver, table1, "bob"_n.value );
+      eosio_assert( itr >= 0, "" );
+      uint32_t buffer_len = 5;
+      char value[50];
+      auto len = db_get_i64( itr, value, buffer_len );
+      value[buffer_len] = '\0';
+      std::string s(value);
+      eosio_assert( uint32_t(len) == buffer_len, "primary_i64_general - db_get_i64" );
+      eosio_assert( s == "bob's", "primary_i64_general - db_get_i64 - 5" );
+
+      buffer_len = 20;
+      len = db_get_i64( itr, value, 0 );
+      len = db_get_i64( itr, value, (uint32_t)len );
+      value[len] = '\0';
+      std::string sfull(value);
+      eosio_assert( sfull == "bob's info", "primary_i64_general - db_get_i64 - full" );
+   }
+
+   // update
+   {
+      int itr = db_find_i64( receiver, receiver, table1, "bob"_n.value );
+      eosio_assert( itr >= 0, "" );
+      const char* new_value = "bob's new info";
+      uint32_t new_value_len = strlen(new_value);
+      db_update_i64( itr, receiver, new_value, new_value_len );
+      char ret_value[50];
+      db_get_i64( itr, ret_value, new_value_len );
+      ret_value[new_value_len] = '\0';
+      std::string sret(ret_value);
+      eosio_assert( sret == "bob's new info", "primary_i64_general - db_update_i64" );
+   }
+}
+
+void test_api_db::primary_i64_lowerbound()
+{
+   uint64_t receiver = get_self().value;
+   auto table = "mytable"_n.value;
+   db_store_i64( receiver, table, receiver, "alice"_n.value, "alice's info", strlen("alice's info") );
+   db_store_i64( receiver, table, receiver, "bob"_n.value, "bob's info", strlen("bob's info") );
+   db_store_i64( receiver, table, receiver, "charlie"_n.value, "charlie's info", strlen("charlie's info") );
+   db_store_i64( receiver, table, receiver, "emily"_n.value, "emily's info", strlen("emily's info") );
+   db_store_i64( receiver, table, receiver, "allyson"_n.value, "allyson's info", strlen("allyson's info") );
+   db_store_i64( receiver, table, receiver, "joe"_n.value, "nothing here", strlen("nothing here") );
+
+   const std::string err = "primary_i64_lowerbound";
+
+   {
+      int lb = db_lowerbound_i64( receiver, receiver, table, "alice"_n.value );
+      eosio_assert( lb == db_find_i64(receiver, receiver, table, "alice"_n.value), err.c_str() );
+   }
+   {
+      int lb = db_lowerbound_i64( receiver, receiver, table, "billy"_n.value );
+      eosio_assert( lb == db_find_i64(receiver, receiver, table, "bob"_n.value), err.c_str() );
+   }
+   {
+      int lb = db_lowerbound_i64( receiver, receiver, table, "frank"_n.value );
+      eosio_assert( lb == db_find_i64(receiver, receiver, table, "joe"_n.value), err.c_str() );
+   }
+   {
+      int lb = db_lowerbound_i64( receiver, receiver, table, "joe"_n.value );
+      eosio_assert( lb == db_find_i64(receiver, receiver, table, "joe"_n.value), err.c_str() );
+   }
+   {
+      int lb = db_lowerbound_i64( receiver, receiver, table, "kevin"_n.value );
+      eosio_assert( lb < 0, err.c_str() );
+   }
+}
+
+void test_api_db::primary_i64_upperbound()
+{
+   uint64_t receiver = get_self().value;
+   auto table = "mytable"_n.value;
+   const std::string err = "primary_i64_upperbound";
+   {
+      int ub = db_upperbound_i64( receiver, receiver, table, "alice"_n.value );
+      eosio_assert( ub == db_find_i64(receiver, receiver, table, "allyson"_n.value), err.c_str() );
+   }
+   {
+      int ub = db_upperbound_i64( receiver, receiver, table, "billy"_n.value );
+      eosio_assert( ub == db_find_i64(receiver, receiver, table, "bob"_n.value), err.c_str() );
+   }
+   {
+      int ub = db_upperbound_i64( receiver, receiver, table, "frank"_n.value );
+      eosio_assert( ub == db_find_i64(receiver, receiver, table, "joe"_n.value), err.c_str() );
+   }
+   {
+      int ub = db_upperbound_i64( receiver, receiver, table, "joe"_n.value );
+      eosio_assert( ub < 0, err.c_str() );
+   }
+   {
+      int ub = db_upperbound_i64( receiver, receiver, table, "kevin"_n.value );
+      eosio_assert( ub < 0, err.c_str() );
+   }
+}
+
+void test_api_db::idx64_general()
+{
+   uint64_t receiver = get_self().value;
+   const auto table = "myindextable"_n.value;
+
+   typedef uint64_t secondary_type;
+
+   struct record {
+      uint64_t ssn;
+      secondary_type name;
+   };
+
+   record records[] = { {265, "alice"_n.value},
+                        {781, "bob"_n.value},
+                        {234, "charlie"_n.value},
+                        {650, "allyson"_n.value},
+                        {540, "bob"_n.value},
+                        {976, "emily"_n.value},
+                        {110, "joe"_n.value} };
+
+   for ( uint32_t i = 0; i < sizeof(records)/sizeof(records[0]); ++i ) {
+      db_idx64_store( receiver, table, receiver, records[i].ssn, &records[i].name );
+   }
+
+   // find_primary
+   {
+      secondary_type sec = 0;
+      int itr = db_idx64_find_primary( receiver, receiver, table, &sec, 999 );
+      eosio_assert( itr < 0 && sec == 0, "idx64_general - db_idx64_find_primary" );
+      itr = db_idx64_find_primary( receiver, receiver, table, &sec, 110 );
+      eosio_assert( itr >= 0 && sec == "joe"_n.value, "idx64_general - db_idx64_find_primary" );
+      uint64_t prim_next = 0;
+      int itr_next = db_idx64_next( itr, &prim_next );
+      eosio_assert( itr_next < 0 && prim_next == 0, "idx64_general - db_idx64_find_primary" );
+   }
+
+   // iterate forward starting with charlie
+   {
+      secondary_type sec = 0;
+      int itr = db_idx64_find_primary( receiver, receiver, table, &sec, 234 );
+      eosio_assert( itr >= 0 && sec == "charlie"_n.value, "idx64_general - db_idx64_find_primary" );
+
+      uint64_t prim_next = 0;
+      int itr_next = db_idx64_next( itr, &prim_next );
+      eosio_assert( itr_next >= 0 && prim_next == 976, "idx64_general - db_idx64_next" );
+      secondary_type sec_next = 0;
+      int itr_next_expected = db_idx64_find_primary( receiver, receiver, table, &sec_next, prim_next );
+      eosio_assert( itr_next == itr_next_expected && sec_next == "emily"_n.value, "idx64_general - db_idx64_next" );
+
+      itr_next = db_idx64_next( itr_next, &prim_next );
+      eosio_assert( itr_next >= 0 && prim_next == 110, "idx64_general - db_idx64_next" );
+      itr_next_expected = db_idx64_find_primary( receiver, receiver, table, &sec_next, prim_next );
+      eosio_assert( itr_next == itr_next_expected && sec_next == "joe"_n.value, "idx64_general - db_idx64_next" );
+
+      itr_next = db_idx64_next( itr_next, &prim_next );
+      eosio_assert( itr_next < 0 && prim_next == 110, "idx64_general - db_idx64_next" );
+   }
+
+   // iterate backward starting with second bob
+   {
+      secondary_type sec = 0;
+      int itr = db_idx64_find_primary( receiver, receiver, table, &sec, 781 );
+      eosio_assert( itr >= 0 && sec == "bob"_n.value, "idx64_general - db_idx64_find_primary" );
+
+      uint64_t prim_prev = 0;
+      int itr_prev = db_idx64_previous( itr, &prim_prev );
+      eosio_assert( itr_prev >= 0 && prim_prev == 540, "idx64_general - db_idx64_previous" );
+
+      secondary_type sec_prev = 0;
+      int itr_prev_expected = db_idx64_find_primary( receiver, receiver, table, &sec_prev, prim_prev );
+      eosio_assert( itr_prev == itr_prev_expected && sec_prev == "bob"_n.value, "idx64_general - db_idx64_previous" );
+
+      itr_prev = db_idx64_previous( itr_prev, &prim_prev );
+      eosio_assert( itr_prev >= 0 && prim_prev == 650, "idx64_general - db_idx64_previous" );
+      itr_prev_expected = db_idx64_find_primary( receiver, receiver, table, &sec_prev, prim_prev );
+      eosio_assert( itr_prev == itr_prev_expected && sec_prev == "allyson"_n.value, "idx64_general - db_idx64_previous" );
+
+      itr_prev = db_idx64_previous( itr_prev, &prim_prev );
+      eosio_assert( itr_prev >= 0 && prim_prev == 265, "idx64_general - db_idx64_previous" );
+      itr_prev_expected = db_idx64_find_primary( receiver, receiver, table, &sec_prev, prim_prev );
+      eosio_assert( itr_prev == itr_prev_expected && sec_prev == "alice"_n.value, "idx64_general - db_idx64_previous" );
+
+      itr_prev = db_idx64_previous( itr_prev, &prim_prev );
+      eosio_assert( itr_prev < 0 && prim_prev == 265, "idx64_general - db_idx64_previous" );
+   }
+
+   // find_secondary
+   {
+      uint64_t prim = 0;
+      auto sec = "bob"_n.value;
+      int itr = db_idx64_find_secondary( receiver, receiver, table, &sec, &prim );
+      eosio_assert( itr >= 0 && prim == 540, "idx64_general - db_idx64_find_secondary" );
+
+      sec = "emily"_n.value;
itr = db_idx64_find_secondary( receiver, receiver, table, &sec, &prim ); + eosio_assert( itr >= 0 && prim == 976, "idx64_general - db_idx64_find_secondary" ); + + sec = "frank"_n.value; + itr = db_idx64_find_secondary( receiver, receiver, table, &sec, &prim ); + eosio_assert( itr < 0 && prim == 976, "idx64_general - db_idx64_find_secondary" ); + } + + // update and remove + { + uint64_t one_more_bob = "bob"_n.value; + const uint64_t ssn = 421; + int itr = db_idx64_store( receiver, table, receiver, ssn, &one_more_bob ); + uint64_t new_name = "billy"_n.value; + db_idx64_update( itr, receiver, &new_name ); + secondary_type sec = 0; + int sec_itr = db_idx64_find_primary( receiver, receiver, table, &sec, ssn ); + eosio_assert( sec_itr == itr && sec == new_name, "idx64_general - db_idx64_update" ); + db_idx64_remove(itr); + int itrf = db_idx64_find_primary( receiver, receiver, table, &sec, ssn ); + eosio_assert( itrf < 0, "idx64_general - db_idx64_remove" ); + } +} + +void test_api_db::idx64_lowerbound() +{ + uint64_t receiver = get_self().value; + const auto table = "myindextable"_n.value; + typedef uint64_t secondary_type; + const std::string err = "idx64_lowerbound"; + { + secondary_type lb_sec = "alice"_n.value; + uint64_t lb_prim = 0; + const uint64_t ssn = 265; + int lb = db_idx64_lowerbound( receiver, receiver, table, &lb_sec, &lb_prim ); + eosio_assert( lb_prim == ssn && lb_sec == "alice"_n.value, err.c_str() ); + eosio_assert( lb == db_idx64_find_primary(receiver, receiver, table, &lb_sec, ssn), err.c_str() ); + } + { + secondary_type lb_sec = "billy"_n.value; + uint64_t lb_prim = 0; + const uint64_t ssn = 540; + int lb = db_idx64_lowerbound( receiver, receiver, table, &lb_sec, &lb_prim ); + eosio_assert( lb_prim == ssn && lb_sec == "bob"_n.value, err.c_str() ); + eosio_assert( lb == db_idx64_find_primary(receiver, receiver, table, &lb_sec, ssn), err.c_str() ); + } + { + secondary_type lb_sec = "joe"_n.value; + uint64_t lb_prim = 0; + const uint64_t ssn = 110; + int lb = db_idx64_lowerbound( receiver, receiver, table, &lb_sec, &lb_prim ); + eosio_assert( lb_prim == ssn && lb_sec == "joe"_n.value, err.c_str() ); + eosio_assert( lb == db_idx64_find_primary(receiver, receiver, table, &lb_sec, ssn), err.c_str() ); + } + { + secondary_type lb_sec = "kevin"_n.value; + uint64_t lb_prim = 0; + int lb = db_idx64_lowerbound( receiver, receiver, table, &lb_sec, &lb_prim ); + eosio_assert( lb_prim == 0 && lb_sec == "kevin"_n.value, err.c_str() ); + eosio_assert( lb < 0, "" ); } } + +void test_api_db::idx64_upperbound() +{ + uint64_t receiver = get_self().value; + const auto table = "myindextable"_n.value; + typedef uint64_t secondary_type; + const std::string err = "idx64_upperbound"; + { + secondary_type ub_sec = "alice"_n.value; + uint64_t ub_prim = 0; + const uint64_t allyson_ssn = 650; + int ub = db_idx64_upperbound( receiver, receiver, table, &ub_sec, &ub_prim ); + eosio_assert( ub_prim == allyson_ssn && ub_sec == "allyson"_n.value, "" ); + eosio_assert( ub == db_idx64_find_primary(receiver, receiver, table, &ub_sec, allyson_ssn), err.c_str() ); + } + { + secondary_type ub_sec = "billy"_n.value; + uint64_t ub_prim = 0; + const uint64_t bob_ssn = 540; + int ub = db_idx64_upperbound( receiver, receiver, table, &ub_sec, &ub_prim ); + eosio_assert( ub_prim == bob_ssn && ub_sec == "bob"_n.value, "" ); + eosio_assert( ub == db_idx64_find_primary(receiver, receiver, table, &ub_sec, bob_ssn), err.c_str() ); + } + { + secondary_type ub_sec = "joe"_n.value; + uint64_t ub_prim = 0; + int ub = 
db_idx64_upperbound( receiver, receiver, table, &ub_sec, &ub_prim ); + eosio_assert( ub_prim == 0 && ub_sec == "joe"_n.value, err.c_str() ); + eosio_assert( ub < 0, err.c_str() ); + } + { + secondary_type ub_sec = "kevin"_n.value; + uint64_t ub_prim = 0; + int ub = db_idx64_upperbound( receiver, receiver, table, &ub_sec, &ub_prim ); + eosio_assert( ub_prim == 0 && ub_sec == "kevin"_n.value, err.c_str() ); + eosio_assert( ub < 0, err.c_str() ); + } +} + +void test_api_db::test_invalid_access( name _code, uint64_t val, uint32_t index, bool store ) +{ + uint64_t code = _code.value; + uint64_t receiver = get_self().value; + uint64_t scope = "access"_n.value; + uint64_t table = scope; + uint64_t pk = scope; + + int32_t itr = -1; + uint64_t value = 0; + switch( index ) { + case 1: + itr = db_idx64_find_primary( code, scope, table, &value, pk ); + break; + case 0: + default: + itr = db_find_i64( code, scope, table, pk ); + break; + } + if(store) { + uint64_t value_to_store = val; + if( itr < 0 ) { + switch(index) { + case 1: + db_idx64_store( scope, table, receiver, pk, &value_to_store ); + break; + case 0: + default: + db_store_i64( scope, table, receiver, pk, &value_to_store, sizeof(value_to_store) ); + break; + } + } else { + switch(index) { + case 1: + db_idx64_update( itr, receiver, &value_to_store); + break; + case 0: + default: + db_update_i64( itr, receiver, &value_to_store, sizeof(value_to_store) ); + break; + } + } + //eosio::print("test_invalid_access: stored ", value_to_store, "\n"); + } else { + eosio_assert( itr >= 0, "test_invalid_access: could not find row" ); + switch(index) { + case 1: + break; + case 0: + default: + eosio_assert( db_get_i64( itr, &value, sizeof(value) ) == sizeof(value), + "test_invalid_access: value in primary table was incorrect size" ); + break; + } + //eosio::print("test_invalid_access: expected ", val, " and retrieved ", value, "\n"); + eosio_assert( value == val, "test_invalid_access: value did not match" ); + } +} + +void test_api_db::idx_double_nan_create_fail() { + uint64_t receiver = get_self().value; + double x = 0.0; + x = x / x; // create a NaN + db_idx_double_store( "nan"_n.value, "nan"_n.value, receiver, 0, &x ); // should fail +} + +void test_api_db::idx_double_nan_modify_fail() { + uint64_t receiver = get_self().value; + double x = 0.0; + db_idx_double_store( "nan"_n.value, "nan"_n.value, receiver, 0, &x ); + auto itr = db_idx_double_find_primary( receiver, "nan"_n.value, "nan"_n.value, &x, 0 ); + x = 0.0; + x = x / x; // create a NaN + db_idx_double_update( itr, 0, &x ); // should fail +} + +void test_api_db::idx_double_nan_lookup_fail( uint32_t lookup_type ) { + uint64_t receiver = get_self().value; + + uint64_t pk; + double x = 0.0; + db_idx_double_store( "nan"_n.value, "nan"_n.value, receiver, 0, &x ); + x = x / x; // create a NaN + switch(lookup_type) { + case 0: // find + db_idx_double_find_secondary( receiver, "nan"_n.value, "nan"_n.value, &x, &pk ); + break; + case 1: // lower bound + db_idx_double_lowerbound( receiver, "nan"_n.value, "nan"_n.value, &x, &pk ); + break; + case 2: // upper bound + db_idx_double_upperbound( receiver, "nan"_n.value, "nan"_n.value, &x, &pk ); + break; + default: + eosio_assert( false, "idx_double_nan_lookup_fail: unexpected lookup_type" ); + } +} + +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wcast-align" + +void test_api_db::misaligned_secondary_key256_tests() { + uint64_t receiver = get_self().value; + auto key = eosio::checksum256::make_from_word_sequence( 0ULL, 0ULL, 0ULL, 42ULL ); 
+   char* ptr = (char*)(&key);
+   ptr += 1;
+   // test that store doesn't crash on unaligned data
+   db_idx256_store( "testapi"_n.value, "testtable"_n.value, "testapi"_n.value, 1, (uint128_t*)(ptr), 2 );
+   // test that find_primary doesn't crash on unaligned data
+   db_idx256_find_primary( "testapi"_n.value, "testtable"_n.value, "testapi"_n.value, (uint128_t*)(ptr), 2, 0 );
+}
+
+#pragma clang diagnostic pop
diff --git a/unittests/test-contracts/test_api_db/test_api_db.hpp b/unittests/test-contracts/test_api_db/test_api_db.hpp
new file mode 100644
index 00000000000..9487434d518
--- /dev/null
+++ b/unittests/test-contracts/test_api_db/test_api_db.hpp
@@ -0,0 +1,46 @@
+/**
+ * @file
+ * @copyright defined in eos/LICENSE
+ */
+#pragma once
+
+#include <eosio/eosio.hpp>
+
+class [[eosio::contract]] test_api_db : public eosio::contract {
+public:
+   using eosio::contract::contract;
+
+   [[eosio::action("pg")]]
+   void primary_i64_general();
+
+   [[eosio::action("pl")]]
+   void primary_i64_lowerbound();
+
+   [[eosio::action("pu")]]
+   void primary_i64_upperbound();
+
+   [[eosio::action("s1g")]]
+   void idx64_general();
+
+   [[eosio::action("s1l")]]
+   void idx64_lowerbound();
+
+   [[eosio::action("s1u")]]
+   void idx64_upperbound();
+
+   [[eosio::action("tia")]]
+   void test_invalid_access( eosio::name code, uint64_t val, uint32_t index, bool store );
+
+   [[eosio::action("sdnancreate")]]
+   void idx_double_nan_create_fail();
+
+   [[eosio::action("sdnanmodify")]]
+   void idx_double_nan_modify_fail();
+
+   [[eosio::action("sdnanlookup")]]
+   void idx_double_nan_lookup_fail( uint32_t lookup_type );
+
+   [[eosio::action("sk32align")]]
+   void misaligned_secondary_key256_tests();
+
+};
diff --git a/unittests/test-contracts/test_api_db/test_api_db.wasm b/unittests/test-contracts/test_api_db/test_api_db.wasm
index 83d6591c150b6c16330823fd4bfedb4ae9fd515f..5aefa71cd1a9b3e76be97275b809ec6a06848430 100755
GIT binary patch
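[Editor's note, not part of the patch] The test_api_db.hpp header above relies on eosio.cdt's attribute-based dispatch: each [[eosio::action("...")]] attribute binds a short on-chain action name (e.g. "pg", "s1g") to a member function, so a test harness can trigger primary_i64_general() by pushing an action named "pg" to the contract account. The sketch below shows roughly what the generated apply() boils down to for such a class; the switch cases and execute_action calls illustrate the mechanism and are not the literal CDT-generated code.

   // Sketch (illustrative only): attribute-driven action dispatch.
   #include <eosio/eosio.hpp>

   extern "C" void apply( uint64_t receiver, uint64_t code, uint64_t action ) {
      if( code == receiver ) {
         switch( action ) {
            case "pg"_n.value:   // [[eosio::action("pg")]]
               eosio::execute_action( eosio::name(receiver), eosio::name(code),
                                      &test_api_db::primary_i64_general );
               break;
            case "s1g"_n.value:  // [[eosio::action("s1g")]]
               eosio::execute_action( eosio::name(receiver), eosio::name(code),
                                      &test_api_db::idx64_general );
               break;
            // ... one case per [[eosio::action]] declared in the header ...
         }
      }
   }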
diff --git a/unittests/test-contracts/test_api_db/test_db.cpp b/unittests/test-contracts/test_api_db/test_db.cpp
deleted file mode 100644
index 34d84b968f3..00000000000
--- a/unittests/test-contracts/test_api_db/test_db.cpp
+++ /dev/null
@@ -1,632 +0,0 @@
-/**
- * @file
- * @copyright defined in eos/LICENSE
- */
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-
-#include "../test_api/test_api.hpp"
-
-int primary[11]      = { 0,1,2,3,4,5,6,7,8,9,10 };
-int secondary[11]    = { 7,0,1,3,6,9,10,2,4,5,8 };
-int tertiary[11]     = { 0,10,1,2,4,3,5,6,7,8,9 };
-
-int primary_lb[11]   = { 0,0,0,3,3,3,6,7,7,9,9 };
-int secondary_lb[11] = { 0,0,10,0,10,10,0,7,8,0,10 };
-int tertiary_lb[11]  = { 0,1,2,3,2,5,6,7,8,9,0 };
-
-int primary_ub[11]   = { 3,3,3,6,6,6,7,9,9,-1,-1 };
-int secondary_ub[11] = { 10,10,8,10,8,8,10,0,-1,10,8 };
-int tertiary_ub[11]  = { 1,2,3,5,3,6,7,8,9,-1,1 };
-
-#pragma pack(push, 1)
-struct test_model {
-   eosio::name name;
-   unsigned char age;
-   uint64_t phone;
-};
-
-struct test_model_v2 : test_model {
-   test_model_v2() : new_field(0) {}
-   uint64_t new_field;
-};
-
-struct test_model_v3 : test_model_v2 {
-   uint64_t another_field;
-};
-
-struct TestModel128x2 {
-   uint128_t number;
-   uint128_t price;
-   uint64_t extra;
-   uint64_t table_name;
-};
-
-struct TestModel128x2_V2 : TestModel128x2 {
-   uint64_t new_field;
-};
-
-struct TestModel3xi64 {
-   uint64_t a;
-   uint64_t b;
-   uint64_t c;
-   uint64_t table;
-};
-
-struct TestModel3xi64_V2 : TestModel3xi64 {
-   uint64_t new_field;
-};
-
-#pragma pack(pop)
-
-#define STRLEN(s) my_strlen(s)
-
-extern "C" {
-   void my_memset( void *vptr, unsigned char val, unsigned int size ) {
-      char *ptr = (char *)vptr;
-      while(size--) { *(ptr++)=(char)val; }
-   }
-   uint32_t my_strlen( const char *str ) {
-      uint32_t len = 0;
-      while(str[len]) ++len;
-      return len;
-   }
-   bool my_memcmp( void *s1, void *s2, uint32_t n ) {
-      unsigned char *c1 = (unsigned char*)s1;
-      unsigned char *c2 = (unsigned char*)s2;
-      for ( uint32_t i = 0; i
< n; i++ ) { - if (c1[i] != c2[i]) { - return false; - } - } - return true; - } -} - -void test_db::primary_i64_general( uint64_t receiver, uint64_t code, uint64_t action ) -{ - (void)code; (void)action; - auto table1 = "table1"_n.value; - - int alice_itr = db_store_i64( receiver, table1, receiver, "alice"_n.value, "alice's info", strlen("alice's info") ); - db_store_i64( receiver, table1, receiver, "bob"_n.value, "bob's info", strlen("bob's info") ); - db_store_i64( receiver, table1, receiver, "charlie"_n.value, "charlie's info", strlen("charlies's info") ); - db_store_i64( receiver, table1, receiver, "allyson"_n.value, "allyson's info", strlen("allyson's info") ); - - - // find - { - uint64_t prim = 0; - int itr_next = db_next_i64( alice_itr, &prim ); - int itr_next_expected = db_find_i64( receiver, receiver, table1, "allyson"_n.value ); - eosio_assert( itr_next == itr_next_expected && prim == "allyson"_n.value, "primary_i64_general - db_find_i64" ); - itr_next = db_next_i64( itr_next, &prim ); - itr_next_expected = db_find_i64( receiver, receiver, table1, "bob"_n.value ); - eosio_assert( itr_next == itr_next_expected && prim == "bob"_n.value, "primary_i64_general - db_next_i64" ); - } - - // next - { - int charlie_itr = db_find_i64( receiver, receiver, table1, "charlie"_n.value ); - // nothing after charlie - uint64_t prim = 0; - int end_itr = db_next_i64( charlie_itr, &prim ); - eosio_assert( end_itr < 0, "primary_i64_general - db_next_i64" ); - // prim didn't change - eosio_assert( prim == 0, "primary_i64_general - db_next_i64" ); - } - - // previous - { - int charlie_itr = db_find_i64( receiver, receiver, table1, "charlie"_n.value ); - uint64_t prim = 0; - int itr_prev = db_previous_i64( charlie_itr, &prim ); - int itr_prev_expected = db_find_i64( receiver, receiver, table1, "bob"_n.value ); - eosio_assert( itr_prev == itr_prev_expected && prim == "bob"_n.value, "primary_i64_general - db_previous_i64" ); - - itr_prev = db_previous_i64( itr_prev, &prim ); - itr_prev_expected = db_find_i64( receiver, receiver, table1, "allyson"_n.value ); - eosio_assert( itr_prev == itr_prev_expected && prim == "allyson"_n.value, "primary_i64_general - db_previous_i64" ); - - itr_prev = db_previous_i64( itr_prev, &prim ); - itr_prev_expected = db_find_i64( receiver, receiver, table1, "alice"_n.value ); - eosio_assert( itr_prev == itr_prev_expected && prim == "alice"_n.value, "primary_i64_general - db_previous_i64" ); - - itr_prev = db_previous_i64( itr_prev, &prim ); - eosio_assert( itr_prev < 0 && prim == "alice"_n.value, "primary_i64_general - db_previous_i64" ); - } - - // remove - { - int itr = db_find_i64( receiver, receiver, table1, "alice"_n.value ); - eosio_assert( itr >= 0, "primary_i64_general - db_find_i64" ); - db_remove_i64( itr ); - itr = db_find_i64( receiver, receiver, table1, "alice"_n.value ); - eosio_assert( itr < 0, "primary_i64_general - db_find_i64" ); - } - - // get - { - int itr = db_find_i64( receiver, receiver, table1, "bob"_n.value ); - eosio_assert( itr >= 0, "" ); - uint32_t buffer_len = 5; - char value[50]; - auto len = db_get_i64( itr, value, buffer_len ); - value[buffer_len] = '\0'; - std::string s(value); - eosio_assert( uint32_t(len) == buffer_len, "primary_i64_general - db_get_i64" ); - eosio_assert( s == "bob's", "primary_i64_general - db_get_i64 - 5" ); - - buffer_len = 20; - len = db_get_i64( itr, value, 0 ); - len = db_get_i64( itr, value, (uint32_t)len ); - value[len] = '\0'; - std::string sfull(value); - eosio_assert( sfull == "bob's info", "primary_i64_general 
- db_get_i64 - full" ); - } - - // update - { - int itr = db_find_i64( receiver, receiver, table1, "bob"_n.value ); - eosio_assert( itr >= 0, "" ); - const char* new_value = "bob's new info"; - uint32_t new_value_len = strlen(new_value); - db_update_i64( itr, receiver, new_value, new_value_len ); - char ret_value[50]; - db_get_i64( itr, ret_value, new_value_len ); - ret_value[new_value_len] = '\0'; - std::string sret(ret_value); - eosio_assert( sret == "bob's new info", "primary_i64_general - db_update_i64" ); - } -} - -void test_db::primary_i64_lowerbound( uint64_t receiver, uint64_t code, uint64_t action ) -{ - (void)code;(void)action; - auto table = "mytable"_n.value; - db_store_i64( receiver, table, receiver, "alice"_n.value, "alice's info", strlen("alice's info") ); - db_store_i64( receiver, table, receiver, "bob"_n.value, "bob's info", strlen("bob's info") ); - db_store_i64( receiver, table, receiver, "charlie"_n.value, "charlie's info", strlen("charlies's info") ); - db_store_i64( receiver, table, receiver, "emily"_n.value, "emily's info", strlen("emily's info") ); - db_store_i64( receiver, table, receiver, "allyson"_n.value, "allyson's info", strlen("allyson's info") ); - db_store_i64( receiver, table, receiver, "joe"_n.value, "nothing here", strlen("nothing here") ); - - const std::string err = "primary_i64_lowerbound"; - - { - int lb = db_lowerbound_i64( receiver, receiver, table, "alice"_n.value ); - eosio_assert( lb == db_find_i64(receiver, receiver, table, "alice"_n.value), err.c_str() ); - } - { - int lb = db_lowerbound_i64( receiver, receiver, table, "billy"_n.value ); - eosio_assert( lb == db_find_i64(receiver, receiver, table, "bob"_n.value), err.c_str() ); - } - { - int lb = db_lowerbound_i64( receiver, receiver, table, "frank"_n.value ); - eosio_assert( lb == db_find_i64(receiver, receiver, table, "joe"_n.value), err.c_str() ); - } - { - int lb = db_lowerbound_i64( receiver, receiver, table, "joe"_n.value ); - eosio_assert( lb == db_find_i64(receiver, receiver, table, "joe"_n.value), err.c_str() ); - } - { - int lb = db_lowerbound_i64( receiver, receiver, table, "kevin"_n.value ); - eosio_assert( lb < 0, err.c_str() ); - } -} - -void test_db::primary_i64_upperbound( uint64_t receiver, uint64_t code, uint64_t action ) -{ - (void)code;(void)action; - auto table = "mytable"_n.value; - const std::string err = "primary_i64_upperbound"; - { - int ub = db_upperbound_i64( receiver, receiver, table, "alice"_n.value ); - eosio_assert( ub == db_find_i64(receiver, receiver, table, "allyson"_n.value), err.c_str() ); - } - { - int ub = db_upperbound_i64( receiver, receiver, table, "billy"_n.value ); - eosio_assert( ub == db_find_i64(receiver, receiver, table, "bob"_n.value), err.c_str() ); - } - { - int ub = db_upperbound_i64( receiver, receiver, table, "frank"_n.value ); - eosio_assert( ub == db_find_i64(receiver, receiver, table, "joe"_n.value), err.c_str() ); - } - { - int ub = db_upperbound_i64( receiver, receiver, table, "joe"_n.value ); - eosio_assert( ub < 0, err.c_str() ); - } - { - int ub = db_upperbound_i64( receiver, receiver, table, "kevin"_n.value ); - eosio_assert( ub < 0, err.c_str() ); - } -} - -void test_db::idx64_general( uint64_t receiver, uint64_t code, uint64_t action ) -{ - (void)code;(void)action; - const auto table = "myindextable"_n.value; - - typedef uint64_t secondary_type; - - struct record { - uint64_t ssn; - secondary_type name; - }; - - record records[] = { {265, "alice"_n.value}, - {781, "bob"_n.value}, - {234, "charlie"_n.value}, - {650, 
"allyson"_n.value}, - {540, "bob"_n.value}, - {976, "emily"_n.value}, - {110, "joe"_n.value} }; - - for ( uint32_t i = 0; i < sizeof(records)/sizeof(records[0]); ++i ) { - db_idx64_store( receiver, table, receiver, records[i].ssn, &records[i].name ); - } - - // find_primary - { - secondary_type sec = 0; - int itr = db_idx64_find_primary( receiver, receiver, table, &sec, 999 ); - eosio_assert( itr < 0 && sec == 0, "idx64_general - db_idx64_find_primary" ); - itr = db_idx64_find_primary( receiver, receiver, table, &sec, 110 ); - eosio_assert( itr >= 0 && sec == "joe"_n.value, "idx64_general - db_idx64_find_primary" ); - uint64_t prim_next = 0; - int itr_next = db_idx64_next( itr, &prim_next ); - eosio_assert( itr_next < 0 && prim_next == 0, "idx64_general - db_idx64_find_primary" ); - } - - // iterate forward starting with charlie - { - secondary_type sec = 0; - int itr = db_idx64_find_primary( receiver, receiver, table, &sec, 234 ); - eosio_assert( itr >= 0 && sec == "charlie"_n.value, "idx64_general - db_idx64_find_primary" ); - - uint64_t prim_next = 0; - int itr_next = db_idx64_next( itr, &prim_next ); - eosio_assert( itr_next >= 0 && prim_next == 976, "idx64_general - db_idx64_next" ); - secondary_type sec_next = 0; - int itr_next_expected = db_idx64_find_primary( receiver, receiver, table, &sec_next, prim_next ); - eosio_assert( itr_next == itr_next_expected && sec_next == "emily"_n.value, "idx64_general - db_idx64_next" ); - - itr_next = db_idx64_next( itr_next, &prim_next ); - eosio_assert( itr_next >= 0 && prim_next == 110, "idx64_general - db_idx64_next" ); - itr_next_expected = db_idx64_find_primary( receiver, receiver, table, &sec_next, prim_next ); - eosio_assert( itr_next == itr_next_expected && sec_next == "joe"_n.value, "idx64_general - db_idx64_next" ); - - itr_next = db_idx64_next( itr_next, &prim_next ); - eosio_assert( itr_next < 0 && prim_next == 110, "idx64_general - db_idx64_next" ); - } - - // iterate backward staring with second bob - { - secondary_type sec = 0; - int itr = db_idx64_find_primary( receiver, receiver, table, &sec, 781 ); - eosio_assert( itr >= 0 && sec == "bob"_n.value, "idx64_general - db_idx64_find_primary" ); - - uint64_t prim_prev = 0; - int itr_prev = db_idx64_previous( itr, &prim_prev ); - eosio_assert( itr_prev >= 0 && prim_prev == 540, "idx64_general - db_idx64_previous" ); - - secondary_type sec_prev = 0; - int itr_prev_expected = db_idx64_find_primary( receiver, receiver, table, &sec_prev, prim_prev ); - eosio_assert( itr_prev == itr_prev_expected && sec_prev == "bob"_n.value, "idx64_general - db_idx64_previous" ); - - itr_prev = db_idx64_previous( itr_prev, &prim_prev ); - eosio_assert( itr_prev >= 0 && prim_prev == 650, "idx64_general - db_idx64_previous" ); - itr_prev_expected = db_idx64_find_primary( receiver, receiver, table, &sec_prev, prim_prev ); - eosio_assert( itr_prev == itr_prev_expected && sec_prev == "allyson"_n.value, "idx64_general - db_idx64_previous" ); - - itr_prev = db_idx64_previous( itr_prev, &prim_prev ); - eosio_assert( itr_prev >= 0 && prim_prev == 265, "idx64_general - db_idx64_previous" ); - itr_prev_expected = db_idx64_find_primary( receiver, receiver, table, &sec_prev, prim_prev ); - eosio_assert( itr_prev == itr_prev_expected && sec_prev == "alice"_n.value, "idx64_general - db_idx64_previous" ); - - itr_prev = db_idx64_previous( itr_prev, &prim_prev ); - eosio_assert( itr_prev < 0 && prim_prev == 265, "idx64_general - db_idx64_previous" ); - } - - // find_secondary - { - uint64_t prim = 0; - auto sec = 
"bob"_n.value; - int itr = db_idx64_find_secondary( receiver, receiver, table, &sec, &prim ); - eosio_assert( itr >= 0 && prim == 540, "idx64_general - db_idx64_find_secondary" ); - - sec = "emily"_n.value; - itr = db_idx64_find_secondary( receiver, receiver, table, &sec, &prim ); - eosio_assert( itr >= 0 && prim == 976, "idx64_general - db_idx64_find_secondary" ); - - sec = "frank"_n.value; - itr = db_idx64_find_secondary( receiver, receiver, table, &sec, &prim ); - eosio_assert( itr < 0 && prim == 976, "idx64_general - db_idx64_find_secondary" ); - } - - // update and remove - { - uint64_t one_more_bob = "bob"_n.value; - const uint64_t ssn = 421; - int itr = db_idx64_store( receiver, table, receiver, ssn, &one_more_bob ); - uint64_t new_name = "billy"_n.value; - db_idx64_update( itr, receiver, &new_name ); - secondary_type sec = 0; - int sec_itr = db_idx64_find_primary( receiver, receiver, table, &sec, ssn ); - eosio_assert( sec_itr == itr && sec == new_name, "idx64_general - db_idx64_update" ); - db_idx64_remove(itr); - int itrf = db_idx64_find_primary( receiver, receiver, table, &sec, ssn ); - eosio_assert( itrf < 0, "idx64_general - db_idx64_remove" ); - } -} - -void test_db::idx64_lowerbound( uint64_t receiver, uint64_t code, uint64_t action ) -{ - (void)code;(void)action; - const auto table = "myindextable"_n.value; - typedef uint64_t secondary_type; - const std::string err = "idx64_lowerbound"; - { - secondary_type lb_sec = "alice"_n.value; - uint64_t lb_prim = 0; - const uint64_t ssn = 265; - int lb = db_idx64_lowerbound( receiver, receiver, table, &lb_sec, &lb_prim ); - eosio_assert( lb_prim == ssn && lb_sec == "alice"_n.value, err.c_str() ); - eosio_assert( lb == db_idx64_find_primary(receiver, receiver, table, &lb_sec, ssn), err.c_str() ); - } - { - secondary_type lb_sec = "billy"_n.value; - uint64_t lb_prim = 0; - const uint64_t ssn = 540; - int lb = db_idx64_lowerbound( receiver, receiver, table, &lb_sec, &lb_prim ); - eosio_assert( lb_prim == ssn && lb_sec == "bob"_n.value, err.c_str() ); - eosio_assert( lb == db_idx64_find_primary(receiver, receiver, table, &lb_sec, ssn), err.c_str() ); - } - { - secondary_type lb_sec = "joe"_n.value; - uint64_t lb_prim = 0; - const uint64_t ssn = 110; - int lb = db_idx64_lowerbound( receiver, receiver, table, &lb_sec, &lb_prim ); - eosio_assert( lb_prim == ssn && lb_sec == "joe"_n.value, err.c_str() ); - eosio_assert( lb == db_idx64_find_primary(receiver, receiver, table, &lb_sec, ssn), err.c_str() ); - } - { - secondary_type lb_sec = "kevin"_n.value; - uint64_t lb_prim = 0; - int lb = db_idx64_lowerbound( receiver, receiver, table, &lb_sec, &lb_prim ); - eosio_assert( lb_prim == 0 && lb_sec == "kevin"_n.value, err.c_str() ); - eosio_assert( lb < 0, "" ); - } -} - -void test_db::idx64_upperbound( uint64_t receiver, uint64_t code, uint64_t action ) -{ - (void)code;(void)action; - const auto table = "myindextable"_n.value; - typedef uint64_t secondary_type; - const std::string err = "idx64_upperbound"; - { - secondary_type ub_sec = "alice"_n.value; - uint64_t ub_prim = 0; - const uint64_t allyson_ssn = 650; - int ub = db_idx64_upperbound( receiver, receiver, table, &ub_sec, &ub_prim ); - eosio_assert( ub_prim == allyson_ssn && ub_sec == "allyson"_n.value, "" ); - eosio_assert( ub == db_idx64_find_primary(receiver, receiver, table, &ub_sec, allyson_ssn), err.c_str() ); - } - { - secondary_type ub_sec = "billy"_n.value; - uint64_t ub_prim = 0; - const uint64_t bob_ssn = 540; - int ub = db_idx64_upperbound( receiver, receiver, table, &ub_sec, 
&ub_prim ); - eosio_assert( ub_prim == bob_ssn && ub_sec == "bob"_n.value, "" ); - eosio_assert( ub == db_idx64_find_primary(receiver, receiver, table, &ub_sec, bob_ssn), err.c_str() ); - } - { - secondary_type ub_sec = "joe"_n.value; - uint64_t ub_prim = 0; - int ub = db_idx64_upperbound( receiver, receiver, table, &ub_sec, &ub_prim ); - eosio_assert( ub_prim == 0 && ub_sec == "joe"_n.value, err.c_str() ); - eosio_assert( ub < 0, err.c_str() ); - } - { - secondary_type ub_sec = "kevin"_n.value; - uint64_t ub_prim = 0; - int ub = db_idx64_upperbound( receiver, receiver, table, &ub_sec, &ub_prim ); - eosio_assert( ub_prim == 0 && ub_sec == "kevin"_n.value, err.c_str() ); - eosio_assert( ub < 0, err.c_str() ); - } -} - -void test_db::test_invalid_access( uint64_t receiver, uint64_t code, uint64_t action ) -{ - (void)code;(void)action; - auto act = eosio::get_action(1, 0); - auto ia = eosio::unpack(act.data); - uint64_t scope = "access"_n.value; - uint64_t table = scope; - uint64_t pk = scope; - - int32_t itr = -1; - uint64_t value = 0; - switch( ia.index ) { - case 1: - itr = db_idx64_find_primary( ia.code, scope, table, &value, pk ); - break; - case 0: - default: - itr = db_find_i64( ia.code, scope, table, pk ); - break; - } - if(ia.store) { - uint64_t value_to_store = ia.val; - if( itr < 0 ) { - switch(ia.index) { - case 1: - db_idx64_store( scope, table, receiver, pk, &value_to_store ); - break; - case 0: - default: - db_store_i64( scope, table, receiver, pk, &value_to_store, sizeof(value_to_store) ); - break; - } - } else { - switch(ia.index) { - case 1: - db_idx64_update( itr, receiver, &value_to_store); - break; - case 0: - default: - db_update_i64( itr, receiver, &value_to_store, sizeof(value_to_store) ); - break; - } - } - //eosio::print("test_invalid_access: stored ", value_to_store, "\n"); - } else { - eosio_assert( itr >= 0, "test_invalid_access: could not find row" ); - switch(ia.index) { - case 1: - break; - case 0: - default: - eosio_assert( db_get_i64( itr, &value, sizeof(value) ) == sizeof(value), - "test_invalid_access: value in primary table was incorrect size" ); - break; - } - //eosio::print("test_invalid_access: expected ", ia.val, " and retrieved ", value, "\n"); - eosio_assert( value == ia.val, "test_invalid_access: value did not match" ); - } -} - -void test_db::idx_double_nan_create_fail( uint64_t receiver, uint64_t, uint64_t ) { - double x = 0.0; - x = x / x; // create a NaN - db_idx_double_store( "nan"_n.value, "nan"_n.value, receiver, 0, &x ); // should fail -} - -void test_db::idx_double_nan_modify_fail( uint64_t receiver, uint64_t, uint64_t ) { - double x = 0.0; - db_idx_double_store( "nan"_n.value, "nan"_n.value, receiver, 0, &x ); - auto itr = db_idx_double_find_primary( receiver, "nan"_n.value, "nan"_n.value, &x, 0 ); - x = 0.0; - x = x / x; // create a NaN - db_idx_double_update( itr, 0, &x ); // should fail -} - -void test_db::idx_double_nan_lookup_fail( uint64_t receiver, uint64_t, uint64_t ) { - auto act = eosio::get_action( 1,0 ); - auto lookup_type = eosio::unpack(act.data); - - uint64_t pk; - double x = 0.0; - db_idx_double_store( "nan"_n.value, "nan"_n.value, receiver, 0, &x ); - x = x / x; // create a NaN - switch(lookup_type) { - case 0: // find - db_idx_double_find_secondary( receiver, "nan"_n.value, "nan"_n.value, &x, &pk ); - break; - case 1: // lower bound - db_idx_double_lowerbound( receiver, "nan"_n.value, "nan"_n.value, &x, &pk ); - break; - case 2: // upper bound - db_idx_double_upperbound( receiver, "nan"_n.value, "nan"_n.value, &x, &pk ); 
- break; - default: - eosio_assert( false, "idx_double_nan_lookup_fail: unexpected lookup_type" ); - } -} - -#pragma clang diagnostic push -#pragma clang diagnostic ignored "-Wcast-align" - -void test_db::misaligned_secondary_key256_tests( uint64_t /* receiver */, uint64_t, uint64_t ) { - auto key = eosio::checksum256::make_from_word_sequence( 0ULL, 0ULL, 0ULL, 42ULL ); - char* ptr = (char*)(&key); - ptr += 1; - // test that store doesn't crash on unaligned data - db_idx256_store( "testapi"_n.value, "testtable"_n.value, "testapi"_n.value, 1, (uint128_t*)(ptr), 2 ); - // test that find_primary doesn't crash on unaligned data - db_idx256_find_primary( "testapi"_n.value, "testtable"_n.value, "testapi"_n.value, (uint128_t*)(ptr), 2,0 ); -} - -#pragma clang diagnostic pop diff --git a/unittests/test-contracts/test_api_mem/CMakeLists.txt b/unittests/test-contracts/test_api_mem/CMakeLists.txt deleted file mode 100644 index b0de4a71135..00000000000 --- a/unittests/test-contracts/test_api_mem/CMakeLists.txt +++ /dev/null @@ -1,5 +0,0 @@ -if( ${eosio.cdt_FOUND} ) - add_executable( test_api_mem test_api_mem.cpp ) -else() - configure_file( ${CMAKE_CURRENT_SOURCE_DIR}/test_api_mem.wasm ${CMAKE_CURRENT_BINARY_DIR}/test_api_mem.wasm COPYONLY ) -endif() diff --git a/unittests/test-contracts/test_api_mem/test_api_mem.cpp b/unittests/test-contracts/test_api_mem/test_api_mem.cpp deleted file mode 100644 index df635e35a0d..00000000000 --- a/unittests/test-contracts/test_api_mem/test_api_mem.cpp +++ /dev/null @@ -1,49 +0,0 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ -#include - -#include "../test_api/test_api.hpp" -#include "test_extended_memory.cpp" -#include "test_memory.cpp" - -extern "C" { - void apply( uint64_t /*receiver*/, uint64_t code, uint64_t action ) { - require_auth(code); - - //test_extended_memory - WASM_TEST_HANDLER( test_extended_memory, test_initial_buffer ); - WASM_TEST_HANDLER( test_extended_memory, test_page_memory ); - WASM_TEST_HANDLER( test_extended_memory, test_page_memory_exceeded ); - WASM_TEST_HANDLER( test_extended_memory, test_page_memory_negative_bytes ); - - //test_memory - WASM_TEST_HANDLER( test_memory, test_memory_allocs ); - WASM_TEST_HANDLER( test_memory, test_memory_hunk ); - WASM_TEST_HANDLER( test_memory, test_memory_hunks ); - WASM_TEST_HANDLER( test_memory, test_memory_hunks_disjoint ); - WASM_TEST_HANDLER( test_memory, test_memset_memcpy ); - WASM_TEST_HANDLER( test_memory, test_memcpy_overlap_start ); - WASM_TEST_HANDLER( test_memory, test_memcpy_overlap_end ); - WASM_TEST_HANDLER( test_memory, test_memcmp ); - WASM_TEST_HANDLER( test_memory, test_outofbound_0 ); - WASM_TEST_HANDLER( test_memory, test_outofbound_1 ); - WASM_TEST_HANDLER( test_memory, test_outofbound_2 ); - WASM_TEST_HANDLER( test_memory, test_outofbound_3 ); - WASM_TEST_HANDLER( test_memory, test_outofbound_4 ); - WASM_TEST_HANDLER( test_memory, test_outofbound_5 ); - WASM_TEST_HANDLER( test_memory, test_outofbound_6 ); - WASM_TEST_HANDLER( test_memory, test_outofbound_7 ); - WASM_TEST_HANDLER( test_memory, test_outofbound_8 ); - WASM_TEST_HANDLER( test_memory, test_outofbound_9 ); - WASM_TEST_HANDLER( test_memory, test_outofbound_10 ); - WASM_TEST_HANDLER( test_memory, test_outofbound_11 ); - WASM_TEST_HANDLER( test_memory, test_outofbound_12 ); - WASM_TEST_HANDLER( test_memory, test_outofbound_13 ); - - //unhandled test call - eosio_assert( false, "Unknown Test" ); - } - -} diff --git a/unittests/test-contracts/test_api_mem/test_api_mem.wasm 
b/unittests/test-contracts/test_api_mem/test_api_mem.wasm
deleted file mode 100755
index 89582ffaeca02a6541abff2332ec2116aaf00dd3..0000000000000000000000000000000000000000
GIT binary patch
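[Editor's note, not part of the patch] The deleted test_api_mem.cpp above shows the older, hand-written dispatch style used before attribute-generated actions: apply() walks a list of WASM_TEST_HANDLER(class, method) entries. The sketch below is a plausible shape of that plumbing inferred from context, not the verbatim macros in ../test_api/test_api.hpp: each handler hashes the class and method names into the 64-bit action value and invokes the matching static test method.

   // Sketch (illustrative only): plausible WASM_TEST_HANDLER plumbing.
   inline constexpr uint32_t djb_hash( const char* s, uint32_t h = 5381 ) {
      // djb2-style string hash over the class or method name
      return *s ? djb_hash( s + 1, h * 33 ^ uint32_t(*s) ) : h;
   }

   // Pack the two name hashes into one uint64_t action value...
   #define WASM_TEST_ACTION( CLASS, METHOD ) \
      ( (uint64_t(djb_hash(CLASS)) << 32) | uint64_t(djb_hash(METHOD)) )

   // ...and dispatch when the incoming action matches.
   #define WASM_TEST_HANDLER( CLASS, METHOD )               \
      if( action == WASM_TEST_ACTION(#CLASS, #METHOD) ) {   \
         CLASS::METHOD();                                   \
         return;                                            \
      }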
diff --git a/unittests/test-contracts/test_api_mem/test_extended_memory.cpp b/unittests/test-contracts/test_api_mem/test_extended_memory.cpp
deleted file mode 100644
index 8a66d528150..00000000000
--- a/unittests/test-contracts/test_api_mem/test_extended_memory.cpp
+++ /dev/null
@@ -1,112 +0,0 @@
-/**
- * @file
- * @copyright defined in eos/LICENSE.txt
- */
-#include
-#include
-
-#include "../test_api/test_api.hpp"
-
-void verify( const void* const ptr, const uint32_t val, const uint32_t size) {
-   const char* char_ptr = (const char*)ptr;
-   for ( uint32_t i = 0; i < size; ++i )
-      eosio_assert( static_cast(static_cast(char_ptr[i])) == val, "buffer slot doesn't match" );
-}
-
-#define PRINT_PTR(x) prints("PTR : "); print((uint32_t)x, 4); prints("\n");
-
-void test_extended_memory::test_page_memory() {
-   constexpr uint32_t _64K = 64*1024;
-   /*
-    * Test test_extended_memory::test_page_memory `ensure initial page size`
-    * Given I have not tried to increase the "program break" yet,
-    * when I call sbrk(0), then I should get the end of the first page, which should be 64K.
-    */
-   auto prev = sbrk(0);
-   eosio_assert( reinterpret_cast(prev) == _64K, "Should initially have 1 64K page allocated" );
-
-   /*
-    * Test test_extended_memory::test_page_memory `ensure sbrk returns previous end of program break`
-    * Given I have not tried to increase memory,
-    * when I call sbrk(1), then I should get the end of the first page, which should be 64K.
-    */
-   prev = sbrk(1);
-   eosio_assert( reinterpret_cast(prev) == _64K, "Should still be pointing to the end of the 1st 64K page" );
-
-   /*
-    * Test test_extended_memory::test_page_memory `ensure sbrk aligns allocations`
-    * Given that I allocated 1 byte via sbrk,
-    * when I call sbrk(2), then I should get 8 bytes past the previous end because of maintaining 8 byte alignment.
-    */
-   prev = sbrk(2);
-   eosio_assert( reinterpret_cast(prev) == _64K+8, "Should point to 8 past the end of 1st 64K page" );
-
-   /*
-    * Test test_extended_memory::test_page_memory `ensure sbrk aligns allocations 2`
-    * Given that I allocated 2 bytes via sbrk,
-    * when I call sbrk(_64K-17), then I should get 8 bytes past the previous end because of maintaining 8 byte alignment.
- */ - prev = sbrk(_64K - 17); - eosio_assert( reinterpret_cast(prev) == _64K+16, "Should point to 16 past the end of the 1st 64K page" ); - - prev = sbrk(1); - eosio_assert( reinterpret_cast(prev) == 2*_64K, "Should point to the end of the 2nd 64K page" ); - - prev = sbrk(_64K); - eosio_assert( reinterpret_cast(prev) == 2*_64K+8, "Should point to 8 past the end of the 2nd 64K page" ); - - prev = sbrk(_64K - 15); - eosio_assert( reinterpret_cast(prev) == 3*_64K+8, "Should point to 8 past the end of the 3rd 64K page" ); - - prev = sbrk(2*_64K-1); - eosio_assert( reinterpret_cast(prev) == 4*_64K, "Should point to the end of the 4th 64K page" ); - - prev = sbrk(2*_64K); - eosio_assert( reinterpret_cast(prev) == 6*_64K, "Should point to the end of the 6th 64K page" ); - - prev = sbrk(2*_64K+1); - eosio_assert( reinterpret_cast(prev) == 8*_64K, "Should point to the end of the 8th 64K page" ); - - prev = sbrk(6*_64K-15); - eosio_assert( reinterpret_cast(prev) == 10*_64K+8, "Should point to 8 past the end of the 10th 64K page" ); - - prev = sbrk(0); - eosio_assert( reinterpret_cast(prev) == 16*_64K, "Should point to 8 past the end of the 16th 64K page" ); -} - -void test_extended_memory::test_page_memory_exceeded() { - /* - * Test test_extended_memory::test_page_memory_exceeded `ensure sbrk won't allocation more than 1M of memory` - * Given that I have not tried to increase allocated memory, - * when I increase allocated memory with sbrk(15*64K), then I should get the end of the first page. - */ - auto prev = sbrk(15*64*1024); - eosio_assert( reinterpret_cast(prev) == 64*1024, "Should have allocated 1M of memory" ); - - /* - * Test test_extended_memory::test_page_memory_exceeded `ensure sbrk won't allocation more than 1M of memory 2` - */ - prev = sbrk(0); - eosio_assert( reinterpret_cast(prev) == (1024*1024), "Should have allocated 1M of memory" ); - eosio_assert( reinterpret_cast(sbrk(32*1024*1024+1)) == -1, "sbrk should have failed for trying to allocate too much memory" ); -} - -void test_extended_memory::test_page_memory_negative_bytes() { - eosio_assert( reinterpret_cast(sbrk((uint32_t)-1)) == -1, "Should have errored for trying to remove memory" ); -} - -void test_extended_memory::test_initial_buffer() { - // initial buffer should be exhausted at 8192 bytes - // 8176 left ( 12 + ptr header ) - char* ptr1 = (char*)malloc(12); - eosio_assert( ptr1 != nullptr, "should have allocated 12 char buffer" ); - - char* ptr2 = (char*)malloc(8159); - eosio_assert( ptr2 != nullptr, "should have allocate 8159 char buffer" ); - - // should overrun initial heap, allocated in 2nd heap - char* ptr3 = (char*)malloc(20); - eosio_assert( ptr3 != nullptr, "should have allocated a 20 char buffer" ); - verify( ptr3, 0, 20 ); - -} diff --git a/unittests/test-contracts/test_api_mem/test_memory.cpp b/unittests/test-contracts/test_api_mem/test_memory.cpp deleted file mode 100644 index 7fbe3a26e7a..00000000000 --- a/unittests/test-contracts/test_api_mem/test_memory.cpp +++ /dev/null @@ -1,386 +0,0 @@ -/** - * @file - * @copyright defined in eos/LICENSE - */ -#include - -#include "../test_api/test_api.hpp" - -void verify_mem( const void* const ptr, const uint32_t val, const uint32_t size ) -{ - const char* char_ptr = (const char*)ptr; - for ( uint32_t i = 0; i < size; ++i ) - { - eosio_assert( static_cast(static_cast(char_ptr[i])) == val, "buf slot doesn't match" ); - } -} - -/* -* malloc and realloc always allocate on 8 byte boundaries based off of total allocation, so -* if the requested size + the 2 byte header is 
not divisible by 8, then the allocated space -* will be larger than the requested size -*/ -void test_memory::test_memory_allocs() -{ - char* ptr1 = (char*)malloc(0); - eosio_assert( ptr1 == nullptr, "should not have allocated a 0 char buf" ); - - // 20 chars - 20 + 4(header) which is divisible by 8 - ptr1 = (char*)malloc(20); - eosio_assert( ptr1 != nullptr, "should have allocated a 20 char buf" ); - verify_mem( ptr1, 0, 20 ); - // existing memory layout -> |24| - - // 36 chars allocated - 30 + 4 plus an extra 6 to be divisible by 8 - char* ptr1_realloc = (char*)realloc( ptr1, 30 ); - eosio_assert( ptr1_realloc != nullptr, "should have returned a 30 char buf" ); - eosio_assert( ptr1_realloc == ptr1, "should have enlarged the 20 char buf" ); - // existing memory layout -> |40| - - // 20 chars allocated - char* ptr2 = (char*)malloc(20); - eosio_assert( ptr2 != nullptr, "should have allocated another 20 char buf" ); - eosio_assert( ptr1 + 36 < ptr2, "20 char buf should have been created after ptr1" ); // test specific to implementation (can remove for refactor) - verify_mem( ptr1, 0, 36 ); - eosio_assert( ptr1[36] != 0, "should not have empty bytes following since block allocated" ); // test specific to implementation (can remove for refactor) - // existing memory layout -> |40|24| - - //shrink the buffer - ptr1[14] = 0x7e; - // 20 chars allocated (still) - ptr1_realloc = (char*)realloc( ptr1, 15 ); - eosio_assert( ptr1_realloc != nullptr, "should have returned a 15 char buf" ); - eosio_assert( ptr1_realloc == ptr1, "should have shrunk the reallocated 30 char buf" ); - verify_mem( ptr1, 0, 14); // test specific to implementation (can remove for refactor) - eosio_assert( ptr1[14] == 0x7e, "remaining 15 chars of buf should be untouched" ); - // existing memory layout -> |24(shrunk)|16(freed)|24| - - //same size the buffer (verify corner case) - // 20 chars allocated (still) - ptr1_realloc = (char*)realloc( ptr1, 15 ); - eosio_assert( ptr1_realloc != nullptr, "should have returned a reallocated 15 char buf" ); - eosio_assert( ptr1_realloc == ptr1, "should have reallocated 15 char buf as the same buf" ); - eosio_assert( ptr1[14] == 0x7e, "remaining 15 chars of buf should be untouched for unchanged buf" ); - - //same size as max allocated buffer -- test specific to implementation (can remove for refactor) - ptr1_realloc = (char*)realloc( ptr1, 30 ); - eosio_assert( ptr1_realloc != nullptr, "should have returned a 30 char buf" ); - eosio_assert( ptr1_realloc == ptr1, "should have increased the buf back to orig max"); //test specific to implementation (can remove for refacto r) - eosio_assert( ptr1[14] == 0x7e, "remaining 15 chars of buf should be untouched for expanded buf" ); - - //increase buffer beyond (indicated) allocated space - // 36 chars allocated (still) - ptr1_realloc = (char*)realloc( ptr1, 36 ); - eosio_assert( ptr1_realloc != nullptr, "should have returned a 36 char buf" ); - eosio_assert( ptr1_realloc == ptr1, "should have increased char buf to actual size" ); // test specific to implementation (can remove for refacto r) - - //increase buffer beyond allocated space - ptr1[35] = 0x7f; - // 44 chars allocated - 37 + 4 plus an extra 7 to be divisible by 8 - ptr1_realloc = (char*)realloc( ptr1, 37 ); - eosio_assert( ptr1_realloc != nullptr, "should have returned a 37 char buf" ); - eosio_assert( ptr1_realloc != ptr1, "should have had to create new 37 char buf from 36 char buf" ); - eosio_assert( ptr2 < ptr1_realloc, "should have been created after ptr2" ); // test specific to 
implementation (can remove for refacto r) - eosio_assert( ptr1_realloc[14] == 0x7e, "orig 36 char buf's content should be copied" ); - eosio_assert( ptr1_realloc[35] == 0x7f, "orig 36 char buf's content should be copied" ); - - //realloc with nullptr - char* nullptr_realloc = (char*)realloc( nullptr, 50 ); - eosio_assert( nullptr_realloc != nullptr, "should have returned a 50 char buf and ignored nullptr" ); - eosio_assert( ptr1_realloc < nullptr_realloc, "should have created after ptr1_realloc" ); // test specific to implementation (can remove for refactor) - - //realloc with invalid ptr - char* invalid_ptr_realloc = (char*)realloc( nullptr_realloc + 4, 10 ); - eosio_assert( invalid_ptr_realloc != nullptr, "should have returned a 10 char buf and ignored invalid ptr" ); - eosio_assert( nullptr_realloc < invalid_ptr_realloc, "should have created invalid_ptr_realloc after nullptr_realloc" ); // test specific to implementation (can remove for refactor) -} - -// this test verifies that malloc can allocate 15 64K pages and treat them as one big heap space (if sbrk is not called in the mean time) -void test_memory::test_memory_hunk() -{ - // try to allocate the largest buffer we can, which is 15 contiguous 64K pages (with the 4 char space for the ptr header) - char* ptr1 = (char*)malloc( 15 * 64 * 1024 - 4 ); - eosio_assert( ptr1 != nullptr, "should have allocated a ~983K char buf" ); -} - -void test_memory::test_memory_hunks() -{ - // This test will be moved to `eosio.cdt' - // Note: for this reason, the asserts are commented out. - - // leave 784 bytes of initial buffer to allocate later (rounds up to nearest 8 byte boundary, - // 16 bytes bigger than remainder left below in 15 64K page heap)) - char* ptr1 = (char*)malloc(7404); - eosio_assert( ptr1 != nullptr, "should have allocated a 7404 char buf" ); - - char* last_ptr = nullptr; - // 96 * (10 * 1024 - 15) => 15 ~64K pages with 768 byte buffer left to allocate - for (int i = 0; i < 96; ++i) - { - char* ptr2 = (char*)malloc( 10 * 1024 - 15 ); - eosio_assert( ptr2 != nullptr, "should have allocated a ~10K char buf" ); - if ( last_ptr != nullptr ) - { - // - 15 rounds to -8 - eosio_assert( last_ptr + 10 * 1024 - 8 == ptr2, "should allocate the very next ptr" ); // test specific to implementation (can remove for refactor) - } - - last_ptr = ptr2; - } - - // try to allocate a buffer slightly larger than the remaining buffer| 765 + 4 rounds to 776 - char* ptr3 = (char*)malloc(765); - - eosio_assert( ptr3 != nullptr, "should have allocated a 772 char buf" ); - //eosio_assert(ptr1 + 7408 == ptr3, "should allocate the very next ptr after ptr1 in initial heap"); // test specific to implementation (can remove for refactor) - - // use all but 8 chars - char* ptr4 = (char*)malloc(764); - eosio_assert( ptr4 != nullptr, "should have allocated a 764 char buf" ); - //eosio_assert(last_ptr + 10 * 1024 - 8 == ptr4, "should allocate the very next ptr after last_ptr at end of contiguous heap"); // test specific to implementation (can remove for refactor) - - // use up remaining 8 chars - char* ptr5 = (char*)malloc(4); - eosio_assert( ptr5 != nullptr, "should have allocated a 4 char buf" ); - //eosio_assert(ptr3 + 776 == ptr5, "should allocate the very next ptr after ptr3 in initial heap"); // test specific to implementation (can remove for refactor) - - // nothing left to allocate - char* ptr6 = (char*)malloc(4); - //eosio_assert(ptr6 == nullptr, "should not have allocated a char buf"); -} - -void test_memory::test_memory_hunks_disjoint() -{ - // leave 8 bytes 
of initial buffer to allocate later - char* ptr1 = (char*)malloc( 8 * 1024 - 12 ); - eosio_assert( ptr1 != nullptr, "should have allocated a 8184 char buf" ); - - // can only make 14 extra (64K) heaps for malloc, since calls to sbrk will eat up part - char* loop_ptr1[14]; - // 14 * (64 * 1024 - 28) => 14 ~64K pages with each page having 24 bytes left to allocate - for ( int i = 0; i < 14; ++ i) - { - // allocates a new heap for each request, since sbrk call doesn't allow contiguous heaps to grow - loop_ptr1[i] = (char*)malloc( 64 * 1024 - 28 ); - eosio_assert( loop_ptr1[i] != nullptr, "should have allocated a 64K char buf" ); - - eosio_assert( reinterpret_cast(sbrk(4)) != -1, "should be able to allocate 8 bytes" ); - } - - // the 15th extra heap is reduced in size because of the 14 * 8 bytes allocated by sbrk calls - // will leave 8 bytes to allocate later (verifying that we circle back in the list - char* ptr2 = (char*)malloc(65412); - eosio_assert( ptr2 != nullptr, "should have allocated a 65412 char buf" ); - - char* loop_ptr2[14]; - for ( int i = 0; i < 14; ++ i) - { - // 12 char buffer to leave 8 bytes for another pass - loop_ptr2[i] = (char*)malloc(12); - eosio_assert( loop_ptr2[i] != nullptr, "should have allocated a 12 char buf" ); - eosio_assert( loop_ptr1[i] + 64 * 1024 - 24 == loop_ptr2[i], "loop_ptr2[i] should be very next pointer after loop_ptr1[i]" ); - } - - // this shows that searching for free ptrs starts at the last loop to find free memory, not at the begining - char* ptr3 = (char*)malloc(4); - eosio_assert( ptr3 != nullptr, "should have allocated a 4 char buf" ); - eosio_assert( loop_ptr2[13] + 16 == ptr3, "should allocate the very next ptr after loop_ptr2[13]" ); // test specific to implementation (can remove for refacto r) - - char* ptr4 = (char*)malloc(4); - eosio_assert( ptr4 != nullptr, "should have allocated a 4 char buf" ); - eosio_assert( ptr2 + 65416 == ptr4, "should allocate the very next ptr after ptr2 in last heap" ); // test specific to implementation (can remove for refacto r) - - char* ptr5 = (char*)malloc(4); - eosio_assert( ptr5 != nullptr, "should have allocated a 4 char buf" ); - eosio_assert( ptr1 + 8184 == ptr5, "should allocate the very next ptr after ptr1 in last heap" ); // test specific to implementation (can remove for refactor) - - // will eat up remaining memory (14th heap already used up) - char* loop_ptr3[13]; - for ( int i = 0; i < 13; ++i ) - { - // 4 char buffer to use up buffer - loop_ptr3[i] = (char*)malloc(4); - eosio_assert( loop_ptr3[i] != nullptr, "should have allocated a 4 char buf" ); - eosio_assert( loop_ptr2[i] + 16 == loop_ptr3[i], "loop_ptr3[i] should be very next pointer after loop_ptr2[i]" ); - } - - char* ptr6 = (char*)malloc(4); - eosio_assert( ptr6 == nullptr, "should not have allocated a char buf" ); - - free(loop_ptr1[3]); - free(loop_ptr2[3]); - free(loop_ptr3[3]); - - char* slot3_ptr[64]; - for ( int i = 0; i < 64; ++ i) - { - slot3_ptr[i] = (char*)malloc(1020); - eosio_assert( slot3_ptr[i] != nullptr, "should have allocated a 1020 char buf" ); - if (i == 0) - eosio_assert( loop_ptr1[3] == slot3_ptr[0], "loop_ptr1[3] should be very next pointer after slot3_ptr[0]" ); - else - eosio_assert( slot3_ptr[i - 1] + 1024 == slot3_ptr[i], "slot3_ptr[i] should be very next pointer after slot3_ptr[i-1]" ); - } - - char* ptr7 = (char*)malloc(4); - eosio_assert( ptr7 == nullptr, "should not have allocated a char buf" ); -} - -void test_memory::test_memset_memcpy() -{ - char buf1[40] = {}; - char buf2[40] = {}; - - verify_mem( 
buf1, 0, 40 );
-   verify_mem( buf2, 0, 40 );
-
-   memset( buf1, 0x22, 20 );
-   verify_mem( buf1, 0x22, 20 );
-   verify_mem( &buf1[20], 0, 20 );
-
-   memset( &buf2[20], 0xff, 20 );
-   verify_mem( buf2, 0, 20 );
-   verify_mem( &buf2[20], 0xff, 20 );
-
-   memcpy( &buf1[10], &buf2[10], 20 );
-   verify_mem( buf1, 0x22, 10 );
-   verify_mem( &buf1[10], 0, 10 );
-   verify_mem( &buf1[20], 0xff, 10 );
-   verify_mem( &buf1[30], 0, 10 );
-
-   memset( &buf1[1], 1, 1 );
-   verify_mem( buf1, 0x22, 1 );
-   verify_mem( &buf1[1], 1, 1 );
-   verify_mem( &buf1[2], 0x22, 8 );
-
-   // verify adjacent non-overlapping buffers
-   char buf3[50] = {};
-   memset( &buf3[25], 0xee, 25 );
-   verify_mem( buf3, 0, 25 );
-   memcpy( buf3, &buf3[25], 25 );
-   verify_mem( buf3, 0xee, 50 );
-
-   memset( buf3, 0, 25 );
-   verify_mem( &buf3[25], 0xee, 25 );
-   memcpy( &buf3[25], buf3, 25 );
-   verify_mem( buf3, 0, 50 );
-}
-
-void test_memory::test_memcpy_overlap_start()
-{
-   char buf3[99] = {};
-   memset( buf3, 0xee, 50 );
-   memset( &buf3[50], 0xff, 49 );
-   memcpy( &buf3[49], buf3, 50 );
-}
-
-
-void test_memory::test_memcpy_overlap_end()
-{
-   char buf3[99] = {};
-   memset( buf3, 0xee, 50 );
-   memset( &buf3[50], 0xff, 49 );
-   memcpy( buf3, &buf3[49], 50 );
-}
-
-void test_memory::test_memcmp()
-{
-   char buf1[] = "abcde";
-   char buf2[] = "abcde";
-   int32_t res1 = memcmp( buf1, buf2, 6 );
-   eosio_assert( res1 == 0, "first data should be equal to second data" );
-
-   char buf3[] = "abcde";
-   char buf4[] = "fghij";
-   int32_t res2 = memcmp( buf3, buf4, 6 );
-   eosio_assert( res2 < 0, "first data should be smaller than second data" );
-
-   char buf5[] = "fghij";
-   char buf6[] = "abcde";
-   int32_t res3 = memcmp( buf5, buf6, 6 );
-   eosio_assert( res3 > 0, "first data should be larger than second data" );
-}
-
-void test_memory::test_outofbound_0()
-{
-   memset( (char *)0, 0xff, 1024 * 1024 * 1024 ); // big memory
-}
-
-void test_memory::test_outofbound_1()
-{
-   memset( (char *)16, 0xff, 0xffffffff ); // memory wrap around
-}
-
-void test_memory::test_outofbound_2()
-{
-   char buf[1024] = {0};
-   char *ptr = (char *)malloc(1048576);
-   memcpy( buf, ptr, 1048576 ); // stack memory out of bound
-}
-
-void test_memory::test_outofbound_3()
-{
-   char *ptr = (char *)malloc(128);
-   memset( ptr, 0xcc, 1048576 ); // heap memory out of bound
-}
-
-template <typename T>
-void test_memory_store() {
-   T *ptr = (T *)( 8192 * 1024 - 1 );
-   ptr[0] = (T)1;
-}
-
-template <typename T>
-void test_memory_load() {
-   T *ptr = (T *)( 8192 * 1024 - 1 );
-   volatile T tmp = ptr[0];
-   (void)tmp;
-}
-
-void test_memory::test_outofbound_4()
-{
-   test_memory_store();
-}
-void test_memory::test_outofbound_5()
-{
-   test_memory_store();
-}
-void test_memory::test_outofbound_6()
-{
-   test_memory_store();
-}
-void test_memory::test_outofbound_7()
-{
-   test_memory_store();
-}
-void test_memory::test_outofbound_8()
-{
-   test_memory_load();
-}
-void test_memory::test_outofbound_9()
-{
-   test_memory_load();
-}
-void test_memory::test_outofbound_10()
-{
-   test_memory_load();
-}
-void test_memory::test_outofbound_11()
-{
-   test_memory_load();
-}
-
-void test_memory::test_outofbound_12()
-{
-   volatile unsigned int a = 0xffffffff;
-   double *ptr = (double *)a; // store with memory wrap
-   ptr[0] = 1;
-}
-
-void test_memory::test_outofbound_13()
-{
-   volatile unsigned int a = 0xffffffff;
-   double *ptr = (double *)a; // load with memory wrap
-   volatile double tmp = ptr[0];
-   (void)tmp;
-}
diff --git a/unittests/test-contracts/test_api_multi_index/CMakeLists.txt b/unittests/test-contracts/test_api_multi_index/CMakeLists.txt
index
4331707d23b..cd1264fb473 100644 --- a/unittests/test-contracts/test_api_multi_index/CMakeLists.txt +++ b/unittests/test-contracts/test_api_multi_index/CMakeLists.txt @@ -1,6 +1,6 @@ -if( ${eosio.cdt_FOUND} ) - include_directories( ${Boost_INCLUDE_DIRS} ) - add_executable( test_api_multi_index test_api_multi_index.cpp ) +if( EOSIO_COMPILE_TEST_CONTRACTS ) + add_contract( test_api_multi_index test_api_multi_index test_api_multi_index.cpp ) else() - configure_file( ${CMAKE_CURRENT_SOURCE_DIR}/test_api_multi_index.wasm ${CMAKE_CURRENT_BINARY_DIR}/test_api_multi_index.wasm COPYONLY ) + configure_file( ${CMAKE_CURRENT_SOURCE_DIR}/test_api_multi_index.wasm ${CMAKE_CURRENT_BINARY_DIR}/test_api_multi_index.wasm COPYONLY ) + configure_file( ${CMAKE_CURRENT_SOURCE_DIR}/test_api_multi_index.abi ${CMAKE_CURRENT_BINARY_DIR}/test_api_multi_index.abi COPYONLY ) endif() diff --git a/unittests/test-contracts/test_api_multi_index/test_api_multi_index.abi b/unittests/test-contracts/test_api_multi_index/test_api_multi_index.abi new file mode 100644 index 00000000000..9bf6b7601de --- /dev/null +++ b/unittests/test-contracts/test_api_multi_index/test_api_multi_index.abi @@ -0,0 +1,332 @@ +{ + "____comment": "This file was generated with eosio-abigen. DO NOT EDIT ", + "version": "eosio::abi/1.1", + "types": [], + "structs": [ + { + "name": "idx128_autoincrement_test", + "base": "", + "fields": [] + }, + { + "name": "idx128_autoincrement_test_part1", + "base": "", + "fields": [] + }, + { + "name": "idx128_autoincrement_test_part2", + "base": "", + "fields": [] + }, + { + "name": "idx128_check_without_storing", + "base": "", + "fields": [] + }, + { + "name": "idx128_general", + "base": "", + "fields": [] + }, + { + "name": "idx128_store_only", + "base": "", + "fields": [] + }, + { + "name": "idx256_general", + "base": "", + "fields": [] + }, + { + "name": "idx64_check_without_storing", + "base": "", + "fields": [] + }, + { + "name": "idx64_general", + "base": "", + "fields": [] + }, + { + "name": "idx64_modify_primary_key", + "base": "", + "fields": [] + }, + { + "name": "idx64_pass_pk_end_itr_to_erase", + "base": "", + "fields": [] + }, + { + "name": "idx64_pass_pk_end_itr_to_iterator_to", + "base": "", + "fields": [] + }, + { + "name": "idx64_pass_pk_end_itr_to_modify", + "base": "", + "fields": [] + }, + { + "name": "idx64_pass_pk_ref_to_other_table", + "base": "", + "fields": [] + }, + { + "name": "idx64_pass_sk_end_itr_to_erase", + "base": "", + "fields": [] + }, + { + "name": "idx64_pass_sk_end_itr_to_iterator_to", + "base": "", + "fields": [] + }, + { + "name": "idx64_pass_sk_end_itr_to_modify", + "base": "", + "fields": [] + }, + { + "name": "idx64_pass_sk_ref_to_other_table", + "base": "", + "fields": [] + }, + { + "name": "idx64_pk_cache_sk_lookup", + "base": "", + "fields": [] + }, + { + "name": "idx64_pk_iterator_exceed_begin", + "base": "", + "fields": [] + }, + { + "name": "idx64_pk_iterator_exceed_end", + "base": "", + "fields": [] + }, + { + "name": "idx64_require_find_fail", + "base": "", + "fields": [] + }, + { + "name": "idx64_require_find_fail_with_msg", + "base": "", + "fields": [] + }, + { + "name": "idx64_require_find_sk_fail", + "base": "", + "fields": [] + }, + { + "name": "idx64_require_find_sk_fail_with_msg", + "base": "", + "fields": [] + }, + { + "name": "idx64_run_out_of_avl_pk", + "base": "", + "fields": [] + }, + { + "name": "idx64_sk_cache_pk_lookup", + "base": "", + "fields": [] + }, + { + "name": "idx64_sk_iterator_exceed_begin", + "base": "", + "fields": [] + }, + { + "name": 
"idx64_sk_iterator_exceed_end", + "base": "", + "fields": [] + }, + { + "name": "idx64_store_only", + "base": "", + "fields": [] + }, + { + "name": "idx_double_general", + "base": "", + "fields": [] + }, + { + "name": "idx_long_double_general", + "base": "", + "fields": [] + } + ], + "actions": [ + { + "name": "s1check", + "type": "idx64_check_without_storing", + "ricardian_contract": "" + }, + { + "name": "s1exhaustpk", + "type": "idx64_run_out_of_avl_pk", + "ricardian_contract": "" + }, + { + "name": "s1findfail1", + "type": "idx64_require_find_fail", + "ricardian_contract": "" + }, + { + "name": "s1findfail2", + "type": "idx64_require_find_fail_with_msg", + "ricardian_contract": "" + }, + { + "name": "s1findfail3", + "type": "idx64_require_find_sk_fail", + "ricardian_contract": "" + }, + { + "name": "s1findfail4", + "type": "idx64_require_find_sk_fail_with_msg", + "ricardian_contract": "" + }, + { + "name": "s1g", + "type": "idx64_general", + "ricardian_contract": "" + }, + { + "name": "s1modpk", + "type": "idx64_modify_primary_key", + "ricardian_contract": "" + }, + { + "name": "s1pkbegin", + "type": "idx64_pk_iterator_exceed_begin", + "ricardian_contract": "" + }, + { + "name": "s1pkcache", + "type": "idx64_pk_cache_sk_lookup", + "ricardian_contract": "" + }, + { + "name": "s1pkend", + "type": "idx64_pk_iterator_exceed_end", + "ricardian_contract": "" + }, + { + "name": "s1pkerase", + "type": "idx64_pass_pk_end_itr_to_erase", + "ricardian_contract": "" + }, + { + "name": "s1pkitrto", + "type": "idx64_pass_pk_end_itr_to_iterator_to", + "ricardian_contract": "" + }, + { + "name": "s1pkmodify", + "type": "idx64_pass_pk_end_itr_to_modify", + "ricardian_contract": "" + }, + { + "name": "s1pkref", + "type": "idx64_pass_pk_ref_to_other_table", + "ricardian_contract": "" + }, + { + "name": "s1skbegin", + "type": "idx64_sk_iterator_exceed_begin", + "ricardian_contract": "" + }, + { + "name": "s1skcache", + "type": "idx64_sk_cache_pk_lookup", + "ricardian_contract": "" + }, + { + "name": "s1skend", + "type": "idx64_sk_iterator_exceed_end", + "ricardian_contract": "" + }, + { + "name": "s1skerase", + "type": "idx64_pass_sk_end_itr_to_erase", + "ricardian_contract": "" + }, + { + "name": "s1skitrto", + "type": "idx64_pass_sk_end_itr_to_iterator_to", + "ricardian_contract": "" + }, + { + "name": "s1skmodify", + "type": "idx64_pass_sk_end_itr_to_modify", + "ricardian_contract": "" + }, + { + "name": "s1skref", + "type": "idx64_pass_sk_ref_to_other_table", + "ricardian_contract": "" + }, + { + "name": "s1store", + "type": "idx64_store_only", + "ricardian_contract": "" + }, + { + "name": "s2autoinc", + "type": "idx128_autoincrement_test", + "ricardian_contract": "" + }, + { + "name": "s2autoinc1", + "type": "idx128_autoincrement_test_part1", + "ricardian_contract": "" + }, + { + "name": "s2autoinc2", + "type": "idx128_autoincrement_test_part2", + "ricardian_contract": "" + }, + { + "name": "s2check", + "type": "idx128_check_without_storing", + "ricardian_contract": "" + }, + { + "name": "s2g", + "type": "idx128_general", + "ricardian_contract": "" + }, + { + "name": "s2store", + "type": "idx128_store_only", + "ricardian_contract": "" + }, + { + "name": "s3g", + "type": "idx256_general", + "ricardian_contract": "" + }, + { + "name": "sdg", + "type": "idx_double_general", + "ricardian_contract": "" + }, + { + "name": "sldg", + "type": "idx_long_double_general", + "ricardian_contract": "" + } + ], + "tables": [], + "ricardian_clauses": [], + "variants": [] +} \ No newline at end of file diff --git 
a/unittests/test-contracts/test_api_multi_index/test_api_multi_index.cpp b/unittests/test-contracts/test_api_multi_index/test_api_multi_index.cpp index 54c4dc4214d..72e7d4e6a5b 100644 --- a/unittests/test-contracts/test_api_multi_index/test_api_multi_index.cpp +++ b/unittests/test-contracts/test_api_multi_index/test_api_multi_index.cpp @@ -2,49 +2,924 @@ * @file * @copyright defined in eos/LICENSE */ -#include <eosiolib/eosio.hpp> - -#include "../test_api/test_api.hpp" -#include "test_multi_index.cpp" - -extern "C" { - - void apply( uint64_t receiver, uint64_t code, uint64_t action ) { - require_auth(code); - WASM_TEST_HANDLER_EX( test_multi_index, idx64_general ); - WASM_TEST_HANDLER_EX( test_multi_index, idx64_store_only ); - WASM_TEST_HANDLER_EX( test_multi_index, idx64_check_without_storing ); - WASM_TEST_HANDLER_EX( test_multi_index, idx64_require_find_fail ); - WASM_TEST_HANDLER_EX( test_multi_index, idx64_require_find_fail_with_msg ); - WASM_TEST_HANDLER_EX( test_multi_index, idx64_require_find_sk_fail ); - WASM_TEST_HANDLER_EX( test_multi_index, idx64_require_find_sk_fail_with_msg ); - WASM_TEST_HANDLER_EX( test_multi_index, idx128_general ); - WASM_TEST_HANDLER_EX( test_multi_index, idx128_store_only ); - WASM_TEST_HANDLER_EX( test_multi_index, idx128_check_without_storing ); - WASM_TEST_HANDLER_EX( test_multi_index, idx128_autoincrement_test ); - WASM_TEST_HANDLER_EX( test_multi_index, idx128_autoincrement_test_part1 ); - WASM_TEST_HANDLER_EX( test_multi_index, idx128_autoincrement_test_part2 ); - WASM_TEST_HANDLER_EX( test_multi_index, idx256_general ); - WASM_TEST_HANDLER_EX( test_multi_index, idx_double_general ); - WASM_TEST_HANDLER_EX( test_multi_index, idx_long_double_general ); - WASM_TEST_HANDLER_EX( test_multi_index, idx64_pk_iterator_exceed_end ); - WASM_TEST_HANDLER_EX( test_multi_index, idx64_sk_iterator_exceed_end ); - WASM_TEST_HANDLER_EX( test_multi_index, idx64_pk_iterator_exceed_begin ); - WASM_TEST_HANDLER_EX( test_multi_index, idx64_sk_iterator_exceed_begin ); - WASM_TEST_HANDLER_EX( test_multi_index, idx64_pass_pk_ref_to_other_table ); - WASM_TEST_HANDLER_EX( test_multi_index, idx64_pass_sk_ref_to_other_table ); - WASM_TEST_HANDLER_EX( test_multi_index, idx64_pass_pk_end_itr_to_iterator_to ); - WASM_TEST_HANDLER_EX( test_multi_index, idx64_pass_pk_end_itr_to_modify ); - WASM_TEST_HANDLER_EX( test_multi_index, idx64_pass_pk_end_itr_to_erase ); - WASM_TEST_HANDLER_EX( test_multi_index, idx64_pass_sk_end_itr_to_iterator_to ); - WASM_TEST_HANDLER_EX( test_multi_index, idx64_pass_sk_end_itr_to_modify ); - WASM_TEST_HANDLER_EX( test_multi_index, idx64_pass_sk_end_itr_to_erase ); - WASM_TEST_HANDLER_EX( test_multi_index, idx64_modify_primary_key ); - WASM_TEST_HANDLER_EX( test_multi_index, idx64_run_out_of_avl_pk ); - WASM_TEST_HANDLER_EX( test_multi_index, idx64_sk_cache_pk_lookup ); - WASM_TEST_HANDLER_EX( test_multi_index, idx64_pk_cache_sk_lookup ); - - //unhandled test call - eosio_assert( false, "Unknown Test" ); +#include "test_api_multi_index.hpp" + +using namespace eosio; + +#include <limits> +#include <algorithm>
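The hand-rolled apply()/WASM_TEST_HANDLER_EX dispatch removed above is superseded by the dispatcher the CDT generates from the [[eosio::action]] attributes declared in test_api_multi_index.hpp. Roughly, the generated entry point behaves like the following sketch (simplified; the generated code also deserializes any action arguments from a datastream before the call):

   extern "C" void apply( uint64_t receiver, uint64_t code, uint64_t action ) {
      if( code == receiver && action == "s1g"_n.value ) {
         test_api_multi_index contract( eosio::name{receiver}, eosio::name{code},
                                        eosio::datastream<const char*>(nullptr, 0) );
         contract.idx64_general(); // one branch like this per [[eosio::action]]
      }
      // ... remaining actions are dispatched the same way ...
   }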
+ +namespace _test_multi_index { + + using eosio::checksum256; + + struct record_idx64 { + uint64_t id; + uint64_t sec; + + auto primary_key()const { return id; } + uint64_t get_secondary()const { return sec; } + + EOSLIB_SERIALIZE( record_idx64, (id)(sec) ) + }; + + struct record_idx128 { + uint64_t id; + uint128_t sec; + + auto primary_key()const { return id; } + uint128_t get_secondary()const { return sec; } + + EOSLIB_SERIALIZE( record_idx128, (id)(sec) ) + }; + + struct record_idx256 { + uint64_t id; + checksum256 sec; + + auto primary_key()const { return id; } + const checksum256& get_secondary()const { return sec; } + + EOSLIB_SERIALIZE( record_idx256, (id)(sec) ) + }; + + struct record_idx_double { + uint64_t id; + double sec; + + auto primary_key()const { return id; } + double get_secondary()const { return sec; } + + EOSLIB_SERIALIZE( record_idx_double, (id)(sec) ) + }; + + struct record_idx_long_double { + uint64_t id; + long double sec; + + auto primary_key()const { return id; } + long double get_secondary()const { return sec; } + + EOSLIB_SERIALIZE( record_idx_long_double, (id)(sec) ) + }; + + template<uint64_t TableName> + void idx64_store_only( name receiver ) + { + typedef record_idx64 record; + + record records[] = {{265, "alice"_n.value}, + {781, "bob"_n.value}, + {234, "charlie"_n.value}, + {650, "allyson"_n.value}, + {540, "bob"_n.value}, + {976, "emily"_n.value}, + {110, "joe"_n.value} + }; + size_t num_records = sizeof(records)/sizeof(records[0]); + + // Construct and fill table using multi_index + multi_index<eosio::name{TableName}, record, + indexed_by<"bysecondary"_n, const_mem_fun<record, uint64_t, &record::get_secondary>> + > table( receiver, receiver.value ); + + auto payer = receiver; + + for ( size_t i = 0; i < num_records; ++i ) { + table.emplace( payer, [&](auto& r) { + r.id = records[i].id; + r.sec = records[i].sec; + }); + } + }
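For reference on the pattern used throughout this file: emplace() creates a row and bills its RAM to the payer, modify() rewrites an existing row (any field except the primary key), and erase() removes it; a compressed usage sketch against a table like the one above:

   table.emplace( payer, [&]( auto& r ) { r.id = 1; r.sec = "alice"_n.value; } );   // new row, RAM billed to payer
   table.modify( table.get(1), payer, [&]( auto& r ) { r.sec = "alex"_n.value; } ); // same primary key, new secondary value
   table.erase( table.get(1) );                                                     // removes the row and releases its RAM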
+ + template<uint64_t TableName> + void idx64_check_without_storing( name receiver ) + { + typedef record_idx64 record; + + // Load table using multi_index + multi_index<eosio::name{TableName}, record, + indexed_by<"bysecondary"_n, const_mem_fun<record, uint64_t, &record::get_secondary>> + > table( receiver, receiver.value ); + + auto payer = receiver; + + auto secondary_index = table.template get_index<"bysecondary"_n>(); + + // find by primary key + { + auto itr = table.find(999); + check( itr == table.end(), "idx64_general - table.find() of non-existing primary key" ); + + itr = table.find(976); + check( itr != table.end() && itr->sec == "emily"_n.value, "idx64_general - table.find() of existing primary key" ); + + ++itr; + check( itr == table.end(), "idx64_general - increment primary iterator to end" ); + + itr = table.require_find(976); + check( itr != table.end() && itr->sec == "emily"_n.value, "idx64_general - table.require_find() of existing primary key" ); + + ++itr; + check( itr == table.end(), "idx64_general - increment primary iterator to end" ); + } + + // iterate forward starting with charlie + { + auto itr = secondary_index.lower_bound("charlie"_n.value); + check( itr != secondary_index.end() && itr->sec == "charlie"_n.value, "idx64_general - secondary_index.lower_bound()" ); + + ++itr; + check( itr != secondary_index.end() && itr->id == 976 && itr->sec == "emily"_n.value, "idx64_general - increment secondary iterator" ); + + ++itr; + check( itr != secondary_index.end() && itr->id == 110 && itr->sec == "joe"_n.value, "idx64_general - increment secondary iterator again" ); + + ++itr; + check( itr == secondary_index.end(), "idx64_general - increment secondary iterator to end" ); + } + + // iterate backward starting with second bob + { + auto pk_itr = table.find(781); + check( pk_itr != table.end() && pk_itr->sec == "bob"_n.value, "idx64_general - table.find() of existing primary key" ); + + auto itr = secondary_index.iterator_to(*pk_itr); + check( itr->id == 781 && itr->sec == "bob"_n.value, "idx64_general - iterator to existing object in secondary index" ); + + --itr; + check( itr != secondary_index.end() && itr->id == 540 && itr->sec == "bob"_n.value, "idx64_general - decrement secondary iterator" ); + + --itr; + check( itr != secondary_index.end() && itr->id == 650 && itr->sec == "allyson"_n.value, "idx64_general - decrement secondary iterator again" ); + + --itr; + check( itr == secondary_index.begin() && itr->id == 265 && itr->sec == "alice"_n.value, "idx64_general - decrement secondary iterator to beginning" ); + } + + // iterate backward starting with emily using const_reverse_iterator + { + std::array<uint64_t, 6> pks{{976, 234, 781, 540, 650, 265}}; + + auto pk_itr = pks.begin(); + + auto itr = --std::make_reverse_iterator( secondary_index.find("emily"_n.value) ); + for( ; itr != secondary_index.rend(); ++itr ) { + check( pk_itr != pks.end(), "idx64_general - unexpected continuation of secondary index in reverse iteration" ); + check( *pk_itr == itr->id, "idx64_general - primary key mismatch in reverse iteration" ); + ++pk_itr; + } + check( pk_itr == pks.end(), "idx64_general - did not iterate backwards through secondary index properly" ); + } + + // require_find secondary key + { + auto itr = secondary_index.require_find("bob"_n.value); + check( itr != secondary_index.end(), "idx64_general - require_find must never return end iterator" ); + check( itr->id == 540, "idx64_general - require_find test" ); + + ++itr; + check( itr->id == 781, "idx64_general - require_find secondary key test" ); + } + + // modify and erase + { + const uint64_t ssn = 421; + auto new_person = table.emplace( payer, [&](auto& r) { + r.id = ssn; + r.sec = "bob"_n.value; + }); + + table.modify( new_person, payer, [&](auto& r) { + r.sec = "billy"_n.value; + }); + + auto itr1 = table.find(ssn); + check( itr1 != table.end() && itr1->sec == "billy"_n.value, "idx64_general - table.modify()" ); + + table.erase(itr1); + auto itr2 = table.find(ssn); + check( itr2 == table.end(), "idx64_general - table.erase()" ); + } + } + + template<uint64_t TableName> + void idx64_require_find_fail(name receiver) + { + typedef record_idx64 record; + + // Load table using multi_index + multi_index<eosio::name{TableName}, record> table( receiver, receiver.value ); + + // make sure we're looking at the right table + auto itr = table.require_find( 781, "table not loaded" ); + check( itr != table.end(), "table not loaded" ); + + // require_find by primary key + // should fail + itr = table.require_find(999); + } + + template<uint64_t TableName> + void idx64_require_find_fail_with_msg(name receiver) + { + typedef record_idx64 record; + + // Load table using multi_index + multi_index<eosio::name{TableName}, record> table( receiver, receiver.value ); + + // make sure we're looking at the right table + auto itr = table.require_find( 234, "table not loaded" ); + check( itr != table.end(), "table not loaded" ); + + // require_find by primary key + // should fail + itr = table.require_find( 335, "unable to find primary key in require_find" ); + } + + template<uint64_t TableName> + void idx64_require_find_sk_fail(name receiver) + { + typedef record_idx64 record; + + // Load table using multi_index + multi_index<eosio::name{TableName}, record, indexed_by<"bysecondary"_n, const_mem_fun<record, uint64_t, &record::get_secondary>>> table( receiver, receiver.value ); + auto sec_index = table.template get_index<"bysecondary"_n>(); + + // make sure we're looking at the right table + auto itr = sec_index.require_find( "charlie"_n.value, "table not loaded" ); + check( itr != sec_index.end(), "table not loaded" ); + + // require_find by secondary key + // should fail + itr = sec_index.require_find("bill"_n.value); + } + + template<uint64_t TableName> + void idx64_require_find_sk_fail_with_msg(name receiver) + { + typedef record_idx64 record; + + // Load table using multi_index + multi_index<eosio::name{TableName}, record, indexed_by<"bysecondary"_n, const_mem_fun<record, uint64_t, &record::get_secondary>>> table( receiver, receiver.value ); + auto sec_index = table.template get_index<"bysecondary"_n>(); + + // make sure we're looking at the right table + auto itr = sec_index.require_find( "emily"_n.value, "table not loaded" ); + check( itr != sec_index.end(), "table not loaded" ); + + // require_find by secondary key + // should fail + itr = sec_index.require_find( "frank"_n.value, "unable to find sec key" ); + }
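The four require_find tests above hinge on the difference between find() and require_find(): find() returns end() on a miss, while require_find() asserts and aborts the transaction (with the supplied message, if any). A sketch:

   auto itr  = table.find( 999 );                        // miss: itr == table.end(), execution continues
   auto itr2 = table.require_find( 999, "no row 999" );  // miss: aborts here with "no row 999"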
+ + template<uint64_t TableName> + void idx128_store_only(name receiver) + { + typedef record_idx128 record; + + + // Construct and fill table using multi_index + multi_index<eosio::name{TableName}, record, + indexed_by<"bysecondary"_n, const_mem_fun<record, uint128_t, &record::get_secondary>> + > table( receiver, receiver.value ); + + auto payer = receiver; + + for (uint64_t i = 0; i < 5; ++i) { + table.emplace( payer, [&](auto& r) { + r.id = i; + r.sec = static_cast<uint128_t>(1ULL << 63) * i; + }); + } + } + + template<uint64_t TableName> + void idx128_check_without_storing( name receiver ) + { + typedef record_idx128 record; + + // Load table using multi_index + multi_index<eosio::name{TableName}, record, + indexed_by<"bysecondary"_n, const_mem_fun<record, uint128_t, &record::get_secondary>> + > table( receiver, receiver.value ); + + auto payer = receiver; + + auto secondary_index = table.template get_index<"bysecondary"_n>(); + + table.modify( table.get(3), payer, [&](auto& r) { + r.sec *= 2; + }); + + { + uint128_t multiplier = 1ULL << 63; + + auto itr = secondary_index.begin(); + check( itr->primary_key() == 0 && itr->get_secondary() == multiplier*0, "idx128_general - secondary key sort" ); + ++itr; + check( itr->primary_key() == 1 && itr->get_secondary() == multiplier*1, "idx128_general - secondary key sort" ); + ++itr; + check( itr->primary_key() == 2 && itr->get_secondary() == multiplier*2, "idx128_general - secondary key sort" ); + ++itr; + check( itr->primary_key() == 4 && itr->get_secondary() == multiplier*4, "idx128_general - secondary key sort" ); + ++itr; + check( itr->primary_key() == 3 && itr->get_secondary() == multiplier*6, "idx128_general - secondary key sort" ); + ++itr; + check( itr == secondary_index.end(), "idx128_general - secondary key sort" ); + } + + } + + template<uint64_t TableName, uint64_t SecondaryIndex> + auto idx64_table( name receiver ) + { + typedef record_idx64 record; + // Load table using multi_index + multi_index<eosio::name{TableName}, record, + indexed_by<eosio::name{SecondaryIndex}, const_mem_fun<record, uint64_t, &record::get_secondary>> + > table( receiver, receiver.value ); + return table; + } + +} /// _test_multi_index + +void test_api_multi_index::idx64_store_only() +{ + _test_multi_index::idx64_store_only<"indextable1"_n.value>( get_self() ); +} + +void test_api_multi_index::idx64_check_without_storing() +{ + _test_multi_index::idx64_check_without_storing<"indextable1"_n.value>( get_self() ); +} + +void test_api_multi_index::idx64_general() +{ + _test_multi_index::idx64_store_only<"indextable2"_n.value>( get_self() ); + _test_multi_index::idx64_check_without_storing<"indextable2"_n.value>( get_self() ); +} + +void test_api_multi_index::idx128_store_only() +{ + _test_multi_index::idx128_store_only<"indextable3"_n.value>( get_self() ); +} + +void test_api_multi_index::idx128_check_without_storing() +{ + _test_multi_index::idx128_check_without_storing<"indextable3"_n.value>( get_self() ); +} + +void test_api_multi_index::idx128_general() +{ + _test_multi_index::idx128_store_only<"indextable4"_n.value>( get_self() ); + _test_multi_index::idx128_check_without_storing<"indextable4"_n.value>( get_self() ); +} + +void test_api_multi_index::idx64_require_find_fail() +{ + _test_multi_index::idx64_store_only<"indextable5"_n.value>( get_self() ); + _test_multi_index::idx64_require_find_fail<"indextable5"_n.value>( get_self() ); +} + +void test_api_multi_index::idx64_require_find_fail_with_msg() +{ + _test_multi_index::idx64_store_only<"indextablea"_n.value>( get_self() ); // Making the name smaller fixes this? + _test_multi_index::idx64_require_find_fail_with_msg<"indextablea"_n.value>( get_self() ); // Making the name smaller fixes this?
+} + +void test_api_multi_index::idx64_require_find_sk_fail() +{ + _test_multi_index::idx64_store_only<"indextableb"_n.value>( get_self() ); + _test_multi_index::idx64_require_find_sk_fail<"indextableb"_n.value>( get_self() ); +} + +void test_api_multi_index::idx64_require_find_sk_fail_with_msg() +{ + _test_multi_index::idx64_store_only<"indextablec"_n.value>( get_self() ); + _test_multi_index::idx64_require_find_sk_fail_with_msg<"indextablec"_n.value>( get_self() ); +} + +void test_api_multi_index::idx128_autoincrement_test() +{ + using namespace _test_multi_index; + + typedef record_idx128 record; + + auto payer = get_self(); + + multi_index<"autoinctbl1"_n, record, + indexed_by<"bysecondary"_n, const_mem_fun<record, uint128_t, &record::get_secondary>> + > table( get_self(), get_self().value ); + + for( int i = 0; i < 5; ++i ) { + table.emplace( payer, [&](auto& r) { + r.id = table.available_primary_key(); + r.sec = 1000 - static_cast<uint128_t>(r.id); + }); + } + + uint64_t expected_key = 4; + for( const auto& r : table.get_index<"bysecondary"_n>() ) + { + check( r.primary_key() == expected_key, "idx128_autoincrement_test - unexpected primary key" ); + --expected_key; + } + check( expected_key == static_cast<uint64_t>(-1), "idx128_autoincrement_test - did not iterate through secondary index properly" ); + + auto itr = table.find(3); + check( itr != table.end(), "idx128_autoincrement_test - could not find object with primary key of 3" ); + + // The modification below would trigger an error: + /* + table.modify(itr, payer, [&](auto& r) { + r.id = 100; + }); + */ + + table.emplace( payer, [&](auto& r) { + r.id = 100; + r.sec = itr->sec; + }); + table.erase(itr); + + check( table.available_primary_key() == 101, "idx128_autoincrement_test - next_primary_key was not correct after record modify" ); +} + +void test_api_multi_index::idx128_autoincrement_test_part1() +{ + using namespace _test_multi_index; + + typedef record_idx128 record; + + auto payer = get_self(); + + multi_index<"autoinctbl2"_n, record, + indexed_by<"bysecondary"_n, const_mem_fun<record, uint128_t, &record::get_secondary>> + > table( get_self(), get_self().value ); + + for( int i = 0; i < 3; ++i ) { + table.emplace( payer, [&](auto& r) { + r.id = table.available_primary_key(); + r.sec = 1000 - static_cast<uint128_t>(r.id); + }); + } + + table.erase(table.get(0)); + + uint64_t expected_key = 2; + for( const auto& r : table.get_index<"bysecondary"_n>() ) + { + check( r.primary_key() == expected_key, "idx128_autoincrement_test_part1 - unexpected primary key" ); + --expected_key; + } + check( expected_key == 0, "idx128_autoincrement_test_part1 - did not iterate through secondary index properly" ); + +} + +void test_api_multi_index::idx128_autoincrement_test_part2() +{ + using namespace _test_multi_index; + + typedef record_idx128 record; + + const name::raw table_name = "autoinctbl2"_n; + auto payer = get_self(); + + { + multi_index<table_name, record, + indexed_by<"bysecondary"_n, const_mem_fun<record, uint128_t, &record::get_secondary>> + > table( get_self(), get_self().value ); + + check( table.available_primary_key() == 3, "idx128_autoincrement_test_part2 - did not recover expected next primary key" ); + } + + multi_index<table_name, record, + indexed_by<"bysecondary"_n, const_mem_fun<record, uint128_t, &record::get_secondary>> + > table( get_self(), get_self().value ); + + table.emplace( payer, [&](auto& r) { + r.id = 0; + r.sec = 1000; + }); + // Done this way to make sure that table._next_primary_key is not incorrectly set to 1.
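   // A sketch of the invariant guarded here (assuming available_primary_key()
   // caches one past the largest primary key ever emplaced through this table
   // instance): after rows 0, 1 and 2 have existed and id 0 is re-emplaced
   // above, the cache must still report 3, not 1, e.g.
   //    check( table.available_primary_key() == 3, "cache must not reset to 1" );
   // The loop below then hands out ids 3 and 4.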
+ + for( int i = 3; i < 5; ++i ) { + table.emplace( payer, [&](auto& r) { + auto itr = table.available_primary_key(); + r.id = itr; + r.sec = 1000 - static_cast<uint128_t>(r.id); + }); + } + + uint64_t expected_key = 4; + for( const auto& r : table.get_index<"bysecondary"_n>() ) + { + check( r.primary_key() == expected_key, "idx128_autoincrement_test_part2 - unexpected primary key" ); + --expected_key; + } + check( expected_key == static_cast<uint64_t>(-1), "idx128_autoincrement_test_part2 - did not iterate through secondary index properly" ); + + auto itr = table.find(3); + check( itr != table.end(), "idx128_autoincrement_test_part2 - could not find object with primary key of 3" ); + + table.emplace( payer, [&](auto& r) { + r.id = 100; + r.sec = itr->sec; + }); + table.erase(itr); + + check( table.available_primary_key() == 101, "idx128_autoincrement_test_part2 - next_primary_key was not correct after record update" ); +} + +void test_api_multi_index::idx256_general() +{ + using namespace _test_multi_index; + + typedef record_idx256 record; + + auto payer = get_self(); + + print("Testing checksum256 secondary index.\n"); + multi_index<"indextable5"_n, record, + indexed_by<"bysecondary"_n, const_mem_fun<record, const checksum256&, &record::get_secondary>> + > table( get_self(), get_self().value ); + + auto fourtytwo = checksum256::make_from_word_sequence<uint64_t>( 0ULL, 0ULL, 0ULL, 42ULL ); + //auto onetwothreefour = checksum256::make_from_word_sequence<uint64_t>(1ULL, 2ULL, 3ULL, 4ULL); + auto onetwothreefour = checksum256{std::array<uint32_t, 8>{ {0,1, 0,2, 0,3, 0,4} }}; + + table.emplace( payer, [&](auto& o) { + o.id = 1; + o.sec = fourtytwo; + }); + + table.emplace( payer, [&](auto& o) { + o.id = 2; + o.sec = onetwothreefour; + }); + + table.emplace( payer, [&](auto& o) { + o.id = 3; + o.sec = fourtytwo; + }); + + auto e = table.find(2); + + print("Items sorted by primary key:\n"); + for( const auto& item : table ) { + print(" ID=", item.primary_key(), ", secondary=", item.sec, "\n"); + } + + { + auto itr = table.begin(); + check( itr->primary_key() == 1 && itr->get_secondary() == fourtytwo, "idx256_general - primary key sort" ); + ++itr; + check( itr->primary_key() == 2 && itr->get_secondary() == onetwothreefour, "idx256_general - primary key sort" ); + ++itr; + check( itr->primary_key() == 3 && itr->get_secondary() == fourtytwo, "idx256_general - primary key sort" ); + ++itr; + check( itr == table.end(), "idx256_general - primary key sort" ); + } + + auto secidx = table.get_index<"bysecondary"_n>(); + + auto lower1 = secidx.lower_bound( checksum256::make_from_word_sequence<uint64_t>(0ULL, 0ULL, 0ULL, 40ULL) ); + print("First entry with a secondary key of at least 40 has ID=", lower1->id, ".\n"); + check( lower1->id == 1, "idx256_general - lower_bound" ); + + auto lower2 = secidx.lower_bound( checksum256::make_from_word_sequence<uint64_t>(0ULL, 0ULL, 0ULL, 50ULL) ); + print("First entry with a secondary key of at least 50 has ID=", lower2->id, ".\n"); + check( lower2->id == 2, "idx256_general - lower_bound" ); + + if( table.iterator_to(*lower2) == e ) { + print("Previously found entry is the same as the one found earlier with a primary key value of 2.\n"); + } + + print("Items sorted by secondary key (checksum256):\n"); + for( const auto& item : secidx ) { + print(" ID=", item.primary_key(), ", secondary=", item.sec, "\n"); + } + + { + auto itr = secidx.begin(); + check( itr->primary_key() == 1, "idx256_general - secondary key sort" ); + ++itr; + check( itr->primary_key() == 3, "idx256_general - secondary key sort" ); + ++itr; + check( itr->primary_key() == 2, "idx256_general - secondary key sort" ); + ++itr; + check( itr == secidx.end(), "idx256_general - secondary key sort" ); + } + + auto upper = secidx.upper_bound( checksum256{std::array<uint64_t, 4>{{0, 0, 0, 42}}} ); + + print("First entry with a secondary key greater than 42 has ID=", upper->id, ".\n"); + check( upper->id == 2, "idx256_general - upper_bound" ); + check( upper->id == secidx.get(onetwothreefour).id, "idx256_general - secondary index get" ); + + print("Removed entry with ID=", lower1->id, ".\n"); + secidx.erase( lower1 ); + + print("Items reverse sorted by primary key:\n"); + for( auto itr = table.rbegin(); itr != table.rend(); ++itr ) { + const auto& item = *itr; + print(" ID=", item.primary_key(), ", secondary=", item.sec, "\n"); + } + + { + auto itr = table.rbegin(); + check( itr->primary_key() == 3 && itr->get_secondary() == fourtytwo, "idx256_general - primary key sort after remove" ); + ++itr; + check( itr->primary_key() == 2 && itr->get_secondary() == onetwothreefour, "idx256_general - primary key sort after remove" ); + ++itr; + check( itr == table.rend(), "idx256_general - primary key sort after remove" ); + } +}
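As the commented-out line inside idx256_general suggests, the std::array construction of onetwothreefour is equivalent to building the value from 64-bit words; a sketch of that equivalence (assuming make_from_word_sequence lays words out most-significant first, each uint64_t word splitting into high/low uint32_t halves):

   auto a = checksum256::make_from_word_sequence<uint64_t>( 1ULL, 2ULL, 3ULL, 4ULL );
   auto b = checksum256{ std::array<uint32_t, 8>{ {0,1, 0,2, 0,3, 0,4} } };
   eosio::check( a == b, "same 256-bit value" );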
+ +void test_api_multi_index::idx_double_general() +{ + using namespace _test_multi_index; + + typedef record_idx_double record; + + auto payer = get_self(); + + print("Testing double secondary index.\n"); + multi_index<"floattable1"_n, record, + indexed_by<"bysecondary"_n, const_mem_fun<record, double, &record::get_secondary>> + > table( get_self(), get_self().value ); + + auto secidx = table.get_index<"bysecondary"_n>(); + + double tolerance = std::numeric_limits<double>::epsilon(); + print("tolerance = ", tolerance, "\n"); + + for( uint64_t i = 1; i <= 10; ++i ) { + table.emplace( payer, [&]( auto& o ) { + o.id = i; + o.sec = 1.0 / (i * 1000000.0); + }); + } + + double expected_product = 1.0 / 1000000.0; + print( "expected_product = ", expected_product, "\n" ); + + uint64_t expected_key = 10; + for( const auto& obj : secidx ) { + check( obj.primary_key() == expected_key, "idx_double_general - unexpected primary key" ); + + double prod = obj.sec * obj.id; + + print(" id = ", obj.id, ", sec = ", obj.sec, ", sec * id = ", prod, "\n"); + + check( std::abs(prod - expected_product) <= tolerance, + "idx_double_general - product of secondary and id not equal to expected_product within tolerance" ); + + --expected_key; + } + check( expected_key == 0, "idx_double_general - did not iterate through secondary index properly" ); + + { + auto itr = secidx.lower_bound( expected_product / 5.5 ); + check( std::abs(1.0 / itr->sec - 5000000.0) <= tolerance, "idx_double_general - lower_bound" ); + + itr = secidx.upper_bound( expected_product / 5.0 ); + check( std::abs(1.0 / itr->sec - 4000000.0) <= tolerance, "idx_double_general - upper_bound" ); + + } +} + +void test_api_multi_index::idx_long_double_general() +{ + using namespace _test_multi_index; + + typedef record_idx_long_double record; + + auto payer = get_self(); + + print("Testing long double secondary index.\n"); + multi_index<"floattable2"_n, record, + indexed_by<"bysecondary"_n, const_mem_fun<record, long double, &record::get_secondary>> + > table( get_self(), get_self().value ); + + auto secidx = table.get_index<"bysecondary"_n>(); + + long double tolerance = std::min( static_cast<long double>(std::numeric_limits<double>::epsilon()), + std::numeric_limits<long double>::epsilon() * 1e7l ); + print("tolerance = ", tolerance, "\n"); + + long double f = 1.0l; + for( uint64_t i = 1; i <= 10; ++i, f += 1.0l ) { + table.emplace( payer, [&](auto& o) { + o.id = i; + o.sec = 1.0l / (i * 1000000.0l); + }); + } + + long double expected_product = 1.0l / 1000000.0l; + print( "expected_product = ", 
expected_product, "\n" ); + + uint64_t expected_key = 10; + for( const auto& obj : secidx ) { + check( obj.primary_key() == expected_key, "idx_long_double_general - unexpected primary key" ); + + long double prod = obj.sec * obj.id; + + print(" id = ", obj.id, ", sec = ", obj.sec, ", sec * id = ", prod, "\n"); + + check( std::abs(prod - expected_product) <= tolerance, + "idx_long_double_general - product of secondary and id not equal to expected_product within tolerance" ); + + --expected_key; + } + check( expected_key == 0, "idx_long_double_general - did not iterate through secondary index properly" ); + + { + auto itr = secidx.lower_bound( expected_product / 5.5l ); + check( std::abs(1.0l / itr->sec - 5000000.0l) <= tolerance, "idx_long_double_general - lower_bound" ); + + itr = secidx.upper_bound( expected_product / 5.0l ); + check( std::abs(1.0l / itr->sec - 4000000.0l) <= tolerance, "idx_long_double_general - upper_bound" ); + + } +} + +void test_api_multi_index::idx64_pk_iterator_exceed_end() +{ + auto table = _test_multi_index::idx64_table<"indextable1"_n.value, "bysecondary"_n.value>( get_self() ); + auto end_itr = table.end(); + // Should fail + ++end_itr; +} + +void test_api_multi_index::idx64_sk_iterator_exceed_end() +{ + auto table = _test_multi_index::idx64_table<"indextable1"_n.value, "bysecondary"_n.value>( get_self() ); + auto end_itr = table.get_index<"bysecondary"_n>().end(); + // Should fail + ++end_itr; +} + +void test_api_multi_index::idx64_pk_iterator_exceed_begin() +{ + auto table = _test_multi_index::idx64_table<"indextable1"_n.value, "bysecondary"_n.value>( get_self() ); + auto begin_itr = table.begin(); + // Should fail + --begin_itr; +} + +void test_api_multi_index::idx64_sk_iterator_exceed_begin() +{ + auto table = _test_multi_index::idx64_table<"indextable1"_n.value, "bysecondary"_n.value>( get_self() ); + auto begin_itr = table.get_index<"bysecondary"_n>().begin(); + // Should fail + --begin_itr; +} + +void test_api_multi_index::idx64_pass_pk_ref_to_other_table() +{ + auto table1 = _test_multi_index::idx64_table<"indextable1"_n.value, "bysecondary"_n.value>( get_self() ); + auto table2 = _test_multi_index::idx64_table<"indextable2"_n.value, "bysecondary"_n.value>( get_self() ); + + auto table1_pk_itr = table1.find(781); + check( table1_pk_itr != table1.end() && table1_pk_itr->sec == "bob"_n.value, "idx64_pass_pk_ref_to_other_table - table.find() of existing primary key" ); + + // Should fail + table2.iterator_to(*table1_pk_itr); +} + +void test_api_multi_index::idx64_pass_sk_ref_to_other_table() +{ + auto table1 = _test_multi_index::idx64_table<"indextable1"_n.value, "bysecondary"_n.value>( get_self() ); + auto table2 = _test_multi_index::idx64_table<"indextable2"_n.value, "bysecondary"_n.value>( get_self() ); + + auto table1_pk_itr = table1.find(781); + check( table1_pk_itr != table1.end() && table1_pk_itr->sec == "bob"_n.value, "idx64_pass_sk_ref_to_other_table - table.find() of existing primary key" ); + + auto table2_sec_index = table2.get_index<"bysecondary"_n>(); + // Should fail + table2_sec_index.iterator_to(*table1_pk_itr); +} + +void test_api_multi_index::idx64_pass_pk_end_itr_to_iterator_to() +{ + auto table = _test_multi_index::idx64_table<"indextable1"_n.value, "bysecondary"_n.value>( get_self() ); + auto end_itr = table.end(); + // Should fail + table.iterator_to(*end_itr); +} + +void test_api_multi_index::idx64_pass_pk_end_itr_to_modify() +{ + auto table = _test_multi_index::idx64_table<"indextable1"_n.value, "bysecondary"_n.value>( get_self() 
); + auto end_itr = table.end(); + + // Should fail + table.modify( end_itr, get_self(), [](auto&){} ); +} + +void test_api_multi_index::idx64_pass_pk_end_itr_to_erase() +{ + auto table = _test_multi_index::idx64_table<"indextable1"_n.value, "bysecondary"_n.value>( get_self() ); + auto end_itr = table.end(); + + // Should fail + table.erase(end_itr); +} + +void test_api_multi_index::idx64_pass_sk_end_itr_to_iterator_to() +{ + auto table = _test_multi_index::idx64_table<"indextable1"_n.value, "bysecondary"_n.value>( get_self() ); + auto sec_index = table.get_index<"bysecondary"_n>(); + auto end_itr = sec_index.end(); + + // Should fail + sec_index.iterator_to(*end_itr); +} + +void test_api_multi_index::idx64_pass_sk_end_itr_to_modify() +{ + auto table = _test_multi_index::idx64_table<"indextable1"_n.value, "bysecondary"_n.value>( get_self() ); + auto sec_index = table.get_index<"bysecondary"_n>(); + auto end_itr = sec_index.end(); + + // Should fail + sec_index.modify( end_itr, get_self(), [](auto&){} ); +} + + +void test_api_multi_index::idx64_pass_sk_end_itr_to_erase() +{ + auto table = _test_multi_index::idx64_table<"indextable1"_n.value, "bysecondary"_n.value>( get_self() ); + auto sec_index = table.get_index<"bysecondary"_n>(); + auto end_itr = sec_index.end(); + + // Should fail + sec_index.erase(end_itr); +} + +void test_api_multi_index::idx64_modify_primary_key() +{ + auto table = _test_multi_index::idx64_table<"indextable1"_n.value, "bysecondary"_n.value>( get_self() ); + + auto pk_itr = table.find(781); + check( pk_itr != table.end() && pk_itr->sec == "bob"_n.value, "idx64_modify_primary_key - table.find() of existing primary key" ); + + // Should fail + table.modify( pk_itr, get_self(), [](auto& r){ + r.id = 1100; + }); +} + +void test_api_multi_index::idx64_run_out_of_avl_pk() +{ + auto table = _test_multi_index::idx64_table<"indextable1"_n.value, "bysecondary"_n.value>( get_self() ); + + auto pk_itr = table.find(781); + check( pk_itr != table.end() && pk_itr->sec == "bob"_n.value, "idx64_run_out_of_avl_pk - table.find() of existing primary key" ); + + auto payer = get_self(); + + table.emplace( payer, [&](auto& r) { + r.id = static_cast<uint64_t>(-4); + r.sec = "alice"_n.value; + }); + check( table.available_primary_key() == static_cast<uint64_t>(-3), "idx64_run_out_of_avl_pk - incorrect available primary key" ); + + table.emplace( payer, [&](auto& r) { + r.id = table.available_primary_key(); + r.sec = "bob"_n.value; + }); + + // Should fail + table.available_primary_key(); +} + +void test_api_multi_index::idx64_sk_cache_pk_lookup() +{ + auto table = _test_multi_index::idx64_table<"indextable1"_n.value, "bysecondary"_n.value>( get_self() ); + + auto sec_index = table.get_index<"bysecondary"_n>(); + auto sk_itr = sec_index.find("bob"_n.value); + check( sk_itr != sec_index.end() && sk_itr->id == 540, "idx64_sk_cache_pk_lookup - sec_index.find() of existing secondary key" ); + + auto pk_itr = table.iterator_to(*sk_itr); + auto prev_itr = --pk_itr; + check( prev_itr->id == 265 && prev_itr->sec == "alice"_n.value, "idx64_sk_cache_pk_lookup - previous record" ); +}
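The two cache-lookup tests around this point exercise iterator_to(), which turns a row reference obtained through one index into an iterator on another index of the same table without performing a fresh lookup; a sketch:

   auto sk_itr = sec_index.find( "bob"_n.value ); // locate the row via the secondary index
   auto pk_itr = table.iterator_to( *sk_itr );    // primary-index iterator to that same row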
+ +void test_api_multi_index::idx64_pk_cache_sk_lookup() +{ + auto table = _test_multi_index::idx64_table<"indextable1"_n.value, "bysecondary"_n.value>( get_self() ); + + + auto pk_itr = table.find(540); + check( pk_itr != table.end() && pk_itr->sec == "bob"_n.value, "idx64_pk_cache_sk_lookup - table.find() of existing primary key" ); + + auto sec_index = table.get_index<"bysecondary"_n>(); + auto sk_itr = sec_index.iterator_to(*pk_itr); + auto next_itr = ++sk_itr; + check( next_itr->id == 781 && next_itr->sec == "bob"_n.value, "idx64_pk_cache_sk_lookup - next record" ); } diff --git a/unittests/test-contracts/test_api_multi_index/test_api_multi_index.hpp b/unittests/test-contracts/test_api_multi_index/test_api_multi_index.hpp new file mode 100644 index 00000000000..73f48ea8e4c --- /dev/null +++ b/unittests/test-contracts/test_api_multi_index/test_api_multi_index.hpp @@ -0,0 +1,109 @@ +/** + * @file + * @copyright defined in eos/LICENSE + */ +#pragma once + +#include <eosio/eosio.hpp> + +class [[eosio::contract]] test_api_multi_index : public eosio::contract { +public: + using eosio::contract::contract; + + [[eosio::action("s1g")]] + void idx64_general(); + + [[eosio::action("s1store")]] + void idx64_store_only(); + + [[eosio::action("s1check")]] + void idx64_check_without_storing(); + + [[eosio::action("s1findfail1")]] + void idx64_require_find_fail(); + + [[eosio::action("s1findfail2")]] + void idx64_require_find_fail_with_msg(); + + [[eosio::action("s1findfail3")]] + void idx64_require_find_sk_fail(); + + [[eosio::action("s1findfail4")]] + void idx64_require_find_sk_fail_with_msg(); + + [[eosio::action("s1pkend")]] + void idx64_pk_iterator_exceed_end(); + + [[eosio::action("s1skend")]] + void idx64_sk_iterator_exceed_end(); + + [[eosio::action("s1pkbegin")]] + void idx64_pk_iterator_exceed_begin(); + + [[eosio::action("s1skbegin")]] + void idx64_sk_iterator_exceed_begin(); + + [[eosio::action("s1pkref")]] + void idx64_pass_pk_ref_to_other_table(); + + [[eosio::action("s1skref")]] + void idx64_pass_sk_ref_to_other_table(); + + [[eosio::action("s1pkitrto")]] + void idx64_pass_pk_end_itr_to_iterator_to(); + + [[eosio::action("s1pkmodify")]] + void idx64_pass_pk_end_itr_to_modify(); + + [[eosio::action("s1pkerase")]] + void idx64_pass_pk_end_itr_to_erase(); + + [[eosio::action("s1skitrto")]] + void idx64_pass_sk_end_itr_to_iterator_to(); + + [[eosio::action("s1skmodify")]] + void idx64_pass_sk_end_itr_to_modify(); + + [[eosio::action("s1skerase")]] + void idx64_pass_sk_end_itr_to_erase(); + + [[eosio::action("s1modpk")]] + void idx64_modify_primary_key(); + + [[eosio::action("s1exhaustpk")]] + void idx64_run_out_of_avl_pk(); + + [[eosio::action("s1skcache")]] + void idx64_sk_cache_pk_lookup(); + + [[eosio::action("s1pkcache")]] + void idx64_pk_cache_sk_lookup(); + + [[eosio::action("s2g")]] + void idx128_general(); + + [[eosio::action("s2store")]] + void idx128_store_only(); + + [[eosio::action("s2check")]] + void idx128_check_without_storing(); + + [[eosio::action("s2autoinc")]] + void idx128_autoincrement_test(); + + [[eosio::action("s2autoinc1")]] + void idx128_autoincrement_test_part1(); + + [[eosio::action("s2autoinc2")]] + void idx128_autoincrement_test_part2(); + + [[eosio::action("s3g")]] + void idx256_general(); + + [[eosio::action("sdg")]] + void idx_double_general(); + + [[eosio::action("sldg")]] + void idx_long_double_general(); + +}; diff --git a/unittests/test-contracts/test_api_multi_index/test_api_multi_index.wasm b/unittests/test-contracts/test_api_multi_index/test_api_multi_index.wasm index 322c00f679526b6a124325d07de2c06d3f66143d..c4cbb5f2584fb2d0cc1b33ceb9ee4ff654f6afea 100755 GIT binary patch literal 72891 zcmeI31D4UdFSu)-qZ4Iasw_=LLgVKZbxAo6O8RiL*u716v9$NnNDV=GqPem$5Ld; zk>%hpAyUagNW+lQv;-PDVF_CbO$q5hhEB~ANZSAjkTfkz%~A-oo#}L-&{^pG{@*!w zc~4KW9O6xdKGzs zYkFjC`oKk#tukoNX#b_Hb%Xum+Y5B|aOrM0!Sy2}yCa}-dWbc8= z+1<|$PVryI#xKji4)}&Ar>8FNGB7HSj*aD|dqzekTeG{*aTs-0m%0mwep;7O#rU<9
z4&jP-TBdz7t;@!z59}{cv!Ayuk2i^CB7AaHv%(HL*5&Uh9#~PoIH0v*s>8=*bl2?o z^wh}M=YIOGSeCz8!7$fT0OPAeMWXokF^|lHLQzUZVTDY>jPx__e{=? zKRbB5+ViVU-v#mqPtY%#`+U&xT-Tp7(da0!0x^G+PT<$VM7mvcFS~jDOpGzCaetxnWFvBX4t+R=4Wjfobj$Skk z4GRPsQN8`GS*YHf;qQgP6Z=o=ubeh;`st^g7Wem`mOZ+ceeQ$zr}_U6@Hx;IpD}QH zB|e?cN<46S-|2B*<%~+-!0Ei=mw}4^+jn|jU*D7XkN=$!_w}85gn!`~A^$t$jEetX z<#ZaTRAQ<=`6=P4VdeC`pi&L@g<)8Y!yv3ZF`f&X^M``^T+qDvTlq&%UsxU8w{Pl+ume!pJx@AL2d*j=Tz zKTy0b5Ab*G_2KW`{QX1a(svZE%cX&awf&pl|9H9d2aDI`(s#PopZOnu-8PaB+3O$u z-5cN6*2rD<`q0Neb9-Cq-!B>|xBXv=M#`^0Tr^UC{Snv5{NcAe@WB#H?$+x=cYW%v zFMR)frPu$}HS*mXuliWIk&hOy%ai{D-!>q>uhhtY^RGdOhfA;j(7y&D?k&AO(_L?{)BIvk^FV3kxv${e`mPB*n9HVcf9v`#p_Rn)iiJz ze(3I-9}Hez+1|A8fA)nt|K>ISBS?noNBZY*WZtS58|DN5`>Wr5=bUv!gTFFAUv97& zBwq{buZhCy+)oD0JHksE;YPMq^H4C+JoL?=`L{t5oWb{P6U|^t#4l;EHBQ~fKpGCu zZHPt1?L9m>0?W@A>K}*u(@GkOSI0OXmLx_YmWmH+Pcl&iR8KZJ``!TO4^uCYY_vPEa^F0QZR%xYCbH$US24t99O@m2Z!30B^ z8NM>D4To3o=heUTBzn|Ddnc$jShMPbJ({PDW{A<$+B@LK=Fmj)Q(K||idDBnL;6s! z(bhQ8GfT&u8ojKaUx70x8hv^d(^IM+e}SO@Wlb_pDM&r7!w}zSF-ZH;xH-V82ZDUa zqjhSt;?KH?RR9m77A3!unf#VSC>Dp;u&kF0vI$it3xXL<(TBloce7!C#Bj~;~ z6rS#J2zt;V=sc(<484I*o*ofC?xc%A+b#$G@V9%Cun{$b7YOrQIWXn?A!cWlKt7P+ zbKfvz2%2yCQ#}Ym9!m3}!A(HA+C5gN5^V=#D;dT%1nJ@XdtSCaOrt@T$xwA&R2SB& zcv>y>$EaO@fUzlT2Mw4SdsE9@9{c_I*!>JO@V(e}Cb)S_sFaNd6-vOE$vX+-N5%v6G;rf=3dOOE9&qJ3SalIz+%6;D$KU|!H)+CAZ23@ zRvfg3qRov+TQ=%8h`ZH@8-zV(gHY#e{3CX5v%2X`UwzjnKnAE4L*>umKz|1&lICJ# za8(>2eo&0T9^IUh;WHLi){4G-vANv#XZc+0amjZ6d(hs_{QUL*&o9j9i+1d>)Q%v# z8DIjYYyMo|*AR>UnDp{-^YyhA;${hr=I3Ag+?TJ9*2PD_*4Njvout_Dew|&5O^Kj9PJ<`qacf+mmHa_p~F`)sFZI!UVoVUg=;Pd+G)_4b>uY<8% z$Y(*Cw#GLL;P8=b7c<7B>Anp{wgimq%tnP>Ff2w0rKF11^yQFRz6~^btigR(@oUe! zO(;-F1_pnP-hXwYw$%g{J}NW}lNy%%GcWayeSm5x2D!j48ryXtNcF}Kt!#ojc8)93dY9G@KU$6-}q&1?E(QS zXGhi*g_v(sUHlyK2dttA3?F2Jl}zj@19j5>?|QhNxWt4GO(csfUPD* zAwp_nA(`i5o5UL901)BDMW_NmA~m+Qiwl1@84ftam`5vMhx%1V^lL~rfL2x+NSnHX zkd&z_B2;O`^bj|Afx=DQ=_^&~D_)$=^p#spU-?8F^~{A=A_b?HG$OWfB#kA7=M6Lv zr<-S*7^==q_N+D!>O~5@fgr*GQJ$(9wD>M)=_O^L3^4*edSLYwFGDaY_DgbhqvGYf z=C4$KLBcCC?I{z@eU~zXjg%Oyxjp@6)0LRnBjI4w3>|%?aB}t;nF0k3O&D*b+4CaX zPj|Q~fWfOpqES8iHZj<11rJ`@>}AA^#Fy5vAl0eyfTjXvo!5IL{M z2t>f9(WAu?oN`U3l2R0P&8c>=mdNWyK`M30G?}#K=ujXoB<+SjsCTi`X40A?Psw*c zF_wQ|FytpSM7N>VI&`Y32XFo3VHnRjlyU18E?hSjs>WcM3pzY7uSq{jYu(pQMZtI$ zIm7leNYNjesVP`mP$56xIs}~v!cc3@T_6Cbu?g>K4ecRoP;hqwRC|l{*F`ZFy(?oW zWXUCs7`%zaXQUL0P{SBO3NPuqozan(g(Af|GMW`KVlVDd?OuTE(X{Xv+H|H3Q+b%{T?I(-~wr{f{KQDGi zZT)XC0TmfUKus!;V_#o`8-SMBI=C?ae^6UcPBCA^($<6mh(THN9YBtdrZc)F^M;u= zQb5c;9*h8SMJO1fv#L|0XtbY*ZUm>)wFH8@QQ#H9J*!cIyENsHx zV&|N&$LwVi=#sS7f&aC%rfU3Y!~d$`e^#S&9We9+5MFkIG@^XT>pu#^N?|+$*^u9d zy4ukA4RCG{KAO=8;yQ#eX27uqV9bkTBKdTP!AvDneK63>Hbt`=8X!T*FeM5JfJ%lb zKp+~_qy6d)Qz_rf@`vcp_!+&40yY&Wvh~WJ+%QGie223GQmDBZrcxHWM9sZn>TMAQ z#LyY0V96DcX=BNdy(A4-GM4y3FELA zUvtOAU_T$?u!DVRKRBdXGUAve&bG@QieOQdX(5IHAj+_ef3G1m-A}H6BhA?v1nG-VH0x3`?F=5Y^Tf3>xEw+J# zKW$4L2~7lDR-4(qKwPRXc_u`S;ll`5&7tr@&`xt%0}h4bW2n9^^Aap#sJ_7DKPfJb z;5d;DlwAXc>SFBO4AmDr)`sc}mNZnqNO*Mo4ApI&AHh)lVv_|JPN%Va8HVbY?MhU!JJb%E_y{KDKR$HP#a4-&F1d!dnJ&wh`sq57pc zB0VZYb@yggY^Z*zH)a;5y2BWd%utu_mbGT6elg=?l*i3b{gPvAsCIBH48hXojMk@?FN5YDPeb(uXQ0%0L-orX`1tVY*m%%qUAtB!eKO}T@$5m_5>Q3Z=c! z)uC{~@pLE@ov#IlLNOXS6fRiX4uxW{vT$ei?-SCYP)xo$!Qs92>T>RrD zXu-zg>s!3a(NPSgMA=S%=->@8z zitvwDC_FZ`I_*AVjF}4Kmb+3VBbu_)BFVpUUUx-s67qqK*IY!}!S458a%P61Xq|zX4ARbrpGB74AIN!k!YYEc!*Q$=F#y<>h%8Gw zBg-b5r?f?(NfAusTdy{WEYtZJeA*lF+G0~*g~+l$S-B$k>8RqO5iCSd2b#+92#$m>W_%?#s#z$F zSd3!T6vRTU9x! 
z``x&N?Dy|hEc^ZP5oEuAuYGBv%?f0{|L~|~zXdHUSN2;_ak;YJf{M$P{Z#R!?Dr^6 z75v!BexErh`@!19Atb+uy*)bFFQZ7{Rq~XHd(hKS(juFIz&RM z6XN$xB+i7~RgaRmDU%3~SCmA#gS8PQ@mUkbG3k^~c^o1A$R}3 j%ET^;P7;~1aC zfPJsjiBsalAwGpYW_Mz#uxghMNn=&-UOxG!P+1w51fbu zr3A*V1m*K~hM0!Z(zD<)k7soCh6( zOnEP_R+hHN+1>e(^NF&YCleA#+<%hId@#N&b`p@4=fv4rO}H^jo-f9aK81t-iOQC| zA&s2lEfa^lYweTn{GmF{y2m5e(ftOKk<<<6eaqeFSv@$qhy+=5p6B$~uIz3atP1c& zXUM{)xu9Iy0ls}Q@K*ez$ohuyFn4yv9ckaPj#fYG+bzeNN|rlV8&vX(M?od8FL_}& zS)Y4hj_i0T=D!wV0*lc|6?`x}nxMc{QwR5oO05lb@I%Lx*t*PjnUgc4b`aaCjMO?Q z@Awhh&wlGGa06&%JGJfTe^F|C$6un!Ix5^wZT$~MU{6xp`CE@SwJmqBHmL1Gk14e+ z=D!xGZ7~{A+dn-jYWw|qjmrtHba`DYnBs3c-LHfi#F?On=M|C7<(@DHto*QSZR9!$ zdQa!0cLG}zx!$T+j!a0zI~_JR5|iTHvb{=8;TqYQCD1^2lREE`&|0Rq!%ASPVuYqV zPaibACEDIJPYCbrtGY!HN`qt{MG!`jJI!z0AXa0$o{7L{^lgbYdy&%eBK0e6i=}t% zcQGAgNl+FIrl1!3)et`2w4aIC7>1&4gX+?L4xChn6P^Gy-@o$rCYOqPlndJ7hQRD# z1lkbRWX@r#XeM4lE(Me^<5HW+jE1Zx#~J}X=Hbd%?DMZi+*;5C29Onkk=b61cp z!OmYM`#C3RmDC%*c&Ye4D_SP_P0I~sX+<_lmTdH6Q?a}&{c?0$W)>YN8nz5%0f{1d z(g33$GOcn8skCxaY7Kfhjt_WTaD_`NjHn*3s}S)fG?4r(+OO8;ul}EZ^uTu>__Hm; zipyiwNv~i{u!^$^{mL2?LbzxR-(sE+@68`$+WAWhtMloLJFp`d{wB!DFwq|@odhvH z{dh(!B2Pb%>c#Oz!y=zwL@1>+#>yv>h4N_m!R%fVRJ2V(A%eWH+=;3-pmfF?ZJDeB zEhx8((i+F)G$yj>3jSm{ja5|{H1vUbkoNOniG95{XeR@#7Z9VfY?XTyw-pLgcj|+B zIm)xww3?LI=CNMe&m}kRXBtELS;>ua?5C*5{+${2lg*>R{+${2GoAwb4;#J;aM}qD z{mUf_^amW^KFirHY2HSv(+F914Hq1sg$s5b5!-b&IZ&W&J|kN~BDKJ87IFB>f6Ktg zV6q6R1xYW>lf<_`H614gd?nh#3#{Z<`te-mPVXuh%rBWBY<<0Ay`^^iv=d7lHVywJ zd(x7qSSvz1DSy4ZM`<-JkjzvDJrO!#u=XkM1Sd_4epx1?P++Vx!w1<;WcL9cs1fD* z6>`$==!)$~X_mH`2QB4Kb&#EJnSnrXit#JSls77Em4B+gKdvaNlwxjNUNfI!&*F$& z`eGXrM8GA0aiMYs>{#0ILTmK zr1~a7Decik8;ljlrafX2HTq%!QA|hKgTR>KM~KLIbtd7G1XWEu((_C2=N)2{h&X_rF~fQ{{NrQahO?E}&W@&&O)YE&+14(uh$j#P}b z=Isx@??LV9=KJ(8*prndjhPjzG{5-12cJq1D7>}O3}+STnATqSqz2(jASd~vCW#*( zteA0Sutyo~U6udgAV}o z>DvhHHReJLpIU>glI$mpeWb5BoyZez*bZbYrIjfTkxTMuT>{uNB7+bxH4LyO6!puz zGS3wKhAe@loFimQeY)V_9@$Gk8?|I%X*LZlmfEp&JQITmDn2RacOFD|ggwJ^jE|Nxa{4ZlhKj?P zkTvgA@jM}?Z>RKwaX>W$^O|+c)ulPD|3K2)2_B|()-7OD}ll(!6+$mZqvBP->cCQFyEl7lQ##3j14 zsp8jVs(6s8;+hR&P>{T;tco*3r;1xc#Su9POv!%)oNf&OvIKK)+5pT=){4IwIFc*e zB!{A3b~cTui%sJmcm6K7Y0yOSYO`rj$!r?_B;Aj!pyuS&sKA5p_-Ij5o=|y_#*d5g zR9cEQ<>^(6Do=A>L0v=2Q#J{0ug6w-%6mSkJduL?gjSwTO0KA%YwCdJpR$1$(zNL6=}bE+Y>?ow*(8 zbon~PikV^KPWd1SNA8@Ho>}(I#Xv^tT51+eS?UFl7-PMx_ja?2R1j1ru1qnzJkL1e zaInc1j=kHg^{yFOYDlRomWf5Ps}$s&rCx5b*LUp9G@vUXWV*-2zyf6J!XP-gz)w1R zt&cvY;!b4k;Ym)8t?^FmuzhWPsabRaXR9|;^-fB=5jZ_F_TD(aiAt;gsDgrGio0%f z$h~pctp!1Uql01APQID>8*em-WxDxLv~!TPGz}WHlQB7uoHR!_8vdH*DaFY&&&~>_ zeP&AwRIroxHzQMI(aELf3{4Djt*un^TFiiX)S9s{^8D{CFE9G5ciMaX$Q8r1UfLE; zH7}Olt#2|Q|Gt>?Zm*p=tT4=aqcDpZL`%1i>MR!+x zIIsO%KWPS;ioEt2200CvO6rYYyx0CAE9&5&zjWoi_N;}+EO}9)?JSeIDzw|({j27+ zkLCBUU2a}`fG3|z$;l3$EzfJ86T&>3jzbG*M4PEV8@{{O{=N4w?VKt1_%^n!jlJF_97YudC+!m+p59=u}^00nU@}L$w$b%@xqazRO zF%okfzUR$F_oFE z(ePuyY}t?*vjvKsuKHtYAI^Y$wD#dw8F@FSG+8@N8p05;Sh9st1R0#ylp*Z@4OSHJ zp;$kR`G@@N;wJ7y&Ch-+NY*nK*kei7dx7J3JNSX{J`!O#S&z&q77f-5;u0yhLDPPn_PBnlo1l&wxYY@1B?IN@o)Btb5=l^_@(f@CkAsulya zNCm1u%J{Xx^;|bgfUp-^q8t4aRh``EOCmb5yKd62x(C)`x@yU5$dTf@mkcREZ+O0R z^w7^T>r8d1sd6dX*pmwONC&B~G>o03f?6UKl=f06l)#e;?(#blsW>_;h_(Y>s)}?epbR86 zDwb(JmYB<=FIX&$s0&B4?uC3MbnV`8t?oI5DNB}vlqc@fbSz8h zLz&^b=$kt!WsI`Nv}UjEa#z`nYS0Sx_S9>59MU5d1sW?Y|8oFPP;82^lSC~CY7L+g zx9UeCI1F1pE1p0!{#*m58c8?mPl{1k39qbEpkbdXV_oyYq!OA@GSVm;MtS`3FJ5Qk z&q`>ZT0PLI7-;9}b4_9yWR?c&*n_MSTo%Aw!a-f|=75{RX-$;E0@wT@T;9YeiVhEL zjrZ|^EP)+MA3G@_LCDlKheX#JpT3EEtef{5f(Z}6z4?2=LIwd7zljw<1aqwVD&8xA z(g|qc@Ht)jSlE7LFlgSY77fYUo3A)C9vfE(W9_R~ssLv##lo$|drWnTaCumb)|(&t zBem_OU-LuL9svM9--`=WKZ 
z(w<9T{yGx^%z5+K;8XhidAE(0cRQKNk9ke`KgR9BIh*YS;tw4=EJc8$}S?Z24;l1cAOb zbii>3c8q8VLQpPt&p<}LuDRMJhxQ~ZBViQENmfS1d{J7BY}2?!{|60%-nA<~F9T>}itEzvlKg?8TnaGX$|yWJuF%N@!wSh!BOG_E$36FUurYB~C3B zD9W%>*z`IP1Vg*QsJS$QtWo$3c#RwbCuZj^=A&ojmAHyogb7wgaIQ?6HF}$Z??^d7*pD$q>|kKeimR zp>FXeC_myjh;wf@Vm8Qmw1D$yc!;Ndd)mZPpFM5ki3AXIYN!l5QF#(wVX0SnJcT@| zJcz-Q%B%7C-HqzG!6`g(@$_Zq29L9+geQbM8MJv?$5X}1apu66tL5dYc{wt3(Z92# z#^(j6=_^a_$MPh;y0M&LPa4bV_N1|J(;qX_n4e%z8uR1rN##zpCzT_|hQ`b^8Q2r~ z0*3JP6ZWLR+P&KpI5#-U-l^PE@^Vkk%bm%hv4~I=2;gQ^||R!ML)A683>KQ ztC`ATuXmg$0YN0vX&w221;omD^6(4$6mT5WmNM!qj5B74717RoR(uyf@b{kM_l(7iz>KQPqe8oqQiNPKx{=fMlBfiz-5sAFvV2|(+k9TtR8EAj9O!i6! zN_5m#*2!LU3MUlxf|J>kw^-7Bwb|2_USB3Nvq8$FXA+Yp*gUJPkdzsee&W1zOM5J} z6LCa)VVJX^&BP{}a-FJ#E?%uONcnOTgObau&FF1MtT#fTq zVZ}62=QljM9Qi^BZl=U80RpQEtN}^$E(eP!T_?Rh%GyC%wjt=k^T9|X(hy4RI2{vj zyscq26nyp*G@~KP)x7rbO)BGg>m7tA(}{#}*JYikYJN!dreC%Bg3h8Z*!g1M z?P#8R#XhiCxwWrX7Z_~QsaQFj?b`WNAN1Iu7c<#zi6BSDQ-yd6>5&ydWQHf1wg8hR zUsWT-Ac*Uve6yS5?Xa8CqaNpH=V%@^`J)AkX;5ZPcZ#l-d^EFM$e1%RC&!EQ>z8tm^`ggo25-;Mbc##{+llbcRs6-n# zmecLY@gmK_@ghArUZf|-i}d7p5l=|1o)^)B=S6yQyvPQNTjDGlcjca9Pny(|?Mda% zggm6lv-DTN+aD<%&D&H;wKK$Gf^q(~7NMKsP57SU8vNeDT*$%~piePvPgYrs%d&r4 zo~V+rOEWN4C`fWn=}Ls5!CJIEt7h5|ksn?g>dVMtpXeYbKyaPXSIT9%fJ8LIE<7>f zNVKzvqnWjqsSPR6Dzjg8(T3!T)Zs{EDJZo&BN^jp2Xt?A$C2!`3IY^Jt2rsdzf)%k?N{OM4s7k)^Po)o&A|w(zFaV1Vos(?_e3Db!?@ibSRkv2$ zAQ>iTn>?PaAHNuoAv~FkNxN#gV+}a{89_ZFt$+~>g$-y05VHm&RzPP=@nHig1eZ*~ zZ~_a*tXUx~3#4Gv(l=EUN~&L$tcqx}S5obKj)eimQdiAa?2sAZ=(~wRDc`**ZsN zgzdMEaE?sY%w?Y=%klDP0|lj}9 zdxkmt9&e9)+~R;UwD`EiH1kzk<7wa<{@e4@%H8%nfAuq)Z-2+XVEG`M;vCkzSdugw z>9M`|UEMeR$S8Q>^N5hL>6YL6(3d3-Y1K(j;s%$(T6{D)~}_8 zjQ0avSR7M#%Y7fsyG0o6hps;jX?1<`hlY_Vgjw((-4}kL^Xon%TwR~`-~IN+`fM;X zx&C>6g#Yc<fb&1-XGm{4-Lz0 zPy!od;yF^IsqDtXj7vP(ZDSNn=Q+5BEf}q zC=5PeP-gJymCW30GIL*^;ly34V;N5RWbf$_{sn1f?>Q``@T9T8aO1==794%%%Yv%; zTx^?5@^$Yqm#>qh1-W8066D@+RJqu`x9t2FSW%87Ut1BAuO;8|zq0gPj8-JriO)aL zEA@+vxD@5AT|XLUfS%H6FF$!;VfYiU`%eqf+iWstU?>m z_raP~xEmAc2=51Gr6@Bl4Gd!U5ThJm0;Nnuv8e%Rwf<}Cdox+P=4EXp?L(1i>?ZFF zM$I3=pye&UE^3#ve+bu?4l*y}Kem+{WKS~j5INux2?c!qtv+$Bg$R0wLnm%iO^Lnl zzwL%EoWC``nQCh0Mm2L|G?AQ^%iz??gm8Yygz&4Q1ycAUKHR{cFCmvCVGET6v9XsZmgPi18z$04|6X_Z>kb787N_QW!intV{Goq5UsXP=DpYq>k~4~|5sXWA zzrcy`GH+Q@KiY6*CD!>0re;rbOT~dC%UCPH|N{K-h&rA6q))Gnb-x}Y6{L0aVwhqcDhOZcSM=kui-eWspNhJ;ik>shjHls z8cQ)0wU=H}^arIL{)6}5|AW_m;`-KCN8#h^5kZS&mLO&pa4-)V}xJ7?$j8;(9SS_Ik9_uR-^-Tn>Kkvp>&7?Tc>6zOWMbjEFf!D{_ATsIDFZctPINeAlR=e{tAd+K zKJxpn-=4?_jKf%SQYg`;Y)P`|?$9q6=2f6!#bmE}z|Y`&zwx`D`Mck}J)gnnf9;Of ze*V7S{^&Iviqtj|R`a*OEJdBGVLVDuO?HDbb$YxMMKWbdK@ARb{B%@D6bAQv2W9h9 zNo^+JSjVcRsyWns8n@dtw7Nsld{oxSpuLTLNr=RnuT&|Uk7O)xUU<{WESD~nGV{Vm zC$+Lj3uV596UBvltWNgle9s^4-%=8gAM;zG3oa}b?KezCOYJ~pp()_*gH0~Q!J=!~=i=jfqMHHUWuAFth6Z))@W_HYd8(lOr174T`Pcgu4w{RDe@bE@6>x8$7 z)kAR23eHrs8nRK$Vtl+%aO(+;f+wtk_G6pX5h-@13fe=u?A-h8a6y~%ALY$^5#{ZG z`Ad6}s)G);5h=n{FyuK{iDlns*IDFxw7rS-WdjBAFvqydU4{zy@A(#wJYdJrCDKzQ zi+;c}*~#b8oipQWIbb0-z77;@K9=#7W7%cn>n3seAB1tlyv#rB=7n=yLQiM<2%mDp zP0G%mq0;8FP8low!uc(}eEWC4hh}H#yM4eY1}kP~0l2zq60a^@`1}OP&7r?)IxWW; zl5oMo4DsH&iJN0ru60zED4V!~zP;c=v>jFf$>zpCD?|aFvm9VP}l|@@>}BE zIzr5CHX(aU=J%$X-p_f+50z2w>E3^S6ewp$-<@RH4doE@!MV39kfw7lb>f?b%p}{; zj%nfb(ClO}vRp4Yj6WypY&7Flo0z#}s^Ua?}ud>5e?olJQkO%dnHIXVU)3Qw6f#8Qt+nE!9nFxd=4BIL! 
z%8)l>TMV!(D`SGSWR&k{34o(~mT{+(0MM<2wSTDpCl_2~p^rq!#7J$iqGU99G&~2A z2meC)WF%j+sJcU!s}7rz2{8k`?7W!aC%PMinbsAV zOTbd{bUMQ(kNzkdlFp4`Mk+Ey!azj3T|OHb5S6Q5QC(=7$v0(9)L;d8IeUo&CgrR; z)j&O(=evYzC^fZ&0ADB$w2~pHrQ{iO0o`PGe!E0`9tEdsyEbA0$8&5!m}fUe1j+Lq zz#mK2cXHv|7yhqG3;*c3jtI5(#rxqr5Q+Q@%XwyKgaZwmtb;K!Tl>e92Yq5O`nnuG zR1$xTR&}GV?yQXMOzqa3#4hASD2od?(bq(0QE5d>l;H~q+AIHtuOBpuWFQt9N!}K^ z9rRerQ12}P@u)~AmM_bSHGH34)8Z+qjxrkS;{5e}K9}_GJ6gLaRrS%^9aPn*b(yMG zWt^|(#`%iR*>Fs#YIP~9`ni5fdM8=iIeJQgR11aeBMLvPbvN-RwgXxj(#0U1c~(2c z9}DX)3qNA*hF>LA^VkG`RJ;Z;?ic-)6qA*bxnhBp8s3tC!dN49Ypzms=BMca!O?iM!`KO7g?U3GUWk+*;(N-U;E z3VES13yCgG-0NpWazv+nO2v>iAvo(Ch2#oAtYtt?^VzElfH#;ck@D_#AqbWgXY=Y9 z2sriW@@-3lRWz~TSNkk!Z~KB#xU45oL2~_tU9nB$=@=G00V&QAG&?X+BTa+kom3Ib zcj$wa*C9b>qsWI1N<)+7kPnsEZ;^F0b)&qFJG!o8IZ1r6j^?@0SX?@Jw~pna-PX|% z%g5-um5j2Bn~uDW4wTGYR)l4(VT z%}*vJ#v>$fwTx&uXOW>}G_L%ZAiE*c-??y^En|)cymoE+yXi`HO3+eQAj?uL>*i$8 z*a_yIcCLc&BN6?nbky zq4Ry6SC*miiI(!4Q~Puh9dWPF$e>7>@7dj1mUBDM-s^-%cW0S9?IJ;3Ha#z7!Y6AHNvhD!UvTptH)#IEz>d3_7%vTPp2#vPA4M zGXSwmj%WkAPmpqgqCLur;NXt6NhgUgBz3f-_XrZ+G6a=RK?(4_Grlo141p}>LX%-g z&0{ilt1F?TV|9mhTv?^D01AIP)hR9R4Z2Rptz#uyTzlsiSKKV)TP*I;nw^~Wv#Z&O zjuhsAedQ&g4*uwbZL~f4>B8b&Y0g)4G_;~4uGWT3GyW_r2j6uFvy%KX>?J|+^Zfq> zGE>!Gcnn%Q@0hjpv)0nFnBfRB+xBX>HFjYwZ$Ro3z$m7usSrQKj?q z*N&2TJOLksBgFGL9FdF-svw;$hAJ_di*^q(7W+@dO#47*aXN}bbqE9{I7FNLrlJ(h z2-tEXNlGI*I$X`UFs>(QKd>DPKc4BV`h?$a@Ypkw9XLBY-jtR2@_0i3B{nL zL&<~d*tIlL)0|uug;)mvJR=_m6QE)W zmOm1_>ZtzKd{O`Xqvc-NqDI@BNQMK>>jYmsr5*(JwX(>+{ZDa`|8u^`tB!BCuAQO0 zeeDe8wzV^qcjZI5ZnZ@qOtnq|dY5wxTVRZIut&`%w)yZ4;}5B06C6%)2c?ZoY@%=2 z1md`uMgrkSbwiUf$%!WfID|EcP%qan*amdc77J{GUA2le92Lg$7c`W7%QKVg@FuPf zXNEfxGr{WMnTb2RY0RW1X2R{`k|Z-`;?$XAp@96N@#Ixs`)Pn{((kH|vAIn!?$4y;%nI=N7f1nid4oF zZ-z9AFct~r8hgved1<}}TdTlIw7s;h#)H?AH67hRu3 zU<+*?WUrfrDMdDrw10x}nv;oVPb}HsqjA)O-`Pp6xsEM2FM;F8rY_n1QD>bsiE&}l zSu(LGpI{|VI$L(M;Zv!(@Fkyg)_Gf+B%U>2BnmJ|XOBnr3JL71m$u^mm7j__x!X>x z-F9-lPOSAhNgc5OWVwA-mpXpoBy|*5ChIO{WR4>dhP`KEFvLi0=Q5?(o|GM%Uwq$# zPbFnM$GJ$~KHK1IeOi0rlNv79IQM~1l9v?stOaiLAZVCzJxC6R3Ud+&6#^~?uMT`s zyg}q3h-ZShg+#e{Lml)|$VP<()B}?)J1R+1Yi9G;!F(-L^y9u>C+xxc;~_h)iOgqT zJV_)^ZaUGPe{c%VE7-if5BCMSU*$sqw+g3SenEZPa;1-DMG?w@(4P?GXn1JNJD+# zBUhG;5D`c-;^Hv|HAk4gz|b0Ob|E-|@oG_egK2Z_Bv<4s)=L>X`Z-)(l zKns#>f&5@hy;JJRUrH9J|3eto=6D&YT9c9DPe_AH?U)RmCNH9<7RM?ypJ*+(<2Ure z34C)dOk}G^Cm2*n*>h=wiw@|sd4)eQPU_7|H4X#@#Fm4XQjde`76q+?>>8>1?wOGo z5>btY$V|x({>?#Mi>)?NR|aoG8ryGyWx$L8hd?5i75!R9E!U=waQQYR)6k85ug@zE zMAO!PhGs;1fhITY*2FJsa5WAwt2ga-Z*SNwz)UoICK!DK$>@gN^p(t}!Ak_9?Us!o z;_3%241kPSFx9pN)UV39OH+3qQkXakkjBE7Ev%YDW%}kUiIxETW1Qul^ma>h9YZ;v z&zpAF>z{er#Vq@wp#I|Vv4hV#XWPi=f!XQtsa-Rzy{)O)k=fS%+4LFdfvMKPeXU)y zt+909%=q5Xnak5lT9*gAN5?05H9MV-PEJnm8l7#W`$jKr?GJ+HeKh%!rJ5WYA4{jE zXVdZ7){H97?wOfBaPgjWe{0wD)R=l0pBif&q`&EXt(nQoY4gn=3+gXes?A-~2PR#s zyQ!Q`Uo^p((^rnq?kNvDo!*^3o94bob1z$}IVL?j;^#ZUeA8Es?zb`Tnx2_a%cHv) ze9GhW%vid2dTe~RhWWvd2lXc$m>Rukl37isHWWcCXx_sw|6mIQ`@kob!T=lDIgBi8SyR4@spnWBP?cfKWZ(M9BTiOLE`nM?^C9XVVY1~R ztpJbSdrwe*>C#|S%PYgCAJFK(K60Zg!>I%N#@HJG^Ru4{>OcJoHskoz#p$j+tzDPw zKd|?l=WOZRF3;?zFqcHvpYp=l*53W;{^=QQ+KVnPFV49t#0{7AYWl+GJ(p)Lfv&HA zdeN5NaTRnuQo(oe&|qTpHga)mirqR1a7tvrI-wwU`_{KTe|!dt0qI}^+r}F$nib8^ z<{O<&CtITw+q@~=!F%GS)tMbihS#^{WjeplGpbIP4@ zr*Hk2cg(ad8=pR~fAaElx0?0C9^Vf!YNh)}_qI|-r|0RZR#s$mW^x?N_j4`o;xQZ?B~_1p?RpG5hO?;J$dK1qRJ9 zi_xic^EoAWbJuJaC?D7dwauaYWtHh5Wo?6D99$P$|EVR0?8x}VEsdGGWb3EBytNnN zkwM!4=`i3fe*I|&F;=EEv!6v+6o0w%v;NN)bX#5z)y+w?x?})c08<~#_R-k%0nzAU zLN-)eJZ^ZYub-Ws1ly-}wbJLN)a81R`i6|+!55~-4!{HKC$9FWKb^shxF$>cWKr_Q z#UJ&a6eeg5aPvpK&vu~uPKMq3A znjFG-j?4t#C=!l~;KGk+_f2#_?+lVr*~3%%dXKWN%UhauRXv|p+F|zPs1_y 
[GIT binary patch data (base85 payload) elided]

diff --git a/unittests/test-contracts/test_api_multi_index/test_multi_index.cpp b/unittests/test-contracts/test_api_multi_index/test_multi_index.cpp
deleted file mode 100644
index f0a3bd76384..00000000000
--- a/unittests/test-contracts/test_api_multi_index/test_multi_index.cpp
+++ /dev/null
@@ -1,957 +0,0 @@
-/**
- * @file
- * @copyright defined in eos/LICENSE
- */
-#include 
-#include 
-
-#include 
-
-#include "../test_api/test_api.hpp"
-
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wunused-parameter"
-
-namespace _test_multi_index {
-
-   using eosio::checksum256;
-
-   struct record_idx64 {
-      uint64_t id;
-      uint64_t sec;
-
-      auto primary_key()const { return id; }
-      uint64_t get_secondary()const { return sec; }
-
-      EOSLIB_SERIALIZE( record_idx64, (id)(sec) )
-   };
-
-   struct record_idx128 {
-      uint64_t id;
-      uint128_t sec;
-
-      auto primary_key()const { return id; }
-      uint128_t get_secondary()const { return sec; }
-
-      EOSLIB_SERIALIZE( record_idx128, (id)(sec) )
-   };
-
-   struct record_idx256 {
-      uint64_t id;
-      checksum256 sec;
-
-      auto primary_key()const { return id; }
-      const checksum256& get_secondary()const { return sec; }
-
-      EOSLIB_SERIALIZE( record_idx256, (id)(sec) )
-   };
-
-   struct record_idx_double {
-      uint64_t id;
-      double sec;
-
-      auto primary_key()const { return id; }
-      double get_secondary()const { return sec; }
-
-      EOSLIB_SERIALIZE( record_idx_double, (id)(sec) )
-   };
-
-   struct record_idx_long_double {
-      uint64_t id;
-      long double sec;
-
-      auto primary_key()const { return id; }
-      long double get_secondary()const { return sec; }
-
-      EOSLIB_SERIALIZE( record_idx_long_double, (id)(sec) )
-   };
-
-   template
-   void idx64_store_only( uint64_t receiver )
-   {
-      using namespace eosio;
-
-      typedef record_idx64 record;
-
-      record records[] = {{265, "alice"_n.value},
-                          {781, "bob"_n.value},
-                          
{234, "charlie"_n.value}, - {650, "allyson"_n.value}, - {540, "bob"_n.value}, - {976, "emily"_n.value}, - {110, "joe"_n.value} - }; - size_t num_records = sizeof(records)/sizeof(records[0]); - - // Construct and fill table using multi_index - multi_index> - > table( name{receiver}, receiver ); - - auto payer = receiver; - - for ( size_t i = 0; i < num_records; ++i ) { - table.emplace( name{payer}, [&](auto& r) { - r.id = records[i].id; - r.sec = records[i].sec; - }); - } - } - - template - void idx64_check_without_storing( uint64_t receiver ) - { - using namespace eosio; - - typedef record_idx64 record; - - // Load table using multi_index - multi_index> - > table( name{receiver}, receiver ); - - auto payer = receiver; - - auto secondary_index = table.template get_index<"bysecondary"_n>(); - - // find by primary key - { - auto itr = table.find(999); - eosio_assert( itr == table.end(), "idx64_general - table.find() of non-existing primary key" ); - - itr = table.find(976); - eosio_assert( itr != table.end() && itr->sec == "emily"_n.value, "idx64_general - table.find() of existing primary key" ); - - ++itr; - eosio_assert( itr == table.end(), "idx64_general - increment primary iterator to end" ); - - itr = table.require_find(976); - eosio_assert( itr != table.end() && itr->sec == "emily"_n.value, "idx64_general - table.require_find() of existing primary key" ); - - ++itr; - eosio_assert( itr == table.end(), "idx64_general - increment primary iterator to end" ); - } - - // iterate forward starting with charlie - { - auto itr = secondary_index.lower_bound("charlie"_n.value); - eosio_assert( itr != secondary_index.end() && itr->sec == "charlie"_n.value, "idx64_general - secondary_index.lower_bound()" ); - - ++itr; - eosio_assert( itr != secondary_index.end() && itr->id == 976 && itr->sec == "emily"_n.value, "idx64_general - increment secondary iterator" ); - - ++itr; - eosio_assert( itr != secondary_index.end() && itr->id == 110 && itr->sec == "joe"_n.value, "idx64_general - increment secondary iterator again" ); - - ++itr; - eosio_assert( itr == secondary_index.end(), "idx64_general - increment secondary iterator to end" ); - } - - // iterate backward starting with second bob - { - auto pk_itr = table.find(781); - eosio_assert( pk_itr != table.end() && pk_itr->sec == "bob"_n.value, "idx64_general - table.find() of existing primary key" ); - - auto itr = secondary_index.iterator_to(*pk_itr); - eosio_assert( itr->id == 781 && itr->sec == "bob"_n.value, "idx64_general - iterator to existing object in secondary index" ); - - --itr; - eosio_assert( itr != secondary_index.end() && itr->id == 540 && itr->sec == "bob"_n.value, "idx64_general - decrement secondary iterator" ); - - --itr; - eosio_assert( itr != secondary_index.end() && itr->id == 650 && itr->sec == "allyson"_n.value, "idx64_general - decrement secondary iterator again" ); - - --itr; - eosio_assert( itr == secondary_index.begin() && itr->id == 265 && itr->sec == "alice"_n.value, "idx64_general - decrement secondary iterator to beginning" ); - } - - // iterate backward starting with emily using const_reverse_iterator - { - std::array pks{{976, 234, 781, 540, 650, 265}}; - - auto pk_itr = pks.begin(); - - auto itr = --std::make_reverse_iterator( secondary_index.find("emily"_n.value) ); - for( ; itr != secondary_index.rend(); ++itr ) { - eosio_assert( pk_itr != pks.end(), "idx64_general - unexpected continuation of secondary index in reverse iteration" ); - eosio_assert( *pk_itr == itr->id, "idx64_general - primary key mismatch in reverse 
iteration" ); - ++pk_itr; - } - eosio_assert( pk_itr == pks.end(), "idx64_general - did not iterate backwards through secondary index properly" ); - } - - // require_find secondary key - { - auto itr = secondary_index.require_find("bob"_n.value); - eosio_assert( itr != secondary_index.end(), "idx64_general - require_find must never return end iterator" ); - eosio_assert( itr->id == 540, "idx64_general - require_find test" ); - - ++itr; - eosio_assert( itr->id == 781, "idx64_general - require_find secondary key test" ); - } - - // modify and erase - { - const uint64_t ssn = 421; - auto new_person = table.emplace( name{payer}, [&](auto& r) { - r.id = ssn; - r.sec = "bob"_n.value; - }); - - table.modify( new_person, name{payer}, [&](auto& r) { - r.sec = "billy"_n.value; - }); - - auto itr1 = table.find(ssn); - eosio_assert( itr1 != table.end() && itr1->sec == "billy"_n.value, "idx64_general - table.modify()" ); - - table.erase(itr1); - auto itr2 = table.find(ssn); - eosio_assert( itr2 == table.end(), "idx64_general - table.erase()" ); - } - } - - template - void idx64_require_find_fail(uint64_t receiver) - { - using namespace eosio; - typedef record_idx64 record; - - // Load table using multi_index - multi_index table( name{receiver}, receiver ); - - // make sure we're looking at the right table - auto itr = table.require_find( 781, "table not loaded" ); - eosio_assert( itr != table.end(), "table not loaded" ); - - // require_find by primary key - // should fail - itr = table.require_find(999); - } - - template - void idx64_require_find_fail_with_msg(uint64_t receiver) - { - using namespace eosio; - typedef record_idx64 record; - - // Load table using multi_index - multi_index table( name{receiver}, receiver ); - - // make sure we're looking at the right table - auto itr = table.require_find( 234, "table not loaded" ); - eosio_assert( itr != table.end(), "table not loaded" ); - - // require_find by primary key - // should fail - itr = table.require_find( 335, "unable to find primary key in require_find" ); - } - - template - void idx64_require_find_sk_fail(uint64_t receiver) - { - using namespace eosio; - typedef record_idx64 record; - - // Load table using multi_index - multi_index>> table( eosio::name{receiver}, receiver ); - auto sec_index = table.template get_index<"bysecondary"_n>(); - - // make sure we're looking at the right table - auto itr = sec_index.require_find( "charlie"_n.value, "table not loaded" ); - eosio_assert( itr != sec_index.end(), "table not loaded" ); - - // require_find by secondary key - // should fail - itr = sec_index.require_find("bill"_n.value); - } - - template - void idx64_require_find_sk_fail_with_msg(uint64_t receiver) - { - using namespace eosio; - typedef record_idx64 record; - - // Load table using multi_index - multi_index>> table( eosio::name{receiver}, receiver ); - auto sec_index = table.template get_index<"bysecondary"_n>(); - - // make sure we're looking at the right table - auto itr = sec_index.require_find( "emily"_n.value, "table not loaded" ); - eosio_assert( itr != sec_index.end(), "table not loaded" ); - - // require_find by secondary key - // should fail - itr = sec_index.require_find( "frank"_n.value, "unable to find sec key" ); - } - - template - void idx128_store_only(uint64_t receiver) - { - using namespace eosio; - - typedef record_idx128 record; - - - // Construct and fill table using multi_index - multi_index> - > table( name{receiver}, receiver ); - - auto payer = receiver; - - for (uint64_t i = 0; i < 5; ++i) { - table.emplace( 
name{payer}, [&](auto& r) { - r.id = i; - r.sec = static_cast(1ULL << 63) * i; - }); - } - } - - template - void idx128_check_without_storing( uint64_t receiver ) - { - using namespace eosio; - - typedef record_idx128 record; - - // Load table using multi_index - multi_index> - > table( name{receiver}, receiver ); - - auto payer = receiver; - - auto secondary_index = table.template get_index<"bysecondary"_n>(); - - table.modify( table.get(3), name{payer}, [&](auto& r) { - r.sec *= 2; - }); - - { - uint128_t multiplier = 1ULL << 63; - - auto itr = secondary_index.begin(); - eosio_assert( itr->primary_key() == 0 && itr->get_secondary() == multiplier*0, "idx128_general - secondary key sort" ); - ++itr; - eosio_assert( itr->primary_key() == 1 && itr->get_secondary() == multiplier*1, "idx128_general - secondary key sort" ); - ++itr; - eosio_assert( itr->primary_key() == 2 && itr->get_secondary() == multiplier*2, "idx128_general - secondary key sort" ); - ++itr; - eosio_assert( itr->primary_key() == 4 && itr->get_secondary() == multiplier*4, "idx128_general - secondary key sort" ); - ++itr; - eosio_assert( itr->primary_key() == 3 && itr->get_secondary() == multiplier*6, "idx128_general - secondary key sort" ); - ++itr; - eosio_assert( itr == secondary_index.end(), "idx128_general - secondary key sort" ); - } - - } - - template - auto idx64_table( uint64_t receiver ) - { - using namespace eosio; - typedef record_idx64 record; - // Load table using multi_index - multi_index> - > table( name{receiver}, receiver ); - return table; - } - -} /// _test_multi_index - -void test_multi_index::idx64_store_only( uint64_t receiver, uint64_t code, uint64_t action ) -{ - _test_multi_index::idx64_store_only<"indextable1"_n.value>(receiver); -} - -void test_multi_index::idx64_check_without_storing( uint64_t receiver, uint64_t code, uint64_t action ) -{ - _test_multi_index::idx64_check_without_storing<"indextable1"_n.value>(receiver); -} - -void test_multi_index::idx64_general( uint64_t receiver, uint64_t code, uint64_t action ) -{ - _test_multi_index::idx64_store_only<"indextable2"_n.value>(receiver); - _test_multi_index::idx64_check_without_storing<"indextable2"_n.value>(receiver); -} - -void test_multi_index::idx128_store_only( uint64_t receiver, uint64_t code, uint64_t action ) -{ - _test_multi_index::idx128_store_only<"indextable3"_n.value>(receiver); -} - -void test_multi_index::idx128_check_without_storing( uint64_t receiver, uint64_t code, uint64_t action ) -{ - _test_multi_index::idx128_check_without_storing<"indextable3"_n.value>(receiver); -} - -void test_multi_index::idx128_general( uint64_t receiver, uint64_t code, uint64_t action ) -{ - _test_multi_index::idx128_store_only<"indextable4"_n.value>(receiver); - _test_multi_index::idx128_check_without_storing<"indextable4"_n.value>(receiver); -} - -void test_multi_index::idx64_require_find_fail( uint64_t receiver, uint64_t code, uint64_t action ) -{ - _test_multi_index::idx64_store_only<"indextable5"_n.value>(receiver); - _test_multi_index::idx64_require_find_fail<"indextable5"_n.value>(receiver); -} - -void test_multi_index::idx64_require_find_fail_with_msg( uint64_t receiver, uint64_t code, uint64_t action ) -{ - _test_multi_index::idx64_store_only<"indextablea"_n.value>(receiver); // Making the name smaller fixes this? - _test_multi_index::idx64_require_find_fail_with_msg<"indextablea"_n.value>(receiver); // Making the name smaller fixes this? 
-} - -void test_multi_index::idx64_require_find_sk_fail( uint64_t receiver, uint64_t code, uint64_t action ) -{ - _test_multi_index::idx64_store_only<"indextableb"_n.value>(receiver); - _test_multi_index::idx64_require_find_sk_fail<"indextableb"_n.value>(receiver); -} - -void test_multi_index::idx64_require_find_sk_fail_with_msg( uint64_t receiver, uint64_t code, uint64_t action ) -{ - _test_multi_index::idx64_store_only<"indextablec"_n.value>(receiver); - _test_multi_index::idx64_require_find_sk_fail_with_msg<"indextablec"_n.value>(receiver); -} - -void test_multi_index::idx128_autoincrement_test( uint64_t receiver, uint64_t code, uint64_t action ) -{ - using namespace eosio; - using namespace _test_multi_index; - - typedef record_idx128 record; - - const uint64_t table_name = "autoinctbl1"_n.value; - auto payer = receiver; - - multi_index> - > table( name{receiver}, receiver ); - - for( int i = 0; i < 5; ++i ) { - table.emplace( name{payer}, [&](auto& r) { - r.id = table.available_primary_key(); - r.sec = 1000 - static_cast(r.id); - }); - } - - uint64_t expected_key = 4; - for( const auto& r : table.get_index<"bysecondary"_n>() ) - { - eosio_assert( r.primary_key() == expected_key, "idx128_autoincrement_test - unexpected primary key" ); - --expected_key; - } - eosio_assert( expected_key == static_cast(-1), "idx128_autoincrement_test - did not iterate through secondary index properly" ); - - auto itr = table.find(3); - eosio_assert( itr != table.end(), "idx128_autoincrement_test - could not find object with primary key of 3" ); - - // The modification below would trigger an error: - /* - table.modify(itr, payer, [&](auto& r) { - r.id = 100; - }); - */ - - table.emplace( name{payer}, [&](auto& r) { - r.id = 100; - r.sec = itr->sec; - }); - table.erase(itr); - - eosio_assert( table.available_primary_key() == 101, "idx128_autoincrement_test - next_primary_key was not correct after record modify" ); -} - -void test_multi_index::idx128_autoincrement_test_part1( uint64_t receiver, uint64_t code, uint64_t action ) -{ - using namespace eosio; - using namespace _test_multi_index; - - typedef record_idx128 record; - - const uint64_t table_name = "autoinctbl2"_n.value; - auto payer = receiver; - - multi_index> - > table( name{receiver}, receiver ); - - for( int i = 0; i < 3; ++i ) { - table.emplace( name{payer}, [&](auto& r) { - r.id = table.available_primary_key(); - r.sec = 1000 - static_cast(r.id); - }); - } - - table.erase(table.get(0)); - - uint64_t expected_key = 2; - for( const auto& r : table.get_index<"bysecondary"_n>() ) - { - eosio_assert( r.primary_key() == expected_key, "idx128_autoincrement_test_part1 - unexpected primary key" ); - --expected_key; - } - eosio_assert( expected_key == 0, "idx128_autoincrement_test_part1 - did not iterate through secondary index properly" ); - -} - -void test_multi_index::idx128_autoincrement_test_part2( uint64_t receiver, uint64_t code, uint64_t action ) -{ - using namespace eosio; - using namespace _test_multi_index; - - typedef record_idx128 record; - - const uint64_t table_name = "autoinctbl2"_n.value; - auto payer = receiver; - - { - multi_index> - > table( name{receiver}, receiver ); - - eosio_assert( table.available_primary_key() == 3, "idx128_autoincrement_test_part2 - did not recover expected next primary key" ); - } - - multi_index> - > table( name{receiver}, receiver ); - - table.emplace( name{payer}, [&](auto& r) { - r.id = 0; - r.sec = 1000; - }); - // Done this way to make sure that table._next_primary_key is not incorrectly set to 1. 
- - for( int i = 3; i < 5; ++i ) { - table.emplace( name{payer}, [&](auto& r) { - auto itr = table.available_primary_key(); - r.id = itr; - r.sec = 1000 - static_cast(r.id); - }); - } - - uint64_t expected_key = 4; - for( const auto& r : table.get_index<"bysecondary"_n>() ) - { - eosio_assert( r.primary_key() == expected_key, "idx128_autoincrement_test_part2 - unexpected primary key" ); - --expected_key; - } - eosio_assert( expected_key == static_cast(-1), "idx128_autoincrement_test_part2 - did not iterate through secondary index properly" ); - - auto itr = table.find(3); - eosio_assert( itr != table.end(), "idx128_autoincrement_test_part2 - could not find object with primary key of 3" ); - - table.emplace( name{payer}, [&](auto& r) { - r.id = 100; - r.sec = itr->sec; - }); - table.erase(itr); - - eosio_assert( table.available_primary_key() == 101, "idx128_autoincrement_test_part2 - next_primary_key was not correct after record update" ); -} - -void test_multi_index::idx256_general( uint64_t receiver, uint64_t code, uint64_t action ) -{ - using namespace eosio; - using namespace _test_multi_index; - - typedef record_idx256 record; - - const uint64_t table_name = "indextable5"_n.value; - auto payer = receiver; - - print("Testing checksum256 secondary index.\n"); - multi_index> - > table( name{receiver}, receiver ); - - auto fourtytwo = checksum256::make_from_word_sequence( 0ULL, 0ULL, 0ULL, 42ULL ); - //auto onetwothreefour = checksum256::make_from_word_sequence(1ULL, 2ULL, 3ULL, 4ULL); - auto onetwothreefour = checksum256{std::array{ {0,1, 0,2, 0,3, 0,4} }}; - - table.emplace( name{payer}, [&](auto& o) { - o.id = 1; - o.sec = fourtytwo; - }); - - table.emplace( name{payer}, [&](auto& o) { - o.id = 2; - o.sec = onetwothreefour; - }); - - table.emplace( name{payer}, [&](auto& o) { - o.id = 3; - o.sec = fourtytwo; - }); - - auto e = table.find(2); - - print("Items sorted by primary key:\n"); - for( const auto& item : table ) { - print(" ID=", item.primary_key(), ", secondary=", item.sec, "\n"); - } - - { - auto itr = table.begin(); - eosio_assert( itr->primary_key() == 1 && itr->get_secondary() == fourtytwo, "idx256_general - primary key sort" ); - ++itr; - eosio_assert( itr->primary_key() == 2 && itr->get_secondary() == onetwothreefour, "idx256_general - primary key sort" ); - ++itr; - eosio_assert( itr->primary_key() == 3 && itr->get_secondary() == fourtytwo, "idx256_general - primary key sort" ); - ++itr; - eosio_assert( itr == table.end(), "idx256_general - primary key sort" ); - } - - auto secidx = table.get_index<"bysecondary"_n>(); - - auto lower1 = secidx.lower_bound( checksum256::make_from_word_sequence(0ULL, 0ULL, 0ULL, 40ULL) ); - print("First entry with a secondary key of at least 40 has ID=", lower1->id, ".\n"); - eosio_assert( lower1->id == 1, "idx256_general - lower_bound" ); - - auto lower2 = secidx.lower_bound( checksum256::make_from_word_sequence(0ULL, 0ULL, 0ULL, 50ULL) ); - print("First entry with a secondary key of at least 50 has ID=", lower2->id, ".\n"); - eosio_assert( lower2->id == 2, "idx256_general - lower_bound" ); - - if( table.iterator_to(*lower2) == e ) { - print("Previously found entry is the same as the one found earlier with a primary key value of 2.\n"); - } - - print("Items sorted by secondary key (checksum256):\n"); - for( const auto& item : secidx ) { - print(" ID=", item.primary_key(), ", secondary=", item.sec, "\n"); - } - - { - auto itr = secidx.begin(); - eosio_assert( itr->primary_key() == 1, "idx256_general - secondary key sort" ); - ++itr; - 
eosio_assert( itr->primary_key() == 3, "idx256_general - secondary key sort" ); - ++itr; - eosio_assert( itr->primary_key() == 2, "idx256_general - secondary key sort" ); - ++itr; - eosio_assert( itr == secidx.end(), "idx256_general - secondary key sort" ); - } - - auto upper = secidx.upper_bound( checksum256{std::array{{0, 0, 0, 42}}} ); - - print("First entry with a secondary key greater than 42 has ID=", upper->id, ".\n"); - eosio_assert( upper->id == 2, "idx256_general - upper_bound" ); - eosio_assert( upper->id == secidx.get(onetwothreefour).id, "idx256_general - secondary index get" ); - - print("Removed entry with ID=", lower1->id, ".\n"); - secidx.erase( lower1 ); - - print("Items reverse sorted by primary key:\n"); - for( const auto& item : boost::make_iterator_range(table.rbegin(), table.rend()) ) { - print(" ID=", item.primary_key(), ", secondary=", item.sec, "\n"); - } - - { - auto itr = table.rbegin(); - eosio_assert( itr->primary_key() == 3 && itr->get_secondary() == fourtytwo, "idx256_general - primary key sort after remove" ); - ++itr; - eosio_assert( itr->primary_key() == 2 && itr->get_secondary() == onetwothreefour, "idx256_general - primary key sort after remove" ); - ++itr; - eosio_assert( itr == table.rend(), "idx256_general - primary key sort after remove" ); - } -} - -void test_multi_index::idx_double_general( uint64_t receiver, uint64_t code, uint64_t action ) -{ - using namespace eosio; - using namespace _test_multi_index; - - typedef record_idx_double record; - - const uint64_t table_name = "floattable1"_n.value; - auto payer = receiver; - - print("Testing double secondary index.\n"); - multi_index> - > table( name{receiver}, receiver ); - - auto secidx = table.get_index<"bysecondary"_n>(); - - double tolerance = std::numeric_limits::epsilon(); - print("tolerance = ", tolerance, "\n"); - - for( uint64_t i = 1; i <= 10; ++i ) { - table.emplace( name{payer}, [&]( auto& o ) { - o.id = i; - o.sec = 1.0 / (i * 1000000.0); - }); - } - - double expected_product = 1.0 / 1000000.0; - print( "expected_product = ", expected_product, "\n" ); - - uint64_t expected_key = 10; - for( const auto& obj : secidx ) { - eosio_assert( obj.primary_key() == expected_key, "idx_double_general - unexpected primary key" ); - - double prod = obj.sec * obj.id; - - print(" id = ", obj.id, ", sec = ", obj.sec, ", sec * id = ", prod, "\n"); - - eosio_assert( std::abs(prod - expected_product) <= tolerance, - "idx_double_general - product of secondary and id not equal to expected_product within tolerance" ); - - --expected_key; - } - eosio_assert( expected_key == 0, "idx_double_general - did not iterate through secondary index properly" ); - - { - auto itr = secidx.lower_bound( expected_product / 5.5 ); - eosio_assert( std::abs(1.0 / itr->sec - 5000000.0) <= tolerance, "idx_double_general - lower_bound" ); - - itr = secidx.upper_bound( expected_product / 5.0 ); - eosio_assert( std::abs(1.0 / itr->sec - 4000000.0) <= tolerance, "idx_double_general - upper_bound" ); - - } -} - -void test_multi_index::idx_long_double_general( uint64_t receiver, uint64_t code, uint64_t action ) -{ - using namespace eosio; - using namespace _test_multi_index; - - typedef record_idx_long_double record; - - const uint64_t table_name = "floattable2"_n.value; - auto payer = receiver; - - print("Testing long double secondary index.\n"); - multi_index> - > table( name{receiver}, receiver ); - - auto secidx = table.get_index<"bysecondary"_n>(); - - long double tolerance = std::min( static_cast(std::numeric_limits::epsilon()), - 
std::numeric_limits::epsilon() * 1e7l ); - print("tolerance = ", tolerance, "\n"); - - long double f = 1.0l; - for( uint64_t i = 1; i <= 10; ++i, f += 1.0l ) { - table.emplace( name{payer}, [&](auto& o) { - o.id = i; - o.sec = 1.0l / (i * 1000000.0l); - }); - } - - long double expected_product = 1.0l / 1000000.0l; - print( "expected_product = ", expected_product, "\n" ); - - uint64_t expected_key = 10; - for( const auto& obj : secidx ) { - eosio_assert( obj.primary_key() == expected_key, "idx_long_double_general - unexpected primary key" ); - - long double prod = obj.sec * obj.id; - - print(" id = ", obj.id, ", sec = ", obj.sec, ", sec * id = ", prod, "\n"); - - eosio_assert( std::abs(prod - expected_product) <= tolerance, - "idx_long_double_general - product of secondary and id not equal to expected_product within tolerance" ); - - --expected_key; - } - eosio_assert( expected_key == 0, "idx_long_double_general - did not iterate through secondary index properly" ); - - { - auto itr = secidx.lower_bound( expected_product / 5.5l ); - eosio_assert( std::abs(1.0l / itr->sec - 5000000.0l) <= tolerance, "idx_long_double_general - lower_bound" ); - - itr = secidx.upper_bound( expected_product / 5.0l ); - eosio_assert( std::abs(1.0l / itr->sec - 4000000.0l) <= tolerance, "idx_long_double_general - upper_bound" ); - - } -} - -void test_multi_index::idx64_pk_iterator_exceed_end( uint64_t receiver, uint64_t code, uint64_t action ) -{ - auto table = _test_multi_index::idx64_table<"indextable1"_n.value, "bysecondary"_n.value>(receiver); - auto end_itr = table.end(); - // Should fail - ++end_itr; -} - -void test_multi_index::idx64_sk_iterator_exceed_end( uint64_t receiver, uint64_t code, uint64_t action ) -{ - auto table = _test_multi_index::idx64_table<"indextable1"_n.value, "bysecondary"_n.value>(receiver); - auto end_itr = table.get_index<"bysecondary"_n>().end(); - // Should fail - ++end_itr; -} - -void test_multi_index::idx64_pk_iterator_exceed_begin( uint64_t receiver, uint64_t code, uint64_t action ) -{ - auto table = _test_multi_index::idx64_table<"indextable1"_n.value, "bysecondary"_n.value>(receiver); - auto begin_itr = table.begin(); - // Should fail - --begin_itr; -} - -void test_multi_index::idx64_sk_iterator_exceed_begin( uint64_t receiver, uint64_t code, uint64_t action ) -{ - auto table = _test_multi_index::idx64_table<"indextable1"_n.value, "bysecondary"_n.value>(receiver); - auto begin_itr = table.get_index<"bysecondary"_n>().begin(); - // Should fail - --begin_itr; -} - -void test_multi_index::idx64_pass_pk_ref_to_other_table( uint64_t receiver, uint64_t code, uint64_t action ) -{ - auto table1 = _test_multi_index::idx64_table<"indextable1"_n.value, "bysecondary"_n.value>(receiver); - auto table2 = _test_multi_index::idx64_table<"indextable2"_n.value, "bysecondary"_n.value>(receiver); - - auto table1_pk_itr = table1.find(781); - eosio_assert( table1_pk_itr != table1.end() && table1_pk_itr->sec == "bob"_n.value, "idx64_pass_pk_ref_to_other_table - table.find() of existing primary key" ); - - // Should fail - table2.iterator_to(*table1_pk_itr); -} - -void test_multi_index::idx64_pass_sk_ref_to_other_table( uint64_t receiver, uint64_t code, uint64_t action ) -{ - auto table1 = _test_multi_index::idx64_table<"indextable1"_n.value, "bysecondary"_n.value>(receiver); - auto table2 = _test_multi_index::idx64_table<"indextable2"_n.value, "bysecondary"_n.value>(receiver); - - auto table1_pk_itr = table1.find(781); - eosio_assert( table1_pk_itr != table1.end() && table1_pk_itr->sec == 
"bob"_n.value, "idx64_pass_sk_ref_to_other_table - table.find() of existing primary key" ); - - auto table2_sec_index = table2.get_index<"bysecondary"_n>(); - // Should fail - table2_sec_index.iterator_to(*table1_pk_itr); -} - -void test_multi_index::idx64_pass_pk_end_itr_to_iterator_to( uint64_t receiver, uint64_t code, uint64_t action ) -{ - auto table = _test_multi_index::idx64_table<"indextable1"_n.value, "bysecondary"_n.value>(receiver); - auto end_itr = table.end(); - // Should fail - table.iterator_to(*end_itr); -} - -void test_multi_index::idx64_pass_pk_end_itr_to_modify( uint64_t receiver, uint64_t code, uint64_t action ) -{ - auto table = _test_multi_index::idx64_table<"indextable1"_n.value, "bysecondary"_n.value>(receiver); - auto end_itr = table.end(); - - auto payer = receiver; - // Should fail - table.modify( end_itr, eosio::name{payer}, [](auto&){} ); -} - -void test_multi_index::idx64_pass_pk_end_itr_to_erase( uint64_t receiver, uint64_t code, uint64_t action ) -{ - auto table = _test_multi_index::idx64_table<"indextable1"_n.value, "bysecondary"_n.value>(receiver); - auto end_itr = table.end(); - - // Should fail - table.erase(end_itr); -} - -void test_multi_index::idx64_pass_sk_end_itr_to_iterator_to( uint64_t receiver, uint64_t code, uint64_t action ) -{ - auto table = _test_multi_index::idx64_table<"indextable1"_n.value, "bysecondary"_n.value>(receiver); - auto sec_index = table.get_index<"bysecondary"_n>(); - auto end_itr = sec_index.end(); - - // Should fail - sec_index.iterator_to(*end_itr); -} - -void test_multi_index::idx64_pass_sk_end_itr_to_modify( uint64_t receiver, uint64_t code, uint64_t action ) -{ - auto table = _test_multi_index::idx64_table<"indextable1"_n.value, "bysecondary"_n.value>(receiver); - auto sec_index = table.get_index<"bysecondary"_n>(); - auto end_itr = sec_index.end(); - - auto payer = receiver; - // Should fail - sec_index.modify( end_itr, eosio::name{payer}, [](auto&){} ); -} - - -void test_multi_index::idx64_pass_sk_end_itr_to_erase( uint64_t receiver, uint64_t code, uint64_t action ) -{ - auto table = _test_multi_index::idx64_table<"indextable1"_n.value, "bysecondary"_n.value>(receiver); - auto sec_index = table.get_index<"bysecondary"_n>(); - auto end_itr = sec_index.end(); - - // Should fail - sec_index.erase(end_itr); -} - -void test_multi_index::idx64_modify_primary_key( uint64_t receiver, uint64_t code, uint64_t action ) -{ - auto table = _test_multi_index::idx64_table<"indextable1"_n.value, "bysecondary"_n.value>(receiver); - - auto pk_itr = table.find(781); - eosio_assert( pk_itr != table.end() && pk_itr->sec == "bob"_n.value, "idx64_modify_primary_key - table.find() of existing primary key" ); - - auto payer = receiver; - - // Should fail - table.modify( pk_itr, eosio::name{payer}, [](auto& r){ - r.id = 1100; - }); -} - -void test_multi_index::idx64_run_out_of_avl_pk( uint64_t receiver, uint64_t code, uint64_t action ) -{ - auto table = _test_multi_index::idx64_table<"indextable1"_n.value, "bysecondary"_n.value>(receiver); - - auto pk_itr = table.find(781); - eosio_assert( pk_itr != table.end() && pk_itr->sec == "bob"_n.value, "idx64_modify_primary_key - table.find() of existing primary key" ); - - auto payer = receiver; - - table.emplace( eosio::name{payer}, [&](auto& r) { - r.id = static_cast(-4); - r.sec = "alice"_n.value; - }); - eosio_assert( table.available_primary_key() == static_cast(-3), "idx64_run_out_of_avl_pk - incorrect available primary key" ); - - table.emplace( eosio::name{payer}, [&](auto& r) { - r.id = 
table.available_primary_key(); - r.sec = "bob"_n.value; - }); - - // Should fail - table.available_primary_key(); -} - -void test_multi_index::idx64_sk_cache_pk_lookup( uint64_t receiver, uint64_t code, uint64_t action ) -{ - auto table = _test_multi_index::idx64_table<"indextable1"_n.value, "bysecondary"_n.value>(receiver); - - auto sec_index = table.get_index<"bysecondary"_n>(); - auto sk_itr = sec_index.find("bob"_n.value); - eosio_assert( sk_itr != sec_index.end() && sk_itr->id == 540, "idx64_sk_cache_pk_lookup - sec_index.find() of existing secondary key" ); - - auto pk_itr = table.iterator_to(*sk_itr); - auto prev_itr = --pk_itr; - eosio_assert( prev_itr->id == 265 && prev_itr->sec == "alice"_n.value, "idx64_sk_cache_pk_lookup - previous record" ); -} - -void test_multi_index::idx64_pk_cache_sk_lookup( uint64_t receiver, uint64_t code, uint64_t action ) -{ - auto table = _test_multi_index::idx64_table<"indextable1"_n.value, "bysecondary"_n.value>(receiver); - - - auto pk_itr = table.find(540); - eosio_assert( pk_itr != table.end() && pk_itr->sec == "bob"_n.value, "idx64_pk_cache_sk_lookup - table.find() of existing primary key" ); - - auto sec_index = table.get_index<"bysecondary"_n>(); - auto sk_itr = sec_index.iterator_to(*pk_itr); - auto next_itr = ++sk_itr; - eosio_assert( next_itr->id == 781 && next_itr->sec == "bob"_n.value, "idx64_pk_cache_sk_lookup - next record" ); -} - -#pragma GCC diagnostic pop diff --git a/unittests/test-contracts/test_ram_limit/CMakeLists.txt b/unittests/test-contracts/test_ram_limit/CMakeLists.txt index 2520fb59bcc..ebd35ffd749 100644 --- a/unittests/test-contracts/test_ram_limit/CMakeLists.txt +++ b/unittests/test-contracts/test_ram_limit/CMakeLists.txt @@ -1,4 +1,4 @@ -if( ${eosio.cdt_FOUND} ) +if( EOSIO_COMPILE_TEST_CONTRACTS ) message(STATUS "Not building test_ram_limit, read README.txt in eos/unittests/test-contracts/test_ram_limit") configure_file( ${CMAKE_CURRENT_SOURCE_DIR}/test_ram_limit.wasm ${CMAKE_CURRENT_BINARY_DIR}/test_ram_limit.wasm COPYONLY ) configure_file( ${CMAKE_CURRENT_SOURCE_DIR}/test_ram_limit.abi ${CMAKE_CURRENT_BINARY_DIR}/test_ram_limit.abi COPYONLY ) diff --git a/unittests/wasm_tests.cpp b/unittests/wasm_tests.cpp index ce351e2042c..5335ee037c4 100644 --- a/unittests/wasm_tests.cpp +++ b/unittests/wasm_tests.cpp @@ -91,11 +91,11 @@ BOOST_FIXTURE_TEST_CASE( basic_test, TESTER ) try { trx.sign( get_private_key( N(asserter), "active" ), control->get_chain_id() ); auto result = push_transaction( trx ); BOOST_CHECK_EQUAL(result->receipt->status, transaction_receipt::executed); - BOOST_CHECK_EQUAL(result->action_traces.size(), 1); + BOOST_CHECK_EQUAL(result->action_traces.size(), 1u); BOOST_CHECK_EQUAL(result->action_traces.at(0).receipt.receiver.to_string(), name(N(asserter)).to_string() ); BOOST_CHECK_EQUAL(result->action_traces.at(0).act.account.to_string(), name(N(asserter)).to_string() ); BOOST_CHECK_EQUAL(result->action_traces.at(0).act.name.to_string(), name(N(procassert)).to_string() ); - BOOST_CHECK_EQUAL(result->action_traces.at(0).act.authorization.size(), 1 ); + BOOST_CHECK_EQUAL(result->action_traces.at(0).act.authorization.size(), 1u ); BOOST_CHECK_EQUAL(result->action_traces.at(0).act.authorization.at(0).actor.to_string(), name(N(asserter)).to_string() ); BOOST_CHECK_EQUAL(result->action_traces.at(0).act.authorization.at(0).permission.to_string(), name(config::active_name).to_string() ); no_assert_id = trx.id(); @@ -510,69 +510,6 @@ BOOST_FIXTURE_TEST_CASE(misaligned_tests, tester ) try { 
check_aligned(misaligned_const_ref_wast); } FC_LOG_AND_RETHROW() -// test weighted cpu limit -BOOST_FIXTURE_TEST_CASE(weighted_cpu_limit_tests, tester ) try { -// TODO Increase the robustness of this test. - resource_limits_manager mgr = control->get_mutable_resource_limits_manager(); - create_accounts( {N(f_tests)} ); - create_accounts( {N(acc2)} ); - bool pass = false; - - std::string code = R"=====( -(module - (import "env" "require_auth" (func $require_auth (param i64))) - (import "env" "eosio_assert" (func $eosio_assert (param i32 i32))) - (table 0 anyfunc) - (memory $0 1) - (export "apply" (func $apply)) - (func $i64_trunc_u_f64 (param $0 f64) (result i64) (i64.trunc_u/f64 (get_local $0))) - (func $test (param $0 i64)) - (func $apply (param $0 i64)(param $1 i64)(param $2 i64) - )====="; - for (int i = 0; i < 1024; ++i) { - code += "(call $test (call $i64_trunc_u_f64 (f64.const 1)))\n"; - } - code += "))"; - - produce_blocks(1); - set_code(N(f_tests), code.c_str()); - produce_blocks(10); - - mgr.set_account_limits(N(f_tests), -1, -1, 1); - int count = 0; - while (count < 4) { - signed_transaction trx; - - for (int i = 0; i < 2; ++i) { - action act; - act.account = N(f_tests); - act.name = N() + (i * 16); - act.authorization = vector{{N(f_tests),config::active_name}}; - trx.actions.push_back(act); - } - - set_transaction_headers(trx); - trx.sign(get_private_key( N(f_tests), "active" ), control->get_chain_id()); - - try { - push_transaction(trx, fc::time_point::maximum(), 0); - produce_block(); - BOOST_REQUIRE_EQUAL(true, chain_has_transaction(trx.id())); - pass = true; - count++; - } catch( eosio::chain::leeway_deadline_exception& ) { - BOOST_REQUIRE_EQUAL(count, 3); - break; - } - BOOST_REQUIRE_EQUAL(true, validate()); - - if (count == 2) { // add a big weight on acc2, making f_tests out of resource - mgr.set_account_limits(N(acc2), -1, -1, 100000000); - } - } - BOOST_REQUIRE_EQUAL(count, 3); -} FC_LOG_AND_RETHROW() - /** * Make sure WASM "start" method is used correctly */ diff --git a/unittests/whitelist_blacklist_tests.cpp b/unittests/whitelist_blacklist_tests.cpp index d0f57fcb61a..5df32c19f79 100644 --- a/unittests/whitelist_blacklist_tests.cpp +++ b/unittests/whitelist_blacklist_tests.cpp @@ -526,7 +526,7 @@ BOOST_AUTO_TEST_CASE( actor_blacklist_inline_deferred ) { try { auto num_deferred = tester1.chain->control->db().get_index().size(); - BOOST_REQUIRE_EQUAL(0, num_deferred); + BOOST_REQUIRE_EQUAL(0u, num_deferred); // Schedule a deferred transaction authorized by charlie@active tester1.chain->push_action( N(charlie), N(defercall), N(alice), mvo() @@ -537,14 +537,14 @@ BOOST_AUTO_TEST_CASE( actor_blacklist_inline_deferred ) { try { ); num_deferred = tester1.chain->control->db().get_index().size(); - BOOST_REQUIRE_EQUAL(1, num_deferred); + BOOST_REQUIRE_EQUAL(1u, num_deferred); // Do not allow that deferred transaction to retire yet tester1.chain->finish_block(); tester1.chain->produce_blocks(2, true); // Produce 2 empty blocks (other than onblock of course). 
num_deferred = tester1.chain->control->db().get_index().size();
-   BOOST_REQUIRE_EQUAL(1, num_deferred);
+   BOOST_REQUIRE_EQUAL(1u, num_deferred);
 
    c1.disconnect();
 
From 6261f53173a06593049204cfdedd0e02000fe9a0 Mon Sep 17 00:00:00 2001
From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com>
Date: Fri, 22 Feb 2019 13:31:25 -0500
Subject: [PATCH 019/680] appbase: Block (queue) exit signals during shutdown
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

When a shutdown signal is handled, the sig_set is 1) canceled and 2) destroyed. This means that signals are no longer handled by boost::asio and revert to default behavior. If someone accidentally hits ctrl-c a second time during a long shutdown, they run the risk of leaving the database dirty, as that second ctrl-c will be an instant kill.

This patch changes the behavior and keeps the sig_set active forever. This means that even after the first handled async_wait() on the sig_set (which starts an appbase quit), additional signals in the set are effectively "blocked" (not posix blocked, but blocked in the sense that they are consumed and queued by the sig_set that is no longer being async_wait()ed on).
---
 libraries/appbase | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/libraries/appbase b/libraries/appbase
index 2208d40578f..da4bf8cb324 160000
--- a/libraries/appbase
+++ b/libraries/appbase
@@ -1 +1 @@
-Subproject commit 2208d40578fb206978418c1df2bb8408ecef3fe7
+Subproject commit da4bf8cb324225b002b3105da42b62769da94ce9

From 559dbd6498e3090e1b2314ae8f0d01485dadfd3b Mon Sep 17 00:00:00 2001
From: Kevin Heifner
Date: Fri, 22 Feb 2019 12:48:41 -0600
Subject: [PATCH 020/680] Cleanup logging, add some additional logging on closing
---
 plugins/net_plugin/net_plugin.cpp | 32 +++++++++++++++++---------------
 1 file changed, 17 insertions(+), 15 deletions(-)

diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp
index 43cb6326fc3..bf14a2bc040 100644
--- a/plugins/net_plugin/net_plugin.cpp
+++ b/plugins/net_plugin/net_plugin.cpp
@@ -1529,7 +1529,8 @@ namespace eosio {
    void sync_manager::recv_notice(const connection_ptr& c, const notice_message& msg) {
       fc_ilog(logger, "sync_manager got ${m} block notice",("m",modes_str(msg.known_blocks.mode)));
       if( msg.known_blocks.ids.size() > 1 ) {
-         fc_elog( logger, "Invalid notice_message, known_blocks.ids.size ${s}", ("s", msg.known_blocks.ids.size()) );
+         fc_elog( logger, "Invalid notice_message, known_blocks.ids.size ${s}, closing connection: ${p}",
+                  ("s", msg.known_blocks.ids.size())("p", c->peer_name()) );
          my_impl->close(c);
          return;
       }
@@ -1549,7 +1550,7 @@ namespace eosio {
 
    void sync_manager::rejected_block(const connection_ptr& c, uint32_t blk_num) {
       if (state != in_sync ) {
-         fc_ilog(logger, "block ${bn} not accepted from ${p}",("bn",blk_num)("p",c->peer_name()));
+         fc_wlog( logger, "block ${bn} not accepted from ${p}, closing connection", ("bn",blk_num)("p",c->peer_name()) );
          sync_last_requested_num = 0;
          source.reset();
          my_impl->close(c);
@@ -1561,7 +1562,8 @@ namespace eosio {
       fc_dlog(logger, "got block ${bn} from ${p}",("bn",blk_num)("p",c->peer_name()));
       if (state == lib_catchup) {
         if (blk_num != sync_next_expected_num) {
-            fc_ilog(logger, "expected block ${ne} but got ${bn}",("ne",sync_next_expected_num)("bn",blk_num));
+            fc_wlog( logger, "expected block ${ne} but got ${bn}, closing connection: ${p}",
+                     ("ne",sync_next_expected_num)("bn",blk_num)("p",c->peer_name()) );
             my_impl->close(c);
             return;
          }
@@ -2104,18 +2106,17 @@ 
namespace eosio { } } catch(const std::exception &ex) { - string pname = conn ? conn->peer_name() : "no connection name"; - fc_elog( logger, "Exception in handling read data from ${p} ${s}",("p",pname)("s",ex.what()) ); + fc_elog( logger, "Exception in handling read data from ${p}: ${s}", + ("p",conn->peer_name())("s",ex.what()) ); close( conn ); } catch(const fc::exception &ex) { - string pname = conn ? conn->peer_name() : "no connection name"; - fc_elog( logger, "Exception in handling read data ${s}", ("p",pname)("s",ex.to_string()) ); + fc_elog( logger, "Exception in handling read data from ${p}: ${s}", + ("p",conn->peer_name())("s",ex.to_string()) ); close( conn ); } catch (...) { - string pname = conn ? conn->peer_name() : "no connection name"; - fc_elog( logger, "Undefined exception hanlding the read data from connection ${p}",( "p",pname) ); + fc_elog( logger, "Undefined exception handling the read data from ${p}",( "p",conn->peer_name()) ); close( conn ); } }); @@ -2159,7 +2160,8 @@ namespace eosio { msg.visit( m ); } } catch( const fc::exception& e ) { - edump( (e.to_detail_string()) ); + fc_elog( logger, "Exception in handling message from ${p}: ${s}", + ("p", conn->peer_name())("s", e.to_detail_string()) ); close( conn ); return false; } @@ -2323,10 +2325,7 @@ namespace eosio { } void net_plugin_impl::handle_message(const connection_ptr& c, const go_away_message& msg) { - string rsn = reason_str( msg.reason ); - peer_wlog(c, "received go_away_message"); - fc_wlog( logger, "received a go away message from ${p}, reason = ${r}", - ("p", c->peer_name())("r",rsn) ); + peer_wlog(c, "received go_away_message, reason = ${r}", ("r",reason_str( msg.reason )) ); c->no_retry = msg.reason; if(msg.reason == duplicate ) { c->node_id = msg.node_id; @@ -2434,7 +2433,8 @@ namespace eosio { void net_plugin_impl::handle_message(const connection_ptr& c, const request_message& msg) { if( msg.req_blocks.ids.size() > 1 ) { - fc_elog( logger, "Invalid request_message, req_blocks.ids.size ${s}", ("s", msg.req_blocks.ids.size()) ); + fc_elog( logger, "Invalid request_message, req_blocks.ids.size ${s}, closing ${p}", + ("s", msg.req_blocks.ids.size())("p",c->peer_name()) ); close(c); return; } @@ -3090,6 +3090,7 @@ namespace eosio { fc_ilog( logger, "close ${s} connections",( "s",my->connections.size()) ); for( auto& con : my->connections ) { + fc_dlog( logger, "close: ${p}", ("p",con->peer_name()) ); my->close( con ); } my->connections.clear(); @@ -3129,6 +3130,7 @@ namespace eosio { for( auto itr = my->connections.begin(); itr != my->connections.end(); ++itr ) { if( (*itr)->peer_addr == host ) { (*itr)->reset(); + fc_ilog( logger, "disconnecting: ${p}", ("p", (*itr)->peer_name()) ); my->close(*itr); my->connections.erase(itr); return "connection removed"; From c864c5574a292a2ae7653627ba4dc26f2436701d Mon Sep 17 00:00:00 2001 From: NorseGaud Date: Fri, 22 Feb 2019 18:23:14 -0500 Subject: [PATCH 021/680] removal of comments --- scripts/eosio_build_darwin.sh | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/scripts/eosio_build_darwin.sh b/scripts/eosio_build_darwin.sh index a7ec32ff7de..753921beef3 100755 --- a/scripts/eosio_build_darwin.sh +++ b/scripts/eosio_build_darwin.sh @@ -138,11 +138,8 @@ if [ $COUNT -gt 1 ]; then [Nn]* ) echo "Proceeding without update!";; * ) echo "Please type 'y' for yes or 'n' for no."; exit;; esac - brew tap eosio/eosio # Required to install mongo-cxx-driver with static library + brew tap eosio/eosio printf "\\nInstalling Dependencies...\\n" - # Ignore cmake 
so we don't install a newer version. - # Build from source to use local cmake; see homebrew-eosio repo for examples - # DON'T INSTALL llvm@4 WITH --force! OIFS="$IFS" IFS=$',' for DEP in $DEPS; do From 08318b7b7979c84a54a6b688df7029e9679edbb3 Mon Sep 17 00:00:00 2001 From: arhag Date: Fri, 22 Feb 2019 20:03:39 -0500 Subject: [PATCH 022/680] update protocol-feature structs to use new fc::reflector_init; logging for recognized protocol features #6429 --- .../chain/protocol_feature_activation.hpp | 12 ++- .../eosio/chain/protocol_feature_manager.hpp | 8 +- libraries/chain/protocol_feature_manager.cpp | 2 +- plugins/chain_plugin/chain_plugin.cpp | 80 +++++++++++++++++-- 4 files changed, 88 insertions(+), 14 deletions(-) diff --git a/libraries/chain/include/eosio/chain/protocol_feature_activation.hpp b/libraries/chain/include/eosio/chain/protocol_feature_activation.hpp index b7e9b2ca5a0..03ab31be131 100644 --- a/libraries/chain/include/eosio/chain/protocol_feature_activation.hpp +++ b/libraries/chain/include/eosio/chain/protocol_feature_activation.hpp @@ -8,10 +8,20 @@ namespace eosio { namespace chain { -struct protocol_feature_activation { +struct protocol_feature_activation : fc::reflect_init { static constexpr uint16_t extension_id() { return 0; } static constexpr bool enforce_unique() { return true; } + protocol_feature_activation() = default; + + protocol_feature_activation( const vector& pf ) + :protocol_features( pf ) + {} + + protocol_feature_activation( vector&& pf ) + :protocol_features( std::move(pf) ) + {} + void reflector_init(); vector protocol_features; diff --git a/libraries/chain/include/eosio/chain/protocol_feature_manager.hpp b/libraries/chain/include/eosio/chain/protocol_feature_manager.hpp index 6eba182639b..9a9572c07bf 100644 --- a/libraries/chain/include/eosio/chain/protocol_feature_manager.hpp +++ b/libraries/chain/include/eosio/chain/protocol_feature_manager.hpp @@ -18,22 +18,22 @@ enum class builtin_protocol_feature_t : uint32_t { struct protocol_feature_subjective_restrictions { time_point earliest_allowed_activation_time; - bool preactivation_required = true; - bool enabled = true; + bool preactivation_required = false; + bool enabled = false; }; struct builtin_protocol_feature_spec { const char* codename = nullptr; digest_type description_digest; flat_set builtin_dependencies; - protocol_feature_subjective_restrictions subjective_restrictions; + protocol_feature_subjective_restrictions subjective_restrictions{time_point{}, true, true}; }; extern const std::unordered_map builtin_protocol_feature_codenames; const char* builtin_protocol_feature_codename( builtin_protocol_feature_t ); -class protocol_feature_base { +class protocol_feature_base : public fc::reflect_init { public: protocol_feature_base() = default; diff --git a/libraries/chain/protocol_feature_manager.cpp b/libraries/chain/protocol_feature_manager.cpp index 0c915e73295..110f70f75d6 100644 --- a/libraries/chain/protocol_feature_manager.cpp +++ b/libraries/chain/protocol_feature_manager.cpp @@ -83,7 +83,7 @@ namespace eosio { namespace chain { } void builtin_protocol_feature::reflector_init() { - //protocol_feature_base::reflector_init(); + protocol_feature_base::reflector_init(); for( const auto& p : builtin_protocol_feature_codenames ) { if( builtin_feature_codename.compare( p.second.codename ) == 0 ) { diff --git a/plugins/chain_plugin/chain_plugin.cpp b/plugins/chain_plugin/chain_plugin.cpp index eb941efe6db..7a678aad19e 100644 --- a/plugins/chain_plugin/chain_plugin.cpp +++ 
b/plugins/chain_plugin/chain_plugin.cpp @@ -334,9 +334,12 @@ void clear_directory_contents( const fc::path& p ) { optional read_builtin_protocol_feature( const fc::path& p ) { try { return fc::json::from_file( p ); + } catch( const protocol_feature_exception& e ) { + wlog( "problem encountered while reading '${path}':\n${details}", + ("path", p.generic_string())("details",e.to_detail_string()) ); } catch( ... ) { - return {}; } + return {}; } protocol_feature_manager initialize_protocol_features( const fc::path& p, bool populate_missing_builtins = true ) { @@ -358,6 +361,43 @@ protocol_feature_manager initialize_protocol_features( const fc::path& p, bool p directory_exists = false; } + auto log_recognized_protocol_feature = []( const builtin_protocol_feature& f, const digest_type& feature_digest ) { + if( f.subjective_restrictions.enabled ) { + if( f.subjective_restrictions.preactivation_required ) { + if( f.subjective_restrictions.earliest_allowed_activation_time == time_point{} ) { + ilog( "Support for builtin protocol feature '${codename}' (with digest of '${digest}') is enabled with preactivation required", + ("codename", builtin_protocol_feature_codename(f.get_codename())) + ("digest", feature_digest) + ); + } else { + ilog( "Support for builtin protocol feature '${codename}' (with digest of '${digest}') is enabled with preactivation required and with an earliest allowed activation time of ${earliest_time}", + ("codename", builtin_protocol_feature_codename(f.get_codename())) + ("digest", feature_digest) + ("earliest_time", f.subjective_restrictions.earliest_allowed_activation_time) + ); + } + } else { + if( f.subjective_restrictions.earliest_allowed_activation_time == time_point{} ) { + ilog( "Support for builtin protocol feature '${codename}' (with digest of '${digest}') is enabled without activation restrictions", + ("codename", builtin_protocol_feature_codename(f.get_codename())) + ("digest", feature_digest) + ); + } else { + ilog( "Support for builtin protocol feature '${codename}' (with digest of '${digest}') is enabled without preactivation required but with an earliest allowed activation time of ${earliest_time}", + ("codename", builtin_protocol_feature_codename(f.get_codename())) + ("digest", feature_digest) + ("earliest_time", f.subjective_restrictions.earliest_allowed_activation_time) + ); + } + } + } else { + ilog( "Recognized builtin protocol feature '${codename}' (with digest of '${digest}') but support for it is not enabled", + ("codename", builtin_protocol_feature_codename(f.get_codename())) + ("digest", feature_digest) + ); + } + }; + map found_builtin_protocol_features; map > builtin_protocol_features_to_add; // The bool in the pair is set to true if the builtin protocol feature has already been visited to add @@ -383,8 +423,10 @@ protocol_feature_manager initialize_protocol_features( const fc::path& p, bool p ("previous_file", res.first->second.generic_string()) ); + const auto feature_digest = f->digest(); + builtin_protocol_features_to_add.emplace( std::piecewise_construct, - std::forward_as_tuple( f->digest() ), + std::forward_as_tuple( feature_digest ), std::forward_as_tuple( *f, false ) ); } } @@ -392,7 +434,7 @@ protocol_feature_manager initialize_protocol_features( const fc::path& p, bool p // Add builtin protocol features to the protocol feature manager in the right order (to satisfy dependencies) using itr_type = map>::iterator; std::function add_protocol_feature = - [&pfm, &builtin_protocol_features_to_add, &visited_builtins, &add_protocol_feature]( const 
itr_type& itr ) -> void { + [&pfm, &builtin_protocol_features_to_add, &visited_builtins, &log_recognized_protocol_feature, &add_protocol_feature]( const itr_type& itr ) -> void { if( itr->second.second ) { return; } else { @@ -408,23 +450,32 @@ protocol_feature_manager initialize_protocol_features( const fc::path& p, bool p } pfm.add_feature( itr->second.first ); + + log_recognized_protocol_feature( itr->second.first, itr->first ); }; for( auto itr = builtin_protocol_features_to_add.begin(); itr != builtin_protocol_features_to_add.end(); ++itr ) { add_protocol_feature( itr ); } - auto output_protocol_feature = [&p]( const builtin_protocol_feature& f ) { + auto output_protocol_feature = [&p]( const builtin_protocol_feature& f, const digest_type& feature_digest ) { static constexpr int max_tries = 10; + string digest_string("-"); + { + fc::variant v; + to_variant( feature_digest, v ); + digest_string += v.get_string(); + } + string filename_base( "BUILTIN-" ); filename_base += builtin_protocol_feature_codename( f.get_codename() ); - string filename = filename_base + ".json"; + string filename = filename_base + digest_string + ".json"; int i = 0; for( ; i < max_tries && fc::exists( p / filename ); - ++i, filename = filename_base + "-" + std::to_string(i) + ".json" ) + ++i, filename = filename_base + digest_string + "-" + std::to_string(i) + ".json" ) ; EOS_ASSERT( i < max_tries, plugin_exception, @@ -433,10 +484,16 @@ protocol_feature_manager initialize_protocol_features( const fc::path& p, bool p ); fc::json::save_to_file( f, p / filename ); + + ilog( "Saved default specification for builtin protocol feature '${codename}' (with digest of '${digest}') to: ${path}", + ("codename", builtin_protocol_feature_codename(f.get_codename())) + ("digest", feature_digest) + ("path", (p / filename).generic_string()) + ); }; std::function add_missing_builtins = - [&pfm, &visited_builtins, &output_protocol_feature, &add_missing_builtins, populate_missing_builtins] + [&pfm, &visited_builtins, &output_protocol_feature, &log_recognized_protocol_feature, &add_missing_builtins, populate_missing_builtins] ( builtin_protocol_feature_t codename ) -> void { auto res = visited_builtins.emplace( codename ); if( !res.second ) return; @@ -446,10 +503,17 @@ protocol_feature_manager initialize_protocol_features( const fc::path& p, bool p add_missing_builtins( d ); } ); + if( !populate_missing_builtins ) + f.subjective_restrictions.enabled = false; + pfm.add_feature( f ); + const auto feature_digest = f.digest(); + + log_recognized_protocol_feature( f, feature_digest ); + if( populate_missing_builtins ) - output_protocol_feature( f ); + output_protocol_feature( f, feature_digest ); }; for( const auto& p : builtin_protocol_feature_codenames ) { From c1be4a2a57b56bcba8a4a3593f219a6957376a89 Mon Sep 17 00:00:00 2001 From: Jonathan Giszczak Date: Fri, 22 Feb 2019 19:26:02 -0600 Subject: [PATCH 023/680] Support --genesis-json argument with --snapshot argument Resolves #6767. 
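For operators, the resulting behavior is roughly the following (a hedged
sketch; the snapshot and genesis paths are hypothetical, only the --snapshot,
--genesis-json, and --genesis-timestamp flags come from this patch):

    nodeos --snapshot /path/to/snapshot.bin --genesis-json /path/to/genesis.json

Startup now succeeds only if the supplied genesis state compares equal to the
genesis state embedded in the snapshot; --genesis-timestamp remains
incompatible with --snapshot since the snapshot already carries that
information.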
--- plugins/chain_plugin/chain_plugin.cpp | 19 +++++++++++++++++-- 1 file changed, 17 insertions(+), 2 deletions(-) diff --git a/plugins/chain_plugin/chain_plugin.cpp b/plugins/chain_plugin/chain_plugin.cpp index 2551548f294..74bcccad6b1 100644 --- a/plugins/chain_plugin/chain_plugin.cpp +++ b/plugins/chain_plugin/chain_plugin.cpp @@ -560,10 +560,25 @@ void chain_plugin::plugin_initialize(const variables_map& options) { }); infile.close(); - EOS_ASSERT( options.count( "genesis-json" ) == 0 && options.count( "genesis-timestamp" ) == 0, + EOS_ASSERT( options.count( "genesis-timestamp" ) == 0, plugin_config_exception, - "--snapshot is incompatible with --genesis-json and --genesis-timestamp as the snapshot contains genesis information"); + "--snapshot is incompatible with --genesis-timestamp as the snapshot contains genesis information"); + if( options.count( "genesis-json" )) { + auto genesis_path = options.at( "genesis-json" ).as(); + if( genesis_path.is_relative() ) { + genesis_path = bfs::current_path() / genesis_path; + } + EOS_ASSERT( fc::is_regular_file( genesis_path ), + plugin_config_exception, + "Specified genesis file '${genesis}' does not exist.", + ("genesis", genesis_path.generic_string())); + auto genesis_file = fc::json::from_file( genesis_path ).as(); + EOS_ASSERT( my->chain_config->genesis == genesis_file, plugin_config_exception, + "Genesis state provided via command line arguments does not match the existing genesis state in the snapshot. " + "It is not necessary to provide a genesis state argument when loading a snapshot." + ); + } auto shared_mem_path = my->chain_config->state_dir / "shared_memory.bin"; EOS_ASSERT( !fc::exists(shared_mem_path), plugin_config_exception, From a0c8b247ab65b45416cb746107c09f8a9d836e4f Mon Sep 17 00:00:00 2001 From: Ruslan Salikhov Date: Mon, 25 Feb 2019 00:06:54 +0500 Subject: [PATCH 024/680] Docker: Add missed build deps and packages deps for libusb/libcurl/pkgconfig --- Docker/Dockerfile | 2 +- Docker/builder/Dockerfile | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Docker/Dockerfile b/Docker/Dockerfile index 6cce1a12bf4..74da4edf1ec 100644 --- a/Docker/Dockerfile +++ b/Docker/Dockerfile @@ -11,7 +11,7 @@ RUN git clone -b $branch https://github.com/EOSIO/eos.git --recursive \ FROM ubuntu:18.04 -RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get -y install openssl ca-certificates && rm -rf /var/lib/apt/lists/* +RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get -y install openssl ca-certificates libusb-1.0 libcurl3-gnutls && rm -rf /var/lib/apt/lists/* COPY --from=builder /usr/local/lib/* /usr/local/lib/ COPY --from=builder /tmp/build/bin /opt/eosio/bin COPY --from=builder /eos/Docker/config.ini / diff --git a/Docker/builder/Dockerfile b/Docker/builder/Dockerfile index cac09937cd0..11493039a10 100644 --- a/Docker/builder/Dockerfile +++ b/Docker/builder/Dockerfile @@ -13,7 +13,7 @@ RUN echo "deb http://apt.llvm.org/bionic/ llvm-toolchain-bionic main" >> /etc/ap && apt-get update \ && DEBIAN_FRONTEND=noninteractive apt-get install -y git-core automake autoconf libtool build-essential pkg-config libtool \ mpi-default-dev libicu-dev python-dev python3-dev libbz2-dev zlib1g-dev libssl-dev libgmp-dev \ - clang-4.0 lldb-4.0 lld-4.0 llvm-4.0-dev libclang-4.0-dev ninja-build \ + clang-4.0 lldb-4.0 lld-4.0 llvm-4.0-dev libclang-4.0-dev ninja-build libusb-1.0-0-dev libcurl4-gnutls-dev pkg-config \ && rm -rf /var/lib/apt/lists/* RUN update-alternatives --install /usr/bin/clang clang /usr/lib/llvm-4.0/bin/clang 
400 \

From ad6bdf828433f1103949a8bb8200eca30af2e7ae Mon Sep 17 00:00:00 2001
From: Kevin Heifner
Date: Mon, 25 Feb 2019 08:54:10 -0600
Subject: [PATCH 025/680] Check for default transaction id, which is possible
 if the file/string does not actually contain a transaction

---
 programs/cleos/main.cpp | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/programs/cleos/main.cpp b/programs/cleos/main.cpp
index feef29cbfd9..5ab286cf86a 100644
--- a/programs/cleos/main.cpp
+++ b/programs/cleos/main.cpp
@@ -1311,7 +1311,12 @@ struct get_transaction_id_subcommand {
       try {
          auto trx_var = json_from_file_or_string(trx_to_check);
          auto trx = trx_var.as<signed_transaction>();
-         std::cout << string(trx.id()) << std::endl;
+         transaction_id_type id = trx.id();
+         if( id == transaction().id() ) {
+            std::cerr << "file/string does not represent a transaction" << std::endl;
+         } else {
+            std::cout << string( id ) << std::endl;
+         }
       } EOS_RETHROW_EXCEPTIONS(transaction_type_exception, "Fail to parse transaction JSON '${data}'", ("data",trx_to_check))
    });
 }

From 1fd25de0bda38136f1048c4f11aca9be10c9ca6c Mon Sep 17 00:00:00 2001
From: arhag
Date: Mon, 25 Feb 2019 12:20:46 -0500
Subject: [PATCH 026/680] enum hash function needs to be explicit to work with
 the old gcc compiler of Ubuntu 16.04 #6429

---
 .../eosio/chain/protocol_feature_manager.hpp | 17 ++++++++++++++++-
 libraries/chain/protocol_feature_manager.cpp |  2 +-
 2 files changed, 17 insertions(+), 2 deletions(-)

diff --git a/libraries/chain/include/eosio/chain/protocol_feature_manager.hpp b/libraries/chain/include/eosio/chain/protocol_feature_manager.hpp
index 9a9572c07bf..b41ed966517 100644
--- a/libraries/chain/include/eosio/chain/protocol_feature_manager.hpp
+++ b/libraries/chain/include/eosio/chain/protocol_feature_manager.hpp
@@ -29,7 +29,22 @@ struct builtin_protocol_feature_spec {
    protocol_feature_subjective_restrictions subjective_restrictions{time_point{}, true, true};
 };

-extern const std::unordered_map<builtin_protocol_feature_t, builtin_protocol_feature_spec> builtin_protocol_feature_codenames;
+template <typename T>
+struct enum_hash
+{
+   static_assert( std::is_enum<T>::value, "enum_hash can only be used on enumeration types" );
+
+   using underlying_type = typename std::underlying_type<T>::type;
+
+   std::size_t operator()(T t) const
+   {
+      return std::hash<underlying_type>{}( static_cast<underlying_type>(t) );
+   }
+};
+
+// enum_hash needed to support old gcc compiler of Ubuntu 16.04
+
+extern const std::unordered_map<builtin_protocol_feature_t, builtin_protocol_feature_spec, enum_hash<builtin_protocol_feature_t>> builtin_protocol_feature_codenames;

 const char* builtin_protocol_feature_codename( builtin_protocol_feature_t );

diff --git a/libraries/chain/protocol_feature_manager.cpp b/libraries/chain/protocol_feature_manager.cpp
index 110f70f75d6..799ab99bab4 100644
--- a/libraries/chain/protocol_feature_manager.cpp
+++ b/libraries/chain/protocol_feature_manager.cpp
@@ -11,7 +11,7 @@

 namespace eosio { namespace chain {

-   const std::unordered_map<builtin_protocol_feature_t, builtin_protocol_feature_spec>
+   const std::unordered_map<builtin_protocol_feature_t, builtin_protocol_feature_spec, enum_hash<builtin_protocol_feature_t>>
    builtin_protocol_feature_codenames =
       boost::assign::map_list_of
          ( builtin_protocol_feature_t::preactivate_feature, {

From b90ec71deb791bd936bef30b4a474f6a04ffdf04 Mon Sep 17 00:00:00 2001
From: Jonathan Giszczak
Date: Mon, 25 Feb 2019 12:45:38 -0600
Subject: [PATCH 027/680] GCC defect fixes.
https://gcc.gnu.org/bugzilla/show_bug.cgi?id=66297 https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67274 --- libraries/chain/controller.cpp | 2 +- plugins/net_plugin/net_plugin.cpp | 2 +- plugins/state_history_plugin/state_history_plugin.cpp | 4 ++-- plugins/wallet_plugin/yubihsm_wallet.cpp | 6 +++--- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp index 20f5478a079..677f7a0ab41 100644 --- a/libraries/chain/controller.cpp +++ b/libraries/chain/controller.cpp @@ -206,7 +206,7 @@ struct controller_impl { SET_APP_HANDLER( eosio, eosio, canceldelay ); fork_db.irreversible.connect( [&]( auto b ) { - on_irreversible(b); + this->on_irreversible(b); // gcc defect https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67274 }); } diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index bf14a2bc040..e4adc0dd6ac 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -686,7 +686,7 @@ namespace eosio { chain_plugin* chain_plug = nullptr; - constexpr auto stage_str(stages s ); + constexpr static auto stage_str(stages s); public: explicit sync_manager(uint32_t span); diff --git a/plugins/state_history_plugin/state_history_plugin.cpp b/plugins/state_history_plugin/state_history_plugin.cpp index 49c47041e3d..cc238618079 100644 --- a/plugins/state_history_plugin/state_history_plugin.cpp +++ b/plugins/state_history_plugin/state_history_plugin.cpp @@ -320,7 +320,7 @@ struct state_history_plugin_impl : std::enable_shared_from_thisdo_accept(); }); // gcc defect https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67274 return; } catch_and_log([&] { @@ -328,7 +328,7 @@ struct state_history_plugin_impl : std::enable_shared_from_thisstart(std::move(*socket)); }); - catch_and_log([&] { do_accept(); }); + catch_and_log([&] { this->do_accept(); }); }); } diff --git a/plugins/wallet_plugin/yubihsm_wallet.cpp b/plugins/wallet_plugin/yubihsm_wallet.cpp index 5676089c0e1..ff562006e53 100644 --- a/plugins/wallet_plugin/yubihsm_wallet.cpp +++ b/plugins/wallet_plugin/yubihsm_wallet.cpp @@ -133,9 +133,9 @@ struct yubihsm_wallet_impl { yh_cmd resp_cmd; size_t resp_sz = 1; if(yh_send_secure_msg(session, YHC_ECHO, &data, 1, &resp_cmd, &resp, &resp_sz)) - lock(); + this->lock(); // gcc defect https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67274 else - prime_keepalive_timer(); + this->prime_keepalive_timer(); }); } @@ -269,4 +269,4 @@ optional yubihsm_wallet::try_sign_digest(const digest_type diges return my->try_sign_digest(digest, public_key); } -}} \ No newline at end of file +}} From bf4b1f2d4d6bfa3c5e345670f34658a98bb8e4e3 Mon Sep 17 00:00:00 2001 From: Bucky Kittinger Date: Mon, 25 Feb 2019 15:01:22 -0500 Subject: [PATCH 028/680] Remove *.cmake from .gitignore Don't know how this crept in. 
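For context, gitignore negation allows a scoped version of this rule (the form
a later commit in this series adopts):

    *.cmake
    !CMakeModules/*.cmake

Here the second line re-includes the checked-in CMakeModules scripts that the
blanket *.cmake pattern would otherwise hide.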
--- .gitignore | 1 - 1 file changed, 1 deletion(-) diff --git a/.gitignore b/.gitignore index e7a67332996..b6e828d489a 100644 --- a/.gitignore +++ b/.gitignore @@ -8,7 +8,6 @@ *.s *.dot *.abi.hpp -*.cmake *.ninja \#* \.#* From fb5bce93e384abce6fa0ed4792318165e353161e Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Mon, 25 Feb 2019 14:01:46 -0600 Subject: [PATCH 029/680] Update help text to refer to current location of transaction.hpp --- programs/cleos/help_text.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/programs/cleos/help_text.cpp b/programs/cleos/help_text.cpp index b1f9161a54d..0f46f33114f 100644 --- a/programs/cleos/help_text.cpp +++ b/programs/cleos/help_text.cpp @@ -145,7 +145,7 @@ const char* error_advice_authority_type_exception = R"=====(Ensure that your aut )====="; const char* error_advice_action_type_exception = R"=====(Ensure that your action JSON follows the contract's abi!)====="; const char* error_advice_transaction_type_exception = R"=====(Ensure that your transaction JSON follows the right transaction format! -You can refer to contracts/eosiolib/transaction.hpp for reference)====="; +You can refer to eosio.cdt/libraries/eosiolib/transaction.hpp for reference)====="; const char* error_advice_abi_type_exception = R"=====(Ensure that your abi JSON follows the following format! { "types" : [{ "new_type_name":"type_name", "type":"type_name" }], From 0f7709fc28f50005ab7bc24bbb4cb39006c85374 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Mon, 25 Feb 2019 14:02:49 -0600 Subject: [PATCH 030/680] Add automatic handling of action.data if action.hex_data is available. Added better error messages for common mistakes. --- programs/cleos/main.cpp | 48 +++++++++++++++++++++++++++++++++++------ 1 file changed, 42 insertions(+), 6 deletions(-) diff --git a/programs/cleos/main.cpp b/programs/cleos/main.cpp index 5ab286cf86a..bfceb4b40cc 100644 --- a/programs/cleos/main.cpp +++ b/programs/cleos/main.cpp @@ -1309,13 +1309,49 @@ struct get_transaction_id_subcommand { get_transaction_id->set_callback([&] { try { - auto trx_var = json_from_file_or_string(trx_to_check); - auto trx = trx_var.as(); - transaction_id_type id = trx.id(); - if( id == transaction().id() ) { - std::cerr << "file/string does not represent a transaction" << std::endl; + fc::variant trx_var = json_from_file_or_string(trx_to_check); + if( trx_var.is_object() ) { + fc::variant_object& vo = trx_var.get_object(); + // if actions.data & actions.hex_data provided, use the hex_data since only currently support unexploded data + if( vo.contains("actions") ) { + if( vo["actions"].is_array() ) { + fc::mutable_variant_object mvo = vo; + fc::variants& action_variants = mvo["actions"].get_array(); + for( auto& action_v : action_variants ) { + if( !action_v.is_object() ) { + std::cerr << "Empty 'action' in transaction" << endl; + return; + } + fc::variant_object& action_vo = action_v.get_object(); + if( action_vo.contains( "data" ) && action_vo.contains( "hex_data" ) ) { + fc::mutable_variant_object maction_vo = action_vo; + maction_vo["data"] = maction_vo["hex_data"]; + action_vo = maction_vo; + vo = mvo; + } else if( action_vo.contains( "data" ) ) { + if( !action_vo["data"].is_string() ) { + std::cerr << "get transaction_id only supports un-exploded 'data' (hex form)" << std::endl; + return; + } + } + } + } else { + std::cerr << "transaction json 'actions' is not an array" << std::endl; + return; + } + } else { + std::cerr << "transaction json does not include 'actions'" << std::endl; + return; + } + 
auto trx = trx_var.as<signed_transaction>();
+            transaction_id_type id = trx.id();
+            if( id == transaction().id() ) {
+               std::cerr << "file/string does not represent a transaction" << std::endl;
+            } else {
+               std::cout << string( id ) << std::endl;
+            }
          } else {
-            std::cout << string( id ) << std::endl;
+            std::cerr << "file/string does not represent a transaction" << std::endl;
          }
       } EOS_RETHROW_EXCEPTIONS(transaction_type_exception, "Fail to parse transaction JSON '${data}'", ("data",trx_to_check))
    });
 }

From 9940b2c352210f5be5498aec437e64edb993e412 Mon Sep 17 00:00:00 2001
From: arhag
Date: Mon, 25 Feb 2019 15:54:46 -0500
Subject: [PATCH 031/680] use nth_element rather than sort in
 calc_dpos_last_irreversible

---
 libraries/chain/block_header_state.cpp | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/libraries/chain/block_header_state.cpp b/libraries/chain/block_header_state.cpp
index ae43585e2ff..625f6ddf257 100644
--- a/libraries/chain/block_header_state.cpp
+++ b/libraries/chain/block_header_state.cpp
@@ -23,9 +23,10 @@ namespace eosio { namespace chain {
      /// 2/3 must be greater, so if I go 1/3 into the list sorted from low to high, then 2/3 are greater

      if( blocknums.size() == 0 ) return 0;
-      /// TODO: update to nth_element
-      std::sort( blocknums.begin(), blocknums.end() );
-      return blocknums[ (blocknums.size()-1) / 3 ];
+
+      std::size_t index = (blocknums.size()-1) / 3;
+      std::nth_element( blocknums.begin(), blocknums.begin() + index, blocknums.end() );
+      return blocknums[ index ];
   }

   pending_block_header_state  block_header_state::next( block_timestamp_type when,

From cee12cc3f3b6e4342f87f0c415343896d5fedd4a Mon Sep 17 00:00:00 2001
From: arhag
Date: Mon, 25 Feb 2019 16:17:48 -0500
Subject: [PATCH 032/680] switch protocol_feature_activation_handlers to
 unordered_map #6429

---
 libraries/chain/controller.cpp | 9 +++++----
 .../eosio/chain/protocol_feature_manager.hpp | 15 ---------------
 libraries/chain/include/eosio/chain/types.hpp | 14 ++++++++++++++
 3 files changed, 19 insertions(+), 19 deletions(-)

diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp
index 4aae4da1025..ca8ca86750d 100644
--- a/libraries/chain/controller.cpp
+++ b/libraries/chain/controller.cpp
@@ -220,7 +220,7 @@ struct controller_impl {
    typedef pair<scope_name,action_name> handler_key;
    map< account_name, map<handler_key, apply_handler> > apply_handlers;
-   map< builtin_protocol_feature_t, std::function<void(controller_impl&)> > protocol_feature_activation_handlers;
+   unordered_map< builtin_protocol_feature_t, std::function<void(controller_impl&)>, enum_hash<builtin_protocol_feature_t> > protocol_feature_activation_handlers;

    /**
     * Transactions that were undone by pop_block or abort_block, transactions
@@ -1251,11 +1251,12 @@ struct controller_impl {
    {
       const auto& gpo = db.get<global_property_object>();

-      bool handled_all_preactivated_features = (gpo.preactivated_protocol_features.size() == 0);
+      auto num_preactivated_protocol_features = gpo.preactivated_protocol_features.size();
+      bool handled_all_preactivated_features = (num_preactivated_protocol_features == 0);

       if( new_protocol_feature_activations.size() > 0 ) {
          flat_map<digest_type, bool> preactivated_protocol_features;
-         preactivated_protocol_features.reserve( gpo.preactivated_protocol_features.size() );
+         preactivated_protocol_features.reserve( num_preactivated_protocol_features );
          for( const auto& feature_digest : gpo.preactivated_protocol_features ) {
            preactivated_protocol_features.emplace( feature_digest, false );
          }
@@ -1281,7 +1282,7 @@ struct controller_impl {
                  ++bb._num_new_protocol_features_that_have_activated;
               }

-            if( num_preactivated_features_that_have_activated == gpo.preactivated_protocol_features.size() ) {
+            if( num_preactivated_features_that_have_activated == num_preactivated_protocol_features ) {
                handled_all_preactivated_features = true;
             }
          }
diff --git a/libraries/chain/include/eosio/chain/protocol_feature_manager.hpp b/libraries/chain/include/eosio/chain/protocol_feature_manager.hpp
index b41ed966517..07653951213 100644
--- a/libraries/chain/include/eosio/chain/protocol_feature_manager.hpp
+++ b/libraries/chain/include/eosio/chain/protocol_feature_manager.hpp
@@ -29,21 +29,6 @@ struct builtin_protocol_feature_spec {
    protocol_feature_subjective_restrictions subjective_restrictions{time_point{}, true, true};
 };

-template <typename T>
-struct enum_hash
-{
-   static_assert( std::is_enum<T>::value, "enum_hash can only be used on enumeration types" );
-
-   using underlying_type = typename std::underlying_type<T>::type;
-
-   std::size_t operator()(T t) const
-   {
-      return std::hash<underlying_type>{}( static_cast<underlying_type>(t) );
-   }
-};
-
-// enum_hash needed to support old gcc compiler of Ubuntu 16.04
-
 extern const std::unordered_map<builtin_protocol_feature_t, builtin_protocol_feature_spec, enum_hash<builtin_protocol_feature_t>> builtin_protocol_feature_codenames;

 const char* builtin_protocol_feature_codename( builtin_protocol_feature_t );

diff --git a/libraries/chain/include/eosio/chain/types.hpp b/libraries/chain/include/eosio/chain/types.hpp
index 93a80a0b7f3..e3608cbad87 100644
--- a/libraries/chain/include/eosio/chain/types.hpp
+++ b/libraries/chain/include/eosio/chain/types.hpp
@@ -251,6 +251,20 @@ namespace eosio { namespace chain {
       return end_insert_iterator( c );
    }

+   template <typename T>
+   struct enum_hash
+   {
+      static_assert( std::is_enum<T>::value, "enum_hash can only be used on enumeration types" );
+
+      using underlying_type = typename std::underlying_type<T>::type;
+
+      std::size_t operator()(T t) const
+      {
+         return std::hash<underlying_type>{}( static_cast<underlying_type>(t) );
+      }
+   };
+   // enum_hash needed to support old gcc compiler of Ubuntu 16.04
+
 } }  // eosio::chain

 FC_REFLECT( eosio::chain::void_t, )

From fbad66b7de989e749f6662ce04336df768ecc99d Mon Sep 17 00:00:00 2001
From: Bucky Kittinger
Date: Mon, 25 Feb 2019 18:23:18 -0500
Subject: [PATCH 033/680] Update .gitignore

---
 .gitignore | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/.gitignore b/.gitignore
index b6e828d489a..49f96db2f43 100644
--- a/.gitignore
+++ b/.gitignore
@@ -8,6 +8,8 @@
 *.s
 *.dot
 *.abi.hpp
+*.cmake
+!CMakeModules/*.cmake
 *.ninja
 \#*
 \.#*

From a72eda92fa80e88feea2c13ade6ce1553eefef78 Mon Sep 17 00:00:00 2001
From: arhag
Date: Mon, 25 Feb 2019 18:43:07 -0500
Subject: [PATCH 034/680] basic support for preactivation #6429 #6431

---
 libraries/chain/controller.cpp | 107 +++++++++++++++++-
 .../chain/include/eosio/chain/controller.hpp | 3 +
 .../chain/include/eosio/chain/exceptions.hpp | 2 +
 libraries/chain/wasm_interface.cpp | 16 +++
 4 files changed, 122 insertions(+), 6 deletions(-)

diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp
index ca8ca86750d..41d8c156a1f 100644
--- a/libraries/chain/controller.cpp
+++ b/libraries/chain/controller.cpp
@@ -884,6 +884,8 @@ struct controller_impl {
          trx_context.squash();
          restore.cancel();
          return trace;
+      } catch( const protocol_feature_bad_block_exception& ) {
+         throw;
       } catch( const fc::exception& e ) {
          cpu_time_to_bill_us = trx_context.update_billed_cpu_time( fc::time_point::now() );
         trace->except = e;
@@ -1016,6 +1018,8 @@ struct controller_impl {

          restore.cancel();
          return trace;
+      } catch( const protocol_feature_bad_block_exception& ) {
+         throw;
       } catch( const fc::exception& e ) {
          cpu_time_to_bill_us = trx_context.update_billed_cpu_time( fc::time_point::now() );
          trace->except = e;
@@
-1309,15 +1313,15 @@ struct controller_impl { pending->_block_stage.get()._new_pending_producer_schedule = gpo.proposed_schedule; db.modify( gpo, [&]( auto& gp ) { - gp.proposed_schedule_block_num = optional(); - gp.proposed_schedule.clear(); - if( gp.preactivated_protocol_features.size() > 0 ) { - gp.preactivated_protocol_features.clear(); - } + gp.proposed_schedule_block_num = optional(); + gp.proposed_schedule.clear(); + if( gp.preactivated_protocol_features.size() > 0 ) { + gp.preactivated_protocol_features.clear(); + } }); } else if( gpo.preactivated_protocol_features.size() > 0 ) { db.modify( gpo, [&]( auto& gp ) { - gp.preactivated_protocol_features.clear(); + gp.preactivated_protocol_features.clear(); }); } @@ -2043,6 +2047,11 @@ authorization_manager& controller::get_mutable_authorization_manager() return my->authorization; } +const protocol_feature_manager& controller::get_protocol_feature_manager()const +{ + return my->protocol_features; +} + controller::controller( const controller::config& cfg ) :my( new controller_impl( cfg, *this, protocol_feature_manager{} ) ) { @@ -2094,6 +2103,92 @@ chainbase::database& controller::mutable_db()const { return my->db; } const fork_database& controller::fork_db()const { return my->fork_db; } +void controller::preactivate_feature( const digest_type& feature_digest ) { + auto cur_time = pending_block_time(); + auto status = my->protocol_features.is_recognized( feature_digest, cur_time ); + switch( status ) { + case protocol_feature_manager::recognized_t::unrecognized: + if( is_producing_block() ) { + EOS_THROW( subjective_block_production_exception, + "protocol feature with digest '${digest}' is unrecognized", ("digest", feature_digest) ); + } else { + EOS_THROW( protocol_feature_bad_block_exception, + "protocol feature with digest '${digest}' is unrecognized", ("digest", feature_digest) ); + } + break; + case protocol_feature_manager::recognized_t::disabled: + if( is_producing_block() ) { + EOS_THROW( subjective_block_production_exception, + "protocol feature with digest '${digest}' is disabled", ("digest", feature_digest) ); + } else { + EOS_THROW( protocol_feature_bad_block_exception, + "protocol feature with digest '${digest}' is disabled", ("digest", feature_digest) ); + } + break; + case protocol_feature_manager::recognized_t::too_early: + if( is_producing_block() ) { + EOS_THROW( subjective_block_production_exception, + "${timestamp} is too early for the earliest allowed activation time of the protocol feature with digest '${digest}'", ("digest", feature_digest)("timestamp", cur_time) ); + } else { + EOS_THROW( protocol_feature_bad_block_exception, + "${timestamp} is too early for the earliest allowed activation time of the protocol feature with digest '${digest}'", ("digest", feature_digest)("timestamp", cur_time) ); + } + break; + case protocol_feature_manager::recognized_t::ready_if_preactivated: + case protocol_feature_manager::recognized_t::ready: + break; + default: + if( is_producing_block() ) { + EOS_THROW( subjective_block_production_exception, "unexpected recognized_t status" ); + } else { + EOS_THROW( protocol_feature_bad_block_exception, "unexpected recognized_t status" ); + } + break; + } + + // The above failures depend on subjective information. + // Because of deferred transactions, this complicates things considerably. 
+
+   // If producing a block, we throw a subjective failure if the feature is not properly recognized in order
+   // to try to avoid retiring into a block a deferred transaction driven by subjective information.
+
+   // But it is still possible for a producer to retire a deferred transaction that deals with this subjective
+   // information. If they recognized the feature, they would retire it successfully, but a validator that
+   // does not recognize the feature should reject the entire block (not just fail the deferred transaction).
+   // Even if they don't recognize the feature, the producer could change their nodeos code to treat it like an
+   // objective failure thus leading the deferred transaction to retire with soft_fail or hard_fail.
+   // In this case, validators that don't recognize the feature would reject the whole block immediately, and
+   // validators that do recognize the feature would likely lead to a different retire status which would
+   // ultimately cause a validation failure and thus rejection of the block.
+   // In either case, it results in rejection of the block which is the desired behavior in this scenario.
+
+   // If the feature is properly recognized by producer and validator, we have dealt with the subjectivity and
+   // now only consider the remaining failure modes which are deterministic and objective.
+   // Thus the exceptions that can be thrown below can be regular objective exceptions
+   // that do not cause immediate rejection of the block.
+
+   EOS_ASSERT( !is_protocol_feature_activated( feature_digest ),
+               protocol_feature_exception,
+               "protocol feature with digest '${digest}' is already activated",
+               ("digest", feature_digest)
+   );
+
+   const auto& gpo = my->db.get<global_property_object>();
+
+   EOS_ASSERT( std::find( gpo.preactivated_protocol_features.begin(),
+                          gpo.preactivated_protocol_features.end(),
+                          feature_digest
+               ) == gpo.preactivated_protocol_features.end(),
+               protocol_feature_exception,
+               "protocol feature with digest '${digest}' is already pre-activated",
+               ("digest", feature_digest)
+   );
+
+   my->db.modify( gpo, [&]( auto& gp ) {
+      gp.preactivated_protocol_features.push_back( feature_digest );
+   } );
+}
+
 vector<digest_type> controller::get_preactivated_protocol_features()const {
    const auto& gpo = my->db.get<global_property_object>();

diff --git a/libraries/chain/include/eosio/chain/controller.hpp b/libraries/chain/include/eosio/chain/controller.hpp
index 794976283ff..77fd8510933 100644
--- a/libraries/chain/include/eosio/chain/controller.hpp
+++ b/libraries/chain/include/eosio/chain/controller.hpp
@@ -99,6 +99,8 @@ namespace eosio { namespace chain {
         void add_indices();
         void startup( std::function shutdown, const snapshot_reader_ptr& snapshot = nullptr );

+         void preactivate_feature( const digest_type& feature_digest );
+
         vector<digest_type> get_preactivated_protocol_features()const;

         /**
@@ -162,6 +164,7 @@ namespace eosio { namespace chain {
         resource_limits_manager& get_mutable_resource_limits_manager();
         const authorization_manager& get_authorization_manager()const;
         authorization_manager& get_mutable_authorization_manager();
+         const protocol_feature_manager& get_protocol_feature_manager()const;

         const flat_set<account_name>& get_actor_whitelist() const;
         const flat_set<account_name>& get_actor_blacklist() const;

diff --git a/libraries/chain/include/eosio/chain/exceptions.hpp b/libraries/chain/include/eosio/chain/exceptions.hpp
index 6c71f08ed5a..d841b6f3aed 100644
--- a/libraries/chain/include/eosio/chain/exceptions.hpp
+++ b/libraries/chain/include/eosio/chain/exceptions.hpp
@@ -527,4 +527,6 @@ namespace eosio { namespace chain {
                                  3250000, "Protocol
feature exception" ) FC_DECLARE_DERIVED_EXCEPTION( protocol_feature_validation_exception, snapshot_exception, 3250001, "Protocol feature validation exception" ) + FC_DECLARE_DERIVED_EXCEPTION( protocol_feature_bad_block_exception, snapshot_exception, + 3250002, "Protocol feature exception (invalid block)" ) } } // eosio::chain diff --git a/libraries/chain/wasm_interface.cpp b/libraries/chain/wasm_interface.cpp index df0ce578b0e..74af3b88ee5 100644 --- a/libraries/chain/wasm_interface.cpp +++ b/libraries/chain/wasm_interface.cpp @@ -130,6 +130,22 @@ class privileged_api : public context_aware_api { EOS_ASSERT( false, unsupported_feature, "Unsupported Hardfork Detected" ); } + /** + * Returns true if the specified protocol feature is activated, false if not. + */ + bool is_feature_activated( const digest_type& feature_digest ) { + return context.control.is_protocol_feature_activated( feature_digest ); + } + + /** + * Pre-activates the specified protocol feature. + * Fails if the feature is unrecognized, disabled, or not allowed to be activated at the current time. + * Also fails if the feature was already activated or pre-activated. + */ + void preactivate_feature( const digest_type& feature_digest ) { + context.control.preactivate_feature( feature_digest ); + } + /** * update the resource limits associated with an account. Note these new values will not take effect until the * next resource "tick" which is currently defined as a cycle boundary inside a block. From 8c8e5fbfacaf19704a8139445d67b7f914478fea Mon Sep 17 00:00:00 2001 From: Nathan Pierce Date: Mon, 25 Feb 2019 18:48:54 -0500 Subject: [PATCH 035/680] quick pipeline fixes (#6840) --- .buildkite/pipeline.yml | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml index 6bfebd3e0c6..3f0057c5f74 100644 --- a/.buildkite/pipeline.yml +++ b/.buildkite/pipeline.yml @@ -163,7 +163,7 @@ steps: echo "--- :m: Starting MongoDB" && \ ~/bin/mongod --fork --dbpath ~/data/mongodb -f ~/etc/mongod.conf --logpath "$(pwd)"/mongod.log && \ echo "+++ :microscope: Running tests" && \ - cd /data/job/build && PATH=\$PATH:~/opt/mongodb/bin ~/bin/ctest -j8 -LE _tests --output-on-failure + cd /data/job/build && PATH=\$PATH:~/opt/mongodb/bin ctest -j8 -LE _tests --output-on-failure label: ":ubuntu: 16.04 Tests" agents: queue: "automation-large-builder-fleet" @@ -190,7 +190,7 @@ steps: echo "--- :m: Starting MongoDB" && \ ~/bin/mongod --fork --dbpath ~/data/mongodb -f ~/etc/mongod.conf --logpath "$(pwd)"/mongod.log && \ echo "+++ :microscope: Running tests" && \ - cd /data/job/build && PATH=\$PATH:~/opt/mongodb/bin ~/bin/ctest -L nonparallelizable_tests --output-on-failure + cd /data/job/build && PATH=\$PATH:~/opt/mongodb/bin ctest -L nonparallelizable_tests --output-on-failure label: ":ubuntu: 16.04 NP Tests" agents: queue: "automation-large-builder-fleet" @@ -217,7 +217,7 @@ steps: echo "--- :m: Starting MongoDB" && \ ~/bin/mongod --fork --dbpath ~/data/mongodb -f ~/etc/mongod.conf --logpath "$(pwd)"/mongod.log && \ echo "+++ :microscope: Running tests" && \ - cd /data/job/build && PATH=\$PATH:~/opt/mongodb/bin ~/bin/ctest -j8 -LE _tests --output-on-failure + cd /data/job/build && PATH=\$PATH:~/opt/mongodb/bin ctest -j8 -LE _tests --output-on-failure label: ":ubuntu: 18.04 Tests" agents: queue: "automation-large-builder-fleet" @@ -244,7 +244,7 @@ steps: echo "--- :m: Starting MongoDB" && \ ~/bin/mongod --fork --dbpath ~/data/mongodb -f ~/etc/mongod.conf --logpath 
"$(pwd)"/mongod.log && \ echo "+++ :microscope: Running tests" && \ - cd /data/job/build && PATH=\$PATH:~/opt/mongodb/bin ~/bin/ctest -L nonparallelizable_tests --output-on-failure + cd /data/job/build && PATH=\$PATH:~/opt/mongodb/bin ctest -L nonparallelizable_tests --output-on-failure label: ":ubuntu: 18.04 NP Tests" agents: queue: "automation-large-builder-fleet" @@ -272,7 +272,7 @@ steps: echo "--- :m: Starting MongoDB" && \ ~/bin/mongod --fork --dbpath ~/data/mongodb -f ~/etc/mongod.conf --logpath "$(pwd)"/mongod.log && \ echo "+++ :microscope: Running tests" && \ - cd /data/job/build && PATH=\$PATH:~/opt/mongodb/bin ~/bin/ctest -j8 -LE _tests --output-on-failure + cd /data/job/build && PATH=\$PATH:~/opt/mongodb/bin ctest -j8 -LE _tests --output-on-failure label: ":centos: 7 Tests" agents: queue: "automation-large-builder-fleet" @@ -299,7 +299,7 @@ steps: echo "--- :m: Starting MongoDB" && \ ~/bin/mongod --fork --dbpath ~/data/mongodb -f ~/etc/mongod.conf --logpath "$(pwd)"/mongod.log && \ echo "+++ :microscope: Running tests" && \ - cd /data/job/build && PATH=\$PATH:~/opt/mongodb/bin ~/bin/ctest -L nonparallelizable_tests --output-on-failure + cd /data/job/build && PATH=\$PATH:~/opt/mongodb/bin ctest -L nonparallelizable_tests --output-on-failure label: ":centos: 7 NP Tests" agents: queue: "automation-large-builder-fleet" @@ -326,7 +326,7 @@ steps: echo "--- :m: Starting MongoDB" && \ ~/bin/mongod --fork --dbpath ~/data/mongodb -f ~/etc/mongod.conf --logpath "$(pwd)"/mongod.log && \ echo "+++ :microscope: Running tests" && \ - cd /data/job/build && PATH=\$PATH:~/opt/mongodb/bin ~/bin/ctest -j8 -LE _tests --output-on-failure + cd /data/job/build && PATH=\$PATH:~/opt/mongodb/bin ctest -j8 -LE _tests --output-on-failure label: ":aws: 1 Tests" agents: queue: "automation-large-builder-fleet" @@ -353,7 +353,7 @@ steps: echo "--- :m: Starting MongoDB" && \ ~/bin/mongod --fork --dbpath ~/data/mongodb -f ~/etc/mongod.conf --logpath "$(pwd)"/mongod.log && \ echo "+++ :microscope: Running tests" && \ - cd /data/job/build && PATH=\$PATH:~/opt/mongodb/bin ~/bin/ctest -L nonparallelizable_tests --output-on-failure + cd /data/job/build && PATH=\$PATH:~/opt/mongodb/bin ctest -L nonparallelizable_tests --output-on-failure label: ":aws: 1 NP Tests" agents: queue: "automation-large-builder-fleet" @@ -380,7 +380,7 @@ steps: # echo "--- :m: Starting MongoDB" && \ # ~/bin/mongod --fork --dbpath ~/data/mongodb -f ~/etc/mongod.conf --logpath "$(pwd)"/mongod.log && \ # echo "+++ :microscope: Running tests" && \ - # cd /data/job/build && PATH=\$PATH:~/opt/mongodb/bin ~/bin/ctest -j8 -LE _tests --output-on-failure + # cd /data/job/build && PATH=\$PATH:~/opt/mongodb/bin ctest -j8 -LE _tests --output-on-failure # label: ":aws: 2 Tests" # agents: # queue: "automation-large-builder-fleet" @@ -407,7 +407,7 @@ steps: # echo "--- :m: Starting MongoDB" && \ # ~/bin/mongod --fork --dbpath ~/data/mongodb -f ~/etc/mongod.conf --logpath "$(pwd)"/mongod.log && \ # echo "+++ :microscope: Running tests" && \ - # cd /data/job/build && PATH=\$PATH:~/opt/mongodb/bin ~/bin/ctest -L nonparallelizable_tests --output-on-failure + # cd /data/job/build && PATH=\$PATH:~/opt/mongodb/bin ctest -L nonparallelizable_tests --output-on-failure # label: ":aws: 2 NP Tests" # agents: # queue: "automation-large-builder-fleet" @@ -434,7 +434,7 @@ steps: echo "--- :m: Starting MongoDB" && \ ~/bin/mongod --fork --dbpath ~/data/mongodb -f ~/etc/mongod.conf --logpath "$(pwd)"/mongod.log && \ echo "+++ :microscope: Running tests" && \ - cd 
/data/job/build && PATH=\$PATH:~/opt/mongodb/bin ~/bin/ctest -j8 -LE _tests --output-on-failure + cd /data/job/build && PATH=\$PATH:~/opt/mongodb/bin ctest -j8 -LE _tests --output-on-failure label: ":fedora: 27 Tests" agents: queue: "automation-large-builder-fleet" @@ -461,7 +461,7 @@ steps: echo "--- :m: Starting MongoDB" && \ ~/bin/mongod --fork --dbpath ~/data/mongodb -f ~/etc/mongod.conf --logpath "$(pwd)"/mongod.log && \ echo "+++ :microscope: Running tests" && \ - cd /data/job/build && PATH=\$PATH:~/opt/mongodb/bin ~/bin/ctest -L nonparallelizable_tests --output-on-failure + cd /data/job/build && PATH=\$PATH:~/opt/mongodb/bin ctest -L nonparallelizable_tests --output-on-failure label: ":fedora: 27 NP Tests" agents: queue: "automation-large-builder-fleet" From 6db1d6d70ecbca197322e7040b1daa4e05346152 Mon Sep 17 00:00:00 2001 From: Nathan Pierce Date: Mon, 25 Feb 2019 21:11:16 -0500 Subject: [PATCH 036/680] Support for amazonlinux2 (#6841) --- .buildkite/pipeline.yml | 148 ++++++++++++++++++------------------ scripts/full_uninstaller.sh | 3 +- tests/testUtils.py | 10 ++- 3 files changed, 83 insertions(+), 78 deletions(-) diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml index 3f0057c5f74..4e860734910 100644 --- a/.buildkite/pipeline.yml +++ b/.buildkite/pipeline.yml @@ -84,26 +84,26 @@ steps: workdir: /data/job timeout: 60 - # - command: | - # echo "+++ :hammer: Building" && \ - # ./scripts/eosio_build.sh -y && \ - # echo "--- :compression: Compressing build directory" && \ - # tar -pczf build.tar.gz build/ - # label: ":aws: 2 Build" - # agents: - # queue: "automation-large-builder-fleet" - # artifact_paths: "build.tar.gz" - # plugins: - # ecr#v1.1.4: - # login: true - # account_ids: "436617320021" - # no-include-email: true - # region: "us-west-2" - # docker#v2.1.0: - # debug: true - # image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-1" - # workdir: /data/job - # timeout: 60 + - command: | + echo "+++ :hammer: Building" && \ + ./scripts/eosio_build.sh -y && \ + echo "--- :compression: Compressing build directory" && \ + tar -pczf build.tar.gz build/ + label: ":aws: 2 Build" + agents: + queue: "automation-large-builder-fleet" + artifact_paths: "build.tar.gz" + plugins: + ecr#v1.1.4: + login: true + account_ids: "436617320021" + no-include-email: true + region: "us-west-2" + docker#v2.1.0: + debug: true + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-1" + workdir: /data/job + timeout: 60 - command: | echo "+++ :hammer: Building" && \ @@ -373,59 +373,59 @@ steps: workdir: /data/job timeout: 60 - # - command: | - # echo "--- :arrow_down: Downloading build directory" && \ - # buildkite-agent artifact download "build.tar.gz" . 
--step ":aws: 2 Build" && \ - # tar -zxf build.tar.gz && \ - # echo "--- :m: Starting MongoDB" && \ - # ~/bin/mongod --fork --dbpath ~/data/mongodb -f ~/etc/mongod.conf --logpath "$(pwd)"/mongod.log && \ - # echo "+++ :microscope: Running tests" && \ - # cd /data/job/build && PATH=\$PATH:~/opt/mongodb/bin ctest -j8 -LE _tests --output-on-failure - # label: ":aws: 2 Tests" - # agents: - # queue: "automation-large-builder-fleet" - # artifact_paths: - # - "mongod.log" - # - "build/genesis.json" - # - "build/config.ini" - # plugins: - # ecr#v1.1.4: - # login: true - # account_ids: "436617320021" - # no-include-email: true - # region: "us-west-2" - # docker#v2.1.0: - # debug: true - # image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-1" - # workdir: /data/job - # timeout: 60 - - # - command: | - # echo "--- :arrow_down: Downloading build directory" && \ - # buildkite-agent artifact download "build.tar.gz" . --step ":aws: 2 Build" && \ - # tar -zxf build.tar.gz && \ - # echo "--- :m: Starting MongoDB" && \ - # ~/bin/mongod --fork --dbpath ~/data/mongodb -f ~/etc/mongod.conf --logpath "$(pwd)"/mongod.log && \ - # echo "+++ :microscope: Running tests" && \ - # cd /data/job/build && PATH=\$PATH:~/opt/mongodb/bin ctest -L nonparallelizable_tests --output-on-failure - # label: ":aws: 2 NP Tests" - # agents: - # queue: "automation-large-builder-fleet" - # artifact_paths: - # - "mongod.log" - # - "build/genesis.json" - # - "build/config.ini" - # plugins: - # ecr#v1.1.4: - # login: true - # account_ids: "436617320021" - # no-include-email: true - # region: "us-west-2" - # docker#v2.1.0: - # debug: true - # image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-1" - # workdir: /data/job - # timeout: 60 + - command: | + echo "--- :arrow_down: Downloading build directory" && \ + buildkite-agent artifact download "build.tar.gz" . --step ":aws: 2 Build" && \ + tar -zxf build.tar.gz && \ + echo "--- :m: Starting MongoDB" && \ + ~/bin/mongod --fork --dbpath ~/data/mongodb -f ~/etc/mongod.conf --logpath "$(pwd)"/mongod.log && \ + echo "+++ :microscope: Running tests" && \ + cd /data/job/build && PATH=\$PATH:~/opt/mongodb/bin ctest -j8 -LE _tests --output-on-failure + label: ":aws: 2 Tests" + agents: + queue: "automation-large-builder-fleet" + artifact_paths: + - "mongod.log" + - "build/genesis.json" + - "build/config.ini" + plugins: + ecr#v1.1.4: + login: true + account_ids: "436617320021" + no-include-email: true + region: "us-west-2" + docker#v2.1.0: + debug: true + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-1" + workdir: /data/job + timeout: 60 + + - command: | + echo "--- :arrow_down: Downloading build directory" && \ + buildkite-agent artifact download "build.tar.gz" . 
--step ":aws: 2 Build" && \ + tar -zxf build.tar.gz && \ + echo "--- :m: Starting MongoDB" && \ + ~/bin/mongod --fork --dbpath ~/data/mongodb -f ~/etc/mongod.conf --logpath "$(pwd)"/mongod.log && \ + echo "+++ :microscope: Running tests" && \ + cd /data/job/build && PATH=\$PATH:~/opt/mongodb/bin ctest -L nonparallelizable_tests --output-on-failure + label: ":aws: 2 NP Tests" + agents: + queue: "automation-large-builder-fleet" + artifact_paths: + - "mongod.log" + - "build/genesis.json" + - "build/config.ini" + plugins: + ecr#v1.1.4: + login: true + account_ids: "436617320021" + no-include-email: true + region: "us-west-2" + docker#v2.1.0: + debug: true + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-1" + workdir: /data/job + timeout: 60 - command: | echo "--- :arrow_down: Downloading build directory" && \ @@ -716,4 +716,4 @@ steps: artifact_paths: - "build/packages/eosio_highsierra.rb" - "build/packages/eosio.rb" - timeout: 60 + timeout: 60 \ No newline at end of file diff --git a/scripts/full_uninstaller.sh b/scripts/full_uninstaller.sh index 94401c4a12e..04e79106ead 100755 --- a/scripts/full_uninstaller.sh +++ b/scripts/full_uninstaller.sh @@ -8,9 +8,9 @@ if [ -d "/usr/local/include/eosio" ] || [ -d "$HOME/opt/eosio" ] || [ $FORCED == elif [ $1 == 1 ] || [ $FORCED == 1 ]; then ANSWER=1 fi - echo "Uninstalling..." case $ANSWER in 1 | [Yy]* ) + echo "Uninstalling..." if [ -d "$HOME/opt/eosio" ] || [[ $1 == "force-new" ]]; then if [ $( uname ) == "Darwin" ]; then # gettext and other brew packages are not modified as they can be dependencies for things other than eosio @@ -128,7 +128,6 @@ if [ -d "/usr/local/include/eosio" ] || [ -d "$HOME/opt/eosio" ] || [ $FORCED == ;; [Nn]* ) printf "Skipping\n\n" - exit 0 ;; esac fi diff --git a/tests/testUtils.py b/tests/testUtils.py index dc09eb34ae8..9e7e9c604be 100755 --- a/tests/testUtils.py +++ b/tests/testUtils.py @@ -1,3 +1,4 @@ +import re import errno import subprocess import time @@ -219,10 +220,15 @@ def arePortsAvailable(ports): @staticmethod def pgrepCmd(serverName): - pgrepOpts="-fl" # pylint: disable=deprecated-method - if platform.linux_distribution()[0] in ["Ubuntu", "LinuxMint", "Fedora","CentOS Linux","arch"]: + # pgrep differs on different platform (amazonlinux1 and 2 for example). We need to check if pgrep -h has -a available and add that if so: + try: + pgrepHelp = re.search('-a', subprocess.Popen("pgrep --help 2>/dev/null", shell=True, stdout=subprocess.PIPE).stdout.read().decode('utf-8')) + pgrepHelp.group(0) # group() errors if -a is not found, so we don't need to do anything else special here. pgrepOpts="-a" + except AttributeError as error: + # If no -a, AttributeError: 'NoneType' object has no attribute 'group' + pgrepOpts="-fl" return "pgrep %s %s" % (pgrepOpts, serverName) From c3d63f1c5c0ce68e37f86cd06af36664d0f6c1b7 Mon Sep 17 00:00:00 2001 From: Jonathan Giszczak Date: Tue, 26 Feb 2019 12:19:56 -0600 Subject: [PATCH 037/680] Work around GCC defect 67274 without explicit this pointers. 
https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67274 --- libraries/chain/controller.cpp | 4 ++-- plugins/state_history_plugin/state_history_plugin.cpp | 6 +++--- plugins/wallet_plugin/yubihsm_wallet.cpp | 6 +++--- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp index 677f7a0ab41..77c0504de13 100644 --- a/libraries/chain/controller.cpp +++ b/libraries/chain/controller.cpp @@ -205,8 +205,8 @@ struct controller_impl { SET_APP_HANDLER( eosio, eosio, canceldelay ); - fork_db.irreversible.connect( [&]( auto b ) { - this->on_irreversible(b); // gcc defect https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67274 + fork_db.irreversible.connect( [&]( const block_state_ptr& b ) { + on_irreversible(b); }); } diff --git a/plugins/state_history_plugin/state_history_plugin.cpp b/plugins/state_history_plugin/state_history_plugin.cpp index cc238618079..0df317198d7 100644 --- a/plugins/state_history_plugin/state_history_plugin.cpp +++ b/plugins/state_history_plugin/state_history_plugin.cpp @@ -315,12 +315,12 @@ struct state_history_plugin_impl : std::enable_shared_from_this(app().get_io_service()); - acceptor->async_accept(*socket, [self = shared_from_this(), socket, this](auto ec) { + acceptor->async_accept(*socket, [self = shared_from_this(), socket, this](const boost::system::error_code& ec) { if (stopping) return; if (ec) { if (ec == boost::system::errc::too_many_files_open) - catch_and_log([&] { this->do_accept(); }); // gcc defect https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67274 + catch_and_log([&] { do_accept(); }); return; } catch_and_log([&] { @@ -328,7 +328,7 @@ struct state_history_plugin_impl : std::enable_shared_from_thisstart(std::move(*socket)); }); - catch_and_log([&] { this->do_accept(); }); + catch_and_log([&] { do_accept(); }); }); } diff --git a/plugins/wallet_plugin/yubihsm_wallet.cpp b/plugins/wallet_plugin/yubihsm_wallet.cpp index ff562006e53..cda0d208333 100644 --- a/plugins/wallet_plugin/yubihsm_wallet.cpp +++ b/plugins/wallet_plugin/yubihsm_wallet.cpp @@ -125,7 +125,7 @@ struct yubihsm_wallet_impl { void prime_keepalive_timer() { keepalive_timer.expires_at(std::chrono::steady_clock::now() + std::chrono::seconds(20)); - keepalive_timer.async_wait([this](auto ec){ + keepalive_timer.async_wait([this](const boost::system::error_code& ec){ if(ec || !session) return; @@ -133,9 +133,9 @@ struct yubihsm_wallet_impl { yh_cmd resp_cmd; size_t resp_sz = 1; if(yh_send_secure_msg(session, YHC_ECHO, &data, 1, &resp_cmd, &resp, &resp_sz)) - this->lock(); // gcc defect https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67274 + lock(); else - this->prime_keepalive_timer(); + prime_keepalive_timer(); }); } From 8967d471c9a7de33973ce58f6b3b3a30594b99a7 Mon Sep 17 00:00:00 2001 From: Kayan Date: Wed, 27 Feb 2019 16:41:52 +0800 Subject: [PATCH 038/680] fix switching between speculative and irreversible mode --- libraries/chain/controller.cpp | 111 +++++++++++------- libraries/chain/fork_database.cpp | 31 ++++- .../include/eosio/chain/fork_database.hpp | 6 + 3 files changed, 105 insertions(+), 43 deletions(-) diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp index b2a390fe648..4deec8a04b3 100644 --- a/libraries/chain/controller.cpp +++ b/libraries/chain/controller.cpp @@ -414,9 +414,11 @@ struct controller_impl { } int rev = 0; - while( auto obj = reversible_blocks.find(head->block_num+1) ) { + auto next_block_num = head->block_num+1; // need to cater the irreversible case that head is not advancing + while( 
auto obj = reversible_blocks.find(read_mode != db_read_mode::IRREVERSIBLE ? head->block_num+1 : next_block_num) ) { ++rev; replay_push_block( obj->get_block(), controller::block_status::validated ); + ++next_block_num; } ilog( "${n} reversible blocks replayed", ("n",rev) ); @@ -520,6 +522,28 @@ struct controller_impl { const auto hash = calculate_integrity_hash(); ilog( "database initialized with hash: ${hash}", ("hash", hash) ); } + + if (head && fork_db.pending_head() && fork_db.root()) { + if (read_mode != db_read_mode::IRREVERSIBLE) { + if (head->block_num < fork_db.pending_head()->block_num) { + // irreversible mode => speculative mode + wlog("db_read_mode has been changed: forwarding state from block ${h} to fork pending head ${fh}", ("h", head->block_num)("fh", fork_db.pending_head()->block_num)); + // let's go forward from lib to pending head, and set head as pending head + maybe_switch_forks( fork_db.pending_head(), controller::block_status::validated ); + } + } else { + // speculative mode => irreversible mode + uint32_t target_lib = fork_db.root()->block_num; + if (head->block_num > target_lib) { + wlog("db_read_mode has been changed, rolling back state from block ${o} to lib block ${l}", ("o", head->block_num)("l", target_lib)); + // let's go backward to lib(fork_db root) + while (head->block_num > target_lib) { + pop_block(); + } + fork_db.rollback_head_to_root(); + } + } + } } ~controller_impl() { @@ -1478,7 +1502,8 @@ struct controller_impl { emit( self.pre_accepted_block, b ); const bool skip_validate_signee = !conf.force_all_checks; - auto bsp = std::make_shared( *head, b, skip_validate_signee ); + // need to cater the irreversible mode case where head is not advancing + auto bsp = std::make_shared((read_mode == db_read_mode::IRREVERSIBLE && s != controller::block_status::irreversible && fork_db.pending_head()) ? *fork_db.pending_head() : *head, b, skip_validate_signee ); if( s != controller::block_status::irreversible ) { fork_db.add( bsp ); @@ -1519,48 +1544,52 @@ struct controller_impl { ("current_head_id", head->id)("current_head_num", head->block_num)("new_head_id", new_head->id)("new_head_num", new_head->block_num) ); auto branches = fork_db.fetch_branch_from( new_head->id, head->id ); - for( auto itr = branches.second.begin(); itr != branches.second.end(); ++itr ) { - pop_block(); - } - EOS_ASSERT( self.head_block_id() == branches.second.back()->header.previous, fork_database_exception, - "loss of sync between fork_db and chainbase during fork switch" ); // _should_ never fail - - for( auto ritr = branches.first.rbegin(); ritr != branches.first.rend(); ++ritr ) { - optional except; - try { - apply_block( (*ritr)->block, (*ritr)->is_valid() ? controller::block_status::validated : controller::block_status::complete ); - fork_db.mark_valid( *ritr ); - head = *ritr; - } catch (const fc::exception& e) { - except = e; + if (branches.second.size()) { // cater a case of switching from fork pending head to lib + for( auto itr = branches.second.begin(); itr != branches.second.end(); ++itr ) { + pop_block(); } - if( except ) { - elog("exception thrown while switching forks ${e}", ("e", except->to_detail_string())); - - // ritr currently points to the block that threw - // Remove the block that threw and all forks built off it. 
- fork_db.remove( (*ritr)->id ); - - EOS_ASSERT( head->id == fork_db.head()->id, fork_database_exception, - "loss of sync between fork_db and controller head during fork switch error" ); - - // pop all blocks from the bad fork - // ritr base is a forward itr to the last block successfully applied - auto applied_itr = ritr.base(); - for( auto itr = applied_itr; itr != branches.first.end(); ++itr ) { - pop_block(); - } - EOS_ASSERT( self.head_block_id() == branches.second.back()->header.previous, fork_database_exception, - "loss of sync between fork_db and chainbase during fork switch reversal" ); // _should_ never fail + EOS_ASSERT( self.head_block_id() == branches.second.back()->header.previous, fork_database_exception, + "loss of sync between fork_db and chainbase during fork switch" ); // _should_ never fail + } - // re-apply good blocks - for( auto ritr = branches.second.rbegin(); ritr != branches.second.rend(); ++ritr ) { - apply_block( (*ritr)->block, controller::block_status::validated /* we previously validated these blocks*/ ); + if (branches.first.size()) { + for( auto ritr = branches.first.rbegin(); ritr != branches.first.rend(); ++ritr ) { + optional except; + try { + apply_block( (*ritr)->block, (*ritr)->is_valid() ? controller::block_status::validated : controller::block_status::complete ); + fork_db.mark_valid( *ritr ); head = *ritr; + } catch (const fc::exception& e) { + except = e; } - throw *except; - } // end if exception - } /// end for each block in branch + if( except ) { + elog("exception thrown while switching forks ${e}", ("e", except->to_detail_string())); + + // ritr currently points to the block that threw + // Remove the block that threw and all forks built off it. + fork_db.remove( (*ritr)->id ); + + EOS_ASSERT( head->id == fork_db.head()->id, fork_database_exception, + "loss of sync between fork_db and controller head during fork switch error" ); + + // pop all blocks from the bad fork + // ritr base is a forward itr to the last block successfully applied + auto applied_itr = ritr.base(); + for( auto itr = applied_itr; itr != branches.first.end(); ++itr ) { + pop_block(); + } + EOS_ASSERT( self.head_block_id() == branches.second.back()->header.previous, fork_database_exception, + "loss of sync between fork_db and chainbase during fork switch reversal" ); // _should_ never fail + + // re-apply good blocks + for( auto ritr = branches.second.rbegin(); ritr != branches.second.rend(); ++ritr ) { + apply_block( (*ritr)->block, controller::block_status::validated /* we previously validated these blocks*/ ); + head = *ritr; + } + throw *except; + } // end if exception + } /// end for each block in branch + } ilog("successfully switched fork to new head ${new_head_id}", ("new_head_id", new_head->id)); } else { head_changed = false; @@ -2071,7 +2100,7 @@ const vector& controller::get_pending_trx_receipts()const { uint32_t controller::last_irreversible_block_num() const { uint32_t lib_num = (my->read_mode == db_read_mode::IRREVERSIBLE) - ? my->fork_db.pending_head()->dpos_irreversible_blocknum + ? 
std::max(my->fork_db.pending_head()->dpos_irreversible_blocknum, my->fork_db.root()->block_num) : my->head->dpos_irreversible_blocknum; return std::max( lib_num, my->snapshot_head_block ); } diff --git a/libraries/chain/fork_database.cpp b/libraries/chain/fork_database.cpp index 68238ccddda..e4816a5d3a3 100644 --- a/libraries/chain/fork_database.cpp +++ b/libraries/chain/fork_database.cpp @@ -218,6 +218,21 @@ namespace eosio { namespace chain { my->head = my->root; } + void fork_database::rollback_head_to_root() { + auto& by_id_idx = my->index.get(); + + // root probably does not exist in the index + auto itr = by_id_idx.begin(); + while (itr != by_id_idx.end()) { + by_id_idx.modify( itr, [&]( block_state_ptr& bsp ) { + bsp->validated = (bsp->id == my->root->id); + } ); + ++itr; + } + + my->head = my->root; + } + void fork_database::advance_root( const block_id_type& id ) { EOS_ASSERT( my->root, fork_database_exception, "root not yet set" ); @@ -326,24 +341,36 @@ namespace eosio { namespace chain { auto first_branch = get_block(first); auto second_branch = get_block(second); + // need to handle the case where first or second is the root + if (!first_branch && my->root && first == my->root->id) first_branch = my->root; + if (!second_branch && my->root && second == my->root->id) second_branch = my->root; + while( first_branch->block_num > second_branch->block_num ) { result.first.push_back(first_branch); + auto prev = first_branch->header.previous; first_branch = get_block( first_branch->header.previous ); + + if (!first_branch && my->root && prev == my->root->id) first_branch = my->root; + EOS_ASSERT( first_branch, fork_db_block_not_found, "block ${id} does not exist", - ("id", first_branch->header.previous) ); + ("id", prev) ); } while( second_branch->block_num > first_branch->block_num ) { result.second.push_back( second_branch ); + auto prev = second_branch->header.previous; second_branch = get_block( second_branch->header.previous ); + if (!second_branch && my->root && prev == my->root->id) second_branch = my->root; EOS_ASSERT( second_branch, fork_db_block_not_found, "block ${id} does not exist", - ("id", second_branch->header.previous) ); + ("id", prev) ); } + if (first_branch->id == second_branch->id) return result; + while( first_branch->header.previous != second_branch->header.previous ) { result.first.push_back(first_branch); diff --git a/libraries/chain/include/eosio/chain/fork_database.hpp b/libraries/chain/include/eosio/chain/fork_database.hpp index 8e4d9176431..1e723d054ca 100644 --- a/libraries/chain/include/eosio/chain/fork_database.hpp +++ b/libraries/chain/include/eosio/chain/fork_database.hpp @@ -36,6 +36,12 @@ namespace eosio { namespace chain { */ void reset( const block_header_state& root_bhs ); + /** + * rollback head to root if read_mode is changed from speculative to irreversible; + * the validated flag needs to be set to false to keep the head from advancing + */ + void rollback_head_to_root(); + /** * Advance root block forward to some other block in the tree.
*/ From 149928739f3be37d6894da84efac324061ea6dff Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Wed, 27 Feb 2019 11:56:29 -0600 Subject: [PATCH 039/680] Avoid std::ostringstream construction and reset --- libraries/chain/apply_context.cpp | 11 ++-- .../include/eosio/chain/apply_context.hpp | 23 ++------ libraries/chain/wasm_interface.cpp | 54 ++++++++++--------- 3 files changed, 36 insertions(+), 52 deletions(-) diff --git a/libraries/chain/apply_context.cpp b/libraries/chain/apply_context.cpp index 1beb647eed6..bbf02d61509 100644 --- a/libraries/chain/apply_context.cpp +++ b/libraries/chain/apply_context.cpp @@ -69,7 +69,7 @@ void apply_context::exec_one( action_trace& trace ) control.get_wasm_interface().apply( a.code_version, a.code, *this ); } catch( const wasm_exit& ) {} } - } FC_RETHROW_EXCEPTIONS( warn, "pending console output: ${console}", ("console", _pending_console_output.str()) ) + } FC_RETHROW_EXCEPTIONS( warn, "pending console output: ${console}", ("console", _pending_console_output) ) } catch( fc::exception& e ) { trace.receipt = r; // fill with known data trace.except = e; @@ -104,8 +104,8 @@ void apply_context::finalize_trace( action_trace& trace, const fc::time_point& s trace.account_ram_deltas = std::move( _account_ram_deltas ); _account_ram_deltas.clear(); - trace.console = _pending_console_output.str(); - reset_console(); + trace.console = std::move( _pending_console_output ); + _pending_console_output.clear(); trace.elapsed = fc::time_point::now() - start; } @@ -444,11 +444,6 @@ vector apply_context::get_active_producers() const { return accounts; } -void apply_context::reset_console() { - _pending_console_output = std::ostringstream(); - _pending_console_output.setf( std::ios::scientific, std::ios::floatfield ); -} - bytes apply_context::get_packed_transaction() { auto r = fc::raw::pack( static_cast(trx_context.trx) ); return r; diff --git a/libraries/chain/include/eosio/chain/apply_context.hpp b/libraries/chain/include/eosio/chain/apply_context.hpp index 03bfd63881e..2caaad8c696 100644 --- a/libraries/chain/include/eosio/chain/apply_context.hpp +++ b/libraries/chain/include/eosio/chain/apply_context.hpp @@ -466,7 +466,6 @@ class apply_context { ,idx_double(*this) ,idx_long_double(*this) { - reset_console(); } @@ -517,23 +516,11 @@ class apply_context { /// Console methods: public: - void reset_console(); - std::ostringstream& get_console_stream() { return _pending_console_output; } - const std::ostringstream& get_console_stream()const { return _pending_console_output; } + std::string& get_console() { return _pending_console_output; } + const std::string& get_console()const { return _pending_console_output; } - template - void console_append(T val) { - _pending_console_output << val; - } - - template - void console_append(T val, Ts ...rest) { - console_append(val); - console_append(rest...); - }; - - inline void console_append_formatted(const string& fmt, const variant_object& vo) { - console_append(fc::format_string(fmt, vo)); + void console_append( const string& val ) { + _pending_console_output += val; } /// Database methods: @@ -602,7 +589,7 @@ class apply_context { vector _notified; ///< keeps track of new accounts to be notified of current message vector _inline_actions; ///< queued inline messages vector _cfa_inline_actions; ///< queued inline messages - std::ostringstream _pending_console_output; + std::string _pending_console_output; flat_set _account_ram_deltas; ///< flat_set of account_delta so json is an array of objects //bytes _cached_trx; diff --git
a/libraries/chain/wasm_interface.cpp b/libraries/chain/wasm_interface.cpp index df0ce578b0e..feb9efbef8a 100644 --- a/libraries/chain/wasm_interface.cpp +++ b/libraries/chain/wasm_interface.cpp @@ -979,7 +979,7 @@ class console_api : public context_aware_api { // Kept as intrinsic rather than implementing on WASM side (using prints_l and strlen) because strlen is faster on native side. void prints(null_terminated_ptr str) { if ( !ignore ) { - context.console_append(str); + context.console_append( static_cast(str) ); } } @@ -991,13 +991,17 @@ class console_api : public context_aware_api { void printi(int64_t val) { if ( !ignore ) { - context.console_append(val); + std::ostringstream oss; + oss << val; + context.console_append( oss.str() ); } } void printui(uint64_t val) { if ( !ignore ) { - context.console_append(val); + std::ostringstream oss; + oss << val; + context.console_append( oss.str() ); } } @@ -1013,11 +1017,13 @@ class console_api : public context_aware_api { fc::uint128_t v(val_magnitude>>64, static_cast(val_magnitude) ); + string s; if( is_negative ) { - context.console_append("-"); + s += '-'; } + s += fc::variant(v).get_string(); - context.console_append(fc::variant(v).get_string()); + context.console_append( s ); } } @@ -1031,26 +1037,22 @@ class console_api : public context_aware_api { void printsf( float val ) { if ( !ignore ) { // Assumes float representation on native side is the same as on the WASM side - auto& console = context.get_console_stream(); - auto orig_prec = console.precision(); - - console.precision( std::numeric_limits::digits10 ); - context.console_append(val); - - console.precision( orig_prec ); + std::ostringstream oss; + oss.setf( std::ios::scientific, std::ios::floatfield ); + oss.precision( std::numeric_limits::digits10 ); + oss << val; + context.console_append( oss.str() ); } } void printdf( double val ) { if ( !ignore ) { // Assumes double representation on native side is the same as on the WASM side - auto& console = context.get_console_stream(); - auto orig_prec = console.precision(); - - console.precision( std::numeric_limits::digits10 ); - context.console_append(val); - - console.precision( orig_prec ); + std::ostringstream oss; + oss.setf( std::ios::scientific, std::ios::floatfield ); + oss.precision( std::numeric_limits::digits10 ); + oss << val; + context.console_append( oss.str() ); } } @@ -1068,23 +1070,23 @@ class console_api : public context_aware_api { */ if ( !ignore ) { - auto& console = context.get_console_stream(); - auto orig_prec = console.precision(); + std::ostringstream oss; + oss.setf( std::ios::scientific, std::ios::floatfield ); #ifdef __x86_64__ - console.precision( std::numeric_limits::digits10 ); + oss.precision( std::numeric_limits::digits10 ); extFloat80_t val_approx; f128M_to_extF80M(&val, &val_approx); #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wstrict-aliasing" - context.console_append( *(long double*)(&val_approx) ); + oss << *(long double*)(&val_approx); #pragma GCC diagnostic pop #else - console.precision( std::numeric_limits::digits10 ); + oss.precision( std::numeric_limits::digits10 ); double val_approx = from_softfloat64( f128M_to_f64(&val) ); - context.console_append(val_approx); + oss << val_approx; #endif - console.precision( orig_prec ); + context.console_append( oss.str() ); } } From 7115df6d811dc5de1da2a05c8ab34a2ee2de150e Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Wed, 27 Feb 2019 13:01:05 -0600 Subject: [PATCH 040/680] Remove unused get_console() methods ---
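Patches 039 and 040 together swap the persistent std::ostringstream console buffer for a plain std::string and build formatted pieces in short-lived local streams, which also removes the need to save and restore stream precision around each print. A rough, standalone sketch of the before/after append pattern (toy type only, not the actual apply_context):

```cpp
#include <iostream>
#include <limits>
#include <sstream>
#include <string>

struct console_buffer {
   std::string pending; // replaces the old long-lived std::ostringstream

   void append( const std::string& s ) { pending += s; }

   // format a double the way printdf does: a throwaway stream configured
   // per call, so no shared precision state has to be saved and restored
   void append_double( double val ) {
      std::ostringstream oss;
      oss.setf( std::ios::scientific, std::ios::floatfield );
      oss.precision( std::numeric_limits<double>::digits10 );
      oss << val;
      pending += oss.str();
   }
};

int main() {
   console_buffer c;
   c.append( "pi ~= " );
   c.append_double( 3.14159265358979 );
   std::cout << c.pending << '\n'; // consume...
   c.pending.clear();              // ...then reset by clear(), not reconstruction
}
```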
libraries/chain/include/eosio/chain/apply_context.hpp | 3 --- 1 file changed, 3 deletions(-) diff --git a/libraries/chain/include/eosio/chain/apply_context.hpp b/libraries/chain/include/eosio/chain/apply_context.hpp index 2caaad8c696..951422cb753 100644 --- a/libraries/chain/include/eosio/chain/apply_context.hpp +++ b/libraries/chain/include/eosio/chain/apply_context.hpp @@ -516,9 +516,6 @@ class apply_context { /// Console methods: public: - std::string& get_console() { return _pending_console_output; } - const std::string& get_console()const { return _pending_console_output; } - void console_append( const string& val ) { _pending_console_output += val; } From 988bcc0e28f69db93cd5b71cb10f13ad181ed777 Mon Sep 17 00:00:00 2001 From: Nathan Pierce Date: Thu, 28 Feb 2019 10:34:44 -0500 Subject: [PATCH 041/680] fixes --- scripts/eosio_build_darwin.sh | 14 ++++++++++---- scripts/eosio_build_darwin_deps | 6 +++--- 2 files changed, 13 insertions(+), 7 deletions(-) diff --git a/scripts/eosio_build_darwin.sh b/scripts/eosio_build_darwin.sh index 753921beef3..c178323d61b 100755 --- a/scripts/eosio_build_darwin.sh +++ b/scripts/eosio_build_darwin.sh @@ -94,20 +94,26 @@ printf "\\nChecking dependencies...\\n" var_ifs="${IFS}" IFS="," while read -r name tester testee brewname uri; do - if [ "${tester}" "${testee}" ]; then - printf " - %s found\\n" "${name}" + # For directories, we want to be able to wildcard match versions to prevent upgraded brew packages (minor or patch changes to python for example) from not being seen + if [ $tester == '-d' ] && [ $(echo $testee | grep -c '*') -gt 0 ]; then + for DIR in $testee; do + testee=$(echo $DIR | sed 's/\\//g') + done + fi + if [ $tester $testee ]; then + printf " - ${name} found!\\n" continue fi # resolve conflict with homebrew glibtool and apple/gnu installs of libtool if [ "${testee}" == "/usr/local/bin/glibtool" ]; then if [ "${tester}" "/usr/local/bin/libtool" ]; then - printf " - %s found\\n" "${name}" + printf " - ${name} found!\\n" continue fi fi DEPS=$DEPS"${brewname}," DISPLAY="${DISPLAY}${COUNT}. 
${name}\\n" - printf " - %s ${bldred}NOT${txtrst} found.\\n" "${name}" + printf " - ${name} ${bldred}NOT${txtrst} found.\\n" (( COUNT++ )) done < "${REPO_ROOT}/scripts/eosio_build_darwin_deps" IFS="${var_ifs}" diff --git a/scripts/eosio_build_darwin_deps b/scripts/eosio_build_darwin_deps index 44192f04309..50ebafc72a8 100755 --- a/scripts/eosio_build_darwin_deps +++ b/scripts/eosio_build_darwin_deps @@ -4,9 +4,9 @@ Libtool,-x,/usr/local/bin/glibtool,libtool,http://gnu.askapache.com/libtool/libt OpenSSL,-f,/usr/local/opt/openssl/lib/libssl.a,openssl,https://www.openssl.org/source/openssl-1.0.2n.tar.gz wget,-x,/usr/local/bin/wget,wget,https://ftp.gnu.org/gnu/wget/wget-1.19.2.tar.gz GMP,-f,/usr/local/opt/gmp/include/gmpxx.h,gmp,https://ftp.gnu.org/gnu/gmp/gmp-6.1.2.tar.bz2 -llvm,-x,/usr/local/opt/llvm@4/bin/clang-4.0,llvm@4,http://releases.llvm.org/4.0.1/llvm-4.0.1.src.tar.xz -python,-d,/usr/local/Cellar/python/3.7.2_1,python,https://www.python.org/ftp/python/3.7.2/Python-3.7.2.tgz -python@2,-d,/usr/local/Cellar/python@2/2.7.15_2,python@2,https://www.python.org/ftp/python/2.7.13/Python-2.7.13.tgz +llvm,-d,/usr/local/opt/llvm@4,llvm@4,http://releases.llvm.org/4.0.1/llvm-4.0.1.src.tar.xz +python,-d,/usr/local/Cellar/python/3\*,python,https://www.python.org/ftp/python/3.7.2/Python-3.7.2.tgz +python@2,-d,/usr/local/Cellar/python@2/2\*,python@2,https://www.python.org/ftp/python/2.7.13/Python-2.7.13.tgz doxygen,-f,/usr/local/bin/doxygen,doxygen,http://ftp.stack.nl/pub/users/dimitri/doxygen-1.8.14.src.tar.gz graphviz,-d,/usr/local/opt/graphviz,graphviz,https://fossies.org/linux/misc/graphviz-2.40.1.tar.gz libusb,-f,/usr/local/lib/libusb-1.0.0.dylib,libusb,https://github.com/libusb/libusb/releases/download/v1.0.22/libusb-1.0.22.tar.bz2 From 53f1368767375ba4830b43551e1dfd446f8b419b Mon Sep 17 00:00:00 2001 From: Nathan Pierce Date: Thu, 28 Feb 2019 10:38:59 -0500 Subject: [PATCH 042/680] quick cleanup of spacing --- scripts/eosio_build_ubuntu.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/eosio_build_ubuntu.sh b/scripts/eosio_build_ubuntu.sh index 65fbfeeec07..5561b14a450 100755 --- a/scripts/eosio_build_ubuntu.sh +++ b/scripts/eosio_build_ubuntu.sh @@ -126,7 +126,7 @@ if [ "${COUNT}" -gt 1 ]; then * ) echo "Please type 'y' for yes or 'n' for no."; exit;; esac else - printf " - No required APT dependencies to install." 
+ printf " - No required APT dependencies to install.\\n" fi From 2e2812b6c4cdd6248d3ef6d9b69efe12429942c2 Mon Sep 17 00:00:00 2001 From: Nathan Pierce Date: Thu, 28 Feb 2019 10:40:45 -0500 Subject: [PATCH 043/680] quick cleanup of spacing --- scripts/eosio_build_darwin.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/eosio_build_darwin.sh b/scripts/eosio_build_darwin.sh index c178323d61b..9e07d10d261 100755 --- a/scripts/eosio_build_darwin.sh +++ b/scripts/eosio_build_darwin.sh @@ -161,7 +161,7 @@ if [ $COUNT -gt 1 ]; then * ) echo "Please type 'y' for yes or 'n' for no."; exit;; esac else - printf "\\n - No required Home Brew dependencies to install.\\n" + printf " - No required Home Brew dependencies to install.\\n" fi From 7ef2ec17069d0a07a374600861b4f4276cf23bca Mon Sep 17 00:00:00 2001 From: Nathan Pierce Date: Thu, 28 Feb 2019 10:41:40 -0500 Subject: [PATCH 044/680] quick cleanup of spacing --- scripts/eosio_build_centos.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/eosio_build_centos.sh b/scripts/eosio_build_centos.sh index 3d0056f0b36..1c1e97b2fab 100755 --- a/scripts/eosio_build_centos.sh +++ b/scripts/eosio_build_centos.sh @@ -157,7 +157,7 @@ if [ "${COUNT}" -gt 1 ]; then * ) echo "Please type 'y' for yes or 'n' for no."; exit;; esac else - printf " - No required YUM dependencies to install.\\n" + printf " - No required YUM dependencies to install.\\n\\n" fi if [ -d /opt/rh/python33 ]; then From 2d4d0d352f815d0d99dd8cf8d4ce554e33fc446c Mon Sep 17 00:00:00 2001 From: arhag Date: Thu, 28 Feb 2019 13:26:50 -0500 Subject: [PATCH 045/680] initial work on #6437 --- libraries/chain/CMakeLists.txt | 1 + libraries/chain/controller.cpp | 8 +- libraries/chain/genesis_intrinsics.cpp | 231 ++++++++++++++++++ .../eosio/chain/genesis_intrinsics.hpp | 14 ++ .../eosio/chain/global_property_object.hpp | 35 ++- libraries/chain/include/eosio/chain/types.hpp | 4 +- .../include/eosio/chain/wasm_interface.hpp | 68 ++++-- libraries/chain/wasm_interface.cpp | 4 +- libraries/fc | 2 +- 9 files changed, 334 insertions(+), 33 deletions(-) create mode 100644 libraries/chain/genesis_intrinsics.cpp create mode 100644 libraries/chain/include/eosio/chain/genesis_intrinsics.hpp diff --git a/libraries/chain/CMakeLists.txt b/libraries/chain/CMakeLists.txt index 52b4181a030..c2a28f9aaa8 100644 --- a/libraries/chain/CMakeLists.txt +++ b/libraries/chain/CMakeLists.txt @@ -49,6 +49,7 @@ add_library( eosio_chain transaction_metadata.cpp protocol_feature_activation.cpp protocol_feature_manager.cpp + genesis_intrinsics.cpp ${HEADERS} ) diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp index 41d8c156a1f..e68ad4c2a8a 100644 --- a/libraries/chain/controller.cpp +++ b/libraries/chain/controller.cpp @@ -13,6 +13,7 @@ #include #include #include +#include #include #include @@ -792,12 +793,15 @@ struct controller_impl { const auto& tapos_block_summary = db.get(1); db.modify( tapos_block_summary, [&]( auto& bs ) { - bs.block_id = head->id; + bs.block_id = head->id; }); conf.genesis.initial_configuration.validate(); db.create([&](auto& gpo ){ - gpo.configuration = conf.genesis.initial_configuration; + gpo.configuration = conf.genesis.initial_configuration; + for( const auto& i : genesis_intrinsics ) { + gpo.add_intrinsic_to_whitelist( i ); + } }); db.create([](auto&){}); diff --git a/libraries/chain/genesis_intrinsics.cpp b/libraries/chain/genesis_intrinsics.cpp new file mode 100644 index 00000000000..a40010f7586 --- /dev/null +++ 
b/libraries/chain/genesis_intrinsics.cpp @@ -0,0 +1,231 @@ +/** + * @file + * @copyright defined in eos/LICENSE + */ + +#include + +namespace eosio { namespace chain { + +const std::vector genesis_intrinsics = { + "__ashrti3", + "__lshlti3", + "__lshrti3", + "__ashlti3", + "__divti3", + "__udivti3", + "__modti3", + "__umodti3", + "__multi3", + "__addtf3", + "__subtf3", + "__multf3", + "__divtf3", + "__eqtf2", + "__netf2", + "__getf2", + "__gttf2", + "__lttf2", + "__letf2", + "__cmptf2", + "__unordtf2", + "__negtf2", + "__floatsitf", + "__floatunsitf", + "__floatditf", + "__floatunditf", + "__floattidf", + "__floatuntidf", + "__floatsidf", + "__extendsftf2", + "__extenddftf2", + "__fixtfti", + "__fixtfdi", + "__fixtfsi", + "__fixunstfti", + "__fixunstfdi", + "__fixunstfsi", + "__fixsfti", + "__fixdfti", + "__fixunssfti", + "__fixunsdfti", + "__trunctfdf2", + "__trunctfsf2", + "is_feature_active", + "activate_feature", + "get_resource_limits", + "set_resource_limits", + "set_proposed_producers", + "get_blockchain_parameters_packed", + "set_blockchain_parameters_packed", + "is_privileged", + "set_privileged", + "get_active_producers", + "db_idx64_store", + "db_idx64_remove", + "db_idx64_update", + "db_idx64_find_primary", + "db_idx64_find_secondary", + "db_idx64_lowerbound", + "db_idx64_upperbound", + "db_idx64_end", + "db_idx64_next", + "db_idx64_previous", + "db_idx128_store", + "db_idx128_remove", + "db_idx128_update", + "db_idx128_find_primary", + "db_idx128_find_secondary", + "db_idx128_lowerbound", + "db_idx128_upperbound", + "db_idx128_end", + "db_idx128_next", + "db_idx128_previous", + "db_idx256_store", + "db_idx256_remove", + "db_idx256_update", + "db_idx256_find_primary", + "db_idx256_find_secondary", + "db_idx256_lowerbound", + "db_idx256_upperbound", + "db_idx256_end", + "db_idx256_next", + "db_idx256_previous", + "db_idx_double_store", + "db_idx_double_remove", + "db_idx_double_update", + "db_idx_double_find_primary", + "db_idx_double_find_secondary", + "db_idx_double_lowerbound", + "db_idx_double_upperbound", + "db_idx_double_end", + "db_idx_double_next", + "db_idx_double_previous", + "db_idx_long_double_store", + "db_idx_long_double_remove", + "db_idx_long_double_update", + "db_idx_long_double_find_primary", + "db_idx_long_double_find_secondary", + "db_idx_long_double_lowerbound", + "db_idx_long_double_upperbound", + "db_idx_long_double_end", + "db_idx_long_double_next", + "db_idx_long_double_previous", + "db_idx64_store", + "db_idx64_remove", + "db_idx64_update", + "db_idx64_find_primary", + "db_idx64_find_secondary", + "db_idx64_lowerbound", + "db_idx64_upperbound", + "db_idx64_end", + "db_idx64_next", + "db_idx64_previous", + "db_idx128_store", + "db_idx128_remove", + "db_idx128_update", + "db_idx128_find_primary", + "db_idx128_find_secondary", + "db_idx128_lowerbound", + "db_idx128_upperbound", + "db_idx128_end", + "db_idx128_next", + "db_idx128_previous", + "db_idx256_store", + "db_idx256_remove", + "db_idx256_update", + "db_idx256_find_primary", + "db_idx256_find_secondary", + "db_idx256_lowerbound", + "db_idx256_upperbound", + "db_idx256_end", + "db_idx256_next", + "db_idx256_previous", + "db_idx_double_store", + "db_idx_double_remove", + "db_idx_double_update", + "db_idx_double_find_primary", + "db_idx_double_find_secondary", + "db_idx_double_lowerbound", + "db_idx_double_upperbound", + "db_idx_double_end", + "db_idx_double_next", + "db_idx_double_previous", + "db_idx_long_double_store", + "db_idx_long_double_remove", + "db_idx_long_double_update", + 
"db_idx_long_double_find_primary", + "db_idx_long_double_find_secondary", + "db_idx_long_double_lowerbound", + "db_idx_long_double_upperbound", + "db_idx_long_double_end", + "db_idx_long_double_next", + "db_idx_long_double_previous", + "db_store_i64", + "db_update_i64", + "db_remove_i64", + "db_get_i64", + "db_next_i64", + "db_previous_i64", + "db_find_i64", + "db_lowerbound_i64", + "db_upperbound_i64", + "db_end_i64", + "assert_recover_key", + "recover_key", + "assert_sha256", + "assert_sha1", + "assert_sha512", + "assert_ripemd160", + "sha1", + "sha256", + "sha512", + "ripemd160", + "check_transaction_authorization", + "check_permission_authorization", + "get_permission_last_used", + "get_account_creation_time", + "current_time", + "publication_time", + "abort", + "eosio_assert", + "eosio_assert_message", + "eosio_assert_code", + "eosio_exit", + "read_action_data", + "action_data_size", + "current_receiver", + "require_recipient", + "require_auth", + "require_auth2", + "has_auth", + "is_account", + "prints", + "prints_l", + "printi", + "printui", + "printi128", + "printui128", + "printsf", + "printdf", + "printqf", + "printn", + "printhex", + "read_transaction", + "transaction_size", + "expiration", + "tapos_block_prefix", + "tapos_block_num", + "get_action", + "send_inline", + "send_context_free_inline", + "send_deferred", + "cancel_deferred", + "get_context_free_data", + "memcpy", + "memmove", + "memcmp", + "memset" +}; + +} } // namespace eosio::chain diff --git a/libraries/chain/include/eosio/chain/genesis_intrinsics.hpp b/libraries/chain/include/eosio/chain/genesis_intrinsics.hpp new file mode 100644 index 00000000000..bd736d6a285 --- /dev/null +++ b/libraries/chain/include/eosio/chain/genesis_intrinsics.hpp @@ -0,0 +1,14 @@ + +/** + * @file + * @copyright defined in eos/LICENSE + */ +#pragma once + +#include + +namespace eosio { namespace chain { + +extern const std::vector genesis_intrinsics; + +} } // namespace eosio::chain diff --git a/libraries/chain/include/eosio/chain/global_property_object.hpp b/libraries/chain/include/eosio/chain/global_property_object.hpp index e513045c0b2..0f9579b48a7 100644 --- a/libraries/chain/include/eosio/chain/global_property_object.hpp +++ b/libraries/chain/include/eosio/chain/global_property_object.hpp @@ -26,13 +26,33 @@ namespace eosio { namespace chain { */ class global_property_object : public chainbase::object { - OBJECT_CTOR(global_property_object, (proposed_schedule)(preactivated_protocol_features)) + OBJECT_CTOR(global_property_object, (proposed_schedule)(preactivated_protocol_features)(whitelisted_intrinsics)) - id_type id; - optional proposed_schedule_block_num; - shared_producer_schedule_type proposed_schedule; - chain_config configuration; - shared_vector preactivated_protocol_features; + public: + + inline void add_intrinsic_to_whitelist( const char* name ) { + uint64_t h = static_cast( std::hash{}( std::string(name) ) ); + whitelisted_intrinsics.emplace( std::piecewise_construct, + std::forward_as_tuple( h ), + std::forward_as_tuple( name, whitelisted_intrinsics.get_allocator() ) + ); + } + + inline void add_intrinsic_to_whitelist( const std::string& name ) { + uint64_t h = static_cast( std::hash{}( name ) ); + whitelisted_intrinsics.emplace( std::piecewise_construct, + std::forward_as_tuple( h ), + std::forward_as_tuple( name.c_str(), name.size(), + whitelisted_intrinsics.get_allocator() ) + ); + } + + id_type id; + optional proposed_schedule_block_num; + shared_producer_schedule_type proposed_schedule; + chain_config configuration; 
+ shared_vector preactivated_protocol_features; + shared_flat_multimap whitelisted_intrinsics; }; @@ -83,5 +103,6 @@ FC_REFLECT(eosio::chain::dynamic_global_property_object, ) FC_REFLECT(eosio::chain::global_property_object, - (proposed_schedule_block_num)(proposed_schedule)(configuration)(preactivated_protocol_features) + (proposed_schedule_block_num)(proposed_schedule)(configuration) + (preactivated_protocol_features)(whitelisted_intrinsics) ) diff --git a/libraries/chain/include/eosio/chain/types.hpp b/libraries/chain/include/eosio/chain/types.hpp index e3608cbad87..5ce7bf87550 100644 --- a/libraries/chain/include/eosio/chain/types.hpp +++ b/libraries/chain/include/eosio/chain/types.hpp @@ -8,7 +8,7 @@ #include -#include +#include #include #include #include @@ -95,6 +95,8 @@ namespace eosio { namespace chain { using shared_vector = boost::interprocess::vector>; template using shared_set = boost::interprocess::set, allocator>; + template + using shared_flat_multimap = boost::interprocess::flat_multimap< K, V, std::less, allocator< std::pair > >; /** * For bugs in boost interprocess we moved our blob data to shared_string diff --git a/libraries/chain/include/eosio/chain/wasm_interface.hpp b/libraries/chain/include/eosio/chain/wasm_interface.hpp index 7e6991996af..38c6b82c063 100644 --- a/libraries/chain/include/eosio/chain/wasm_interface.hpp +++ b/libraries/chain/include/eosio/chain/wasm_interface.hpp @@ -17,31 +17,57 @@ namespace eosio { namespace chain { namespace webassembly { namespace common { class intrinsics_accessor; - struct root_resolver : Runtime::Resolver { - //when validating is true; only allow "env" imports. Otherwise allow any imports. This resolver is used - //in two cases: once by the generic validating code where we only want "env" to pass; and then second in the - //wavm runtime where we need to allow linkage to injected functions - root_resolver(bool validating = false) : validating(validating) {} - bool validating; + class root_resolver : public Runtime::Resolver { + public: + // The non-default constructor puts root_resolver in a mode where it does validation, i.e. only allows "env" imports. + // This mode is used by the generic validating code that runs during setcode, where we only want "env" to pass. + // The default constructor is used when no validation is required such as when the wavm runtime needs to + // allow linkage to the intrinsics and the injected functions. + + root_resolver() {} + + root_resolver( const shared_flat_multimap& whitelisted_intrinsics ) + :whitelisted_intrinsics(&whitelisted_intrinsics) + {} bool resolve(const string& mod_name, const string& export_name, IR::ObjectType type, - Runtime::ObjectInstance*& out) override { - try { - //protect access to "private" injected functions; so for now just simply allow "env" since injected functions - // are in a different module - if(validating && mod_name != "env") - EOS_ASSERT( false, wasm_exception, "importing from module that is not 'env': ${module}.${export}", ("module",mod_name)("export",export_name) ); - - // Try to resolve an intrinsic first. 
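The whitelist lookup here is collision-tolerant: names are keyed by their std::hash value in a multimap, so membership requires scanning every entry in the matching bucket and comparing the stored strings. A self-contained sketch of that scheme, using std::multimap to stand in for the chain's shared_flat_multimap (the later whitelisted_intrinsics.cpp in this series factors the same scan into find_intrinsic_helper):

```cpp
#include <cassert>
#include <cstdint>
#include <functional>
#include <map>
#include <string>

using whitelist = std::multimap<uint64_t, std::string>;

static uint64_t hash_of( const std::string& name ) {
   return static_cast<uint64_t>( std::hash<std::string>{}( name ) );
}

void add_to_whitelist( whitelist& wl, const std::string& name ) {
   wl.emplace( hash_of( name ), name ); // bucket key is the hash, value is the name
}

// two different names may share a hash, so scan the whole bucket and
// compare the stored strings before declaring a match
bool is_whitelisted( const whitelist& wl, const std::string& name ) {
   const uint64_t h = hash_of( name );
   for( auto itr = wl.lower_bound( h ); itr != wl.end() && itr->first == h; ++itr ) {
      if( itr->second == name ) return true;
   }
   return false;
}

int main() {
   whitelist wl;
   add_to_whitelist( wl, "sha256" );
   add_to_whitelist( wl, "db_store_i64" );
   assert(  is_whitelisted( wl, "sha256" ) );
   assert( !is_whitelisted( wl, "printi" ) );
}
```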
- if(Runtime::IntrinsicResolver::singleton.resolve(mod_name,export_name,type, out)) { - return true; - } - - EOS_ASSERT( false, wasm_exception, "${module}.${export} unresolveable", ("module",mod_name)("export",export_name) ); - return false; - } FC_CAPTURE_AND_RETHROW( (mod_name)(export_name) ) } + Runtime::ObjectInstance*& out) override + { try { + bool fail = false; + + if( whitelisted_intrinsics != nullptr ) { + // Protect access to "private" injected functions; so for now just simply allow "env" since injected + // functions are in a different module. + EOS_ASSERT( mod_name == "env", wasm_exception, + "importing from module that is not 'env': ${module}.${export}", + ("module",mod_name)("export",export_name) ); + + // Only consider imports that are in the whitelisted set of intrinsics: + uint64_t hash = static_cast( std::hash{}( export_name ) ); + auto itr = whitelisted_intrinsics->lower_bound( hash ); + fail = true; + for( const auto end = whitelisted_intrinsics->end(); itr != end && itr->first == hash; ++itr ) { + if( itr->second.compare( 0, itr->second.size(), export_name.c_str(), export_name.size() ) == 0 ) { + fail = false; + break; + } + } + } + + // Try to resolve an intrinsic first. + if( !fail && Runtime::IntrinsicResolver::singleton.resolve( mod_name, export_name, type, out ) ) { + return true; + } + + EOS_THROW( wasm_exception, "${module}.${export} unresolveable", + ("module",mod_name)("export",export_name) ); + return false; + } FC_CAPTURE_AND_RETHROW( (mod_name)(export_name) ) } + + protected: + const shared_flat_multimap* whitelisted_intrinsics = nullptr; }; } } diff --git a/libraries/chain/wasm_interface.cpp b/libraries/chain/wasm_interface.cpp index 74af3b88ee5..9fcec326724 100644 --- a/libraries/chain/wasm_interface.cpp +++ b/libraries/chain/wasm_interface.cpp @@ -46,7 +46,9 @@ namespace eosio { namespace chain { wasm_validations::wasm_binary_validation validator(control, module); validator.validate(); - root_resolver resolver(true); + const auto& gpo = control.db().get(); + + root_resolver resolver( gpo.whitelisted_intrinsics ); LinkResult link_result = linkModule(module, resolver); //there are a couple opportunties for improvement here-- diff --git a/libraries/fc b/libraries/fc index 12956c33041..972200d002d 160000 --- a/libraries/fc +++ b/libraries/fc @@ -1 +1 @@ -Subproject commit 12956c330413e69bd998cd0657c8a82ef3e8a106 +Subproject commit 972200d002d7ca2eba52095168745ff5f3a20912 From 36d28d7a8a8bfe496aa0d6086b32c68a28c4f8ef Mon Sep 17 00:00:00 2001 From: Nathan Pierce Date: Thu, 28 Feb 2019 14:00:10 -0500 Subject: [PATCH 046/680] reverting + various changes --- scripts/eosio_build_darwin.sh | 6 ------ scripts/eosio_build_darwin_deps | 4 ++-- 2 files changed, 2 insertions(+), 8 deletions(-) diff --git a/scripts/eosio_build_darwin.sh b/scripts/eosio_build_darwin.sh index 9e07d10d261..e418be9a717 100755 --- a/scripts/eosio_build_darwin.sh +++ b/scripts/eosio_build_darwin.sh @@ -94,12 +94,6 @@ printf "\\nChecking dependencies...\\n" var_ifs="${IFS}" IFS="," while read -r name tester testee brewname uri; do - # For directories, we want to be able to wildcard match versions to prevent upgraded brew packages (minor or patch changes to python for example) from not being seen - if [ $tester == '-d' ] && [ $(echo $testee | grep -c '*') -gt 0 ]; then - for DIR in $testee; do - testee=$(echo $DIR | sed 's/\\//g') - done - fi if [ $tester $testee ]; then printf " - ${name} found!\\n" continue diff --git a/scripts/eosio_build_darwin_deps b/scripts/eosio_build_darwin_deps index 
50ebafc72a8..82eed9fbaec 100755 --- a/scripts/eosio_build_darwin_deps +++ b/scripts/eosio_build_darwin_deps @@ -5,8 +5,8 @@ OpenSSL,-f,/usr/local/opt/openssl/lib/libssl.a,openssl,https://www.openssl.org/s wget,-x,/usr/local/bin/wget,wget,https://ftp.gnu.org/gnu/wget/wget-1.19.2.tar.gz GMP,-f,/usr/local/opt/gmp/include/gmpxx.h,gmp,https://ftp.gnu.org/gnu/gmp/gmp-6.1.2.tar.bz2 llvm,-d,/usr/local/opt/llvm@4,llvm@4,http://releases.llvm.org/4.0.1/llvm-4.0.1.src.tar.xz -python,-d,/usr/local/Cellar/python/3\*,python,https://www.python.org/ftp/python/3.7.2/Python-3.7.2.tgz -python@2,-d,/usr/local/Cellar/python@2/2\*,python@2,https://www.python.org/ftp/python/2.7.13/Python-2.7.13.tgz +python,-d,/usr/local/Cellar/python,python,https://www.python.org/ftp/python/3.7.2/Python-3.7.2.tgz +python@2,-d,/usr/local/Cellar/python@2,python@2,https://www.python.org/ftp/python/2.7.13/Python-2.7.13.tgz doxygen,-f,/usr/local/bin/doxygen,doxygen,http://ftp.stack.nl/pub/users/dimitri/doxygen-1.8.14.src.tar.gz graphviz,-d,/usr/local/opt/graphviz,graphviz,https://fossies.org/linux/misc/graphviz-2.40.1.tar.gz libusb,-f,/usr/local/lib/libusb-1.0.0.dylib,libusb,https://github.com/libusb/libusb/releases/download/v1.0.22/libusb-1.0.22.tar.bz2 From f36a0371c9c795f96afc2c464906544ac3d1b355 Mon Sep 17 00:00:00 2001 From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com> Date: Mon, 25 Feb 2019 15:20:01 -0500 Subject: [PATCH 047/680] Wire up chainbase's new DB modes and huge page support --- libraries/chain/controller.cpp | 21 ++-- .../chain/include/eosio/chain/controller.hpp | 5 + libraries/chainbase | 2 +- plugins/chain_plugin/chain_plugin.cpp | 22 +++- tests/CMakeLists.txt | 2 + tests/db_modes_test.sh | 101 ++++++++++++++++++ unittests/resource_limits_test.cpp | 2 +- 7 files changed, 138 insertions(+), 17 deletions(-) create mode 100755 tests/db_modes_test.sh diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp index 20f5478a079..6f1a3cd74f3 100644 --- a/libraries/chain/controller.cpp +++ b/libraries/chain/controller.cpp @@ -172,10 +172,10 @@ struct controller_impl { :self(s), db( cfg.state_dir, cfg.read_only ? database::read_only : database::read_write, - cfg.state_size ), + cfg.state_size, false, cfg.db_map_mode, cfg.db_hugepage_paths ), reversible_blocks( cfg.blocks_dir/config::reversible_blocks_dir_name, cfg.read_only ? 
database::read_only : database::read_write, - cfg.reversible_cache_size ), + cfg.reversible_cache_size, false, cfg.db_map_mode, cfg.db_hugepage_paths ), blog( cfg.blocks_dir ), fork_db( cfg.state_dir ), wasmif( cfg.wasm_runtime ), @@ -404,9 +404,6 @@ struct controller_impl { ~controller_impl() { pending.reset(); - - db.flush(); - reversible_blocks.flush(); } void add_indices() { @@ -421,14 +418,12 @@ struct controller_impl { void clear_all_undo() { // Rewind the database to the last irreversible block - db.with_write_lock([&] { - db.undo_all(); - /* - FC_ASSERT(db.revision() == self.head_block_num(), - "Chainbase revision does not match head block num", - ("rev", db.revision())("head_block", self.head_block_num())); - */ - }); + db.undo_all(); + /* + FC_ASSERT(db.revision() == self.head_block_num(), + "Chainbase revision does not match head block num", + ("rev", db.revision())("head_block", self.head_block_num())); + */ } void add_contract_tables_to_snapshot( const snapshot_writer_ptr& snapshot ) const { diff --git a/libraries/chain/include/eosio/chain/controller.hpp b/libraries/chain/include/eosio/chain/controller.hpp index f4aa46fa0dd..2aab3179668 100644 --- a/libraries/chain/include/eosio/chain/controller.hpp +++ b/libraries/chain/include/eosio/chain/controller.hpp @@ -2,6 +2,7 @@ #include #include #include +#include #include #include @@ -25,6 +26,7 @@ namespace eosio { namespace chain { struct controller_impl; using chainbase::database; + using chainbase::pinnable_mapped_file; using boost::signals2::signal; class dynamic_global_property_object; @@ -80,6 +82,9 @@ namespace eosio { namespace chain { db_read_mode read_mode = db_read_mode::SPECULATIVE; validation_mode block_validation_mode = validation_mode::FULL; + pinnable_mapped_file::map_mode db_map_mode = pinnable_mapped_file::map_mode::mapped; + vector db_hugepage_paths; + flat_set resource_greylist; flat_set trusted_producers; }; diff --git a/libraries/chainbase b/libraries/chainbase index 8ca96ad6b18..0c0043787db 160000 --- a/libraries/chainbase +++ b/libraries/chainbase @@ -1 +1 @@ -Subproject commit 8ca96ad6b18709d65a7d1f67f8893978f25babcf +Subproject commit 0c0043787db567d748aee3a4cc49d502c4507d1f diff --git a/plugins/chain_plugin/chain_plugin.cpp b/plugins/chain_plugin/chain_plugin.cpp index 74bcccad6b1..53e7555f7e0 100644 --- a/plugins/chain_plugin/chain_plugin.cpp +++ b/plugins/chain_plugin/chain_plugin.cpp @@ -199,6 +199,7 @@ chain_plugin::chain_plugin() :my(new chain_plugin_impl()) { app().register_config_type(); app().register_config_type(); + app().register_config_type(); } chain_plugin::~chain_plugin(){} @@ -250,6 +251,19 @@ void chain_plugin::set_program_options(options_description& cli, options_descrip ("disable-ram-billing-notify-checks", bpo::bool_switch()->default_value(false), "Disable the check which subjectively fails a transaction if a contract bills more RAM to another account within the context of a notification handler (i.e. 
when the receiver is not the code of the action).") ("trusted-producer", bpo::value>()->composing(), "Indicate a producer whose blocks headers signed by it will be fully validated, but transactions in those validated blocks will be trusted.") + ("database-map-mode", bpo::value()->default_value(chainbase::pinnable_mapped_file::map_mode::mapped), + "Database map mode (\"mapped\", \"heap\", or \"locked\").\n" + "In \"mapped\" mode database is memory mapped as a file.\n" + "In \"heap\" mode database is preloaded in to swappable memory.\n" +#ifdef __linux__ + "In \"locked\" mode database is preloaded, locked in to memory, and optionally can use huge pages.\n" +#else + "In \"locked\" mode database is preloaded and locked in to memory.\n" +#endif + ) +#ifdef __linux__ + ("database-hugepage-path", bpo::value>()->composing(), "Optional path for database hugepages when in \"locked\" mode (may specify multiple times)") +#endif ; // TODO: rate limiting @@ -504,8 +518,6 @@ void chain_plugin::plugin_initialize(const variables_map& options) { my->chain_config->blocks_dir / config::reversible_blocks_dir_name ); fc::copy( backup_dir / config::reversible_blocks_dir_name / "shared_memory.bin", my->chain_config->blocks_dir / config::reversible_blocks_dir_name / "shared_memory.bin" ); - fc::copy( backup_dir / config::reversible_blocks_dir_name / "shared_memory.meta", - my->chain_config->blocks_dir / config::reversible_blocks_dir_name / "shared_memory.meta" ); } } } else if( options.at( "replay-blockchain" ).as()) { @@ -651,6 +663,12 @@ void chain_plugin::plugin_initialize(const variables_map& options) { my->chain_config->block_validation_mode = options.at("validation-mode").as(); } + my->chain_config->db_map_mode = options.at("database-map-mode").as(); +#ifdef __linux__ + if( options.count("database-hugepage-path") ) + my->chain_config->db_hugepage_paths = options.at("database-hugepage-path").as>(); +#endif + my->chain.emplace( *my->chain_config ); my->chain_id.emplace( my->chain->get_chain_id()); diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index 66a826fa0e6..68116bab863 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -43,6 +43,7 @@ configure_file(${CMAKE_CURRENT_SOURCE_DIR}/nodeos_voting_test.py ${CMAKE_CURRENT configure_file(${CMAKE_CURRENT_SOURCE_DIR}/consensus-validation-malicious-producers.py ${CMAKE_CURRENT_BINARY_DIR}/consensus-validation-malicious-producers.py COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/validate-dirty-db.py ${CMAKE_CURRENT_BINARY_DIR}/validate-dirty-db.py COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/launcher_test.py ${CMAKE_CURRENT_BINARY_DIR}/launcher_test.py COPYONLY) +configure_file(${CMAKE_CURRENT_SOURCE_DIR}/db_modes_test.sh ${CMAKE_CURRENT_BINARY_DIR}/db_modes_test.sh COPYONLY) #To run plugin_test with all log from blockchain displayed, put --verbose after --, i.e. 
plugin_test -- --verbose add_test(NAME plugin_test --report_level=detailed --color_output) @@ -78,6 +79,7 @@ add_test(NAME validate_dirty_db_test COMMAND tests/validate-dirty-db.py -v --cle set_property(TEST validate_dirty_db_test PROPERTY LABELS nonparallelizable_tests) add_test(NAME launcher_test COMMAND tests/launcher_test.py -v --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST launcher_test PROPERTY LABELS nonparallelizable_tests) +add_test(NAME db_modes_test COMMAND tests/db_modes_test.sh WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) # Long running tests add_test(NAME nodeos_sanity_lr_test COMMAND tests/nodeos_run_test.py -v --sanity-test --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) diff --git a/tests/db_modes_test.sh b/tests/db_modes_test.sh new file mode 100755 index 00000000000..97b10160f7e --- /dev/null +++ b/tests/db_modes_test.sh @@ -0,0 +1,101 @@ +#!/usr/bin/env bash + +# This test is intended to verify that switching between DB modes "just works". Additionally +# it tries to make sure the dirty bit behaves as expected even in heap mode. + +set -euo pipefail + +VERBOSE=0 +TEST_LOCKED_MODE=0 + +while getopts ":lv" opt; do + case ${opt} in + l) + TEST_LOCKED_MODE=1 + ;; + v) + VERBOSE=1 + set -o xtrace + ;; + \?) + echo "Use -v for verbose; -l to enable test of locked mode" + exit 1; + ;; + :) + echo "Invalid option" + exit 1; + ;; + esac +done + +EOSIO_STUFF_DIR=$(mktemp -d) +trap "rm -rf $EOSIO_STUFF_DIR" EXIT +NODEOS_LAUNCH_PARAMS="./programs/nodeos/nodeos -d $EOSIO_STUFF_DIR --config-dir $EOSIO_STUFF_DIR \ +--chain-state-db-size-mb 8 --chain-state-db-guard-size-mb 0 --reversible-blocks-db-size-mb 1 \ +--reversible-blocks-db-guard-size-mb 0 --https-server-address "''" --p2p-listen-endpoint "''" -e -peosio" + +run_nodeos() { + if (( $VERBOSE == 0 )); then + $NODEOS_LAUNCH_PARAMS "$@" 2>/dev/null & + else + $NODEOS_LAUNCH_PARAMS "$@" & + fi +} + +run_expect_success() { + run_nodeos "$@" + local NODEOS_PID=$! + sleep 5 + kill $NODEOS_PID + wait $NODEOS_PID +} + +run_and_kill() { + run_nodeos "$@" + local NODEOS_PID=$! + sleep 5 + kill -KILL $NODEOS_PID + ! wait $NODEOS_PID +} + +run_expect_failure() { + run_nodeos "$@" + local NODEOS_PID=$! + MYPID=$$ + (sleep 10; kill -ALRM $MYPID) & local TIMER_PID=$! + trap "kill $NODEOS_PID; wait $NODEOS_PID; exit 1" ALRM + sleep 5 + if wait $NODEOS_PID; then exit 1; fi + kill $TIMER_PID + trap ALRM +} + +#new chain with mapped mode +run_expect_success --delete-all-blocks +#use previous DB with heap mode +run_expect_success --database-map-mode heap +#test lock mode if enabled +if (( $TEST_LOCKED_MODE == 1 )); then + run_expect_success --database-map-mode locked +fi +#locked mode should fail when it's not possible to lock anything +ulimit -l 0 +run_expect_failure --database-map-mode locked +#But shouldn't result in the dirty flag staying set; so next launch should run +run_expect_success +#Try killing with KILL +run_and_kill +#should be dirty now +run_expect_failure +#should also still be dirty in heap mode +run_expect_failure --database-map-mode heap + +#start over again!
but this time start with heap mode +run_expect_success --delete-all-blocks --database-map-mode heap +#Then switch back to mapped +run_expect_success +#try killing it while in heap mode +run_and_kill --database-map-mode heap +#should be dirty if we run in either mode node +run_expect_failure --database-map-mode heap +run_expect_failure diff --git a/unittests/resource_limits_test.cpp b/unittests/resource_limits_test.cpp index 3bcd8582e4d..9040c5bdc5f 100644 --- a/unittests/resource_limits_test.cpp +++ b/unittests/resource_limits_test.cpp @@ -14,7 +14,7 @@ using namespace eosio::chain::resource_limits; using namespace eosio::testing; using namespace eosio::chain; -class resource_limits_fixture: private chainbase_fixture<512*1024>, public resource_limits_manager +class resource_limits_fixture: private chainbase_fixture<1024*1024>, public resource_limits_manager { public: resource_limits_fixture() From d4fb62a789530650d96d059abd2336f51c1e1bf1 Mon Sep 17 00:00:00 2001 From: Nathan Pierce Date: Thu, 28 Feb 2019 15:24:54 -0500 Subject: [PATCH 048/680] quick fix --- scripts/eosio_build_darwin_deps | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/eosio_build_darwin_deps b/scripts/eosio_build_darwin_deps index 82eed9fbaec..617e188f99c 100755 --- a/scripts/eosio_build_darwin_deps +++ b/scripts/eosio_build_darwin_deps @@ -5,8 +5,8 @@ OpenSSL,-f,/usr/local/opt/openssl/lib/libssl.a,openssl,https://www.openssl.org/s wget,-x,/usr/local/bin/wget,wget,https://ftp.gnu.org/gnu/wget/wget-1.19.2.tar.gz GMP,-f,/usr/local/opt/gmp/include/gmpxx.h,gmp,https://ftp.gnu.org/gnu/gmp/gmp-6.1.2.tar.bz2 llvm,-d,/usr/local/opt/llvm@4,llvm@4,http://releases.llvm.org/4.0.1/llvm-4.0.1.src.tar.xz -python,-d,/usr/local/Cellar/python,python,https://www.python.org/ftp/python/3.7.2/Python-3.7.2.tgz -python@2,-d,/usr/local/Cellar/python@2,python@2,https://www.python.org/ftp/python/2.7.13/Python-2.7.13.tgz +python,-d,/usr/local/opt/python3,python,https://www.python.org/ftp/python/3.7.2/Python-3.7.2.tgz +python@2,-d,/usr/local/opt/python2,python@2,https://www.python.org/ftp/python/2.7.13/Python-2.7.13.tgz doxygen,-f,/usr/local/bin/doxygen,doxygen,http://ftp.stack.nl/pub/users/dimitri/doxygen-1.8.14.src.tar.gz graphviz,-d,/usr/local/opt/graphviz,graphviz,https://fossies.org/linux/misc/graphviz-2.40.1.tar.gz libusb,-f,/usr/local/lib/libusb-1.0.0.dylib,libusb,https://github.com/libusb/libusb/releases/download/v1.0.22/libusb-1.0.22.tar.bz2 From 361acd3ddc176b76722c074d694e6dfa052e9ed4 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Thu, 28 Feb 2019 14:58:47 -0600 Subject: [PATCH 049/680] Add test for appbase stable priority execution queue --- unittests/CMakeLists.txt | 2 +- unittests/misc_tests.cpp | 34 ++++++++++++++++++++++++++++++++++ 2 files changed, 35 insertions(+), 1 deletion(-) diff --git a/unittests/CMakeLists.txt b/unittests/CMakeLists.txt index 3b288f2d2a3..dfb2a029e26 100644 --- a/unittests/CMakeLists.txt +++ b/unittests/CMakeLists.txt @@ -42,7 +42,7 @@ configure_file(${CMAKE_CURRENT_SOURCE_DIR}/contracts.hpp.in ${CMAKE_CURRENT_BINA ### BUILD UNIT TEST EXECUTABLE ### file(GLOB UNIT_TESTS "*.cpp") # find all unit test suites add_executable( unit_test ${UNIT_TESTS}) # build unit tests as one executable -target_link_libraries( unit_test eosio_chain chainbase eosio_testing fc ${PLATFORM_SPECIFIC_LIBS} ) +target_link_libraries( unit_test eosio_chain chainbase eosio_testing fc appbase ${PLATFORM_SPECIFIC_LIBS} ) target_compile_options(unit_test PUBLIC -DDISABLE_EOSLIB_SERIALIZE) 
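The misc_tests addition below asserts that appbase::app().post() executes equal-priority jobs in FIFO order, and the accompanying submodule bump advertises a stable priority queue. A plain std::priority_queue makes no such guarantee; a common way to get stability (and presumably what the bumped appbase does, though the patch does not show its internals) is to tie-break on a monotonically increasing sequence number. A minimal sketch under that assumption:

```cpp
#include <cassert>
#include <cstdint>
#include <functional>
#include <queue>
#include <vector>

struct job {
   int                   priority;
   uint64_t              order; // insertion sequence, breaks priority ties
   std::function<void()> fn;
};

struct job_compare { // max-heap on priority, FIFO within a priority level
   bool operator()( const job& a, const job& b ) const {
      return a.priority < b.priority || ( a.priority == b.priority && a.order > b.order );
   }
};

struct stable_priority_queue {
   uint64_t next_order = 0;
   std::priority_queue<job, std::vector<job>, job_compare> q;

   void post( int priority, std::function<void()> fn ) {
      q.push( job{ priority, next_order++, std::move( fn ) } );
   }
   void run_all() {
      for( ; !q.empty(); q.pop() ) q.top().fn();
   }
};

int main() {
   stable_priority_queue spq;
   std::vector<int> ran;
   for( int i = 0; i < 5; ++i )
      spq.post( /*priority=*/10, [i, &ran]{ ran.push_back( i ); } );
   spq.run_all();
   for( int i = 0; i < 5; ++i )
      assert( ran[i] == i ); // FIFO among equal priorities
}
```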
target_include_directories( unit_test PUBLIC ${CMAKE_SOURCE_DIR}/libraries/testing/include diff --git a/unittests/misc_tests.cpp b/unittests/misc_tests.cpp index 607c78859fd..2b00d8d4e7b 100644 --- a/unittests/misc_tests.cpp +++ b/unittests/misc_tests.cpp @@ -10,6 +10,7 @@ #include #include +#include #include #include @@ -1056,6 +1057,39 @@ BOOST_AUTO_TEST_CASE(reflector_init_test) { } FC_LOG_AND_RETHROW() } +// verify appbase::app().post() uses a stable priority queue so that jobs are executed in order, FIFO, as submitted. +BOOST_AUTO_TEST_CASE(stable_priority_queue_test) { + try { + using namespace std::chrono_literals; + + std::thread t( []() { appbase::app().exec(); } ); + std::atomic ran; + std::mutex mx; + std::vector results; + for( int i = 0; i < 50; ++i ) { + appbase::app().post(appbase::priority::high, [&mx, &ran, &results, i](){ + std::this_thread::sleep_for( 10us ); + std::lock_guard g(mx); + results.push_back( i ); + ++ran; + }); + } + + std::this_thread::sleep_for( 50 * 10us ); // will take at least this long + while( ran < 50 ) std::this_thread::sleep_for( 5us ); + + appbase::app().quit(); + t.join(); + + std::lock_guard g(mx); + BOOST_CHECK_EQUAL( 50, results.size() ); + for( int i = 0; i < 50; ++i ) { + BOOST_CHECK_EQUAL( i, results.at( i ) ); + } + + } FC_LOG_AND_RETHROW() +} + BOOST_AUTO_TEST_SUITE_END() From b0da21db0b1a702115dc754491bf3ca1f63ffdc4 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Thu, 28 Feb 2019 14:59:07 -0600 Subject: [PATCH 050/680] Update to appbase with stable priority queue --- libraries/appbase | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/appbase b/libraries/appbase index da4bf8cb324..5c10377c426 160000 --- a/libraries/appbase +++ b/libraries/appbase @@ -1 +1 @@ -Subproject commit da4bf8cb324225b002b3105da42b62769da94ce9 +Subproject commit 5c10377c426d1905c46d781cbb75a34e79728bca From feb6456701e65758be1e3618d0abffc171b51b91 Mon Sep 17 00:00:00 2001 From: arhag Date: Thu, 28 Feb 2019 16:49:25 -0500 Subject: [PATCH 051/680] bug fixes and improvements #6437 --- libraries/chain/CMakeLists.txt | 1 + libraries/chain/controller.cpp | 3 +- libraries/chain/genesis_intrinsics.cpp | 50 ----------- .../eosio/chain/global_property_object.hpp | 31 ++----- .../include/eosio/chain/wasm_interface.hpp | 17 ++-- .../eosio/chain/whitelisted_intrinsics.hpp | 22 +++++ libraries/chain/whitelisted_intrinsics.cpp | 83 +++++++++++++++++++ 7 files changed, 120 insertions(+), 87 deletions(-) create mode 100644 libraries/chain/include/eosio/chain/whitelisted_intrinsics.hpp create mode 100644 libraries/chain/whitelisted_intrinsics.cpp diff --git a/libraries/chain/CMakeLists.txt b/libraries/chain/CMakeLists.txt index c2a28f9aaa8..820fbcc63f0 100644 --- a/libraries/chain/CMakeLists.txt +++ b/libraries/chain/CMakeLists.txt @@ -50,6 +50,7 @@ add_library( eosio_chain protocol_feature_activation.cpp protocol_feature_manager.cpp genesis_intrinsics.cpp + whitelisted_intrinsics.cpp ${HEADERS} ) diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp index e68ad4c2a8a..fd17368c424 100644 --- a/libraries/chain/controller.cpp +++ b/libraries/chain/controller.cpp @@ -14,6 +14,7 @@ #include #include #include +#include #include #include @@ -800,7 +801,7 @@ struct controller_impl { db.create([&](auto& gpo ){ gpo.configuration = conf.genesis.initial_configuration; for( const auto& i : genesis_intrinsics ) { - gpo.add_intrinsic_to_whitelist( i ); + add_intrinsic_to_whitelist( gpo.whitelisted_intrinsics, i ); } }); db.create([](auto&){}); 
diff --git a/libraries/chain/genesis_intrinsics.cpp b/libraries/chain/genesis_intrinsics.cpp index a40010f7586..be6077acbb1 100644 --- a/libraries/chain/genesis_intrinsics.cpp +++ b/libraries/chain/genesis_intrinsics.cpp @@ -111,56 +111,6 @@ const std::vector genesis_intrinsics = { "db_idx_long_double_end", "db_idx_long_double_next", "db_idx_long_double_previous", - "db_idx64_store", - "db_idx64_remove", - "db_idx64_update", - "db_idx64_find_primary", - "db_idx64_find_secondary", - "db_idx64_lowerbound", - "db_idx64_upperbound", - "db_idx64_end", - "db_idx64_next", - "db_idx64_previous", - "db_idx128_store", - "db_idx128_remove", - "db_idx128_update", - "db_idx128_find_primary", - "db_idx128_find_secondary", - "db_idx128_lowerbound", - "db_idx128_upperbound", - "db_idx128_end", - "db_idx128_next", - "db_idx128_previous", - "db_idx256_store", - "db_idx256_remove", - "db_idx256_update", - "db_idx256_find_primary", - "db_idx256_find_secondary", - "db_idx256_lowerbound", - "db_idx256_upperbound", - "db_idx256_end", - "db_idx256_next", - "db_idx256_previous", - "db_idx_double_store", - "db_idx_double_remove", - "db_idx_double_update", - "db_idx_double_find_primary", - "db_idx_double_find_secondary", - "db_idx_double_lowerbound", - "db_idx_double_upperbound", - "db_idx_double_end", - "db_idx_double_next", - "db_idx_double_previous", - "db_idx_long_double_store", - "db_idx_long_double_remove", - "db_idx_long_double_update", - "db_idx_long_double_find_primary", - "db_idx_long_double_find_secondary", - "db_idx_long_double_lowerbound", - "db_idx_long_double_upperbound", - "db_idx_long_double_end", - "db_idx_long_double_next", - "db_idx_long_double_previous", "db_store_i64", "db_update_i64", "db_remove_i64", diff --git a/libraries/chain/include/eosio/chain/global_property_object.hpp b/libraries/chain/include/eosio/chain/global_property_object.hpp index 0f9579b48a7..851f8e15893 100644 --- a/libraries/chain/include/eosio/chain/global_property_object.hpp +++ b/libraries/chain/include/eosio/chain/global_property_object.hpp @@ -11,6 +11,7 @@ #include #include #include +#include #include #include "multi_index_includes.hpp" @@ -29,30 +30,12 @@ namespace eosio { namespace chain { OBJECT_CTOR(global_property_object, (proposed_schedule)(preactivated_protocol_features)(whitelisted_intrinsics)) public: - - inline void add_intrinsic_to_whitelist( const char* name ) { - uint64_t h = static_cast( std::hash{}( std::string(name) ) ); - whitelisted_intrinsics.emplace( std::piecewise_construct, - std::forward_as_tuple( h ), - std::forward_as_tuple( name, whitelisted_intrinsics.get_allocator() ) - ); - } - - inline void add_intrinsic_to_whitelist( const std::string& name ) { - uint64_t h = static_cast( std::hash{}( name ) ); - whitelisted_intrinsics.emplace( std::piecewise_construct, - std::forward_as_tuple( h ), - std::forward_as_tuple( name.c_str(), name.size(), - whitelisted_intrinsics.get_allocator() ) - ); - } - - id_type id; - optional proposed_schedule_block_num; - shared_producer_schedule_type proposed_schedule; - chain_config configuration; - shared_vector preactivated_protocol_features; - shared_flat_multimap whitelisted_intrinsics; + id_type id; + optional proposed_schedule_block_num; + shared_producer_schedule_type proposed_schedule; + chain_config configuration; + shared_vector preactivated_protocol_features; + whitelisted_intrinsics_type whitelisted_intrinsics; }; diff --git a/libraries/chain/include/eosio/chain/wasm_interface.hpp b/libraries/chain/include/eosio/chain/wasm_interface.hpp index 
38c6b82c063..3bc971cdd3d 100644 --- a/libraries/chain/include/eosio/chain/wasm_interface.hpp +++ b/libraries/chain/include/eosio/chain/wasm_interface.hpp @@ -1,5 +1,6 @@ #pragma once #include +#include #include #include "Runtime/Linker.h" #include "Runtime/Runtime.h" @@ -26,7 +27,7 @@ namespace eosio { namespace chain { root_resolver() {} - root_resolver( const shared_flat_multimap& whitelisted_intrinsics ) + root_resolver( const whitelisted_intrinsics_type& whitelisted_intrinsics ) :whitelisted_intrinsics(&whitelisted_intrinsics) {} @@ -44,16 +45,8 @@ namespace eosio { namespace chain { "importing from module that is not 'env': ${module}.${export}", ("module",mod_name)("export",export_name) ); - // Only consider imports that are in the whitelisted set of intrinsics: - uint64_t hash = static_cast( std::hash{}( export_name ) ); - auto itr = whitelisted_intrinsics->lower_bound( hash ); - fail = true; - for( const auto end = whitelisted_intrinsics->end(); itr != end && itr->first == hash; ++itr ) { - if( itr->second.compare( 0, itr->second.size(), export_name.c_str(), export_name.size() ) == 0 ) { - fail = false; - break; - } - } + // Only consider imports that are in the whitelisted set of intrinsics + fail = !is_intrinsic_whitelisted( *whitelisted_intrinsics, export_name ); } // Try to resolve an intrinsic first. @@ -67,7 +60,7 @@ namespace eosio { namespace chain { } FC_CAPTURE_AND_RETHROW( (mod_name)(export_name) ) } protected: - const shared_flat_multimap* whitelisted_intrinsics = nullptr; + const whitelisted_intrinsics_type* whitelisted_intrinsics = nullptr; }; } } diff --git a/libraries/chain/include/eosio/chain/whitelisted_intrinsics.hpp b/libraries/chain/include/eosio/chain/whitelisted_intrinsics.hpp new file mode 100644 index 00000000000..d9466114afe --- /dev/null +++ b/libraries/chain/include/eosio/chain/whitelisted_intrinsics.hpp @@ -0,0 +1,22 @@ + +/** + * @file + * @copyright defined in eos/LICENSE + */ +#pragma once + +#include + +namespace eosio { namespace chain { + + using whitelisted_intrinsics_type = shared_flat_multimap; + + // TODO: Improve performance by using std::string_view when we switch to C++17. 
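Where the TODO above points: with C++17 the bucket scan can take a std::string_view, so callers holding an import name do not need to materialize a temporary std::string. A hedged sketch of what that variant might look like (hypothetical helper, not part of this patch; the standard guarantees std::hash<std::string> and std::hash<std::string_view> agree on equal character sequences):

```cpp
#include <cstdint>
#include <functional>
#include <map>
#include <string>
#include <string_view>

// hypothetical C++17 flavor of the lookup helper: the caller's name can be a
// string_view over WASM import data, with no temporary std::string constructed
bool is_intrinsic_whitelisted_sv( const std::multimap<uint64_t, std::string>& wl,
                                  std::string_view name ) {
   const uint64_t h = static_cast<uint64_t>( std::hash<std::string_view>{}( name ) );
   for( auto itr = wl.lower_bound( h ); itr != wl.end() && itr->first == h; ++itr ) {
      if( std::string_view( itr->second ) == name ) return true; // compare contents
   }
   return false;
}
```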
+ + bool is_intrinsic_whitelisted( const whitelisted_intrinsics_type& whitelisted_intrinsics, const std::string& name ); + + void add_intrinsic_to_whitelist( whitelisted_intrinsics_type& whitelisted_intrinsics, const std::string& name ); + + void remove_intrinsic_from_whitelist( whitelisted_intrinsics_type& whitelisted_intrinsics, const std::string& name ); + +} } // namespace eosio::chain diff --git a/libraries/chain/whitelisted_intrinsics.cpp b/libraries/chain/whitelisted_intrinsics.cpp new file mode 100644 index 00000000000..7b5ca67b86d --- /dev/null +++ b/libraries/chain/whitelisted_intrinsics.cpp @@ -0,0 +1,83 @@ +/** + * @file + * @copyright defined in eos/LICENSE + */ +#include +#include + +namespace eosio { namespace chain { + + template + bool find_intrinsic_helper( uint64_t h, const std::string& name, Iterator& itr, const Iterator& end ) { + for( ; itr != end && itr->first == h; ++itr ) { + if( itr->second.compare( 0, itr->second.size(), name.c_str(), name.size() ) == 0 ) { + return true; + } + } + + return false; + } + + whitelisted_intrinsics_type::iterator + find_intrinsic( whitelisted_intrinsics_type& whitelisted_intrinsics, uint64_t h, const std::string& name ) + { + auto itr = whitelisted_intrinsics.lower_bound( h ); + const auto end = whitelisted_intrinsics.end(); + + if( !find_intrinsic_helper( h, name, itr, end ) ) + return end; + + return itr; + } + + whitelisted_intrinsics_type::const_iterator + find_intrinsic( const whitelisted_intrinsics_type& whitelisted_intrinsics, uint64_t h, const std::string& name ) + { + auto itr = whitelisted_intrinsics.lower_bound( h ); + const auto end = whitelisted_intrinsics.end(); + + if( !find_intrinsic_helper( h, name, itr, end ) ) + return end; + + return itr; + } + + bool is_intrinsic_whitelisted( const whitelisted_intrinsics_type& whitelisted_intrinsics, const std::string& name ) + { + uint64_t h = static_cast( std::hash{}( name ) ); + auto itr = whitelisted_intrinsics.lower_bound( h ); + const auto end = whitelisted_intrinsics.end(); + + return find_intrinsic_helper( h, name, itr, end ); + } + + + void add_intrinsic_to_whitelist( whitelisted_intrinsics_type& whitelisted_intrinsics, const std::string& name ) + { + uint64_t h = static_cast( std::hash{}( name ) ); + auto itr = find_intrinsic( whitelisted_intrinsics, h, name ); + EOS_ASSERT( itr == whitelisted_intrinsics.end(), database_exception, + "cannot add intrinsic '${name}' since it already exists in the whitelist", + ("name", name) + ); + + whitelisted_intrinsics.emplace( std::piecewise_construct, + std::forward_as_tuple( h ), + std::forward_as_tuple( name.c_str(), name.size(), + whitelisted_intrinsics.get_allocator() ) + ); + } + + void remove_intrinsic_from_whitelist( whitelisted_intrinsics_type& whitelisted_intrinsics, const std::string& name ) + { + uint64_t h = static_cast( std::hash{}( name ) ); + auto itr = find_intrinsic( whitelisted_intrinsics, h, name ); + EOS_ASSERT( itr != whitelisted_intrinsics.end(), database_exception, + "cannot remove intrinsic '${name}' since it does not exist in the whitelist", + ("name", name) + ); + + whitelisted_intrinsics.erase( itr ); + } + +} } From 2ae9f4e568c53510e8af5da7670058a7243a5400 Mon Sep 17 00:00:00 2001 From: Kayan Date: Fri, 1 Mar 2019 15:43:35 +0800 Subject: [PATCH 052/680] fix replay & transit --- libraries/chain/controller.cpp | 177 +++++++++++++++++------------- libraries/chain/fork_database.cpp | 20 ++-- 2 files changed, 110 insertions(+), 87 deletions(-) diff --git a/libraries/chain/controller.cpp 
b/libraries/chain/controller.cpp index 4deec8a04b3..a8802590edb 100644 --- a/libraries/chain/controller.cpp +++ b/libraries/chain/controller.cpp @@ -436,7 +436,14 @@ struct controller_impl { const auto& rbi = reversible_blocks.get_index(); - { + if (read_mode == db_read_mode::IRREVERSIBLE) { + // ensure there's no reversible_blocks in irreversible mode + auto rbitr = rbi.begin(); + while ( rbitr != rbi.end() ) { + reversible_blocks.remove( *rbitr ); + rbitr = rbi.begin(); + } + } else { auto rbitr = rbi.rbegin(); if( rbitr != rbi.rend() ) { EOS_ASSERT( blog_head, fork_database_exception, @@ -465,10 +472,12 @@ struct controller_impl { if( !head ) { initialize_blockchain_state(); // set head to genesis state } else { - EOS_ASSERT( last_block_num == head->block_num, fork_database_exception, - "reversible block database is inconsistent with fork database, replay blockchain", - ("head", head->block_num)("last_block_num", last_block_num) - ); + if (read_mode != db_read_mode::IRREVERSIBLE) { + EOS_ASSERT( last_block_num == head->block_num, fork_database_exception, + "reversible block database is inconsistent with fork database, replay blockchain", + ("head", head->block_num)("last_block_num", last_block_num) + ); + } } if( !blog_head ) { @@ -485,6 +494,52 @@ struct controller_impl { } } + // handle read_mode transition (probably reconstruct reversible_blocks) + if (read_mode != db_read_mode::IRREVERSIBLE) { // speculative mode + if (head->block_num < fork_db.pending_head()->block_num) { + // irreversible mode => speculative mode + wlog("db_read_mode has been changed: reconstruct reversible_blocks & forwarding state from #${h} to fork pending head #${fh}", ("h", head->block_num)("fh", fork_db.pending_head()->block_num)); + + // rebuild reversible_blocks from fork_db + auto rbitr = rbi.begin(); + while ( rbitr != rbi.end() ) { + reversible_blocks.remove( *rbitr ); + rbitr = rbi.begin(); + } + + block_state_ptr bsp; + vector head_to_lib; + auto id = fork_db.pending_head()->id; + while (id != head->id && (bsp = fork_db.get_block(id))) { + head_to_lib.push_back(bsp); + id = bsp->header.previous; + } + + auto blog_head = blog.head(); + auto blog_head_time = blog_head->timestamp.to_time_point(); + replay_head_time = blog_head_time; + for (auto itr = head_to_lib.rbegin(); itr != head_to_lib.rend(); ++itr) { + reversible_blocks.create( [&]( auto& ubo ) { + ubo.blocknum = (*itr)->block_num; + ubo.set_block( (*itr)->block ); + }); + replay_push_block( (*itr)->block, controller::block_status::validated, false); + } + replay_head_time.reset(); + } + } else { // irreversible mode + uint32_t target_lib = fork_db.root()->block_num; + if (head->block_num > target_lib) { + // speculative mode => irreversible mode + wlog("db_read_mode has been changed, rolling back state from block #${o} to lib block #${l}", ("o", head->block_num)("l", target_lib)); + // let's go backward to lib(fork_db root) + while (head->block_num > target_lib) { + pop_block(); + } + fork_db.rollback_head_to_root(); + } + } + bool report_integrity_hash = !!snapshot || (lib_num > head->block_num); // Trim any irreversible blocks from start of reversible blocks database @@ -521,29 +576,7 @@ struct controller_impl { if( report_integrity_hash ) { const auto hash = calculate_integrity_hash(); ilog( "database initialized with hash: ${hash}", ("hash", hash) ); - } - - if (head && fork_db.pending_head() && fork_db.root()) { - if (read_mode != db_read_mode::IRREVERSIBLE) { - if (head->block_num < fork_db.pending_head()->block_num) { - // irreversible 
mode => speculative mode - wlog("db_read_mode has been changed: forwarding state from block ${h} to fork pending head ${fh}", ("h", head->block_num)("fh", fork_db.pending_head()->block_num)); - // let's go forward from lib to pending head, and set head as pending head - maybe_switch_forks( fork_db.pending_head(), controller::block_status::validated ); - } - } else { - // speculative mode => irreversible mode - uint32_t target_lib = fork_db.root()->block_num; - if (head->block_num > target_lib) { - wlog("db_read_mode has been changed, rolling back state from block ${o} to lib block ${l}", ("o", head->block_num)("l", target_lib)); - // let's go backward to lib(fork_db root) - while (head->block_num > target_lib) { - pop_block(); - } - fork_db.rollback_head_to_root(); - } - } - } + } } ~controller_impl() { @@ -1489,7 +1522,7 @@ struct controller_impl { } FC_LOG_AND_RETHROW( ) } - void replay_push_block( const signed_block_ptr& b, controller::block_status s ) { + void replay_push_block( const signed_block_ptr& b, controller::block_status s, bool add_to_fork_db = true ) { self.validate_db_available_size(); self.validate_reversible_available_size(); @@ -1505,7 +1538,7 @@ struct controller_impl { // need to cater the irreversible mode case where head is not advancing auto bsp = std::make_shared((read_mode == db_read_mode::IRREVERSIBLE && s != controller::block_status::irreversible && fork_db.pending_head()) ? *fork_db.pending_head() : *head, b, skip_validate_signee ); - if( s != controller::block_status::irreversible ) { + if (add_to_fork_db && s != controller::block_status::irreversible ) { fork_db.add( bsp ); } @@ -1544,52 +1577,49 @@ struct controller_impl { ("current_head_id", head->id)("current_head_num", head->block_num)("new_head_id", new_head->id)("new_head_num", new_head->block_num) ); auto branches = fork_db.fetch_branch_from( new_head->id, head->id ); - if (branches.second.size()) { // cater a case of switching from fork pending head to lib - for( auto itr = branches.second.begin(); itr != branches.second.end(); ++itr ) { - pop_block(); - } - EOS_ASSERT( self.head_block_id() == branches.second.back()->header.previous, fork_database_exception, - "loss of sync between fork_db and chainbase during fork switch" ); // _should_ never fail + for( auto itr = branches.second.begin(); itr != branches.second.end(); ++itr ) { + pop_block(); } + EOS_ASSERT( self.head_block_id() == branches.second.back()->header.previous, fork_database_exception, + "loss of sync between fork_db and chainbase during fork switch" ); // _should_ never fail + + for( auto ritr = branches.first.rbegin(); ritr != branches.first.rend(); ++ritr ) { + optional except; + try { + apply_block( (*ritr)->block, (*ritr)->is_valid() ? controller::block_status::validated : controller::block_status::complete ); + fork_db.mark_valid( *ritr ); + head = *ritr; + } catch (const fc::exception& e) { + except = e; + } + if( except ) { + elog("exception thrown while switching forks ${e}", ("e", except->to_detail_string())); + + // ritr currently points to the block that threw + // Remove the block that threw and all forks built off it. + fork_db.remove( (*ritr)->id ); + + EOS_ASSERT( head->id == fork_db.head()->id, fork_database_exception, + "loss of sync between fork_db and controller head during fork switch error" ); - if (branches.first.size()) { - for( auto ritr = branches.first.rbegin(); ritr != branches.first.rend(); ++ritr ) { - optional except; - try { - apply_block( (*ritr)->block, (*ritr)->is_valid() ? 
controller::block_status::validated : controller::block_status::complete ); - fork_db.mark_valid( *ritr ); + // pop all blocks from the bad fork + // ritr base is a forward itr to the last block successfully applied + auto applied_itr = ritr.base(); + for( auto itr = applied_itr; itr != branches.first.end(); ++itr ) { + pop_block(); + } + EOS_ASSERT( self.head_block_id() == branches.second.back()->header.previous, fork_database_exception, + "loss of sync between fork_db and chainbase during fork switch reversal" ); // _should_ never fail + + // re-apply good blocks + for( auto ritr = branches.second.rbegin(); ritr != branches.second.rend(); ++ritr ) { + apply_block( (*ritr)->block, controller::block_status::validated /* we previously validated these blocks*/ ); head = *ritr; - } catch (const fc::exception& e) { - except = e; } - if( except ) { - elog("exception thrown while switching forks ${e}", ("e", except->to_detail_string())); - - // ritr currently points to the block that threw - // Remove the block that threw and all forks built off it. - fork_db.remove( (*ritr)->id ); - - EOS_ASSERT( head->id == fork_db.head()->id, fork_database_exception, - "loss of sync between fork_db and controller head during fork switch error" ); - - // pop all blocks from the bad fork - // ritr base is a forward itr to the last block successfully applied - auto applied_itr = ritr.base(); - for( auto itr = applied_itr; itr != branches.first.end(); ++itr ) { - pop_block(); - } - EOS_ASSERT( self.head_block_id() == branches.second.back()->header.previous, fork_database_exception, - "loss of sync between fork_db and chainbase during fork switch reversal" ); // _should_ never fail - - // re-apply good blocks - for( auto ritr = branches.second.rbegin(); ritr != branches.second.rend(); ++ritr ) { - apply_block( (*ritr)->block, controller::block_status::validated /* we previously validated these blocks*/ ); - head = *ritr; - } - throw *except; - } // end if exception - } /// end for each block in branch - } + throw *except; + } // end if exception + } /// end for each block in branch + ilog("successfully switched fork to new head ${new_head_id}", ("new_head_id", new_head->id)); } else { head_changed = false; @@ -2100,8 +2130,7 @@ const vector& controller::get_pending_trx_receipts()const { uint32_t controller::last_irreversible_block_num() const { uint32_t lib_num = (my->read_mode == db_read_mode::IRREVERSIBLE) - ? std::max(my->fork_db.pending_head()->dpos_irreversible_blocknum, my->fork_db.root()->block_num) - : my->head->dpos_irreversible_blocknum; + ? 
my->fork_db.root()->block_num : my->head->dpos_irreversible_blocknum; return std::max( lib_num, my->snapshot_head_block ); } diff --git a/libraries/chain/fork_database.cpp b/libraries/chain/fork_database.cpp index e4816a5d3a3..f7f9530b835 100644 --- a/libraries/chain/fork_database.cpp +++ b/libraries/chain/fork_database.cpp @@ -220,16 +220,13 @@ namespace eosio { namespace chain { void fork_database::rollback_head_to_root() { auto& by_id_idx = my->index.get(); - - // root probably not exist in index auto itr = by_id_idx.begin(); while (itr != by_id_idx.end()) { by_id_idx.modify( itr, [&]( block_state_ptr& bsp ) { - bsp->validated = (bsp->id == my->root->id); + bsp->validated = false; } ); ++itr; } - my->head = my->root; } @@ -338,21 +335,18 @@ namespace eosio { namespace chain { pair< branch_type, branch_type > fork_database::fetch_branch_from( const block_id_type& first, const block_id_type& second )const { pair result; - auto first_branch = get_block(first); - auto second_branch = get_block(second); + auto first_branch = (first == my->root->id) ? my->root : get_block(first); + auto second_branch = (second == my->root->id) ? my->root : get_block(second); - // need to handle a case where first or second is the root - if (!first_branch && my->root && first == my->root->id) first_branch = my->root; - if (!second_branch && my->root && second == my->root->id) second_branch = my->root; + EOS_ASSERT(first_branch, fork_db_block_not_found, "block ${id} does not exist", ("id", first)); + EOS_ASSERT(second_branch, fork_db_block_not_found, "block ${id} does not exist", ("id", second)); while( first_branch->block_num > second_branch->block_num ) { result.first.push_back(first_branch); - auto prev = first_branch->header.previous; + const auto &prev = first_branch->header.previous; first_branch = get_block( first_branch->header.previous ); - if (!first_branch && my->root && prev == my->root->id) first_branch = my->root; - EOS_ASSERT( first_branch, fork_db_block_not_found, "block ${id} does not exist", ("id", prev) ); @@ -361,7 +355,7 @@ namespace eosio { namespace chain { while( second_branch->block_num > first_branch->block_num ) { result.second.push_back( second_branch ); - auto prev = second_branch->header.previous; + const auto &prev = second_branch->header.previous; second_branch = get_block( second_branch->header.previous ); if (!second_branch && my->root && prev == my->root->id) second_branch = my->root; EOS_ASSERT( second_branch, fork_db_block_not_found, From 812631a77e955b4f1af7736f6c010053673dc0d5 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Fri, 1 Mar 2019 07:47:48 -0600 Subject: [PATCH 053/680] Fix test, init atomic int. 
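A default-constructed std::atomic<T> holds an indeterminate value prior to C++20 (P0883
changed this), so the test's `while( ran < 50 )` polling loop was reading uninitialized
memory. A minimal standalone illustration of the pitfall and the fix (not the test code
itself):

   #include <atomic>

   void example() {
      std::atomic<int> ran_bad;     // default-constructed: indeterminate value before C++20
      std::atomic<int> ran_good{0}; // value-initialized: safe to read immediately
   }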
--- unittests/misc_tests.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unittests/misc_tests.cpp b/unittests/misc_tests.cpp index 2b00d8d4e7b..c5a49cb48d4 100644 --- a/unittests/misc_tests.cpp +++ b/unittests/misc_tests.cpp @@ -1063,7 +1063,7 @@ BOOST_AUTO_TEST_CASE(stable_priority_queue_test) { using namespace std::chrono_literals; std::thread t( []() { appbase::app().exec(); } ); - std::atomic ran; + std::atomic ran{0}; std::mutex mx; std::vector results; for( int i = 0; i < 50; ++i ) { From 2b7527913cdbab07133426a3ad5255b78b571451 Mon Sep 17 00:00:00 2001 From: Haijun Yang Date: Fri, 1 Mar 2019 22:35:28 +0800 Subject: [PATCH 054/680] enable expiring data in mongodb --- plugins/mongo_db_plugin/mongo_db_plugin.cpp | 57 +++++++++++++++++++++ 1 file changed, 57 insertions(+) diff --git a/plugins/mongo_db_plugin/mongo_db_plugin.cpp b/plugins/mongo_db_plugin/mongo_db_plugin.cpp index ee22c65c7cc..8131b6a2bb2 100644 --- a/plugins/mongo_db_plugin/mongo_db_plugin.cpp +++ b/plugins/mongo_db_plugin/mongo_db_plugin.cpp @@ -118,6 +118,7 @@ class mongo_db_plugin_impl { void init(); void wipe_database(); + void create_expiration_index(mongocxx::collection& collection, uint32_t expire_after_seconds); template void queue(Queue& queue, const Entry& e); @@ -136,6 +137,7 @@ class mongo_db_plugin_impl { bool store_transactions = true; bool store_transaction_traces = true; bool store_action_traces = true; + uint32_t expire_after_seconds = 0; std::string db_name; mongocxx::instance mongo_inst; @@ -1397,6 +1399,39 @@ void mongo_db_plugin_impl::wipe_database() { ilog("done wipe_database"); } +void mongo_db_plugin_impl::create_expiration_index(mongocxx::collection& collection, uint32_t expire_after_seconds) { + using bsoncxx::builder::basic::make_document; + using bsoncxx::builder::basic::kvp; + + auto indexes = collection.indexes(); + for( auto& index : indexes.list()) { + auto key = index["key"]; + if( !key ) { + continue; + } + auto field = key["createdAt"]; + if( !field ) { + continue; + } + + auto ttl = index["expireAfterSeconds"]; + if( ttl && ttl.get_int32() == expire_after_seconds ) { + return; + } else { + auto name = index["name"].get_utf8(); + ilog( "mongo db drop ttl index for collection ${collection}", ( "collection", collection.name().to_string())); + indexes.drop_one( name.value ); + break; + } + } + + mongocxx::options::index index_options{}; + index_options.expire_after( std::chrono::seconds( expire_after_seconds )); + index_options.background( true ); + ilog( "mongo db create ttl index for collection ${collection}", ( "collection", collection.name().to_string())); + collection.create_index( make_document( kvp( "createdAt", 1 )), index_options ); +} + void mongo_db_plugin_impl::init() { using namespace bsoncxx::types; using bsoncxx::builder::basic::make_document; @@ -1465,6 +1500,23 @@ void mongo_db_plugin_impl::init() { handle_mongo_exception( "create indexes", __LINE__ ); } } + + if( expire_after_seconds > 0 ) { + try { + mongocxx::collection block_states = mongo_conn[db_name][block_states_col]; + create_expiration_index( block_states, expire_after_seconds ); + mongocxx::collection blocks = mongo_conn[db_name][blocks_col]; + create_expiration_index( blocks, expire_after_seconds ); + mongocxx::collection trans = mongo_conn[db_name][trans_col]; + create_expiration_index( trans, expire_after_seconds ); + mongocxx::collection trans_traces = mongo_conn[db_name][trans_traces_col]; + create_expiration_index( trans_traces, expire_after_seconds ); + mongocxx::collection 
action_traces = mongo_conn[db_name][action_traces_col]; + create_expiration_index( action_traces, expire_after_seconds ); + } catch(...) { + handle_mongo_exception( "create expiration indexes", __LINE__ ); + } + } } catch (...) { handle_mongo_exception( "mongo init", __LINE__ ); } @@ -1517,6 +1569,8 @@ void mongo_db_plugin::set_program_options(options_description& cli, options_desc "Enables storing transaction traces in mongodb.") ("mongodb-store-action-traces", bpo::value()->default_value(true), "Enables storing action traces in mongodb.") + ("mongodb-expire-after-seconds", bpo::value()->default_value(0), + "Enables expiring data in mongodb after a specified number of seconds.") ("mongodb-filter-on", bpo::value>()->composing(), "Track actions which match receiver:action:actor. Receiver, Action, & Actor may be blank to include all. i.e. eosio:: or :transfer: Use * or leave unspecified to include all.") ("mongodb-filter-out", bpo::value>()->composing(), @@ -1574,6 +1628,9 @@ void mongo_db_plugin::plugin_initialize(const variables_map& options) if( options.count( "mongodb-store-action-traces" )) { my->store_action_traces = options.at( "mongodb-store-action-traces" ).as(); } + if( options.count( "mongodb-expire-after-seconds" )) { + my->expire_after_seconds = options.at( "mongodb-expire-after-seconds" ).as(); + } if( options.count( "mongodb-filter-on" )) { auto fo = options.at( "mongodb-filter-on" ).as>(); my->filter_on_star = false; From 8974cf783d91be9ba04d283e320bb73f4d72260a Mon Sep 17 00:00:00 2001 From: arhag Date: Fri, 1 Mar 2019 11:11:44 -0500 Subject: [PATCH 055/680] fix bug in apply_block which constructed a completed block state with a missing signature in the block #6851 Also adds unit test to check for this case. --- libraries/chain/controller.cpp | 2 +- .../chain/include/eosio/chain/block_state.hpp | 6 +-- unittests/forked_tests.cpp | 39 +++++++++++++++++++ 3 files changed, 43 insertions(+), 4 deletions(-) diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp index ea3ac8f0d05..46cd7db778f 100644 --- a/libraries/chain/controller.cpp +++ b/libraries/chain/controller.cpp @@ -1399,7 +1399,7 @@ struct controller_impl { auto bsp = std::make_shared( std::move( ab._pending_block_header_state ), - std::move( ab._unsigned_block ), + b, std::move( ab._trx_metas ), true // signature should have already been verified (assuming untrusted) prior to apply_block ); diff --git a/libraries/chain/include/eosio/chain/block_state.hpp b/libraries/chain/include/eosio/chain/block_state.hpp index 94defc13cfd..1b8e659a083 100644 --- a/libraries/chain/include/eosio/chain/block_state.hpp +++ b/libraries/chain/include/eosio/chain/block_state.hpp @@ -18,9 +18,9 @@ namespace eosio { namespace chain { ); block_state( pending_block_header_state&& cur, - signed_block_ptr&& b, // unsigned block - vector&& trx_metas, - const std::function& signer + signed_block_ptr&& b, // unsigned block + vector&& trx_metas, + const std::function& signer ); block_state( pending_block_header_state&& cur, diff --git a/unittests/forked_tests.cpp b/unittests/forked_tests.cpp index 6e412bfbccc..07c1d48c1aa 100644 --- a/unittests/forked_tests.cpp +++ b/unittests/forked_tests.cpp @@ -374,6 +374,45 @@ BOOST_AUTO_TEST_CASE( prune_remove_branch ) try { } FC_LOG_AND_RETHROW() +/** + * Tests that a validating node does not accept a block which is considered invalid by another node. 
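+ * In addition to checking that a relayed block is accepted, the test below verifies that
+ * the block state delivered through the accepted_block signal carries the producer
+ * signature, which is what the fix above guarantees.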
+ */ +BOOST_AUTO_TEST_CASE( validator_accepts_valid_blocks ) try { + + tester n1; + tester n2; + tester n3; + + n1.produce_block(); + + auto id = n1.control->head_block_id(); + + block_state_ptr first_block; + + auto c = n2.control->accepted_block.connect( [&]( const block_state_ptr& bsp) { + if( bsp->block_num == 2 ) { + first_block = bsp; + } + } ); + + push_blocks( n1, n2 ); + + BOOST_CHECK_EQUAL( n2.control->head_block_id(), id ); + + BOOST_REQUIRE( first_block ); + first_block->verify_signee( first_block->signee() ); + BOOST_CHECK_EQUAL( first_block->header.id(), first_block->block->id() ); + BOOST_CHECK( first_block->header.producer_signature == first_block->block->producer_signature ); + + c.disconnect(); + + n3.push_block( first_block->block ); + + BOOST_CHECK_EQUAL( n3.control->head_block_id(), id ); + + +} FC_LOG_AND_RETHROW() + BOOST_AUTO_TEST_CASE( read_modes ) try { tester c; c.produce_block(); From b6aa52670becaf302f5ed01538bf229b0c565db3 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Fri, 1 Mar 2019 11:44:52 -0600 Subject: [PATCH 056/680] Update test to test execution_priority_queue directly instead of through application singleton. --- unittests/misc_tests.cpp | 25 +++++++++++++++++++------ 1 file changed, 19 insertions(+), 6 deletions(-) diff --git a/unittests/misc_tests.cpp b/unittests/misc_tests.cpp index c5a49cb48d4..cbbc7acce21 100644 --- a/unittests/misc_tests.cpp +++ b/unittests/misc_tests.cpp @@ -10,7 +10,7 @@ #include #include -#include +#include #include #include @@ -1057,28 +1057,41 @@ BOOST_AUTO_TEST_CASE(reflector_init_test) { } FC_LOG_AND_RETHROW() } -// verify appbase::app().post() uses a stable priority queue so that jobs are executed in order, FIFO, as submitted. +// Verify appbase::execution_priority_queue uses a stable priority queue so that jobs are executed +// in order, FIFO, as submitted. 
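+// The worker thread below first drains the handlers posted to the io_service (each wrapped
+// handler enqueues its job into pri_queue) and then repeatedly runs the highest-priority
+// queued job via execute_highest() until the queue is empty.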
BOOST_AUTO_TEST_CASE(stable_priority_queue_test) { try { using namespace std::chrono_literals; - std::thread t( []() { appbase::app().exec(); } ); + appbase::execution_priority_queue pri_queue; + auto io_serv = std::make_shared(); + auto work_ptr = std::make_unique(*io_serv); + + std::thread t( [io_serv, &pri_queue]() { + bool more = true; + while( more || io_serv->run_one() ) { + while( io_serv->poll_one() ) {} + // execute the highest priority item + more = pri_queue.execute_highest(); + } + } ); std::atomic ran{0}; std::mutex mx; std::vector results; for( int i = 0; i < 50; ++i ) { - appbase::app().post(appbase::priority::high, [&mx, &ran, &results, i](){ + boost::asio::post(*io_serv, pri_queue.wrap(appbase::priority::high, [io_serv, &mx, &ran, &results, i](){ std::this_thread::sleep_for( 10us ); std::lock_guard g(mx); results.push_back( i ); ++ran; - }); + })); } std::this_thread::sleep_for( 50 * 10us ); // will take at least this long while( ran < 50 ) std::this_thread::sleep_for( 5us ); - appbase::app().quit(); + work_ptr.reset(); + io_serv->stop(); t.join(); std::lock_guard g(mx); From 6a724fcb2983e0166ea050501d2c3a7ebaaf9d19 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Fri, 1 Mar 2019 13:57:01 -0600 Subject: [PATCH 057/680] Test priority queue honors priority along with FIFO --- unittests/misc_tests.cpp | 20 +++++++++++++++----- 1 file changed, 15 insertions(+), 5 deletions(-) diff --git a/unittests/misc_tests.cpp b/unittests/misc_tests.cpp index cbbc7acce21..77b52c85634 100644 --- a/unittests/misc_tests.cpp +++ b/unittests/misc_tests.cpp @@ -1066,8 +1066,12 @@ BOOST_AUTO_TEST_CASE(stable_priority_queue_test) { appbase::execution_priority_queue pri_queue; auto io_serv = std::make_shared(); auto work_ptr = std::make_unique(*io_serv); + std::atomic posted{0}; - std::thread t( [io_serv, &pri_queue]() { + std::thread t( [io_serv, &pri_queue, &posted]() { + while( posted < 100 && io_serv->run_one() ) { + ++posted; + } bool more = true; while( more || io_serv->run_one() ) { while( io_serv->poll_one() ) {} @@ -1079,6 +1083,12 @@ BOOST_AUTO_TEST_CASE(stable_priority_queue_test) { std::mutex mx; std::vector results; for( int i = 0; i < 50; ++i ) { + boost::asio::post(*io_serv, pri_queue.wrap(appbase::priority::low, [io_serv, &mx, &ran, &results, i](){ + std::this_thread::sleep_for( 10us ); + std::lock_guard g(mx); + results.push_back( 50 + i ); + ++ran; + })); boost::asio::post(*io_serv, pri_queue.wrap(appbase::priority::high, [io_serv, &mx, &ran, &results, i](){ std::this_thread::sleep_for( 10us ); std::lock_guard g(mx); @@ -1087,16 +1097,16 @@ BOOST_AUTO_TEST_CASE(stable_priority_queue_test) { })); } - std::this_thread::sleep_for( 50 * 10us ); // will take at least this long - while( ran < 50 ) std::this_thread::sleep_for( 5us ); + std::this_thread::sleep_for( 100 * 10us ); // will take at least this long + while( ran < 100 ) std::this_thread::sleep_for( 5us ); work_ptr.reset(); io_serv->stop(); t.join(); std::lock_guard g(mx); - BOOST_CHECK_EQUAL( 50, results.size() ); - for( int i = 0; i < 50; ++i ) { + BOOST_CHECK_EQUAL( 100, results.size() ); + for( int i = 0; i < 100; ++i ) { BOOST_CHECK_EQUAL( i, results.at( i ) ); } From 07731872d8f9ad9d55f951f74125c4aa853ded10 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Fri, 1 Mar 2019 14:03:48 -0600 Subject: [PATCH 058/680] Sleeps no longer needed. 
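The stability exercised by the priority/FIFO test above can be obtained by pairing each
queued job with a monotonically increasing sequence number and ordering by priority first,
then by sequence. A minimal sketch of that idea (illustrative only; appbase's actual
implementation may differ in detail):

   #include <cstdint>
   #include <functional>
   #include <queue>
   #include <vector>

   struct job {
      int priority;
      uint64_t seq;                 // submission order, strictly increasing
      std::function<void()> fn;
   };

   struct job_compare {
      bool operator()( const job& a, const job& b ) const {
         // top() yields the highest priority; among equal priorities, the lowest seq (FIFO)
         return a.priority < b.priority || (a.priority == b.priority && a.seq > b.seq);
      }
   };

   using stable_priority_queue = std::priority_queue<job, std::vector<job>, job_compare>;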
--- unittests/misc_tests.cpp | 3 --- 1 file changed, 3 deletions(-) diff --git a/unittests/misc_tests.cpp b/unittests/misc_tests.cpp index 77b52c85634..bfaeca76727 100644 --- a/unittests/misc_tests.cpp +++ b/unittests/misc_tests.cpp @@ -1084,20 +1084,17 @@ BOOST_AUTO_TEST_CASE(stable_priority_queue_test) { std::vector results; for( int i = 0; i < 50; ++i ) { boost::asio::post(*io_serv, pri_queue.wrap(appbase::priority::low, [io_serv, &mx, &ran, &results, i](){ - std::this_thread::sleep_for( 10us ); std::lock_guard g(mx); results.push_back( 50 + i ); ++ran; })); boost::asio::post(*io_serv, pri_queue.wrap(appbase::priority::high, [io_serv, &mx, &ran, &results, i](){ - std::this_thread::sleep_for( 10us ); std::lock_guard g(mx); results.push_back( i ); ++ran; })); } - std::this_thread::sleep_for( 100 * 10us ); // will take at least this long while( ran < 100 ) std::this_thread::sleep_for( 5us ); work_ptr.reset(); From cc61de0afdeeb60db1f3d8460451834c8ed1b38a Mon Sep 17 00:00:00 2001 From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com> Date: Fri, 1 Mar 2019 15:23:29 -0500 Subject: [PATCH 059/680] appbase: rework blocking (queuing) exit signals during shutdown MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The previous fix for blocking signals during shutdown seems to have problems on older platforms/compilers/stdlibs like gcc6. To be completely frank I couldn’t quite pinpoint the exact cause but I highly suspect it is something to do with the application instance being static and thus the ordering of static destructors being unfavorable. I’ve changed the implementation such that during startup a separate thread is run that catches the signals but after startup that thread is retired and signal handling is then handled on the main io_service. Signals end up being blocked (queued) until destruction of application’s io_service because the async_wait() will hold a shared_ptr to the signal_set. The implementation ends up being a bit long winded but means there are no shenanigans trying to clean up threads after main() has fully returned. 
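The keep-alive mechanism described above can be sketched as follows (illustrative, not
appbase's exact code): the async_wait() handler captures a shared_ptr to the signal_set,
so the set, and with it any queued signal notifications, stays alive until the io_service
itself is destroyed.

   #include <boost/asio.hpp>
   #include <csignal>
   #include <memory>

   void install_exit_handler( boost::asio::io_service& ios ) {
      auto sigs = std::make_shared<boost::asio::signal_set>( ios, SIGINT, SIGTERM );
      sigs->async_wait( [sigs]( const boost::system::error_code& ec, int /*signum*/ ) {
         if( !ec ) {
            // initiate application shutdown here
         }
         // capturing `sigs` keeps the signal_set (and any queued signals) alive
      } );
   }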
--- libraries/appbase | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/appbase b/libraries/appbase index da4bf8cb324..7daf480c3c1 160000 --- a/libraries/appbase +++ b/libraries/appbase @@ -1 +1 @@ -Subproject commit da4bf8cb324225b002b3105da42b62769da94ce9 +Subproject commit 7daf480c3c1755faf9ae1b53b8084510cad30055 From ac9db66523e45b8bc638480339247964a4f0f7ef Mon Sep 17 00:00:00 2001 From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com> Date: Fri, 1 Mar 2019 15:32:18 -0500 Subject: [PATCH 060/680] bump chainbase submodule --- libraries/chainbase | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/chainbase b/libraries/chainbase index 0c0043787db..a2563660f08 160000 --- a/libraries/chainbase +++ b/libraries/chainbase @@ -1 +1 @@ -Subproject commit 0c0043787db567d748aee3a4cc49d502c4507d1f +Subproject commit a2563660f082622ab7a18778f5b91cc91f51c0c3 From 7ea33636dd7cb228d9ecbb1c6a1a9ee62df7f00a Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Sat, 2 Mar 2019 08:47:13 -0600 Subject: [PATCH 061/680] Update appbase --- libraries/appbase | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/appbase b/libraries/appbase index 5c10377c426..94ccf0eed23 160000 --- a/libraries/appbase +++ b/libraries/appbase @@ -1 +1 @@ -Subproject commit 5c10377c426d1905c46d781cbb75a34e79728bca +Subproject commit 94ccf0eed230ded7f73ac848a07414899c197b28 From 3ebba5eac7a01e5b7e98b468168fde01c020c55c Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Thu, 28 Feb 2019 14:58:47 -0600 Subject: [PATCH 062/680] Add test for appbase stable priority execution queue --- unittests/CMakeLists.txt | 2 +- unittests/misc_tests.cpp | 34 ++++++++++++++++++++++++++++++++++ 2 files changed, 35 insertions(+), 1 deletion(-) diff --git a/unittests/CMakeLists.txt b/unittests/CMakeLists.txt index 3b288f2d2a3..dfb2a029e26 100644 --- a/unittests/CMakeLists.txt +++ b/unittests/CMakeLists.txt @@ -42,7 +42,7 @@ configure_file(${CMAKE_CURRENT_SOURCE_DIR}/contracts.hpp.in ${CMAKE_CURRENT_BINA ### BUILD UNIT TEST EXECUTABLE ### file(GLOB UNIT_TESTS "*.cpp") # find all unit test suites add_executable( unit_test ${UNIT_TESTS}) # build unit tests as one executable -target_link_libraries( unit_test eosio_chain chainbase eosio_testing fc ${PLATFORM_SPECIFIC_LIBS} ) +target_link_libraries( unit_test eosio_chain chainbase eosio_testing fc appbase ${PLATFORM_SPECIFIC_LIBS} ) target_compile_options(unit_test PUBLIC -DDISABLE_EOSLIB_SERIALIZE) target_include_directories( unit_test PUBLIC ${CMAKE_SOURCE_DIR}/libraries/testing/include diff --git a/unittests/misc_tests.cpp b/unittests/misc_tests.cpp index 607c78859fd..2b00d8d4e7b 100644 --- a/unittests/misc_tests.cpp +++ b/unittests/misc_tests.cpp @@ -10,6 +10,7 @@ #include #include +#include #include #include @@ -1056,6 +1057,39 @@ BOOST_AUTO_TEST_CASE(reflector_init_test) { } FC_LOG_AND_RETHROW() } +// verify appbase::app().post() uses a stable priority queue so that jobs are executed in order, FIFO, as submitted. 
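+// (This first version drives the jobs through the appbase application singleton's
+// post()/exec(); later commits in this series rework the test to exercise
+// execution_priority_queue directly.)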
+BOOST_AUTO_TEST_CASE(stable_priority_queue_test) { + try { + using namespace std::chrono_literals; + + std::thread t( []() { appbase::app().exec(); } ); + std::atomic ran; + std::mutex mx; + std::vector results; + for( int i = 0; i < 50; ++i ) { + appbase::app().post(appbase::priority::high, [&mx, &ran, &results, i](){ + std::this_thread::sleep_for( 10us ); + std::lock_guard g(mx); + results.push_back( i ); + ++ran; + }); + } + + std::this_thread::sleep_for( 50 * 10us ); // will take at least this long + while( ran < 50 ) std::this_thread::sleep_for( 5us ); + + appbase::app().quit(); + t.join(); + + std::lock_guard g(mx); + BOOST_CHECK_EQUAL( 50, results.size() ); + for( int i = 0; i < 50; ++i ) { + BOOST_CHECK_EQUAL( i, results.at( i ) ); + } + + } FC_LOG_AND_RETHROW() +} + BOOST_AUTO_TEST_SUITE_END() From 3515fe1660b87c470480ab8a9479e7bd641651a6 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Thu, 28 Feb 2019 14:59:07 -0600 Subject: [PATCH 063/680] Update to appbase with stable priority queue --- libraries/appbase | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/appbase b/libraries/appbase index 7daf480c3c1..94ccf0eed23 160000 --- a/libraries/appbase +++ b/libraries/appbase @@ -1 +1 @@ -Subproject commit 7daf480c3c1755faf9ae1b53b8084510cad30055 +Subproject commit 94ccf0eed230ded7f73ac848a07414899c197b28 From 5fb82d79f2ad414251f73e9a6509495511ec5b09 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Fri, 1 Mar 2019 07:47:48 -0600 Subject: [PATCH 064/680] Fix test, init atomic int. --- unittests/misc_tests.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unittests/misc_tests.cpp b/unittests/misc_tests.cpp index 2b00d8d4e7b..c5a49cb48d4 100644 --- a/unittests/misc_tests.cpp +++ b/unittests/misc_tests.cpp @@ -1063,7 +1063,7 @@ BOOST_AUTO_TEST_CASE(stable_priority_queue_test) { using namespace std::chrono_literals; std::thread t( []() { appbase::app().exec(); } ); - std::atomic ran; + std::atomic ran{0}; std::mutex mx; std::vector results; for( int i = 0; i < 50; ++i ) { From 802176218adf419927cd2b66424994baa8225657 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Fri, 1 Mar 2019 11:44:52 -0600 Subject: [PATCH 065/680] Update test to test execution_priority_queue directly instead of through application singleton. --- unittests/misc_tests.cpp | 25 +++++++++++++++++++------ 1 file changed, 19 insertions(+), 6 deletions(-) diff --git a/unittests/misc_tests.cpp b/unittests/misc_tests.cpp index c5a49cb48d4..cbbc7acce21 100644 --- a/unittests/misc_tests.cpp +++ b/unittests/misc_tests.cpp @@ -10,7 +10,7 @@ #include #include -#include +#include #include #include @@ -1057,28 +1057,41 @@ BOOST_AUTO_TEST_CASE(reflector_init_test) { } FC_LOG_AND_RETHROW() } -// verify appbase::app().post() uses a stable priority queue so that jobs are executed in order, FIFO, as submitted. +// Verify appbase::execution_priority_queue uses a stable priority queue so that jobs are executed +// in order, FIFO, as submitted. 
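+// Each job is posted to the io_service wrapped with its priority; when the io_service
+// dispatches the wrapper the job lands in pri_queue, and execute_highest() then runs
+// queued jobs highest-priority first, FIFO within a single priority level.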
BOOST_AUTO_TEST_CASE(stable_priority_queue_test) { try { using namespace std::chrono_literals; - std::thread t( []() { appbase::app().exec(); } ); + appbase::execution_priority_queue pri_queue; + auto io_serv = std::make_shared(); + auto work_ptr = std::make_unique(*io_serv); + + std::thread t( [io_serv, &pri_queue]() { + bool more = true; + while( more || io_serv->run_one() ) { + while( io_serv->poll_one() ) {} + // execute the highest priority item + more = pri_queue.execute_highest(); + } + } ); std::atomic ran{0}; std::mutex mx; std::vector results; for( int i = 0; i < 50; ++i ) { - appbase::app().post(appbase::priority::high, [&mx, &ran, &results, i](){ + boost::asio::post(*io_serv, pri_queue.wrap(appbase::priority::high, [io_serv, &mx, &ran, &results, i](){ std::this_thread::sleep_for( 10us ); std::lock_guard g(mx); results.push_back( i ); ++ran; - }); + })); } std::this_thread::sleep_for( 50 * 10us ); // will take at least this long while( ran < 50 ) std::this_thread::sleep_for( 5us ); - appbase::app().quit(); + work_ptr.reset(); + io_serv->stop(); t.join(); std::lock_guard g(mx); From f95c6d21516379f731b84c7edc8f743ce0634513 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Fri, 1 Mar 2019 13:57:01 -0600 Subject: [PATCH 066/680] Test priority queue honors priority along with FIFO --- unittests/misc_tests.cpp | 20 +++++++++++++++----- 1 file changed, 15 insertions(+), 5 deletions(-) diff --git a/unittests/misc_tests.cpp b/unittests/misc_tests.cpp index cbbc7acce21..77b52c85634 100644 --- a/unittests/misc_tests.cpp +++ b/unittests/misc_tests.cpp @@ -1066,8 +1066,12 @@ BOOST_AUTO_TEST_CASE(stable_priority_queue_test) { appbase::execution_priority_queue pri_queue; auto io_serv = std::make_shared(); auto work_ptr = std::make_unique(*io_serv); + std::atomic posted{0}; - std::thread t( [io_serv, &pri_queue]() { + std::thread t( [io_serv, &pri_queue, &posted]() { + while( posted < 100 && io_serv->run_one() ) { + ++posted; + } bool more = true; while( more || io_serv->run_one() ) { while( io_serv->poll_one() ) {} @@ -1079,6 +1083,12 @@ BOOST_AUTO_TEST_CASE(stable_priority_queue_test) { std::mutex mx; std::vector results; for( int i = 0; i < 50; ++i ) { + boost::asio::post(*io_serv, pri_queue.wrap(appbase::priority::low, [io_serv, &mx, &ran, &results, i](){ + std::this_thread::sleep_for( 10us ); + std::lock_guard g(mx); + results.push_back( 50 + i ); + ++ran; + })); boost::asio::post(*io_serv, pri_queue.wrap(appbase::priority::high, [io_serv, &mx, &ran, &results, i](){ std::this_thread::sleep_for( 10us ); std::lock_guard g(mx); @@ -1087,16 +1097,16 @@ BOOST_AUTO_TEST_CASE(stable_priority_queue_test) { })); } - std::this_thread::sleep_for( 50 * 10us ); // will take at least this long - while( ran < 50 ) std::this_thread::sleep_for( 5us ); + std::this_thread::sleep_for( 100 * 10us ); // will take at least this long + while( ran < 100 ) std::this_thread::sleep_for( 5us ); work_ptr.reset(); io_serv->stop(); t.join(); std::lock_guard g(mx); - BOOST_CHECK_EQUAL( 50, results.size() ); - for( int i = 0; i < 50; ++i ) { + BOOST_CHECK_EQUAL( 100, results.size() ); + for( int i = 0; i < 100; ++i ) { BOOST_CHECK_EQUAL( i, results.at( i ) ); } From e81c80a4d73e745b8ae1814f715395d675c18c5d Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Fri, 1 Mar 2019 14:03:48 -0600 Subject: [PATCH 067/680] Sleeps no longer needed. 
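With completion tracked by the atomic counter, the fixed up-front sleeps add nothing: the
test only needs to poll until all jobs have run before tearing down the io_service, as in
this condensed excerpt of the resulting code (see the diff below):

   while( ran < 100 )
      std::this_thread::sleep_for( 5us ); // poll until all 100 jobs have executed
   work_ptr.reset();                      // let io_service::run_one() return once idle
   io_serv->stop();
   t.join();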
--- unittests/misc_tests.cpp | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/unittests/misc_tests.cpp b/unittests/misc_tests.cpp
index 77b52c85634..bfaeca76727 100644
--- a/unittests/misc_tests.cpp
+++ b/unittests/misc_tests.cpp
@@ -1084,20 +1084,17 @@ BOOST_AUTO_TEST_CASE(stable_priority_queue_test) {
       std::vector<int> results;
       for( int i = 0; i < 50; ++i ) {
          boost::asio::post(*io_serv, pri_queue.wrap(appbase::priority::low, [io_serv, &mx, &ran, &results, i](){
-            std::this_thread::sleep_for( 10us );
             std::lock_guard<std::mutex> g(mx);
             results.push_back( 50 + i );
             ++ran;
          }));
          boost::asio::post(*io_serv, pri_queue.wrap(appbase::priority::high, [io_serv, &mx, &ran, &results, i](){
-            std::this_thread::sleep_for( 10us );
             std::lock_guard<std::mutex> g(mx);
             results.push_back( i );
             ++ran;
          }));
       }

-      std::this_thread::sleep_for( 100 * 10us ); // will take at least this long
       while( ran < 100 ) std::this_thread::sleep_for( 5us );

       work_ptr.reset();
From 1d2330b3df39d07fec2cac51a460113e69133d1a Mon Sep 17 00:00:00 2001
From: arhag
Date: Mon, 4 Mar 2019 01:15:12 -0500
Subject: [PATCH 068/680] major changes to controller_impl::init to fix #6842

---
 libraries/chain/controller.cpp              | 321 +++++++++---------
 libraries/chain/fork_database.cpp           |  34 +-
 .../include/eosio/chain/fork_database.hpp   |   5 +-
 .../eosio/chain/reversible_block_object.hpp |   9 +
 unittests/forked_tests.cpp                  |   4 +-
 5 files changed, 194 insertions(+), 179 deletions(-)

diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp
index 13158b9f790..c8e5213763a 100644
--- a/libraries/chain/controller.cpp
+++ b/libraries/chain/controller.cpp
@@ -307,19 +307,7 @@ struct controller_impl {
      if( fork_head->dpos_irreversible_blocknum <= lib_num )
         return;

-      /*
-      const auto& rbi = reversible_blocks.get_index<reversible_block_index,by_num>();
-      auto libitr = rbi.find( fork_head->dpos_irreversible_blocknum );
-      EOS_ASSERT( libitr != rbi.end(), fork_database_exception,
-                  "new LIB according to fork database is not in reversible block database" );
-
-      fc::datastream<const char*> ds( libitr->packedblock.data(), libitr->packedblock.size() );
-      block_header h;
-      fc::raw::unpack( ds, h );
-      auto lib_id = h.id();
-      */
-
-      const auto branch = fork_db.fetch_branch( fork_head->id, fork_head->dpos_irreversible_blocknum ); //fork_db.fetch_branch( lib_id );
+      const auto branch = fork_db.fetch_branch( fork_head->id, fork_head->dpos_irreversible_blocknum );

      try {
         const auto& rbi = reversible_blocks.get_index<reversible_block_index,by_num>();
@@ -402,7 +390,15 @@ struct controller_impl {
      std::cerr<< "\n";
      ilog( "${n} irreversible blocks replayed", ("n", 1 + head->block_num - start_block_num) );

-      fork_db.reset( *head );
+      auto pending_head = fork_db.pending_head();
+      if( pending_head->block_num < head->block_num || head->block_num <= fork_db.root()->block_num ) {
+         fork_db.reset( *head );
+      } else {
+         auto new_root = fork_db.search_on_branch( pending_head->id, head->block_num );
+         EOS_ASSERT( new_root, fork_database_exception, "unexpected error: could not find new LIB in fork database" );
+         fork_db.mark_valid( new_root );
+         fork_db.advance_root( new_root->id );
+      }

      // if the irreversible log is played without undo sessions enabled, we need to sync the
      // revision ordinal to the appropriate expected value here.
@@ -429,142 +425,63 @@ struct controller_impl {
   }

   void init(std::function<bool()> shutdown, const snapshot_reader_ptr& snapshot) {
-      auto blog_head = blog.head();
-      auto lib_num = (blog_head ? blog_head->block_num() : 1);
-      auto last_block_num = lib_num;
-
-      const auto& rbi = reversible_blocks.get_index<reversible_block_index,by_num>();
-
-      if (read_mode == db_read_mode::IRREVERSIBLE) {
-         // ensure there's no reversible_blocks in irreversible mode
-         auto rbitr = rbi.begin();
-         while ( rbitr != rbi.end() ) {
-            reversible_blocks.remove( *rbitr );
-            rbitr = rbi.begin();
-         }
-      } else {
-         auto rbitr = rbi.rbegin();
-         if( rbitr != rbi.rend() ) {
-            EOS_ASSERT( blog_head, fork_database_exception,
-                        "non-empty reversible blocks despite empty irreversible block log" );
-            EOS_ASSERT( rbitr->blocknum > lib_num, fork_database_exception,
-                        "reversible block database is inconsistent with the block log" );
-            last_block_num = rbitr->blocknum;
-         }
-      }
-
-      // Setup state if necessary (or in the default case stay with already loaded state)
+      // Setup state if necessary (or in the default case stay with already loaded state):
+      auto lib_num = 1;
      if( snapshot ) {
-         EOS_ASSERT( !head, fork_database_exception, "" );
         snapshot->validate();
-
-         read_from_snapshot( snapshot );
-
-         if( !blog_head ) {
+         if( blog.head() ) {
+            lib_num = blog.head()->block_num();
+            read_from_snapshot( snapshot, blog.first_block_num(), lib_num );
+         } else {
+            read_from_snapshot( snapshot, 0, std::numeric_limits<uint32_t>::max() );
+            lib_num = head->block_num;
            blog.reset( conf.genesis, signed_block_ptr(), lib_num + 1 );
         }
-
-         EOS_ASSERT( lib_num >= head->block_num, fork_database_exception,
-                     "Block log is provided with snapshot but does not contain the head block from the snapshot" );
      } else {
-         if( !head ) {
+         if( !fork_db.head() ) {
+            ilog( "No head block in fork database. Initializing fresh blockchain state." );
            initialize_blockchain_state(); // set head to genesis state
-         } else {
-            if (read_mode != db_read_mode::IRREVERSIBLE) {
-               EOS_ASSERT( last_block_num == head->block_num, fork_database_exception,
-                           "reversible block database is inconsistent with fork database, replay blockchain",
-                           ("head", head->block_num)("last_block_num", last_block_num)
-               );
-            }
-         }
-
-         if( !blog_head ) {
-            if( blog.first_block_num() > 1 ) {
-               lib_num = blog.first_block_num() - 1;
-               last_block_num = lib_num;
-               EOS_ASSERT( lib_num == head->block_num, fork_database_exception,
-                           "Empty block log requires the next block to be appended to it to be at height ${first_block_num} which is not compatible with the head block",
-                           ("first_block_num", blog.first_block_num())("head", head->block_num)
-               );
+            if( blog.head() ) {
+               EOS_ASSERT( blog.first_block_num() == 1, block_log_exception,
+                           "block log does not start with genesis block"
+               );
            } else {
               blog.reset( conf.genesis, head->block );
            }
-         }
-      }
+         } else {
+            lib_num = fork_db.root()->block_num;
+            auto first_block_num = blog.first_block_num();
+            if( blog.head() ) {
+               EOS_ASSERT( first_block_num <= lib_num && lib_num <= blog.head()->block_num(),
+                           block_log_exception,
+                           "block log does not contain last irreversible block",
+                           ("block_log_first_num",
first_block_num) + ("block_log_last_num", blog.head()->block_num()) + ("fork_db_lib", lib_num) + ); + lib_num = blog.head()->block_num(); + } else { + lib_num = fork_db.root()->block_num; + if( first_block_num != (lib_num + 1) ) { + blog.reset( conf.genesis, signed_block_ptr(), lib_num + 1 ); + } } - block_state_ptr bsp; - vector head_to_lib; - auto id = fork_db.pending_head()->id; - while (id != head->id && (bsp = fork_db.get_block(id))) { - head_to_lib.push_back(bsp); - id = bsp->header.previous; - } - - auto blog_head = blog.head(); - auto blog_head_time = blog_head->timestamp.to_time_point(); - replay_head_time = blog_head_time; - for (auto itr = head_to_lib.rbegin(); itr != head_to_lib.rend(); ++itr) { - reversible_blocks.create( [&]( auto& ubo ) { - ubo.blocknum = (*itr)->block_num; - ubo.set_block( (*itr)->block ); - }); - replay_push_block( (*itr)->block, controller::block_status::validated, false); - } - replay_head_time.reset(); - } - } else { // irreversible mode - uint32_t target_lib = fork_db.root()->block_num; - if (head->block_num > target_lib) { - // speculative mode => irreversible mode - wlog("db_read_mode has been changed, rolling back state from block #${o} to lib block #${l}", ("o", head->block_num)("l", target_lib)); - // let's go backward to lib(fork_db root) - while (head->block_num > target_lib) { - pop_block(); + if( read_mode == db_read_mode::IRREVERSIBLE && fork_db.head()->id != fork_db.root()->id ) { + fork_db.rollback_head_to_root(); } - fork_db.rollback_head_to_root(); - } - } - - bool report_integrity_hash = !!snapshot || (lib_num > head->block_num); - - // Trim any irreversible blocks from start of reversible blocks database - if( lib_num >= last_block_num ) { - last_block_num = lib_num; - auto rbitr = rbi.begin(); - while( rbitr != rbi.end() && rbitr->blocknum <= lib_num ) { - reversible_blocks.remove( *rbitr ); - rbitr = rbi.begin(); + head = fork_db.head(); } } - - if( lib_num > head->block_num ) { - replay( shutdown ); // replay irreversible blocks and any reversible blocks - } else if( last_block_num > lib_num ) { - replay( shutdown ); // replay reversible blocks - } - - if( shutdown() ) return; + // At this point head == fork_db.head() != nullptr && fork_db.root() != nullptr && fork_db.root()->block_num <= lib_num. + // Also, even though blog.head() may still be nullptr, blog.first_block_num() is guaranteed to be lib_num + 1. 
EOS_ASSERT( db.revision() >= head->block_num, fork_database_exception, - "fork database is inconsistent with state", + "fork database head is inconsistent with state", ("db",db.revision())("head",head->block_num) ); if( db.revision() > head->block_num ) { - wlog( "warning: database revision (${db}) is greater than head block number (${head}), " + wlog( "database revision (${db}) is greater than head block number (${head}), " "attempting to undo pending changes", ("db",db.revision())("head",head->block_num) ); } @@ -572,10 +489,102 @@ struct controller_impl { db.undo(); } + const auto& rbi = reversible_blocks.get_index(); + auto last_block_num = lib_num; + + if (read_mode == db_read_mode::IRREVERSIBLE) { + // ensure there are no reversible blocks + auto itr = rbi.begin(); + if( itr != rbi.end() ) { + wlog( "read_mode has changed to irreversible: erasing reversible blocks" ); + } + for( ; itr != rbi.end(); itr = rbi.begin() ) + reversible_blocks.remove( *itr ); + } else { + auto itr = rbi.begin(); + for( ; itr != rbi.end() && itr->blocknum <= lib_num; itr = rbi.begin() ) + reversible_blocks.remove( *itr ); + + EOS_ASSERT( itr == rbi.end() || itr->blocknum == lib_num + 1, reversible_blocks_exception, + "gap exists between last irreversible block and first reversible block", + ("lib", lib_num)("first_reversible_block_num", itr->blocknum) + ); + + auto ritr = rbi.rbegin(); + + if( ritr != rbi.rend() ) { + last_block_num = ritr->blocknum; + EOS_ASSERT( blog.head(), reversible_blocks_exception, + "non-empty reversible blocks despite empty irreversible block log" + ); + } + + EOS_ASSERT( head->block_num <= last_block_num, reversible_blocks_exception, + "head block (${head_num}) is greater than the last locally stored block (${last_block_num})", + ("head_num", head->block_num)("last_block_num", last_block_num) + ); + + auto pending_head = fork_db.pending_head(); + + if( ritr != rbi.rend() + && blog.head()->block_num() < pending_head->block_num + && pending_head->block_num <= last_block_num + ) { + auto rbitr = rbi.find( pending_head->block_num ); + EOS_ASSERT( rbitr != rbi.end(), reversible_blocks_exception, "pending head block not found in reversible blocks"); + auto rev_id = rbitr->get_block_id(); + EOS_ASSERT( rev_id == pending_head->id, + reversible_blocks_exception, + "mismatch in block id of pending head block ${num} in reversible blocks database: " + "expected: ${expected}, actual: ${actual}", + ("num", pending_head->block_num)("expected", pending_head->id)("actual", rev_id) + ); + } else if( last_block_num < pending_head->block_num ) { + const auto& branch = fork_db.fetch_branch( pending_head->id ); + auto num_blocks_prior_to_pending_head = pending_head->block_num - last_block_num; + FC_ASSERT( 1 <= num_blocks_prior_to_pending_head && num_blocks_prior_to_pending_head <= branch.size(), + "unexpected violation of invariants" ); + + if( ritr != rbi.rend() ) { + FC_ASSERT( num_blocks_prior_to_pending_head < branch.size(), "unexpected violation of invariants" ); + auto fork_id = branch[branch.size() - num_blocks_prior_to_pending_head - 1]->id; + auto rev_id = ritr->get_block_id(); + EOS_ASSERT( rev_id == fork_id, + reversible_blocks_exception, + "mismatch in block id of last block (${num}) in reversible blocks database: " + "expected: ${expected}, actual: ${actual}", + ("num", last_block_num)("expected", fork_id)("actual", rev_id) + ); + } + + if( pending_head->id != head->id ) { + wlog( "read_mode has changed from irreversible: reconstructing reversible blocks from fork database" ); + + for( 
auto n = branch.size() - num_blocks_prior_to_pending_head; n < branch.size(); ++n ) {
+                  reversible_blocks.create( [&]( auto& rbo ) {
+                     rbo.blocknum = branch[n]->block_num;
+                     rbo.set_block( branch[n]->block );
+                  });
+               }
+
+               last_block_num = pending_head->block_num;
+            }
+         }
+         // else no checks needed since fork_db will be completely reset on replay anyway
+      }
+
+      bool report_integrity_hash = !!snapshot || (lib_num > head->block_num);
+
+      if( last_block_num > head->block_num ) {
+         replay( shutdown ); // replay any irreversible and reversible blocks ahead of current head
+      }
+
+      if( shutdown() ) return;
+
      if( report_integrity_hash ) {
         const auto hash = calculate_integrity_hash();
         ilog( "database initialized with hash: ${hash}", ("hash", hash) );
-      }
+      }
   }

   ~controller_impl() {
@@ -696,7 +705,7 @@ struct controller_impl {
      resource_limits.add_to_snapshot(snapshot);
   }

-   void read_from_snapshot( const snapshot_reader_ptr& snapshot ) {
+   void read_from_snapshot( const snapshot_reader_ptr& snapshot, uint32_t blog_start, uint32_t blog_end ) {
      snapshot->read_section<chain_snapshot_header>([this]( auto &section ){
         chain_snapshot_header header;
         section.read_row(header, db);
      });

-      snapshot->read_section<block_state>([this]( auto &section ){
+      snapshot->read_section<block_state>([this, blog_start, blog_end]( auto &section ){
         block_header_state head_header_state;
         section.read_row(head_header_state, db);

+         snapshot_head_block = head_header_state.block_num;
+         auto next_block_after_snapshot_head = snapshot_head_block + 1;
+         EOS_ASSERT( blog_start <= next_block_after_snapshot_head && next_block_after_snapshot_head <= blog_end,
+                     block_log_exception,
+                     "Block log is provided with snapshot but does not contain the block after the head block from the snapshot",
+                     ("next_block_after_snapshot_head_num", next_block_after_snapshot_head)
+                     ("block_log_first_num", blog_start)
+                     ("block_log_last_num", blog_end)
+         );
+
         fork_db.reset( head_header_state );
-         auto head_state = std::make_shared<block_state>();
-         static_cast<block_header_state&>(*head_state) = head_header_state;
-         head = head_state;
+         head = fork_db.head();
         snapshot_head_block = head->block_num;
      });
@@ -1521,7 +1538,7 @@ struct controller_impl {
      } FC_LOG_AND_RETHROW( ) }

-   void replay_push_block( const signed_block_ptr& b, controller::block_status s, bool add_to_fork_db = true ) {
+   void replay_push_block( const signed_block_ptr& b, controller::block_status s ) {
      self.validate_db_available_size();
      self.validate_reversible_available_size();
@@ -1537,8 +1554,8 @@ struct controller_impl {
      // need to cater the irreversible mode case where head is not advancing
      auto bsp = std::make_shared<block_state>((read_mode == db_read_mode::IRREVERSIBLE && s != controller::block_status::irreversible && fork_db.pending_head()) ?
*fork_db.pending_head() : *head, b, skip_validate_signee );
-      if (add_to_fork_db && s != controller::block_status::irreversible ) {
-         fork_db.add( bsp );
+      if( s != controller::block_status::irreversible ) {
+         fork_db.add( bsp, true );
       }
 
       emit( self.accepted_block_header, bsp );
@@ -1576,6 +1593,7 @@ struct controller_impl {
               ("current_head_id", head->id)("current_head_num", head->block_num)("new_head_id", new_head->id)("new_head_num", new_head->block_num) );
          auto branches = fork_db.fetch_branch_from( new_head->id, head->id );
 
+         EOS_ASSERT( branches.second.size() > 0, fork_database_exception, "fork switch does not require popping blocks" );
          for( auto itr = branches.second.begin(); itr != branches.second.end(); ++itr ) {
            pop_block();
         }
@@ -1598,9 +1616,6 @@ struct controller_impl {
               // Remove the block that threw and all forks built off it.
               fork_db.remove( (*ritr)->id );
 
-               EOS_ASSERT( head->id == fork_db.head()->id, fork_database_exception,
-                           "loss of sync between fork_db and controller head during fork switch error" );
-
               // pop all blocks from the bad fork
               // ritr base is a forward itr to the last block successfully applied
               auto applied_itr = ritr.base();
@@ -1922,14 +1937,9 @@ void controller::add_indices() {
 }
 
 void controller::startup( std::function<bool()> shutdown, const snapshot_reader_ptr& snapshot ) {
-   my->head = my->fork_db.head();
    if( snapshot ) {
       ilog( "Starting initialization from snapshot, this may take a significant amount of time" );
    }
-   else if( !my->head ) {
-      elog( "No head block in fork db, perhaps we need to replay" );
-   }
-
    try {
       my->init(shutdown, snapshot);
    } catch (boost::interprocess::bad_alloc& e) {
@@ -2141,9 +2151,7 @@ const vector<transaction_receipt>& controller::get_pending_trx_receipts()const {
 }
 
 uint32_t controller::last_irreversible_block_num() const {
-   uint32_t lib_num = (my->read_mode == db_read_mode::IRREVERSIBLE)
-                        ? my->fork_db.root()->block_num : my->head->dpos_irreversible_blocknum;
-   return std::max( lib_num, my->snapshot_head_block );
+   return my->fork_db.root()->block_num;
 }
 
 block_id_type controller::last_irreversible_block_id() const {
@@ -2202,13 +2210,7 @@ block_state_ptr controller::fetch_block_state_by_number( uint32_t block_num )con
      }
   }
 
-      fc::datastream<const char*> ds( objitr->packedblock.data(), objitr->packedblock.size() );
-      block_header h;
-      fc::raw::unpack( ds, h );
-      // Only need the block id to then look up the block state in fork database, so just unpack the block_header from the stored packed data.
-      // Avoid calling objitr->get_block() since that constructs a new signed_block in heap memory and unpacks the full signed_block from the stored packed data.
-
-      return my->fork_db.get_block( h.id() );
+      return my->fork_db.get_block( objitr->get_block_id() );
   } FC_CAPTURE_AND_RETHROW( (block_num) )
 }
 
 block_id_type controller::get_block_id_for_num( uint32_t block_num )const { try {
@@ -2221,10 +2223,7 @@ block_id_type controller::get_block_id_for_num( uint32_t block_num )const { try
         const auto& rev_blocks = my->reversible_blocks.get_index<reversible_block_index,by_num>();
         auto objitr = rev_blocks.find(block_num);
 
         if( objitr != rev_blocks.end() ) {
-            fc::datastream<const char*> ds( objitr->packedblock.data(), objitr->packedblock.size() );
-            block_header h;
-            fc::raw::unpack( ds, h );
-            return h.id();
+            return objitr->get_block_id();
         }
      } else {
         auto bsp = my->fork_db.search_on_branch( my->fork_db.pending_head()->id, block_num );
diff --git a/libraries/chain/fork_database.cpp b/libraries/chain/fork_database.cpp
index f7f9530b835..6b8dc149775 100644
--- a/libraries/chain/fork_database.cpp
+++ b/libraries/chain/fork_database.cpp
@@ -277,7 +277,7 @@ namespace eosio { namespace chain {
      return block_header_state_ptr();
   }
 
-   void fork_database::add( const block_state_ptr& n ) {
+   void fork_database::add( const block_state_ptr& n, bool ignore_duplicate ) {
      EOS_ASSERT( my->root, fork_database_exception, "root not yet set" );
      EOS_ASSERT( n, fork_database_exception, "attempt to add null block state" );
 
@@ -285,7 +285,10 @@ namespace eosio { namespace chain {
                 "unlinkable block", ("id", n->id)("previous", n->header.previous) );
 
      auto inserted = my->index.insert(n);
-      EOS_ASSERT( inserted.second, fork_database_exception, "duplicate block added", ("id", n->id) );
+      if( !inserted.second ) {
+         if( ignore_duplicate ) return;
+         EOS_THROW( fork_database_exception, "duplicate block added", ("id", n->id) );
+      }
 
      auto candidate = my->index.get<by_lib_block_num>().begin();
      if( (*candidate)->is_valid() ) {
@@ -344,23 +347,23 @@ namespace eosio { namespace chain {
 
         while( first_branch->block_num > second_branch->block_num )
         {
            result.first.push_back(first_branch);
-            const auto &prev = first_branch->header.previous;
-            first_branch = get_block( first_branch->header.previous );
-            if (!first_branch && my->root && prev == my->root->id) first_branch = my->root;
+            const auto& prev = first_branch->header.previous;
+            first_branch = (prev == my->root->id) ? my->root : get_block( prev );
            EOS_ASSERT( first_branch, fork_db_block_not_found,
                        "block ${id} does not exist",
-                        ("id", prev) );
+                        ("id", prev)
+                      );
         }
 
         while( second_branch->block_num > first_branch->block_num )
         {
            result.second.push_back( second_branch );
-            const auto &prev = second_branch->header.previous;
-            second_branch = get_block( second_branch->header.previous );
-            if (!second_branch && my->root && prev == my->root->id) second_branch = my->root;
+            const auto& prev = second_branch->header.previous;
+            second_branch = (prev == my->root->id) ? my->root : get_block( prev );
            EOS_ASSERT( second_branch, fork_db_block_not_found,
                        "block ${id} does not exist",
-                        ("id", prev) );
+                        ("id", prev)
+                      );
         }
 
        if (first_branch->id == second_branch->id) return result;
@@ -371,10 +374,13 @@ namespace eosio { namespace chain {
           result.second.push_back(second_branch);
           first_branch = get_block( first_branch->header.previous );
           second_branch = get_block( second_branch->header.previous );
-            EOS_ASSERT( first_branch && second_branch, fork_db_block_not_found,
-                        "either block ${fid} or ${sid} does not exist",
-                        ("fid", first_branch->header.previous)
-                        ("sid", second_branch->header.previous)
+            EOS_ASSERT( first_branch, fork_db_block_not_found,
+                        "block ${id} does not exist",
+                        ("id", first_branch->header.previous)
+                      );
+            EOS_ASSERT( second_branch, fork_db_block_not_found,
+                        "block ${id} does not exist",
+                        ("id", second_branch->header.previous)
            );
        }
 
diff --git a/libraries/chain/include/eosio/chain/fork_database.hpp b/libraries/chain/include/eosio/chain/fork_database.hpp
index 1e723d054ca..900e06466f2 100644
--- a/libraries/chain/include/eosio/chain/fork_database.hpp
+++ b/libraries/chain/include/eosio/chain/fork_database.hpp
@@ -37,8 +37,7 @@ namespace eosio { namespace chain {
      void reset( const block_header_state& root_bhs );
 
      /**
-       *  rollback head to root if read_mode changed from speculative to irreversible
-       *  valid flag need to set to false to avoid head advancing
+       *  Removes validated flag from all blocks in fork database and resets head to point to the root.
       */
      void rollback_head_to_root();
 
@@ -51,7 +50,7 @@ namespace eosio { namespace chain {
       * Add block state to fork database.
       * Must link to existing block in fork database or the root.
       */
-      void add( const block_state_ptr& next_block );
+      void add( const block_state_ptr& next_block, bool ignore_duplicate = false );
 
      void remove( const block_id_type& id );
 
diff --git a/libraries/chain/include/eosio/chain/reversible_block_object.hpp b/libraries/chain/include/eosio/chain/reversible_block_object.hpp
index ea9a4c9e122..daaac00a71b 100644
--- a/libraries/chain/include/eosio/chain/reversible_block_object.hpp
+++ b/libraries/chain/include/eosio/chain/reversible_block_object.hpp
@@ -32,6 +32,15 @@ namespace eosio { namespace chain {
         fc::raw::unpack( ds, *result );
         return result;
      }
+
+      block_id_type get_block_id()const {
+         fc::datastream<const char*> ds( packedblock.data(), packedblock.size() );
+         block_header h;
+         fc::raw::unpack( ds, h );
+         // Only need the block id to then look up the block state in fork database, so just unpack the block_header from the stored packed data.
+         // Avoid calling get_block() since that constructs a new signed_block in heap memory and unpacks the full signed_block from the stored packed data.
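The pattern the two comments above describe can be shown as a self-contained sketch (the helper name and byte-buffer parameter are illustrative, not part of the patch; it assumes the fc and eosio::chain headers this file already uses). Because signed_block extends block_header, unpacking only a block_header consumes just the header prefix of the packed bytes:

   #include <eosio/chain/block_header.hpp>
   #include <fc/io/raw.hpp>
   #include <vector>

   using namespace eosio::chain;

   // hypothetical standalone helper, for illustration only
   block_id_type id_from_packed_block( const std::vector<char>& packed ) {
      fc::datastream<const char*> ds( packed.data(), packed.size() );
      block_header h;
      fc::raw::unpack( ds, h ); // reads only the header fields, never the transactions
      return h.id();            // header hash with the block number folded into its first 4 bytes
   }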
+ return h.id(); + } }; struct by_num; diff --git a/unittests/forked_tests.cpp b/unittests/forked_tests.cpp index 07c1d48c1aa..0e2d03cdd2f 100644 --- a/unittests/forked_tests.cpp +++ b/unittests/forked_tests.cpp @@ -154,7 +154,9 @@ BOOST_AUTO_TEST_CASE( fork_with_bad_block ) try { } // push the block which should attempt the corrupted fork and fail - BOOST_REQUIRE_THROW(bios.push_block(fork.blocks.back()), fc::exception); + BOOST_REQUIRE_EXCEPTION( bios.push_block(fork.blocks.back()), fc::exception, + fc_exception_message_is( "Block ID does not match" ) + ); } } From cbf86878b1941dc0644a6e0b1cc459b2ea7bf103 Mon Sep 17 00:00:00 2001 From: arhag Date: Mon, 4 Mar 2019 02:09:43 -0500 Subject: [PATCH 069/680] fix bug in init that caused restart-scenarios-test-hard_replay to fail --- libraries/chain/controller.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp index c8e5213763a..072bc76385a 100644 --- a/libraries/chain/controller.cpp +++ b/libraries/chain/controller.cpp @@ -445,6 +445,7 @@ struct controller_impl { EOS_ASSERT( blog.first_block_num() == 1, block_log_exception, "block log does not start with genesis block" ); + lib_num = blog.head()->block_num(); } else { blog.reset( conf.genesis, head->block ); } From 3728fe7255d778de9bfc8589344f28363931e44f Mon Sep 17 00:00:00 2001 From: Andrianto Lie Date: Mon, 4 Mar 2019 11:58:07 +0800 Subject: [PATCH 070/680] Add unit test to ensure that broadcasted block by producer node and receiving node is identical --- unittests/block_tests.cpp | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/unittests/block_tests.cpp b/unittests/block_tests.cpp index 89aceda3ff0..fab79a306a1 100644 --- a/unittests/block_tests.cpp +++ b/unittests/block_tests.cpp @@ -163,4 +163,32 @@ BOOST_AUTO_TEST_CASE(untrusted_producer_test) }) ; } +/** + * Ensure that the block broadcasted by producing node and receiving node is identical + */ +BOOST_AUTO_TEST_CASE(broadcasted_block_test) +{ + + tester producer_node; + tester receiving_node; + + signed_block_ptr bcasted_blk_by_prod_node; + signed_block_ptr bcasted_blk_by_recv_node; + + producer_node.control->accepted_block.connect( [&](const block_state_ptr& bs) { + bcasted_blk_by_prod_node = bs->block; + }); + receiving_node.control->accepted_block.connect( [&](const block_state_ptr& bs) { + bcasted_blk_by_recv_node = bs->block; + }); + + auto b = producer_node.produce_block(); + receiving_node.push_block(b); + + bytes bcasted_blk_by_prod_node_packed = fc::raw::pack(*bcasted_blk_by_prod_node); + bytes bcasted_blk_by_recv_node_packed = fc::raw::pack(*bcasted_blk_by_recv_node); + BOOST_CHECK(std::equal(bcasted_blk_by_prod_node_packed.begin(), bcasted_blk_by_prod_node_packed.end(), bcasted_blk_by_recv_node_packed.begin())); + +} + BOOST_AUTO_TEST_SUITE_END() From 7ff3d092d1b5a403c41348c9e3331ae6207262f1 Mon Sep 17 00:00:00 2001 From: arhag Date: Mon, 4 Mar 2019 12:09:09 -0500 Subject: [PATCH 071/680] fix block log validation bug in read_from_snapshot --- libraries/chain/controller.cpp | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp index 072bc76385a..718cbbb1260 100644 --- a/libraries/chain/controller.cpp +++ b/libraries/chain/controller.cpp @@ -419,7 +419,7 @@ struct controller_impl { ilog( "${n} reversible blocks replayed", ("n",rev) ); auto end = fc::time_point::now(); ilog( "replayed ${n} blocks in ${duration} seconds, ${mspb} ms/block", - ("n", 
head->block_num - start_block_num)("duration", (end-start).count()/1000000) + ("n", head->block_num + 1 - start_block_num)("duration", (end-start).count()/1000000) ("mspb", ((end-start).count()/1000.0)/(head->block_num-start_block_num)) ); replay_head_time.reset(); } @@ -719,11 +719,10 @@ struct controller_impl { section.read_row(head_header_state, db); snapshot_head_block = head_header_state.block_num; - auto next_block_after_snapshot_head = snapshot_head_block + 1; - EOS_ASSERT( blog_start <= next_block_after_snapshot_head && next_block_after_snapshot_head <= blog_end, + EOS_ASSERT( blog_start <= (snapshot_head_block + 1) && snapshot_head_block <= blog_end, block_log_exception, - "Block log is provided with snapshot but does not contain the block after the head block from the snapshot", - ("next_block_after_snapshot_head_num", next_block_after_snapshot_head) + "Block log is provided with snapshot but does not contain the head block from the snapshot nor a block right after it", + ("snapshot_head_block", snapshot_head_block) ("block_log_first_num", blog_start) ("block_log_last_num", blog_end) ); From c982dd6bf6840d68edc01880996fd3264b6c0b26 Mon Sep 17 00:00:00 2001 From: arhag Date: Mon, 4 Mar 2019 21:16:31 -0500 Subject: [PATCH 072/680] Replay no longer removed fork_db; avoids losing info on irreversible replay. No longer reconstruct reversible blocks when switching from irreversible to speculative mode since that can be dangerous. Instead use maybe switch forks after the replay is complete. --- libraries/chain/controller.cpp | 76 ++++++++++++--------------- plugins/chain_plugin/chain_plugin.cpp | 10 +++- 2 files changed, 43 insertions(+), 43 deletions(-) diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp index 718cbbb1260..919b14b7cc0 100644 --- a/libraries/chain/controller.cpp +++ b/libraries/chain/controller.cpp @@ -438,8 +438,8 @@ struct controller_impl { blog.reset( conf.genesis, signed_block_ptr(), lib_num + 1 ); } } else { - if( !fork_db.head() ) { - ilog( "No head block in fork datbase. Initializing fresh blockchain state." ); + if( db.revision() < 1 /* !fork_db.head() */) { + ilog( "No head block in fork database. Initializing fresh blockchain state." 
); initialize_blockchain_state(); // set head to genesis state if( blog.head() ) { EOS_ASSERT( blog.first_block_num() == 1, block_log_exception, @@ -493,7 +493,7 @@ struct controller_impl { const auto& rbi = reversible_blocks.get_index(); auto last_block_num = lib_num; - if (read_mode == db_read_mode::IRREVERSIBLE) { + if( read_mode == db_read_mode::IRREVERSIBLE ) { // ensure there are no reversible blocks auto itr = rbi.begin(); if( itr != rbi.end() ) { @@ -515,9 +515,6 @@ struct controller_impl { if( ritr != rbi.rend() ) { last_block_num = ritr->blocknum; - EOS_ASSERT( blog.head(), reversible_blocks_exception, - "non-empty reversible blocks despite empty irreversible block log" - ); } EOS_ASSERT( head->block_num <= last_block_num, reversible_blocks_exception, @@ -528,7 +525,7 @@ struct controller_impl { auto pending_head = fork_db.pending_head(); if( ritr != rbi.rend() - && blog.head()->block_num() < pending_head->block_num + && lib_num < pending_head->block_num && pending_head->block_num <= last_block_num ) { auto rbitr = rbi.find( pending_head->block_num ); @@ -540,36 +537,16 @@ struct controller_impl { "expected: ${expected}, actual: ${actual}", ("num", pending_head->block_num)("expected", pending_head->id)("actual", rev_id) ); - } else if( last_block_num < pending_head->block_num ) { - const auto& branch = fork_db.fetch_branch( pending_head->id ); - auto num_blocks_prior_to_pending_head = pending_head->block_num - last_block_num; - FC_ASSERT( 1 <= num_blocks_prior_to_pending_head && num_blocks_prior_to_pending_head <= branch.size(), - "unexpected violation of invariants" ); - - if( ritr != rbi.rend() ) { - FC_ASSERT( num_blocks_prior_to_pending_head < branch.size(), "unexpected violation of invariants" ); - auto fork_id = branch[branch.size() - num_blocks_prior_to_pending_head - 1]->id; - auto rev_id = ritr->get_block_id(); - EOS_ASSERT( rev_id == fork_id, - reversible_blocks_exception, - "mismatch in block id of last block (${num}) in reversible blocks database: " - "expected: ${expected}, actual: ${actual}", - ("num", last_block_num)("expected", fork_id)("actual", rev_id) - ); - } - - if( pending_head->id != head->id ) { - wlog( "read_mode has changed from irreversible: reconstructing reversible blocks from fork database" ); - - for( auto n = branch.size() - num_blocks_prior_to_pending_head; n < branch.size(); ++n ) { - reversible_blocks.create( [&]( auto& rbo ) { - rbo.blocknum = branch[n]->block_num; - rbo.set_block( branch[n]->block ); - }); - } - - last_block_num = pending_head->block_num; - } + } else if( ritr != rbi.rend() && last_block_num < pending_head->block_num ) { + const auto b = fork_db.search_on_branch( pending_head->id, last_block_num ); + FC_ASSERT( b, "unexpected violation of invariants" ); + auto rev_id = ritr->get_block_id(); + EOS_ASSERT( rev_id == b->id, + reversible_blocks_exception, + "mismatch in block id of last block (${num}) in reversible blocks database: " + "expected: ${expected}, actual: ${actual}", + ("num", last_block_num)("expected", b->id)("actual", rev_id) + ); } // else no checks needed since fork_db will be completely reset on replay anyway } @@ -582,6 +559,20 @@ struct controller_impl { if( shutdown() ) return; + if( read_mode != db_read_mode::IRREVERSIBLE + && fork_db.pending_head()->id != fork_db.head()->id + && fork_db.head()->id == fork_db.root()->id + ) { + wlog( "read_mode has changed from irreversible: applying best branch from fork database" ); + + for( auto pending_head = fork_db.pending_head(); + pending_head->id != 
fork_db.head()->id; + pending_head = fork_db.pending_head() + ) { + maybe_switch_forks( pending_head, controller::block_status::complete ); + } + } + if( report_integrity_hash ) { const auto hash = calculate_integrity_hash(); ilog( "database initialized with hash: ${hash}", ("hash", hash) ); @@ -1593,12 +1584,13 @@ struct controller_impl { ("current_head_id", head->id)("current_head_num", head->block_num)("new_head_id", new_head->id)("new_head_num", new_head->block_num) ); auto branches = fork_db.fetch_branch_from( new_head->id, head->id ); - EOS_ASSERT( branches.second.size() > 0, fork_database_exception, "fork switch does not require popping blocks" ); - for( auto itr = branches.second.begin(); itr != branches.second.end(); ++itr ) { - pop_block(); + if( branches.second.size() > 0 ) { + for( auto itr = branches.second.begin(); itr != branches.second.end(); ++itr ) { + pop_block(); + } + EOS_ASSERT( self.head_block_id() == branches.second.back()->header.previous, fork_database_exception, + "loss of sync between fork_db and chainbase during fork switch" ); // _should_ never fail } - EOS_ASSERT( self.head_block_id() == branches.second.back()->header.previous, fork_database_exception, - "loss of sync between fork_db and chainbase during fork switch" ); // _should_ never fail for( auto ritr = branches.first.rbegin(); ritr != branches.first.rend(); ++ritr ) { optional except; diff --git a/plugins/chain_plugin/chain_plugin.cpp b/plugins/chain_plugin/chain_plugin.cpp index 4e80263b028..06e664dfc39 100644 --- a/plugins/chain_plugin/chain_plugin.cpp +++ b/plugins/chain_plugin/chain_plugin.cpp @@ -329,6 +329,14 @@ void clear_directory_contents( const fc::path& p ) { } } +void clear_chainbase_files( const fc::path& p ) { + if( !fc::is_directory( p ) ) + return; + + fc::remove( p / "shared_memory.bin" ); + fc::remove( p / "shared_memory.meta" ); +} + void chain_plugin::plugin_initialize(const variables_map& options) { ilog("initializing chain plugin"); @@ -512,7 +520,7 @@ void chain_plugin::plugin_initialize(const variables_map& options) { ilog( "Replay requested: deleting state database" ); if( options.at( "truncate-at-block" ).as() > 0 ) wlog( "The --truncate-at-block option does not work for a regular replay of the blockchain." ); - clear_directory_contents( my->chain_config->state_dir ); + clear_chainbase_files( my->chain_config->state_dir ); if( options.at( "fix-reversible-blocks" ).as()) { if( !recover_reversible_blocks( my->chain_config->blocks_dir / config::reversible_blocks_dir_name, my->chain_config->reversible_cache_size )) { From 5f4287070954d97f79eeee66850080e98ea4d5cd Mon Sep 17 00:00:00 2001 From: arhag Date: Mon, 4 Mar 2019 21:53:31 -0500 Subject: [PATCH 073/680] correct ilog message on fresh state database; better error message when applying best branch from fork_db --- libraries/chain/controller.cpp | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp index 919b14b7cc0..88e135bf136 100644 --- a/libraries/chain/controller.cpp +++ b/libraries/chain/controller.cpp @@ -438,9 +438,9 @@ struct controller_impl { blog.reset( conf.genesis, signed_block_ptr(), lib_num + 1 ); } } else { - if( db.revision() < 1 /* !fork_db.head() */) { - ilog( "No head block in fork database. Initializing fresh blockchain state." ); - initialize_blockchain_state(); // set head to genesis state + if( db.revision() < 1 ) { + ilog( "No existing chain state. Initializing fresh blockchain state." 
); + initialize_blockchain_state(); // sets head to genesis state if( blog.head() ) { EOS_ASSERT( blog.first_block_num() == 1, block_log_exception, "block log does not start with genesis block" @@ -569,6 +569,7 @@ struct controller_impl { pending_head->id != fork_db.head()->id; pending_head = fork_db.pending_head() ) { + wlog( "applying branch from fork database ending with block id '${id}'", ("id", pending_head->id) ); maybe_switch_forks( pending_head, controller::block_status::complete ); } } From 2aeb626012e523e85e2cda3a6853a052598d5c0a Mon Sep 17 00:00:00 2001 From: Kayan Date: Tue, 5 Mar 2019 17:37:32 +0800 Subject: [PATCH 074/680] fix pending_schedule_hash --- libraries/chain/block_header_state.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/chain/block_header_state.cpp b/libraries/chain/block_header_state.cpp index 625f6ddf257..545bf366f5d 100644 --- a/libraries/chain/block_header_state.cpp +++ b/libraries/chain/block_header_state.cpp @@ -239,7 +239,7 @@ namespace eosio { namespace chain { if( h.new_producers ) { result.pending_schedule.schedule = *h.new_producers; - result.pending_schedule.schedule_hash = digest_type::hash( result.pending_schedule ); + result.pending_schedule.schedule_hash = digest_type::hash( *h.new_producers ); result.pending_schedule.schedule_lib_num = block_number; } else { if( was_pending_promoted ) { From 256c8a46f6c73c6946fe5d18e965a531efb418b1 Mon Sep 17 00:00:00 2001 From: arhag Date: Tue, 5 Mar 2019 12:26:43 -0500 Subject: [PATCH 075/680] more fixes for irreversible mode --- libraries/chain/controller.cpp | 33 +++++++++++++++++++++++++-------- 1 file changed, 25 insertions(+), 8 deletions(-) diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp index 88e135bf136..801870e866d 100644 --- a/libraries/chain/controller.cpp +++ b/libraries/chain/controller.cpp @@ -364,9 +364,7 @@ struct controller_impl { head = std::make_shared(); static_cast(*head) = genheader; head->block = std::make_shared(genheader.header); - fork_db.reset( *head ); db.set_revision( head->block_num ); - initialize_database(); } @@ -391,11 +389,15 @@ struct controller_impl { ilog( "${n} irreversible blocks replayed", ("n", 1 + head->block_num - start_block_num) ); auto pending_head = fork_db.pending_head(); - if( pending_head->block_num < head->block_num || head->block_num <= fork_db.root()->block_num ) { + if( pending_head->block_num < head->block_num || head->block_num < fork_db.root()->block_num ) { + ilog( "resetting fork database with new last irreversible block as the new root: ${id}", + ("id", head->id) ); fork_db.reset( *head ); - } else { + } else if( head->block_num != fork_db.root()->block_num ) { auto new_root = fork_db.search_on_branch( pending_head->id, head->block_num ); EOS_ASSERT( new_root, fork_database_exception, "unexpected error: could not find new LIB in fork database" ); + ilog( "advancing fork database root to new last irreversible block within existing fork database: ${id}", + ("id", new_root->id) ); fork_db.mark_valid( new_root ); fork_db.advance_root( new_root->id ); } @@ -438,9 +440,23 @@ struct controller_impl { blog.reset( conf.genesis, signed_block_ptr(), lib_num + 1 ); } } else { - if( db.revision() < 1 ) { - ilog( "No existing chain state. Initializing fresh blockchain state." 
 );
+      if( db.revision() < 1 || !fork_db.head() ) {
+         if( fork_db.head() ) {
+            if( read_mode == db_read_mode::IRREVERSIBLE && fork_db.head()->id != fork_db.root()->id ) {
+               fork_db.rollback_head_to_root();
+            }
+            wlog( "No existing chain state. Initializing fresh blockchain state." );
+         } else {
+            EOS_ASSERT( db.revision() < 1, database_exception,
+                        "No existing fork database despite existing chain state. Replay required." );
+            wlog( "No existing chain state or fork database. Initializing fresh blockchain state and resetting fork database.");
+         }
          initialize_blockchain_state(); // sets head to genesis state
+
+         if( !fork_db.head() ) {
+            fork_db.reset( *head );
+         }
+
          if( blog.head() ) {
             EOS_ASSERT( blog.first_block_num() == 1, block_log_exception,
                         "block log does not start with genesis block"
@@ -474,7 +490,8 @@ struct controller_impl {
            head = fork_db.head();
         }
      }
-      // At this point head == fork_db.head() != nullptr && fork_db.root() != nullptr && fork_db.root()->block_num <= lib_num.
+      // At this point head != nullptr && fork_db.head() != nullptr && fork_db.root() != nullptr.
+      // Furthermore, fork_db.root()->block_num <= lib_num.
       // Also, even though blog.head() may still be nullptr, blog.first_block_num() is guaranteed to be lib_num + 1.
 
       EOS_ASSERT( db.revision() >= head->block_num, fork_database_exception,
From 5e23af72786493bee8eac924f98db8ce94713101 Mon Sep 17 00:00:00 2001
From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com>
Date: Tue, 5 Mar 2019 15:57:18 -0500
Subject: [PATCH 076/680] appbase: ensure ctrl-c during startup handled
 correctly

---
 libraries/appbase | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/libraries/appbase b/libraries/appbase
index 94ccf0eed23..f97eaef38f0 160000
--- a/libraries/appbase
+++ b/libraries/appbase
@@ -1 +1 @@
-Subproject commit 94ccf0eed230ded7f73ac848a07414899c197b28
+Subproject commit f97eaef38f09d3e0a261540c6e0f5868b0bf61e9
From 3f930affcb43b1466f6849fb469230be471182e4 Mon Sep 17 00:00:00 2001
From: Kevin Heifner
Date: Tue, 5 Mar 2019 09:00:21 -0500
Subject: [PATCH 077/680] Call recover keys before transaction execution so
 trx->sig_cpu_usage is set correctly

---
 libraries/chain/controller.cpp | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp
index 402b61df0cf..3f1d5cf7837 100644
--- a/libraries/chain/controller.cpp
+++ b/libraries/chain/controller.cpp
@@ -992,6 +992,9 @@ struct controller_impl {
      transaction_trace_ptr trace;
      try {
         auto start = fc::time_point::now();
+         const bool check_auth = !self.skip_auth_check() && !trx->implicit;
+         // call recover keys so that trx->sig_cpu_usage is set correctly
+         const flat_set<public_key_type>& recovered_keys = check_auth ? trx->recover_keys( chain_id ) : flat_set<public_key_type>();
 
        if( !explicit_billed_cpu_time ) {
           fc::microseconds already_consumed_time( EOS_PERCENT(trx->sig_cpu_usage.count(), conf.sig_cpu_bill_pct) );
@@ -1024,10 +1027,10 @@ struct controller_impl {
 
         trx_context.delay = fc::seconds(trn.delay_sec);
 
-         if( !self.skip_auth_check() && !trx->implicit ) {
+         if( check_auth ) {
            authorization.check_authorization(
                    trn.actions,
-                    trx->recover_keys( chain_id ),
+                    recovered_keys,
                    {},
                    trx_context.delay,
                    [](){}
From 580361fbae198eee501dca1d24e06acf2c6e57ad Mon Sep 17 00:00:00 2001
From: arhag
Date: Tue, 5 Mar 2019 19:44:54 -0500
Subject: [PATCH 078/680] simplify logic now that there is a guarantee that no
 reversible blocks will exist while in irreversible mode

---
 libraries/chain/controller.cpp | 13 +++++--------
 1 file changed, 5 insertions(+), 8 deletions(-)

diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp
index 801870e866d..d205b2ad86a 100644
--- a/libraries/chain/controller.cpp
+++ b/libraries/chain/controller.cpp
@@ -411,11 +411,9 @@ struct controller_impl {
      }
 
      int rev = 0;
-      auto next_block_num = head->block_num+1;
-      // need to cater the irreversible case that head is not advancing
-      while( auto obj = reversible_blocks.find(read_mode != db_read_mode::IRREVERSIBLE ? head->block_num+1 : next_block_num) ) {
+      while( auto obj = reversible_blocks.find(head->block_num+1) ) {
         ++rev;
         replay_push_block( obj->get_block(), controller::block_status::validated );
-         ++next_block_num;
      }
 
      ilog( "${n} reversible blocks replayed", ("n",rev) );
@@ -1560,8 +1558,7 @@ struct controller_impl {
 
      emit( self.pre_accepted_block, b );
      const bool skip_validate_signee = !conf.force_all_checks;
 
-      // need to cater the irreversible mode case where head is not advancing
-      auto bsp = std::make_shared<block_state>((read_mode == db_read_mode::IRREVERSIBLE && s != controller::block_status::irreversible && fork_db.pending_head()) ? *fork_db.pending_head() : *head, b, skip_validate_signee );
+      auto bsp = std::make_shared<block_state>( *head, b, skip_validate_signee );
 
      if( s != controller::block_status::irreversible ) {
         fork_db.add( bsp, true );
@@ -1576,10 +1573,10 @@ struct controller_impl {
         // On replay, log_irreversible is not called and so no irreversible_block signal is emitted.
         // So emit it explicitly here.
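For context on the emit immediately below: the controller's signals are built on boost::signals2, so connecting and firing one looks roughly like this in isolation (a sketch with illustrative signature and names; the real signals carry a block_state_ptr, as the unit test connecting to accepted_block earlier in this series shows):

   #include <boost/signals2/signal.hpp>
   #include <cstdint>
   #include <iostream>

   int main() {
      boost::signals2::signal<void(uint32_t)> irreversible_block; // stand-in for the controller's signal
      irreversible_block.connect( []( uint32_t block_num ) {      // subscribers register callbacks
         std::cout << "lib advanced to block " << block_num << "\n";
      });
      irreversible_block( 42 ); // emitting invokes every connected subscriber synchronously
   }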
emit( self.irreversible_block, bsp ); - } else if( read_mode != db_read_mode::IRREVERSIBLE ) { - maybe_switch_forks( bsp, s ); } else { - log_irreversible(); + EOS_ASSERT( read_mode != db_read_mode::IRREVERSIBLE, block_validate_exception, + "invariant failure: cannot replay reversible blocks while in irreversible mode" ); + maybe_switch_forks( bsp, s ); } } FC_LOG_AND_RETHROW( ) From ad2f5344fa027568e2a70971dce6e21c7e6b4a80 Mon Sep 17 00:00:00 2001 From: Kayan Date: Wed, 6 Mar 2019 10:29:04 +0800 Subject: [PATCH 079/680] fix potential crash on assert --- libraries/chain/fork_database.cpp | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/libraries/chain/fork_database.cpp b/libraries/chain/fork_database.cpp index 6b8dc149775..0cb2485e23b 100644 --- a/libraries/chain/fork_database.cpp +++ b/libraries/chain/fork_database.cpp @@ -372,15 +372,17 @@ namespace eosio { namespace chain { { result.first.push_back(first_branch); result.second.push_back(second_branch); - first_branch = get_block( first_branch->header.previous ); - second_branch = get_block( second_branch->header.previous ); + const auto &first_prev = first_branch->header.previous; + first_branch = get_block( first_prev ); + const auto &second_prev = second_branch->header.previous; + second_branch = get_block( second_prev ); EOS_ASSERT( first_branch, fork_db_block_not_found, "block ${id} does not exist", - ("id", first_branch->header.previous) + ("id", first_prev) ); EOS_ASSERT( second_branch, fork_db_block_not_found, "block ${id} does not exist", - ("id", second_branch->header.previous) + ("id", second_prev) ); } From 7ef47d019ffc1fe0a8ad369db932a061d065f397 Mon Sep 17 00:00:00 2001 From: Andrianto Lie Date: Wed, 13 Feb 2019 16:19:14 +0800 Subject: [PATCH 080/680] Add test cases for irreversible mode Add more test cases Make test cases more granular --- tests/Cluster.py | 13 +- tests/Node.py | 11 +- tests/nodeos_irreversible_mode_test.py | 316 +++++++++++++++++++++++++ 3 files changed, 333 insertions(+), 7 deletions(-) create mode 100644 tests/nodeos_irreversible_mode_test.py diff --git a/tests/Cluster.py b/tests/Cluster.py index 3bc0f215566..8363e41781e 100644 --- a/tests/Cluster.py +++ b/tests/Cluster.py @@ -217,7 +217,7 @@ def launch(self, pnodes=1, totalNodes=1, prodCount=1, topo="mesh", p2pPlugin="ne producerNodes={} producers=[] for append in range(ord('a'),ord('a')+numProducers): - name="defproducer" + chr(append) + name="defproducer" + chr(append) producers.append(name) # first group starts at 0 @@ -440,7 +440,7 @@ def waitOnClusterSync(self, timeout=None, blockType=BlockType.head, blockAdvanci assert(len(self.nodes) > 0) node=self.nodes[0] targetBlockNum=node.getBlockNum(blockType) #retrieve node 0's head or irrevercible block number - targetBlockNum+=blockAdvancing + targetBlockNum+=blockAdvancing if Utils.Debug: Utils.Print("%s block number on root node: %d" % (blockType.type, targetBlockNum)) if targetBlockNum == -1: @@ -1430,7 +1430,7 @@ def getInfos(self, silentErrors=False, exitOnError=False): def reportStatus(self): if hasattr(self, "biosNode") and self.biosNode is not None: self.biosNode.reportStatus() - if hasattr(self, "nodes"): + if hasattr(self, "nodes"): for node in self.nodes: try: node.reportStatus() @@ -1523,10 +1523,10 @@ def identifyCommon(blockLogs, blockNameExtensions, first, last): commonBlockLogs=[] commonBlockNameExtensions=[] for i in range(numNodes): - if (len(blockLogs[i]) >= last): + if (len(blockLogs[i]) >= last): commonBlockLogs.append(blockLogs[i][first:last]) 
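The identifyCommon helper, reindented here, boils down to trimming every node's block log to a shared window and comparing the slices; the same idea in isolation (plain lists stand in for per-node block logs, and all values are made up for illustration):

   blockLogs = [[1, 2, 3, 4], [1, 2, 3], [1, 2, 3, 4, 5]]  # hypothetical per-node logs
   first, last = 0, 3

   commonSpans = [log[first:last] for log in blockLogs if len(log) >= last]
   assert all(span == commonSpans[0] for span in commonSpans), "nodes diverge within the common span"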
commonBlockNameExtensions.append(blockNameExtensions[i]) - return (commonBlockLogs,commonBlockNameExtensions) + return (commonBlockLogs,commonBlockNameExtensions) # compare the contents of the blockLogs for the given common block number span def compareCommon(blockLogs, blockNameExtensions, first, last): @@ -1567,3 +1567,6 @@ def stripValues(lowestMaxes,greaterThan): first=lowestMaxes[0]+1 lowestMaxes=stripValues(lowestMaxes,lowestMaxes[0]) + @staticmethod + def getDataDir(nodeId): + return os.path.join(Cluster.__dataDir, "node_%02d" % (nodeId)) diff --git a/tests/Node.py b/tests/Node.py index 1c01893ceca..0b0e57dae2c 100644 --- a/tests/Node.py +++ b/tests/Node.py @@ -558,7 +558,7 @@ def getEosAccount(self, name, exitOnError=False, returnType=ReturnType.json, avo msg="( getEosAccount(name=%s) )" % (name); return self.processCleosCmd(cmd, cmdDesc, silentErrors=False, exitOnError=exitOnError, exitMsg=msg, returnType=returnType) else: - assert returnType == ReturnType.json, "MongoDB only supports a returnType of ReturnType.json" + assert returnType == ReturnType.json, "MongoDB only supports a returnType of ReturnType.json" return self.getEosAccountFromDb(name, exitOnError=exitOnError) def getEosAccountFromDb(self, name, exitOnError=False): @@ -1198,7 +1198,11 @@ def kill(self, killSignal): if Utils.Debug: Utils.Print("Killing node: %s" % (self.cmd)) assert(self.pid is not None) try: - os.kill(self.pid, killSignal) + if self.popenProc is not None: + self.popenProc.send_signal(killSignal) + self.popenProc.wait() + else: + os.kill(self.pid, killSignal) except OSError as ex: Utils.Print("ERROR: Failed to kill node (%d)." % (self.cmd), ex) return False @@ -1360,6 +1364,9 @@ def isNodeAlive(): Utils.Print("Node relaunch was successfull.") else: Utils.Print("ERROR: Node relaunch Failed.") + # Ensure the node process is really killed + self.popenProc.send_signal(signal.SIGTERM) + self.popenProc.wait() self.pid=None return False diff --git a/tests/nodeos_irreversible_mode_test.py b/tests/nodeos_irreversible_mode_test.py new file mode 100644 index 00000000000..ba929fadd3c --- /dev/null +++ b/tests/nodeos_irreversible_mode_test.py @@ -0,0 +1,316 @@ +#!/usr/bin/env python3 + +from testUtils import Utils +from Cluster import Cluster +from WalletMgr import WalletMgr +from Node import Node +from Node import ReturnType +from TestHelper import TestHelper +from testUtils import Account + +import urllib.request +import re +import os +import time +import signal +import subprocess +import shutil + + +Print = Utils.Print +errorExit = Utils.errorExit +cmdError = Utils.cmdError +relaunchTimeout = 5 + +# Parse command line arguments +args = TestHelper.parse_args({"-v"}) +Utils.Debug = args.v + +# Setup cluster and it's wallet manager +walletMgr=WalletMgr(True) +cluster=Cluster(walletd=True) +cluster.setWalletMgr(walletMgr) + +def removeReversibleBlks(nodeId): + dataDir = Cluster.getDataDir(nodeId) + reversibleBlks = os.path.join(dataDir, "blocks", "reversible") + shutil.rmtree(reversibleBlks, ignore_errors=True) + +def getHeadLibAndForkDbHead(node: Node): + info = node.getInfo() + head = int(info["head_block_num"]) + lib = int(info["last_irreversible_block_num"]) + forkDbHead = int(info["fork_db_head_block_num"]) + return head, lib, forkDbHead + +def waitForBlksProducedAndLibAdvanced(): + time.sleep(60) + +def ensureHeadLibAndForkDbHeadIsAdvancing(nodeToTest): + # Ensure that the relaunched node received blks from producers + head, lib, _ = getHeadLibAndForkDbHead(nodeToTest) + waitForBlksProducedAndLibAdvanced() + 
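The fixed sleep in waitForBlksProducedAndLibAdvanced, just called above, is simple but timing-sensitive; a poll-with-timeout variant might look like the following sketch (not part of the test; it reuses the getHeadLibAndForkDbHead helper defined earlier in this file and the time module already imported):

   def waitForLibToAdvance(node, timeout=60):
      # hypothetical alternative: poll lib instead of sleeping a fixed interval
      _, libStart, _ = getHeadLibAndForkDbHead(node)
      deadline = time.time() + timeout
      while time.time() < deadline:
         _, lib, _ = getHeadLibAndForkDbHead(node)
         if lib > libStart:
            return True
         time.sleep(1)
      return False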
headAfterWaiting, libAfterWaiting, _ = getHeadLibAndForkDbHead(nodeToTest) + assert headAfterWaiting > head and libAfterWaiting > lib, "Head is not advancing" + +# Confirm the head lib and fork db of irreversible mode +# Under any condition of irreversible mode: forkDbHead > head == lib +# headLibAndForkDbHeadBeforeSwitchMode should be only passed IF production is disabled, otherwise it provides erroneous check +# Comparing with the state before mode is switched: head == libBeforeSwitchMode and forkDbHead == headBeforeSwitchMode == forkDbHeadBeforeSwitchMode +def confirmHeadLibAndForkDbHeadOfIrrMode(nodeToTest, headLibAndForkDbHeadBeforeSwitchMode=None): + # In irreversible mode, head should be equal to lib and not equal to fork Db blk num + head, lib, forkDbHead = getHeadLibAndForkDbHead(nodeToTest) + assert head == lib, "Head ({}) should be equal to lib ({})".format(head, lib) + assert forkDbHead > head, "Fork db head ({}) should be larger than the head ({})".format(forkDbHead, head) + + if headLibAndForkDbHeadBeforeSwitchMode: + headBeforeSwitchMode, libBeforeSwitchMode, forkDbHeadBeforeSwitchMode = headLibAndForkDbHeadBeforeSwitchMode + assert head == libBeforeSwitchMode, "Head ({}) should be equal to lib before switch mode ({})".format(head, libBeforeSwitchMode) + assert forkDbHead == headBeforeSwitchMode, "Fork db head ({}) should be equal to head before switch mode ({})".format(forkDbHead, headBeforeSwitchMode) + assert forkDbHead == forkDbHeadBeforeSwitchMode, "Fork db head ({}) should not be equal to fork db before switch mode ({})".format(forkDbHead, forkDbHeadBeforeSwitchMode) + +# Confirm the head lib and fork db of speculative mode +# Under any condition of irreversible mode: forkDbHead == head > lib +# headLibAndForkDbHeadBeforeSwitchMode should be only passed IF production is disabled, otherwise it provides erroneous check +# Comparing with the state before mode is switched: head == forkDbHeadBeforeSwitchMode == forkDbHead and lib == libBeforeSwitchMode +def confirmHeadLibAndForkDbHeadOfSpecMode(nodeToTest, headLibAndForkDbHeadBeforeSwitchMode=None): + # In speculative mode, head should be equal to lib and not equal to fork Db blk num + head, lib, forkDbHead = getHeadLibAndForkDbHead(nodeToTest) + assert head > lib, "Head should be larger than lib (head: {}, lib: {})".format(head, lib) + assert head == forkDbHead, "Head ({}) should be equal to fork db head ({})".format(head, forkDbHead) + + if headLibAndForkDbHeadBeforeSwitchMode: + _, libBeforeSwitchMode, forkDbHeadBeforeSwitchMode = headLibAndForkDbHeadBeforeSwitchMode + assert head == forkDbHeadBeforeSwitchMode, "Head ({}) should be equal to fork db head before switch mode ({})".format(head, forkDbHeadBeforeSwitchMode) + assert lib == libBeforeSwitchMode, "Lib ({}) should be equal to lib before switch mode ({})".format(lib, libBeforeSwitchMode) + assert forkDbHead == forkDbHeadBeforeSwitchMode, "Fork db head ({}) should be equal to fork db before switch mode ({})".format(forkDbHead, forkDbHeadBeforeSwitchMode) + +def relaunchNode(node: Node, nodeId, chainArg="", addOrSwapFlags=None, relaunchAssertMessage="Fail to relaunch"): + isRelaunchSuccess = node.relaunch(nodeId, chainArg=chainArg, addOrSwapFlags=addOrSwapFlags, timeout=relaunchTimeout) + assert isRelaunchSuccess, relaunchAssertMessage + return isRelaunchSuccess + +# List to contain the test result message +testResultMsgs = [] +try: + TestHelper.printSystemInfo("BEGIN") + cluster.killall(allInstances=True) + cluster.cleanup() + numOfProducers = 4 + totalNodes = 12 + 
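The specificExtraNodeosArgs dict given to cluster.launch just below keys extra nodeos flags by node index; the same mapping can be built programmatically, which makes the choice of irreversible nodes explicit (a sketch mirroring the values in the call that follows; the variable names are illustrative):

   producerNodeId = 0
   irreversibleNodeIds = [4, 6, 8]

   extraArgs = {producerNodeId: "--enable-stale-production"}
   for nodeId in irreversibleNodeIds:
      extraArgs[nodeId] = "--read-mode irreversible"
   # extraArgs could then be passed as specificExtraNodeosArgs=extraArgs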
cluster.launch( + prodCount=numOfProducers, + totalProducers=numOfProducers, + totalNodes=totalNodes, + pnodes=1, + useBiosBootFile=False, + topo="mesh", + specificExtraNodeosArgs={ + 0:"--enable-stale-production", + 4:"--read-mode irreversible", + 6:"--read-mode irreversible", + 8:"--read-mode irreversible"}) + + producingNodeId = 0 + producingNode = cluster.getNode(producingNodeId) + + def stopBlkProduction(): + if not producingNode.killed: + producingNode.kill(signal.SIGTERM) + + def resumeBlkProduction(): + if producingNode.killed: + producingNode.relaunch(producingNodeId, "", timeout=relaunchTimeout) + + # Give some time for it to produce, so lib is advancing + waitForBlksProducedAndLibAdvanced() + + # Kill all nodes, so we can test all node in isolated environment + for clusterNode in cluster.nodes: + clusterNode.kill(signal.SIGTERM) + cluster.biosNode.kill(signal.SIGTERM) + + # Wrapper function to execute test + # This wrapper function will resurrect the node to be tested, and shut it down by the end of the test + def executeTest(nodeIdOfNodeToTest, runTestScenario): + try: + nodeToTest = cluster.getNode(nodeIdOfNodeToTest) + # Resurrect killed node to be tested + relaunchNode(nodeToTest, nodeIdOfNodeToTest, relaunchAssertMessage="Fail to relaunch before running test scenario") + # Run test scenario + runTestScenario(nodeIdOfNodeToTest, nodeToTest) + # Kill node after use + if not nodeToTest.killed: nodeToTest.kill(signal.SIGTERM) + testResultMsgs.append("!!!TEST CASE #{} ({}) IS SUCCESSFUL".format(nodeIdOfNodeToTest, runTestScenario.__name__)) + except Exception as e: + testResultMsgs.append("!!!BUG IS CONFIRMED ON TEST CASE #{} ({}): {}".format(nodeIdOfNodeToTest, runTestScenario.__name__, e)) + + # 1st test case: Replay in irreversible mode with reversible blks + # Expectation: Node replays and launches successfully + # Current Bug: duplicate blk added error + def replayInIrrModeWithRevBlks(nodeIdOfNodeToTest, nodeToTest): + # Kill node and replay in irreversible mode + nodeToTest.kill(signal.SIGTERM) + relaunchNode(nodeToTest, nodeIdOfNodeToTest, chainArg=" --read-mode irreversible --replay") + # Confirm state + confirmHeadLibAndForkDbHeadOfIrrMode(nodeToTest) + + + # 2nd test case: Replay in irreversible mode without reversible blks + # Expectation: Node replays and launches successfully with lib == head == libBeforeSwitchMode + # Current Bug: last_irreversible_blk != the real lib (e.g. 
if lib is 1000, it replays up to 1000 saying head is 1000 and lib is 999) + def replayInIrrModeWithoutRevBlksAndCompareState(nodeIdOfNodeToTest, nodeToTest): + # Track head blk num and lib before shutdown + headLibAndForkDbHeadBeforeSwitchMode = getHeadLibAndForkDbHead(nodeToTest) + # Shut node, remove reversible blks and relaunch + nodeToTest.kill(signal.SIGTERM) + removeReversibleBlks(nodeIdOfNodeToTest) + relaunchNode(nodeToTest, nodeIdOfNodeToTest, chainArg=" --read-mode irreversible --replay") + # Confirm state + confirmHeadLibAndForkDbHeadOfIrrMode(nodeToTest, headLibAndForkDbHeadBeforeSwitchMode) + + # 3rd test case: Switch mode speculative -> irreversible without replay and production disabled + # Expectation: Node switches mode successfully + def switchSpecToIrrMode(nodeIdOfNodeToTest, nodeToTest): + # Relaunch in irreversible mode + nodeToTest.kill(signal.SIGTERM) + relaunchNode(nodeToTest, nodeIdOfNodeToTest, chainArg=" --read-mode irreversible") + # Confirm state + confirmHeadLibAndForkDbHeadOfIrrMode(nodeToTest) + + # 4th test case: Switch mode irreversible -> speculative without replay and production disabled + # Expectation: Node switches mode successfully + def switchIrrToSpecMode(nodeIdOfNodeToTest, nodeToTest): + # Relaunch in speculative mode + nodeToTest.kill(signal.SIGTERM) + relaunchNode(nodeToTest, nodeIdOfNodeToTest, addOrSwapFlags={"--read-mode": "speculative"}) + # Confirm state + confirmHeadLibAndForkDbHeadOfSpecMode(nodeToTest) + + # 5th test case: Switch mode irreversible -> speculative without replay and production enabled + # Expectation: Node switches mode successfully and receives next blk from the producer + # Current Bug: Fail to switch to irreversible mode, blk_validate_exception next blk in the future will be thrown + def switchSpecToIrrModeWithProdEnabled(nodeIdOfNodeToTest, nodeToTest): + try: + # Resume blk production + resumeBlkProduction() + # Kill and relaunch in irreversible mode + nodeToTest.kill(signal.SIGTERM) + waitForBlksProducedAndLibAdvanced() # Wait for some blks to be produced and lib advance + relaunchNode(nodeToTest, nodeIdOfNodeToTest, chainArg=" --read-mode irreversible") + # Ensure that the relaunched node received blks from producers + ensureHeadLibAndForkDbHeadIsAdvancing(nodeToTest) + # Confirm state + confirmHeadLibAndForkDbHeadOfIrrMode(nodeToTest) + finally: + # Stop blk production + stopBlkProduction() + + # 6th test case: Switch mode irreversible -> speculative without replay and production enabled + # Expectation: Node switches mode successfully and receives next blk from the producer + # Current Bug: Node switches mode successfully, however, it fails to establish connection with the producing node + def switchIrrToSpecModeWithProdEnabled(nodeIdOfNodeToTest, nodeToTest): + try: + # Resume blk production + resumeBlkProduction() + # Kill and relaunch in irreversible mode + nodeToTest.kill(signal.SIGTERM) + waitForBlksProducedAndLibAdvanced() # Wait for some blks to be produced and lib advance) + relaunchNode(nodeToTest, nodeIdOfNodeToTest, addOrSwapFlags={"--read-mode": "speculative"}) + # Ensure that the relaunched node received blks from producers + ensureHeadLibAndForkDbHeadIsAdvancing(nodeToTest) + # Confirm state + confirmHeadLibAndForkDbHeadOfSpecMode(nodeToTest) + finally: + # Stop blk production + stopBlkProduction() + + # 7th test case: Switch mode speculative -> irreversible and compare the state before shutdown + # Expectation: Node switch mode successfully and head == libBeforeSwitchMode and forkDbHead == 
headBeforeSwitchMode == forkDbHeadBeforeSwitchMode + def switchSpecToIrrModeAndCompareState(nodeIdOfNodeToTest, nodeToTest): + # Track head blk num and lib before shutdown + headLibAndForkDbHeadBeforeSwitchMode = getHeadLibAndForkDbHead(nodeToTest) + # Kill and relaunch in irreversible mode + nodeToTest.kill(signal.SIGTERM) + relaunchNode(nodeToTest, nodeIdOfNodeToTest, chainArg=" --read-mode irreversible") + # Confirm state + confirmHeadLibAndForkDbHeadOfIrrMode(nodeToTest, headLibAndForkDbHeadBeforeSwitchMode) + + # 8th test case: Switch mode irreversible -> speculative and compare the state before shutdown + # Expectation: Node switch mode successfully and head == forkDbHeadBeforeSwitchMode == forkDbHead and lib == libBeforeSwitchMode + def switchIrrToSpecModeAndCompareState(nodeIdOfNodeToTest, nodeToTest): + # Track head blk num and lib before shutdown + headLibAndForkDbHeadBeforeSwitchMode = getHeadLibAndForkDbHead(nodeToTest) + # Kill and relaunch in speculative mode + nodeToTest.kill(signal.SIGTERM) + relaunchNode(nodeToTest, nodeIdOfNodeToTest, addOrSwapFlags={"--read-mode": "speculative"}) + # Confirm state + confirmHeadLibAndForkDbHeadOfSpecMode(nodeToTest, headLibAndForkDbHeadBeforeSwitchMode) + + # 9th test case: Replay in irreversible mode with reversible blks while production is enabled + # Expectation: Node replays and launches successfully and the head and lib should be advancing + # Current Bug: duplicate blk added error + def replayInIrrModeWithRevBlksAndProdEnabled(nodeIdOfNodeToTest, nodeToTest): + try: + resumeBlkProduction() + # Kill node and replay in irreversible mode + nodeToTest.kill(signal.SIGTERM) + waitForBlksProducedAndLibAdvanced() # Wait + relaunchNode(nodeToTest, nodeIdOfNodeToTest, chainArg=" --read-mode irreversible --replay") + # Ensure that the relaunched node received blks from producers + ensureHeadLibAndForkDbHeadIsAdvancing(nodeToTest) + # Confirm state + confirmHeadLibAndForkDbHeadOfIrrMode(nodeToTest) + finally: + stopBlkProduction() + + # 10th test case: Replay in irreversible mode without reversible blks while production is enabled + # Expectation: Node replays and launches successfully and the head and lib should be advancing + def replayInIrrModeWithoutRevBlksAndProdEnabled(nodeIdOfNodeToTest, nodeToTest): + try: + resumeBlkProduction() + # Kill node and replay in irreversible mode + nodeToTest.kill(signal.SIGTERM) + # Remove rev blks + removeReversibleBlks(nodeIdOfNodeToTest) + waitForBlksProducedAndLibAdvanced() # Wait + relaunchNode(nodeToTest, nodeIdOfNodeToTest, chainArg=" --read-mode irreversible --replay") + # Ensure that the relaunched node received blks from producers + ensureHeadLibAndForkDbHeadIsAdvancing(nodeToTest) + # Confirm state + confirmHeadLibAndForkDbHeadOfIrrMode(nodeToTest) + finally: + stopBlkProduction() + + # 11th test case: Replay in irreversible mode with reversible blks and compare the state before switch mode + # Expectation: Node replays and launches successfully and (head == libBeforeShutdown and lib == libBeforeShutdown and forkDbHead == forkDbHeadBeforeShutdown) + # Current Bug: duplicate blk added error (similar to 1st test case) + # Once bug in 1st test case is fixed, this can be merged with 1st test case + def replayInIrrModeWithRevBlksAndCompareState(nodeIdOfNodeToTest, nodeToTest): + # Track head blk num and lib before shutdown + headLibAndForkDbHeadBeforeSwitchMode = getHeadLibAndForkDbHead(nodeToTest) + # Kill node and replay in irreversible mode + nodeToTest.kill(signal.SIGTERM) + 
relaunchNode(nodeToTest, nodeIdOfNodeToTest, chainArg=" --read-mode irreversible --replay") + # Confirm state + confirmHeadLibAndForkDbHeadOfIrrMode(nodeToTest, headLibAndForkDbHeadBeforeSwitchMode) + + # Start executing test cases here + executeTest(1, replayInIrrModeWithRevBlks) + executeTest(2, replayInIrrModeWithoutRevBlksAndCompareState) + executeTest(3, switchSpecToIrrMode) + executeTest(4, switchIrrToSpecMode) + executeTest(5, switchSpecToIrrModeWithProdEnabled) + executeTest(6, switchIrrToSpecModeWithProdEnabled) + executeTest(7, switchSpecToIrrModeAndCompareState) + executeTest(8, switchIrrToSpecModeAndCompareState) + executeTest(9, replayInIrrModeWithRevBlksAndProdEnabled) + executeTest(10, replayInIrrModeWithoutRevBlksAndProdEnabled) + executeTest(11, replayInIrrModeWithRevBlksAndCompareState) + +finally: + # TestHelper.shutdown(cluster, walletMgr) + # Print test result + for msg in testResultMsgs: Print(msg) + +exit(0) From ecbd50c6c980a5780f70869eeb8e75a189bbf4d0 Mon Sep 17 00:00:00 2001 From: Andrianto Lie Date: Wed, 27 Feb 2019 15:26:41 +0800 Subject: [PATCH 081/680] Simplify irreversible mode test cases and add more description Tidy up the test case Add more test option --- tests/nodeos_irreversible_mode_test.py | 198 +++++++++++++------------ 1 file changed, 102 insertions(+), 96 deletions(-) diff --git a/tests/nodeos_irreversible_mode_test.py b/tests/nodeos_irreversible_mode_test.py index ba929fadd3c..719d159be0a 100644 --- a/tests/nodeos_irreversible_mode_test.py +++ b/tests/nodeos_irreversible_mode_test.py @@ -21,10 +21,18 @@ errorExit = Utils.errorExit cmdError = Utils.cmdError relaunchTimeout = 5 +numOfProducers = 4 +totalNodes = 9 # Parse command line arguments -args = TestHelper.parse_args({"-v"}) +args = TestHelper.parse_args({"-v","--clean-run","--dump-error-details","--leave-running","--keep-logs"}) Utils.Debug = args.v +killAll=args.clean_run +dumpErrorDetails=args.dump_error_details +dontKill=args.leave_running +killEosInstances=not dontKill +killWallet=not dontKill +keepLogs=args.keep_logs # Setup cluster and it's wallet manager walletMgr=WalletMgr(True) @@ -38,25 +46,30 @@ def removeReversibleBlks(nodeId): def getHeadLibAndForkDbHead(node: Node): info = node.getInfo() + assert info is not None, "Fail to retrieve info from the node, the node is currently having a problem" head = int(info["head_block_num"]) lib = int(info["last_irreversible_block_num"]) forkDbHead = int(info["fork_db_head_block_num"]) return head, lib, forkDbHead +# Around 30 seconds should be enough to advance lib for 4 producers def waitForBlksProducedAndLibAdvanced(): - time.sleep(60) + time.sleep(30) +# Ensure that the relaunched node received blks from producers, in other words head and lib is advancing def ensureHeadLibAndForkDbHeadIsAdvancing(nodeToTest): - # Ensure that the relaunched node received blks from producers - head, lib, _ = getHeadLibAndForkDbHead(nodeToTest) + head, lib, forkDbHead = getHeadLibAndForkDbHead(nodeToTest) waitForBlksProducedAndLibAdvanced() - headAfterWaiting, libAfterWaiting, _ = getHeadLibAndForkDbHead(nodeToTest) - assert headAfterWaiting > head and libAfterWaiting > lib, "Head is not advancing" + headAfterWaiting, libAfterWaiting, forkDbHeadAfterWaiting = getHeadLibAndForkDbHead(nodeToTest) + assert headAfterWaiting > head and libAfterWaiting > lib and forkDbHeadAfterWaiting > forkDbHead, \ + "Either Head ({} -> {})/ Lib ({} -> {})/ Fork Db Head ({} -> {}) is not advancing".format(head, headAfterWaiting, lib, libAfterWaiting, forkDbHead, 
forkDbHeadAfterWaiting)
+   assert headAfterWaiting > head and libAfterWaiting > lib and forkDbHeadAfterWaiting > forkDbHead, \
+      "Either Head ({} -> {})/ Lib ({} -> {})/ Fork Db Head ({} -> {}) is not advancing".format(head, headAfterWaiting, lib, libAfterWaiting, forkDbHead, forkDbHeadAfterWaiting)
 
 # Confirm the head lib and fork db of irreversible mode
-# Under any condition of irreversible mode: forkDbHead > head == lib
+# Under any condition of irreversible mode:
+# - forkDbHead > head == lib
 # headLibAndForkDbHeadBeforeSwitchMode should only be passed IF production is disabled, otherwise it provides an erroneous check
-# Comparing with the state before mode is switched: head == libBeforeSwitchMode and forkDbHead == headBeforeSwitchMode == forkDbHeadBeforeSwitchMode
+# When comparing with the state before the node is switched:
+# - head == libBeforeSwitchMode == lib and forkDbHead == headBeforeSwitchMode == forkDbHeadBeforeSwitchMode
 def confirmHeadLibAndForkDbHeadOfIrrMode(nodeToTest, headLibAndForkDbHeadBeforeSwitchMode=None):
    # In irreversible mode, head should be equal to lib and not equal to fork db head
    head, lib, forkDbHead = getHeadLibAndForkDbHead(nodeToTest)
@@ -66,13 +79,16 @@ def confirmHeadLibAndForkDbHeadOfIrrMode(nodeToTest, headLibAndForkDbHeadBeforeS
    if headLibAndForkDbHeadBeforeSwitchMode:
       headBeforeSwitchMode, libBeforeSwitchMode, forkDbHeadBeforeSwitchMode = headLibAndForkDbHeadBeforeSwitchMode
       assert head == libBeforeSwitchMode, "Head ({}) should be equal to lib before switch mode ({})".format(head, libBeforeSwitchMode)
-      assert forkDbHead == headBeforeSwitchMode, "Fork db head ({}) should be equal to head before switch mode ({})".format(forkDbHead, headBeforeSwitchMode)
-      assert forkDbHead == forkDbHeadBeforeSwitchMode, "Fork db head ({}) should not be equal to fork db before switch mode ({})".format(forkDbHead, forkDbHeadBeforeSwitchMode)
+      assert lib == libBeforeSwitchMode, "Lib ({}) should be equal to lib before switch mode ({})".format(lib, libBeforeSwitchMode)
+      assert forkDbHead == headBeforeSwitchMode and forkDbHead == forkDbHeadBeforeSwitchMode, \
+         "Fork db head ({}) should be equal to head before switch mode ({}) and fork db head before switch mode ({})".format(forkDbHead, headBeforeSwitchMode, forkDbHeadBeforeSwitchMode)
 
 # Confirm the head lib and fork db of speculative mode
-# Under any condition of irreversible mode: forkDbHead == head > lib
+# Under any condition of speculative mode:
+# - forkDbHead == head > lib
 # headLibAndForkDbHeadBeforeSwitchMode should only be passed IF production is disabled, otherwise it provides an erroneous check
-# Comparing with the state before mode is switched: head == forkDbHeadBeforeSwitchMode == forkDbHead and lib == libBeforeSwitchMode
+# When comparing with the state before the node is switched:
+# - head == forkDbHeadBeforeSwitchMode == forkDbHead and lib == headBeforeSwitchMode == libBeforeSwitchMode
 def confirmHeadLibAndForkDbHeadOfSpecMode(nodeToTest, headLibAndForkDbHeadBeforeSwitchMode=None):
    # In speculative mode, head should be larger than lib and equal to the fork db head
    head, lib, forkDbHead = getHeadLibAndForkDbHead(nodeToTest)
@@ -80,10 +96,12 @@ def confirmHeadLibAndForkDbHeadOfSpecMode(nodeToTest, headLibAndForkDbHeadBefore
    if headLibAndForkDbHeadBeforeSwitchMode:
-      _, libBeforeSwitchMode, forkDbHeadBeforeSwitchMode = headLibAndForkDbHeadBeforeSwitchMode
+      headBeforeSwitchMode, libBeforeSwitchMode, forkDbHeadBeforeSwitchMode = headLibAndForkDbHeadBeforeSwitchMode
       assert head == forkDbHeadBeforeSwitchMode, "Head ({}) should be equal to fork db head before switch mode ({})".format(head, forkDbHeadBeforeSwitchMode)
-      assert lib == libBeforeSwitchMode, "Lib ({}) should be equal
to lib before switch mode ({})".format(lib, libBeforeSwitchMode) - assert forkDbHead == forkDbHeadBeforeSwitchMode, "Fork db head ({}) should be equal to fork db before switch mode ({})".format(forkDbHead, forkDbHeadBeforeSwitchMode) + assert lib == headBeforeSwitchMode and lib == libBeforeSwitchMode, \ + "Lib ({}) should be equal to head before switch mode ({}) and lib before switch mode ({})".format(lib, headBeforeSwitchMode, libBeforeSwitchMode) + assert forkDbHead == forkDbHeadBeforeSwitchMode, \ + "Fork db head ({}) should be equal to fork db head before switch mode ({}) ".format(forkDbHead, forkDbHeadBeforeSwitchMode) def relaunchNode(node: Node, nodeId, chainArg="", addOrSwapFlags=None, relaunchAssertMessage="Fail to relaunch"): isRelaunchSuccess = node.relaunch(nodeId, chainArg=chainArg, addOrSwapFlags=addOrSwapFlags, timeout=relaunchTimeout) @@ -92,12 +110,12 @@ def relaunchNode(node: Node, nodeId, chainArg="", addOrSwapFlags=None, relaunchA # List to contain the test result message testResultMsgs = [] +testSuccessful = False try: + # Kill any existing instances and launch cluster TestHelper.printSystemInfo("BEGIN") - cluster.killall(allInstances=True) + cluster.killall(allInstances=killAll) cluster.cleanup() - numOfProducers = 4 - totalNodes = 12 cluster.launch( prodCount=numOfProducers, totalProducers=numOfProducers, @@ -108,8 +126,7 @@ def relaunchNode(node: Node, nodeId, chainArg="", addOrSwapFlags=None, relaunchA specificExtraNodeosArgs={ 0:"--enable-stale-production", 4:"--read-mode irreversible", - 6:"--read-mode irreversible", - 8:"--read-mode irreversible"}) + 6:"--read-mode irreversible"}) producingNodeId = 0 producingNode = cluster.getNode(producingNodeId) @@ -134,11 +151,13 @@ def resumeBlkProduction(): # This wrapper function will resurrect the node to be tested, and shut it down by the end of the test def executeTest(nodeIdOfNodeToTest, runTestScenario): try: + # Relaunch killed node so it can be used for the test nodeToTest = cluster.getNode(nodeIdOfNodeToTest) - # Resurrect killed node to be tested relaunchNode(nodeToTest, nodeIdOfNodeToTest, relaunchAssertMessage="Fail to relaunch before running test scenario") + # Run test scenario runTestScenario(nodeIdOfNodeToTest, nodeToTest) + # Kill node after use if not nodeToTest.killed: nodeToTest.kill(signal.SIGTERM) testResultMsgs.append("!!!TEST CASE #{} ({}) IS SUCCESSFUL".format(nodeIdOfNodeToTest, runTestScenario.__name__)) @@ -147,108 +166,106 @@ def executeTest(nodeIdOfNodeToTest, runTestScenario): # 1st test case: Replay in irreversible mode with reversible blks # Expectation: Node replays and launches successfully + # with head == libBeforeSwitchMode == lib and forkDbHead == headBeforeSwitchMode == forkDbHeadBeforeSwitchMode # Current Bug: duplicate blk added error def replayInIrrModeWithRevBlks(nodeIdOfNodeToTest, nodeToTest): # Kill node and replay in irreversible mode nodeToTest.kill(signal.SIGTERM) relaunchNode(nodeToTest, nodeIdOfNodeToTest, chainArg=" --read-mode irreversible --replay") + # Confirm state confirmHeadLibAndForkDbHeadOfIrrMode(nodeToTest) - # 2nd test case: Replay in irreversible mode without reversible blks - # Expectation: Node replays and launches successfully with lib == head == libBeforeSwitchMode - # Current Bug: last_irreversible_blk != the real lib (e.g. 
if lib is 1000, it replays up to 1000 saying head is 1000 and lib is 999) - def replayInIrrModeWithoutRevBlksAndCompareState(nodeIdOfNodeToTest, nodeToTest): + # Expectation: Node replays and launches successfully + # with head == libBeforeSwitchMode == lib and forkDbHead == headBeforeSwitchMode == forkDbHeadBeforeSwitchMode + # Current Bug: lib != libBeforeSwitchMode + def replayInIrrModeWithoutRevBlks(nodeIdOfNodeToTest, nodeToTest): # Track head blk num and lib before shutdown headLibAndForkDbHeadBeforeSwitchMode = getHeadLibAndForkDbHead(nodeToTest) - # Shut node, remove reversible blks and relaunch + + # Shut down node, remove reversible blks and relaunch nodeToTest.kill(signal.SIGTERM) removeReversibleBlks(nodeIdOfNodeToTest) relaunchNode(nodeToTest, nodeIdOfNodeToTest, chainArg=" --read-mode irreversible --replay") - # Confirm state + + # Ensure the node condition is as expected after relaunch confirmHeadLibAndForkDbHeadOfIrrMode(nodeToTest, headLibAndForkDbHeadBeforeSwitchMode) - # 3rd test case: Switch mode speculative -> irreversible without replay and production disabled + # 3rd test case: Switch mode speculative -> irreversible without replay # Expectation: Node switches mode successfully + # with head == libBeforeSwitchMode == lib and forkDbHead == headBeforeSwitchMode == forkDbHeadBeforeSwitchMode + # Current Bug: head != lib def switchSpecToIrrMode(nodeIdOfNodeToTest, nodeToTest): - # Relaunch in irreversible mode + # Track head blk num and lib before shutdown + headLibAndForkDbHeadBeforeSwitchMode = getHeadLibAndForkDbHead(nodeToTest) + + # Kill and relaunch in irreversible mode nodeToTest.kill(signal.SIGTERM) relaunchNode(nodeToTest, nodeIdOfNodeToTest, chainArg=" --read-mode irreversible") - # Confirm state - confirmHeadLibAndForkDbHeadOfIrrMode(nodeToTest) - # 4th test case: Switch mode irreversible -> speculative without replay and production disabled + # Ensure the node condition is as expected after relaunch + confirmHeadLibAndForkDbHeadOfIrrMode(nodeToTest, headLibAndForkDbHeadBeforeSwitchMode) + + # 4th test case: Switch mode irreversible -> speculative without replay # Expectation: Node switches mode successfully + # with head == forkDbHeadBeforeSwitchMode == forkDbHead and lib == headBeforeSwitchMode == libBeforeSwitchMode + # Current Bug: head != forkDbHead and head != forkDbHeadBeforeSwitchMode and lib != libBeforeSwitchMode def switchIrrToSpecMode(nodeIdOfNodeToTest, nodeToTest): - # Relaunch in speculative mode + # Track head blk num and lib before shutdown + headLibAndForkDbHeadBeforeSwitchMode = getHeadLibAndForkDbHead(nodeToTest) + + # Kill and relaunch in speculative mode nodeToTest.kill(signal.SIGTERM) relaunchNode(nodeToTest, nodeIdOfNodeToTest, addOrSwapFlags={"--read-mode": "speculative"}) - # Confirm state - confirmHeadLibAndForkDbHeadOfSpecMode(nodeToTest) + + # Ensure the node condition is as expected after relaunch + confirmHeadLibAndForkDbHeadOfSpecMode(nodeToTest, headLibAndForkDbHeadBeforeSwitchMode) # 5th test case: Switch mode irreversible -> speculative without replay and production enabled - # Expectation: Node switches mode successfully and receives next blk from the producer + # Expectation: Node switches mode successfully + # and the head and lib should be advancing after some blocks produced + # with head == libBeforeSwitchMode == lib and forkDbHead == headBeforeSwitchMode == forkDbHeadBeforeSwitchMode # Current Bug: Fail to switch to irreversible mode, blk_validate_exception next blk in the future will be thrown def 
switchSpecToIrrModeWithProdEnabled(nodeIdOfNodeToTest, nodeToTest): try: - # Resume blk production resumeBlkProduction() + # Kill and relaunch in irreversible mode nodeToTest.kill(signal.SIGTERM) waitForBlksProducedAndLibAdvanced() # Wait for some blks to be produced and lib advance relaunchNode(nodeToTest, nodeIdOfNodeToTest, chainArg=" --read-mode irreversible") - # Ensure that the relaunched node received blks from producers + + # Ensure the node condition is as expected after relaunch ensureHeadLibAndForkDbHeadIsAdvancing(nodeToTest) - # Confirm state confirmHeadLibAndForkDbHeadOfIrrMode(nodeToTest) finally: - # Stop blk production stopBlkProduction() # 6th test case: Switch mode irreversible -> speculative without replay and production enabled - # Expectation: Node switches mode successfully and receives next blk from the producer + # Expectation: Node switches mode successfully + # and the head and lib should be advancing after some blocks produced + # with head == forkDbHeadBeforeSwitchMode == forkDbHead and lib == headBeforeSwitchMode == libBeforeSwitchMode # Current Bug: Node switches mode successfully, however, it fails to establish connection with the producing node def switchIrrToSpecModeWithProdEnabled(nodeIdOfNodeToTest, nodeToTest): try: - # Resume blk production resumeBlkProduction() + # Kill and relaunch in irreversible mode nodeToTest.kill(signal.SIGTERM) waitForBlksProducedAndLibAdvanced() # Wait for some blks to be produced and lib advance) relaunchNode(nodeToTest, nodeIdOfNodeToTest, addOrSwapFlags={"--read-mode": "speculative"}) - # Ensure that the relaunched node received blks from producers + + # Ensure the node condition is as expected after relaunch ensureHeadLibAndForkDbHeadIsAdvancing(nodeToTest) - # Confirm state confirmHeadLibAndForkDbHeadOfSpecMode(nodeToTest) finally: - # Stop blk production stopBlkProduction() - # 7th test case: Switch mode speculative -> irreversible and compare the state before shutdown - # Expectation: Node switch mode successfully and head == libBeforeSwitchMode and forkDbHead == headBeforeSwitchMode == forkDbHeadBeforeSwitchMode - def switchSpecToIrrModeAndCompareState(nodeIdOfNodeToTest, nodeToTest): - # Track head blk num and lib before shutdown - headLibAndForkDbHeadBeforeSwitchMode = getHeadLibAndForkDbHead(nodeToTest) - # Kill and relaunch in irreversible mode - nodeToTest.kill(signal.SIGTERM) - relaunchNode(nodeToTest, nodeIdOfNodeToTest, chainArg=" --read-mode irreversible") - # Confirm state - confirmHeadLibAndForkDbHeadOfIrrMode(nodeToTest, headLibAndForkDbHeadBeforeSwitchMode) - - # 8th test case: Switch mode irreversible -> speculative and compare the state before shutdown - # Expectation: Node switch mode successfully and head == forkDbHeadBeforeSwitchMode == forkDbHead and lib == libBeforeSwitchMode - def switchIrrToSpecModeAndCompareState(nodeIdOfNodeToTest, nodeToTest): - # Track head blk num and lib before shutdown - headLibAndForkDbHeadBeforeSwitchMode = getHeadLibAndForkDbHead(nodeToTest) - # Kill and relaunch in speculative mode - nodeToTest.kill(signal.SIGTERM) - relaunchNode(nodeToTest, nodeIdOfNodeToTest, addOrSwapFlags={"--read-mode": "speculative"}) - # Confirm state - confirmHeadLibAndForkDbHeadOfSpecMode(nodeToTest, headLibAndForkDbHeadBeforeSwitchMode) - - # 9th test case: Replay in irreversible mode with reversible blks while production is enabled - # Expectation: Node replays and launches successfully and the head and lib should be advancing + # 7th test case: Replay in irreversible mode with reversible 
blks while production is enabled + # Expectation: Node replays and launches successfully + # and the head and lib should be advancing after some blocks produced + # with head == libBeforeSwitchMode == lib and forkDbHead == headBeforeSwitchMode == forkDbHeadBeforeSwitchMode # Current Bug: duplicate blk added error def replayInIrrModeWithRevBlksAndProdEnabled(nodeIdOfNodeToTest, nodeToTest): try: @@ -257,59 +274,48 @@ def replayInIrrModeWithRevBlksAndProdEnabled(nodeIdOfNodeToTest, nodeToTest): nodeToTest.kill(signal.SIGTERM) waitForBlksProducedAndLibAdvanced() # Wait relaunchNode(nodeToTest, nodeIdOfNodeToTest, chainArg=" --read-mode irreversible --replay") - # Ensure that the relaunched node received blks from producers + + # Ensure the node condition is as expected after relaunch ensureHeadLibAndForkDbHeadIsAdvancing(nodeToTest) - # Confirm state confirmHeadLibAndForkDbHeadOfIrrMode(nodeToTest) finally: stopBlkProduction() - # 10th test case: Replay in irreversible mode without reversible blks while production is enabled - # Expectation: Node replays and launches successfully and the head and lib should be advancing + # 8th test case: Replay in irreversible mode without reversible blks while production is enabled + # Expectation: Node replays and launches successfully + # and the head and lib should be advancing after some blocks produced + # with head == libBeforeSwitchMode == lib and forkDbHead == headBeforeSwitchMode == forkDbHeadBeforeSwitchMode + # Current Bug: Nothing def replayInIrrModeWithoutRevBlksAndProdEnabled(nodeIdOfNodeToTest, nodeToTest): try: resumeBlkProduction() - # Kill node and replay in irreversible mode + + # Kill node, remove rev blks and then replay in irreversible mode nodeToTest.kill(signal.SIGTERM) - # Remove rev blks removeReversibleBlks(nodeIdOfNodeToTest) waitForBlksProducedAndLibAdvanced() # Wait relaunchNode(nodeToTest, nodeIdOfNodeToTest, chainArg=" --read-mode irreversible --replay") - # Ensure that the relaunched node received blks from producers + + # Ensure the node condition is as expected after relaunch ensureHeadLibAndForkDbHeadIsAdvancing(nodeToTest) - # Confirm state confirmHeadLibAndForkDbHeadOfIrrMode(nodeToTest) finally: stopBlkProduction() - # 11th test case: Replay in irreversible mode with reversible blks and compare the state before switch mode - # Expectation: Node replays and launches successfully and (head == libBeforeShutdown and lib == libBeforeShutdown and forkDbHead == forkDbHeadBeforeShutdown) - # Current Bug: duplicate blk added error (similar to 1st test case) - # Once bug in 1st test case is fixed, this can be merged with 1st test case - def replayInIrrModeWithRevBlksAndCompareState(nodeIdOfNodeToTest, nodeToTest): - # Track head blk num and lib before shutdown - headLibAndForkDbHeadBeforeSwitchMode = getHeadLibAndForkDbHead(nodeToTest) - # Kill node and replay in irreversible mode - nodeToTest.kill(signal.SIGTERM) - relaunchNode(nodeToTest, nodeIdOfNodeToTest, chainArg=" --read-mode irreversible --replay") - # Confirm state - confirmHeadLibAndForkDbHeadOfIrrMode(nodeToTest, headLibAndForkDbHeadBeforeSwitchMode) # Start executing test cases here executeTest(1, replayInIrrModeWithRevBlks) - executeTest(2, replayInIrrModeWithoutRevBlksAndCompareState) + executeTest(2, replayInIrrModeWithoutRevBlks) executeTest(3, switchSpecToIrrMode) executeTest(4, switchIrrToSpecMode) executeTest(5, switchSpecToIrrModeWithProdEnabled) executeTest(6, switchIrrToSpecModeWithProdEnabled) - executeTest(7, switchSpecToIrrModeAndCompareState) - 
executeTest(8, switchIrrToSpecModeAndCompareState) - executeTest(9, replayInIrrModeWithRevBlksAndProdEnabled) - executeTest(10, replayInIrrModeWithoutRevBlksAndProdEnabled) - executeTest(11, replayInIrrModeWithRevBlksAndCompareState) + executeTest(7, replayInIrrModeWithRevBlksAndProdEnabled) + executeTest(8, replayInIrrModeWithoutRevBlksAndProdEnabled) + testSuccessful = True finally: - # TestHelper.shutdown(cluster, walletMgr) + TestHelper.shutdown(cluster, walletMgr, testSuccessful, killEosInstances, killWallet, keepLogs, killAll, dumpErrorDetails) # Print test result for msg in testResultMsgs: Print(msg) From a02c5e1ef88f96ab43e27dd9ac84dd19a8714968 Mon Sep 17 00:00:00 2001 From: Andrianto Lie Date: Wed, 27 Feb 2019 17:13:44 +0800 Subject: [PATCH 082/680] Modify Cmakelist to include test for irreversible mode --- tests/CMakeLists.txt | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index 66a826fa0e6..7bd36d5490e 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -22,7 +22,7 @@ target_include_directories( plugin_test PUBLIC ${CMAKE_SOURCE_DIR}/plugins/net_plugin/include ${CMAKE_SOURCE_DIR}/plugins/chain_plugin/include ${CMAKE_BINARY_DIR}/unittests/include/ ) - + configure_file(${CMAKE_CURRENT_SOURCE_DIR}/core_symbol.py.in ${CMAKE_CURRENT_BINARY_DIR}/core_symbol.py) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/testUtils.py ${CMAKE_CURRENT_BINARY_DIR}/testUtils.py COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/WalletMgr.py ${CMAKE_CURRENT_BINARY_DIR}/WalletMgr.py COPYONLY) @@ -40,6 +40,7 @@ configure_file(${CMAKE_CURRENT_SOURCE_DIR}/nodeos_run_test.py ${CMAKE_CURRENT_BI configure_file(${CMAKE_CURRENT_SOURCE_DIR}/nodeos_run_remote_test.py ${CMAKE_CURRENT_BINARY_DIR}/nodeos_run_remote_test.py COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/nodeos_under_min_avail_ram.py ${CMAKE_CURRENT_BINARY_DIR}/nodeos_under_min_avail_ram.py COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/nodeos_voting_test.py ${CMAKE_CURRENT_BINARY_DIR}/nodeos_voting_test.py COPYONLY) +configure_file(${CMAKE_CURRENT_SOURCE_DIR}/nodeos_irreversible_mode_test.py ${CMAKE_CURRENT_BINARY_DIR}/nodeos_irreversible_mode_test.py COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/consensus-validation-malicious-producers.py ${CMAKE_CURRENT_BINARY_DIR}/consensus-validation-malicious-producers.py COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/validate-dirty-db.py ${CMAKE_CURRENT_BINARY_DIR}/validate-dirty-db.py COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/launcher_test.py ${CMAKE_CURRENT_BINARY_DIR}/launcher_test.py COPYONLY) @@ -104,6 +105,8 @@ set_property(TEST nodeos_voting_bnet_lr_test PROPERTY LABELS long_running_tests) add_test(NAME nodeos_under_min_avail_ram_lr_test COMMAND tests/nodeos_under_min_avail_ram.py -v --wallet-port 9904 --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST nodeos_under_min_avail_ram_lr_test PROPERTY LABELS long_running_tests) +add_test(NAME nodeos_irreversible_mode_lr_test COMMAND tests/nodeos_irreversible_mode_test.py -v --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +set_property(TEST nodeos_irreversible_mode_lr_test PROPERTY LABELS long_running_tests) if(ENABLE_COVERAGE_TESTING) From 70e277f9c19f2423083b250bf3597a438269577c Mon Sep 17 00:00:00 2001 From: Andrianto Lie Date: Wed, 27 Feb 2019 17:16:37 +0800 Subject: [PATCH 083/680] Update irreversible mode exit code depending on test result Update comments and rename terms to make it 
clearer --- tests/nodeos_irreversible_mode_test.py | 69 +++++++++++++++----------- 1 file changed, 40 insertions(+), 29 deletions(-) mode change 100644 => 100755 tests/nodeos_irreversible_mode_test.py diff --git a/tests/nodeos_irreversible_mode_test.py b/tests/nodeos_irreversible_mode_test.py old mode 100644 new mode 100755 index 719d159be0a..f6cf8dec951 --- a/tests/nodeos_irreversible_mode_test.py +++ b/tests/nodeos_irreversible_mode_test.py @@ -17,6 +17,13 @@ import shutil +############################################################### +# nodeos_irreversible_mode_test +# --dump-error-details +# --keep-logs +# -v --leave-running --clean-run +############################################################### + Print = Utils.Print errorExit = Utils.errorExit cmdError = Utils.cmdError @@ -131,11 +138,11 @@ def relaunchNode(node: Node, nodeId, chainArg="", addOrSwapFlags=None, relaunchA producingNodeId = 0 producingNode = cluster.getNode(producingNodeId) - def stopBlkProduction(): + def stopProdNode(): if not producingNode.killed: producingNode.kill(signal.SIGTERM) - def resumeBlkProduction(): + def startProdNode(): if producingNode.killed: producingNode.relaunch(producingNodeId, "", timeout=relaunchTimeout) @@ -150,6 +157,7 @@ def resumeBlkProduction(): # Wrapper function to execute test # This wrapper function will resurrect the node to be tested, and shut it down by the end of the test def executeTest(nodeIdOfNodeToTest, runTestScenario): + testResult = False try: # Relaunch killed node so it can be used for the test nodeToTest = cluster.getNode(nodeIdOfNodeToTest) @@ -161,8 +169,10 @@ def executeTest(nodeIdOfNodeToTest, runTestScenario): # Kill node after use if not nodeToTest.killed: nodeToTest.kill(signal.SIGTERM) testResultMsgs.append("!!!TEST CASE #{} ({}) IS SUCCESSFUL".format(nodeIdOfNodeToTest, runTestScenario.__name__)) + testResult = True except Exception as e: testResultMsgs.append("!!!BUG IS CONFIRMED ON TEST CASE #{} ({}): {}".format(nodeIdOfNodeToTest, runTestScenario.__name__, e)) + return testResult # 1st test case: Replay in irreversible mode with reversible blks # Expectation: Node replays and launches successfully @@ -222,14 +232,14 @@ def switchIrrToSpecMode(nodeIdOfNodeToTest, nodeToTest): # Ensure the node condition is as expected after relaunch confirmHeadLibAndForkDbHeadOfSpecMode(nodeToTest, headLibAndForkDbHeadBeforeSwitchMode) - # 5th test case: Switch mode irreversible -> speculative without replay and production enabled + # 5th test case: Switch mode speculative -> irreversible without replay and connected to producing node # Expectation: Node switches mode successfully # and the head and lib should be advancing after some blocks produced # with head == libBeforeSwitchMode == lib and forkDbHead == headBeforeSwitchMode == forkDbHeadBeforeSwitchMode # Current Bug: Fail to switch to irreversible mode, blk_validate_exception next blk in the future will be thrown - def switchSpecToIrrModeWithProdEnabled(nodeIdOfNodeToTest, nodeToTest): + def switchSpecToIrrModeWithConnectedToProdNode(nodeIdOfNodeToTest, nodeToTest): try: - resumeBlkProduction() + startProdNode() # Kill and relaunch in irreversible mode nodeToTest.kill(signal.SIGTERM) @@ -240,16 +250,16 @@ def switchSpecToIrrModeWithProdEnabled(nodeIdOfNodeToTest, nodeToTest): ensureHeadLibAndForkDbHeadIsAdvancing(nodeToTest) confirmHeadLibAndForkDbHeadOfIrrMode(nodeToTest) finally: - stopBlkProduction() + stopProdNode() - # 6th test case: Switch mode irreversible -> speculative without replay and production enabled + 
# 6th test case: Switch mode irreversible -> speculative without replay and connected to producing node # Expectation: Node switches mode successfully # and the head and lib should be advancing after some blocks produced # with head == forkDbHeadBeforeSwitchMode == forkDbHead and lib == headBeforeSwitchMode == libBeforeSwitchMode # Current Bug: Node switches mode successfully, however, it fails to establish connection with the producing node - def switchIrrToSpecModeWithProdEnabled(nodeIdOfNodeToTest, nodeToTest): + def switchIrrToSpecModeWithConnectedToProdNode(nodeIdOfNodeToTest, nodeToTest): try: - resumeBlkProduction() + startProdNode() # Kill and relaunch in irreversible mode nodeToTest.kill(signal.SIGTERM) @@ -260,16 +270,16 @@ def switchIrrToSpecModeWithProdEnabled(nodeIdOfNodeToTest, nodeToTest): ensureHeadLibAndForkDbHeadIsAdvancing(nodeToTest) confirmHeadLibAndForkDbHeadOfSpecMode(nodeToTest) finally: - stopBlkProduction() + stopProdNode() - # 7th test case: Replay in irreversible mode with reversible blks while production is enabled + # 7th test case: Replay in irreversible mode with reversible blks while connected to producing node # Expectation: Node replays and launches successfully # and the head and lib should be advancing after some blocks produced # with head == libBeforeSwitchMode == lib and forkDbHead == headBeforeSwitchMode == forkDbHeadBeforeSwitchMode # Current Bug: duplicate blk added error - def replayInIrrModeWithRevBlksAndProdEnabled(nodeIdOfNodeToTest, nodeToTest): + def replayInIrrModeWithRevBlksAndConnectedToProdNode(nodeIdOfNodeToTest, nodeToTest): try: - resumeBlkProduction() + startProdNode() # Kill node and replay in irreversible mode nodeToTest.kill(signal.SIGTERM) waitForBlksProducedAndLibAdvanced() # Wait @@ -279,16 +289,16 @@ def replayInIrrModeWithRevBlksAndProdEnabled(nodeIdOfNodeToTest, nodeToTest): ensureHeadLibAndForkDbHeadIsAdvancing(nodeToTest) confirmHeadLibAndForkDbHeadOfIrrMode(nodeToTest) finally: - stopBlkProduction() + stopProdNode() - # 8th test case: Replay in irreversible mode without reversible blks while production is enabled + # 8th test case: Replay in irreversible mode without reversible blks while connected to producing node # Expectation: Node replays and launches successfully # and the head and lib should be advancing after some blocks produced # with head == libBeforeSwitchMode == lib and forkDbHead == headBeforeSwitchMode == forkDbHeadBeforeSwitchMode # Current Bug: Nothing - def replayInIrrModeWithoutRevBlksAndProdEnabled(nodeIdOfNodeToTest, nodeToTest): + def replayInIrrModeWithoutRevBlksAndConnectedToProdNode(nodeIdOfNodeToTest, nodeToTest): try: - resumeBlkProduction() + startProdNode() # Kill node, remove rev blks and then replay in irreversible mode nodeToTest.kill(signal.SIGTERM) @@ -300,23 +310,24 @@ def replayInIrrModeWithoutRevBlksAndProdEnabled(nodeIdOfNodeToTest, nodeToTest): ensureHeadLibAndForkDbHeadIsAdvancing(nodeToTest) confirmHeadLibAndForkDbHeadOfIrrMode(nodeToTest) finally: - stopBlkProduction() + stopProdNode() # Start executing test cases here - executeTest(1, replayInIrrModeWithRevBlks) - executeTest(2, replayInIrrModeWithoutRevBlks) - executeTest(3, switchSpecToIrrMode) - executeTest(4, switchIrrToSpecMode) - executeTest(5, switchSpecToIrrModeWithProdEnabled) - executeTest(6, switchIrrToSpecModeWithProdEnabled) - executeTest(7, replayInIrrModeWithRevBlksAndProdEnabled) - executeTest(8, replayInIrrModeWithoutRevBlksAndProdEnabled) - - testSuccessful = True + testResult1 = executeTest(1, 
replayInIrrModeWithRevBlks) + testResult2 = executeTest(2, replayInIrrModeWithoutRevBlks) + testResult3 = executeTest(3, switchSpecToIrrMode) + testResult4 = executeTest(4, switchIrrToSpecMode) + testResult5 = executeTest(5, switchSpecToIrrModeWithConnectedToProdNode) + testResult6 = executeTest(6, switchIrrToSpecModeWithConnectedToProdNode) + testResult7 = executeTest(7, replayInIrrModeWithRevBlksAndConnectedToProdNode) + testResult8 = executeTest(8, replayInIrrModeWithoutRevBlksAndConnectedToProdNode) + + testSuccessful = testResult1 and testResult2 and testResult3 and testResult4 and testResult5 and testResult6 and testResult7 and testResult8 finally: TestHelper.shutdown(cluster, walletMgr, testSuccessful, killEosInstances, killWallet, keepLogs, killAll, dumpErrorDetails) # Print test result for msg in testResultMsgs: Print(msg) -exit(0) +exitCode = 0 if testSuccessful else 1 +exit(exitCode) From 41224982aa73e0efc64a7146ec316ba04ca2dd6e Mon Sep 17 00:00:00 2001 From: Andrianto Lie Date: Wed, 27 Feb 2019 18:16:30 +0800 Subject: [PATCH 084/680] Handle assertion when reversible blocks do not exist --- tests/nodeos_irreversible_mode_test.py | 19 ++++++++++++++----- 1 file changed, 14 insertions(+), 5 deletions(-) diff --git a/tests/nodeos_irreversible_mode_test.py b/tests/nodeos_irreversible_mode_test.py index f6cf8dec951..522dcfde251 100755 --- a/tests/nodeos_irreversible_mode_test.py +++ b/tests/nodeos_irreversible_mode_test.py @@ -77,18 +77,27 @@ def ensureHeadLibAndForkDbHeadIsAdvancing(nodeToTest): # headLibAndForkDbHeadBeforeSwitchMode should be only passed IF production is disabled, otherwise it provides erroneous check # When comparing with the the state before node is switched: # - head == libBeforeSwitchMode == lib and forkDbHead == headBeforeSwitchMode == forkDbHeadBeforeSwitchMode -def confirmHeadLibAndForkDbHeadOfIrrMode(nodeToTest, headLibAndForkDbHeadBeforeSwitchMode=None): +def confirmHeadLibAndForkDbHeadOfIrrMode(nodeToTest, headLibAndForkDbHeadBeforeSwitchMode=None, isReversibleBlocksDeleted=False): # In irreversible mode, head should be equal to lib and not equal to fork Db blk num head, lib, forkDbHead = getHeadLibAndForkDbHead(nodeToTest) assert head == lib, "Head ({}) should be equal to lib ({})".format(head, lib) - assert forkDbHead > head, "Fork db head ({}) should be larger than the head ({})".format(forkDbHead, head) + + # Fork db can be equal to the head if there is no reversible blocks + if not isReversibleBlocksDeleted: + assert forkDbHead > head, "Fork db head ({}) should be larger to the head ({}) when there's reversible blocks".format(forkDbHead, head) + else: + assert forkDbHead == head, "Fork db head ({}) should be larger or equal to the head ({}) when there's no reversible blocks".format(forkDbHead, head) if headLibAndForkDbHeadBeforeSwitchMode: headBeforeSwitchMode, libBeforeSwitchMode, forkDbHeadBeforeSwitchMode = headLibAndForkDbHeadBeforeSwitchMode assert head == libBeforeSwitchMode, "Head ({}) should be equal to lib before switch mode ({})".format(head, libBeforeSwitchMode) assert lib == libBeforeSwitchMode, "Lib ({}) should be equal to lib before switch mode ({})".format(lib, libBeforeSwitchMode) - assert forkDbHead == headBeforeSwitchMode and forkDbHead == forkDbHeadBeforeSwitchMode, \ - "Fork db head ({}) should be equal to head before switch mode ({}) and fork db head before switch mode ({})".format(forkDbHead, headBeforeSwitchMode, forkDbHeadBeforeSwitchMode) + # Different case when reversible blocks are deleted + if not 
isReversibleBlocksDeleted: + assert forkDbHead == headBeforeSwitchMode and forkDbHead == forkDbHeadBeforeSwitchMode, \ + "Fork db head ({}) should be equal to head before switch mode ({}) and fork db head before switch mode ({})".format(forkDbHead, headBeforeSwitchMode, forkDbHeadBeforeSwitchMode) + else: + assert forkDbHead == libBeforeSwitchMode, "Fork db head ({}) should be equal to lib before switch mode ({}) when there's no reversible blocks".format(forkDbHead, libBeforeSwitchMode) # Confirm the head lib and fork db of speculative mode # Under any condition of irreversible mode: @@ -200,7 +209,7 @@ def replayInIrrModeWithoutRevBlks(nodeIdOfNodeToTest, nodeToTest): relaunchNode(nodeToTest, nodeIdOfNodeToTest, chainArg=" --read-mode irreversible --replay") # Ensure the node condition is as expected after relaunch - confirmHeadLibAndForkDbHeadOfIrrMode(nodeToTest, headLibAndForkDbHeadBeforeSwitchMode) + confirmHeadLibAndForkDbHeadOfIrrMode(nodeToTest, headLibAndForkDbHeadBeforeSwitchMode, True) # 3rd test case: Switch mode speculative -> irreversible without replay # Expectation: Node switches mode successfully From 36f6f7c5bd59af617a1b8566f40948917d634ce4 Mon Sep 17 00:00:00 2001 From: Andrianto Lie Date: Thu, 28 Feb 2019 16:33:21 +0800 Subject: [PATCH 085/680] Enable cacheopen for the node relaunch --- tests/Node.py | 7 ++++--- tests/nodeos_irreversible_mode_test.py | 4 ++-- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/tests/Node.py b/tests/Node.py index 0b0e57dae2c..16eede1b46a 100644 --- a/tests/Node.py +++ b/tests/Node.py @@ -1204,7 +1204,7 @@ def kill(self, killSignal): else: os.kill(self.pid, killSignal) except OSError as ex: - Utils.Print("ERROR: Failed to kill node (%d)." % (self.cmd), ex) + Utils.Print("ERROR: Failed to kill node (%s)." 
% (self.cmd), ex) return False # wait for kill validation @@ -1365,8 +1365,9 @@ def isNodeAlive(): else: Utils.Print("ERROR: Node relaunch Failed.") # Ensure the node process is really killed - self.popenProc.send_signal(signal.SIGTERM) - self.popenProc.wait() + if self.popenProc: + self.popenProc.send_signal(signal.SIGTERM) + self.popenProc.wait() self.pid=None return False diff --git a/tests/nodeos_irreversible_mode_test.py b/tests/nodeos_irreversible_mode_test.py index 522dcfde251..1e8f9191a6a 100755 --- a/tests/nodeos_irreversible_mode_test.py +++ b/tests/nodeos_irreversible_mode_test.py @@ -120,7 +120,7 @@ def confirmHeadLibAndForkDbHeadOfSpecMode(nodeToTest, headLibAndForkDbHeadBefore "Fork db head ({}) should be equal to fork db head before switch mode ({}) ".format(forkDbHead, forkDbHeadBeforeSwitchMode) def relaunchNode(node: Node, nodeId, chainArg="", addOrSwapFlags=None, relaunchAssertMessage="Fail to relaunch"): - isRelaunchSuccess = node.relaunch(nodeId, chainArg=chainArg, addOrSwapFlags=addOrSwapFlags, timeout=relaunchTimeout) + isRelaunchSuccess = node.relaunch(nodeId, chainArg=chainArg, addOrSwapFlags=addOrSwapFlags, timeout=relaunchTimeout, cachePopen=True) assert isRelaunchSuccess, relaunchAssertMessage return isRelaunchSuccess @@ -153,7 +153,7 @@ def stopProdNode(): def startProdNode(): if producingNode.killed: - producingNode.relaunch(producingNodeId, "", timeout=relaunchTimeout) + relaunchNode(producingNode, producingNodeId) # Give some time for it to produce, so lib is advancing waitForBlksProducedAndLibAdvanced() From 708d193bfd30291091828e6ed08ecaa25087c6b3 Mon Sep 17 00:00:00 2001 From: Andrianto Lie Date: Tue, 5 Mar 2019 12:13:12 +0800 Subject: [PATCH 086/680] Add missing check for test case 1, update some comments, and remove duplicate comment --- tests/nodeos_irreversible_mode_test.py | 42 +++++++++++--------------- 1 file changed, 17 insertions(+), 25 deletions(-) diff --git a/tests/nodeos_irreversible_mode_test.py b/tests/nodeos_irreversible_mode_test.py index 1e8f9191a6a..09a55165947 100755 --- a/tests/nodeos_irreversible_mode_test.py +++ b/tests/nodeos_irreversible_mode_test.py @@ -73,20 +73,14 @@ def ensureHeadLibAndForkDbHeadIsAdvancing(nodeToTest): # Confirm the head lib and fork db of irreversible mode # Under any condition of irreversible mode: -# - forkDbHead > head == lib +# - forkDbHead >= head == lib # headLibAndForkDbHeadBeforeSwitchMode should be only passed IF production is disabled, otherwise it provides erroneous check # When comparing with the the state before node is switched: # - head == libBeforeSwitchMode == lib and forkDbHead == headBeforeSwitchMode == forkDbHeadBeforeSwitchMode def confirmHeadLibAndForkDbHeadOfIrrMode(nodeToTest, headLibAndForkDbHeadBeforeSwitchMode=None, isReversibleBlocksDeleted=False): - # In irreversible mode, head should be equal to lib and not equal to fork Db blk num head, lib, forkDbHead = getHeadLibAndForkDbHead(nodeToTest) assert head == lib, "Head ({}) should be equal to lib ({})".format(head, lib) - - # Fork db can be equal to the head if there is no reversible blocks - if not isReversibleBlocksDeleted: - assert forkDbHead > head, "Fork db head ({}) should be larger to the head ({}) when there's reversible blocks".format(forkDbHead, head) - else: - assert forkDbHead == head, "Fork db head ({}) should be larger or equal to the head ({}) when there's no reversible blocks".format(forkDbHead, head) + assert forkDbHead >= head, "Fork db head ({}) should be larger or equal to the head ({})".format(forkDbHead, head) 
if headLibAndForkDbHeadBeforeSwitchMode: headBeforeSwitchMode, libBeforeSwitchMode, forkDbHeadBeforeSwitchMode = headLibAndForkDbHeadBeforeSwitchMode @@ -100,15 +94,14 @@ def confirmHeadLibAndForkDbHeadOfIrrMode(nodeToTest, headLibAndForkDbHeadBeforeS assert forkDbHead == libBeforeSwitchMode, "Fork db head ({}) should be equal to lib before switch mode ({}) when there's no reversible blocks".format(forkDbHead, libBeforeSwitchMode) # Confirm the head lib and fork db of speculative mode -# Under any condition of irreversible mode: -# - forkDbHead == head > lib +# Under any condition of speculative mode: +# - forkDbHead == head >= lib # headLibAndForkDbHeadBeforeSwitchMode should be only passed IF production is disabled, otherwise it provides erroneous check # When comparing with the the state before node is switched: # - head == forkDbHeadBeforeSwitchMode == forkDbHead and lib == headBeforeSwitchMode == libBeforeSwitchMode def confirmHeadLibAndForkDbHeadOfSpecMode(nodeToTest, headLibAndForkDbHeadBeforeSwitchMode=None): - # In speculative mode, head should be equal to lib and not equal to fork Db blk num head, lib, forkDbHead = getHeadLibAndForkDbHead(nodeToTest) - assert head > lib, "Head should be larger than lib (head: {}, lib: {})".format(head, lib) + assert head >= lib, "Head should be larger or equal to lib (head: {}, lib: {})".format(head, lib) assert head == forkDbHead, "Head ({}) should be equal to fork db head ({})".format(head, forkDbHead) if headLibAndForkDbHeadBeforeSwitchMode: @@ -184,20 +177,21 @@ def executeTest(nodeIdOfNodeToTest, runTestScenario): return testResult # 1st test case: Replay in irreversible mode with reversible blks - # Expectation: Node replays and launches successfully - # with head == libBeforeSwitchMode == lib and forkDbHead == headBeforeSwitchMode == forkDbHeadBeforeSwitchMode + # Expectation: Node replays and launches successfully and forkdb head, head, and lib matches the irreversible mode expectation # Current Bug: duplicate blk added error def replayInIrrModeWithRevBlks(nodeIdOfNodeToTest, nodeToTest): + # Track head blk num and lib before shutdown + headLibAndForkDbHeadBeforeSwitchMode = getHeadLibAndForkDbHead(nodeToTest) + # Kill node and replay in irreversible mode nodeToTest.kill(signal.SIGTERM) relaunchNode(nodeToTest, nodeIdOfNodeToTest, chainArg=" --read-mode irreversible --replay") # Confirm state - confirmHeadLibAndForkDbHeadOfIrrMode(nodeToTest) + confirmHeadLibAndForkDbHeadOfIrrMode(nodeToTest, headLibAndForkDbHeadBeforeSwitchMode) # 2nd test case: Replay in irreversible mode without reversible blks - # Expectation: Node replays and launches successfully - # with head == libBeforeSwitchMode == lib and forkDbHead == headBeforeSwitchMode == forkDbHeadBeforeSwitchMode + # Expectation: Node replays and launches successfully and forkdb head, head, and lib matches the irreversible mode expectation # Current Bug: lib != libBeforeSwitchMode def replayInIrrModeWithoutRevBlks(nodeIdOfNodeToTest, nodeToTest): # Track head blk num and lib before shutdown @@ -212,8 +206,7 @@ def replayInIrrModeWithoutRevBlks(nodeIdOfNodeToTest, nodeToTest): confirmHeadLibAndForkDbHeadOfIrrMode(nodeToTest, headLibAndForkDbHeadBeforeSwitchMode, True) # 3rd test case: Switch mode speculative -> irreversible without replay - # Expectation: Node switches mode successfully - # with head == libBeforeSwitchMode == lib and forkDbHead == headBeforeSwitchMode == forkDbHeadBeforeSwitchMode + # Expectation: Node switches mode successfully and forkdb head, head, and lib matches the 
irreversible mode expectation # Current Bug: head != lib def switchSpecToIrrMode(nodeIdOfNodeToTest, nodeToTest): # Track head blk num and lib before shutdown @@ -227,8 +220,7 @@ def switchSpecToIrrMode(nodeIdOfNodeToTest, nodeToTest): confirmHeadLibAndForkDbHeadOfIrrMode(nodeToTest, headLibAndForkDbHeadBeforeSwitchMode) # 4th test case: Switch mode irreversible -> speculative without replay - # Expectation: Node switches mode successfully - # with head == forkDbHeadBeforeSwitchMode == forkDbHead and lib == headBeforeSwitchMode == libBeforeSwitchMode + # Expectation: Node switches mode successfully and forkdb head, head, and lib matches the speculative mode expectation # Current Bug: head != forkDbHead and head != forkDbHeadBeforeSwitchMode and lib != libBeforeSwitchMode def switchIrrToSpecMode(nodeIdOfNodeToTest, nodeToTest): # Track head blk num and lib before shutdown @@ -244,7 +236,7 @@ def switchIrrToSpecMode(nodeIdOfNodeToTest, nodeToTest): # 5th test case: Switch mode speculative -> irreversible without replay and connected to producing node # Expectation: Node switches mode successfully # and the head and lib should be advancing after some blocks produced - # with head == libBeforeSwitchMode == lib and forkDbHead == headBeforeSwitchMode == forkDbHeadBeforeSwitchMode + # and forkdb head, head, and lib matches the irreversible mode expectation # Current Bug: Fail to switch to irreversible mode, blk_validate_exception next blk in the future will be thrown def switchSpecToIrrModeWithConnectedToProdNode(nodeIdOfNodeToTest, nodeToTest): try: @@ -264,7 +256,7 @@ def switchSpecToIrrModeWithConnectedToProdNode(nodeIdOfNodeToTest, nodeToTest): # 6th test case: Switch mode irreversible -> speculative without replay and connected to producing node # Expectation: Node switches mode successfully # and the head and lib should be advancing after some blocks produced - # with head == forkDbHeadBeforeSwitchMode == forkDbHead and lib == headBeforeSwitchMode == libBeforeSwitchMode + # and forkdb head, head, and lib matches the speculative mode expectation # Current Bug: Node switches mode successfully, however, it fails to establish connection with the producing node def switchIrrToSpecModeWithConnectedToProdNode(nodeIdOfNodeToTest, nodeToTest): try: @@ -284,7 +276,7 @@ def switchIrrToSpecModeWithConnectedToProdNode(nodeIdOfNodeToTest, nodeToTest): # 7th test case: Replay in irreversible mode with reversible blks while connected to producing node # Expectation: Node replays and launches successfully # and the head and lib should be advancing after some blocks produced - # with head == libBeforeSwitchMode == lib and forkDbHead == headBeforeSwitchMode == forkDbHeadBeforeSwitchMode + # and forkdb head, head, and lib matches the irreversible mode expectation # Current Bug: duplicate blk added error def replayInIrrModeWithRevBlksAndConnectedToProdNode(nodeIdOfNodeToTest, nodeToTest): try: @@ -303,7 +295,7 @@ def replayInIrrModeWithRevBlksAndConnectedToProdNode(nodeIdOfNodeToTest, nodeToT # 8th test case: Replay in irreversible mode without reversible blks while connected to producing node # Expectation: Node replays and launches successfully # and the head and lib should be advancing after some blocks produced - # with head == libBeforeSwitchMode == lib and forkDbHead == headBeforeSwitchMode == forkDbHeadBeforeSwitchMode + # and forkdb head, head, and lib matches the irreversible mode expectation # Current Bug: Nothing def replayInIrrModeWithoutRevBlksAndConnectedToProdNode(nodeIdOfNodeToTest, nodeToTest): 
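      # For the scenarios above, the invariants the confirm* helpers in this
      # patch check (via getHeadLibAndForkDbHead) can be summarized as:
      #   irreversible mode: head == lib and forkDbHead >= head
      #   speculative mode:  head == forkDbHead and head >= lib
      # The fork db head only runs ahead of head while reversible blocks exist.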
try: From d89aae8d2c57e04021f2e07061962c82baa95be8 Mon Sep 17 00:00:00 2001 From: Andrianto Lie Date: Tue, 5 Mar 2019 17:52:44 +0800 Subject: [PATCH 087/680] Add additional test case for #6704 --- tests/Cluster.py | 2 +- tests/nodeos_irreversible_mode_test.py | 110 +++++++++++++++++++++---- 2 files changed, 96 insertions(+), 16 deletions(-) diff --git a/tests/Cluster.py b/tests/Cluster.py index 8363e41781e..e7740d19cd3 100644 --- a/tests/Cluster.py +++ b/tests/Cluster.py @@ -1569,4 +1569,4 @@ def stripValues(lowestMaxes,greaterThan): @staticmethod def getDataDir(nodeId): - return os.path.join(Cluster.__dataDir, "node_%02d" % (nodeId)) + return os.path.abspath(os.path.join(Cluster.__dataDir, "node_%02d" % (nodeId))) diff --git a/tests/nodeos_irreversible_mode_test.py b/tests/nodeos_irreversible_mode_test.py index 09a55165947..73796eb1f4c 100755 --- a/tests/nodeos_irreversible_mode_test.py +++ b/tests/nodeos_irreversible_mode_test.py @@ -29,7 +29,7 @@ cmdError = Utils.cmdError relaunchTimeout = 5 numOfProducers = 4 -totalNodes = 9 +totalNodes = 10 # Parse command line arguments args = TestHelper.parse_args({"-v","--clean-run","--dump-error-details","--leave-running","--keep-logs"}) @@ -46,11 +46,42 @@ cluster=Cluster(walletd=True) cluster.setWalletMgr(walletMgr) +def makeSnapshot(nodeId): + req = urllib.request.Request("http://127.0.0.1:{}/v1/producer/create_snapshot".format(8888 + int(nodeId))) + urllib.request.urlopen(req) + +def backupBlksDir(nodeId): + dataDir = Cluster.getDataDir(nodeId) + sourceDir = os.path.join(dataDir, "blocks") + destinationDir = os.path.join(os.path.dirname(dataDir), os.path.basename(dataDir) + "-backup", "blocks") + shutil.copytree(sourceDir, destinationDir) + +def recoverBackedupBlksDir(nodeId): + dataDir = Cluster.getDataDir(nodeId) + # Delete existing one and copy backed up one + existingBlocksDir = os.path.join(dataDir, "blocks") + backedupBlocksDir = os.path.join(os.path.dirname(dataDir), os.path.basename(dataDir) + "-backup", "blocks") + shutil.rmtree(existingBlocksDir, ignore_errors=True) + shutil.copytree(backedupBlocksDir, existingBlocksDir) + +def getLatestSnapshot(nodeId): + snapshotDir = os.path.join(Cluster.getDataDir(nodeId), "snapshots") + snapshotDirContents = os.listdir(snapshotDir) + assert len(snapshotDirContents) > 0 + snapshotDirContents.sort() + return os.path.join(snapshotDir, snapshotDirContents[-1]) + + def removeReversibleBlks(nodeId): dataDir = Cluster.getDataDir(nodeId) reversibleBlks = os.path.join(dataDir, "blocks", "reversible") shutil.rmtree(reversibleBlks, ignore_errors=True) +def removeState(nodeId): + dataDir = Cluster.getDataDir(nodeId) + state = os.path.join(dataDir, "state") + shutil.rmtree(state, ignore_errors=True) + def getHeadLibAndForkDbHead(node: Node): info = node.getInfo() assert info is not None, "Fail to retrieve info from the node, the node is currently having a problem" @@ -135,7 +166,8 @@ def relaunchNode(node: Node, nodeId, chainArg="", addOrSwapFlags=None, relaunchA specificExtraNodeosArgs={ 0:"--enable-stale-production", 4:"--read-mode irreversible", - 6:"--read-mode irreversible"}) + 6:"--read-mode irreversible", + 9:"--plugin eosio::producer_api_plugin"}) producingNodeId = 0 producingNode = cluster.getNode(producingNodeId) @@ -167,13 +199,13 @@ def executeTest(nodeIdOfNodeToTest, runTestScenario): # Run test scenario runTestScenario(nodeIdOfNodeToTest, nodeToTest) - - # Kill node after use - if not nodeToTest.killed: nodeToTest.kill(signal.SIGTERM) testResultMsgs.append("!!!TEST CASE #{} ({}) IS 
SUCCESSFUL".format(nodeIdOfNodeToTest, runTestScenario.__name__)) testResult = True except Exception as e: testResultMsgs.append("!!!BUG IS CONFIRMED ON TEST CASE #{} ({}): {}".format(nodeIdOfNodeToTest, runTestScenario.__name__, e)) + finally: + # Kill node after use + if not nodeToTest.killed: nodeToTest.kill(signal.SIGTERM) return testResult # 1st test case: Replay in irreversible mode with reversible blks @@ -313,18 +345,66 @@ def replayInIrrModeWithoutRevBlksAndConnectedToProdNode(nodeIdOfNodeToTest, node finally: stopProdNode() + # 9th test case: Switch to speculative mode while using irreversible mode snapshots and using backed up speculative blocks + # Expectation: Node replays and launches successfully + # and the head and lib should be advancing after some blocks produced + # and forkdb head, head, and lib should stay the same after relaunch + # Current Bug: Nothing + def switchToSpecModeWithIrrModeSnapshot(nodeIdOfNodeToTest, nodeToTest): + try: + # Kill node and backup blocks directory of speculative mode + headLibAndForkDbHeadBeforeShutdown = getHeadLibAndForkDbHead(nodeToTest) + nodeToTest.kill(signal.SIGTERM) + backupBlksDir(nodeIdOfNodeToTest) + + # Relaunch in irreversible mode and create the snapshot + relaunchNode(nodeToTest, nodeIdOfNodeToTest, chainArg=" --read-mode irreversible") + confirmHeadLibAndForkDbHeadOfIrrMode(nodeToTest) + makeSnapshot(nodeIdOfNodeToTest) + nodeToTest.kill(signal.SIGTERM) + + # Start from clean data dir, recover back up blocks, and then relaunch with irreversible snapshot + removeState(nodeIdOfNodeToTest) + recoverBackedupBlksDir(nodeIdOfNodeToTest) # this function will delete the existing blocks dir first + relaunchNode(nodeToTest, nodeIdOfNodeToTest, chainArg=" --snapshot {}".format(getLatestSnapshot(nodeIdOfNodeToTest)), addOrSwapFlags={"--read-mode": "speculative"}) + confirmHeadLibAndForkDbHeadOfSpecMode(nodeToTest) + # Ensure it automatically replays "reversible blocks", i.e. head lib and fork db should be the same + headLibAndForkDbHeadAfterRelaunch = getHeadLibAndForkDbHead(nodeToTest) + assert headLibAndForkDbHeadBeforeShutdown == headLibAndForkDbHeadAfterRelaunch, \ + "Head, Lib, and Fork Db after relaunch is different {} vs {}".format(headLibAndForkDbHeadBeforeShutdown, headLibAndForkDbHeadAfterRelaunch) + + # Start production and wait until lib advance, ensure everything is alright + startProdNode() + ensureHeadLibAndForkDbHeadIsAdvancing(nodeToTest) + + # Note the head, lib and fork db head + stopProdNode() + headLibAndForkDbHeadBeforeShutdown = getHeadLibAndForkDbHead(nodeToTest) + nodeToTest.kill(signal.SIGTERM) + + # Relaunch the node again (using the same snapshot) + # This time ensure it automatically replays both "irreversible blocks" and "reversible blocks", i.e. 
the end result should be the same as before shutdown + removeState(nodeIdOfNodeToTest) + relaunchNode(nodeToTest, nodeIdOfNodeToTest) + headLibAndForkDbHeadAfterRelaunch = getHeadLibAndForkDbHead(nodeToTest) + assert headLibAndForkDbHeadBeforeShutdown == headLibAndForkDbHeadAfterRelaunch, \ + "Head, Lib, and Fork Db after relaunch is different {} vs {}".format(headLibAndForkDbHeadBeforeShutdown, headLibAndForkDbHeadAfterRelaunch) + finally: + stopProdNode() # Start executing test cases here - testResult1 = executeTest(1, replayInIrrModeWithRevBlks) - testResult2 = executeTest(2, replayInIrrModeWithoutRevBlks) - testResult3 = executeTest(3, switchSpecToIrrMode) - testResult4 = executeTest(4, switchIrrToSpecMode) - testResult5 = executeTest(5, switchSpecToIrrModeWithConnectedToProdNode) - testResult6 = executeTest(6, switchIrrToSpecModeWithConnectedToProdNode) - testResult7 = executeTest(7, replayInIrrModeWithRevBlksAndConnectedToProdNode) - testResult8 = executeTest(8, replayInIrrModeWithoutRevBlksAndConnectedToProdNode) - - testSuccessful = testResult1 and testResult2 and testResult3 and testResult4 and testResult5 and testResult6 and testResult7 and testResult8 + testResults = [] + testResults.append( executeTest(1, replayInIrrModeWithRevBlks) ) + testResults.append( executeTest(2, replayInIrrModeWithoutRevBlks) ) + testResults.append( executeTest(3, switchSpecToIrrMode) ) + testResults.append( executeTest(4, switchIrrToSpecMode) ) + testResults.append( executeTest(5, switchSpecToIrrModeWithConnectedToProdNode) ) + testResults.append( executeTest(6, switchIrrToSpecModeWithConnectedToProdNode) ) + testResults.append( executeTest(7, replayInIrrModeWithRevBlksAndConnectedToProdNode) ) + testResults.append( executeTest(8, replayInIrrModeWithoutRevBlksAndConnectedToProdNode) ) + testResults.append( executeTest(9, switchToSpecModeWithIrrModeSnapshot) ) + + testSuccessful = all(testResults) finally: TestHelper.shutdown(cluster, walletMgr, testSuccessful, killEosInstances, killWallet, keepLogs, killAll, dumpErrorDetails) # Print test result From 5e3ed75c976e1e0e64132aa867bdf56829c94fd3 Mon Sep 17 00:00:00 2001 From: Andrianto Lie Date: Wed, 6 Mar 2019 13:49:13 +0800 Subject: [PATCH 088/680] Modify test case 2 to match new behavior --- tests/nodeos_irreversible_mode_test.py | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) diff --git a/tests/nodeos_irreversible_mode_test.py b/tests/nodeos_irreversible_mode_test.py index 73796eb1f4c..2710488db4a 100755 --- a/tests/nodeos_irreversible_mode_test.py +++ b/tests/nodeos_irreversible_mode_test.py @@ -90,9 +90,12 @@ def getHeadLibAndForkDbHead(node: Node): forkDbHead = int(info["fork_db_head_block_num"]) return head, lib, forkDbHead -# Around 30 seconds should be enough to advance lib for 4 producers +# Wait for some time until LIB advance def waitForBlksProducedAndLibAdvanced(): - time.sleep(30) + # Give 6 seconds buffer time + requiredConfirmation = int(2 / 3 * numOfProducers) + 1 + timeToWait = ((12 * requiredConfirmation - 1) * 2) + 6 + time.sleep(timeToWait) # Ensure that the relaunched node received blks from producers, in other words head and lib is advancing def ensureHeadLibAndForkDbHeadIsAdvancing(nodeToTest): @@ -108,7 +111,7 @@ def ensureHeadLibAndForkDbHeadIsAdvancing(nodeToTest): # headLibAndForkDbHeadBeforeSwitchMode should be only passed IF production is disabled, otherwise it provides erroneous check # When comparing with the the state before node is switched: # - head == libBeforeSwitchMode == lib and forkDbHead 
== headBeforeSwitchMode == forkDbHeadBeforeSwitchMode -def confirmHeadLibAndForkDbHeadOfIrrMode(nodeToTest, headLibAndForkDbHeadBeforeSwitchMode=None, isReversibleBlocksDeleted=False): +def confirmHeadLibAndForkDbHeadOfIrrMode(nodeToTest, headLibAndForkDbHeadBeforeSwitchMode=None): head, lib, forkDbHead = getHeadLibAndForkDbHead(nodeToTest) assert head == lib, "Head ({}) should be equal to lib ({})".format(head, lib) assert forkDbHead >= head, "Fork db head ({}) should be larger or equal to the head ({})".format(forkDbHead, head) @@ -117,12 +120,7 @@ def confirmHeadLibAndForkDbHeadOfIrrMode(nodeToTest, headLibAndForkDbHeadBeforeS headBeforeSwitchMode, libBeforeSwitchMode, forkDbHeadBeforeSwitchMode = headLibAndForkDbHeadBeforeSwitchMode assert head == libBeforeSwitchMode, "Head ({}) should be equal to lib before switch mode ({})".format(head, libBeforeSwitchMode) assert lib == libBeforeSwitchMode, "Lib ({}) should be equal to lib before switch mode ({})".format(lib, libBeforeSwitchMode) - # Different case when reversible blocks are deleted - if not isReversibleBlocksDeleted: - assert forkDbHead == headBeforeSwitchMode and forkDbHead == forkDbHeadBeforeSwitchMode, \ - "Fork db head ({}) should be equal to head before switch mode ({}) and fork db head before switch mode ({})".format(forkDbHead, headBeforeSwitchMode, forkDbHeadBeforeSwitchMode) - else: - assert forkDbHead == libBeforeSwitchMode, "Fork db head ({}) should be equal to lib before switch mode ({}) when there's no reversible blocks".format(forkDbHead, libBeforeSwitchMode) + assert forkDbHead == headBeforeSwitchMode and forkDbHead == forkDbHeadBeforeSwitchMode # Confirm the head lib and fork db of speculative mode # Under any condition of speculative mode: @@ -145,6 +143,7 @@ def confirmHeadLibAndForkDbHeadOfSpecMode(nodeToTest, headLibAndForkDbHeadBefore def relaunchNode(node: Node, nodeId, chainArg="", addOrSwapFlags=None, relaunchAssertMessage="Fail to relaunch"): isRelaunchSuccess = node.relaunch(nodeId, chainArg=chainArg, addOrSwapFlags=addOrSwapFlags, timeout=relaunchTimeout, cachePopen=True) + time.sleep(1) # Give a second to replay or resync if needed assert isRelaunchSuccess, relaunchAssertMessage return isRelaunchSuccess @@ -235,7 +234,7 @@ def replayInIrrModeWithoutRevBlks(nodeIdOfNodeToTest, nodeToTest): relaunchNode(nodeToTest, nodeIdOfNodeToTest, chainArg=" --read-mode irreversible --replay") # Ensure the node condition is as expected after relaunch - confirmHeadLibAndForkDbHeadOfIrrMode(nodeToTest, headLibAndForkDbHeadBeforeSwitchMode, True) + confirmHeadLibAndForkDbHeadOfIrrMode(nodeToTest, headLibAndForkDbHeadBeforeSwitchMode) # 3rd test case: Switch mode speculative -> irreversible without replay # Expectation: Node switches mode successfully and forkdb head, head, and lib matches the irreversible mode expectation From c237422224555afeae639f151da0e87ac6170b44 Mon Sep 17 00:00:00 2001 From: Andrianto Lie Date: Wed, 6 Mar 2019 13:50:15 +0800 Subject: [PATCH 089/680] Remove current bug comments as they are fixed --- tests/nodeos_irreversible_mode_test.py | 9 --------- 1 file changed, 9 deletions(-) diff --git a/tests/nodeos_irreversible_mode_test.py b/tests/nodeos_irreversible_mode_test.py index 2710488db4a..8aadc991bdc 100755 --- a/tests/nodeos_irreversible_mode_test.py +++ b/tests/nodeos_irreversible_mode_test.py @@ -209,7 +209,6 @@ def executeTest(nodeIdOfNodeToTest, runTestScenario): # 1st test case: Replay in irreversible mode with reversible blks # Expectation: Node replays and launches successfully and forkdb 
head, head, and lib matches the irreversible mode expectation - # Current Bug: duplicate blk added error def replayInIrrModeWithRevBlks(nodeIdOfNodeToTest, nodeToTest): # Track head blk num and lib before shutdown headLibAndForkDbHeadBeforeSwitchMode = getHeadLibAndForkDbHead(nodeToTest) @@ -223,7 +222,6 @@ def replayInIrrModeWithRevBlks(nodeIdOfNodeToTest, nodeToTest): # 2nd test case: Replay in irreversible mode without reversible blks # Expectation: Node replays and launches successfully and forkdb head, head, and lib matches the irreversible mode expectation - # Current Bug: lib != libBeforeSwitchMode def replayInIrrModeWithoutRevBlks(nodeIdOfNodeToTest, nodeToTest): # Track head blk num and lib before shutdown headLibAndForkDbHeadBeforeSwitchMode = getHeadLibAndForkDbHead(nodeToTest) @@ -238,7 +236,6 @@ def replayInIrrModeWithoutRevBlks(nodeIdOfNodeToTest, nodeToTest): # 3rd test case: Switch mode speculative -> irreversible without replay # Expectation: Node switches mode successfully and forkdb head, head, and lib matches the irreversible mode expectation - # Current Bug: head != lib def switchSpecToIrrMode(nodeIdOfNodeToTest, nodeToTest): # Track head blk num and lib before shutdown headLibAndForkDbHeadBeforeSwitchMode = getHeadLibAndForkDbHead(nodeToTest) @@ -252,7 +249,6 @@ def switchSpecToIrrMode(nodeIdOfNodeToTest, nodeToTest): # 4th test case: Switch mode irreversible -> speculative without replay # Expectation: Node switches mode successfully and forkdb head, head, and lib matches the speculative mode expectation - # Current Bug: head != forkDbHead and head != forkDbHeadBeforeSwitchMode and lib != libBeforeSwitchMode def switchIrrToSpecMode(nodeIdOfNodeToTest, nodeToTest): # Track head blk num and lib before shutdown headLibAndForkDbHeadBeforeSwitchMode = getHeadLibAndForkDbHead(nodeToTest) @@ -268,7 +264,6 @@ def switchIrrToSpecMode(nodeIdOfNodeToTest, nodeToTest): # Expectation: Node switches mode successfully # and the head and lib should be advancing after some blocks produced # and forkdb head, head, and lib matches the irreversible mode expectation - # Current Bug: Fail to switch to irreversible mode, blk_validate_exception next blk in the future will be thrown def switchSpecToIrrModeWithConnectedToProdNode(nodeIdOfNodeToTest, nodeToTest): try: startProdNode() @@ -288,7 +283,6 @@ def switchSpecToIrrModeWithConnectedToProdNode(nodeIdOfNodeToTest, nodeToTest): # Expectation: Node switches mode successfully # and the head and lib should be advancing after some blocks produced # and forkdb head, head, and lib matches the speculative mode expectation - # Current Bug: Node switches mode successfully, however, it fails to establish connection with the producing node def switchIrrToSpecModeWithConnectedToProdNode(nodeIdOfNodeToTest, nodeToTest): try: startProdNode() @@ -308,7 +302,6 @@ def switchIrrToSpecModeWithConnectedToProdNode(nodeIdOfNodeToTest, nodeToTest): # Expectation: Node replays and launches successfully # and the head and lib should be advancing after some blocks produced # and forkdb head, head, and lib matches the irreversible mode expectation - # Current Bug: duplicate blk added error def replayInIrrModeWithRevBlksAndConnectedToProdNode(nodeIdOfNodeToTest, nodeToTest): try: startProdNode() @@ -327,7 +320,6 @@ def replayInIrrModeWithRevBlksAndConnectedToProdNode(nodeIdOfNodeToTest, nodeToT # Expectation: Node replays and launches successfully # and the head and lib should be advancing after some blocks produced # and forkdb head, head, and lib matches the 
irreversible mode expectation
-   # Current Bug: Nothing
    def replayInIrrModeWithoutRevBlksAndConnectedToProdNode(nodeIdOfNodeToTest, nodeToTest):
       try:
          startProdNode()
@@ -348,7 +340,6 @@ def replayInIrrModeWithoutRevBlksAndConnectedToProdNode(nodeIdOfNodeToTest, node
    # Expectation: Node replays and launches successfully
    #              and the head and lib should be advancing after some blocks produced
    #              and forkdb head, head, and lib should stay the same after relaunch
-   # Current Bug: Nothing
    def switchToSpecModeWithIrrModeSnapshot(nodeIdOfNodeToTest, nodeToTest):
       try:
          # Kill node and backup blocks directory of speculative mode

From e0160e2cda44e5bfd09f841c0cad64001137ee9d Mon Sep 17 00:00:00 2001
From: Andrianto Lie
Date: Wed, 6 Mar 2019 16:00:49 +0800
Subject: [PATCH 090/680] Update wrong calculation and add back deleted assert
 message

---
 tests/nodeos_irreversible_mode_test.py | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)

diff --git a/tests/nodeos_irreversible_mode_test.py b/tests/nodeos_irreversible_mode_test.py
index 8aadc991bdc..4468173e0a2 100755
--- a/tests/nodeos_irreversible_mode_test.py
+++ b/tests/nodeos_irreversible_mode_test.py
@@ -92,9 +92,11 @@ def getHeadLibAndForkDbHead(node: Node):
 
 # Wait for some time until LIB advance
 def waitForBlksProducedAndLibAdvanced():
-   # Give 6 seconds buffer time
    requiredConfirmation = int(2 / 3 * numOfProducers) + 1
-   timeToWait = ((12 * requiredConfirmation - 1) * 2) + 6
+   maxNumOfBlksReqToConfirmLib = (12 * requiredConfirmation - 1) * 2
+   # Give 6 seconds buffer time
+   bufferTime = 6
+   timeToWait = maxNumOfBlksReqToConfirmLib / 2 + bufferTime
    time.sleep(timeToWait)
 
 # Ensure that the relaunched node received blks from producers, in other words head and lib is advancing
@@ -120,7 +122,8 @@ def confirmHeadLibAndForkDbHeadOfIrrMode(nodeToTest, headLibAndForkDbHeadBeforeS
    headBeforeSwitchMode, libBeforeSwitchMode, forkDbHeadBeforeSwitchMode = headLibAndForkDbHeadBeforeSwitchMode
    assert head == libBeforeSwitchMode, "Head ({}) should be equal to lib before switch mode ({})".format(head, libBeforeSwitchMode)
    assert lib == libBeforeSwitchMode, "Lib ({}) should be equal to lib before switch mode ({})".format(lib, libBeforeSwitchMode)
-   assert forkDbHead == headBeforeSwitchMode and forkDbHead == forkDbHeadBeforeSwitchMode
+   assert forkDbHead == headBeforeSwitchMode and forkDbHead == forkDbHeadBeforeSwitchMode, \
+      "Fork db head ({}) should be equal to head before switch mode ({}) and fork db head before switch mode ({})".format(forkDbHead, headBeforeSwitchMode, forkDbHeadBeforeSwitchMode)

From b1ada0fefde2b50ad3803625cb70f788f1cef676 Mon Sep 17 00:00:00 2001
From: ovi
Date: Wed, 6 Mar 2019 16:43:39 +0200
Subject: [PATCH 091/680] Update the bios boot tutorial python script to also
 create the eosio.rex account along with the other system accounts.
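
For reference, account creation in the tutorial is a single loop over the
systemAccounts list, run early in the boot sequence before the system
contract is installed, so supporting REX only needs the one-line list
addition shown in the diff below. A minimal sketch of that loop (the
function name and the abridged list here are illustrative only; the actual
script assembles the same "cleos create account" command through its own
argument-handling helpers):

    import subprocess

    systemAccounts = [
        # ... earlier system accounts elided ...
        'eosio.stake', 'eosio.token', 'eosio.vpay',
        'eosio.rex',   # new entry introduced by this patch
    ]

    def createSystemAccounts(publicKey):
        # Every system account is created under eosio and shares the bootstrap key.
        for name in systemAccounts:
            subprocess.check_call(['cleos', 'create', 'account', 'eosio', name, publicKey])

Collecting the names in one list keeps later additions, like eosio.rex here,
to single-line changes.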
--- tutorials/bios-boot-tutorial/bios-boot-tutorial.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tutorials/bios-boot-tutorial/bios-boot-tutorial.py b/tutorials/bios-boot-tutorial/bios-boot-tutorial.py index 59a1590d4d6..c52a3add0ef 100755 --- a/tutorials/bios-boot-tutorial/bios-boot-tutorial.py +++ b/tutorials/bios-boot-tutorial/bios-boot-tutorial.py @@ -26,6 +26,7 @@ 'eosio.stake', 'eosio.token', 'eosio.vpay', + 'eosio.rex', ] def jsonArg(a): From 481149643325a201fe4f40da3b19588f0b09f8d7 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Tue, 5 Mar 2019 16:53:20 -0500 Subject: [PATCH 092/680] Consolidated Security Fixes for 1.7.0-rc2 - Fix small memory leak in net_plugin. - Add additional deadline checks to transaction authorization. --- libraries/chain/controller.cpp | 4 +--- plugins/net_plugin/net_plugin.cpp | 17 ++++++++++++++++- 2 files changed, 17 insertions(+), 4 deletions(-) diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp index 3f1d5cf7837..f3b0a841981 100644 --- a/libraries/chain/controller.cpp +++ b/libraries/chain/controller.cpp @@ -1033,9 +1033,7 @@ struct controller_impl { recovered_keys, {}, trx_context.delay, - [](){} - /*std::bind(&transaction_context::add_cpu_usage_and_check_time, &trx_context, - std::placeholders::_1)*/, + [&trx_context](){ trx_context.checktime(); }, false ); } diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index e4adc0dd6ac..320214ae933 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -716,6 +716,7 @@ namespace eosio { void rejected_block(const block_id_type& id); void recv_block(const connection_ptr& conn, const block_id_type& msg, uint32_t bnum); + void expire_blocks( uint32_t bnum ); void recv_transaction(const connection_ptr& conn, const transaction_id_type& id); void recv_notice(const connection_ptr& conn, const notice_message& msg, bool generated); @@ -1656,11 +1657,23 @@ namespace eosio { } void dispatch_manager::rejected_block(const block_id_type& id) { - fc_dlog(logger,"not sending rejected transaction ${tid}",("tid",id)); + fc_dlog( logger, "rejected block ${id}", ("id", id) ); auto range = received_blocks.equal_range(id); received_blocks.erase(range.first, range.second); } + void dispatch_manager::expire_blocks( uint32_t lib_num ) { + for( auto i = received_blocks.begin(); i != received_blocks.end(); ) { + const block_id_type& blk_id = i->first; + uint32_t blk_num = block_header::num_from_id( blk_id ); + if( blk_num <= lib_num ) { + i = received_blocks.erase( i ); + } else { + ++i; + } + } + } + void dispatch_manager::bcast_transaction(const transaction_metadata_ptr& ptrx) { std::set skips; const auto& id = ptrx->id; @@ -2590,6 +2603,7 @@ namespace eosio { } else { sync_master->rejected_block(c, blk_num); + dispatcher->rejected_block( blk_id ); } } @@ -2657,6 +2671,7 @@ namespace eosio { controller& cc = chain_plug->chain(); uint32_t lib = cc.last_irreversible_block_num(); + dispatcher->expire_blocks( lib ); for ( auto &c : connections ) { auto &stale_txn = c->trx_state.get(); stale_txn.erase( stale_txn.lower_bound(1), stale_txn.upper_bound(lib) ); From 3c6d222da586acb4a2cdbd9c6f0b8708d94e4140 Mon Sep 17 00:00:00 2001 From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com> Date: Wed, 6 Mar 2019 19:40:55 -0500 Subject: [PATCH 093/680] Two small compiler warning fixes Unused variable & catching polymorphic exception by value --- plugins/producer_plugin/producer_plugin.cpp | 2 -- 
plugins/txn_test_gen_plugin/txn_test_gen_plugin.cpp | 2 +- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp index f3eb2164cea..a35fa34a9c5 100644 --- a/plugins/producer_plugin/producer_plugin.cpp +++ b/plugins/producer_plugin/producer_plugin.cpp @@ -1584,8 +1584,6 @@ void producer_plugin_impl::produce_block() { } ); chain.commit_block(); - auto hbt = chain.head_block_time(); - //idump((fc::time_point::now() - hbt)); block_state_ptr new_bs = chain.head_block_state(); _producer_watermarks[new_bs->header.producer] = chain.head_block_num(); diff --git a/plugins/txn_test_gen_plugin/txn_test_gen_plugin.cpp b/plugins/txn_test_gen_plugin/txn_test_gen_plugin.cpp index d4f197df468..460ecb57bd6 100755 --- a/plugins/txn_test_gen_plugin/txn_test_gen_plugin.cpp +++ b/plugins/txn_test_gen_plugin/txn_test_gen_plugin.cpp @@ -436,7 +436,7 @@ void txn_test_gen_plugin::plugin_shutdown() { try { my->stop_generation(); } - catch(fc::exception e) { + catch(fc::exception& e) { } } From 97f57de358af847e1060284a3ffc84f623125cb7 Mon Sep 17 00:00:00 2001 From: arhag Date: Thu, 7 Mar 2019 11:15:55 -0500 Subject: [PATCH 094/680] add dependencies to hash that generates feature digest --- libraries/chain/protocol_feature_manager.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/libraries/chain/protocol_feature_manager.cpp b/libraries/chain/protocol_feature_manager.cpp index 799ab99bab4..e815eaf1950 100644 --- a/libraries/chain/protocol_feature_manager.cpp +++ b/libraries/chain/protocol_feature_manager.cpp @@ -102,6 +102,7 @@ namespace eosio { namespace chain { digest_type::encoder enc; fc::raw::pack( enc, _type ); fc::raw::pack( enc, description_digest ); + fc::raw::pack( enc, dependencies ); fc::raw::pack( enc, _codename ); return enc.result(); From ea72642b8e1ebde93f5cce865960ba4fd73aca1f Mon Sep 17 00:00:00 2001 From: Nathan Pierce Date: Thu, 7 Mar 2019 11:16:30 -0500 Subject: [PATCH 095/680] Long Running Tests fix (#6890) --- .buildkite/long_running_tests.yml | 303 ++++++++++++++++++++++-------- 1 file changed, 224 insertions(+), 79 deletions(-) diff --git a/.buildkite/long_running_tests.yml b/.buildkite/long_running_tests.yml index e22016c4de4..0e6133019ce 100644 --- a/.buildkite/long_running_tests.yml +++ b/.buildkite/long_running_tests.yml @@ -1,120 +1,197 @@ steps: + - command: | - echo "--- Creating symbolic link to job directory :file_folder:" && \ - sleep 5 && ln -s "$(pwd)" /data/job && cd /data/job && \ - echo "+++ Building :hammer:" && \ - echo 1 | ./eosio_build.sh && \ - echo "--- Compressing build directory :compression:" && \ - tar -pczf build.tar.gz build/ - label: ":darwin: Build" + echo "+++ :hammer: Building" && \ + ./scripts/eosio_build.sh -y && \ + echo "--- :compression: Compressing build directory" && \ + tar -pczf build.tar.gz build/ + label: ":ubuntu: 16.04 Build" agents: - - "role=macos-builder" + queue: "automation-large-builder-fleet" artifact_paths: "build.tar.gz" + plugins: + ecr#v1.1.4: + login: true + account_ids: "436617320021" + no-include-email: true + region: "us-west-2" + docker#v2.1.0: + debug: true + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu16_2-1" + workdir: /data/job timeout: 60 - command: | echo "+++ :hammer: Building" && \ - echo 1 | ./eosio_build.sh && \ + ./scripts/eosio_build.sh -y && \ echo "--- :compression: Compressing build directory" && \ tar -pczf build.tar.gz build/ - label: ":ubuntu: Build" + label: ":ubuntu: 18.04 Build" agents: queue: 
"automation-large-builder-fleet" artifact_paths: "build.tar.gz" plugins: - docker#v1.4.0: - image: "eosio/ci:ubuntu" + ecr#v1.1.4: + login: true + account_ids: "436617320021" + no-include-email: true + region: "us-west-2" + docker#v2.1.0: + debug: true + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-1" workdir: /data/job timeout: 60 - + - command: | echo "+++ :hammer: Building" && \ - echo 1 | ./eosio_build.sh && \ + ./scripts/eosio_build.sh -y && \ echo "--- :compression: Compressing build directory" && \ tar -pczf build.tar.gz build/ - label: ":ubuntu: 18.04 Build" + label: ":centos: 7 Build" agents: queue: "automation-large-builder-fleet" artifact_paths: "build.tar.gz" plugins: - docker#v1.4.0: - image: "eosio/ci:ubuntu18" + ecr#v1.1.4: + login: true + account_ids: "436617320021" + no-include-email: true + region: "us-west-2" + docker#v2.1.0: + debug: true + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-1" workdir: /data/job timeout: 60 - command: | echo "+++ :hammer: Building" && \ - echo 1 | ./eosio_build.sh && \ + ./scripts/eosio_build.sh -y && \ echo "--- :compression: Compressing build directory" && \ tar -pczf build.tar.gz build/ - label: ":fedora: Build" + label: ":aws: 1 Build" agents: queue: "automation-large-builder-fleet" artifact_paths: "build.tar.gz" plugins: - docker#v1.4.0: - image: "eosio/ci:fedora" + ecr#v1.1.4: + login: true + account_ids: "436617320021" + no-include-email: true + region: "us-west-2" + docker#v2.1.0: + debug: true + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux1_2-1" workdir: /data/job timeout: 60 - + - command: | echo "+++ :hammer: Building" && \ - echo 1 | ./eosio_build.sh && \ + ./scripts/eosio_build.sh -y && \ echo "--- :compression: Compressing build directory" && \ tar -pczf build.tar.gz build/ - label: ":centos: Build" + label: ":aws: 2 Build" agents: queue: "automation-large-builder-fleet" artifact_paths: "build.tar.gz" plugins: - docker#v1.4.0: - image: "eosio/ci:centos" + ecr#v1.1.4: + login: true + account_ids: "436617320021" + no-include-email: true + region: "us-west-2" + docker#v2.1.0: + debug: true + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-1" workdir: /data/job timeout: 60 - command: | echo "+++ :hammer: Building" && \ - echo 1 | ./eosio_build.sh && \ + ./scripts/eosio_build.sh -y && \ echo "--- :compression: Compressing build directory" && \ tar -pczf build.tar.gz build/ - label: ":aws: Build" + label: ":fedora: 27 Build" agents: queue: "automation-large-builder-fleet" artifact_paths: "build.tar.gz" plugins: - docker#v1.4.0: - image: "eosio/ci:amazonlinux" + ecr#v1.1.4: + login: true + account_ids: "436617320021" + no-include-email: true + region: "us-west-2" + docker#v2.1.0: + debug: true + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:fedora27_2-1" workdir: /data/job timeout: 60 + - command: | + echo "--- Creating symbolic link to job directory :file_folder:" && \ + sleep 5 && ln -s "$(pwd)" /data/job && cd /data/job && \ + echo "+++ Building :hammer:" && \ + ./scripts/eosio_build.sh -y && \ + echo "--- Compressing build directory :compression:" && \ + tar -pczf build.tar.gz build/ + label: ":darwin: Mojave Build" + agents: + - "role=builder-v2-1" + - "os=mojave" + artifact_paths: "build.tar.gz" + timeout: 60 + + - command: | + echo "--- Creating symbolic link to job directory :file_folder:" && \ + sleep 5 && ln -s "$(pwd)" /data/job && cd /data/job && \ + echo "+++ Building :hammer:" && \ + ./scripts/eosio_build.sh -y && \ + echo 
"--- Compressing build directory :compression:" && \ + tar -pczf build.tar.gz build/ + label: ":darwin: High Sierra Build" + agents: + - "role=builder-v2-1" + - "os=high-sierra" + artifact_paths: "build.tar.gz" + timeout: 60 + - wait - command: | echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":darwin: Build" && \ + buildkite-agent artifact download "build.tar.gz" . --step ":ubuntu: 16.04 Build" && \ tar -zxf build.tar.gz && \ echo "--- :m: Starting MongoDB" && \ - $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ - echo "+++ :microscope: Running tests" && \ - ln -s "$(pwd)" /data/job && cd /data/job/build && ctest -L long_running_tests --output-on-failure - label: ":darwin: Tests" + ~/bin/mongod --fork --dbpath ~/data/mongodb -f ~/etc/mongod.conf --logpath "$(pwd)"/mongod.log && \ + echo "+++ :microscope: Running LR Tests" && \ + cd /data/job/build && PATH=\$PATH:~/opt/mongodb/bin ctest -L long_running_tests --output-on-failure + label: ":ubuntu: 16.04 LR Tests" agents: - - "role=macos-tester" + queue: "automation-large-builder-fleet" artifact_paths: - "mongod.log" - "build/genesis.json" - "build/config.ini" - timeout: 100 - + plugins: + ecr#v1.1.4: + login: true + account_ids: "436617320021" + no-include-email: true + region: "us-west-2" + docker#v2.1.0: + debug: true + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu16_2-1" + workdir: /data/job + timeout: 60 + - command: | echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":ubuntu: Build" && \ + buildkite-agent artifact download "build.tar.gz" . --step ":ubuntu: 18.04 Build" && \ tar -zxf build.tar.gz && \ echo "--- :m: Starting MongoDB" && \ - $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ - echo "+++ :microscope: Running tests" && \ - cd /data/job/build && ctest -L long_running_tests --output-on-failure - label: ":ubuntu: Tests" + ~/bin/mongod --fork --dbpath ~/data/mongodb -f ~/etc/mongod.conf --logpath "$(pwd)"/mongod.log && \ + echo "+++ :microscope: Running LR Tests" && \ + cd /data/job/build && PATH=\$PATH:~/opt/mongodb/bin ctest -L long_running_tests --output-on-failure + label: ":ubuntu: 18.04 LR Tests" agents: queue: "automation-large-builder-fleet" artifact_paths: @@ -122,20 +199,26 @@ steps: - "build/genesis.json" - "build/config.ini" plugins: - docker#v1.4.0: - image: "eosio/ci:ubuntu" + ecr#v1.1.4: + login: true + account_ids: "436617320021" + no-include-email: true + region: "us-west-2" + docker#v2.1.0: + debug: true + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-1" workdir: /data/job - timeout: 100 - + timeout: 60 + - command: | echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":ubuntu: 18.04 Build" && \ + buildkite-agent artifact download "build.tar.gz" . 
--step ":centos: 7 Build" && \ tar -zxf build.tar.gz && \ echo "--- :m: Starting MongoDB" && \ - $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ - echo "+++ :microscope: Running tests" && \ - cd /data/job/build && ctest -L long_running_tests --output-on-failure - label: ":ubuntu: 18.04 Tests" + ~/bin/mongod --fork --dbpath ~/data/mongodb -f ~/etc/mongod.conf --logpath "$(pwd)"/mongod.log && \ + echo "+++ :microscope: Running LR Tests" && \ + cd /data/job/build && PATH=\$PATH:~/opt/mongodb/bin ctest -L long_running_tests --output-on-failure + label: ":centos: 7 LR Tests" agents: queue: "automation-large-builder-fleet" artifact_paths: @@ -143,20 +226,26 @@ steps: - "build/genesis.json" - "build/config.ini" plugins: - docker#v1.4.0: - image: "eosio/ci:ubuntu18" + ecr#v1.1.4: + login: true + account_ids: "436617320021" + no-include-email: true + region: "us-west-2" + docker#v2.1.0: + debug: true + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-1" workdir: /data/job - timeout: 100 + timeout: 60 - command: | echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":fedora: Build" && \ + buildkite-agent artifact download "build.tar.gz" . --step ":aws: 1 Build" && \ tar -zxf build.tar.gz && \ echo "--- :m: Starting MongoDB" && \ - $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ - echo "+++ :microscope: Running tests" && \ - cd /data/job/build && ctest -L long_running_tests --output-on-failure - label: ":fedora: Tests" + ~/bin/mongod --fork --dbpath ~/data/mongodb -f ~/etc/mongod.conf --logpath "$(pwd)"/mongod.log && \ + echo "+++ :microscope: Running LR Tests" && \ + cd /data/job/build && PATH=\$PATH:~/opt/mongodb/bin ctest -L long_running_tests --output-on-failure + label: ":aws: 1 LR Tests" agents: queue: "automation-large-builder-fleet" artifact_paths: @@ -164,20 +253,26 @@ steps: - "build/genesis.json" - "build/config.ini" plugins: - docker#v1.4.0: - image: "eosio/ci:fedora" + ecr#v1.1.4: + login: true + account_ids: "436617320021" + no-include-email: true + region: "us-west-2" + docker#v2.1.0: + debug: true + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux1_2-1" workdir: /data/job - timeout: 100 + timeout: 60 - command: | echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":centos: Build" && \ + buildkite-agent artifact download "build.tar.gz" . 
--step ":aws: 2 Build" && \ tar -zxf build.tar.gz && \ echo "--- :m: Starting MongoDB" && \ - $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ - echo "+++ :microscope: Running tests" && \ - cd /data/job/build && ctest -L long_running_tests --output-on-failure - label: ":centos: Tests" + ~/bin/mongod --fork --dbpath ~/data/mongodb -f ~/etc/mongod.conf --logpath "$(pwd)"/mongod.log && \ + echo "+++ :microscope: Running LR Tests" && \ + cd /data/job/build && PATH=\$PATH:~/opt/mongodb/bin ctest -L long_running_tests --output-on-failure + label: ":aws: 2 LR Tests" agents: queue: "automation-large-builder-fleet" artifact_paths: @@ -185,20 +280,26 @@ steps: - "build/genesis.json" - "build/config.ini" plugins: - docker#v1.4.0: - image: "eosio/ci:centos" + ecr#v1.1.4: + login: true + account_ids: "436617320021" + no-include-email: true + region: "us-west-2" + docker#v2.1.0: + debug: true + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-1" workdir: /data/job - timeout: 100 + timeout: 60 - command: | echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":aws: Build" && \ + buildkite-agent artifact download "build.tar.gz" . --step ":fedora: 27 Build" && \ tar -zxf build.tar.gz && \ echo "--- :m: Starting MongoDB" && \ - $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ - echo "+++ :microscope: Running tests" && \ - cd /data/job/build && ctest -L long_running_tests --output-on-failure - label: ":aws: Tests" + ~/bin/mongod --fork --dbpath ~/data/mongodb -f ~/etc/mongod.conf --logpath "$(pwd)"/mongod.log && \ + echo "+++ :microscope: Running LR Tests" && \ + cd /data/job/build && PATH=\$PATH:~/opt/mongodb/bin ctest -L long_running_tests --output-on-failure + label: ":fedora: 27 LR Tests" agents: queue: "automation-large-builder-fleet" artifact_paths: @@ -206,7 +307,51 @@ steps: - "build/genesis.json" - "build/config.ini" plugins: - docker#v1.4.0: - image: "eosio/ci:amazonlinux" + ecr#v1.1.4: + login: true + account_ids: "436617320021" + no-include-email: true + region: "us-west-2" + docker#v2.1.0: + debug: true + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:fedora27_2-1" workdir: /data/job - timeout: 100 + timeout: 60 + + - command: | + echo "--- :arrow_down: Downloading build directory" && \ + buildkite-agent artifact download "build.tar.gz" . --step ":darwin: High Sierra Build" && \ + tar -zxf build.tar.gz && \ + echo "--- :m: Starting MongoDB" && \ + ~/bin/mongod --fork --dbpath ~/data/mongodb -f ~/etc/mongod.conf --logpath "$(pwd)"/mongod.log && \ + echo "+++ :microscope: Running LR Tests" && \ + ln -s "$(pwd)" /data/job && cd /data/job/build && PATH=\$PATH:~/opt/mongodb/bin ctest -L long_running_tests --output-on-failure + label: ":darwin: High Sierra LR Tests" + agents: + - "role=tester-v2-1" + - "os=high-sierra" + artifact_paths: + - "mongod.log" + - "build/genesis.json" + - "build/config.ini" + timeout: 60 + + - command: | + echo "--- :arrow_down: Downloading build directory" && \ + buildkite-agent artifact download "build.tar.gz" . 
--step ":darwin: Mojave Build" && \ + tar -zxf build.tar.gz && \ + echo "--- :m: Starting MongoDB" && \ + ~/bin/mongod --fork --dbpath ~/data/mongodb -f ~/etc/mongod.conf --logpath "$(pwd)"/mongod.log && \ + echo "+++ :microscope: Running LR Tests" && \ + ln -s "$(pwd)" /data/job && cd /data/job/build && PATH=\$PATH:~/opt/mongodb/bin ctest -L long_running_tests --output-on-failure + label: ":darwin: Mojave LR Tests" + agents: + - "role=tester-v2-1" + - "os=mojave" + artifact_paths: + - "mongod.log" + - "build/genesis.json" + - "build/config.ini" + timeout: 60 + + \ No newline at end of file From 1459875c4dbfaccc9c4982ee6203934eedfcf9f4 Mon Sep 17 00:00:00 2001 From: arhag Date: Thu, 7 Mar 2019 11:18:40 -0500 Subject: [PATCH 096/680] remove digest from protocol feature JSON filenames since they can become inconsistent with the contents --- plugins/chain_plugin/chain_plugin.cpp | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-) diff --git a/plugins/chain_plugin/chain_plugin.cpp b/plugins/chain_plugin/chain_plugin.cpp index 5c366613ed2..991fe7d3b60 100644 --- a/plugins/chain_plugin/chain_plugin.cpp +++ b/plugins/chain_plugin/chain_plugin.cpp @@ -469,21 +469,14 @@ protocol_feature_manager initialize_protocol_features( const fc::path& p, bool p auto output_protocol_feature = [&p]( const builtin_protocol_feature& f, const digest_type& feature_digest ) { static constexpr int max_tries = 10; - string digest_string("-"); - { - fc::variant v; - to_variant( feature_digest, v ); - digest_string += v.get_string(); - } - string filename_base( "BUILTIN-" ); filename_base += builtin_protocol_feature_codename( f.get_codename() ); - string filename = filename_base + digest_string + ".json"; + string filename = filename_base+ ".json"; int i = 0; for( ; i < max_tries && fc::exists( p / filename ); - ++i, filename = filename_base + digest_string + "-" + std::to_string(i) + ".json" ) + ++i, filename = filename_base + "-" + std::to_string(i) + ".json" ) ; EOS_ASSERT( i < max_tries, plugin_exception, From 8c915c90c64aa923c129e78f20d4e791f0fe6665 Mon Sep 17 00:00:00 2001 From: arhag Date: Thu, 7 Mar 2019 11:45:20 -0500 Subject: [PATCH 097/680] set description digest for PREACTIVATE_FEATURE --- libraries/chain/protocol_feature_manager.cpp | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/libraries/chain/protocol_feature_manager.cpp b/libraries/chain/protocol_feature_manager.cpp index e815eaf1950..3970cab6766 100644 --- a/libraries/chain/protocol_feature_manager.cpp +++ b/libraries/chain/protocol_feature_manager.cpp @@ -14,9 +14,16 @@ namespace eosio { namespace chain { const std::unordered_map> builtin_protocol_feature_codenames = boost::assign::map_list_of - ( builtin_protocol_feature_t::preactivate_feature, { + ( builtin_protocol_feature_t::preactivate_feature, { "PREACTIVATE_FEATURE", - digest_type{}, + fc::variant("64fe7df32e9b86be2b296b3f81dfd527f84e82b98e363bc97e40bc7a83733310").as(), + // SHA256 hash of the raw message below within the comment delimiters (do not modify message below). +/* +Builtin protocol feature: PREACTIVATE_FEATURE + +Adds privileged intrinsic to enable a contract to pre-activate a protocol feature specified by its digest. +Pre-activated protocol features must be activated in the next block. 
+*/ {}, {time_point{}, false, true} // enabled without preactivation and ready to go at any time } ) From 12ab91c6443d432fdcb64fec79e94a481dad8e3e Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Thu, 7 Mar 2019 15:04:07 -0500 Subject: [PATCH 098/680] Move json::to_string processing to http thread pool --- plugins/http_plugin/http_plugin.cpp | 32 ++++++++++--------- .../include/eosio/http_plugin/http_plugin.hpp | 2 +- 2 files changed, 18 insertions(+), 16 deletions(-) diff --git a/plugins/http_plugin/http_plugin.cpp b/plugins/http_plugin/http_plugin.cpp index 3bb5b530d5f..7e205736874 100644 --- a/plugins/http_plugin/http_plugin.cpp +++ b/plugins/http_plugin/http_plugin.cpp @@ -304,18 +304,20 @@ namespace eosio { [ioc = this->server_ioc, &bytes_in_flight = this->bytes_in_flight, handler_itr, resource{std::move( resource )}, body{std::move( body )}, con]() { try { - bytes_in_flight -= body.size(); handler_itr->second( resource, body, - [ioc{std::move(ioc)}, &bytes_in_flight, con]( int code, std::string response_body ) { - bytes_in_flight += response_body.size(); - boost::asio::post( *ioc, [ioc, response_body{std::move( response_body )}, &bytes_in_flight, con, code]() { - size_t body_size = response_body.size(); - con->set_body( std::move( response_body ) ); + [ioc{std::move(ioc)}, &bytes_in_flight, con]( int code, fc::variant response_body ) { + boost::asio::post( *ioc, [ioc, response_body{std::move( response_body )}, &bytes_in_flight, con, code]() mutable { + std::string json = fc::json::to_string( response_body ); + response_body.clear(); + const size_t json_size = json.size(); + bytes_in_flight += json_size; + con->set_body( std::move( json ) ); con->set_status( websocketpp::http::status_code::value( code ) ); con->send_http_response(); - bytes_in_flight -= body_size; + bytes_in_flight -= json_size; } ); }); + bytes_in_flight -= body.size(); } catch( ... ) { handle_exception( con ); con->send_http_response(); @@ -592,7 +594,7 @@ namespace eosio { try { if (body.empty()) body = "{}"; auto result = (*this).get_supported_apis(); - cb(200, fc::json::to_string(result)); + cb(200, fc::variant(result)); } catch (...) 
{ handle_exception("node", "get_supported_apis", body, cb); } @@ -629,21 +631,21 @@ namespace eosio { throw; } catch (chain::unknown_block_exception& e) { error_results results{400, "Unknown Block", error_results::error_info(e, verbose_http_errors)}; - cb( 400, fc::json::to_string( results )); + cb( 400, fc::variant( results )); } catch (chain::unsatisfied_authorization& e) { error_results results{401, "UnAuthorized", error_results::error_info(e, verbose_http_errors)}; - cb( 401, fc::json::to_string( results )); + cb( 401, fc::variant( results )); } catch (chain::tx_duplicate& e) { error_results results{409, "Conflict", error_results::error_info(e, verbose_http_errors)}; - cb( 409, fc::json::to_string( results )); + cb( 409, fc::variant( results )); } catch (fc::eof_exception& e) { error_results results{422, "Unprocessable Entity", error_results::error_info(e, verbose_http_errors)}; - cb( 422, fc::json::to_string( results )); + cb( 422, fc::variant( results )); elog( "Unable to parse arguments to ${api}.${call}", ("api", api_name)( "call", call_name )); dlog("Bad arguments: ${args}", ("args", body)); } catch (fc::exception& e) { error_results results{500, "Internal Service Error", error_results::error_info(e, verbose_http_errors)}; - cb( 500, fc::json::to_string( results )); + cb( 500, fc::variant( results )); if (e.code() != chain::greylist_net_usage_exceeded::code_value && e.code() != chain::greylist_cpu_usage_exceeded::code_value) { elog( "FC Exception encountered while processing ${api}.${call}", ("api", api_name)( "call", call_name )); @@ -651,14 +653,14 @@ namespace eosio { } } catch (std::exception& e) { error_results results{500, "Internal Service Error", error_results::error_info(fc::exception( FC_LOG_MESSAGE( error, e.what())), verbose_http_errors)}; - cb( 500, fc::json::to_string( results )); + cb( 500, fc::variant( results )); elog( "STD Exception encountered while processing ${api}.${call}", ("api", api_name)( "call", call_name )); dlog( "Exception Details: ${e}", ("e", e.what())); } catch (...) 
{ error_results results{500, "Internal Service Error", error_results::error_info(fc::exception( FC_LOG_MESSAGE( error, "Unknown Exception" )), verbose_http_errors)}; - cb( 500, fc::json::to_string( results )); + cb( 500, fc::variant( results )); elog( "Unknown Exception encountered while processing ${api}.${call}", ("api", api_name)( "call", call_name )); } diff --git a/plugins/http_plugin/include/eosio/http_plugin/http_plugin.hpp b/plugins/http_plugin/include/eosio/http_plugin/http_plugin.hpp index a522b2b1739..eaa132ce0e4 100644 --- a/plugins/http_plugin/include/eosio/http_plugin/http_plugin.hpp +++ b/plugins/http_plugin/include/eosio/http_plugin/http_plugin.hpp @@ -17,7 +17,7 @@ namespace eosio { * * Arguments: response_code, response_body */ - using url_response_callback = std::function; + using url_response_callback = std::function; /** * @brief Callback type for a URL handler From 97b385cf0303c7ad86fb1e45289bad2124745d56 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Thu, 7 Mar 2019 15:05:35 -0500 Subject: [PATCH 099/680] Move json::to_string to http thread pool --- plugins/chain_api_plugin/chain_api_plugin.cpp | 10 +++++----- plugins/db_size_api_plugin/db_size_api_plugin.cpp | 2 +- .../faucet_testnet_plugin/faucet_testnet_plugin.cpp | 2 +- plugins/history_api_plugin/history_api_plugin.cpp | 4 ++-- plugins/login_plugin/login_plugin.cpp | 4 ++-- plugins/net_api_plugin/net_api_plugin.cpp | 2 +- plugins/producer_api_plugin/producer_api_plugin.cpp | 2 +- .../test_control_api_plugin.cpp | 4 ++-- plugins/txn_test_gen_plugin/txn_test_gen_plugin.cpp | 4 ++-- plugins/wallet_api_plugin/wallet_api_plugin.cpp | 2 +- programs/keosd/main.cpp | 2 +- 11 files changed, 19 insertions(+), 19 deletions(-) diff --git a/plugins/chain_api_plugin/chain_api_plugin.cpp b/plugins/chain_api_plugin/chain_api_plugin.cpp index 8b9fd3f843c..8243765783d 100644 --- a/plugins/chain_api_plugin/chain_api_plugin.cpp +++ b/plugins/chain_api_plugin/chain_api_plugin.cpp @@ -28,10 +28,10 @@ chain_api_plugin::~chain_api_plugin(){} void chain_api_plugin::set_program_options(options_description&, options_description&) {} void chain_api_plugin::plugin_initialize(const variables_map&) {} -struct async_result_visitor : public fc::visitor { +struct async_result_visitor : public fc::visitor { template - std::string operator()(const T& v) const { - return fc::json::to_string(v); + fc::variant operator()(const T& v) const { + return fc::variant(v); } }; @@ -41,8 +41,8 @@ struct async_result_visitor : public fc::visitor { api_handle.validate(); \ try { \ if (body.empty()) body = "{}"; \ - auto result = api_handle.call_name(fc::json::from_string(body).as()); \ - cb(http_response_code, fc::json::to_string(result)); \ + fc::variant result( api_handle.call_name(fc::json::from_string(body).as()) ); \ + cb(http_response_code, std::move(result)); \ } catch (...) { \ http_plugin::handle_exception(#api_name, #call_name, body, cb); \ } \ diff --git a/plugins/db_size_api_plugin/db_size_api_plugin.cpp b/plugins/db_size_api_plugin/db_size_api_plugin.cpp index 8eed8b388ed..8c6df9566fe 100644 --- a/plugins/db_size_api_plugin/db_size_api_plugin.cpp +++ b/plugins/db_size_api_plugin/db_size_api_plugin.cpp @@ -18,7 +18,7 @@ using namespace eosio; try { \ if (body.empty()) body = "{}"; \ INVOKE \ - cb(http_response_code, fc::json::to_string(result)); \ + cb(http_response_code, fc::variant(result)); \ } catch (...) 
{ \ http_plugin::handle_exception(#api_name, #call_name, body, cb); \ } \ diff --git a/plugins/faucet_testnet_plugin/faucet_testnet_plugin.cpp b/plugins/faucet_testnet_plugin/faucet_testnet_plugin.cpp index d6f8f53e7b3..32db7146f6d 100644 --- a/plugins/faucet_testnet_plugin/faucet_testnet_plugin.cpp +++ b/plugins/faucet_testnet_plugin/faucet_testnet_plugin.cpp @@ -60,7 +60,7 @@ using results_pair = std::pair; try { \ if (body.empty()) body = "{}"; \ const auto result = api_handle->invoke_cb(body); \ - response_cb(result.first, fc::json::to_string(result.second)); \ + response_cb(result.first, fc::variant(result.second)); \ } catch (...) { \ http_plugin::handle_exception(#api_name, #call_name, body, response_cb); \ } \ diff --git a/plugins/history_api_plugin/history_api_plugin.cpp b/plugins/history_api_plugin/history_api_plugin.cpp index f9030d8c91c..d76dd7fd44b 100644 --- a/plugins/history_api_plugin/history_api_plugin.cpp +++ b/plugins/history_api_plugin/history_api_plugin.cpp @@ -24,8 +24,8 @@ void history_api_plugin::plugin_initialize(const variables_map&) {} [api_handle](string, string body, url_response_callback cb) mutable { \ try { \ if (body.empty()) body = "{}"; \ - auto result = api_handle.call_name(fc::json::from_string(body).as()); \ - cb(200, fc::json::to_string(result)); \ + fc::variant result( api_handle.call_name(fc::json::from_string(body).as()) ); \ + cb(200, std::move(result)); \ } catch (...) { \ http_plugin::handle_exception(#api_name, #call_name, body, cb); \ } \ diff --git a/plugins/login_plugin/login_plugin.cpp b/plugins/login_plugin/login_plugin.cpp index 0aeac67dce4..374a04b25dc 100644 --- a/plugins/login_plugin/login_plugin.cpp +++ b/plugins/login_plugin/login_plugin.cpp @@ -68,8 +68,8 @@ void login_plugin::plugin_initialize(const variables_map& options) { try { \ if (body.empty()) \ body = "{}"; \ - auto result = call_name(fc::json::from_string(body).as()); \ - cb(http_response_code, fc::json::to_string(result)); \ + fc::variant result( call_name(fc::json::from_string(body).as()) ); \ + cb(http_response_code, std::move(result)); \ } catch (...) { \ http_plugin::handle_exception("login", #call_name, body, cb); \ } \ diff --git a/plugins/net_api_plugin/net_api_plugin.cpp b/plugins/net_api_plugin/net_api_plugin.cpp index 3b7327c4313..315ea2816e9 100644 --- a/plugins/net_api_plugin/net_api_plugin.cpp +++ b/plugins/net_api_plugin/net_api_plugin.cpp @@ -29,7 +29,7 @@ using namespace eosio; try { \ if (body.empty()) body = "{}"; \ INVOKE \ - cb(http_response_code, fc::json::to_string(result)); \ + cb(http_response_code, fc::variant(result)); \ } catch (...) { \ http_plugin::handle_exception(#api_name, #call_name, body, cb); \ } \ diff --git a/plugins/producer_api_plugin/producer_api_plugin.cpp b/plugins/producer_api_plugin/producer_api_plugin.cpp index 7fcde1ac98c..0ef7631c868 100644 --- a/plugins/producer_api_plugin/producer_api_plugin.cpp +++ b/plugins/producer_api_plugin/producer_api_plugin.cpp @@ -30,7 +30,7 @@ using namespace eosio; try { \ if (body.empty()) body = "{}"; \ INVOKE \ - cb(http_response_code, fc::json::to_string(result)); \ + cb(http_response_code, fc::variant(result)); \ } catch (...) 
{ \ http_plugin::handle_exception(#api_name, #call_name, body, cb); \ } \ diff --git a/plugins/test_control_api_plugin/test_control_api_plugin.cpp b/plugins/test_control_api_plugin/test_control_api_plugin.cpp index 307cccc197e..a932a27cad9 100644 --- a/plugins/test_control_api_plugin/test_control_api_plugin.cpp +++ b/plugins/test_control_api_plugin/test_control_api_plugin.cpp @@ -40,8 +40,8 @@ struct async_result_visitor : public fc::visitor { [api_handle](string, string body, url_response_callback cb) mutable { \ try { \ if (body.empty()) body = "{}"; \ - auto result = api_handle.call_name(fc::json::from_string(body).as()); \ - cb(http_response_code, fc::json::to_string(result)); \ + fc::variant result( api_handle.call_name(fc::json::from_string(body).as()) ); \ + cb(http_response_code, std::move(result)); \ } catch (...) { \ http_plugin::handle_exception(#api_name, #call_name, body, cb); \ } \ diff --git a/plugins/txn_test_gen_plugin/txn_test_gen_plugin.cpp b/plugins/txn_test_gen_plugin/txn_test_gen_plugin.cpp index 460ecb57bd6..414664be32a 100755 --- a/plugins/txn_test_gen_plugin/txn_test_gen_plugin.cpp +++ b/plugins/txn_test_gen_plugin/txn_test_gen_plugin.cpp @@ -45,7 +45,7 @@ using io_work_t = boost::asio::executor_work_guard(argc, argv)) return -1; auto& http = app().get_plugin(); - http.add_handler("/v1/keosd/stop", [](string, string, url_response_callback cb) { cb(200, "{}"); std::raise(SIGTERM); } ); + http.add_handler("/v1/keosd/stop", [](string, string, url_response_callback cb) { cb(200, fc::variant(fc::variant_object())); std::raise(SIGTERM); } ); app().startup(); app().exec(); } catch (const fc::exception& e) { From 6e3c7cb93415a6c6a5427d43a0ecb9c8edf95ad4 Mon Sep 17 00:00:00 2001 From: arhag Date: Thu, 7 Mar 2019 16:19:17 -0500 Subject: [PATCH 100/680] fix bug in protocol_feature_manager::add_feature leading to undefined behavior --- libraries/chain/protocol_feature_manager.cpp | 8 ++++---- .../include/eosio/chain_plugin/chain_plugin.hpp | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/libraries/chain/protocol_feature_manager.cpp b/libraries/chain/protocol_feature_manager.cpp index 3970cab6766..d565cf53a8f 100644 --- a/libraries/chain/protocol_feature_manager.cpp +++ b/libraries/chain/protocol_feature_manager.cpp @@ -14,7 +14,7 @@ namespace eosio { namespace chain { const std::unordered_map> builtin_protocol_feature_codenames = boost::assign::map_list_of - ( builtin_protocol_feature_t::preactivate_feature, { + ( builtin_protocol_feature_t::preactivate_feature, builtin_protocol_feature_spec{ "PREACTIVATE_FEATURE", fc::variant("64fe7df32e9b86be2b296b3f81dfd527f84e82b98e363bc97e40bc7a83733310").as(), // SHA256 hash of the raw message below within the comment delimiters (do not modify message below). @@ -208,8 +208,8 @@ Pre-activated protocol features must be activated in the next block. auto dependency_digest = get_builtin_digest( d ); EOS_ASSERT( dependency_digest, protocol_feature_exception, "cannot make default builtin protocol feature with codename '${codename}' since it has a dependency that has not been added yet: ${dependency_codename}", - ("codename", static_cast(itr->first)) - ("dependency_codename", static_cast(d)) + ("codename", builtin_protocol_feature_codename(itr->first)) + ("dependency_codename", builtin_protocol_feature_codename(d)) ); dependencies.insert( *dependency_digest ); } @@ -297,7 +297,7 @@ Pre-activated protocol features must be activated in the next block. 
"builtin protocol feature with codename '${codename}' has a digest of ${digest} but another protocol feature with the same digest has already been added", ("codename", f.builtin_feature_codename)("digest", feature_digest) ); - if( indx < _builtin_protocol_features.size() ) { + if( indx >= _builtin_protocol_features.size() ) { for( auto i =_builtin_protocol_features.size(); i <= indx; ++i ) { _builtin_protocol_features.push_back( builtin_protocol_feature_entry{ _recognized_protocol_features.end(), diff --git a/plugins/chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp b/plugins/chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp index 48b44895b8e..7abb15e8b90 100644 --- a/plugins/chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp +++ b/plugins/chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp @@ -104,7 +104,7 @@ class read_only { //string recent_slots; //double participation_rate = 0; optional server_version_string; - optional fork_db_head_block_num = 0; + optional fork_db_head_block_num; optional fork_db_head_block_id; }; get_info_results get_info(const get_info_params&) const; From 69800ae78e1598446408a1c88b6df014da0e8054 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Fri, 8 Mar 2019 09:03:42 -0500 Subject: [PATCH 101/680] Add example logging.json --- programs/nodeos/logging.json | 87 ++++++++++++++++++++++++++++++++++++ 1 file changed, 87 insertions(+) create mode 100644 programs/nodeos/logging.json diff --git a/programs/nodeos/logging.json b/programs/nodeos/logging.json new file mode 100644 index 00000000000..0b02060b82e --- /dev/null +++ b/programs/nodeos/logging.json @@ -0,0 +1,87 @@ +{ + "includes": [], + "appenders": [{ + "name": "stderr", + "type": "console", + "args": { + "stream": "std_error", + "level_colors": [{ + "level": "debug", + "color": "green" + },{ + "level": "warn", + "color": "brown" + },{ + "level": "error", + "color": "red" + } + ] + }, + "enabled": true + },{ + "name": "stdout", + "type": "console", + "args": { + "stream": "std_out", + "level_colors": [{ + "level": "debug", + "color": "green" + },{ + "level": "warn", + "color": "brown" + },{ + "level": "error", + "color": "red" + } + ] + }, + "enabled": true + },{ + "name": "net", + "type": "gelf", + "args": { + "endpoint": "10.10.10.10:12201", + "host": "host_name" + }, + "enabled": true + } + ], + "loggers": [{ + "name": "default", + "level": "debug", + "enabled": true, + "additivity": false, + "appenders": [ + "stderr", + "net" + ] + },{ + "name": "net_plugin_impl", + "level": "debug", + "enabled": true, + "additivity": false, + "appenders": [ + "stderr", + "net" + ] + },{ + "name": "bnet_plugin", + "level": "debug", + "enabled": true, + "additivity": false, + "appenders": [ + "stderr", + "net" + ] + },{ + "name": "producer_plugin", + "level": "debug", + "enabled": true, + "additivity": false, + "appenders": [ + "stderr", + "net" + ] + } + ] +} From 5cb3e2948d27683eb3e9ec4544f883400f5ebcaf Mon Sep 17 00:00:00 2001 From: arhag Date: Fri, 8 Mar 2019 14:14:21 -0500 Subject: [PATCH 102/680] add back integration_test contract; needed by tests/nodeos_under_min_avail_ram.py long running test --- unittests/test-contracts/CMakeLists.txt | 1 + .../integration_test/CMakeLists.txt | 6 ++ .../integration_test/integration_test.abi | 57 ++++++++++++++++++ .../integration_test/integration_test.cpp | 29 +++++++++ .../integration_test/integration_test.hpp | 27 +++++++++ .../integration_test/integration_test.wasm | Bin 0 -> 5651 bytes 6 files changed, 120 insertions(+) create mode 100644 
unittests/test-contracts/integration_test/CMakeLists.txt create mode 100644 unittests/test-contracts/integration_test/integration_test.abi create mode 100644 unittests/test-contracts/integration_test/integration_test.cpp create mode 100644 unittests/test-contracts/integration_test/integration_test.hpp create mode 100755 unittests/test-contracts/integration_test/integration_test.wasm diff --git a/unittests/test-contracts/CMakeLists.txt b/unittests/test-contracts/CMakeLists.txt index 4a458969514..59f4ec0c28d 100644 --- a/unittests/test-contracts/CMakeLists.txt +++ b/unittests/test-contracts/CMakeLists.txt @@ -9,6 +9,7 @@ endif() add_subdirectory( asserter ) add_subdirectory( deferred_test ) +add_subdirectory( integration_test ) add_subdirectory( noop ) add_subdirectory( payloadless ) add_subdirectory( proxy ) diff --git a/unittests/test-contracts/integration_test/CMakeLists.txt b/unittests/test-contracts/integration_test/CMakeLists.txt new file mode 100644 index 00000000000..aaf8d2115ea --- /dev/null +++ b/unittests/test-contracts/integration_test/CMakeLists.txt @@ -0,0 +1,6 @@ +if( EOSIO_COMPILE_TEST_CONTRACTS ) + add_contract( integration_test integration_test integration_test.cpp ) +else() + configure_file( ${CMAKE_CURRENT_SOURCE_DIR}/integration_test.wasm ${CMAKE_CURRENT_BINARY_DIR}/integration_test.wasm COPYONLY ) + configure_file( ${CMAKE_CURRENT_SOURCE_DIR}/integration_test.abi ${CMAKE_CURRENT_BINARY_DIR}/integration_test.abi COPYONLY ) +endif() diff --git a/unittests/test-contracts/integration_test/integration_test.abi b/unittests/test-contracts/integration_test/integration_test.abi new file mode 100644 index 00000000000..8cd5c3ee8fa --- /dev/null +++ b/unittests/test-contracts/integration_test/integration_test.abi @@ -0,0 +1,57 @@ +{ + "____comment": "This file was generated with eosio-abigen. 
DO NOT EDIT ", + "version": "eosio::abi/1.1", + "types": [], + "structs": [ + { + "name": "payload", + "base": "", + "fields": [ + { + "name": "key", + "type": "uint64" + }, + { + "name": "data", + "type": "uint64[]" + } + ] + }, + { + "name": "store", + "base": "", + "fields": [ + { + "name": "from", + "type": "name" + }, + { + "name": "to", + "type": "name" + }, + { + "name": "num", + "type": "uint64" + } + ] + } + ], + "actions": [ + { + "name": "store", + "type": "store", + "ricardian_contract": "" + } + ], + "tables": [ + { + "name": "payloads", + "type": "payload", + "index_type": "i64", + "key_names": [], + "key_types": [] + } + ], + "ricardian_clauses": [], + "variants": [] +} \ No newline at end of file diff --git a/unittests/test-contracts/integration_test/integration_test.cpp b/unittests/test-contracts/integration_test/integration_test.cpp new file mode 100644 index 00000000000..ec8543caafb --- /dev/null +++ b/unittests/test-contracts/integration_test/integration_test.cpp @@ -0,0 +1,29 @@ +/** + * @file + * @copyright defined in eos/LICENSE + */ +#include "integration_test.hpp" + +using namespace eosio; + +void integration_test::store( name from, name to, uint64_t num ) { + require_auth( from ); + + check( is_account( to ), "to account does not exist" ); + check( num < std::numeric_limits::max(), "num to large" ); + + payloads_table data( get_self(), from.value ); + uint64_t key = 0; + const uint64_t num_keys = 5; + + while( data.find( key ) != data.end() ) { + key += num_keys; + } + + for( uint64_t i = 0; i < num_keys; ++i ) { + data.emplace( from, [&]( auto& g ) { + g.key = key + i; + g.data = std::vector( static_cast(num), 5 ); + } ); + } +} diff --git a/unittests/test-contracts/integration_test/integration_test.hpp b/unittests/test-contracts/integration_test/integration_test.hpp new file mode 100644 index 00000000000..cbdc02295b2 --- /dev/null +++ b/unittests/test-contracts/integration_test/integration_test.hpp @@ -0,0 +1,27 @@ +/** + * @file + * @copyright defined in eos/LICENSE + */ +#pragma once + +#include + +class [[eosio::contract]] integration_test : public eosio::contract { +public: + using eosio::contract::contract; + + [[eosio::action]] + void store( eosio::name from, eosio::name to, uint64_t num ); + + struct [[eosio::table("payloads")]] payload { + uint64_t key; + std::vector data; + + uint64_t primary_key()const { return key; } + + EOSLIB_SERIALIZE( payload, (key)(data) ) + }; + + using payloads_table = eosio::multi_index< "payloads"_n, payload >; + +}; diff --git a/unittests/test-contracts/integration_test/integration_test.wasm b/unittests/test-contracts/integration_test/integration_test.wasm new file mode 100755 index 0000000000000000000000000000000000000000..81e7b13d27478b078f916d8a02a6d95e0ae760dd GIT binary patch literal 5651 zcmcgwOKfCE6|K+vbyv6jJRiGl&m{F;oX`_~gC?>^Y;ai`Cm}>3QN#iv**$h=(ru61 zZnuMjhG_?}KnR5e8^kILBt%fygcU1~_=E)#kzgc{Lc}7AfQ0x739%rYQ`PNwvSEj3 zy5Dq4x{+x*Dkx*86}dUP9qco6b7K7J72dN>^V2goLr z@Q)fdSTP)3AS2clha`pNP^iDjx^C~hQ0uq%4m$nzTI*o&ypSBKcJ|j=8ymfYoqub+-b~e{Ki;swiBl*Tbzu(>&to7R)?amABzKH#BYh%#q z?W}FK2CcRI&X?LE@x%RgYZF>d=0wWjU454&(l=vJWsN8G8a)SccqlE zmO>`edMMq|u`om7-o0~jNh>mL?e1>BC?=|RADW9q{*ZZ5=VTz`sd>O_&@Qwj1` zEh~8F#G}xoifT>5)}m@OhW}sxY<1#3sp2FRS*2?7`o!eanX~84PtP=F=Pq2Fujwc< zHfbstZc|=hyNxbsyAt0j5khM-TUo7Nq~WzkjIFrmyQP3&X%GpkpRN~9V(N;qT$@=q zTh(R!?IoyM|{ zLkugd$n){)RZK;g`WOP~j+wXG9Hf|>h8K1XWC3?s7ynQ4lDQO%J_1KE^`_{C97hmd zR5kkREbg+9_ru%=p!szDewSg{hpFr3JJ97Sczl>v{Q*eHy14n*`hsc&dJ% 
zn}->fkP1Q^*|Ct!A>ucxa6iH5d+(sKsdfu$_S51TyfRqdM829TD=QqRVcR(~A_Z{DFiEe)8gPYgq+yquFAVkb?V?}xlHBfKyv-N2= ziOB~r*;LD=xmLg6atxFVD5|F<3QJ+lDki2dQ5bgy7fZuNpaa5uNMRwDpT+e32m~F; zIB*UFFt$QRG9L#xavp;hF^Jn~++F}0iP9N2gC}Bs{MlLdBrDD($gl|-gaMeOI!v&&PK?@sr!d}9= zPmU2btxI^0b&W1${8!Gq8mlP1R+k8J{-B}3jY5vLVO*~K@db^2hlo28=w zz&0=xjnIz0T`|LqKA!cU%#fheW~gIY7u)pY0`9STiw&mGfEvhPtR1Q|C%a`<7fcdX zq8mvjv=U%Nzr9NOU1nBuQ=QL3DDT2APj=Gd0o~P)VowSe8GDeor4nq>;VH4{R+%|{ z@LEv?!F!8f1C~`+(yx`(74ZzLLC{m15cQcm+_M_<9_#S+rRZ_S0NBl_6DS*FJX`Q? zfO$|cfq-zqVm6nIfrM=OX%Eml9-uMOL-?okngB)IJV3cYC7+&SVO-ML={8B;W-29d zTIK#Zc&gz8M(dt}bycp)KM&Mtgdu-$H1KR4AdC>YmD})E-on?HKR3^8#26&8**L$S zPWVUjGcDtx-TNLms772mosj^#W1f z1QET)HV$T01hk-#G|@x0sh ziuuefNImS2U`rwpdQo;kC(a_FwCGh@0Nhx_oj4!d5Zse&;Sm?XIGB{uB!3)}xF^Ul zdzuT>2^T1Cr%t#avMKE#em!51CCn=dX+$SD7N=SPW22VyIPeNO8()aL~)f`?qOV#(N)GMuq}ly^9d>OvWI&BO~&)EaBr{z{El644@rz{ z!Xi+JyI+vF{2p12bI@3b>_sN^WC&jOcF|+fk5IOBow#$lD^8X%lvBQlhBIZ z55MjxjUR)zqapK!e6Dy_q}}Y55v(C6Y6OfC0aic(aN+CYB1W{}$wviB0EMeq96m7` z>3dJ?p)b8B;rigUuYzaLqP82c#8nJ zAqCjt+2YtB`ci z$;W(FVH7d%c_KL>2xN?f?b(3{XL?!l3G4YA0j<8K$DN-1Ox!T8uK{Ep#{@5H&znK~ z0!=_DTw$w24L^fUm<|F+`=1n(P=l5Pe%x~6bf5CS5D`~|fpj2g@dyvdpf5r8a4h%O zC2I$CW@OvbMx-8bV~EXcjPrhzlBg7R$sF91LN{0`RU%1>8Z2v|_%L z`9dj3qR6Fih{7*j3ARM_ObOHmXAwsnYK2L#I-fBU3@!)p^q>i&vt|psf-*yRA)t8h zf>-=J0X1WNIxn%5XXueK?1V`w3ross^5SN6~`G6+`}@o&ywjz^`S6;Jt$851h>L6B8rB{(a0g%qTj;?8IYmXevZkp3MUkvQrMKC zHvDGsTZSLveoI;cEnbB=l2V`0l20>u8?boC(die;z|`-!F#48bP`V&!7+)e=Z#kxE zV8xRR&oUyhT30w!F**n$nSPObN!)N2WQ=k5on!y=1D4|0D9j@A6~F}DfB?e}48YFQ znmD?sehX7Rh3h-cn3q*W_*ZXN20c4^4QV%f?R~q`8`$)rOo0CISX>6dl}?S2by3fCan3A=Z8 z?9GGiL1ztbHrt0-i63odrrq!N`j#&&J3BY*co}BV3QdGJk+{Xn5_4svwL=>>APS3u y!hMTP2d(Sd7~C_j1kv literal 0 HcmV?d00001 From bf756800d436a95b99b92815836be19385559eb7 Mon Sep 17 00:00:00 2001 From: arhag Date: Fri, 8 Mar 2019 18:32:49 -0500 Subject: [PATCH 103/680] fix bad pipeline.yml from bad merge --- .buildkite/pipeline.yml | 87 +++-------------------------------------- 1 file changed, 5 insertions(+), 82 deletions(-) diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml index 047ee8f736c..4e860734910 100644 --- a/.buildkite/pipeline.yml +++ b/.buildkite/pipeline.yml @@ -105,27 +105,6 @@ steps: workdir: /data/job timeout: 60 - # - command: | - # echo "+++ :hammer: Building" && \ - # ./scripts/eosio_build.sh -y && \ - # echo "--- :compression: Compressing build directory" && \ - # tar -pczf build.tar.gz build/ - # label: ":aws: 2 Build" - # agents: - # queue: "automation-large-builder-fleet" - # artifact_paths: "build.tar.gz" - # plugins: - # ecr#v1.1.4: - # login: true - # account_ids: "436617320021" - # no-include-email: true - # region: "us-west-2" - # docker#v2.1.0: - # debug: true - # image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-1" - # workdir: /data/job - # timeout: 60 - - command: | echo "+++ :hammer: Building" && \ ./scripts/eosio_build.sh -y && \ @@ -230,7 +209,7 @@ steps: image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu16_2-1" workdir: /data/job timeout: 60 - + - command: | echo "--- :arrow_down: Downloading build directory" && \ buildkite-agent artifact download "build.tar.gz" . --step ":ubuntu: 18.04 Build" && \ @@ -394,60 +373,6 @@ steps: workdir: /data/job timeout: 60 - # - command: | - # echo "--- :arrow_down: Downloading build directory" && \ - # buildkite-agent artifact download "build.tar.gz" . 
--step ":aws: 2 Build" && \ - # tar -zxf build.tar.gz && \ - # echo "--- :m: Starting MongoDB" && \ - # ~/bin/mongod --fork --dbpath ~/data/mongodb -f ~/etc/mongod.conf --logpath "$(pwd)"/mongod.log && \ - # echo "+++ :microscope: Running tests" && \ - # cd /data/job/build && PATH=\$PATH:~/opt/mongodb/bin ~/bin/ctest -j8 -LE _tests --output-on-failure - # label: ":aws: 2 Tests" - # agents: - # queue: "automation-large-builder-fleet" - # artifact_paths: - # - "mongod.log" - # - "build/genesis.json" - # - "build/config.ini" - # plugins: - # ecr#v1.1.4: - # login: true - # account_ids: "436617320021" - # no-include-email: true - # region: "us-west-2" - # docker#v2.1.0: - # debug: true - # image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-1" - # workdir: /data/job - # timeout: 60 - - # - command: | - # echo "--- :arrow_down: Downloading build directory" && \ - # buildkite-agent artifact download "build.tar.gz" . --step ":aws: 2 Build" && \ - # tar -zxf build.tar.gz && \ - # echo "--- :m: Starting MongoDB" && \ - # ~/bin/mongod --fork --dbpath ~/data/mongodb -f ~/etc/mongod.conf --logpath "$(pwd)"/mongod.log && \ - # echo "+++ :microscope: Running tests" && \ - # cd /data/job/build && PATH=\$PATH:~/opt/mongodb/bin ~/bin/ctest -L nonparallelizable_tests --output-on-failure - # label: ":aws: 2 NP Tests" - # agents: - # queue: "automation-large-builder-fleet" - # artifact_paths: - # - "mongod.log" - # - "build/genesis.json" - # - "build/config.ini" - # plugins: - # ecr#v1.1.4: - # login: true - # account_ids: "436617320021" - # no-include-email: true - # region: "us-west-2" - # docker#v2.1.0: - # debug: true - # image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-1" - # workdir: /data/job - # timeout: 60 - - command: | echo "--- :arrow_down: Downloading build directory" && \ buildkite-agent artifact download "build.tar.gz" . 
--step ":aws: 2 Build" && \ @@ -512,8 +437,7 @@ steps: cd /data/job/build && PATH=\$PATH:~/opt/mongodb/bin ctest -j8 -LE _tests --output-on-failure label: ":fedora: 27 Tests" agents: - - "role=tester-v2-1" - - "os=high-sierra" + queue: "automation-large-builder-fleet" artifact_paths: - "mongod.log" - "build/genesis.json" @@ -540,8 +464,7 @@ steps: cd /data/job/build && PATH=\$PATH:~/opt/mongodb/bin ctest -L nonparallelizable_tests --output-on-failure label: ":fedora: 27 NP Tests" agents: - - "role=tester-v2-1" - - "os=high-sierra" + queue: "automation-large-builder-fleet" artifact_paths: - "mongod.log" - "build/genesis.json" @@ -629,7 +552,7 @@ steps: - "build/genesis.json" - "build/config.ini" timeout: 60 - + - wait - command: | @@ -793,4 +716,4 @@ steps: artifact_paths: - "build/packages/eosio_highsierra.rb" - "build/packages/eosio.rb" - timeout: 60 + timeout: 60 \ No newline at end of file From 09737b465666e6d0a256801f91e17dd1478c6b47 Mon Sep 17 00:00:00 2001 From: arhag Date: Fri, 8 Mar 2019 20:20:42 -0500 Subject: [PATCH 104/680] fix bug in get_block of chain_plugin which could cause unnecessary failure if passed in a block number; also update fc submodule --- libraries/fc | 2 +- plugins/chain_plugin/chain_plugin.cpp | 26 ++++++++++++++++++-------- 2 files changed, 19 insertions(+), 9 deletions(-) diff --git a/libraries/fc b/libraries/fc index 12956c33041..0c348cc9af4 160000 --- a/libraries/fc +++ b/libraries/fc @@ -1 +1 @@ -Subproject commit 12956c330413e69bd998cd0657c8a82ef3e8a106 +Subproject commit 0c348cc9af47d71af57e6926fd64848594a78658 diff --git a/plugins/chain_plugin/chain_plugin.cpp b/plugins/chain_plugin/chain_plugin.cpp index 53e7555f7e0..dbff9e03cbc 100644 --- a/plugins/chain_plugin/chain_plugin.cpp +++ b/plugins/chain_plugin/chain_plugin.cpp @@ -1124,7 +1124,7 @@ uint64_t convert_to_type(const string& str, const string& desc) { try { return boost::lexical_cast(str.c_str(), str.size()); } catch( ... ) { } - + try { auto trimmed_str = str; boost::trim(trimmed_str); @@ -1138,7 +1138,7 @@ uint64_t convert_to_type(const string& str, const string& desc) { return symb.value(); } catch( ... ) { } } - + try { return ( eosio::chain::string_to_symbol( 0, str.c_str() ) >> 8 ); } catch( ... ) { @@ -1529,14 +1529,24 @@ read_only::get_scheduled_transactions( const read_only::get_scheduled_transactio fc::variant read_only::get_block(const read_only::get_block_params& params) const { signed_block_ptr block; - EOS_ASSERT(!params.block_num_or_id.empty() && params.block_num_or_id.size() <= 64, chain::block_id_type_exception, "Invalid Block number or ID, must be greater than 0 and less than 64 characters" ); + optional block_num; + + EOS_ASSERT( !params.block_num_or_id.empty() && params.block_num_or_id.size() <= 64, + chain::block_id_type_exception, + "Invalid Block number or ID, must be greater than 0 and less than 64 characters" + ); + try { - block = db.fetch_block_by_id(fc::variant(params.block_num_or_id).as()); - if (!block) { - block = db.fetch_block_by_number(fc::to_uint64(params.block_num_or_id)); - } + block_num = fc::to_uint64(params.block_num_or_id); + } catch( ... 
) {} - } EOS_RETHROW_EXCEPTIONS(chain::block_id_type_exception, "Invalid block ID: ${block_num_or_id}", ("block_num_or_id", params.block_num_or_id)) + if( block_num.valid() ) { + block = db.fetch_block_by_number( *block_num ); + } else { + try { + block = db.fetch_block_by_id( fc::variant(params.block_num_or_id).as() ); + } EOS_RETHROW_EXCEPTIONS(chain::block_id_type_exception, "Invalid block ID: ${block_num_or_id}", ("block_num_or_id", params.block_num_or_id)) + } EOS_ASSERT( block, unknown_block_exception, "Could not find block: ${block}", ("block", params.block_num_or_id)); From d4cde397706e82189873b47717c5cfd4cef2704e Mon Sep 17 00:00:00 2001 From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com> Date: Mon, 11 Mar 2019 13:42:17 -0400 Subject: [PATCH 105/680] Fix db_modes_test to not use http --- tests/db_modes_test.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/db_modes_test.sh b/tests/db_modes_test.sh index 97b10160f7e..4cf43e246fd 100755 --- a/tests/db_modes_test.sh +++ b/tests/db_modes_test.sh @@ -32,7 +32,7 @@ EOSIO_STUFF_DIR=$(mktemp -d) trap "rm -rf $EOSIO_STUFF_DIR" EXIT NODEOS_LAUNCH_PARAMS="./programs/nodeos/nodeos -d $EOSIO_STUFF_DIR --config-dir $EOSIO_STUFF_DIR \ --chain-state-db-size-mb 8 --chain-state-db-guard-size-mb 0 --reversible-blocks-db-size-mb 1 \ ---reversible-blocks-db-guard-size-mb 0 --https-server-address "''" --p2p-listen-endpoint "''" -e -peosio" +--reversible-blocks-db-guard-size-mb 0 --http-server-address "''" --p2p-listen-endpoint "''" -e -peosio" run_nodeos() { if (( $VERBOSE == 0 )); then From 1dd9a35182e9c67758a4e41dc25436bd762877d4 Mon Sep 17 00:00:00 2001 From: arhag Date: Mon, 11 Mar 2019 15:49:59 -0400 Subject: [PATCH 106/680] add preactivate_feature and is_feature_activated intrinsics; add get_scheduled_protocol_feature_activations and schedule_protocol_feature_activations to producer_api_plugin --- libraries/chain/controller.cpp | 6 +++ .../chain/include/eosio/chain/controller.hpp | 2 + .../chain/include/eosio/chain/exceptions.hpp | 2 + libraries/chain/wasm_interface.cpp | 2 + .../producer_api_plugin.cpp | 12 +++-- .../eosio/producer_plugin/producer_plugin.hpp | 9 +++- plugins/producer_plugin/producer_plugin.cpp | 53 ++++++++++++++++++- 7 files changed, 80 insertions(+), 6 deletions(-) diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp index 87fe11d8e3e..d90b785b393 100644 --- a/libraries/chain/controller.cpp +++ b/libraries/chain/controller.cpp @@ -2280,6 +2280,12 @@ vector controller::get_preactivated_protocol_features()const { return preactivated_protocol_features; } +void controller::validate_protocol_features( const vector& features_to_activate )const { + my->check_protocol_features( my->head->header.timestamp, + my->head->activated_protocol_features->protocol_features, + features_to_activate ); +} + void controller::start_block( block_timestamp_type when, uint16_t confirm_block_count ) { validate_db_available_size(); diff --git a/libraries/chain/include/eosio/chain/controller.hpp b/libraries/chain/include/eosio/chain/controller.hpp index 9e7d23454e6..04a24acaef7 100644 --- a/libraries/chain/include/eosio/chain/controller.hpp +++ b/libraries/chain/include/eosio/chain/controller.hpp @@ -108,6 +108,8 @@ namespace eosio { namespace chain { vector get_preactivated_protocol_features()const; + void validate_protocol_features( const vector& features_to_activate )const; + /** * Starts a new pending block session upon which new transactions can * be pushed. 
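
Between the controller-side hooks declared above and the producer_plugin wiring in the hunks that follow, the validation flow for a scheduled activation set is: reject duplicate digests first, then let the controller verify dependencies and preactivation status. A reduced sketch of that flow, with chain::digest_type simplified to a string and the controller call stubbed out (these names are illustrative, not the actual plugin API):

#include <set>
#include <stdexcept>
#include <string>
#include <vector>

using digest = std::string;                            // stand-in for chain::digest_type

void validate_features( const std::vector<digest>& ) {} // stub for controller::validate_protocol_features

void schedule_activations( const std::vector<digest>& to_activate ) {
   // Inserting the whole vector into a set collapses repeats, so a size
   // mismatch proves the caller passed the same digest twice.
   std::set<digest> unique( to_activate.begin(), to_activate.end() );
   if( unique.size() != to_activate.size() )
      throw std::runtime_error( "duplicate digests" );
   validate_features( to_activate );                   // dependency/preactivation checks happen here
}
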
diff --git a/libraries/chain/include/eosio/chain/exceptions.hpp b/libraries/chain/include/eosio/chain/exceptions.hpp index d841b6f3aed..848dcab6654 100644 --- a/libraries/chain/include/eosio/chain/exceptions.hpp +++ b/libraries/chain/include/eosio/chain/exceptions.hpp @@ -463,6 +463,8 @@ namespace eosio { namespace chain { 3170007, "The configured snapshot directory does not exist" ) FC_DECLARE_DERIVED_EXCEPTION( snapshot_exists_exception, producer_exception, 3170008, "The requested snapshot already exists" ) + FC_DECLARE_DERIVED_EXCEPTION( invalid_protocol_features_to_activate, producer_exception, + 3170009, "The protocol features to be activated were not valid" ) FC_DECLARE_DERIVED_EXCEPTION( reversible_blocks_exception, chain_exception, 3180000, "Reversible Blocks exception" ) diff --git a/libraries/chain/wasm_interface.cpp b/libraries/chain/wasm_interface.cpp index 69460be197a..6a37bc0e407 100644 --- a/libraries/chain/wasm_interface.cpp +++ b/libraries/chain/wasm_interface.cpp @@ -1720,6 +1720,8 @@ REGISTER_INTRINSICS(privileged_api, (set_blockchain_parameters_packed, void(int,int) ) (is_privileged, int(int64_t) ) (set_privileged, void(int64_t, int) ) + (is_feature_activated, int(int) ) + (preactivate_feature, void(int) ) ); REGISTER_INJECTED_INTRINSICS(transaction_context, diff --git a/plugins/producer_api_plugin/producer_api_plugin.cpp b/plugins/producer_api_plugin/producer_api_plugin.cpp index 7fcde1ac98c..e02ec265f5e 100644 --- a/plugins/producer_api_plugin/producer_api_plugin.cpp +++ b/plugins/producer_api_plugin/producer_api_plugin.cpp @@ -79,17 +79,21 @@ void producer_api_plugin::plugin_startup() { CALL(producer, producer, add_greylist_accounts, INVOKE_V_R(producer, add_greylist_accounts, producer_plugin::greylist_params), 201), CALL(producer, producer, remove_greylist_accounts, - INVOKE_V_R(producer, remove_greylist_accounts, producer_plugin::greylist_params), 201), + INVOKE_V_R(producer, remove_greylist_accounts, producer_plugin::greylist_params), 201), CALL(producer, producer, get_greylist, - INVOKE_R_V(producer, get_greylist), 201), + INVOKE_R_V(producer, get_greylist), 201), CALL(producer, producer, get_whitelist_blacklist, INVOKE_R_V(producer, get_whitelist_blacklist), 201), - CALL(producer, producer, set_whitelist_blacklist, - INVOKE_V_R(producer, set_whitelist_blacklist, producer_plugin::whitelist_blacklist), 201), + CALL(producer, producer, set_whitelist_blacklist, + INVOKE_V_R(producer, set_whitelist_blacklist, producer_plugin::whitelist_blacklist), 201), CALL(producer, producer, get_integrity_hash, INVOKE_R_V(producer, get_integrity_hash), 201), CALL(producer, producer, create_snapshot, INVOKE_R_V(producer, create_snapshot), 201), + CALL(producer, producer, get_scheduled_protocol_feature_activations, + INVOKE_R_V(producer, get_scheduled_protocol_feature_activations), 201), + CALL(producer, producer, schedule_protocol_feature_activations, + INVOKE_V_R(producer, schedule_protocol_feature_activations, producer_plugin::scheduled_protocol_feature_activations), 201), }); } diff --git a/plugins/producer_plugin/include/eosio/producer_plugin/producer_plugin.hpp b/plugins/producer_plugin/include/eosio/producer_plugin/producer_plugin.hpp index 66030cc587e..c441f4530ec 100644 --- a/plugins/producer_plugin/include/eosio/producer_plugin/producer_plugin.hpp +++ b/plugins/producer_plugin/include/eosio/producer_plugin/producer_plugin.hpp @@ -51,6 +51,10 @@ class producer_plugin : public appbase::plugin { std::string snapshot_name; }; + struct scheduled_protocol_feature_activations { + 
std::vector protocol_features_to_activate; + }; + producer_plugin(); virtual ~producer_plugin(); @@ -83,6 +87,9 @@ class producer_plugin : public appbase::plugin { integrity_hash_information get_integrity_hash() const; snapshot_information create_snapshot() const; + scheduled_protocol_feature_activations get_scheduled_protocol_feature_activations() const; + void schedule_protocol_feature_activations(const scheduled_protocol_feature_activations& schedule); + signal confirmed_block; private: std::shared_ptr my; @@ -95,4 +102,4 @@ FC_REFLECT(eosio::producer_plugin::greylist_params, (accounts)); FC_REFLECT(eosio::producer_plugin::whitelist_blacklist, (actor_whitelist)(actor_blacklist)(contract_whitelist)(contract_blacklist)(action_blacklist)(key_blacklist) ) FC_REFLECT(eosio::producer_plugin::integrity_hash_information, (head_block_id)(integrity_hash)) FC_REFLECT(eosio::producer_plugin::snapshot_information, (head_block_id)(snapshot_name)) - +FC_REFLECT(eosio::producer_plugin::scheduled_protocol_feature_activations, (protocol_features_to_activate)) diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp index b3af3360940..3e4a78db58c 100644 --- a/plugins/producer_plugin/producer_plugin.cpp +++ b/plugins/producer_plugin/producer_plugin.cpp @@ -142,6 +142,8 @@ class producer_plugin_impl : public std::enable_shared_from_this _protocol_features_to_activate; + time_point _last_signed_block_time; time_point _start_time = fc::time_point::now(); uint32_t _last_signed_block_num = 0; @@ -964,6 +966,21 @@ producer_plugin::snapshot_information producer_plugin::create_snapshot() const { return {head_id, snapshot_path}; } +producer_plugin::scheduled_protocol_feature_activations +producer_plugin::get_scheduled_protocol_feature_activations()const { + return {my->_protocol_features_to_activate}; +} + +void producer_plugin::schedule_protocol_feature_activations( const scheduled_protocol_feature_activations& schedule ) { + const chain::controller& chain = my->chain_plug->chain(); + std::set set_of_features_to_activate( schedule.protocol_features_to_activate.begin(), + schedule.protocol_features_to_activate.end() ); + EOS_ASSERT( set_of_features_to_activate.size() == schedule.protocol_features_to_activate.size(), + invalid_protocol_features_to_activate, "duplicate digests" ); + chain.validate_protocol_features( schedule.protocol_features_to_activate ); + my->_protocol_features_to_activate = schedule.protocol_features_to_activate; +} + optional producer_plugin_impl::calculate_next_block_time(const account_name& producer_name, const block_timestamp_type& current_block_time) const { chain::controller& chain = chain_plug->chain(); const auto& hbs = chain.head_block_state(); @@ -1123,7 +1140,41 @@ producer_plugin_impl::start_block_result producer_plugin_impl::start_block() { } chain.abort_block(); - chain.start_block(block_time, blocks_to_confirm); + + auto features_to_activate = chain.get_preactivated_protocol_features(); + if( _pending_block_mode == pending_block_mode::producing && _protocol_features_to_activate.size() > 0 ) { + bool drop_features_to_activate = false; + try { + chain.validate_protocol_features( _protocol_features_to_activate ); + } catch( const fc::exception& e ) { + wlog( "protocol features to activate are no longer all valid: ${details}", + ("details",e.to_detail_string()) ); + drop_features_to_activate = true; + } + + if( drop_features_to_activate ) { + _protocol_features_to_activate.clear(); + } else { + if( features_to_activate.size() > 0 ) { + 
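// ---------------------------------------------------------------------------
// [editorial aside] The branch just opened merges the two sources of digests:
// `features_to_activate` currently holds what was already pre-activated
// on-chain, while `_protocol_features_to_activate` holds what the producer
// scheduled locally through the new API. The code below folds the on-chain
// list into the local one, using a std::set purely as a membership test so a
// digest present in both is activated only once, then swaps the merged result
// back into `features_to_activate` for start_block(). The dedup idiom in
// isolation (variable names here are illustrative only):
//
//    std::set<digest_type> seen( local.begin(), local.end() );
//    for( const auto& f : preactivated )
//       if( seen.insert( f ).second )      // true only on first sight
//          local.push_back( f );
//
// ---------------------------------------------------------------------------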
_protocol_features_to_activate.reserve( _protocol_features_to_activate.size() + + features_to_activate.size() ); + std::set set_of_features_to_activate( _protocol_features_to_activate.begin(), + _protocol_features_to_activate.end() ); + for( const auto& f : features_to_activate ) { + auto res = set_of_features_to_activate.insert( f ); + if( res.second ) { + _protocol_features_to_activate.push_back( f ); + } + } + features_to_activate.clear(); + } + std::swap( features_to_activate, _protocol_features_to_activate ); + ilog( "signaling activation of the following protocol features in block ${num}: ${features_to_activate}", + ("num", hbs->block_num + 1)("features_to_activate", features_to_activate) ); + } + } + + chain.start_block( block_time, blocks_to_confirm, features_to_activate ); } FC_LOG_AND_DROP(); if( chain.is_building_block() ) { From df1379ee1cd32b9e43278297a626c9962c42c231 Mon Sep 17 00:00:00 2001 From: arhag Date: Mon, 11 Mar 2019 17:40:25 -0400 Subject: [PATCH 107/680] fix bug in pending_block_header_state::_finish_next which created an inconsistent header_exts --- libraries/chain/block_header_state.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/libraries/chain/block_header_state.cpp b/libraries/chain/block_header_state.cpp index 545bf366f5d..543482863e0 100644 --- a/libraries/chain/block_header_state.cpp +++ b/libraries/chain/block_header_state.cpp @@ -216,12 +216,12 @@ namespace eosio { namespace chain { auto exts = h.validate_and_extract_header_extensions(); { if( exts.size() > 0 ) { - auto& new_protocol_features = exts.front().get().protocol_features; + const auto& new_protocol_features = exts.front().get().protocol_features; validator( timestamp, prev_activated_protocol_features->protocol_features, new_protocol_features ); new_activated_protocol_features = std::make_shared( *prev_activated_protocol_features, - std::move( new_protocol_features ) + new_protocol_features ); } else { new_activated_protocol_features = std::move( prev_activated_protocol_features ); From c64c963cc6c3fea09e036f683a7e5f40fc00b944 Mon Sep 17 00:00:00 2001 From: arhag Date: Mon, 11 Mar 2019 19:15:36 -0400 Subject: [PATCH 108/680] add intrinsics on PREACTIVATE_FEATURE activation --- libraries/chain/controller.cpp | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp index d90b785b393..27f41e6939b 100644 --- a/libraries/chain/controller.cpp +++ b/libraries/chain/controller.cpp @@ -2894,7 +2894,10 @@ const flat_set &controller::get_resource_greylist() const { template<> void controller_impl::on_activation() { - + db.modify( db.get(), [&]( auto& gp ) { + add_intrinsic_to_whitelist( gp.whitelisted_intrinsics, "preactivate_feature" ); + add_intrinsic_to_whitelist( gp.whitelisted_intrinsics, "is_feature_activated" ); + } ); } /// End of protocol feature activation handlers From 523ce5eeed410ecf10b55752d685d5835b224e0d Mon Sep 17 00:00:00 2001 From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com> Date: Mon, 11 Mar 2019 20:03:25 -0400 Subject: [PATCH 109/680] Fix empty strings in db_modes_test.sh again My bash-fu is just not good enough to resolve this "right". 
Fix it again as it's clearly still wrong --- tests/db_modes_test.sh | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/db_modes_test.sh b/tests/db_modes_test.sh index 4cf43e246fd..f29b8ffcb51 100755 --- a/tests/db_modes_test.sh +++ b/tests/db_modes_test.sh @@ -32,13 +32,13 @@ EOSIO_STUFF_DIR=$(mktemp -d) trap "rm -rf $EOSIO_STUFF_DIR" EXIT NODEOS_LAUNCH_PARAMS="./programs/nodeos/nodeos -d $EOSIO_STUFF_DIR --config-dir $EOSIO_STUFF_DIR \ --chain-state-db-size-mb 8 --chain-state-db-guard-size-mb 0 --reversible-blocks-db-size-mb 1 \ ---reversible-blocks-db-guard-size-mb 0 --http-server-address "''" --p2p-listen-endpoint "''" -e -peosio" +--reversible-blocks-db-guard-size-mb 0 -e -peosio" run_nodeos() { if (( $VERBOSE == 0 )); then - $NODEOS_LAUNCH_PARAMS "$@" 2>/dev/null & + $NODEOS_LAUNCH_PARAMS --http-server-address '' --p2p-listen-endpoint '' "$@" 2>/dev/null & else - $NODEOS_LAUNCH_PARAMS "$@" & + $NODEOS_LAUNCH_PARAMS --http-server-address '' --p2p-listen-endpoint '' "$@" & fi } From 9deb6f75ae479caefad7b03e201c8d73389043b5 Mon Sep 17 00:00:00 2001 From: arhag Date: Mon, 11 Mar 2019 21:37:27 -0400 Subject: [PATCH 110/680] add protocol_feature_tests/activate_preactivate_feature unit test --- unittests/contracts.hpp.in | 2 + unittests/contracts/CMakeLists.txt | 2 + unittests/contracts/eosio.bios/eosio.bios.abi | 37 +- .../contracts/eosio.bios/eosio.bios.wasm | Bin 12032 -> 17779 bytes .../v1.6.0-rc3/eosio.bios/eosio.bios.abi | 522 ++++++++++++++++++ .../v1.6.0-rc3/eosio.bios/eosio.bios.wasm | Bin 0 -> 13358 bytes unittests/protocol_feature_tests.cpp | 84 ++- 7 files changed, 640 insertions(+), 7 deletions(-) create mode 100644 unittests/contracts/old_versions/v1.6.0-rc3/eosio.bios/eosio.bios.abi create mode 100755 unittests/contracts/old_versions/v1.6.0-rc3/eosio.bios/eosio.bios.wasm diff --git a/unittests/contracts.hpp.in b/unittests/contracts.hpp.in index 8dd1f2b4dcf..bc61854d403 100644 --- a/unittests/contracts.hpp.in +++ b/unittests/contracts.hpp.in @@ -36,6 +36,8 @@ namespace eosio { MAKE_READ_WASM_ABI(eosio_token, eosio.token, contracts) MAKE_READ_WASM_ABI(eosio_wrap, eosio.wrap, contracts) + MAKE_READ_WASM_ABI(before_preactivate_eosio_bios, eosio.bios, contracts/old_versions/v1.6.0-rc3) + // Contracts in `eos/unittests/unittests/test-contracts' directory MAKE_READ_WASM_ABI(asserter, asserter, test-contracts) MAKE_READ_WASM_ABI(deferred_test, deferred_test, test-contracts) diff --git a/unittests/contracts/CMakeLists.txt b/unittests/contracts/CMakeLists.txt index 59ea1c1ca26..f64c79de062 100644 --- a/unittests/contracts/CMakeLists.txt +++ b/unittests/contracts/CMakeLists.txt @@ -6,3 +6,5 @@ file(COPY ${CMAKE_CURRENT_SOURCE_DIR}/eosio.msig/ DESTINATION ${CMAKE_CURRENT_BI file(COPY ${CMAKE_CURRENT_SOURCE_DIR}/eosio.system/ DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/eosio.system/) file(COPY ${CMAKE_CURRENT_SOURCE_DIR}/eosio.token/ DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/eosio.token/) file(COPY ${CMAKE_CURRENT_SOURCE_DIR}/eosio.wrap/ DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/eosio.wrap/) + +file(COPY ${CMAKE_CURRENT_SOURCE_DIR}/old_versions/v1.6.0-rc3/eosio.bios/ DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/old_versions/v1.6.0-rc3/eosio.bios/) diff --git a/unittests/contracts/eosio.bios/eosio.bios.abi b/unittests/contracts/eosio.bios/eosio.bios.abi index 0d5749b981b..3f9749263ce 100644 --- a/unittests/contracts/eosio.bios/eosio.bios.abi +++ b/unittests/contracts/eosio.bios/eosio.bios.abi @@ -1,6 +1,7 @@ { - "____comment": "This file was generated with eosio-abigen. 
DO NOT EDIT Mon Dec 3 17:06:17 2018", + "____comment": "This file was generated with eosio-abigen. DO NOT EDIT ", "version": "eosio::abi/1.1", + "types": [], "structs": [ { "name": "abi_hash", @@ -240,6 +241,16 @@ } ] }, + { + "name": "preactivate", + "base": "", + "fields": [ + { + "name": "feature_digest", + "type": "checksum256" + } + ] + }, { "name": "producer_key", "base": "", @@ -254,6 +265,16 @@ } ] }, + { + "name": "reqactivated", + "base": "", + "fields": [ + { + "name": "feature_digest", + "type": "checksum256" + } + ] + }, { "name": "reqauth", "base": "", @@ -429,7 +450,6 @@ ] } ], - "types": [], "actions": [ { "name": "canceldelay", @@ -456,6 +476,16 @@ "type": "onerror", "ricardian_contract": "" }, + { + "name": "preactivate", + "type": "preactivate", + "ricardian_contract": "" + }, + { + "name": "reqactivated", + "type": "reqactivated", + "ricardian_contract": "" + }, { "name": "reqauth", "type": "reqauth", @@ -517,6 +547,5 @@ } ], "ricardian_clauses": [], - "variants": [], - "abi_extensions": [] + "variants": [] } \ No newline at end of file diff --git a/unittests/contracts/eosio.bios/eosio.bios.wasm b/unittests/contracts/eosio.bios/eosio.bios.wasm index ea62431344e57e7aeda1d3fbc9985bb9acec872b..9d15da6fc49d215664c142c5055de37d8a39746d 100755 GIT binary patch literal 17779 zcmeI4e~cYhb;s}AnfJ@Hj>m~{mQ8?p50^gEKsUs!S7}ssoDd9z;6x#qLbbbDzr_32 z-d*pmgHuuO(nd6@P!+X6TlzymszeANRc+NG)E|ac6;f$I;zvoP3MBrar9wzmp`sG$ z=X>tV+qdg=Vq&L1BnaBMbLZZ3&pp4-x!2FluAYvZbJ6cbV>9vk`E@tr*ZGsIpFe-z z&D8YZ>DT$=+)T6{t@DRAPX_WgUbjc2QJS7qGuET?;e|m0XS2@RcsEnmD4WP9Eg0K6 z{1wb(Jz%1xY4hFxS~42<79ZX>zdHS3Z+7kMN^g30Zf*YI*|pvYSJ$WOmREXJS2e~p z^d(dG(&F@q*|pi})%joOxu$h9{)9m(zqyvWM|w-E^GnmStE;`0H5aL?e!6#hZut?n z%f7Gl&YYds`e)Zpx>Q})toGKXmsjQ=o z`m#R^Us_&T?VVuW(uuQky_HpWjXr8EzK6Ba6Aw&3IKOyedVb=N+oR8|X)rr^YHj`& zm+5`)Ft*ckODB3RQGYgfc4eivC=BL$^AGn{++KC>0^md7C3v=*`s&Hq*SvPZU8}Dn zd|h2zVsH9>ow|0@b#@ubg}#02tIa;J1Ziq&=i18yreYP9N&T2hOaqg}gdapa=<{&+nqHZHhy-4&ns z>%pVZifXgV%cmZ3H?;Odu87jsjW5kR2VT)V_*1+7ZX>!gaoNwOKk#w1?$1Z-FLy=0 z(1{MXJj&g{*meAo{IT0y%Bw%prM1gi<6bftcVix9t_=x_ybqaO`n@01-u(~K9x*h^r~i;dChw0&3!V6Y%SQc_ zm~`)M=cl&*hk!iu%%6Sl4}SB*Pak>vZfCF0eDLqT{bjvIyl!l4e8^t!*&T=13%~J$ zuReX`p4~}!-T2e*^Lp>@W*!%A!oMF;DWVDg7|%RD7(W&rj-?d5W-l2VX{OA2hW|bN ziQoC^r`O*W)_C&ir~mo=zx}VDTn|tm|Cqh={odV;0QN2(e!?FOK#v}dwIMBa!S%D3 zjg2$}cVpvIVJ8)Gdi^6IR}WC$!y}&X=K_>-hhvGppk)25JvLGgKsGl1$&jm{K!Wf8 zmY~!Ely~t+Cj9XL<@n)vyo55|&t5(@QcJ_IQ#@)D{$v<-@^GA&!}5Oi6JsMuIc$w# z^$EWmhAkhCrMALpw4eRt*hpLsyN{m6gg+LB9XlLPmcu4(|GpgdsAe`N{02{ve*^N% zhFNz%d*#?jRE|8sz}AGH3FBrC$K7&Rx1ZfKmKH918zUZ%(B$-5lo<<{$wK+Uy&-F) zU-D5yCUqPMYbQBUmiw+x+BH2~yI#1q_fwhgG+rO1E^id?P2PxzHL`cMy`e$Zo_?`> zW})L*B=>xZ$5N~&mZqC0N1SLzZ@(SWnfI~7?WUhXm>Wgo&AVOUihsG_vcx`uSTn1Q z>Ctbu1Yc9O_A;;ehRu0t-ppG?q*)2%&JhilBi0i>qhp^YxB8*a$e83{q!G0uIvbGc z_Fv^(yO}2kpFdk zvKJeQfALwimZm?9qoxS`gmif)N{b-+ucUPeIx6Yx;UwgieDGywa-&0RAD*OYLFq5AuCmfW})2JyRdY*B11|kfu)xcia?(F z-pE}n5>H6;6w_3?m;OrZ>xA#f!o~|G3*z;$!0VFb$JrMeaa(wZ(ShYt+HWxlpO>xm zIk6o2i!Vv>T^Rmqo=syb>f7~G1GY}eCIt?_a~#Jo&zphn01(fc;(5?Z#H-R!kQAq3 zxR_iqyi1!epd%axI(jfZ-iF}^<6~`j1dH3PXJLs#KNNSb-+?8_p|RwG!kXy3ms(zM z=3>z#X@$Og?8b-x?N7dQGrFG=hqBa?90TRHsf9qIRdR5usoFX4g) zJbOOQ9>akn|D4NqLCH&*8swK9F*F+w^fuz}XjTYW^zmY|^0+*~*Mcv(OuO=U)PYN- zVfmWo13e%jj4f~gV(1_AY+4_q{>}k`p@N~=(f0dbXzq_>V>*aw$qw`KuG9yYy|aT- zBKnwQm$k+eg`s3p)J;ck-?j`%KUE8{+jnKVb_yxrXJf$AW~GPCa^2>p9yW&-(!$Lu zfK#Y$%7+eKq$o#}bv4NBthT^=pX%CL73>++MsQd)lQ$}*|E@S=8 zz~GqJnbTG39?Ez;;0dg*zrzFyq7>t&;;w0EjG8ueVdRvEy0Q~=Q)F!B!SUqMf2l}n%!aIeKIze5ik%KE1xjB&HxcW#}+yb 
[... remainder of the base85-encoded GIT binary patch payload for unittests/contracts/eosio.bios/eosio.bios.wasm elided: the data is machine-generated and not human-readable; the diffstat above already records the change as Bin 12032 -> 17779 bytes ...]
zqn!?vGxb0_uPbEmI6+WbqC%O6MvqjhH=oi%V5kN|O)*sSO)=ydcz}{IFjI#aS_yr# z1usdtVObm_3wGEyHG8*%V94|*hGMo-c`hRjO?IS$oI0O=BB(SNNcoJ9=BNoxY4$!; z4hW#BJ(!u9i7Q{j8?Z;!@Un++;vIauaKEhaWjHdT!cbLEnJ^T_n-pUE%!tAYW;=y=tkG!x4uvSbWXw{P7o}>bR(U`SX#Ti?!x;OLd$`aiPCa%?3YqG8UqdMIj2DhZIs&M&c^LB~|~( z+p9peMQirMgrJLkWOpu|?sccv{jxqJp7HBTewMFiJA8#Zsrxm*xU}wP7yZK7`SsaJ zzVYoo?VOHwvW4#I>e8y$*U7VsPi`)A3!Qns$DMp)b!ma0E=Zs1tofPl8egl19hMg7 z&->F|KMf=!ohR7Q+o!PxI=!^8JkK}002wMi{+(gZ=U*YnXgBxNDl{_kA|rQBcNRtB zG&Ad6AGlc)#p|8Z^9(%UJItS9hv}uo^;N#3W+f*fS-p?u`#juKp^+FoU*vAGBZ(|5 z&CEV=o@E@wWVe1mU}5@-vf{jElgbM8LsH4PP4@Bd^)u90e4gnnKH2qr#l6s3J@4nb c=lxS>y6DGflp;Edl@uVDuSK$yZ=c=&0&}UPi~s-t diff --git a/unittests/contracts/old_versions/v1.6.0-rc3/eosio.bios/eosio.bios.abi b/unittests/contracts/old_versions/v1.6.0-rc3/eosio.bios/eosio.bios.abi new file mode 100644 index 00000000000..ebdfccd0704 --- /dev/null +++ b/unittests/contracts/old_versions/v1.6.0-rc3/eosio.bios/eosio.bios.abi @@ -0,0 +1,522 @@ +{ + "____comment": "This file was generated with eosio-abigen. DO NOT EDIT Mon Mar 11 20:20:06 2019", + "version": "eosio::abi/1.1", + "structs": [ + { + "name": "abi_hash", + "base": "", + "fields": [ + { + "name": "owner", + "type": "name" + }, + { + "name": "hash", + "type": "checksum256" + } + ] + }, + { + "name": "authority", + "base": "", + "fields": [ + { + "name": "threshold", + "type": "uint32" + }, + { + "name": "keys", + "type": "key_weight[]" + }, + { + "name": "accounts", + "type": "permission_level_weight[]" + }, + { + "name": "waits", + "type": "wait_weight[]" + } + ] + }, + { + "name": "blockchain_parameters", + "base": "", + "fields": [ + { + "name": "max_block_net_usage", + "type": "uint64" + }, + { + "name": "target_block_net_usage_pct", + "type": "uint32" + }, + { + "name": "max_transaction_net_usage", + "type": "uint32" + }, + { + "name": "base_per_transaction_net_usage", + "type": "uint32" + }, + { + "name": "net_usage_leeway", + "type": "uint32" + }, + { + "name": "context_free_discount_net_usage_num", + "type": "uint32" + }, + { + "name": "context_free_discount_net_usage_den", + "type": "uint32" + }, + { + "name": "max_block_cpu_usage", + "type": "uint32" + }, + { + "name": "target_block_cpu_usage_pct", + "type": "uint32" + }, + { + "name": "max_transaction_cpu_usage", + "type": "uint32" + }, + { + "name": "min_transaction_cpu_usage", + "type": "uint32" + }, + { + "name": "max_transaction_lifetime", + "type": "uint32" + }, + { + "name": "deferred_trx_expiration_window", + "type": "uint32" + }, + { + "name": "max_transaction_delay", + "type": "uint32" + }, + { + "name": "max_inline_action_size", + "type": "uint32" + }, + { + "name": "max_inline_action_depth", + "type": "uint16" + }, + { + "name": "max_authority_depth", + "type": "uint16" + } + ] + }, + { + "name": "canceldelay", + "base": "", + "fields": [ + { + "name": "canceling_auth", + "type": "permission_level" + }, + { + "name": "trx_id", + "type": "checksum256" + } + ] + }, + { + "name": "deleteauth", + "base": "", + "fields": [ + { + "name": "account", + "type": "name" + }, + { + "name": "permission", + "type": "name" + } + ] + }, + { + "name": "key_weight", + "base": "", + "fields": [ + { + "name": "key", + "type": "public_key" + }, + { + "name": "weight", + "type": "uint16" + } + ] + }, + { + "name": "linkauth", + "base": "", + "fields": [ + { + "name": "account", + "type": "name" + }, + { + "name": "code", + "type": "name" + }, + { + "name": "type", + "type": "name" + }, + { + "name": "requirement", + "type": "name" + } + ] + }, + { + "name": "newaccount", + 
"base": "", + "fields": [ + { + "name": "creator", + "type": "name" + }, + { + "name": "name", + "type": "name" + }, + { + "name": "owner", + "type": "authority" + }, + { + "name": "active", + "type": "authority" + } + ] + }, + { + "name": "onerror", + "base": "", + "fields": [ + { + "name": "sender_id", + "type": "uint128" + }, + { + "name": "sent_trx", + "type": "bytes" + } + ] + }, + { + "name": "permission_level", + "base": "", + "fields": [ + { + "name": "actor", + "type": "name" + }, + { + "name": "permission", + "type": "name" + } + ] + }, + { + "name": "permission_level_weight", + "base": "", + "fields": [ + { + "name": "permission", + "type": "permission_level" + }, + { + "name": "weight", + "type": "uint16" + } + ] + }, + { + "name": "producer_key", + "base": "", + "fields": [ + { + "name": "producer_name", + "type": "name" + }, + { + "name": "block_signing_key", + "type": "public_key" + } + ] + }, + { + "name": "reqauth", + "base": "", + "fields": [ + { + "name": "from", + "type": "name" + } + ] + }, + { + "name": "setabi", + "base": "", + "fields": [ + { + "name": "account", + "type": "name" + }, + { + "name": "abi", + "type": "bytes" + } + ] + }, + { + "name": "setalimits", + "base": "", + "fields": [ + { + "name": "account", + "type": "name" + }, + { + "name": "ram_bytes", + "type": "int64" + }, + { + "name": "net_weight", + "type": "int64" + }, + { + "name": "cpu_weight", + "type": "int64" + } + ] + }, + { + "name": "setcode", + "base": "", + "fields": [ + { + "name": "account", + "type": "name" + }, + { + "name": "vmtype", + "type": "uint8" + }, + { + "name": "vmversion", + "type": "uint8" + }, + { + "name": "code", + "type": "bytes" + } + ] + }, + { + "name": "setglimits", + "base": "", + "fields": [ + { + "name": "ram", + "type": "uint64" + }, + { + "name": "net", + "type": "uint64" + }, + { + "name": "cpu", + "type": "uint64" + } + ] + }, + { + "name": "setparams", + "base": "", + "fields": [ + { + "name": "params", + "type": "blockchain_parameters" + } + ] + }, + { + "name": "setpriv", + "base": "", + "fields": [ + { + "name": "account", + "type": "name" + }, + { + "name": "is_priv", + "type": "uint8" + } + ] + }, + { + "name": "setprods", + "base": "", + "fields": [ + { + "name": "schedule", + "type": "producer_key[]" + } + ] + }, + { + "name": "unlinkauth", + "base": "", + "fields": [ + { + "name": "account", + "type": "name" + }, + { + "name": "code", + "type": "name" + }, + { + "name": "type", + "type": "name" + } + ] + }, + { + "name": "updateauth", + "base": "", + "fields": [ + { + "name": "account", + "type": "name" + }, + { + "name": "permission", + "type": "name" + }, + { + "name": "parent", + "type": "name" + }, + { + "name": "auth", + "type": "authority" + } + ] + }, + { + "name": "wait_weight", + "base": "", + "fields": [ + { + "name": "wait_sec", + "type": "uint32" + }, + { + "name": "weight", + "type": "uint16" + } + ] + } + ], + "types": [], + "actions": [ + { + "name": "canceldelay", + "type": "canceldelay", + "ricardian_contract": "" + }, + { + "name": "deleteauth", + "type": "deleteauth", + "ricardian_contract": "" + }, + { + "name": "linkauth", + "type": "linkauth", + "ricardian_contract": "" + }, + { + "name": "newaccount", + "type": "newaccount", + "ricardian_contract": "" + }, + { + "name": "onerror", + "type": "onerror", + "ricardian_contract": "" + }, + { + "name": "reqauth", + "type": "reqauth", + "ricardian_contract": "" + }, + { + "name": "setabi", + "type": "setabi", + "ricardian_contract": "" + }, + { + "name": "setalimits", + "type": 
"setalimits", + "ricardian_contract": "" + }, + { + "name": "setcode", + "type": "setcode", + "ricardian_contract": "" + }, + { + "name": "setglimits", + "type": "setglimits", + "ricardian_contract": "" + }, + { + "name": "setparams", + "type": "setparams", + "ricardian_contract": "" + }, + { + "name": "setpriv", + "type": "setpriv", + "ricardian_contract": "" + }, + { + "name": "setprods", + "type": "setprods", + "ricardian_contract": "" + }, + { + "name": "unlinkauth", + "type": "unlinkauth", + "ricardian_contract": "" + }, + { + "name": "updateauth", + "type": "updateauth", + "ricardian_contract": "" + } + ], + "tables": [ + { + "name": "abihash", + "type": "abi_hash", + "index_type": "i64", + "key_names": [], + "key_types": [] + } + ], + "ricardian_clauses": [], + "variants": [], + "abi_extensions": [] +} \ No newline at end of file diff --git a/unittests/contracts/old_versions/v1.6.0-rc3/eosio.bios/eosio.bios.wasm b/unittests/contracts/old_versions/v1.6.0-rc3/eosio.bios/eosio.bios.wasm new file mode 100755 index 0000000000000000000000000000000000000000..fd6478776ed05e9dc57cae07d6419c266969e48f GIT binary patch literal 13358 zcmcJWYm8jyS;yaVo0;|5jZb37HtRy)ISsHbZL@CNwMj#>$7$1)7V4Hzq6)Dd?@r>K zytqQ{Y{?B`6 zc6PnKAiKWIIp@7R?{mMM8Q0lZikx%NuSCbr$6H(Oe75yGf86v^71JvnChaQut5 zw2IGU%Px^7ZDH7@$>-0fdSH>`UJKp%hDK}$5H*nSY@Y&_Zq>;d zpDsGHoAWEnGjpBI&dkRAFLqtShS$5DxtXCk*VOQ6cV%OKWu~*S(OutkPD72Q?$Ye) zW!JL#_3qlG`StEh=hEgyH=?1tHoBWLtLyVm&o6eL>dv_X8a$-I_3p;XrS;kF%;Nmg z{N{$sHFG2^Us+w*=+3ckW$w~!cYVW+YEox0AI{3>F3dbRzdScHKXuBDX}Xxbw7%Y5 z7UbFP{L|facbA3_u<$9)(>lYlv2n5U?)OZ&gPI*>c4Kn|oHbu)XpEstt8gTY-L0{# zb72LNGu?aPcwup6c478nXMTBRwX@z?>TZH2x3dcnX`Jyo^oGlt$;rv=9(O1@6dy_s zrH33>=bFu?YaTw_y4SfQM~)ul$sIk~Xf=;Ek2H@QO|xh5AMywO{1+y{`>&HI+IpL-{@pE<$$hT4C!Y3^cOM^f z@$}ALAD_K?^~?YDd%y8VZ=C(PF=vlgum0(GzWK)4pB;;M+}{35nElvTY>(T&{`dd# z+8bv-KGyI_<)-3~aEYek$9&?G6Uk$z6VEXbgT7bXJ3iXT!>;Xr`orJ;=3i_Lplok{ z=QrOt`?0ZfaK`5aGz&oIxWrTO`2h6%>7-QyZS{(yEhar%C+Nox^QLl z13}QfS$!(~5Kx-M``WRgz|g^7wYs)Y#vI^drlRrOr`0!HuX21At#OZBrs9Xf<%}-A z*%JfO>P#;zEq<6)RjbFpGV$p|JUtaVR;DKsNNO?nVq}vopMcw7vUNInF=F;xHtSo= zm57P0)nw0yJMY9hXqGw|$QPrMu?Zfyd`tU*&Zlq&<3zXsF^msJj`e(2MVET*1k|}( zgDZxOZ}f~`NTlA_xZ61v_1P5{m00ppj1VxcLpx%C73?V*`R9_ju@yg$IJTqfh$Eg3 ze`|r%(4$%yF``B$(w^(dE}Y4s-TWO9$lIQD$l&EzhLO$X2BdP5uNa~(pNmcHHMV9|#95rP% zlh={bZ5fSw(4m!*j5iUqrBGg6Y_}wQB-{M(n8UDMdc_s7P5QWMNoBe8+9NVbZP`7_ zzSakK_T|3iN2(|&I%-|p2`V7r9@gd&Bn?v~0<2GL*VBOK)kN=|&b663k@&Vacs+w~ zzExd~)?jqJ52GgopGW$9&OakNh`#@Ja4xX9Hiy~(_H$7%ntn&XP32Net5P;9ihT}$$>WWkkKR&|{K z+#L9mRzC>0>T_E=LAW(^kPdFuL6|^UYxs0v%q|z-f{|z76grnySMcWdg&-Uv@HOuI zAA&m{*&TslI<{2%dOtE0fN$W=&kebA83K4>f^|fih~IA+01mdATxyR574wP0?MU^_ zmtTMTN699M&<+JC?k}5+PA&$e@T5YZqs!w05+Ic$f)zcc%mu%nnB&v5a;(W^?Jl)L z>b4VEu4_Ps@5O-cc_0Gl+(OwDvYAv^h-NjS^3mT0(WW7q?LrjFh3H(sL{Nk%O4~;? 
[... base85-encoded GIT binary patch payload for the new unittests/contracts/old_versions/v1.6.0-rc3/eosio.bios/eosio.bios.wasm (literal 13358) elided: machine-generated, not human-readable ...]
z^PdjiYkHGYBbW`1p_{BIB6n}8bbY63SNw!pox}m9>Q>_{6nxF6degpOvwu(MCoL8G z{a4iD>qi0%4sHH__k@N)p(jCx@bcDrLc_cJh)4xk^z6B-6}b42#`gobzDIwJe_goZ)g9Fe^}q2b-P zj>x_}ppdQ>&7zjZ&W2+EW<}Z0&+;-ei;gMj$u#dpZic~|Lv7fQ&*yq(ei-EVY}x`Wjy!V z_;$s-QB978JZ#N6Ib;1d+wha@n=J$Z1<%~R(xCOJmOvtGwFGXhCs)jRUW$;Hj7k*x z|Dof4ERek>y2VPLsBcD+Nd*DedVi3o&a=0ZjGL3s1&@GF`9!!^VXO>KDWM9DM~{cs zPPEdjwD9^Oz^E5R!@NESg#5R%sAUbuS1S=2Pgm}p?z4}eW6F76e8kF~he<6ej(Ct6 z&Lg=Y|D0kLUiB3V8kDsrDB;s(`Y^pH(W6>-+Kq!MrSHS!BUFF}HWB)~PE&FA;v;O; zw+0Z`=%SjjePChin8uE>!5TLogh2ifFq5?^6GTvb zI=uScR;Je~42JOlLu+>!hV`0Z3^~_uG_wao1aYO3(u0$dQt1oeg<+aM#~Wx<1tsv} z^lL9fa8t!Z^|?<+cs;3$ZmCET2`=;olqW-v9H0+Q$&w;_z_TcI$5etzD(957*Zyh; z*Zj*#%UG5_uyUc-o!#`S`nATK-(2zY{6wL{PaS4-zu}ixHvRmvU%IroIX}Y>DZ0-( zYl-Gd-SzdAb+4a*%r8GR*yffxi~Q(e=E?PyB|f?UeWtVF=eisGA|RZwvb=cNU+DT- zRx;9gk`ulC{J@sZt}LxC@=Os*ZHvuJ84Mf@4ANarIoq)Cof-@Xd0G- #include -#include - #include #include @@ -21,10 +19,90 @@ using namespace eosio::chain; using namespace eosio::testing; +protocol_feature_manager make_protocol_feature_manager() { + protocol_feature_manager pfm; + + set visited_builtins; + + std::function add_builtins = + [&pfm, &visited_builtins, &add_builtins]( builtin_protocol_feature_t codename ) -> void { + auto res = visited_builtins.emplace( codename ); + if( !res.second ) return; + + auto f = pfm.make_default_builtin_protocol_feature( codename, [&add_builtins]( builtin_protocol_feature_t d ) { + add_builtins( d ); + } ); + + pfm.add_feature( f ); + }; + + for( const auto& p : builtin_protocol_feature_codenames ) { + add_builtins( p.first ); + } + + return pfm; +} BOOST_AUTO_TEST_SUITE(protocol_feature_tests) -BOOST_AUTO_TEST_CASE( unaccepted_protocol_activation ) try { +BOOST_AUTO_TEST_CASE( activate_preactivate_feature ) try { + tester c(false, db_read_mode::SPECULATIVE); + c.close(); + c.open( make_protocol_feature_manager(), nullptr ); + + const auto& pfm = c.control->get_protocol_feature_manager(); + + c.produce_block(); + + // Cannot set latest bios contract since it requires intrinsics that have not yet been whitelisted. + BOOST_CHECK_EXCEPTION( c.set_code( config::system_account_name, contracts::eosio_bios_wasm() ), + wasm_exception, fc_exception_message_is("env.is_feature_activated unresolveable") + ); + + // But the old bios contract can still be set. + c.set_code( config::system_account_name, contracts::before_preactivate_eosio_bios_wasm() ); + c.set_abi( config::system_account_name, contracts::before_preactivate_eosio_bios_abi().data() ); + + auto t = c.control->pending_block_time(); + c.control->abort_block(); + BOOST_REQUIRE_EXCEPTION( c.control->start_block( t, 0, {digest_type()} ), protocol_feature_exception, + fc_exception_message_starts_with( "unrecognized protocol feature with digest:" ) + ); + + auto d = pfm.get_builtin_digest( builtin_protocol_feature_t::preactivate_feature ); + + BOOST_REQUIRE( d ); + + // Activate PREACTIVATE_FEATURE. + c.control->start_block( t, 0, { *d } ); + c.finish_block(); + c.produce_block(); + + // Now the latest bios contract can be set. 
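   // [editorial aside] The set_code just below succeeds only because
   // activating PREACTIVATE_FEATURE fired the controller's on_activation<>
   // handler (patch 108 above), which whitelists the `preactivate_feature`
   // and `is_feature_activated` intrinsics that the new bios imports.
   // Schematically, the same call made earlier in this test fails:
   //
   //    c.set_code( config::system_account_name, contracts::eosio_bios_wasm() );
   //    // before activation: wasm_exception "env.is_feature_activated unresolveable"
   //    // after activation:  loads and links cleanly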
+ c.set_code( config::system_account_name, contracts::eosio_bios_wasm() ); + c.set_abi( config::system_account_name, contracts::eosio_bios_abi().data() ); + + c.produce_block(); + + BOOST_CHECK_EXCEPTION( c.push_action( config::system_account_name, N(reqactivated), config::system_account_name, + mutable_variant_object()("feature_digest", digest_type()) ), + eosio_assert_message_exception, + eosio_assert_message_is( "protocol feature is not activated" ) + ); + + c.push_action( config::system_account_name, N(reqactivated), config::system_account_name, mutable_variant_object() + ("feature_digest", *d ) + ); + + c.produce_block(); + + // Ensure validator node accepts the blockchain + + tester c2(false, db_read_mode::SPECULATIVE); + c2.close(); + c2.open( make_protocol_feature_manager(), nullptr ); + + push_blocks( c, c2 ); } FC_LOG_AND_RETHROW() From c20443bd584bc6c3ad18217b16ca0b550fbb30e1 Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Mon, 11 Mar 2019 20:38:22 -0500 Subject: [PATCH 111/680] Turning off bnet long running tests to allow other long running tests to complete in the 1 hour timeslot. --- tests/CMakeLists.txt | 5 ----- 1 file changed, 5 deletions(-) diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index 68116bab863..f5a3b90fe76 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -84,8 +84,6 @@ add_test(NAME db_modes_test COMMAND tests/db_modes_test.sh WORKING_DIRECTORY ${C # Long running tests add_test(NAME nodeos_sanity_lr_test COMMAND tests/nodeos_run_test.py -v --sanity-test --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST nodeos_sanity_lr_test PROPERTY LABELS long_running_tests) -add_test(NAME nodeos_sanity_bnet_lr_test COMMAND tests/nodeos_run_test.py -v --sanity-test --p2p-plugin bnet --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) -set_property(TEST nodeos_sanity_bnet_lr_test PROPERTY LABELS long_running_tests) add_test(NAME nodeos_run_check_lr_test COMMAND tests/nodeos_run_test.py -v --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST nodeos_run_check_lr_test PROPERTY LABELS long_running_tests) add_test(NAME nodeos_remote_lr_test COMMAND tests/nodeos_run_remote_test.py -v --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) @@ -100,9 +98,6 @@ set_property(TEST nodeos_forked_chain_lr_test PROPERTY LABELS long_running_tests add_test(NAME nodeos_voting_lr_test COMMAND tests/nodeos_voting_test.py -v --wallet-port 9902 --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST nodeos_voting_lr_test PROPERTY LABELS long_running_tests) -add_test(NAME nodeos_voting_bnet_lr_test COMMAND tests/nodeos_voting_test.py -v --wallet-port 9903 --p2p-plugin bnet --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) -set_property(TEST nodeos_voting_bnet_lr_test PROPERTY LABELS long_running_tests) - add_test(NAME nodeos_under_min_avail_ram_lr_test COMMAND tests/nodeos_under_min_avail_ram.py -v --wallet-port 9904 --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST nodeos_under_min_avail_ram_lr_test PROPERTY LABELS long_running_tests) From 9340e2c5db135e13ba9e8a9d22ab9907593d8e69 Mon Sep 17 00:00:00 2001 From: Andrianto Lie Date: Tue, 12 Mar 2019 14:44:01 +0800 Subject: [PATCH 112/680] Initial modification to tester for pfm without modifying current test behaviour --- .../testing/include/eosio/testing/tester.hpp | 35 +++- libraries/testing/tester.cpp | 157 
++++++++++++++++-- unittests/api_tests.cpp | 6 +- unittests/forked_tests.cpp | 8 +- unittests/protocol_feature_tests.cpp | 12 +- 5 files changed, 182 insertions(+), 36 deletions(-) diff --git a/libraries/testing/include/eosio/testing/tester.hpp b/libraries/testing/include/eosio/testing/tester.hpp index a7f00d6f6a8..8e785700034 100644 --- a/libraries/testing/include/eosio/testing/tester.hpp +++ b/libraries/testing/include/eosio/testing/tester.hpp @@ -56,6 +56,14 @@ namespace boost { namespace test_tools { namespace tt_detail { } } } namespace eosio { namespace testing { + enum class setup_policy { + none, + old_bios_only, + preactivate_feature_only, + preactivate_feature_and_new_bios, + full + }; + std::vector read_wasm( const char* fn ); std::vector read_abi( const char* fn ); std::string read_wast( const char* fn ); @@ -67,6 +75,8 @@ namespace eosio { namespace testing { bool expect_assert_message(const fc::exception& ex, string expected); + protocol_feature_manager make_protocol_feature_manager(); + /** * @class tester * @brief provides utility function to simplify the creation of unit tests @@ -82,13 +92,14 @@ namespace eosio { namespace testing { virtual ~base_tester() {}; - void init(bool push_genesis = true, db_read_mode read_mode = db_read_mode::SPECULATIVE); + void init(const setup_policy policy = setup_policy::old_bios_only, db_read_mode read_mode = db_read_mode::SPECULATIVE); void init(controller::config config, const snapshot_reader_ptr& snapshot = nullptr); void init(controller::config config, protocol_feature_manager&& pfm, const snapshot_reader_ptr& snapshot = nullptr); + void execute_setup_policy(const setup_policy policy); void close(); - void open( protocol_feature_manager&& pfm, const snapshot_reader_ptr& snapshot ); - void open( const snapshot_reader_ptr& snapshot ); + void open( protocol_feature_manager&& pfm, const snapshot_reader_ptr& snapshot); + void open( const snapshot_reader_ptr& snapshot); bool is_same_chain( base_tester& other ); virtual signed_block_ptr produce_block( fc::microseconds skip_time = fc::milliseconds(config::block_interval_ms), uint32_t skip_flag = 0/*skip_missed_block_penalty*/ ) = 0; @@ -154,7 +165,8 @@ namespace eosio { namespace testing { return traces; } - void push_genesis_block(); + void set_before_preactivate_bios_contract(); + void set_bios_contract(); vector get_producer_keys( const vector& producer_names )const; transaction_trace_ptr set_producers(const vector& producer_names); @@ -282,6 +294,10 @@ namespace eosio { namespace testing { return cfg; } + void schedule_protocol_features_wo_preactivation(const vector feature_digests); + void preactivate_protocol_features(const vector feature_digests); + void schedule_all_builtin_protocol_features(); + protected: signed_block_ptr _produce_block( fc::microseconds skip_time, bool skip_pending_trxs = false, uint32_t skip_flag = 0 ); void _start_block(fc::time_point block_time); @@ -298,12 +314,13 @@ namespace eosio { namespace testing { controller::config cfg; map chain_transactions; map last_produced_block; + vector protocol_features_to_be_activated_wo_preactivation; }; class tester : public base_tester { public: - tester(bool push_genesis = true, db_read_mode read_mode = db_read_mode::SPECULATIVE ) { - init(push_genesis, read_mode); + tester(setup_policy policy = setup_policy::old_bios_only, db_read_mode read_mode = db_read_mode::SPECULATIVE ) { + init(policy, read_mode); } tester(controller::config config) { @@ -372,11 +389,11 @@ namespace eosio { namespace testing { vcfg.trusted_producers = 
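// [editorial aside] Note that the validating node is now constructed with
// make_protocol_feature_manager() rather than a default-constructed manager:
// the validator must recognize the same set of builtin protocol features as
// the producing node, or it would reject blocks that activate one of them.
// Standalone, the wiring mirrors the make_unique call just below (a sketch;
// config preparation is omitted and assumed as in base_tester):
//
//    controller::config cfg;    // assume populated as base_tester::init does
//    controller chain( cfg, make_protocol_feature_manager() );
//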
trusted_producers; - validating_node = std::make_unique(vcfg); + validating_node = std::make_unique(vcfg, make_protocol_feature_manager()); validating_node->add_indices(); validating_node->startup( []() { return false; } ); - init(true); + init(); } validating_tester(controller::config config) { @@ -387,7 +404,7 @@ namespace eosio { namespace testing { vcfg.blocks_dir = vcfg.blocks_dir.parent_path() / std::string("v_").append( vcfg.blocks_dir.filename().generic_string() ); vcfg.state_dir = vcfg.state_dir.parent_path() / std::string("v_").append( vcfg.state_dir.filename().generic_string() ); - validating_node = std::make_unique(vcfg); + validating_node = std::make_unique(vcfg, make_protocol_feature_manager()); validating_node->add_indices(); validating_node->startup( []() { return false; } ); diff --git a/libraries/testing/tester.cpp b/libraries/testing/tester.cpp index b29bee310e0..8fdcd177a30 100644 --- a/libraries/testing/tester.cpp +++ b/libraries/testing/tester.cpp @@ -79,11 +79,35 @@ namespace eosio { namespace testing { memcpy( data.data(), obj.value.data(), obj.value.size() ); } + protocol_feature_manager make_protocol_feature_manager() { + protocol_feature_manager pfm; + + set visited_builtins; + + std::function add_builtins = + [&pfm, &visited_builtins, &add_builtins]( builtin_protocol_feature_t codename ) -> void { + auto res = visited_builtins.emplace( codename ); + if( !res.second ) return; + + auto f = pfm.make_default_builtin_protocol_feature( codename, [&add_builtins]( builtin_protocol_feature_t d ) { + add_builtins( d ); + } ); + + pfm.add_feature( f ); + }; + + for( const auto& p : builtin_protocol_feature_codenames ) { + add_builtins( p.first ); + } + + return pfm; + } + bool base_tester::is_same_chain( base_tester& other ) { return control->head_block_id() == other.control->head_block_id(); } - void base_tester::init(bool push_genesis, db_read_mode read_mode) { + void base_tester::init(const setup_policy policy, db_read_mode read_mode) { cfg.blocks_dir = tempdir.path() / config::default_blocks_dir_name; cfg.state_dir = tempdir.path() / config::default_state_dir_name; cfg.state_size = 1024*1024*8; @@ -104,31 +128,82 @@ namespace eosio { namespace testing { } open(nullptr); - - if (push_genesis) - push_genesis_block(); + execute_setup_policy(policy); } - void base_tester::init(controller::config config, const snapshot_reader_ptr& snapshot) { cfg = config; open(snapshot); } - void base_tester::init(controller::config config, protocol_feature_manager&& pfm, const snapshot_reader_ptr& snapshot ) { + void base_tester::init(controller::config config, protocol_feature_manager&& pfm, const snapshot_reader_ptr& snapshot) { cfg = config; open(std::move(pfm), snapshot); } + void base_tester::execute_setup_policy(const setup_policy policy) { + const auto& pfm = control->get_protocol_feature_manager(); + + auto schedule_preactivate_protocol_feature = [&]() { + auto preactivate_feature_digest = pfm.get_builtin_digest(builtin_protocol_feature_t::preactivate_feature); + schedule_protocol_features_wo_preactivation({*preactivate_feature_digest}); + }; + + auto schedule_all_builtin_protocol_features = [&]() { + const auto& head_block_num = control->head_block_num(); + // Check all builtins and split them based on whether a preactivation is required or not + vector require_preactivation, without_preactivation; + for (auto itr = builtin_protocol_feature_codenames.begin(); itr != builtin_protocol_feature_codenames.end(); itr++) { + const auto& codename = itr->first; + if 
(pfm.is_builtin_activated(codename, head_block_num) || !itr->second.subjective_restrictions.enabled) continue;
+            const digest_type digest = *pfm.get_builtin_digest(codename);
+            if (itr->second.subjective_restrictions.preactivation_required) {
+               require_preactivation.emplace_back(digest);
+            } else {
+               without_preactivation.emplace_back(digest);
+            }
+         }
+         preactivate_protocol_features(require_preactivation);
+         schedule_protocol_features_wo_preactivation(without_preactivation);
+      };
+
+      switch (policy) {
+         case setup_policy::old_bios_only: {
+            set_before_preactivate_bios_contract();
+            break;
+         }
+         case setup_policy::preactivate_feature_only: {
+            schedule_preactivate_protocol_feature();
+            produce_block(); // block production is required to activate protocol feature
+            break;
+         }
+         case setup_policy::preactivate_feature_and_new_bios: {
+            schedule_preactivate_protocol_feature();
+            produce_block();
+            set_bios_contract();
+            break;
+         }
+         case setup_policy::full: {
+            schedule_preactivate_protocol_feature();
+            produce_block();
+            set_bios_contract();
+            schedule_all_builtin_protocol_features();
+            produce_block();
+            break;
+         }
+         case setup_policy::none:
+         default:
+            break;
+      };
+   }

    void base_tester::close() {
       control.reset();
       chain_transactions.clear();
    }
-
-   void base_tester::open( const snapshot_reader_ptr& snapshot) {
-      open( protocol_feature_manager{}, snapshot );
+   void base_tester::open( const snapshot_reader_ptr& snapshot ) {
+      open( make_protocol_feature_manager(), snapshot );
    }

    void base_tester::open( protocol_feature_manager&& pfm, const snapshot_reader_ptr& snapshot ) {
@@ -209,7 +284,26 @@ namespace eosio { namespace testing {
       }

       control->abort_block();
-      control->start_block( block_time, head_block_number - last_produced_block_num );
+
+      vector<digest_type> feature_to_be_activated;
+      // First add protocol features to be activated WITHOUT preactivation
+      feature_to_be_activated.insert(
+         feature_to_be_activated.end(),
+         protocol_features_to_be_activated_wo_preactivation.begin(),
+         protocol_features_to_be_activated_wo_preactivation.end()
+      );
+      // Then add protocol features to be activated WITH preactivation
+      const auto preactivated_protocol_features = control->get_preactivated_protocol_features();
+      feature_to_be_activated.insert(
+         feature_to_be_activated.end(),
+         preactivated_protocol_features.begin(),
+         preactivated_protocol_features.end()
+      );
+
+      control->start_block( block_time, head_block_number - last_produced_block_num, feature_to_be_activated );
+
+      // Clear the list; if start_block finishes successfully, the protocol features should be assumed to be activated
+      protocol_features_to_be_activated_wo_preactivation.clear();
    }

    signed_block_ptr base_tester::_finish_block() {
@@ -835,11 +929,17 @@ namespace eosio { namespace testing {
       sync_dbs(other, *this);
    }

-   void base_tester::push_genesis_block() {
+   void base_tester::set_before_preactivate_bios_contract() {
+      set_code(config::system_account_name, contracts::before_preactivate_eosio_bios_wasm());
+      set_abi(config::system_account_name, contracts::before_preactivate_eosio_bios_abi().data());
+   }
+
+   void base_tester::set_bios_contract() {
       set_code(config::system_account_name, contracts::eosio_bios_wasm());
       set_abi(config::system_account_name, contracts::eosio_bios_abi().data());
    }

+
    vector<producer_key> base_tester::get_producer_keys( const vector<account_name>& producer_names )const {
       // Create producer schedule
       vector<producer_key> schedule;
@@ -862,6 +962,41 @@ namespace eosio { namespace testing {
       return tid;
    }

+   void base_tester::schedule_protocol_features_wo_preactivation(const vector<digest_type>
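// [editorial aside] The member being defined here only queues digests; they
// take effect inside _start_block (changed above), which hands them straight
// to controller::start_block without an on-chain `preactivate` step. That
// path is only legitimate for features whose subjective restrictions do not
// require preactivation, PREACTIVATE_FEATURE itself being the canonical case,
// since nothing exists yet that could pre-activate it. Typical use, as in
// protocol_feature_tests.cpp earlier:
//
//    c.schedule_protocol_features_wo_preactivation( { *d } );  // d: a digest
//    c.produce_block();     // the queued feature activates in this block
//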
feature_digests) { + protocol_features_to_be_activated_wo_preactivation.insert( + protocol_features_to_be_activated_wo_preactivation.end(), + feature_digests.begin(), + feature_digests.end() + ); + } + + void base_tester::preactivate_protocol_features(const vector feature_digests) { + for (auto& feature_digest: feature_digests) { + push_action(config::system_account_name, N(preactivate_feature), N(eosio), fc::mutable_variant_object()("feature_digest", feature_digest)); + } + } + + void base_tester::schedule_all_builtin_protocol_features() { + const auto&pfm = control->get_protocol_feature_manager(); + const auto& head_block_num = control->head_block_num(); + // Check all builtins and split them based on whether a preactivation is required or not + vector require_preactivation; + vector without_preactivation; + for (auto itr = builtin_protocol_feature_codenames.begin(); itr != builtin_protocol_feature_codenames.end(); itr++) { + const auto& codename = itr->first; + if (pfm.is_builtin_activated(codename, head_block_num) || !itr->second.subjective_restrictions.enabled) continue; + const digest_type digest = *pfm.get_builtin_digest(codename); + if (itr->second.subjective_restrictions.preactivation_required) { + require_preactivation.emplace_back(digest); + } else { + without_preactivation.emplace_back(digest); + } + } + + preactivate_protocol_features(require_preactivation); + schedule_protocol_features_wo_preactivation(without_preactivation); + } + bool fc_exception_message_is::operator()( const fc::exception& ex ) { auto message = ex.get_log().at( 0 ).get_message(); bool match = (message == expected); diff --git a/unittests/api_tests.cpp b/unittests/api_tests.cpp index 1d4fedc0e7c..aee65a2d25e 100644 --- a/unittests/api_tests.cpp +++ b/unittests/api_tests.cpp @@ -387,10 +387,10 @@ BOOST_FIXTURE_TEST_CASE(action_receipt_tests, TESTER) { try { BOOST_CHECK_EQUAL( m.begin()->second, base_test_auth_seq_num + 4 ); } ); - set_code( config::system_account_name, contracts::eosio_bios_wasm() ); + set_code( config::system_account_name, contracts::before_preactivate_eosio_bios_wasm() ); - set_code( N(test), contracts::eosio_bios_wasm() ); - set_abi( N(test), contracts::eosio_bios_abi().data() ); + set_code( N(test), contracts::before_preactivate_eosio_bios_wasm() ); + set_abi( N(test), contracts::before_preactivate_eosio_bios_abi().data() ); set_code( N(test), contracts::payloadless_wasm() ); call_doit_and_check( N(test), N(test), [&]( const transaction_trace_ptr& res ) { diff --git a/unittests/forked_tests.cpp b/unittests/forked_tests.cpp index 99619b6aaa3..0abc0c42e96 100644 --- a/unittests/forked_tests.cpp +++ b/unittests/forked_tests.cpp @@ -391,17 +391,17 @@ BOOST_AUTO_TEST_CASE( read_modes ) try { auto head_block_num = c.control->head_block_num(); auto last_irreversible_block_num = c.control->last_irreversible_block_num(); - tester head(true, db_read_mode::HEAD); + tester head(setup_policy::old_bios_only, db_read_mode::HEAD); push_blocks(c, head); BOOST_CHECK_EQUAL(head_block_num, head.control->fork_db_head_block_num()); BOOST_CHECK_EQUAL(head_block_num, head.control->head_block_num()); - tester read_only(false, db_read_mode::READ_ONLY); + tester read_only(setup_policy::none, db_read_mode::READ_ONLY); push_blocks(c, read_only); BOOST_CHECK_EQUAL(head_block_num, read_only.control->fork_db_head_block_num()); BOOST_CHECK_EQUAL(head_block_num, read_only.control->head_block_num()); - tester irreversible(true, db_read_mode::IRREVERSIBLE); + tester irreversible(setup_policy::old_bios_only, 
db_read_mode::IRREVERSIBLE); push_blocks(c, irreversible); BOOST_CHECK_EQUAL(head_block_num, irreversible.control->fork_db_pending_head_block_num()); BOOST_CHECK_EQUAL(last_irreversible_block_num, irreversible.control->fork_db_head_block_num()); @@ -475,7 +475,7 @@ BOOST_AUTO_TEST_CASE( irreversible_mode ) try { BOOST_REQUIRE( hbn4 > hbn3 ); BOOST_REQUIRE( lib4 < hbn1 ); - tester irreversible(false, db_read_mode::IRREVERSIBLE); + tester irreversible(setup_policy::none, db_read_mode::IRREVERSIBLE); push_blocks( main, irreversible, hbn1 ); diff --git a/unittests/protocol_feature_tests.cpp b/unittests/protocol_feature_tests.cpp index f0f79f7d12b..26d29c6b78c 100644 --- a/unittests/protocol_feature_tests.cpp +++ b/unittests/protocol_feature_tests.cpp @@ -46,9 +46,7 @@ protocol_feature_manager make_protocol_feature_manager() { BOOST_AUTO_TEST_SUITE(protocol_feature_tests) BOOST_AUTO_TEST_CASE( activate_preactivate_feature ) try { - tester c(false, db_read_mode::SPECULATIVE); - c.close(); - c.open( make_protocol_feature_manager(), nullptr ); + tester c(setup_policy::none, db_read_mode::SPECULATIVE); const auto& pfm = c.control->get_protocol_feature_manager(); @@ -74,8 +72,7 @@ BOOST_AUTO_TEST_CASE( activate_preactivate_feature ) try { BOOST_REQUIRE( d ); // Activate PREACTIVATE_FEATURE. - c.control->start_block( t, 0, { *d } ); - c.finish_block(); + c.schedule_protocol_features_wo_preactivation({ *d }); c.produce_block(); // Now the latest bios contract can be set. @@ -98,10 +95,7 @@ BOOST_AUTO_TEST_CASE( activate_preactivate_feature ) try { // Ensure validator node accepts the blockchain - tester c2(false, db_read_mode::SPECULATIVE); - c2.close(); - c2.open( make_protocol_feature_manager(), nullptr ); - + tester c2(setup_policy::none, db_read_mode::SPECULATIVE); push_blocks( c, c2 ); } FC_LOG_AND_RETHROW() From 2398d8c629b26eca74d0f532262a4e4f848826d7 Mon Sep 17 00:00:00 2001 From: Andrianto Lie Date: Tue, 12 Mar 2019 16:41:15 +0800 Subject: [PATCH 113/680] Change default setup_policy to full and fixes the existing test --- .../testing/include/eosio/testing/tester.hpp | 4 +-- libraries/testing/tester.cpp | 2 +- unittests/api_tests.cpp | 6 ++-- unittests/database_tests.cpp | 32 ++++++------------- unittests/forked_tests.cpp | 13 ++++---- unittests/producer_schedule_tests.cpp | 18 +++++++---- unittests/protocol_feature_tests.cpp | 24 -------------- 7 files changed, 34 insertions(+), 65 deletions(-) diff --git a/libraries/testing/include/eosio/testing/tester.hpp b/libraries/testing/include/eosio/testing/tester.hpp index 8e785700034..afd2354f9ac 100644 --- a/libraries/testing/include/eosio/testing/tester.hpp +++ b/libraries/testing/include/eosio/testing/tester.hpp @@ -92,7 +92,7 @@ namespace eosio { namespace testing { virtual ~base_tester() {}; - void init(const setup_policy policy = setup_policy::old_bios_only, db_read_mode read_mode = db_read_mode::SPECULATIVE); + void init(const setup_policy policy = setup_policy::full, db_read_mode read_mode = db_read_mode::SPECULATIVE); void init(controller::config config, const snapshot_reader_ptr& snapshot = nullptr); void init(controller::config config, protocol_feature_manager&& pfm, const snapshot_reader_ptr& snapshot = nullptr); void execute_setup_policy(const setup_policy policy); @@ -319,7 +319,7 @@ namespace eosio { namespace testing { class tester : public base_tester { public: - tester(setup_policy policy = setup_policy::old_bios_only, db_read_mode read_mode = db_read_mode::SPECULATIVE ) { + tester(setup_policy policy = setup_policy::full, 
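// [editorial aside] With this default, a bare `tester t;` now boots a chain
// that has PREACTIVATE_FEATURE active, the new bios installed, and every
// enabled builtin scheduled (the `full` policy implemented in
// execute_setup_policy above). Tests that need the legacy environment opt
// out explicitly, exactly as the forked_tests changes earlier do:
//
//    tester legacy( setup_policy::old_bios_only );               // old bios
//    tester bare( setup_policy::none, db_read_mode::READ_ONLY ); // no bios at all
//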
db_read_mode read_mode = db_read_mode::SPECULATIVE ) {
         init(policy, read_mode);
      }

diff --git a/libraries/testing/tester.cpp b/libraries/testing/tester.cpp
index 8fdcd177a30..4621e13ae28 100644
--- a/libraries/testing/tester.cpp
+++ b/libraries/testing/tester.cpp
@@ -146,7 +146,7 @@ namespace eosio { namespace testing {

       auto schedule_preactivate_protocol_feature = [&]() {
          auto preactivate_feature_digest = pfm.get_builtin_digest(builtin_protocol_feature_t::preactivate_feature);
-         schedule_protocol_features_wo_preactivation({*preactivate_feature_digest});
+         schedule_protocol_features_wo_preactivation( { *preactivate_feature_digest } );
       };

       auto schedule_all_builtin_protocol_features = [&]() {
diff --git a/unittests/api_tests.cpp b/unittests/api_tests.cpp
index aee65a2d25e..1d4fedc0e7c 100644
--- a/unittests/api_tests.cpp
+++ b/unittests/api_tests.cpp
@@ -387,10 +387,10 @@ BOOST_FIXTURE_TEST_CASE(action_receipt_tests, TESTER) { try {
       BOOST_CHECK_EQUAL( m.begin()->second, base_test_auth_seq_num + 4 );
    } );

-   set_code( config::system_account_name, contracts::before_preactivate_eosio_bios_wasm() );
+   set_code( config::system_account_name, contracts::eosio_bios_wasm() );

-   set_code( N(test), contracts::before_preactivate_eosio_bios_wasm() );
-   set_abi( N(test), contracts::before_preactivate_eosio_bios_abi().data() );
+   set_code( N(test), contracts::eosio_bios_wasm() );
+   set_abi( N(test), contracts::eosio_bios_abi().data() );

    set_code( N(test), contracts::payloadless_wasm() );

    call_doit_and_check( N(test), N(test), [&]( const transaction_trace_ptr& res ) {
diff --git a/unittests/database_tests.cpp b/unittests/database_tests.cpp
index 21dabc36c56..bd00e7b60eb 100644
--- a/unittests/database_tests.cpp
+++ b/unittests/database_tests.cpp
@@ -64,37 +64,23 @@ BOOST_AUTO_TEST_SUITE(database_tests)
          BOOST_TEST(test.control->fetch_block_by_number(i + 1)->id() == block_ids.back());
       }

-      // Utility function to check expected irreversible block
-      auto calc_exp_last_irr_block_num = [&](uint32_t head_block_num) -> uint32_t {
-         const auto producers_size = test.control->head_block_state()->active_schedule.producers.size();
-         const auto max_reversible_rounds = EOS_PERCENT(producers_size, config::percent_100 - config::irreversible_threshold_percent);
-         if( max_reversible_rounds == 0) {
-            return head_block_num;
-         } else {
-            const auto current_round = head_block_num / config::producer_repetitions;
-            const auto irreversible_round = current_round - max_reversible_rounds;
-            return (irreversible_round + 1) * config::producer_repetitions - 1;
-         }
-      };
-
-      // Check the last irreversible block number is set correctly
-      const auto expected_last_irreversible_block_number = calc_exp_last_irr_block_num(num_of_blocks_to_prod);
+      // Check that the last irreversible block number is set correctly; with a single producer, irreversibility should lag exactly one block behind the head
+      const auto expected_last_irreversible_block_number = test.control->head_block_num() - 1;
       BOOST_TEST(test.control->head_block_state()->dpos_irreversible_blocknum == expected_last_irreversible_block_number);
-      // Check that block 201 cannot be found (only 20 blocks exist)
-      BOOST_TEST(test.control->fetch_block_by_number(num_of_blocks_to_prod + 1 + 1) == nullptr);
+      // Ensure that a block beyond the current head does not exist yet
+      const auto nonexisting_future_block_num = test.control->head_block_num() + 1;
+      BOOST_TEST(test.control->fetch_block_by_number(nonexisting_future_block_num) == nullptr);

       const uint32_t next_num_of_blocks_to_prod = 100;
-      // Produce 100 blocks and check their IDs should match 
the above test.produce_blocks(next_num_of_blocks_to_prod); - const auto next_expected_last_irreversible_block_number = calc_exp_last_irr_block_num( - num_of_blocks_to_prod + next_num_of_blocks_to_prod); + const auto next_expected_last_irreversible_block_number = test.control->head_block_num() - 1; // Check the last irreversible block number is updated correctly BOOST_TEST(test.control->head_block_state()->dpos_irreversible_blocknum == next_expected_last_irreversible_block_number); - // Check that block 201 can now be found - BOOST_CHECK_NO_THROW(test.control->fetch_block_by_number(num_of_blocks_to_prod + 1)); + // Previous nonexisting future block should exist by now + BOOST_CHECK_NO_THROW(test.control->fetch_block_by_number(nonexisting_future_block_num)); // Check the latest head block match - BOOST_TEST(test.control->fetch_block_by_number(num_of_blocks_to_prod + next_num_of_blocks_to_prod + 1)->id() == + BOOST_TEST(test.control->fetch_block_by_number(test.control->head_block_num())->id() == test.control->head_block_id()); } FC_LOG_AND_RETHROW() } diff --git a/unittests/forked_tests.cpp b/unittests/forked_tests.cpp index 0abc0c42e96..feeebf7664f 100644 --- a/unittests/forked_tests.cpp +++ b/unittests/forked_tests.cpp @@ -136,8 +136,9 @@ BOOST_AUTO_TEST_CASE( fork_with_bad_block ) try { BOOST_AUTO_TEST_CASE( forking ) try { tester c; - c.produce_block(); - c.produce_block(); + while (c.control->head_block_num() < 3) { + c.produce_block(); + } auto r = c.create_accounts( {N(dan),N(sam),N(pam)} ); wdump((fc::json::to_pretty_string(r))); c.produce_block(); @@ -287,7 +288,9 @@ BOOST_AUTO_TEST_CASE( forking ) try { */ BOOST_AUTO_TEST_CASE( prune_remove_branch ) try { tester c; - c.produce_blocks(10); + while (c.control->head_block_num() < 11) { + c.produce_block(); + } auto r = c.create_accounts( {N(dan),N(sam),N(pam),N(scott)} ); auto res = c.set_producers( {N(dan),N(sam),N(pam),N(scott)} ); wlog("set producer schedule to [dan,sam,pam,scott]"); @@ -357,9 +360,7 @@ BOOST_AUTO_TEST_CASE( validator_accepts_valid_blocks ) try { block_state_ptr first_block; auto c = n2.control->accepted_block.connect( [&]( const block_state_ptr& bsp) { - if( bsp->block_num == 2 ) { - first_block = bsp; - } + first_block = bsp; } ); push_blocks( n1, n2 ); diff --git a/unittests/producer_schedule_tests.cpp b/unittests/producer_schedule_tests.cpp index de103654380..3f030c2f705 100644 --- a/unittests/producer_schedule_tests.cpp +++ b/unittests/producer_schedule_tests.cpp @@ -204,7 +204,9 @@ BOOST_AUTO_TEST_SUITE(producer_schedule_tests) BOOST_FIXTURE_TEST_CASE( producer_schedule_promotion_test, TESTER ) try { create_accounts( {N(alice),N(bob),N(carol)} ); - produce_block(); + while (control->head_block_num() < 3) { + produce_block(); + } auto compare_schedules = [&]( const vector& a, const producer_schedule_type& b ) { return std::equal( a.begin(), a.end(), b.producers.begin(), b.producers.end() ); @@ -228,7 +230,7 @@ BOOST_FIXTURE_TEST_CASE( producer_schedule_promotion_test, TESTER ) try { produce_block(); // Starts new block which promotes the pending schedule to active BOOST_CHECK_EQUAL( control->active_producers().version, 1u ); BOOST_CHECK_EQUAL( true, compare_schedules( sch1, control->active_producers() ) ); - produce_blocks(7); + produce_blocks(6); res = set_producers( {N(alice),N(bob),N(carol)} ); vector sch2 = { @@ -267,7 +269,9 @@ BOOST_FIXTURE_TEST_CASE( producer_schedule_promotion_test, TESTER ) try { BOOST_FIXTURE_TEST_CASE( producer_schedule_reduction, tester ) try { create_accounts( 
{N(alice),N(bob),N(carol)} ); - produce_block(); + while (control->head_block_num() < 3) { + produce_block(); + } auto compare_schedules = [&]( const vector& a, const producer_schedule_type& b ) { return std::equal( a.begin(), a.end(), b.producers.begin(), b.producers.end() ); @@ -291,7 +295,7 @@ BOOST_FIXTURE_TEST_CASE( producer_schedule_reduction, tester ) try { produce_block(); // Starts new block which promotes the pending schedule to active BOOST_CHECK_EQUAL( control->active_producers().version, 1u ); BOOST_CHECK_EQUAL( true, compare_schedules( sch1, control->active_producers() ) ); - produce_blocks(7); + produce_blocks(6); res = set_producers( {N(alice),N(bob)} ); vector sch2 = { @@ -324,7 +328,9 @@ BOOST_FIXTURE_TEST_CASE( producer_schedule_reduction, tester ) try { BOOST_FIXTURE_TEST_CASE( empty_producer_schedule_has_no_effect, tester ) try { create_accounts( {N(alice),N(bob),N(carol)} ); - produce_block(); + while (control->head_block_num() < 3) { + produce_block(); + } auto compare_schedules = [&]( const vector& a, const producer_schedule_type& b ) { return std::equal( a.begin(), a.end(), b.producers.begin(), b.producers.end() ); @@ -350,7 +356,7 @@ BOOST_FIXTURE_TEST_CASE( empty_producer_schedule_has_no_effect, tester ) try { produce_block(); BOOST_CHECK_EQUAL( control->active_producers().version, 1u ); BOOST_CHECK_EQUAL( true, compare_schedules( sch1, control->active_producers() ) ); - produce_blocks(7); + produce_blocks(6); res = set_producers( {} ); wlog("set producer schedule to []"); diff --git a/unittests/protocol_feature_tests.cpp b/unittests/protocol_feature_tests.cpp index 26d29c6b78c..031294277e9 100644 --- a/unittests/protocol_feature_tests.cpp +++ b/unittests/protocol_feature_tests.cpp @@ -19,30 +19,6 @@ using namespace eosio::chain; using namespace eosio::testing; -protocol_feature_manager make_protocol_feature_manager() { - protocol_feature_manager pfm; - - set visited_builtins; - - std::function add_builtins = - [&pfm, &visited_builtins, &add_builtins]( builtin_protocol_feature_t codename ) -> void { - auto res = visited_builtins.emplace( codename ); - if( !res.second ) return; - - auto f = pfm.make_default_builtin_protocol_feature( codename, [&add_builtins]( builtin_protocol_feature_t d ) { - add_builtins( d ); - } ); - - pfm.add_feature( f ); - }; - - for( const auto& p : builtin_protocol_feature_codenames ) { - add_builtins( p.first ); - } - - return pfm; -} - BOOST_AUTO_TEST_SUITE(protocol_feature_tests) BOOST_AUTO_TEST_CASE( activate_preactivate_feature ) try { From 653eba52fe755cb34fdca9504a5084007baf23b4 Mon Sep 17 00:00:00 2001 From: Nathan Pierce Date: Tue, 12 Mar 2019 12:45:51 -0400 Subject: [PATCH 114/680] Pipeline upgrades for 2-1 (#6919) --- .buildkite/coverage.yml | 10 +- .buildkite/debug.yml | 187 ++++++++++++++++++++++++++++++-------- .buildkite/sanitizers.yml | 44 +++++++-- 3 files changed, 192 insertions(+), 49 deletions(-) diff --git a/.buildkite/coverage.yml b/.buildkite/coverage.yml index c5a50bc64f4..190c9c7f5f9 100644 --- a/.buildkite/coverage.yml +++ b/.buildkite/coverage.yml @@ -15,8 +15,14 @@ steps: agents: queue: "automation-large-builder-fleet" plugins: - docker#v1.4.0: - image: "eosio/ci:ubuntu18" + ecr#v1.1.4: + login: true + account_ids: "436617320021" + no-include-email: true + region: "us-west-2" + docker#v2.1.0: + debug: true + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-1" workdir: /data/job mounts: - /etc/buildkite-agent/config:/config diff --git a/.buildkite/debug.yml b/.buildkite/debug.yml index 
28576d56195..3cd6b16d23a 100644 --- a/.buildkite/debug.yml +++ b/.buildkite/debug.yml @@ -6,9 +6,10 @@ steps: echo 1 | ./eosio_build.sh -o Debug && \ echo "--- Compressing build directory :compression:" && \ tar -pczf build.tar.gz build/ - label: ":darwin: Build" + label: ":darwin: Mojave Build" agents: - - "role=macos-builder" + - "role=builder-v2-1" + - "os=mojave" artifact_paths: "build.tar.gz" timeout: 60 @@ -17,13 +18,19 @@ steps: echo 1 | ./eosio_build.sh -o Debug && \ echo "--- :compression: Compressing build directory" && \ tar -pczf build.tar.gz build/ - label: ":ubuntu: Build" + label: ":ubuntu: 16.04 Build" agents: queue: "automation-large-builder-fleet" artifact_paths: "build.tar.gz" plugins: - docker#v2.0.0: - image: "eosio/ci:ubuntu" + ecr#v1.1.4: + login: true + account_ids: "436617320021" + no-include-email: true + region: "us-west-2" + docker#v2.1.0: + debug: true + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu16_2-1" workdir: /data/job timeout: 60 @@ -37,8 +44,14 @@ steps: queue: "automation-large-builder-fleet" artifact_paths: "build.tar.gz" plugins: - docker#v2.0.0: - image: "eosio/ci:ubuntu18" + ecr#v1.1.4: + login: true + account_ids: "436617320021" + no-include-email: true + region: "us-west-2" + docker#v2.1.0: + debug: true + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-1" workdir: /data/job timeout: 60 @@ -47,13 +60,19 @@ steps: echo 1 | ./eosio_build.sh -o Debug && \ echo "--- :compression: Compressing build directory" && \ tar -pczf build.tar.gz build/ - label: ":fedora: Build" + label: ":fedora: 27 Build" agents: queue: "automation-large-builder-fleet" artifact_paths: "build.tar.gz" plugins: - docker#v2.0.0: - image: "eosio/ci:fedora" + ecr#v1.1.4: + login: true + account_ids: "436617320021" + no-include-email: true + region: "us-west-2" + docker#v2.1.0: + debug: true + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:fedora27_2-1" workdir: /data/job timeout: 60 @@ -62,13 +81,19 @@ steps: echo 1 | ./eosio_build.sh -o Debug && \ echo "--- :compression: Compressing build directory" && \ tar -pczf build.tar.gz build/ - label: ":centos: Build" + label: ":centos: 7 Build" agents: queue: "automation-large-builder-fleet" artifact_paths: "build.tar.gz" plugins: - docker#v2.0.0: - image: "eosio/ci:centos" + ecr#v1.1.4: + login: true + account_ids: "436617320021" + no-include-email: true + region: "us-west-2" + docker#v2.1.0: + debug: true + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-1" workdir: /data/job timeout: 60 @@ -77,13 +102,40 @@ steps: echo 1 | ./eosio_build.sh -o Debug && \ echo "--- :compression: Compressing build directory" && \ tar -pczf build.tar.gz build/ - label: ":aws: Build" + label: ":aws: 1 Build" agents: queue: "automation-large-builder-fleet" artifact_paths: "build.tar.gz" plugins: - docker#v2.0.0: - image: "eosio/ci:amazonlinux" + ecr#v1.1.4: + login: true + account_ids: "436617320021" + no-include-email: true + region: "us-west-2" + docker#v2.1.0: + debug: true + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux1_2-1" + workdir: /data/job + timeout: 60 + + - command: | + echo "+++ :hammer: Building" && \ + echo 1 | ./eosio_build.sh -o Debug && \ + echo "--- :compression: Compressing build directory" && \ + tar -pczf build.tar.gz build/ + label: ":aws: 2 Build" + agents: + queue: "automation-large-builder-fleet" + artifact_paths: "build.tar.gz" + plugins: + ecr#v1.1.4: + login: true + account_ids: "436617320021" + no-include-email: true + region: "us-west-2" + 
docker#v2.1.0: + debug: true + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-1" workdir: /data/job timeout: 60 @@ -91,7 +143,7 @@ steps: - command: | echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":darwin: Build" && \ + buildkite-agent artifact download "build.tar.gz" . --step ":darwin: Mojave Build" && \ tar -zxf build.tar.gz && \ echo "--- :m: Starting MongoDB" && \ $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ @@ -100,9 +152,10 @@ steps: retry: automatic: limit: 1 - label: ":darwin: Tests" + label: ":darwin: Mojave Tests" agents: - - "role=macos-tester" + - "role=builder-v2-1" + - "os=mojave" artifact_paths: - "mongod.log" - "build/genesis.json" @@ -111,7 +164,7 @@ steps: - command: | echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":ubuntu: Build" && \ + buildkite-agent artifact download "build.tar.gz" . --step ":ubuntu: 16.04 Build" && \ tar -zxf build.tar.gz && \ echo "--- :m: Starting MongoDB" && \ $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ @@ -120,7 +173,7 @@ steps: retry: automatic: limit: 1 - label: ":ubuntu: Tests" + label: ":ubuntu: 16.04 Tests" agents: queue: "automation-large-builder-fleet" artifact_paths: @@ -128,8 +181,14 @@ steps: - "build/genesis.json" - "build/config.ini" plugins: - docker#v2.0.0: - image: "eosio/ci:ubuntu" + ecr#v1.1.4: + login: true + account_ids: "436617320021" + no-include-email: true + region: "us-west-2" + docker#v2.1.0: + debug: true + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu16_2-1" workdir: /data/job timeout: 60 @@ -152,14 +211,20 @@ steps: - "build/genesis.json" - "build/config.ini" plugins: - docker#v2.0.0: - image: "eosio/ci:ubuntu18" + ecr#v1.1.4: + login: true + account_ids: "436617320021" + no-include-email: true + region: "us-west-2" + docker#v2.1.0: + debug: true + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-1" workdir: /data/job timeout: 60 - command: | echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":fedora: Build" && \ + buildkite-agent artifact download "build.tar.gz" . --step ":fedora: 27 Build" && \ tar -zxf build.tar.gz && \ echo "--- :m: Starting MongoDB" && \ $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ @@ -168,7 +233,7 @@ steps: retry: automatic: limit: 1 - label: ":fedora: Tests" + label: ":fedora: 27 Tests" agents: queue: "automation-large-builder-fleet" artifact_paths: @@ -176,14 +241,20 @@ steps: - "build/genesis.json" - "build/config.ini" plugins: - docker#v2.0.0: - image: "eosio/ci:fedora" + ecr#v1.1.4: + login: true + account_ids: "436617320021" + no-include-email: true + region: "us-west-2" + docker#v2.1.0: + debug: true + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:fedora27_2-1" workdir: /data/job timeout: 60 - command: | echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":centos: Build" && \ + buildkite-agent artifact download "build.tar.gz" . 
--step ":centos: 7 Build" && \ tar -zxf build.tar.gz && \ echo "--- :m: Starting MongoDB" && \ $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ @@ -192,7 +263,7 @@ steps: retry: automatic: limit: 1 - label: ":centos: Tests" + label: ":centos: 7 Tests" agents: queue: "automation-large-builder-fleet" artifact_paths: @@ -200,14 +271,20 @@ steps: - "build/genesis.json" - "build/config.ini" plugins: - docker#v2.0.0: - image: "eosio/ci:centos" + ecr#v1.1.4: + login: true + account_ids: "436617320021" + no-include-email: true + region: "us-west-2" + docker#v2.1.0: + debug: true + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-1" workdir: /data/job timeout: 60 - command: | echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":aws: Build" && \ + buildkite-agent artifact download "build.tar.gz" . --step ":aws: 1 Build" && \ tar -zxf build.tar.gz && \ echo "--- :m: Starting MongoDB" && \ $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ @@ -216,7 +293,7 @@ steps: retry: automatic: limit: 1 - label: ":aws: Tests" + label: ":aws: 1 Tests" agents: queue: "automation-large-builder-fleet" artifact_paths: @@ -224,7 +301,43 @@ steps: - "build/genesis.json" - "build/config.ini" plugins: - docker#v2.0.0: - image: "eosio/ci:amazonlinux" + ecr#v1.1.4: + login: true + account_ids: "436617320021" + no-include-email: true + region: "us-west-2" + docker#v2.1.0: + debug: true + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux1_2-1" workdir: /data/job timeout: 60 + + - command: | + echo "--- :arrow_down: Downloading build directory" && \ + buildkite-agent artifact download "build.tar.gz" . --step ":aws: 2 Build" && \ + tar -zxf build.tar.gz && \ + echo "--- :m: Starting MongoDB" && \ + $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ + echo "+++ :microscope: Running tests" && \ + cd /data/job/build && ctest -LE long_running_tests --output-on-failure + retry: + automatic: + limit: 1 + label: ":aws: 2 Tests" + agents: + queue: "automation-large-builder-fleet" + artifact_paths: + - "mongod.log" + - "build/genesis.json" + - "build/config.ini" + plugins: + ecr#v1.1.4: + login: true + account_ids: "436617320021" + no-include-email: true + region: "us-west-2" + docker#v2.1.0: + debug: true + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-1" + workdir: /data/job + timeout: 60 \ No newline at end of file diff --git a/.buildkite/sanitizers.yml b/.buildkite/sanitizers.yml index b8588135610..d49493eb5ee 100644 --- a/.buildkite/sanitizers.yml +++ b/.buildkite/sanitizers.yml @@ -26,10 +26,16 @@ steps: - "build.tar.gz" - "ninja.log" plugins: - docker#v1.4.0: - image: "eosio/ci:ubuntu18" - command: ["--privileged"] + ecr#v1.1.4: + login: true + account_ids: "436617320021" + no-include-email: true + region: "us-west-2" + docker#v2.1.0: + debug: true + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-1" workdir: /data/job + command: ["--privileged"] mounts: - /etc/buildkite-agent/config:/config environment: @@ -69,10 +75,16 @@ steps: - "build.tar.gz" - "ninja.log" plugins: - docker#v1.4.0: - image: "eosio/ci:ubuntu18" - command: ["--privileged"] + ecr#v1.1.4: + login: true + account_ids: "436617320021" + no-include-email: true + region: "us-west-2" + docker#v2.1.0: + debug: true + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-1" workdir: /data/job + command: ["--privileged"] mounts: - /etc/buildkite-agent/config:/config environment: @@ -101,8 
+113,14 @@ steps: - "mongod.log" - "sanitizer.log" plugins: - docker#v1.4.0: - image: "eosio/ci:ubuntu18" + ecr#v1.1.4: + login: true + account_ids: "436617320021" + no-include-email: true + region: "us-west-2" + docker#v2.1.0: + debug: true + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-1" workdir: /data/job mounts: - /etc/buildkite-agent/config:/config @@ -123,8 +141,14 @@ steps: - "mongod.log" - "sanitizer.log" plugins: - docker#v1.4.0: - image: "eosio/ci:ubuntu18" + ecr#v1.1.4: + login: true + account_ids: "436617320021" + no-include-email: true + region: "us-west-2" + docker#v2.1.0: + debug: true + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-1" workdir: /data/job mounts: - /etc/buildkite-agent/config:/config From 8fb0d292bc0378645420c423a8c351944d1f811e Mon Sep 17 00:00:00 2001 From: arhag Date: Tue, 12 Mar 2019 15:51:15 -0400 Subject: [PATCH 115/680] rename schedule_all_builtin_protocol_features to preactivate_all_builtin_protocol_feature and reimplement to respect dependencies and earliest allow activation time --- .../testing/include/eosio/testing/tester.hpp | 2 +- libraries/testing/tester.cpp | 72 +++++++++---------- 2 files changed, 36 insertions(+), 38 deletions(-) diff --git a/libraries/testing/include/eosio/testing/tester.hpp b/libraries/testing/include/eosio/testing/tester.hpp index afd2354f9ac..15c48c38bb3 100644 --- a/libraries/testing/include/eosio/testing/tester.hpp +++ b/libraries/testing/include/eosio/testing/tester.hpp @@ -296,7 +296,7 @@ namespace eosio { namespace testing { void schedule_protocol_features_wo_preactivation(const vector feature_digests); void preactivate_protocol_features(const vector feature_digests); - void schedule_all_builtin_protocol_features(); + void preactivate_all_builtin_protocol_features(); protected: signed_block_ptr _produce_block( fc::microseconds skip_time, bool skip_pending_trxs = false, uint32_t skip_flag = 0 ); diff --git a/libraries/testing/tester.cpp b/libraries/testing/tester.cpp index 4621e13ae28..33b16b1c0a9 100644 --- a/libraries/testing/tester.cpp +++ b/libraries/testing/tester.cpp @@ -146,27 +146,10 @@ namespace eosio { namespace testing { auto schedule_preactivate_protocol_feature = [&]() { auto preactivate_feature_digest = pfm.get_builtin_digest(builtin_protocol_feature_t::preactivate_feature); + FC_ASSERT( preactivate_feature_digest, "PREACTIVATE_FEATURE not found" ); schedule_protocol_features_wo_preactivation( { *preactivate_feature_digest } ); }; - auto schedule_all_builtin_protocol_features = [&]() { - const auto& head_block_num = control->head_block_num(); - // Check all builtins and split them based on whether a preactivation is required or not - vector require_preactivation, without_preactivation; - for (auto itr = builtin_protocol_feature_codenames.begin(); itr != builtin_protocol_feature_codenames.end(); itr++) { - const auto& codename = itr->first; - if (pfm.is_builtin_activated(codename, head_block_num) || !itr->second.subjective_restrictions.enabled) continue; - const digest_type digest = *pfm.get_builtin_digest(codename); - if (itr->second.subjective_restrictions.preactivation_required) { - require_preactivation.emplace_back(digest); - } else { - without_preactivation.emplace_back(digest); - } - } - preactivate_protocol_features(require_preactivation); - schedule_protocol_features_wo_preactivation(without_preactivation); - }; - switch (policy) { case setup_policy::old_bios_only: { set_before_preactivate_bios_contract(); @@ -187,7 +170,7 @@ namespace eosio { namespace 
testing { schedule_preactivate_protocol_feature(); produce_block(); set_bios_contract(); - schedule_all_builtin_protocol_features(); + preactivate_all_builtin_protocol_features(); produce_block(); break; } @@ -971,30 +954,45 @@ namespace eosio { namespace testing { } void base_tester::preactivate_protocol_features(const vector feature_digests) { - for (auto& feature_digest: feature_digests) { - push_action(config::system_account_name, N(preactivate_feature), N(eosio), fc::mutable_variant_object()("feature_digest", feature_digest)); + for( const auto& feature_digest: feature_digests ) { + push_action( config::system_account_name, N(preactivate_feature), config::system_account_name, + fc::mutable_variant_object()("feature_digest", feature_digest) ); } } - void base_tester::schedule_all_builtin_protocol_features() { - const auto&pfm = control->get_protocol_feature_manager(); - const auto& head_block_num = control->head_block_num(); - // Check all builtins and split them based on whether a preactivation is required or not - vector require_preactivation; - vector without_preactivation; - for (auto itr = builtin_protocol_feature_codenames.begin(); itr != builtin_protocol_feature_codenames.end(); itr++) { - const auto& codename = itr->first; - if (pfm.is_builtin_activated(codename, head_block_num) || !itr->second.subjective_restrictions.enabled) continue; - const digest_type digest = *pfm.get_builtin_digest(codename); - if (itr->second.subjective_restrictions.preactivation_required) { - require_preactivation.emplace_back(digest); - } else { - without_preactivation.emplace_back(digest); + void base_tester::preactivate_all_builtin_protocol_features() { + const auto& pfm = control->get_protocol_feature_manager(); + const auto current_block_num = control->head_block_num() + (control->is_building_block() ? 1 : 0); + const auto current_block_time = ( control->is_building_block() ? 
control->pending_block_time() + : control->head_block_time() + fc::milliseconds(config::block_interval_ms) ); + + set preactivation_set; + vector preactivations; + + std::function add_digests = + [&pfm, current_block_num, current_block_time, &preactivation_set, &preactivations, &add_digests] + ( const digest_type& feature_digest ) { + const auto& pf = pfm.get_protocol_feature( feature_digest ); + FC_ASSERT( pf.builtin_feature, "called add_digests on a non-builtin protocol feature" ); + if( !pf.enabled || pf.earliest_allowed_activation_time > current_block_time + || pfm.is_builtin_activated( *pf.builtin_feature, current_block_num ) ) return; + + for( const auto& dependency : pf.dependencies ) { + add_digests( dependency ); } + + auto res = preactivation_set.emplace( feature_digest ); + if( !res.second ) return; + preactivations.emplace_back( feature_digest ); + }; + + for( const auto& f : builtin_protocol_feature_codenames ) { + auto digest = pfm.get_builtin_digest( f.first ); + if( !digest ) continue; + add_digests( *digest ); } - preactivate_protocol_features(require_preactivation); - schedule_protocol_features_wo_preactivation(without_preactivation); + preactivate_protocol_features( preactivations ); } bool fc_exception_message_is::operator()( const fc::exception& ex ) { From 6bd09b1de6d3b7334e2ebcddd6703aac0cdbb879 Mon Sep 17 00:00:00 2001 From: arhag Date: Tue, 12 Mar 2019 16:55:00 -0400 Subject: [PATCH 116/680] for now switch back to using old bios contract in smoke tests until they are updated to initially activate PREACTIVATE_FEATURE --- testnet.template | 4 +++- tests/Cluster.py | 3 ++- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/testnet.template b/testnet.template index f9e793a5c89..637fea419af 100644 --- a/testnet.template +++ b/testnet.template @@ -75,7 +75,9 @@ wcmd create --to-console -n ignition # ------ DO NOT ALTER THE NEXT LINE ------- ###INSERT prodkeys -ecmd set contract eosio unittests/contracts/eosio.bios eosio.bios.wasm eosio.bios.abi +# Use old bios contract for now (switch to new after adding changes to activate PREACTIVATE_FEATURE) +ecmd set contract eosio unittests/contracts/old_versions/v1.6.0-rc3/eosio.bios eosio.bios.wasm eosio.bios.abi +#ecmd set contract eosio unittests/contracts/eosio.bios eosio.bios.wasm eosio.bios.abi # Create required system accounts ecmd create key --to-console diff --git a/tests/Cluster.py b/tests/Cluster.py index e7740d19cd3..b68ea7190f6 100644 --- a/tests/Cluster.py +++ b/tests/Cluster.py @@ -960,7 +960,8 @@ def bootstrap(totalNodes, prodCount, totalProducers, biosHost, biosPort, walletM return None contract="eosio.bios" - contractDir="unittests/contracts/%s" % (contract) + #contractDir="unittests/contracts/%s" % (contract) + contractDir="unittests/contracts/old_versions/v1.6.0-rc3/%s" % (contract) # use old eosio.bios for now wasmFile="%s.wasm" % (contract) abiFile="%s.abi" % (contract) Utils.Print("Publish %s contract" % (contract)) From 5c36f61532f9596e7ce4bff18426293a9f01871c Mon Sep 17 00:00:00 2001 From: arhag Date: Tue, 12 Mar 2019 18:12:01 -0400 Subject: [PATCH 117/680] avoid redundant validate_and_extract_header_extensions in fork_database::add --- libraries/chain/fork_database.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/libraries/chain/fork_database.cpp b/libraries/chain/fork_database.cpp index 20f9d8382a0..1fe7459dfae 100644 --- a/libraries/chain/fork_database.cpp +++ b/libraries/chain/fork_database.cpp @@ -312,10 +312,10 @@ namespace eosio { namespace chain { if( validate ) { 
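             // The change below reuses the header extensions already cached on the
             // block state (header_exts), so fork_database::add no longer re-parses
             // them from the serialized block header on every call.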
try { - auto exts = n->block->validate_and_extract_header_extensions(); + const auto& exts = n->header_exts; if( exts.size() > 0 ) { - auto& new_protocol_features = exts.front().get().protocol_features; + const auto& new_protocol_features = exts.front().get().protocol_features; validator( n->header.timestamp, prev_bh->activated_protocol_features->protocol_features, new_protocol_features ); } } EOS_RETHROW_EXCEPTIONS( fork_database_exception, "serialized fork database is incompatible with configured protocol features" ) From 9ad228bdf75c13e863fd21fc5b16e959305dc1c4 Mon Sep 17 00:00:00 2001 From: arhag Date: Tue, 12 Mar 2019 18:51:41 -0400 Subject: [PATCH 118/680] check for invalid protocol feature activations earlier when producing a block --- libraries/chain/controller.cpp | 17 +++++++++++++---- unittests/protocol_feature_tests.cpp | 2 +- 2 files changed, 14 insertions(+), 5 deletions(-) diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp index 27f41e6939b..73ed8e7b4a5 100644 --- a/libraries/chain/controller.cpp +++ b/libraries/chain/controller.cpp @@ -2301,6 +2301,10 @@ void controller::start_block( block_timestamp_type when, uint16_t confirm_block_ } } + if( new_protocol_feature_activations.size() > 0 ) { + validate_protocol_features( new_protocol_feature_activations ); + } + my->start_block( when, confirm_block_count, new_protocol_feature_activations, block_status::incomplete, optional() ); } @@ -2310,6 +2314,11 @@ void controller::start_block( block_timestamp_type when, const vector& new_protocol_feature_activations ) { validate_db_available_size(); + + if( new_protocol_feature_activations.size() > 0 ) { + validate_protocol_features( new_protocol_feature_activations ); + } + my->start_block( when, confirm_block_count, new_protocol_feature_activations, block_status::incomplete, optional() ); } @@ -2325,10 +2334,10 @@ block_state_ptr controller::finalize_block( const std::function& cur_features, - const vector& new_features ) - { control->check_protocol_features( timestamp, cur_features, new_features ); }, + []( block_timestamp_type timestamp, + const flat_set& cur_features, + const vector& new_features ) + {}, signer_callback ); diff --git a/unittests/protocol_feature_tests.cpp b/unittests/protocol_feature_tests.cpp index 031294277e9..79e48137821 100644 --- a/unittests/protocol_feature_tests.cpp +++ b/unittests/protocol_feature_tests.cpp @@ -40,7 +40,7 @@ BOOST_AUTO_TEST_CASE( activate_preactivate_feature ) try { auto t = c.control->pending_block_time(); c.control->abort_block(); BOOST_REQUIRE_EXCEPTION( c.control->start_block( t, 0, {digest_type()} ), protocol_feature_exception, - fc_exception_message_starts_with( "unrecognized protocol feature with digest:" ) + fc_exception_message_is( "protocol feature with digest '0000000000000000000000000000000000000000000000000000000000000000' is unrecognized" ) ); auto d = pfm.get_builtin_digest( builtin_protocol_feature_t::preactivate_feature ); From 0b00d78708bfad3018147f28c7d56359d9207304 Mon Sep 17 00:00:00 2001 From: arhag Date: Tue, 12 Mar 2019 19:05:08 -0400 Subject: [PATCH 119/680] correct action name for preactivate_protocol_features --- libraries/testing/tester.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/testing/tester.cpp b/libraries/testing/tester.cpp index 33b16b1c0a9..c3cb3425fae 100644 --- a/libraries/testing/tester.cpp +++ b/libraries/testing/tester.cpp @@ -955,7 +955,7 @@ namespace eosio { namespace testing { void base_tester::preactivate_protocol_features(const vector 
feature_digests) { for( const auto& feature_digest: feature_digests ) { - push_action( config::system_account_name, N(preactivate_feature), config::system_account_name, + push_action( config::system_account_name, N(preactivate), config::system_account_name, fc::mutable_variant_object()("feature_digest", feature_digest) ); } } From 625fce66f3a44fae046cb86c718f2223f5c63b8c Mon Sep 17 00:00:00 2001 From: arhag Date: Tue, 12 Mar 2019 19:50:06 -0400 Subject: [PATCH 120/680] add dependency checking to controller::preactivate_feature --- libraries/chain/controller.cpp | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp index 73ed8e7b4a5..9e387b28e3b 100644 --- a/libraries/chain/controller.cpp +++ b/libraries/chain/controller.cpp @@ -2261,6 +2261,21 @@ void controller::preactivate_feature( const digest_type& feature_digest ) { ("digest", feature_digest) ); + auto dependency_checker = [&]( const digest_type& d ) -> bool + { + if( is_protocol_feature_activated( d ) ) return true; + + return ( std::find( gpo.preactivated_protocol_features.begin(), + gpo.preactivated_protocol_features.end(), + d ) != gpo.preactivated_protocol_features.end() ); + }; + + EOS_ASSERT( my->protocol_features.validate_dependencies( feature_digest, dependency_checker ), + protocol_feature_exception, + "not all dependencies of protocol feature with digest '${digest}' have been activated or pre-activated", + ("digest", feature_digest) + ); + my->db.modify( gpo, [&]( auto& gp ) { gp.preactivated_protocol_features.push_back( feature_digest ); } ); From 3628676bf7223262f13b50c222715bc38238fbe9 Mon Sep 17 00:00:00 2001 From: arhag Date: Tue, 12 Mar 2019 23:10:53 -0400 Subject: [PATCH 121/680] bug fix in start_block: improper count to check if all preactivated features were activated --- libraries/chain/controller.cpp | 24 ++++++++++++++---------- 1 file changed, 14 insertions(+), 10 deletions(-) diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp index 9e387b28e3b..49573573859 100644 --- a/libraries/chain/controller.cpp +++ b/libraries/chain/controller.cpp @@ -1341,10 +1341,11 @@ struct controller_impl { bool handled_all_preactivated_features = (num_preactivated_protocol_features == 0); if( new_protocol_feature_activations.size() > 0 ) { - flat_map preactivated_protocol_features; - preactivated_protocol_features.reserve( num_preactivated_protocol_features ); + flat_map activated_protocol_features; + activated_protocol_features.reserve( std::max( num_preactivated_protocol_features, + new_protocol_feature_activations.size() ) ); for( const auto& feature_digest : gpo.preactivated_protocol_features ) { - preactivated_protocol_features.emplace( feature_digest, false ); + activated_protocol_features.emplace( feature_digest, false ); } size_t num_preactivated_features_that_have_activated = 0; @@ -1352,19 +1353,22 @@ struct controller_impl { for( const auto& feature_digest : new_protocol_feature_activations ) { const auto& f = protocol_features.get_protocol_feature( feature_digest ); - if( f.preactivation_required ) { - auto itr = preactivated_protocol_features.find( feature_digest ); - if( itr != preactivated_protocol_features.end() && !itr->second ) { - itr->second = true; - ++num_preactivated_features_that_have_activated; - } + auto res = activated_protocol_features.emplace( feature_digest, true ); + if( !res.second ) { + EOS_ASSERT( res.first->second, block_validate_exception, + "attempted duplicate activation within a single block: 
${digest}", + ("digest", res.first->first) + ); + res.first->second = true; + ++num_preactivated_features_that_have_activated; } if( f.builtin_feature ) { trigger_activation_handler( *f.builtin_feature ); - protocol_features.activate_feature( feature_digest, pbhs.block_num ); } + protocol_features.activate_feature( feature_digest, pbhs.block_num ); + ++bb._num_new_protocol_features_that_have_activated; } From 34ce13df2fb81f49efabd565829d1245c8bef452 Mon Sep 17 00:00:00 2001 From: Greg Lee Date: Wed, 13 Mar 2019 07:34:15 -0400 Subject: [PATCH 122/680] Added EOS Rio's Hyperion History API described in this blog post: https://medium.com/@eosriobrazil/presenting-hyperion-history-api-solution-f8a8fda5865b --- plugins/COMMUNITY.md | 1 + 1 file changed, 1 insertion(+) diff --git a/plugins/COMMUNITY.md b/plugins/COMMUNITY.md index 55cce74a33b..6ec4f66a6e0 100644 --- a/plugins/COMMUNITY.md +++ b/plugins/COMMUNITY.md @@ -17,6 +17,7 @@ Third parties are encouraged to make pull requests to this file (`develop` branc | Chintai ZMQ Watcher | https://github.com/acoutts/chintai-zeromq-watcher-plugin | | Mongo History API | https://github.com/CryptoLions/EOS-mongo-history-API | | State History API | https://github.com/acoutts/EOS-state-history-API | +| Hyperion History API | https://github.com/eosrio/Hyperion-History-API | ## DISCLAIMER: From a71d76174825fefdcd71a2946eecb24ebeb1ec2d Mon Sep 17 00:00:00 2001 From: Nathan Pierce Date: Wed, 13 Mar 2019 08:50:36 -0400 Subject: [PATCH 123/680] pipeline changes (#6920) --- .buildkite/coverage.yml | 22 +++++++++------------- CMakeLists.txt | 2 +- tests/CMakeLists.txt | 2 +- 3 files changed, 11 insertions(+), 15 deletions(-) diff --git a/.buildkite/coverage.yml b/.buildkite/coverage.yml index 190c9c7f5f9..ded8b3651e5 100644 --- a/.buildkite/coverage.yml +++ b/.buildkite/coverage.yml @@ -1,17 +1,16 @@ steps: - - command: | + - label: ":spiral_note_pad: Generate Report" + command: | + echo "--- :hammer: Ensuring lcov is installed" && apt-get install -y lcov && \ echo "--- :hammer: Building" && \ - /usr/bin/cmake -GNinja -DCMAKE_BUILD_TYPE=Debug -DCMAKE_CXX_COMPILER=clang++-4.0 -DCMAKE_C_COMPILER=clang-4.0 -DBOOST_ROOT="${BOOST_ROOT}" -DWASM_ROOT="${WASM_ROOT}" -DOPENSSL_ROOT_DIR="${OPENSSL_ROOT_DIR}" -DBUILD_MONGO_DB_PLUGIN=true -DENABLE_COVERAGE_TESTING=true -DBUILD_DOXYGEN=false && \ - /usr/bin/ninja + cmake -GNinja -DCMAKE_BUILD_TYPE=Debug -DCMAKE_CXX_COMPILER=clang++-4.0 -DCMAKE_C_COMPILER=clang-4.0 -DBOOST_ROOT="${BOOST_ROOT}" -DOPENSSL_ROOT_DIR="${OPENSSL_ROOT_DIR}" -DBUILD_MONGO_DB_PLUGIN=true -DENABLE_COVERAGE_TESTING=true -DBUILD_DOXYGEN=false && \ + ninja && \ echo "--- :spiral_note_pad: Generating Code Coverage Report" && \ - /usr/bin/ninja EOSIO_ut_coverage && \ + ninja EOSIO_ut_coverage && \ echo "--- :arrow_up: Publishing Code Coverage Report" && \ buildkite-agent artifact upload "EOSIO_ut_coverage/**/*" s3://eos-coverage/$BUILDKITE_JOB_ID && \ - cp /config/.coveralls.yml . 
&& \ - /usr/local/bin/coveralls-lcov EOSIO_ut_coverage_filtered.info && \ echo "+++ View Report" && \ printf "\033]1339;url=https://eos-coverage.s3-us-west-2.amazonaws.com/$BUILDKITE_JOB_ID/EOSIO_ut_coverage/index.html;content=View Full Coverage Report\a\n" - label: ":spiral_note_pad: Generate Report" agents: queue: "automation-large-builder-fleet" plugins: @@ -20,16 +19,13 @@ steps: account_ids: "436617320021" no-include-email: true region: "us-west-2" - docker#v2.1.0: + docker#v3.0.1: debug: true image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-1" workdir: /data/job - mounts: - - /etc/buildkite-agent/config:/config environment: + - LCOV_PATH=/usr/bin/lcov - BOOST_ROOT=/root/opt/boost - OPENSSL_ROOT_DIR=/usr/include/openssl - - WASM_ROOT=/root/opt/wasm - - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/root/opt/wasm/bin - - CI=true + - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/root/opt/mongodb/bin:~/opt/llvm/bin/ timeout: 60 diff --git a/CMakeLists.txt b/CMakeLists.txt index 997ae2c1e65..0d54f35526c 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -192,7 +192,7 @@ set(ENABLE_COVERAGE_TESTING FALSE CACHE BOOL "Build EOSIO for code coverage anal if(ENABLE_COVERAGE_TESTING) SET(CMAKE_CXX_FLAGS "--coverage ${CMAKE_CXX_FLAGS}") - find_program( LCOV_PATH lcov ) + find_program( LCOV_PATH lcov ) find_program( LLVMCOV_PATH llvm-cov ) find_program( GENHTML_PATH NAMES genhtml) endif() diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index 68116bab863..ae9b36bcd68 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -117,7 +117,7 @@ if(ENABLE_COVERAGE_TESTING) if(NOT LLVMCOV_PATH) message(FATAL_ERROR "llvm-cov not found! Aborting...") - endif() # NOT LCOV_PATH + endif() if(NOT GENHTML_PATH) message(FATAL_ERROR "genhtml not found! 
Aborting...") From a4bde9008189d46b6559993addd844bdbde223db Mon Sep 17 00:00:00 2001 From: arhag Date: Wed, 13 Mar 2019 19:33:43 -0400 Subject: [PATCH 124/680] add get_supported_protocol_features to producer_api_plugin --- .../chain/include/eosio/chain/exceptions.hpp | 4 +- .../eosio/chain/protocol_feature_manager.hpp | 10 ++++- libraries/chain/protocol_feature_manager.cpp | 40 +++++++++++++++++++ libraries/testing/tester.cpp | 5 ++- plugins/chain_plugin/chain_plugin.cpp | 4 +- .../producer_api_plugin.cpp | 3 ++ .../eosio/producer_plugin/producer_plugin.hpp | 8 ++++ plugins/producer_plugin/producer_plugin.cpp | 39 ++++++++++++++++++ 8 files changed, 106 insertions(+), 7 deletions(-) diff --git a/libraries/chain/include/eosio/chain/exceptions.hpp b/libraries/chain/include/eosio/chain/exceptions.hpp index 848dcab6654..fce1762ead0 100644 --- a/libraries/chain/include/eosio/chain/exceptions.hpp +++ b/libraries/chain/include/eosio/chain/exceptions.hpp @@ -527,8 +527,8 @@ namespace eosio { namespace chain { FC_DECLARE_DERIVED_EXCEPTION( protocol_feature_exception, chain_exception, 3250000, "Protocol feature exception" ) - FC_DECLARE_DERIVED_EXCEPTION( protocol_feature_validation_exception, snapshot_exception, + FC_DECLARE_DERIVED_EXCEPTION( protocol_feature_validation_exception, protocol_feature_exception, 3250001, "Protocol feature validation exception" ) - FC_DECLARE_DERIVED_EXCEPTION( protocol_feature_bad_block_exception, snapshot_exception, + FC_DECLARE_DERIVED_EXCEPTION( protocol_feature_bad_block_exception, protocol_feature_exception, 3250002, "Protocol feature exception (invalid block)" ) } } // eosio::chain diff --git a/libraries/chain/include/eosio/chain/protocol_feature_manager.hpp b/libraries/chain/include/eosio/chain/protocol_feature_manager.hpp index 07653951213..8db07009591 100644 --- a/libraries/chain/include/eosio/chain/protocol_feature_manager.hpp +++ b/libraries/chain/include/eosio/chain/protocol_feature_manager.hpp @@ -57,7 +57,7 @@ class protocol_feature_base : public fc::reflect_init { class builtin_protocol_feature : public protocol_feature_base { public: - static constexpr const char* feature_type_string = "builtin"; + static const char* feature_type_string; builtin_protocol_feature() = default; @@ -95,12 +95,15 @@ class protocol_feature_manager { struct protocol_feature { digest_type feature_digest; + digest_type description_digest; flat_set dependencies; time_point earliest_allowed_activation_time; bool preactivation_required = false; bool enabled = false; optional builtin_feature; + fc::variant to_variant( bool include_subjective_restrictions = true )const; + friend bool operator <( const protocol_feature& lhs, const protocol_feature& rhs ) { return lhs.feature_digest < rhs.feature_digest; } @@ -114,6 +117,10 @@ class protocol_feature_manager { } }; + using protocol_feature_set_type = std::set>; + + const protocol_feature_set_type& get_protocol_feature_set()const { return _recognized_protocol_features; } + recognized_t is_recognized( const digest_type& feature_digest, time_point now )const; const protocol_feature& get_protocol_feature( const digest_type& feature_digest )const; @@ -136,7 +143,6 @@ class protocol_feature_manager { void popped_blocks_to( uint32_t block_num ); protected: - using protocol_feature_set_type = std::set>; struct builtin_protocol_feature_entry { static constexpr uint32_t not_active = std::numeric_limits::max(); diff --git a/libraries/chain/protocol_feature_manager.cpp b/libraries/chain/protocol_feature_manager.cpp index d565cf53a8f..45a59699b56 
100644 --- a/libraries/chain/protocol_feature_manager.cpp +++ b/libraries/chain/protocol_feature_manager.cpp @@ -74,6 +74,8 @@ Pre-activated protocol features must be activated in the next block. } } + const char* builtin_protocol_feature::feature_type_string = "builtin"; + builtin_protocol_feature::builtin_protocol_feature( builtin_protocol_feature_t codename, const digest_type& description_digest, flat_set&& dependencies, @@ -115,6 +117,43 @@ Pre-activated protocol features must be activated in the next block. return enc.result(); } + fc::variant protocol_feature_manager::protocol_feature::to_variant( bool include_subjective_restrictions )const { + EOS_ASSERT( builtin_feature, protocol_feature_exception, "not a builtin protocol feature" ); + + fc::mutable_variant_object mvo; + + mvo( "feature_digest", feature_digest ); + + if( include_subjective_restrictions ) { + fc::mutable_variant_object subjective_restrictions; + + subjective_restrictions( "enabled", enabled ); + subjective_restrictions( "preactivation_required", preactivation_required ); + subjective_restrictions( "earliest_allowed_activation_time", earliest_allowed_activation_time ); + + mvo( "subjective_restrictions", std::move( subjective_restrictions ) ); + } + + mvo( "description_digest", description_digest ); + mvo( "dependencies", dependencies ); + mvo( "protocol_feature_type", builtin_protocol_feature::feature_type_string ); + + fc::variants specification; + auto add_to_specification = [&specification]( const char* key_name, auto&& value ) { + fc::mutable_variant_object obj; + obj( "name", key_name ); + obj( "value", std::forward( value ) ); + specification.emplace_back( std::move(obj) ); + }; + + + add_to_specification( "builtin_feature_codename", builtin_protocol_feature_codename( *builtin_feature ) ); + + mvo( "specification", std::move( specification ) ); + + return fc::variant( std::move(mvo) ); + } + protocol_feature_manager::protocol_feature_manager() { _builtin_protocol_features.reserve( builtin_protocol_feature_codenames.size() ); } @@ -286,6 +325,7 @@ Pre-activated protocol features must be activated in the next block. 
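   // The added initializer below stores the feature's description_digest in the
   // recognized-feature record, so that the new to_variant() above can report it.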
auto res = _recognized_protocol_features.insert( protocol_feature{ feature_digest, + f.description_digest, f.dependencies, f.subjective_restrictions.earliest_allowed_activation_time, f.subjective_restrictions.preactivation_required, diff --git a/libraries/testing/tester.cpp b/libraries/testing/tester.cpp index c3cb3425fae..dce7adb616d 100644 --- a/libraries/testing/tester.cpp +++ b/libraries/testing/tester.cpp @@ -977,12 +977,13 @@ namespace eosio { namespace testing { if( !pf.enabled || pf.earliest_allowed_activation_time > current_block_time || pfm.is_builtin_activated( *pf.builtin_feature, current_block_num ) ) return; + auto res = preactivation_set.emplace( feature_digest ); + if( !res.second ) return; + for( const auto& dependency : pf.dependencies ) { add_digests( dependency ); } - auto res = preactivation_set.emplace( feature_digest ); - if( !res.second ) return; preactivations.emplace_back( feature_digest ); }; diff --git a/plugins/chain_plugin/chain_plugin.cpp b/plugins/chain_plugin/chain_plugin.cpp index c70898ce5ac..c02c6be9e29 100644 --- a/plugins/chain_plugin/chain_plugin.cpp +++ b/plugins/chain_plugin/chain_plugin.cpp @@ -356,10 +356,12 @@ void clear_chainbase_files( const fc::path& p ) { optional read_builtin_protocol_feature( const fc::path& p ) { try { return fc::json::from_file( p ); - } catch( const protocol_feature_exception& e ) { + } catch( const fc::exception& e ) { wlog( "problem encountered while reading '${path}':\n${details}", ("path", p.generic_string())("details",e.to_detail_string()) ); } catch( ... ) { + dlog( "unknown problem encountered while reading '${path}'", + ("path", p.generic_string()) ); } return {}; } diff --git a/plugins/producer_api_plugin/producer_api_plugin.cpp b/plugins/producer_api_plugin/producer_api_plugin.cpp index e02ec265f5e..611979ed6a4 100644 --- a/plugins/producer_api_plugin/producer_api_plugin.cpp +++ b/plugins/producer_api_plugin/producer_api_plugin.cpp @@ -94,6 +94,9 @@ void producer_api_plugin::plugin_startup() { INVOKE_R_V(producer, get_scheduled_protocol_feature_activations), 201), CALL(producer, producer, schedule_protocol_feature_activations, INVOKE_V_R(producer, schedule_protocol_feature_activations, producer_plugin::scheduled_protocol_feature_activations), 201), + CALL(producer, producer, get_supported_protocol_features, + INVOKE_R_R(producer, get_supported_protocol_features, + producer_plugin::get_supported_protocol_features_params), 201), }); } diff --git a/plugins/producer_plugin/include/eosio/producer_plugin/producer_plugin.hpp b/plugins/producer_plugin/include/eosio/producer_plugin/producer_plugin.hpp index c441f4530ec..55a73279dd2 100644 --- a/plugins/producer_plugin/include/eosio/producer_plugin/producer_plugin.hpp +++ b/plugins/producer_plugin/include/eosio/producer_plugin/producer_plugin.hpp @@ -55,6 +55,11 @@ class producer_plugin : public appbase::plugin { std::vector protocol_features_to_activate; }; + struct get_supported_protocol_features_params { + bool exclude_disabled = false; + bool exclude_unactivatable = false; + }; + producer_plugin(); virtual ~producer_plugin(); @@ -90,6 +95,8 @@ class producer_plugin : public appbase::plugin { scheduled_protocol_feature_activations get_scheduled_protocol_feature_activations() const; void schedule_protocol_feature_activations(const scheduled_protocol_feature_activations& schedule); + fc::variants get_supported_protocol_features( const get_supported_protocol_features_params& params ) const; + signal confirmed_block; private: std::shared_ptr my; @@ -103,3 +110,4 @@ 
+103,4 @@ FC_REFLECT(eosio::producer_plugin::whitelist_blacklist, (actor_whitelist)(actor_
 FC_REFLECT(eosio::producer_plugin::integrity_hash_information, (head_block_id)(integrity_hash))
 FC_REFLECT(eosio::producer_plugin::snapshot_information, (head_block_id)(snapshot_name))
 FC_REFLECT(eosio::producer_plugin::scheduled_protocol_feature_activations, (protocol_features_to_activate))
+FC_REFLECT(eosio::producer_plugin::get_supported_protocol_features_params, (exclude_disabled)(exclude_unactivatable))
diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp
index 3e4a78db58c..23d17f445f6 100644
--- a/plugins/producer_plugin/producer_plugin.cpp
+++ b/plugins/producer_plugin/producer_plugin.cpp
@@ -981,6 +981,45 @@ void producer_plugin::schedule_protocol_feature_activations( const scheduled_pro
    my->_protocol_features_to_activate = schedule.protocol_features_to_activate;
 }

+fc::variants producer_plugin::get_supported_protocol_features( const get_supported_protocol_features_params& params ) const {
+   fc::variants results;
+   const chain::controller& chain = my->chain_plug->chain();
+   const auto& pfm = chain.get_protocol_feature_manager();
+   const auto& pfs = pfm.get_protocol_feature_set();
+   const auto next_block_time = chain.head_block_time() + fc::milliseconds(config::block_interval_ms);
+
+   flat_map<digest_type, bool> visited_protocol_features;
+   visited_protocol_features.reserve( pfs.size() );
+
+   std::function<bool(const protocol_feature_manager::protocol_feature&)> add_feature =
+   [&results, &pfm, &params, next_block_time, &visited_protocol_features, &add_feature]
+   ( const protocol_feature_manager::protocol_feature& pf ) -> bool {
+      if( ( params.exclude_disabled || params.exclude_unactivatable ) && !pf.enabled ) return false;
+      if( params.exclude_unactivatable && ( next_block_time > pf.earliest_allowed_activation_time ) ) return false;
+
+      auto res = visited_protocol_features.emplace( pf.feature_digest, false );
+      if( !res.second ) return res.first->second;
+
+      const auto original_size = results.size();
+      for( const auto& dependency : pf.dependencies ) {
+         if( !add_feature( pfm.get_protocol_feature( dependency ) ) ) {
+            results.resize( original_size );
+            return false;
+         }
+      }
+
+      res.first->second = true;
+      results.emplace_back( pf.to_variant(true) );
+      return true;
+   };
+
+   for( const auto& pf : pfs ) {
+      add_feature( pf );
+   }
+
+   return results;
+}
+
 optional<fc::time_point> producer_plugin_impl::calculate_next_block_time(const account_name& producer_name, const block_timestamp_type& current_block_time) const {
    chain::controller& chain = chain_plug->chain();
    const auto& hbs = chain.head_block_state();

From dc8e70002663c46b3631c8b575c9ce82c39f55cb Mon Sep 17 00:00:00 2001
From: eun2ce
Date: Thu, 14 Mar 2019 14:57:10 +0900
Subject: [PATCH 125/680] remove unused parameter

---
 programs/cleos/main.cpp | 20 ++++++++++----------
 1 file changed, 10 insertions(+), 10 deletions(-)

diff --git a/programs/cleos/main.cpp b/programs/cleos/main.cpp
index bfceb4b40cc..caa24ae5ccf 100644
--- a/programs/cleos/main.cpp
+++ b/programs/cleos/main.cpp
@@ -305,7 +305,7 @@ void sign_transaction(signed_transaction& trx, fc::variant& required_keys, const
    trx = signed_trx.as<signed_transaction>();
 }

-fc::variant push_transaction( signed_transaction& trx, int32_t extra_kcpu = 1000, packed_transaction::compression_type compression = packed_transaction::none ) {
+fc::variant push_transaction( signed_transaction& trx, packed_transaction::compression_type compression = packed_transaction::none ) {
    auto info = get_info();

    if (trx.signatures.size() == 0) { // #5445 can't change txn content if already 
signed @@ -347,11 +347,11 @@ fc::variant push_transaction( signed_transaction& trx, int32_t extra_kcpu = 1000 } } -fc::variant push_actions(std::vector&& actions, int32_t extra_kcpu, packed_transaction::compression_type compression = packed_transaction::none ) { +fc::variant push_actions(std::vector&& actions, packed_transaction::compression_type compression = packed_transaction::none ) { signed_transaction trx; trx.actions = std::forward(actions); - return push_transaction(trx, extra_kcpu, compression); + return push_transaction(trx, compression); } void print_action( const fc::variant& at ) { @@ -497,8 +497,8 @@ void print_result( const fc::variant& result ) { try { } FC_CAPTURE_AND_RETHROW( (result) ) } using std::cout; -void send_actions(std::vector&& actions, int32_t extra_kcpu = 1000, packed_transaction::compression_type compression = packed_transaction::none ) { - auto result = push_actions( move(actions), extra_kcpu, compression); +void send_actions(std::vector&& actions, packed_transaction::compression_type compression = packed_transaction::none ) { + auto result = push_actions( move(actions), compression); if( tx_print_json ) { cout << fc::json::to_pretty_string( result ) << endl; @@ -507,8 +507,8 @@ void send_actions(std::vector&& actions, int32_t extra_kcpu = 100 } } -void send_transaction( signed_transaction& trx, int32_t extra_kcpu, packed_transaction::compression_type compression = packed_transaction::none ) { - auto result = push_transaction(trx, extra_kcpu, compression); +void send_transaction( signed_transaction& trx, packed_transaction::compression_type compression = packed_transaction::none ) { + auto result = push_transaction(trx, compression); if( tx_print_json ) { cout << fc::json::to_pretty_string( result ) << endl; @@ -2971,7 +2971,7 @@ int main( int argc, char** argv ) { actions.emplace_back( create_setcode(account, code_bytes ) ); if ( shouldSend ) { std::cerr << localized("Setting Code...") << std::endl; - send_actions(std::move(actions), 10000, packed_transaction::zlib); + send_actions(std::move(actions), packed_transaction::zlib); } } else { std::cerr << localized("Skipping set code because the new code is the same as the existing code") << std::endl; @@ -3019,7 +3019,7 @@ int main( int argc, char** argv ) { } EOS_RETHROW_EXCEPTIONS(abi_type_exception, "Fail to parse ABI JSON") if ( shouldSend ) { std::cerr << localized("Setting ABI...") << std::endl; - send_actions(std::move(actions), 10000, packed_transaction::zlib); + send_actions(std::move(actions), packed_transaction::zlib); } } else { std::cerr << localized("Skipping set abi because the new abi is the same as the existing abi") << std::endl; @@ -3036,7 +3036,7 @@ int main( int argc, char** argv ) { set_abi_callback(); if (actions.size()) { std::cerr << localized("Publishing contract...") << std::endl; - send_actions(std::move(actions), 10000, packed_transaction::zlib); + send_actions(std::move(actions), packed_transaction::zlib); } else { std::cout << "no transaction is sent" << std::endl; } From caaa5c96ec3a44d5197aa70310109531a2353599 Mon Sep 17 00:00:00 2001 From: Andrianto Lie Date: Thu, 14 Mar 2019 14:27:44 +0800 Subject: [PATCH 126/680] Fix reversed condition in get supported protocol API --- plugins/producer_plugin/producer_plugin.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp index 23d17f445f6..8e966724dec 100644 --- a/plugins/producer_plugin/producer_plugin.cpp +++ 
@@ -995,7 +995,7 @@ fc::variants producer_plugin::get_supported_protocol_features( const get_support
    [&results, &pfm, &params, next_block_time, &visited_protocol_features, &add_feature]
    ( const protocol_feature_manager::protocol_feature& pf ) -> bool {
       if( ( params.exclude_disabled || params.exclude_unactivatable ) && !pf.enabled ) return false;
-      if( params.exclude_unactivatable && ( next_block_time > pf.earliest_allowed_activation_time ) ) return false;
+      if( params.exclude_unactivatable && ( next_block_time < pf.earliest_allowed_activation_time ) ) return false;
 
       auto res = visited_protocol_features.emplace( pf.feature_digest, false );
       if( !res.second ) return res.first->second;

From 488b8e6e0f1e5fdadb0baceb8e4b9aa6dae4ba6c Mon Sep 17 00:00:00 2001
From: Kayan
Date: Thu, 14 Mar 2019 17:22:35 +0800
Subject: [PATCH 127/680] fix a case where a started block gets aborted

---
 plugins/producer_plugin/producer_plugin.cpp | 13 ++++++++-----
 1 file changed, 8 insertions(+), 5 deletions(-)

diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp
index 23d17f445f6..0824dbe3e9d 100644
--- a/plugins/producer_plugin/producer_plugin.cpp
+++ b/plugins/producer_plugin/producer_plugin.cpp
@@ -1194,20 +1194,21 @@ producer_plugin_impl::start_block_result producer_plugin_impl::start_block() {
          if( drop_features_to_activate ) {
             _protocol_features_to_activate.clear();
          } else {
+            auto protocol_features_to_activate = _protocol_features_to_activate; // do a copy as pending_block might be aborted
             if( features_to_activate.size() > 0 ) {
-               _protocol_features_to_activate.reserve( _protocol_features_to_activate.size()
+               protocol_features_to_activate.reserve( protocol_features_to_activate.size()
                                                        + features_to_activate.size() );
-               std::set<digest_type> set_of_features_to_activate( _protocol_features_to_activate.begin(),
-                                                                  _protocol_features_to_activate.end() );
+               std::set<digest_type> set_of_features_to_activate( protocol_features_to_activate.begin(),
+                                                                  protocol_features_to_activate.end() );
                for( const auto& f : features_to_activate ) {
                   auto res = set_of_features_to_activate.insert( f );
                   if( res.second ) {
-                     _protocol_features_to_activate.push_back( f );
+                     protocol_features_to_activate.push_back( f );
                   }
                }
                features_to_activate.clear();
             }
-            std::swap( features_to_activate, _protocol_features_to_activate );
+            std::swap( features_to_activate, protocol_features_to_activate );
             ilog( "signaling activation of the following protocol features in block ${num}: ${features_to_activate}",
                   ("num", hbs->block_num + 1)("features_to_activate", features_to_activate) );
          }
@@ -1662,6 +1663,8 @@ void producer_plugin_impl::produce_block() {
    EOS_ASSERT(signature_provider_itr != _signature_providers.end(), producer_priv_key_not_found, "Attempting to produce a block for which we don't have the private key");
 
+   _protocol_features_to_activate.clear(); // clear _protocol_features_to_activate as it is already set in pending_block
+
    //idump( (fc::time_point::now() - chain.pending_block_time()) );
    chain.finalize_block( [&]( const digest_type& d ) {
       auto debug_logger = maybe_make_debug_time_logger();

From 9f1ac1b26070b5bbc7c661613a2ffd8f6fca19d7 Mon Sep 17 00:00:00 2001
From: Greg Lee
Date: Thu, 14 Mar 2019 05:55:53 -0400
Subject: [PATCH 128/680] Add Chronicle to COMMUNITY.md

---
 plugins/COMMUNITY.md | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/plugins/COMMUNITY.md b/plugins/COMMUNITY.md
index 6ec4f66a6e0..bff0cbddbb3 100644
--- a/plugins/COMMUNITY.md
+++ 
b/plugins/COMMUNITY.md @@ -1,8 +1,8 @@ # Community Plugin List -This file contains a list of community authored plugins for `nodeos`, acting as a directory of the plugins that are available. +This file contains a list of community authored plugins for `nodeos` and APIs/tools that are associated with plugins, acting as a directory of the community authored plugins that are available. -Third parties are encouraged to make pull requests to this file (`develop` branch please) in order to list new plugins. +Third parties are encouraged to make pull requests to this file (`develop` branch please) in order to list new related projects. | Description | URL | | ----------- | --- | @@ -18,7 +18,8 @@ Third parties are encouraged to make pull requests to this file (`develop` branc | Mongo History API | https://github.com/CryptoLions/EOS-mongo-history-API | | State History API | https://github.com/acoutts/EOS-state-history-API | | Hyperion History API | https://github.com/eosrio/Hyperion-History-API | +| Chronicle | https://github.com/EOSChronicleProject/eos-chronicle | ## DISCLAIMER: -The fact that a plugin is listed in this file does not mean the plugin has been reviewed by this repository's maintainers. No warranties are made, i.e. you are at your own risk if you choose to use them. +The fact that a plugin or API/tool is listed in this file does not mean it has been reviewed by this repository's maintainers. No warranties are made, i.e. you are at your own risk if you choose to use them. From 21f11fa50c9b7865e3511167cd248a157259596c Mon Sep 17 00:00:00 2001 From: Greg Lee Date: Thu, 14 Mar 2019 06:03:04 -0400 Subject: [PATCH 129/680] Update disclaimer in COMMUNITY.md --- plugins/COMMUNITY.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/COMMUNITY.md b/plugins/COMMUNITY.md index bff0cbddbb3..f47cefa8e63 100644 --- a/plugins/COMMUNITY.md +++ b/plugins/COMMUNITY.md @@ -22,4 +22,4 @@ Third parties are encouraged to make pull requests to this file (`develop` branc ## DISCLAIMER: -The fact that a plugin or API/tool is listed in this file does not mean it has been reviewed by this repository's maintainers. No warranties are made, i.e. you are at your own risk if you choose to use them. +The resources listed here are developed, offered and maintained by third-parties and not by block.one. Providing information, material or commentaries about such third-party resources does not mean we endorse or recommend any of these resources. We are not responsible, and disclaim any responsibility or liability, for your use of or reliance on any of these resources. Third-party resources may be updated, changed or terminated at any time, so the information here may be out of date or inaccurate. USAGE AND RELIANCE IS ENTIRELY AT YOUR OWN RISK. 
From 421bc24600440425ce3488adf637d5823159a9c8 Mon Sep 17 00:00:00 2001
From: arhag
Date: Thu, 14 Mar 2019 10:45:25 -0400
Subject: [PATCH 130/680] fix duplication check bug in
 controller_impl::start_block

---
 libraries/chain/controller.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp
index 49573573859..de071f79d26 100644
--- a/libraries/chain/controller.cpp
+++ b/libraries/chain/controller.cpp
@@ -1355,7 +1355,7 @@ struct controller_impl {
 
             auto res = activated_protocol_features.emplace( feature_digest, true );
             if( !res.second ) {
-               EOS_ASSERT( res.first->second, block_validate_exception,
+               EOS_ASSERT( !res.first->second, block_validate_exception,
                            "attempted duplicate activation within a single block: ${digest}",
                            ("digest", res.first->first)
                );

From 1b5cece9dde0ec8b818ca1a72a8611938703017c Mon Sep 17 00:00:00 2001
From: arhag
Date: Thu, 14 Mar 2019 16:43:01 -0400
Subject: [PATCH 131/680] add ONLY_LINK_TO_EXISTING_PERMISSION protocol feature
 (enables writing unit tests that test preactivation); bug fixes in
 validating_tester

---
 libraries/chain/eosio_contract.cpp            | 10 +++-
 .../eosio/chain/protocol_feature_manager.hpp  |  1 +
 libraries/chain/protocol_feature_manager.cpp  | 11 ++++
 .../testing/include/eosio/testing/tester.hpp  |  6 +-
 unittests/protocol_feature_tests.cpp          | 58 +++++++++++++++++++
 5 files changed, 84 insertions(+), 2 deletions(-)

diff --git a/libraries/chain/eosio_contract.cpp b/libraries/chain/eosio_contract.cpp
index 03e0fed7f7f..cc303bb50cd 100644
--- a/libraries/chain/eosio_contract.cpp
+++ b/libraries/chain/eosio_contract.cpp
@@ -315,7 +315,15 @@ void apply_eosio_linkauth(apply_context& context) {
       EOS_ASSERT(code != nullptr, account_query_exception,
                  "Failed to retrieve code for account: ${account}", ("account", requirement.code));
       if( requirement.requirement != config::eosio_any_name ) {
-         const auto *permission = db.find<permission_object, by_name>(requirement.requirement);
+         const permission_object* permission = nullptr;
+         if( context.control.is_builtin_activated( builtin_protocol_feature_t::only_link_to_existing_permission ) ) {
+            permission = db.find<permission_object, by_owner>(
+                           boost::make_tuple( requirement.account, requirement.requirement )
+                         );
+         } else {
+            permission = db.find<permission_object, by_name>(requirement.requirement);
+         }
+
          EOS_ASSERT(permission != nullptr, permission_query_exception,
                     "Failed to retrieve permission: ${permission}", ("permission", requirement.requirement));
       }

diff --git a/libraries/chain/include/eosio/chain/protocol_feature_manager.hpp b/libraries/chain/include/eosio/chain/protocol_feature_manager.hpp
index 8db07009591..2ed374d05b7 100644
--- a/libraries/chain/include/eosio/chain/protocol_feature_manager.hpp
+++ b/libraries/chain/include/eosio/chain/protocol_feature_manager.hpp
@@ -14,6 +14,7 @@ enum class protocol_feature_t : uint32_t {
 
 enum class builtin_protocol_feature_t : uint32_t {
    preactivate_feature,
+   only_link_to_existing_permission
 };
 
 struct protocol_feature_subjective_restrictions {

diff --git a/libraries/chain/protocol_feature_manager.cpp b/libraries/chain/protocol_feature_manager.cpp
index 45a59699b56..c58c4719a9f 100644
--- a/libraries/chain/protocol_feature_manager.cpp
+++ b/libraries/chain/protocol_feature_manager.cpp
@@ -27,6 +27,17 @@ Pre-activated protocol features must be activated in the next block.
          {},
          {time_point{}, false, true} // enabled without preactivation and ready to go at any time
       } )
+      ( builtin_protocol_feature_t::only_link_to_existing_permission, builtin_protocol_feature_spec{
+         "ONLY_LINK_TO_EXISTING_PERMISSION",
+         fc::variant("f3c3d91c4603cde2397268bfed4e662465293aab10cd9416db0d442b8cec2949").as<digest_type>(),
+         // SHA256 hash of the raw message below within the comment delimiters (do not modify message below).
+/*
+Builtin protocol feature: ONLY_LINK_TO_EXISTING_PERMISSION
+
+Disallows linking an action to a non-existing permission.
+*/
+         {}
+      } )
    ;

diff --git a/libraries/testing/include/eosio/testing/tester.hpp b/libraries/testing/include/eosio/testing/tester.hpp
index 15c48c38bb3..29eba224d45 100644
--- a/libraries/testing/include/eosio/testing/tester.hpp
+++ b/libraries/testing/include/eosio/testing/tester.hpp
@@ -350,6 +350,10 @@ namespace eosio { namespace testing {
    class validating_tester : public base_tester {
    public:
       virtual ~validating_tester() {
+         if( !validating_node ) {
+            elog( "~validating_tester() called with empty validating_node; likely in the middle of failure" );
+            return;
+         }
          try {
            if( num_blocks_to_producer_before_shutdown > 0 )
               produce_blocks( num_blocks_to_producer_before_shutdown );
@@ -454,7 +458,7 @@ namespace eosio { namespace testing {
                  hbh.producer == vn_hbh.producer;
 
         validating_node.reset();
-        validating_node = std::make_unique<controller>(vcfg);
+        validating_node = std::make_unique<controller>(vcfg, make_protocol_feature_manager());
         validating_node->add_indices();
         validating_node->startup( []() { return false; } );

diff --git a/unittests/protocol_feature_tests.cpp b/unittests/protocol_feature_tests.cpp
index 79e48137821..40ababa7bad 100644
--- a/unittests/protocol_feature_tests.cpp
+++ b/unittests/protocol_feature_tests.cpp
@@ -76,6 +76,64 @@ BOOST_AUTO_TEST_CASE( activate_preactivate_feature ) try {
 
 } FC_LOG_AND_RETHROW()
 
+BOOST_AUTO_TEST_CASE( only_link_to_existing_permission_test ) try {
+   tester c(setup_policy::preactivate_feature_and_new_bios, db_read_mode::SPECULATIVE);
+   const auto& pfm = c.control->get_protocol_feature_manager();
+
+   auto d = pfm.get_builtin_digest( builtin_protocol_feature_t::only_link_to_existing_permission );
+   BOOST_REQUIRE( d );
+
+   c.create_accounts( {N(alice), N(bob), N(charlie)} );
+
+   BOOST_CHECK_EXCEPTION(  c.push_action( config::system_account_name, N(linkauth), N(bob), fc::mutable_variant_object()
+                              ("account", "bob")
+                              ("code", name(config::system_account_name))
+                              ("type", "")
+                              ("requirement", "test" )
+                           ), permission_query_exception,
+                           fc_exception_message_is( "Failed to retrieve permission: test" )
+   );
+
+   BOOST_CHECK_EXCEPTION(  c.push_action( config::system_account_name, N(linkauth), N(charlie), fc::mutable_variant_object()
+                              ("account", "charlie")
+                              ("code", name(config::system_account_name))
+                              ("type", "")
+                              ("requirement", "test" )
+                           ), permission_query_exception,
+                           fc_exception_message_is( "Failed to retrieve permission: test" )
+   );
+
+   c.push_action( config::system_account_name, N(updateauth), N(alice), fc::mutable_variant_object()
+      ("account", "alice")
+      ("permission", "test")
+      ("parent", "active")
+      ("auth", authority(get_public_key("testapi", "test")))
+   );
+
+   c.produce_block();
+
+   // Verify the incorrect behavior prior to ONLY_LINK_TO_EXISTING_PERMISSION activation.
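+   // (Before activation, linkauth resolves the requirement through the by_name index
+   // alone, so bob's link below succeeds even though the "test" permission exists
+   // only under alice's account.)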
+   c.push_action( config::system_account_name, N(linkauth), N(bob), fc::mutable_variant_object()
+      ("account", "bob")
+      ("code", name(config::system_account_name))
+      ("type", "")
+      ("requirement", "test" )
+   );
+
+   c.preactivate_protocol_features( {*d} );
+   c.produce_block();
+
+   // Verify the correct behavior after ONLY_LINK_TO_EXISTING_PERMISSION activation.
+   BOOST_CHECK_EXCEPTION(  c.push_action( config::system_account_name, N(linkauth), N(charlie), fc::mutable_variant_object()
+                              ("account", "charlie")
+                              ("code", name(config::system_account_name))
+                              ("type", "")
+                              ("requirement", "test" )
+                           ), permission_query_exception,
+                           fc_exception_message_is( "Failed to retrieve permission: test" )
+   );
+
+} FC_LOG_AND_RETHROW()

 BOOST_AUTO_TEST_SUITE_END()

From a7158b1b9d2e13e098b4150207cf83cc6092c0e0 Mon Sep 17 00:00:00 2001
From: arhag
Date: Thu, 14 Mar 2019 18:23:09 -0400
Subject: [PATCH 132/680] add additional unit tests for protocol activation;
 fix bug that led to inconsistent state due to controller_impl::start_block
 exception

---
 libraries/chain/controller.cpp                |  3 +-
 .../testing/include/eosio/testing/tester.hpp  |  1 +
 unittests/protocol_feature_tests.cpp          | 65 ++++++++++++++++++-
 3 files changed, 65 insertions(+), 4 deletions(-)

diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp
index de071f79d26..62231b28901 100644
--- a/libraries/chain/controller.cpp
+++ b/libraries/chain/controller.cpp
@@ -1313,7 +1313,8 @@ struct controller_impl {
    {
      EOS_ASSERT( !pending, block_validate_exception, "pending block already exists" );

-      auto guard_pending = fc::make_scoped_exit([this](){
+      auto guard_pending = fc::make_scoped_exit([this, head_block_num=head->block_num](){
+         protocol_features.popped_blocks_to( head_block_num );
         pending.reset();
      });

diff --git a/libraries/testing/include/eosio/testing/tester.hpp b/libraries/testing/include/eosio/testing/tester.hpp
index 29eba224d45..3ee3208a39a 100644
--- a/libraries/testing/include/eosio/testing/tester.hpp
+++ b/libraries/testing/include/eosio/testing/tester.hpp
@@ -314,6 +314,7 @@ namespace eosio { namespace testing {
         controller::config                            cfg;
         map<transaction_id_type, transaction_receipt> chain_transactions;
         map<account_name, block_id_type>              last_produced_block;
+      public: vector<digest_type> protocol_features_to_be_activated_wo_preactivation;
    };

diff --git a/unittests/protocol_feature_tests.cpp b/unittests/protocol_feature_tests.cpp
index 40ababa7bad..6d5fb9bd4bc 100644
--- a/unittests/protocol_feature_tests.cpp
+++ b/unittests/protocol_feature_tests.cpp
@@ -22,8 +22,7 @@ using namespace eosio::testing;
 BOOST_AUTO_TEST_SUITE(protocol_feature_tests)

 BOOST_AUTO_TEST_CASE( activate_preactivate_feature ) try {
-   tester c(setup_policy::none, db_read_mode::SPECULATIVE);
-
+   tester c( setup_policy::none );
    const auto& pfm = c.control->get_protocol_feature_manager();

    c.produce_block();
@@ -76,8 +75,68 @@ BOOST_AUTO_TEST_CASE( activate_preactivate_feature ) try {

 } FC_LOG_AND_RETHROW()

+BOOST_AUTO_TEST_CASE( double_preactivation ) try {
+   tester c( setup_policy::preactivate_feature_and_new_bios );
+   const auto& pfm = c.control->get_protocol_feature_manager();
+
+   auto d = pfm.get_builtin_digest( builtin_protocol_feature_t::only_link_to_existing_permission );
+   BOOST_REQUIRE( d );
+
+   c.push_action( config::system_account_name, N(preactivate), config::system_account_name,
+                  fc::mutable_variant_object()("feature_digest", *d), 10 );
+
+   std::string expected_error_msg("protocol feature with digest '");
+   {
+      fc::variant v;
+      to_variant( *d, v );
+      expected_error_msg += v.get_string();
+      expected_error_msg += "' is already pre-activated";
pre-activated"; + } + + BOOST_CHECK_EXCEPTION( c.push_action( config::system_account_name, N(preactivate), config::system_account_name, + fc::mutable_variant_object()("feature_digest", *d), 20 ), + protocol_feature_exception, + fc_exception_message_is( expected_error_msg ) + ); + +} FC_LOG_AND_RETHROW() + +BOOST_AUTO_TEST_CASE( double_activation ) try { + tester c( setup_policy::preactivate_feature_and_new_bios ); + const auto& pfm = c.control->get_protocol_feature_manager(); + + auto d = pfm.get_builtin_digest( builtin_protocol_feature_t::only_link_to_existing_permission ); + BOOST_REQUIRE( d ); + + BOOST_CHECK( !c.control->is_builtin_activated( builtin_protocol_feature_t::only_link_to_existing_permission ) ); + + c.preactivate_protocol_features( {*d} ); + + BOOST_CHECK( !c.control->is_builtin_activated( builtin_protocol_feature_t::only_link_to_existing_permission ) ); + + c.schedule_protocol_features_wo_preactivation( {*d} ); + + BOOST_CHECK_EXCEPTION( c.produce_block();, + block_validate_exception, + fc_exception_message_starts_with( "attempted duplicate activation within a single block:" ) + ); + + c.protocol_features_to_be_activated_wo_preactivation.clear(); + + BOOST_CHECK( !c.control->is_builtin_activated( builtin_protocol_feature_t::only_link_to_existing_permission ) ); + + c.produce_block(); + + BOOST_CHECK( c.control->is_builtin_activated( builtin_protocol_feature_t::only_link_to_existing_permission ) ); + + c.produce_block(); + + BOOST_CHECK( c.control->is_builtin_activated( builtin_protocol_feature_t::only_link_to_existing_permission ) ); + +} FC_LOG_AND_RETHROW() + BOOST_AUTO_TEST_CASE( only_link_to_existing_permission_test ) try { - tester c(setup_policy::preactivate_feature_and_new_bios, db_read_mode::SPECULATIVE); + tester c( setup_policy::preactivate_feature_and_new_bios ); const auto& pfm = c.control->get_protocol_feature_manager(); auto d = pfm.get_builtin_digest( builtin_protocol_feature_t::only_link_to_existing_permission ); From bb9ce200ed1c6e71132c12cdeb6310807fcabf3a Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Thu, 14 Mar 2019 08:12:31 -0500 Subject: [PATCH 133/680] Added launcher flags to support supplying alternate installation paths. GH #6879. --- programs/eosio-launcher/main.cpp | 56 ++++++++++++++++++++------------ 1 file changed, 36 insertions(+), 20 deletions(-) diff --git a/programs/eosio-launcher/main.cpp b/programs/eosio-launcher/main.cpp index 66b40819b9d..8a3a75a721b 100644 --- a/programs/eosio-launcher/main.cpp +++ b/programs/eosio-launcher/main.cpp @@ -408,6 +408,7 @@ struct launcher_def { bool skip_transaction_signatures = false; string eosd_extra_args; std::map specific_nodeos_args; + std::map specific_nodeos_installation_paths; testnet_def network; string gelf_endpoint; vector aliases; @@ -488,8 +489,10 @@ launcher_def::set_options (bpo::options_description &cfg) { ("genesis,g",bpo::value()->default_value("./genesis.json"),"set the path to genesis.json") ("skip-signature", bpo::bool_switch(&skip_transaction_signatures)->default_value(false), "nodeos does not require transaction signatures.") ("nodeos", bpo::value(&eosd_extra_args), "forward nodeos command line argument(s) to each instance of nodeos, enclose arg(s) in quotes") - ("specific-num", bpo::value>()->composing(), "forward nodeos command line argument(s) (using \"--specific-nodeos\" flag) to this specific instance of nodeos. 
This parameter can be entered multiple times and requires a paired \"--specific-nodeos\" flag") + ("specific-num", bpo::value>()->composing(), "forward nodeos command line argument(s) (using \"--specific-nodeos\" flag) to this specific instance of nodeos. This parameter can be entered multiple times and requires a paired \"--specific-nodeos\" flag each time it is used") ("specific-nodeos", bpo::value>()->composing(), "forward nodeos command line argument(s) to its paired specific instance of nodeos(using \"--specific-num\"), enclose arg(s) in quotes") + ("spcfc-inst-num", bpo::value>()->composing(), "Specify a specific version installation path (using \"--spcfc-inst-nodeos\" flag) for launching this specific instance of nodeos. This parameter can be entered multiple times and requires a paired \"--spcfc-inst-nodeos\" flag each time it is used") + ("spcfc-inst-nodeos", bpo::value>()->composing(), "Provide a specific version installation path to its paired specific instance of nodeos(using \"--spcfc-inst-num\")") ("delay,d",bpo::value(&start_delay)->default_value(0),"seconds delay before starting each node after the first") ("boot",bpo::bool_switch(&boot)->default_value(false),"After deploying the nodes and generating a boot script, invoke it.") ("nogen",bpo::bool_switch(&nogen)->default_value(false),"launch nodes without writing new config files") @@ -513,6 +516,28 @@ inline enum_type& operator|=(enum_type&lhs, const enum_type& rhs) return lhs = static_cast(static_cast(lhs) | static_cast(rhs)); } +template +void retrieve_paired_array_parameters (const variables_map &vmap, const std::string& num_selector, const std::string& paired_selector, std::map& selector_map) { + if (vmap.count(num_selector)) { + const auto specific_nums = vmap[num_selector].as>(); + const auto specific_args = vmap[paired_selector].as>(); + if (specific_nums.size() != specific_args.size()) { + cerr << "ERROR: every " << num_selector << " argument must be paired with a " << paired_selector << " argument" << endl; + exit (-1); + } + const auto total_nodes = vmap["nodes"].as(); + for(uint i = 0; i < specific_nums.size(); ++i) + { + const auto& num = specific_nums[i]; + if (num >= total_nodes) { + cerr << "\"--" << num_selector << "\" provided value= " << num << " is higher than \"--nodes\" provided value=" << total_nodes << endl; + exit (-1); + } + selector_map[num] = specific_args[i]; + } + } +} + void launcher_def::initialize (const variables_map &vmap) { if (vmap.count("mode")) { @@ -550,24 +575,8 @@ launcher_def::initialize (const variables_map &vmap) { server_ident_file = vmap["servers"].as(); } - if (vmap.count("specific-num")) { - const auto specific_nums = vmap["specific-num"].as>(); - const auto specific_args = vmap["specific-nodeos"].as>(); - if (specific_nums.size() != specific_args.size()) { - cerr << "ERROR: every specific-num argument must be paired with a specific-nodeos argument" << endl; - exit (-1); - } - const auto total_nodes = vmap["nodes"].as(); - for(uint i = 0; i < specific_nums.size(); ++i) - { - const auto& num = specific_nums[i]; - if (num >= total_nodes) { - cerr << "\"--specific-num\" provided value= " << num << " is higher than \"--nodes\" provided value=" << total_nodes << endl; - exit (-1); - } - specific_nodeos_args[num] = specific_args[i]; - } - } + retrieve_paired_array_parameters(vmap, "specific-num", "specific-nodeos", specific_nodeos_args); + retrieve_paired_array_parameters(vmap, "spcfc-inst-num", "spcfc-inst-nodeos", specific_nodeos_installation_paths); using namespace std::chrono; 
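+  // (The two retrieve_paired_array_parameters calls above validate that every numbered
+  //  flag has a matching partner flag and that each node index stays below the --nodes
+  //  count before recording the pairing in the corresponding map.)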
  system_clock::time_point now = system_clock::now();
@@ -1511,7 +1520,14 @@ launcher_def::launch (eosd_def &instance, string &gts) {
   node_rt_info info;
   info.remote = !host->is_local();

-  string eosdcmd = "programs/nodeos/nodeos ";
+  string install_path;
+  if (instance.name != "bios" && !specific_nodeos_installation_paths.empty()) {
+    const auto node_num = boost::lexical_cast<uint>(instance.get_node_num());
+    if (specific_nodeos_installation_paths.count(node_num)) {
+      install_path = specific_nodeos_installation_paths[node_num] + "/";
+    }
+  }
+  string eosdcmd = install_path + "programs/nodeos/nodeos ";
   if (skip_transaction_signatures) {
     eosdcmd += "--skip-transaction-signatures ";
   }

From 4240c316d7d7cd10f589d80f690230fe888dd785 Mon Sep 17 00:00:00 2001
From: Brian Johnson
Date: Thu, 14 Mar 2019 08:14:57 -0500
Subject: [PATCH 134/680] Added helper flag to supply alternate label mapping
 file. GH #6879.

---
 tests/TestHelper.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/tests/TestHelper.py b/tests/TestHelper.py
index 10b69fa334d..a9920a731c1 100644
--- a/tests/TestHelper.py
+++ b/tests/TestHelper.py
@@ -105,6 +105,8 @@ def parse_args(includeArgs, applicationSpecificArgs=AppArgs()):
         parser.add_argument("--clean-run", help="Kill all nodeos and kleos instances", action='store_true')
     if "--sanity-test" in includeArgs:
         parser.add_argument("--sanity-test", help="Validates nodeos and kleos are in path and can be started up.", action='store_true')
+    if "--alternate-versions-labels-file" in includeArgs:
+        parser.add_argument("--alternate-versions-labels-file", type=str, help="Provide a file to define the labels that can be used in the test and the path to the version installation associated with that.")

     for arg in applicationSpecificArgs.args:
         parser.add_argument(arg.flag, type=arg.type, help=arg.help, choices=arg.choices, default=arg.default)

From 6316febc4a8632b17f9466394c26264332373541 Mon Sep 17 00:00:00 2001
From: Brian Johnson
Date: Thu, 14 Mar 2019 08:15:53 -0500
Subject: [PATCH 135/680] Added support for supplying alternate label mappings
 to Cluster.launcher to take advantage of launcher flags. GH #6879.
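(Illustrative usage of the flags added in the previous commit; the binary location, node counts, and installation path here are hypothetical, not part of the patch. Running node 2 of a three-node cluster from an alternate installation while the other nodes use the default build might look like:

    programs/eosio-launcher/eosio-launcher --nodes 3 --pnodes 2 --spcfc-inst-num 2 --spcfc-inst-nodeos /opt/eosio-alt

Per launcher_def::launch above, the supplied path is prefixed onto that node's start command, yielding /opt/eosio-alt/programs/nodeos/nodeos ... while the remaining nodes keep the default programs/nodeos/nodeos.)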
--- tests/Cluster.py | 60 ++++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 58 insertions(+), 2 deletions(-) diff --git a/tests/Cluster.py b/tests/Cluster.py index 3bc0f215566..d41d8e8731d 100644 --- a/tests/Cluster.py +++ b/tests/Cluster.py @@ -83,6 +83,7 @@ def __init__(self, walletd=False, localCluster=True, host="localhost", port=8888 self.useBiosBootFile=False self.filesToCleanup=[] + self.alternateVersionLabels=Cluster.__defaultAlternateVersionLabels() def setChainStrategy(self, chainSyncStrategy=Utils.SyncReplayTag): @@ -93,13 +94,43 @@ def setChainStrategy(self, chainSyncStrategy=Utils.SyncReplayTag): def setWalletMgr(self, walletMgr): self.walletMgr=walletMgr + @staticmethod + def __defaultAlternateVersionLabels(): + """Return a labels dictionary with just the "current" label to path set.""" + labels={} + labels["current"]="./" + return labels + + def setAlternateVersionLabels(self, file): + """From the provided file return a dictionary of labels to paths.""" + Utils.Print("alternate file=%s" % (file)) + self.alternateVersionLabels=Cluster.__defaultAlternateVersionLabels() + if file is None: + # only have "current" + return + if not os.path.exists(file): + Utils.errorExit("Alternate Version Labels file \"%s\" does not exist" % (file)) + with open(file, 'r') as f: + content=f.read() + p=re.compile(r'^\s*(\w+)\s*=\s*([^\s](?:.*[^\s])?)\s*$', re.MULTILINE) + all=p.findall(content) + for match in all: + label=match[0] + path=match[1] + if label=="current": + Utils.Print("ERROR: cannot overwrite default label %s with path=%s" % (label, path)) + continue + self.alternateVersionLabels[label]=path + if Utils.Debug: Utils.Print("Version label \"%s\" maps to \"%s\"" % (label, path)) + # launch local nodes and set self.nodes # pylint: disable=too-many-locals # pylint: disable=too-many-return-statements # pylint: disable=too-many-branches # pylint: disable=too-many-statements def launch(self, pnodes=1, totalNodes=1, prodCount=1, topo="mesh", p2pPlugin="net", delay=1, onlyBios=False, dontBootstrap=False, - totalProducers=None, extraNodeosArgs=None, useBiosBootFile=True, specificExtraNodeosArgs=None): + totalProducers=None, extraNodeosArgs=None, useBiosBootFile=True, specificExtraNodeosArgs=None, alternateVersionLabelsFile=None, + associatedNodeLabels=None): """Launch cluster. pnodes: producer nodes count totalNodes: producer + non-producer nodes count @@ -115,8 +146,19 @@ def launch(self, pnodes=1, totalNodes=1, prodCount=1, topo="mesh", p2pPlugin="ne A value of false uses manual bootstrapping in this script, which does not do things like stake votes for producers. specificExtraNodeosArgs: dictionary of arguments to pass to a specific node (via --specific-num and --specific-nodeos flags on launcher), example: { "5" : "--plugin eosio::test_control_api_plugin" } + alternateVersionLabelsFile: Supply an alternate version labels file to use with associatedNodeLabels. + associatedNodeLabels: Supply a dictionary of node numbers to use an alternate label for a specific node. 
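+        A labels file (parsed by setAlternateVersionLabels above) holds one label=path
+        pair per line, e.g. the hypothetical entry:  v160=/install/eosio-1.6.0
+        The label "current" is reserved and always maps to "./".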
""" assert(isinstance(topo, str)) + if alternateVersionLabelsFile is not None: + assert(isinstance(alternateVersionLabelsFile, str)) + elif associatedNodeLabels is not None: + associatedNodeLabels=None # need to supply alternateVersionLabelsFile to use labels + + if associatedNodeLabels is not None: + assert(isinstance(associatedNodeLabels, dict)) + Utils.Print("associatedNodeLabels size=%s" % (len(associatedNodeLabels))) + Utils.Print("alternateVersionLabelsFile=%s" % (alternateVersionLabelsFile)) if not self.localCluster: Utils.Print("WARNING: Cluster not local, not launching %s." % (Utils.EosServerName)) @@ -136,6 +178,8 @@ def launch(self, pnodes=1, totalNodes=1, prodCount=1, topo="mesh", p2pPlugin="ne assert(isinstance(totalProducers, (str,int))) producerFlag="--producers %s" % (totalProducers) + self.setAlternateVersionLabels(alternateVersionLabelsFile) + tries = 30 while not Utils.arePortsAvailable(set(range(self.port, self.port+totalNodes+1))): Utils.Print("ERROR: Another process is listening on nodeos default port. wait...") @@ -181,6 +225,18 @@ def launch(self, pnodes=1, totalNodes=1, prodCount=1, topo="mesh", p2pPlugin="ne cmdArr.append("--max-transaction-cpu-usage") cmdArr.append(str(150000000)) + if associatedNodeLabels is not None: + for nodeNum,label in associatedNodeLabels.items(): + assert(isinstance(nodeNum, (str,int))) + assert(isinstance(label, str)) + path=self.alternateVersionLabels.get(label) + if path is None: + Utils.errorExit("associatedNodeLabels passed in indicates label %s for node num %s, but it was not identified in %s" % (label, nodeNum, alternateVersionLabelsFile)) + cmdArr.append("--spcfc-inst-num") + cmdArr.append(str(nodeNum)) + cmdArr.append("--spcfc-inst-nodeos") + cmdArr.append(path) + # must be last cmdArr.append before subprocess.call, so that everything is on the command line # before constructing the shape.json file for "bridge" if topo=="bridge": @@ -273,7 +329,7 @@ def getNodeNum(nodeName): producerGroup2.append(nodeName) Utils.Print("Group2 grouping producerIndex=%s, secondGroupStart=%s" % (producerIndex,secondGroupStart)) if group!=prodGroup: - errorExit("Node configuration not consistent with \"bridge\" topology. Node %s has producers that fall into both halves of the bridged network" % (nodeName)) + Utils.errorExit("Node configuration not consistent with \"bridge\" topology. 
Node %s has producers that fall into both halves of the bridged network" % (nodeName)) for _,bridgeNode in bridgeNodes.items(): bridgeNode["peers"]=[] From d9fe4591102abe7e2b2e9a67d459916f5c67ab3f Mon Sep 17 00:00:00 2001 From: Andrianto Lie Date: Wed, 13 Mar 2019 18:13:08 +0800 Subject: [PATCH 136/680] Add capability to activate preactivate feature in the python testing framework --- testnet.template | 8 ++++++-- tests/Cluster.py | 31 ++++++++++++++++++++++--------- tests/Node.py | 28 ++++++++++++++++++++++++++++ 3 files changed, 56 insertions(+), 11 deletions(-) diff --git a/testnet.template b/testnet.template index 637fea419af..31e0b87d46f 100644 --- a/testnet.template +++ b/testnet.template @@ -12,6 +12,11 @@ if [ -z "$biosport" ]; then biosport=9776 fi +bioscontractpath=$BIOS_CONTRACT_PATH +if [ -z "$bioscontractpath" ]; then + bioscontractpath="unittests/contracts/eosio.bios" +fi + wddir=eosio-ignition-wd wdaddr=localhost:8899 wdurl=http://$wdaddr @@ -76,8 +81,7 @@ wcmd create --to-console -n ignition ###INSERT prodkeys # Use old bios contract for now (switch to new after adding changes to activate PREACTIVATE_FEATURE) -ecmd set contract eosio unittests/contracts/old_versions/v1.6.0-rc3/eosio.bios eosio.bios.wasm eosio.bios.abi -#ecmd set contract eosio unittests/contracts/eosio.bios eosio.bios.wasm eosio.bios.abi +ecmd set contract eosio $bioscontractpath eosio.bios.wasm eosio.bios.abi # Create required system accounts ecmd create key --to-console diff --git a/tests/Cluster.py b/tests/Cluster.py index b68ea7190f6..e61bb3947db 100644 --- a/tests/Cluster.py +++ b/tests/Cluster.py @@ -99,7 +99,7 @@ def setWalletMgr(self, walletMgr): # pylint: disable=too-many-branches # pylint: disable=too-many-statements def launch(self, pnodes=1, totalNodes=1, prodCount=1, topo="mesh", p2pPlugin="net", delay=1, onlyBios=False, dontBootstrap=False, - totalProducers=None, extraNodeosArgs=None, useBiosBootFile=True, specificExtraNodeosArgs=None): + totalProducers=None, extraNodeosArgs=None, useBiosBootFile=True, specificExtraNodeosArgs=None, activatePreactivateFeature=True): """Launch cluster. pnodes: producer nodes count totalNodes: producer + non-producer nodes count @@ -108,13 +108,14 @@ def launch(self, pnodes=1, totalNodes=1, prodCount=1, topo="mesh", p2pPlugin="ne delay: delay between individual nodes launch (as defined by launcher) delay 0 exposes a bootstrap bug where producer handover may have a large gap confusing nodes and bringing system to a halt. onlyBios: When true, only loads the bios contract (and not more full bootstrapping). - dontBootstrap: When true, don't do any bootstrapping at all. + dontBootstrap: When true, don't do any bootstrapping at all. (even bios is not uploaded) extraNodeosArgs: string of arguments to pass through to each nodoes instance (via --nodeos flag on launcher) useBiosBootFile: determines which of two bootstrap methods is used (when both dontBootstrap and onlyBios are false). The default value of true uses the bios_boot.sh file generated by the launcher. A value of false uses manual bootstrapping in this script, which does not do things like stake votes for producers. 
specificExtraNodeosArgs: dictionary of arguments to pass to a specific node (via --specific-num and --specific-nodeos flags on launcher), example: { "5" : "--plugin eosio::test_control_api_plugin" } + activatePreactivateFeature: When true, this will activate PREACTIVATE_FEATURE protocol feature immediately after the bios node is starting """ assert(isinstance(topo, str)) @@ -161,6 +162,8 @@ def launch(self, pnodes=1, totalNodes=1, prodCount=1, topo="mesh", p2pPlugin="ne nodeosArgs += extraNodeosArgs if Utils.Debug: nodeosArgs += " --contracts-console" + if activatePreactivateFeature: + nodeosArgs += " --plugin eosio::producer_api_plugin" if nodeosArgs: cmdArr.append("--nodeos") @@ -325,6 +328,7 @@ def connectGroup(group, producerNodes, bridgeNodes) : if not biosNode.checkPulse(): Utils.Print("ERROR: Bios node doesn't appear to be running...") return False + if activatePreactivateFeature: biosNode.activatePreactivateFeature() self.nodes=[biosNode] @@ -340,13 +344,13 @@ def connectGroup(group, producerNodes, bridgeNodes) : Utils.Print("Bootstrap cluster.") if onlyBios or not useBiosBootFile: - self.biosNode=Cluster.bootstrap(totalNodes, prodCount, totalProducers, Cluster.__BiosHost, Cluster.__BiosPort, self.walletMgr, onlyBios) + self.biosNode=Cluster.bootstrap(totalNodes, prodCount, totalProducers, Cluster.__BiosHost, Cluster.__BiosPort, self.walletMgr, onlyBios, activatePreactivateFeature) if self.biosNode is None: Utils.Print("ERROR: Bootstrap failed.") return False else: self.useBiosBootFile=True - self.biosNode=Cluster.bios_bootstrap(totalNodes, Cluster.__BiosHost, Cluster.__BiosPort, self.walletMgr) + self.biosNode=Cluster.bios_bootstrap(totalNodes, Cluster.__BiosHost, Cluster.__BiosPort, self.walletMgr, activatePreactivateFeature) if self.biosNode is None: Utils.Print("ERROR: Bootstrap failed.") return False @@ -835,7 +839,7 @@ def parseClusterKeys(totalNodes): return producerKeys @staticmethod - def bios_bootstrap(totalNodes, biosHost, biosPort, walletMgr, silent=False): + def bios_bootstrap(totalNodes, biosHost, biosPort, walletMgr, activatePreactivateFeature, silent=False): """Bootstrap cluster using the bios_boot.sh script generated by eosio-launcher.""" Utils.Print("Starting cluster bootstrap.") @@ -843,10 +847,15 @@ def bios_bootstrap(totalNodes, biosHost, biosPort, walletMgr, silent=False): if not biosNode.checkPulse(): Utils.Print("ERROR: Bios node doesn't appear to be running...") return None + if activatePreactivateFeature: biosNode.activatePreactivateFeature() cmd="bash bios_boot.sh" if Utils.Debug: Utils.Print("cmd: %s" % (cmd)) - if 0 != subprocess.call(cmd.split(), stdout=Utils.FNull): + if activatePreactivateFeature: + biosContractPath = "unittests/contracts/eosio.bios" + else: + biosContractPath = "unittests/contracts/old_versions/v1.6.0-rc3/eosio.bios" + if 0 != subprocess.call(cmd.split(), stdout=Utils.FNull, env={"BIOS_CONTRACT_PATH":biosContractPath}): if not silent: Utils.Print("Launcher failed to shut down eos cluster.") return None @@ -916,7 +925,7 @@ def bios_bootstrap(totalNodes, biosHost, biosPort, walletMgr, silent=False): return biosNode @staticmethod - def bootstrap(totalNodes, prodCount, totalProducers, biosHost, biosPort, walletMgr, onlyBios=False): + def bootstrap(totalNodes, prodCount, totalProducers, biosHost, biosPort, walletMgr, activatePreactivateFeature, onlyBios=False): """Create 'prodCount' init accounts and deposits 10000000000 SYS in each. If prodCount is -1 will initialize all possible producers. 
Ensure nodes are inter-connected prior to this call. One way to validate this will be to check if every node has block 1.""" @@ -928,6 +937,7 @@ def bootstrap(totalNodes, prodCount, totalProducers, biosHost, biosPort, walletM if not biosNode.checkPulse(): Utils.Print("ERROR: Bios node doesn't appear to be running...") return None + if activatePreactivateFeature: biosNode.activatePreactivateFeature() producerKeys=Cluster.parseClusterKeys(totalNodes) # should have totalNodes node plus bios node @@ -960,8 +970,11 @@ def bootstrap(totalNodes, prodCount, totalProducers, biosHost, biosPort, walletM return None contract="eosio.bios" - #contractDir="unittests/contracts/%s" % (contract) - contractDir="unittests/contracts/old_versions/v1.6.0-rc3/%s" % (contract) # use old eosio.bios for now + contractDir="unittests/contracts/%s" % (contract) + if activatePreactivateFeature: + contractDir="unittests/contracts/%s" % (contract) + else: + contractDir="unittests/contracts/old_versions/v1.6.0-rc3/%s" % (contract) wasmFile="%s.wasm" % (contract) abiFile="%s.abi" % (contract) Utils.Print("Publish %s contract" % (contract)) diff --git a/tests/Node.py b/tests/Node.py index 16eede1b46a..520bb2b79bf 100644 --- a/tests/Node.py +++ b/tests/Node.py @@ -7,6 +7,8 @@ import datetime import json import signal +import urllib.request +import urllib.parse from core_symbol import CORE_SYMBOL from testUtils import Utils @@ -1406,3 +1408,29 @@ def reportStatus(self): status="last getInfo returned None" if not self.infoValid else "at last call to getInfo" Utils.Print(" hbn : %s (%s)" % (self.lastRetrievedHeadBlockNum, status)) Utils.Print(" lib : %s (%s)" % (self.lastRetrievedLIB, status)) + + def sendRpcApi(self, relativeUrl, data={}): + url = urllib.parse.urljoin(self.endpointHttp, relativeUrl) + req = urllib.request.Request(url) + req.add_header('Content-Type', 'application/json; charset=utf-8') + reqData = json.dumps(data).encode("utf-8") + rpcApiResult = None + try: + response = urllib.request.urlopen(req, reqData) + rpcApiResult = json.loads(response.read()) + except Exception as e: + Utils.Print("Fail to send RPC API to {} with data {} ({})".format(url, data, e)) + raise + return rpcApiResult + + def scheduleProtocolFeatureActivations(self, featureDigests=[]): + self.sendRpcApi("v1/producer/schedule_protocol_feature_activations", {"protocol_features_to_activate": featureDigests}) + + def activatePreactivateFeature(self): + # TODO: Find out if there's another way to get the feature digest in the real life + self.scheduleProtocolFeatureActivations(["0ec7e080177b2c02b278d5088611686b49d739925a92d9bfcacd7fc6b74053bd"]) + # Wait for the next block to be produced so the scheduled protocol feature is activated + currentHead = self.getHeadBlockNum() + def isHeadAdvancing(): + return self.getHeadBlockNum() > currentHead + Utils.waitForBool(isHeadAdvancing, 5) From a9d91528f7293565e2b4bf34b43cd9d94cac18f2 Mon Sep 17 00:00:00 2001 From: arhag Date: Wed, 13 Mar 2019 11:42:07 -0400 Subject: [PATCH 137/680] decode binary RPC response in sendRpcApi before converting to JSON --- tests/Node.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/Node.py b/tests/Node.py index 520bb2b79bf..d6690144856 100644 --- a/tests/Node.py +++ b/tests/Node.py @@ -1417,7 +1417,7 @@ def sendRpcApi(self, relativeUrl, data={}): rpcApiResult = None try: response = urllib.request.urlopen(req, reqData) - rpcApiResult = json.loads(response.read()) + rpcApiResult = json.loads(response.read().decode("utf-8")) except Exception as e: 
Utils.Print("Fail to send RPC API to {} with data {} ({})".format(url, data, e)) raise From e7440bb01c0ce8ef7bfd3a8d100f5543850a4997 Mon Sep 17 00:00:00 2001 From: Andrianto Lie Date: Thu, 14 Mar 2019 15:35:30 +0800 Subject: [PATCH 138/680] Use API to get feature digest for preactivate protocol feature --- tests/Node.py | 23 ++++++++++++++++++++--- 1 file changed, 20 insertions(+), 3 deletions(-) diff --git a/tests/Node.py b/tests/Node.py index d6690144856..3e5026c5e7f 100644 --- a/tests/Node.py +++ b/tests/Node.py @@ -1426,11 +1426,28 @@ def sendRpcApi(self, relativeUrl, data={}): def scheduleProtocolFeatureActivations(self, featureDigests=[]): self.sendRpcApi("v1/producer/schedule_protocol_feature_activations", {"protocol_features_to_activate": featureDigests}) + def getSupportedProtocolFeatures(self, excludeDisabled=True, excludeUnactivatable=True): + param = { + "exclude_disabled": excludeDisabled, + "exclude_unactivatable": excludeUnactivatable + } + res = self.sendRpcApi("v1/producer/get_supported_protocol_features", param) + return res + def activatePreactivateFeature(self): - # TODO: Find out if there's another way to get the feature digest in the real life - self.scheduleProtocolFeatureActivations(["0ec7e080177b2c02b278d5088611686b49d739925a92d9bfcacd7fc6b74053bd"]) + def getPreactivateFeatureDigest(supportedProtocolFeatures): + for protocolFeature in supportedProtocolFeatures: + for spec in protocolFeature["specification"]: + if (spec["name"] == "builtin_feature_codename" and spec["value"] == "PREACTIVATE_FEATURE"): + return protocolFeature["feature_digest"] + return None + preactivateFeatureDigest = getPreactivateFeatureDigest(self.getSupportedProtocolFeatures()) + assert preactivateFeatureDigest + + self.scheduleProtocolFeatureActivations([preactivateFeatureDigest]) + # Wait for the next block to be produced so the scheduled protocol feature is activated currentHead = self.getHeadBlockNum() def isHeadAdvancing(): - return self.getHeadBlockNum() > currentHead + return self.getHeadBlockNum() > currentHead Utils.waitForBool(isHeadAdvancing, 5) From 23836069bc7b255367916bb7c0f239432cbe416e Mon Sep 17 00:00:00 2001 From: Andrianto Lie Date: Thu, 14 Mar 2019 18:11:30 +0800 Subject: [PATCH 139/680] Change python cluster to use setup policy enum --- tests/Cluster.py | 88 ++++++++++++++++--------------- tests/Node.py | 24 +++++++++ tests/nodeos_forked_chain_test.py | 2 +- 3 files changed, 70 insertions(+), 44 deletions(-) diff --git a/tests/Cluster.py b/tests/Cluster.py index e61bb3947db..e035c94c877 100644 --- a/tests/Cluster.py +++ b/tests/Cluster.py @@ -18,6 +18,16 @@ from Node import BlockType from Node import Node from WalletMgr import WalletMgr +from enum import Enum, auto + +# Protocol Feature Setup Policy +class PFSetupPolicy(Enum): + NONE = auto() + PREACTIVATE_FEATURE_ONLY = auto() + FULL = auto() + def hasPreactivateFeature(self): + return self == PFSetupPolicy.PREACTIVATE_FEATURE_ONLY or \ + self == PFSetupPolicy.FULL # pylint: disable=too-many-instance-attributes # pylint: disable=too-many-public-methods @@ -99,7 +109,8 @@ def setWalletMgr(self, walletMgr): # pylint: disable=too-many-branches # pylint: disable=too-many-statements def launch(self, pnodes=1, totalNodes=1, prodCount=1, topo="mesh", p2pPlugin="net", delay=1, onlyBios=False, dontBootstrap=False, - totalProducers=None, extraNodeosArgs=None, useBiosBootFile=True, specificExtraNodeosArgs=None, activatePreactivateFeature=True): + totalProducers=None, extraNodeosArgs=None, useBiosBootFile=True, 
specificExtraNodeosArgs=None, + pFSetupPolicy:PFSetupPolicy = PFSetupPolicy.FULL): """Launch cluster. pnodes: producer nodes count totalNodes: producer + non-producer nodes count @@ -162,7 +173,7 @@ def launch(self, pnodes=1, totalNodes=1, prodCount=1, topo="mesh", p2pPlugin="ne nodeosArgs += extraNodeosArgs if Utils.Debug: nodeosArgs += " --contracts-console" - if activatePreactivateFeature: + if pFSetupPolicy.hasPreactivateFeature(): nodeosArgs += " --plugin eosio::producer_api_plugin" if nodeosArgs: @@ -323,13 +334,12 @@ def connectGroup(group, producerNodes, bridgeNodes) : self.nodes=nodes - if onlyBios: - biosNode=Node(Cluster.__BiosHost, Cluster.__BiosPort, walletMgr=self.walletMgr) - if not biosNode.checkPulse(): - Utils.Print("ERROR: Bios node doesn't appear to be running...") - return False - if activatePreactivateFeature: biosNode.activatePreactivateFeature() + biosNode=Node(Cluster.__BiosHost, Cluster.__BiosPort, walletMgr=self.walletMgr) + if not biosNode.checkPulse(): + Utils.Print("ERROR: Bios node doesn't appear to be running...") + return False + if onlyBios: self.nodes=[biosNode] # ensure cluster node are inter-connected by ensuring everyone has block 1 @@ -338,22 +348,24 @@ def connectGroup(group, producerNodes, bridgeNodes) : Utils.Print("ERROR: Cluster doesn't seem to be in sync. Some nodes missing block 1") return False + if pFSetupPolicy.hasPreactivateFeature(): + Utils.Print("Activate Preactivate Feature.") + biosNode.activatePreactivateFeature() + if dontBootstrap: Utils.Print("Skipping bootstrap.") return True Utils.Print("Bootstrap cluster.") if onlyBios or not useBiosBootFile: - self.biosNode=Cluster.bootstrap(totalNodes, prodCount, totalProducers, Cluster.__BiosHost, Cluster.__BiosPort, self.walletMgr, onlyBios, activatePreactivateFeature) - if self.biosNode is None: - Utils.Print("ERROR: Bootstrap failed.") - return False + self.biosNode=self.bootstrap(biosNode, totalNodes, prodCount, totalProducers, pFSetupPolicy, onlyBios) else: self.useBiosBootFile=True - self.biosNode=Cluster.bios_bootstrap(totalNodes, Cluster.__BiosHost, Cluster.__BiosPort, self.walletMgr, activatePreactivateFeature) - if self.biosNode is None: - Utils.Print("ERROR: Bootstrap failed.") - return False + self.biosNode=self.bios_bootstrap(biosNode, totalNodes, pFSetupPolicy) + + if self.biosNode is None: + Utils.Print("ERROR: Bootstrap failed.") + return False self.discoverBiosNodePid() @@ -838,20 +850,14 @@ def parseClusterKeys(totalNodes): return producerKeys - @staticmethod - def bios_bootstrap(totalNodes, biosHost, biosPort, walletMgr, activatePreactivateFeature, silent=False): + def bios_bootstrap(self, biosNode, totalNodes, pfSetupPolicy, silent=False): """Bootstrap cluster using the bios_boot.sh script generated by eosio-launcher.""" Utils.Print("Starting cluster bootstrap.") - biosNode=Node(biosHost, biosPort, walletMgr=walletMgr) - if not biosNode.checkPulse(): - Utils.Print("ERROR: Bios node doesn't appear to be running...") - return None - if activatePreactivateFeature: biosNode.activatePreactivateFeature() cmd="bash bios_boot.sh" if Utils.Debug: Utils.Print("cmd: %s" % (cmd)) - if activatePreactivateFeature: + if pfSetupPolicy.hasPreactivateFeature(): biosContractPath = "unittests/contracts/eosio.bios" else: biosContractPath = "unittests/contracts/old_versions/v1.6.0-rc3/eosio.bios" @@ -873,14 +879,14 @@ def bios_bootstrap(totalNodes, biosHost, biosPort, walletMgr, activatePreactivat Utils.Print("ERROR: Failed to parse private keys from cluster config files.") return None - 
walletMgr.killall() - walletMgr.cleanup() + self.walletMgr.killall() + self.walletMgr.cleanup() - if not walletMgr.launch(): + if not self.walletMgr.launch(): Utils.Print("ERROR: Failed to launch bootstrap wallet.") return None - ignWallet=walletMgr.create("ignition") + ignWallet=self.walletMgr.create("ignition") if ignWallet is None: Utils.Print("ERROR: Failed to create ignition wallet.") return None @@ -894,7 +900,7 @@ def bios_bootstrap(totalNodes, biosHost, biosPort, walletMgr, activatePreactivat eosioAccount.activePublicKey=eosioKeys["public"] producerKeys.pop(eosioName) - if not walletMgr.importKey(eosioAccount, ignWallet): + if not self.walletMgr.importKey(eosioAccount, ignWallet): Utils.Print("ERROR: Failed to import %s account keys into ignition wallet." % (eosioName)) return None @@ -924,8 +930,7 @@ def bios_bootstrap(totalNodes, biosHost, biosPort, walletMgr, activatePreactivat return biosNode - @staticmethod - def bootstrap(totalNodes, prodCount, totalProducers, biosHost, biosPort, walletMgr, activatePreactivateFeature, onlyBios=False): + def bootstrap(self, biosNode, totalNodes, prodCount, totalProducers, pfSetupPolicy, onlyBios=False): """Create 'prodCount' init accounts and deposits 10000000000 SYS in each. If prodCount is -1 will initialize all possible producers. Ensure nodes are inter-connected prior to this call. One way to validate this will be to check if every node has block 1.""" @@ -933,12 +938,6 @@ def bootstrap(totalNodes, prodCount, totalProducers, biosHost, biosPort, walletM if totalProducers is None: totalProducers=totalNodes - biosNode=Node(biosHost, biosPort, walletMgr=walletMgr) - if not biosNode.checkPulse(): - Utils.Print("ERROR: Bios node doesn't appear to be running...") - return None - if activatePreactivateFeature: biosNode.activatePreactivateFeature() - producerKeys=Cluster.parseClusterKeys(totalNodes) # should have totalNodes node plus bios node if producerKeys is None: @@ -948,14 +947,14 @@ def bootstrap(totalNodes, prodCount, totalProducers, biosHost, biosPort, walletM Utils.Print("ERROR: Failed to parse %d producer keys from cluster config files, only found %d." % (totalProducers+1,len(producerKeys))) return None - walletMgr.killall() - walletMgr.cleanup() + self.walletMgr.killall() + self.walletMgr.cleanup() - if not walletMgr.launch(): + if not self.walletMgr.launch(): Utils.Print("ERROR: Failed to launch bootstrap wallet.") return None - ignWallet=walletMgr.create("ignition") + ignWallet=self.walletMgr.create("ignition") eosioName="eosio" eosioKeys=producerKeys[eosioName] @@ -965,13 +964,13 @@ def bootstrap(totalNodes, prodCount, totalProducers, biosHost, biosPort, walletM eosioAccount.activePrivateKey=eosioKeys["private"] eosioAccount.activePublicKey=eosioKeys["public"] - if not walletMgr.importKey(eosioAccount, ignWallet): + if not self.walletMgr.importKey(eosioAccount, ignWallet): Utils.Print("ERROR: Failed to import %s account keys into ignition wallet." % (eosioName)) return None contract="eosio.bios" contractDir="unittests/contracts/%s" % (contract) - if activatePreactivateFeature: + if pfSetupPolicy.hasPreactivateFeature(): contractDir="unittests/contracts/%s" % (contract) else: contractDir="unittests/contracts/old_versions/v1.6.0-rc3/%s" % (contract) @@ -983,6 +982,9 @@ def bootstrap(totalNodes, prodCount, totalProducers, biosHost, biosPort, walletM Utils.Print("ERROR: Failed to publish contract %s." 
% (contract)) return None + if pfSetupPolicy == PFSetupPolicy.FULL: + biosNode.preactivateAllBuiltinProtocolFeature() + Node.validateTransaction(trans) Utils.Print("Creating accounts: %s " % ", ".join(producerKeys.keys())) diff --git a/tests/Node.py b/tests/Node.py index 3e5026c5e7f..ebfb2cf5bce 100644 --- a/tests/Node.py +++ b/tests/Node.py @@ -1451,3 +1451,27 @@ def getPreactivateFeatureDigest(supportedProtocolFeatures): def isHeadAdvancing(): return self.getHeadBlockNum() > currentHead Utils.waitForBool(isHeadAdvancing, 5) + + def getAllBuiltinFeatureDigestsToPreactivate(self): + allBuiltinProtocolFeatureDigests = [] + supportedProtocolFeatures = self.getSupportedProtocolFeatures() + for protocolFeature in supportedProtocolFeatures: + for spec in protocolFeature["specification"]: + if (spec["name"] == "builtin_feature_codename"): + if (spec["value"] != "PREACTIVATE_FEATURE"): + allBuiltinProtocolFeatureDigests.append(protocolFeature["feature_digest"]) + break + return allBuiltinProtocolFeatureDigests + + def preactivateAllBuiltinProtocolFeature(self): + contract="eosio" + action="preactivate" + allBuiltinProtocolFeatureDigests = self.getAllBuiltinFeatureDigestsToPreactivate() + for digest in allBuiltinProtocolFeatureDigests: + Utils.Print("push preactivate action with digest" % (digest)) + data='{"feature_digest":{}}'.format(digest) + opts="--permission eosio@active" + trans=self.pushMessage(contract, action, data, opts) + if trans is None or not trans[0]: + Utils.Print("ERROR: Failed to preactive digest {}".format(digest)) + return None diff --git a/tests/nodeos_forked_chain_test.py b/tests/nodeos_forked_chain_test.py index 4ef22ab082f..77b1f96e28d 100755 --- a/tests/nodeos_forked_chain_test.py +++ b/tests/nodeos_forked_chain_test.py @@ -156,7 +156,7 @@ def getMinHeadAndLib(prodNodes): # "bridge" shape connects defprocera through defproducerk (in node0) to each other and defproducerl through defproduceru (in node01) # and the only connection between those 2 groups is through the bridge node - if cluster.launch(prodCount=prodCount, onlyBios=False, topo="bridge", pnodes=totalProducerNodes, + if cluster.launch(prodCount=prodCount, topo="bridge", pnodes=totalProducerNodes, totalNodes=totalNodes, totalProducers=totalProducers, p2pPlugin=p2pPlugin, useBiosBootFile=False, specificExtraNodeosArgs=specificExtraNodeosArgs) is False: Utils.cmdError("launcher") From 5b77b49566d1481d67d81191145959e639212aa2 Mon Sep 17 00:00:00 2001 From: arhag Date: Thu, 14 Mar 2019 19:27:33 -0400 Subject: [PATCH 140/680] remove dependency on enum.auto (not available to all platforms) --- tests/Cluster.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/Cluster.py b/tests/Cluster.py index e035c94c877..9a342737d45 100644 --- a/tests/Cluster.py +++ b/tests/Cluster.py @@ -18,13 +18,13 @@ from Node import BlockType from Node import Node from WalletMgr import WalletMgr -from enum import Enum, auto +from enum import Enum # Protocol Feature Setup Policy class PFSetupPolicy(Enum): - NONE = auto() - PREACTIVATE_FEATURE_ONLY = auto() - FULL = auto() + NONE = 0 + PREACTIVATE_FEATURE_ONLY = 1 + FULL = 2 def hasPreactivateFeature(self): return self == PFSetupPolicy.PREACTIVATE_FEATURE_ONLY or \ self == PFSetupPolicy.FULL From 424374d9559e938498d429c4dfbd21bc50951cec Mon Sep 17 00:00:00 2001 From: Andrianto Lie Date: Fri, 15 Mar 2019 15:01:39 +0800 Subject: [PATCH 141/680] Preactivate feature inside bios_boot.sh created by launcher --- testnet.template | 9 +++++++++ tests/Cluster.py | 31 
++++++++++++++++++++----------- tests/Node.py | 21 ++++++++++++--------- 3 files changed, 41 insertions(+), 20 deletions(-) diff --git a/testnet.template b/testnet.template index 31e0b87d46f..508b8656695 100644 --- a/testnet.template +++ b/testnet.template @@ -17,6 +17,8 @@ if [ -z "$bioscontractpath" ]; then bioscontractpath="unittests/contracts/eosio.bios" fi +featuredigests=($FEATURE_DIGESTS) + wddir=eosio-ignition-wd wdaddr=localhost:8899 wdurl=http://$wdaddr @@ -83,6 +85,13 @@ wcmd create --to-console -n ignition # Use old bios contract for now (switch to new after adding changes to activate PREACTIVATE_FEATURE) ecmd set contract eosio $bioscontractpath eosio.bios.wasm eosio.bios.abi +# Preactivate all digests +for digest in "${featuredigests[@]}"; +do +echo "$index"; +ecmd push action eosio preactivate '["'$digest'"]' -p eosio +done + # Create required system accounts ecmd create key --to-console pubsyskey=`grep "^Public key:" $logfile | tail -1 | sed "s/^Public key://"` diff --git a/tests/Cluster.py b/tests/Cluster.py index 9a342737d45..2d70e501416 100644 --- a/tests/Cluster.py +++ b/tests/Cluster.py @@ -24,7 +24,7 @@ class PFSetupPolicy(Enum): NONE = 0 PREACTIVATE_FEATURE_ONLY = 1 - FULL = 2 + FULL = 2 # This will only happen if the cluster is bootstrapped (i.e. dontBootstrap == False) def hasPreactivateFeature(self): return self == PFSetupPolicy.PREACTIVATE_FEATURE_ONLY or \ self == PFSetupPolicy.FULL @@ -110,7 +110,7 @@ def setWalletMgr(self, walletMgr): # pylint: disable=too-many-statements def launch(self, pnodes=1, totalNodes=1, prodCount=1, topo="mesh", p2pPlugin="net", delay=1, onlyBios=False, dontBootstrap=False, totalProducers=None, extraNodeosArgs=None, useBiosBootFile=True, specificExtraNodeosArgs=None, - pFSetupPolicy:PFSetupPolicy = PFSetupPolicy.FULL): + pfSetupPolicy:PFSetupPolicy = PFSetupPolicy.FULL): """Launch cluster. pnodes: producer nodes count totalNodes: producer + non-producer nodes count @@ -126,7 +126,7 @@ def launch(self, pnodes=1, totalNodes=1, prodCount=1, topo="mesh", p2pPlugin="ne A value of false uses manual bootstrapping in this script, which does not do things like stake votes for producers. specificExtraNodeosArgs: dictionary of arguments to pass to a specific node (via --specific-num and --specific-nodeos flags on launcher), example: { "5" : "--plugin eosio::test_control_api_plugin" } - activatePreactivateFeature: When true, this will activate PREACTIVATE_FEATURE protocol feature immediately after the bios node is starting + pfSetupPolicy: determine the protocol feature setup policy (none, preactivate_feature_only, or full) """ assert(isinstance(topo, str)) @@ -173,7 +173,7 @@ def launch(self, pnodes=1, totalNodes=1, prodCount=1, topo="mesh", p2pPlugin="ne nodeosArgs += extraNodeosArgs if Utils.Debug: nodeosArgs += " --contracts-console" - if pFSetupPolicy.hasPreactivateFeature(): + if pfSetupPolicy.hasPreactivateFeature(): nodeosArgs += " --plugin eosio::producer_api_plugin" if nodeosArgs: @@ -348,7 +348,7 @@ def connectGroup(group, producerNodes, bridgeNodes) : Utils.Print("ERROR: Cluster doesn't seem to be in sync. 
Some nodes missing block 1") return False - if pFSetupPolicy.hasPreactivateFeature(): + if pfSetupPolicy.hasPreactivateFeature(): Utils.Print("Activate Preactivate Feature.") biosNode.activatePreactivateFeature() @@ -358,10 +358,12 @@ def connectGroup(group, producerNodes, bridgeNodes) : Utils.Print("Bootstrap cluster.") if onlyBios or not useBiosBootFile: - self.biosNode=self.bootstrap(biosNode, totalNodes, prodCount, totalProducers, pFSetupPolicy, onlyBios) + Utils.Print("NON BIOS BOOTSTRAP") + self.biosNode=self.bootstrap(biosNode, totalNodes, prodCount, totalProducers, pfSetupPolicy, onlyBios) else: + Utils.Print(" BIOS BOOTSTRAP") self.useBiosBootFile=True - self.biosNode=self.bios_bootstrap(biosNode, totalNodes, pFSetupPolicy) + self.biosNode=self.bios_bootstrap(biosNode, totalNodes, pfSetupPolicy) if self.biosNode is None: Utils.Print("ERROR: Bootstrap failed.") @@ -857,11 +859,18 @@ def bios_bootstrap(self, biosNode, totalNodes, pfSetupPolicy, silent=False): cmd="bash bios_boot.sh" if Utils.Debug: Utils.Print("cmd: %s" % (cmd)) + env = { + "BIOS_CONTRACT_PATH": "unittests/contracts/old_versions/v1.6.0-rc3/eosio.bios", + "FEATURE_DIGESTS": "" + } if pfSetupPolicy.hasPreactivateFeature(): - biosContractPath = "unittests/contracts/eosio.bios" - else: - biosContractPath = "unittests/contracts/old_versions/v1.6.0-rc3/eosio.bios" - if 0 != subprocess.call(cmd.split(), stdout=Utils.FNull, env={"BIOS_CONTRACT_PATH":biosContractPath}): + env["BIOS_CONTRACT_PATH"] = "unittests/contracts/eosio.bios" + + if pfSetupPolicy == PFSetupPolicy.FULL: + allBuiltinProtocolFeatureDigests = biosNode.getAllBuiltinFeatureDigestsToPreactivate() + env["FEATURE_DIGESTS"] = " ".join(allBuiltinProtocolFeatureDigests) + + if 0 != subprocess.call(cmd.split(), stdout=Utils.FNull, env=env): if not silent: Utils.Print("Launcher failed to shut down eos cluster.") return None diff --git a/tests/Node.py b/tests/Node.py index ebfb2cf5bce..0a2f87f2d3d 100644 --- a/tests/Node.py +++ b/tests/Node.py @@ -9,6 +9,7 @@ import signal import urllib.request import urllib.parse +import tempfile from core_symbol import CORE_SYMBOL from testUtils import Utils @@ -1434,6 +1435,12 @@ def getSupportedProtocolFeatures(self, excludeDisabled=True, excludeUnactivatabl res = self.sendRpcApi("v1/producer/get_supported_protocol_features", param) return res + def waitForHeadToAdvance(self): + currentHead = self.getHeadBlockNum() + def isHeadAdvancing(): + return self.getHeadBlockNum() > currentHead + Utils.waitForBool(isHeadAdvancing, 5) + def activatePreactivateFeature(self): def getPreactivateFeatureDigest(supportedProtocolFeatures): for protocolFeature in supportedProtocolFeatures: @@ -1447,10 +1454,7 @@ def getPreactivateFeatureDigest(supportedProtocolFeatures): self.scheduleProtocolFeatureActivations([preactivateFeatureDigest]) # Wait for the next block to be produced so the scheduled protocol feature is activated - currentHead = self.getHeadBlockNum() - def isHeadAdvancing(): - return self.getHeadBlockNum() > currentHead - Utils.waitForBool(isHeadAdvancing, 5) + self.waitForHeadToAdvance() def getAllBuiltinFeatureDigestsToPreactivate(self): allBuiltinProtocolFeatureDigests = [] @@ -1464,14 +1468,13 @@ def getAllBuiltinFeatureDigestsToPreactivate(self): return allBuiltinProtocolFeatureDigests def preactivateAllBuiltinProtocolFeature(self): - contract="eosio" - action="preactivate" allBuiltinProtocolFeatureDigests = self.getAllBuiltinFeatureDigestsToPreactivate() for digest in allBuiltinProtocolFeatureDigests: - Utils.Print("push 
preactivate action with digest" % (digest)) - data='{"feature_digest":{}}'.format(digest) + Utils.Print("push preactivate action with digest {}".format(digest)) + data="{{\"feature_digest\":{}}}".format(digest) opts="--permission eosio@active" - trans=self.pushMessage(contract, action, data, opts) + trans=self.pushMessage("eosio", "preactivate", data, opts) if trans is None or not trans[0]: Utils.Print("ERROR: Failed to preactivate digest {}".format(digest)) return None + self.waitForHeadToAdvance() From 2211e9a4a361af01678699807e0179d12c024390 Mon Sep 17 00:00:00 2001 From: Kayan Date: Fri, 15 Mar 2019 17:36:11 +0800 Subject: [PATCH 142/680] fix single producer node case for preactivation --- plugins/producer_plugin/producer_plugin.cpp | 8 +- tests/CMakeLists.txt | 3 + tests/prod_preactivation_test.py | 176 ++++++++++++++++++++ 3 files changed, 186 insertions(+), 1 deletion(-) create mode 100755 tests/prod_preactivation_test.py diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp index 0824dbe3e9d..4bc57ea548e 100644 --- a/plugins/producer_plugin/producer_plugin.cpp +++ b/plugins/producer_plugin/producer_plugin.cpp @@ -143,6 +143,7 @@ class producer_plugin_impl : public std::enable_shared_from_this<producer_plugin_impl> vector<digest_type> _protocol_features_to_activate; + bool _protocol_features_signaled = false; // to mark whether it has been signaled in start_block time_point _last_signed_block_time; time_point _start_time = fc::time_point::now(); @@ -979,6 +980,7 @@ void producer_plugin::schedule_protocol_feature_activations( const scheduled_pro invalid_protocol_features_to_activate, "duplicate digests" ); chain.validate_protocol_features( schedule.protocol_features_to_activate ); my->_protocol_features_to_activate = schedule.protocol_features_to_activate; + my->_protocol_features_signaled = false; } fc::variants producer_plugin::get_supported_protocol_features( const get_supported_protocol_features_params& params ) const { @@ -1209,6 +1211,7 @@ producer_plugin_impl::start_block_result producer_plugin_impl::start_block() { features_to_activate.clear(); } std::swap( features_to_activate, protocol_features_to_activate ); + _protocol_features_signaled = true; ilog( "signaling activation of the following protocol features in block ${num}: ${features_to_activate}", ("num", hbs->block_num + 1)("features_to_activate", features_to_activate) ); } @@ -1663,7 +1666,10 @@ void producer_plugin_impl::produce_block() { EOS_ASSERT(signature_provider_itr != _signature_providers.end(), producer_priv_key_not_found, "Attempting to produce a block for which we don't have the private key"); - _protocol_features_to_activate.clear(); // clear _protocol_features_to_activate as it is already set in pending_block + if (_protocol_features_signaled) { + _protocol_features_to_activate.clear(); // clear _protocol_features_to_activate as it is already set in pending_block + _protocol_features_signaled = false; + } //idump( (fc::time_point::now() - chain.pending_block_time()) ); chain.finalize_block( [&]( const digest_type& d ) { diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index 9142e47726d..d550a4cca43 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -65,6 +65,9 @@ if(BUILD_MONGO_DB_PLUGIN) set_property(TEST nodeos_run_test-mongodb PROPERTY LABELS nonparallelizable_tests) endif() +add_test(NAME producer-preactivate-feature-test COMMAND tests/prod_preactivation_test.py --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +set_property(TEST 
producer-preactivate-feature-test PROPERTY LABELS nonparallelizable_tests) + add_test(NAME distributed-transactions-test COMMAND tests/distributed-transactions-test.py -d 2 -p 4 -n 6 -v --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST distributed-transactions-test PROPERTY LABELS nonparallelizable_tests) add_test(NAME distributed-transactions-bnet-test COMMAND tests/distributed-transactions-test.py -d 2 -p 1 -n 4 --p2p-plugin bnet -v --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) diff --git a/tests/prod_preactivation_test.py b/tests/prod_preactivation_test.py new file mode 100755 index 00000000000..cef0d844380 --- /dev/null +++ b/tests/prod_preactivation_test.py @@ -0,0 +1,176 @@ +#!/usr/bin/env python3 + +from testUtils import Utils +from Cluster import Cluster +from WalletMgr import WalletMgr +from Node import Node +from Node import ReturnType +from TestHelper import TestHelper + +import decimal +import re +import time + +############################################################### +# prod_preactivation_test +# --dump-error-details +# --keep-logs +############################################################### + +Print=Utils.Print +errorExit=Utils.errorExit +cmdError=Utils.cmdError +from core_symbol import CORE_SYMBOL + +args = TestHelper.parse_args({"--host","--port","--prod-count","--defproducera_prvt_key","--defproducerb_prvt_key","--mongodb" + ,"--dump-error-details","--dont-launch","--keep-logs","-v","--leave-running","--only-bios","--clean-run" + ,"--sanity-test","--p2p-plugin","--wallet-port"}) +server=args.host +port=args.port +debug=args.v +enableMongo=args.mongodb +defproduceraPrvtKey=args.defproducera_prvt_key +defproducerbPrvtKey=args.defproducerb_prvt_key +dumpErrorDetails=args.dump_error_details +keepLogs=args.keep_logs +dontLaunch=args.dont_launch +dontKill=args.leave_running +prodCount=2 # args.prod_count +onlyBios=args.only_bios +killAll=args.clean_run +sanityTest=args.sanity_test +p2pPlugin=args.p2p_plugin +walletPort=args.wallet_port + +Utils.Debug=debug +localTest=True +cluster=Cluster(host=server, port=port, walletd=True, enableMongo=enableMongo, defproduceraPrvtKey=defproduceraPrvtKey, defproducerbPrvtKey=defproducerbPrvtKey) +walletMgr=WalletMgr(True, port=walletPort) +testSuccessful=False +killEosInstances=not dontKill +killWallet=not dontKill +dontBootstrap=sanityTest + +WalletdName=Utils.EosWalletName +ClientName="cleos" +timeout = .5 * 12 * 2 + 60 # time for finalization with 1 producer + 60 seconds padding +Utils.setIrreversibleTimeout(timeout) + +try: + TestHelper.printSystemInfo("BEGIN prod_preactivation_test.py") + cluster.setWalletMgr(walletMgr) + Print("SERVER: %s" % (server)) + Print("PORT: %d" % (port)) + + if enableMongo and not cluster.isMongodDbRunning(): + errorExit("MongoDb doesn't seem to be running.") + + if localTest and not dontLaunch: + cluster.killall(allInstances=killAll) + cluster.cleanup() + Print("Stand up cluster") + if cluster.launch(pnodes=prodCount, totalNodes=prodCount, prodCount=prodCount, onlyBios=onlyBios, dontBootstrap=dontBootstrap, p2pPlugin=p2pPlugin, extraNodeosArgs=" --plugin eosio::producer_api_plugin") is False: + cmdError("launcher") + errorExit("Failed to stand up eos cluster.") + + Print("Validating system accounts after bootstrap") + cluster.validateAccounts(None) + + node = cluster.getNode(0) + cmd = "curl %s/v1/producer/get_supported_protocol_features" % (node.endpointHttp) + Print("try to get supported feature list from Node 0 with cmd: %s" % (cmd)) + 
feature0=Utils.runCmdReturnJson(cmd) + #Print("feature list:", feature0) + + node = cluster.getNode(1) + cmd = "curl %s/v1/producer/get_supported_protocol_features" % (node.endpointHttp) + Print("try to get supported feature list from Node 1 with cmd: %s" % (cmd)) + feature1=Utils.runCmdReturnJson(cmd) + #Print("feature list:", feature1) + + if feature0 != feature1: + errorExit("feature list mismatch between node 0 and node 1") + else: + Print("feature list from node 0 matches with that from node 1") + + if len(feature0) == 0: + errorExit("No supported feature list") + + digest = "" + for i in range(0, len(feature0)): + feature = feature0[i] + if feature["specification"][0]["value"] != "PREACTIVATE_FEATURE": + continue + else: + digest = feature["feature_digest"] + + if len(digest) == 0: + errorExit("code name PREACTIVATE_FEATURE not found") + + Print("found digest ", digest, " of PREACTIVATE_FEATURE") + + node0 = cluster.getNode(0) + contract="eosio.bios" + contractDir="unittests/contracts/%s" % (contract) + wasmFile="%s.wasm" % (contract) + abiFile="%s.abi" % (contract) + + Print("publish a new bios contract %s should fail because env.is_feature_activated is unresolveable" % (contractDir)) + retMap = node0.publishContract("eosio", contractDir, wasmFile, abiFile, True, shouldFail=True) + + #Print(retMap) + if retMap["output"].decode("utf-8").find("unresolveable") < 0: + errorExit("bios contract did not result in expected unresolveable error") + + secwait = 60 + Print("Wait for defproducerb to produce...") + node = cluster.getNode(1) + while secwait > 0: + info = node.getInfo() + #Print("head producer:", info["head_block_producer"]) + if info["head_block_producer"] == "defproducerb": #defproducerb is in node0 + break + time.sleep(1) + secwait = secwait - 1 + + if secwait <= 0: + errorExit("defproducerb did not produce a block in time") + + cmd = "curl --data-binary '{\"protocol_features_to_activate\":[\"%s\"]}' %s/v1/producer/schedule_protocol_feature_activations" % (digest, node.endpointHttp) + + Print("try to preactivate feature on node 1, cmd: %s" % (cmd)) + result = Utils.runCmdReturnJson(cmd) + + if result["result"] != "ok": + errorExit("failed to preactivate feature from producer plugin on node 1") + else: + Print("feature PREACTIVATE_FEATURE (%s) preactivation success" % (digest)) + + time.sleep(2) + Print("publish a new bios contract %s should fail because node 1 is not producing blocks yet" % (contractDir)) + retMap = node0.publishContract("eosio", contractDir, wasmFile, abiFile, True, shouldFail=True) + if retMap["output"].decode("utf-8").find("unresolveable") < 0: + errorExit("bios contract did not result in expected unresolveable error") + + Print("now wait for node 1 to produce a block... (this may take several minutes)...") + secwait = 480 # wait for node 1 to produce a block + while secwait > 0: + info = node.getInfo() + #Print("head producer:", info["head_block_producer"]) + if info["head_block_producer"] >= "defproducerm" and info["head_block_producer"] <= "defproduceru": + break + time.sleep(1) + secwait = secwait - 1 + + if secwait <= 0: + errorExit("No blocks produced by node 1") + + time.sleep(1) + retMap = node0.publishContract("eosio", contractDir, wasmFile, abiFile, True) + Print("successfully set new contract with new intrinsic!") + + testSuccessful=True +finally: + TestHelper.shutdown(cluster, walletMgr, testSuccessful, killEosInstances, killWallet, keepLogs, killAll, dumpErrorDetails) + +exit(0) From 355f1437e6f0f73213a01a185cdfc11ae1c193a0 Mon Sep 17 00:00:00 2001 From: arhag Date: Fri, 15 Mar 2019 23:34:07 
-0400 Subject: [PATCH 143/680] Redesign protocol_feature_manager to fix a bug which would lead to inconsistent state with respect to activations after a restart. Add get_activated_protocol_features to chain_api_plugin. The redesign of protocol_feature_manager also supports the implementation of this RPC. --- libraries/chain/controller.cpp | 117 ++++--- .../chain/include/eosio/chain/controller.hpp | 2 +- .../chain/include/eosio/chain/exceptions.hpp | 2 + .../eosio/chain/global_property_object.hpp | 85 +++-- .../eosio/chain/protocol_feature_manager.hpp | 273 +++++++++++++-- libraries/chain/include/eosio/chain/types.hpp | 1 + libraries/chain/protocol_feature_manager.cpp | 315 +++++++++++++----- libraries/chain/wasm_interface.cpp | 4 +- .../testing/include/eosio/testing/tester.hpp | 16 +- libraries/testing/tester.cpp | 52 +-- plugins/chain_api_plugin/chain_api_plugin.cpp | 1 + plugins/chain_plugin/chain_plugin.cpp | 121 +++++-- .../eosio/chain_plugin/chain_plugin.hpp | 17 + plugins/producer_plugin/producer_plugin.cpp | 11 +- 14 files changed, 763 insertions(+), 254 deletions(-) diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp index 62231b28901..00252a9b47b 100644 --- a/libraries/chain/controller.cpp +++ b/libraries/chain/controller.cpp @@ -35,6 +35,7 @@ using controller_index_set = index_set< account_index, account_sequence_index, global_property_multi_index, + protocol_state_multi_index, dynamic_global_property_multi_index, block_summary_multi_index, transaction_multi_index, @@ -275,7 +276,7 @@ struct controller_impl { apply_handlers[receiver][make_pair(contract,action)] = v; } - controller_impl( const controller::config& cfg, controller& s, protocol_feature_manager&& pfm ) + controller_impl( const controller::config& cfg, controller& s, protocol_feature_set&& pfs ) :self(s), db( cfg.state_dir, cfg.read_only ? 
database::read_only : database::read_write, @@ -288,7 +289,7 @@ struct controller_impl { wasmif( cfg.wasm_runtime ), resource_limits( db ), authorization( s, db ), - protocol_features( std::move(pfm) ), + protocol_features( std::move(pfs) ), conf( cfg ), chain_id( cfg.genesis.compute_chain_id() ), read_mode( cfg.read_mode ), @@ -569,6 +570,8 @@ struct controller_impl { db.undo(); } + protocol_features.init( db ); + const auto& rbi = reversible_blocks.get_index<reversible_block_index>(); auto last_block_num = lib_num; @@ -876,10 +879,14 @@ struct controller_impl { conf.genesis.initial_configuration.validate(); db.create<global_property_object>([&](auto& gpo ){ gpo.configuration = conf.genesis.initial_configuration; + }); + + db.create<protocol_state_object>([&](auto& pso ){ for( const auto& i : genesis_intrinsics ) { - add_intrinsic_to_whitelist( gpo.whitelisted_intrinsics, i ); + add_intrinsic_to_whitelist( pso.whitelisted_intrinsics, i ); } }); + db.create<dynamic_global_property_object>([](auto&){}); authorization.initialize_database(); @@ -1336,23 +1343,24 @@ struct controller_impl { // modify state of speculative block only if we are in speculative read mode (otherwise we need clean state for head or read-only modes) if ( read_mode == db_read_mode::SPECULATIVE || pending->_block_status != controller::block_status::incomplete ) { - const auto& gpo = db.get<global_property_object>(); + const auto& pso = db.get<protocol_state_object>(); - auto num_preactivated_protocol_features = gpo.preactivated_protocol_features.size(); + auto num_preactivated_protocol_features = pso.preactivated_protocol_features.size(); bool handled_all_preactivated_features = (num_preactivated_protocol_features == 0); if( new_protocol_feature_activations.size() > 0 ) { flat_map<digest_type, bool> activated_protocol_features; activated_protocol_features.reserve( std::max( num_preactivated_protocol_features, new_protocol_feature_activations.size() ) ); - for( const auto& feature_digest : gpo.preactivated_protocol_features ) { + for( const auto& feature_digest : pso.preactivated_protocol_features ) { activated_protocol_features.emplace( feature_digest, false ); } size_t num_preactivated_features_that_have_activated = 0; + const auto& pfs = protocol_features.get_protocol_feature_set(); for( const auto& feature_digest : new_protocol_feature_activations ) { - const auto& f = protocol_features.get_protocol_feature( feature_digest ); + const auto& f = pfs.get_protocol_feature( feature_digest ); auto res = activated_protocol_features.emplace( feature_digest, true ); if( !res.second ) { @@ -1382,6 +1390,20 @@ struct controller_impl { "There are pre-activated protocol features that were not activated at the start of this block" ); + if( new_protocol_feature_activations.size() > 0 ) { + db.modify( pso, [&]( auto& ps ) { + ps.preactivated_protocol_features.clear(); + + ps.activated_protocol_features.reserve( ps.activated_protocol_features.size() + + new_protocol_feature_activations.size() ); + for( const auto& feature_digest : new_protocol_feature_activations ) { + ps.activated_protocol_features.emplace_back( feature_digest, pbhs.block_num ); + } + }); + } + + const auto& gpo = db.get<global_property_object>(); + if( gpo.proposed_schedule_block_num.valid() && // if there is a proposed schedule that was proposed in a block ... ( *gpo.proposed_schedule_block_num <= pbhs.dpos_irreversible_blocknum ) && // ... that has now become irreversible ... pbhs.prev_pending_schedule.schedule.producers.size() == 0 // ... and there was room for a new pending schedule prior to any possible promotion
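// A minimal illustrative sketch, not part of this patch: the (digest, block_num) records
// appended to protocol_state_object.activated_protocol_features above are what let the
// activation history survive a restart. protocol_features.init( db ), called from
// controller_impl::init earlier in this file, effectively replays them in order:
//
//    for( const auto& f : db.get<protocol_state_object>().activated_protocol_features ) {
//       activate_feature( f.feature_digest, f.activation_block_num );
//    }
//
// so the in-memory protocol_feature_manager always agrees with what chainbase has persisted.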
and there was room for a new pending schedule prior to any possible promotion @@ -1402,13 +1424,6 @@ struct controller_impl { db.modify( gpo, [&]( auto& gp ) { gp.proposed_schedule_block_num = optional(); gp.proposed_schedule.clear(); - if( gp.preactivated_protocol_features.size() > 0 ) { - gp.preactivated_protocol_features.clear(); - } - }); - } else if( gpo.preactivated_protocol_features.size() > 0 ) { - db.modify( gpo, [&]( auto& gp ) { - gp.preactivated_protocol_features.clear(); }); } @@ -1553,25 +1568,27 @@ struct controller_impl { const flat_set& currently_activated_protocol_features, const vector& new_protocol_features ) { + const auto& pfs = protocol_features.get_protocol_feature_set(); + for( auto itr = new_protocol_features.begin(); itr != new_protocol_features.end(); ++itr ) { const auto& f = *itr; - auto status = protocol_features.is_recognized( f, timestamp ); + auto status = pfs.is_recognized( f, timestamp ); switch( status ) { - case protocol_feature_manager::recognized_t::unrecognized: + case protocol_feature_set::recognized_t::unrecognized: EOS_THROW( protocol_feature_exception, "protocol feature with digest '${digest}' is unrecognized", ("digest", f) ); break; - case protocol_feature_manager::recognized_t::disabled: + case protocol_feature_set::recognized_t::disabled: EOS_THROW( protocol_feature_exception, "protocol feature with digest '${digest}' is disabled", ("digest", f) ); break; - case protocol_feature_manager::recognized_t::too_early: + case protocol_feature_set::recognized_t::too_early: EOS_THROW( protocol_feature_exception, "${timestamp} is too early for the earliest allowed activation time of the protocol feature with digest '${digest}'", ("digest", f)("timestamp", timestamp) ); break; - case protocol_feature_manager::recognized_t::ready_if_preactivated: - case protocol_feature_manager::recognized_t::ready: + case protocol_feature_set::recognized_t::ready_if_preactivated: + case protocol_feature_set::recognized_t::ready: break; default: EOS_THROW( protocol_feature_exception, "unexpected recognized_t status" ); @@ -1593,7 +1610,7 @@ struct controller_impl { return (std::find( new_protocol_features.begin(), itr, f ) != itr); }; - EOS_ASSERT( protocol_features.validate_dependencies( f, dependency_checker ), protocol_feature_exception, + EOS_ASSERT( pfs.validate_dependencies( f, dependency_checker ), protocol_feature_exception, "not all dependencies of protocol feature with digest '${digest}' have been activated", ("digest", f) ); @@ -2140,12 +2157,12 @@ const protocol_feature_manager& controller::get_protocol_feature_manager()const } controller::controller( const controller::config& cfg ) -:my( new controller_impl( cfg, *this, protocol_feature_manager{} ) ) +:my( new controller_impl( cfg, *this, protocol_feature_set{} ) ) { } -controller::controller( const config& cfg, protocol_feature_manager&& pfm ) -:my( new controller_impl( cfg, *this, std::move(pfm) ) ) +controller::controller( const config& cfg, protocol_feature_set&& pfs ) +:my( new controller_impl( cfg, *this, std::move(pfs) ) ) { } @@ -2186,10 +2203,12 @@ chainbase::database& controller::mutable_db()const { return my->db; } const fork_database& controller::fork_db()const { return my->fork_db; } void controller::preactivate_feature( const digest_type& feature_digest ) { + const auto& pfs = my->protocol_features.get_protocol_feature_set(); auto cur_time = pending_block_time(); - auto status = my->protocol_features.is_recognized( feature_digest, cur_time ); + + auto status = pfs.is_recognized( 
feature_digest, cur_time ); switch( status ) { - case protocol_feature_manager::recognized_t::unrecognized: + case protocol_feature_set::recognized_t::unrecognized: if( is_producing_block() ) { EOS_THROW( subjective_block_production_exception, "protocol feature with digest '${digest}' is unrecognized", ("digest", feature_digest) ); @@ -2198,7 +2217,7 @@ void controller::preactivate_feature( const digest_type& feature_digest ) { "protocol feature with digest '${digest}' is unrecognized", ("digest", feature_digest) ); } break; - case protocol_feature_manager::recognized_t::disabled: + case protocol_feature_set::recognized_t::disabled: if( is_producing_block() ) { EOS_THROW( subjective_block_production_exception, "protocol feature with digest '${digest}' is disabled", ("digest", feature_digest) ); @@ -2207,7 +2226,7 @@ void controller::preactivate_feature( const digest_type& feature_digest ) { "protocol feature with digest '${digest}' is disabled", ("digest", feature_digest) ); } break; - case protocol_feature_manager::recognized_t::too_early: + case protocol_feature_set::recognized_t::too_early: if( is_producing_block() ) { EOS_THROW( subjective_block_production_exception, "${timestamp} is too early for the earliest allowed activation time of the protocol feature with digest '${digest}'", ("digest", feature_digest)("timestamp", cur_time) ); @@ -2216,8 +2235,8 @@ void controller::preactivate_feature( const digest_type& feature_digest ) { "${timestamp} is too early for the earliest allowed activation time of the protocol feature with digest '${digest}'", ("digest", feature_digest)("timestamp", cur_time) ); } break; - case protocol_feature_manager::recognized_t::ready_if_preactivated: - case protocol_feature_manager::recognized_t::ready: + case protocol_feature_set::recognized_t::ready_if_preactivated: + case protocol_feature_set::recognized_t::ready: break; default: if( is_producing_block() ) { @@ -2255,12 +2274,12 @@ void controller::preactivate_feature( const digest_type& feature_digest ) { ("digest", feature_digest) ); - const auto& gpo = my->db.get(); + const auto& pso = my->db.get(); - EOS_ASSERT( std::find( gpo.preactivated_protocol_features.begin(), - gpo.preactivated_protocol_features.end(), + EOS_ASSERT( std::find( pso.preactivated_protocol_features.begin(), + pso.preactivated_protocol_features.end(), feature_digest - ) == gpo.preactivated_protocol_features.end(), + ) == pso.preactivated_protocol_features.end(), protocol_feature_exception, "protocol feature with digest '${digest}' is already pre-activated", ("digest", feature_digest) @@ -2270,30 +2289,30 @@ void controller::preactivate_feature( const digest_type& feature_digest ) { { if( is_protocol_feature_activated( d ) ) return true; - return ( std::find( gpo.preactivated_protocol_features.begin(), - gpo.preactivated_protocol_features.end(), - d ) != gpo.preactivated_protocol_features.end() ); + return ( std::find( pso.preactivated_protocol_features.begin(), + pso.preactivated_protocol_features.end(), + d ) != pso.preactivated_protocol_features.end() ); }; - EOS_ASSERT( my->protocol_features.validate_dependencies( feature_digest, dependency_checker ), + EOS_ASSERT( pfs.validate_dependencies( feature_digest, dependency_checker ), protocol_feature_exception, "not all dependencies of protocol feature with digest '${digest}' have been activated or pre-activated", ("digest", feature_digest) ); - my->db.modify( gpo, [&]( auto& gp ) { - gp.preactivated_protocol_features.push_back( feature_digest ); + my->db.modify( pso, [&]( auto& ps ) { 
+ ps.preactivated_protocol_features.push_back( feature_digest ); } ); } vector controller::get_preactivated_protocol_features()const { - const auto& gpo = my->db.get(); + const auto& pso = my->db.get(); - if( gpo.preactivated_protocol_features.size() == 0 ) return {}; + if( pso.preactivated_protocol_features.size() == 0 ) return {}; vector preactivated_protocol_features; - for( const auto& f : gpo.preactivated_protocol_features ) { + for( const auto& f : pso.preactivated_protocol_features ) { preactivated_protocol_features.emplace_back( f ); } @@ -2314,9 +2333,9 @@ void controller::start_block( block_timestamp_type when, uint16_t confirm_block_ vector new_protocol_feature_activations; - const auto& gpo = my->db.get(); - if( gpo.preactivated_protocol_features.size() > 0 ) { - for( const auto& f : gpo.preactivated_protocol_features ) { + const auto& pso = my->db.get(); + if( pso.preactivated_protocol_features.size() > 0 ) { + for( const auto& f : pso.preactivated_protocol_features ) { new_protocol_feature_activations.emplace_back( f ); } } @@ -2923,9 +2942,9 @@ const flat_set &controller::get_resource_greylist() const { template<> void controller_impl::on_activation() { - db.modify( db.get(), [&]( auto& gp ) { - add_intrinsic_to_whitelist( gp.whitelisted_intrinsics, "preactivate_feature" ); - add_intrinsic_to_whitelist( gp.whitelisted_intrinsics, "is_feature_activated" ); + db.modify( db.get(), [&]( auto& ps ) { + add_intrinsic_to_whitelist( ps.whitelisted_intrinsics, "preactivate_feature" ); + add_intrinsic_to_whitelist( ps.whitelisted_intrinsics, "is_feature_activated" ); } ); } diff --git a/libraries/chain/include/eosio/chain/controller.hpp b/libraries/chain/include/eosio/chain/controller.hpp index 04a24acaef7..6b1c82faea4 100644 --- a/libraries/chain/include/eosio/chain/controller.hpp +++ b/libraries/chain/include/eosio/chain/controller.hpp @@ -98,7 +98,7 @@ namespace eosio { namespace chain { }; explicit controller( const config& cfg ); - controller( const config& cfg, protocol_feature_manager&& pfm ); + controller( const config& cfg, protocol_feature_set&& pfs ); ~controller(); void add_indices(); diff --git a/libraries/chain/include/eosio/chain/exceptions.hpp b/libraries/chain/include/eosio/chain/exceptions.hpp index fce1762ead0..a80213e0425 100644 --- a/libraries/chain/include/eosio/chain/exceptions.hpp +++ b/libraries/chain/include/eosio/chain/exceptions.hpp @@ -531,4 +531,6 @@ namespace eosio { namespace chain { 3250001, "Protocol feature validation exception" ) FC_DECLARE_DERIVED_EXCEPTION( protocol_feature_bad_block_exception, protocol_feature_exception, 3250002, "Protocol feature exception (invalid block)" ) + FC_DECLARE_DERIVED_EXCEPTION( protocol_feature_iterator_exception, protocol_feature_exception, + 3250003, "Protocol feature iterator exception" ) } } // eosio::chain diff --git a/libraries/chain/include/eosio/chain/global_property_object.hpp b/libraries/chain/include/eosio/chain/global_property_object.hpp index 851f8e15893..cbfe7308bd4 100644 --- a/libraries/chain/include/eosio/chain/global_property_object.hpp +++ b/libraries/chain/include/eosio/chain/global_property_object.hpp @@ -19,53 +19,84 @@ namespace eosio { namespace chain { /** * @class global_property_object - * @brief Maintains global state information (committee_member list, current fees) + * @brief Maintains global state information about block producer schedules and chain configuration parameters * @ingroup object * @ingroup implementation - * - * This is an implementation detail. 
The values here are set by committee_members to tune the blockchain parameters. */ class global_property_object : public chainbase::object<global_property_object_type, global_property_object> { - OBJECT_CTOR(global_property_object, (proposed_schedule)(preactivated_protocol_features)(whitelisted_intrinsics)) + OBJECT_CTOR(global_property_object, (proposed_schedule)) public: id_type id; optional<block_num_type> proposed_schedule_block_num; shared_producer_schedule_type proposed_schedule; chain_config configuration; - shared_vector<digest_type> preactivated_protocol_features; - whitelisted_intrinsics_type whitelisted_intrinsics; }; + using global_property_multi_index = chainbase::shared_multi_index_container< + global_property_object, + indexed_by< + ordered_unique<tag<by_id>, + BOOST_MULTI_INDEX_MEMBER(global_property_object, global_property_object::id_type, id) + > + > + >; /** - * @class dynamic_global_property_object - * @brief Maintains global state information (committee_member list, current fees) + * @class protocol_state_object + * @brief Maintains global state information about consensus protocol rules * @ingroup object * @ingroup implementation - * - * This is an implementation detail. The values here are calculated during normal chain operations and reflect the - * current values of global blockchain properties. */ - class dynamic_global_property_object : public chainbase::object<dynamic_global_property_object_type, dynamic_global_property_object> + class protocol_state_object : public chainbase::object<protocol_state_object_type, protocol_state_object> { - OBJECT_CTOR(dynamic_global_property_object) + OBJECT_CTOR(protocol_state_object, (activated_protocol_features)(preactivated_protocol_features)(whitelisted_intrinsics)) - id_type id; - uint64_t global_action_sequence = 0; + public: + struct activated_protocol_feature { + digest_type feature_digest; + uint32_t activation_block_num = 0; + + activated_protocol_feature() = default; + + activated_protocol_feature( const digest_type& feature_digest, uint32_t activation_block_num ) + :feature_digest( feature_digest ) + ,activation_block_num( activation_block_num ) + {} + }; + + public: + id_type id; + shared_vector<activated_protocol_feature> activated_protocol_features; + shared_vector<digest_type> preactivated_protocol_features; + whitelisted_intrinsics_type whitelisted_intrinsics; }; - using global_property_multi_index = chainbase::shared_multi_index_container< - global_property_object, + using protocol_state_multi_index = chainbase::shared_multi_index_container< + protocol_state_object, indexed_by< ordered_unique<tag<by_id>, - BOOST_MULTI_INDEX_MEMBER(global_property_object, global_property_object::id_type, id) + BOOST_MULTI_INDEX_MEMBER(protocol_state_object, protocol_state_object::id_type, id) > > >; + /** + * @class dynamic_global_property_object + * @brief Maintains global state information that changes frequently + * @ingroup object + * @ingroup implementation + */ + class dynamic_global_property_object : public chainbase::object<dynamic_global_property_object_type, dynamic_global_property_object> + { + OBJECT_CTOR(dynamic_global_property_object) + + id_type id; + uint64_t global_action_sequence = 0; + }; + using dynamic_global_property_multi_index = chainbase::shared_multi_index_container< dynamic_global_property_object, indexed_by< @@ -78,14 +109,22 @@ namespace eosio { namespace chain { }} CHAINBASE_SET_INDEX_TYPE(eosio::chain::global_property_object, eosio::chain::global_property_multi_index) +CHAINBASE_SET_INDEX_TYPE(eosio::chain::protocol_state_object, eosio::chain::protocol_state_multi_index) CHAINBASE_SET_INDEX_TYPE(eosio::chain::dynamic_global_property_object, eosio::chain::dynamic_global_property_multi_index) -FC_REFLECT(eosio::chain::dynamic_global_property_object, - (global_action_sequence) +FC_REFLECT(eosio::chain::global_property_object, 
(proposed_schedule_block_num)(proposed_schedule)(configuration) ) -FC_REFLECT(eosio::chain::global_property_object, - (proposed_schedule_block_num)(proposed_schedule)(configuration) - (preactivated_protocol_features)(whitelisted_intrinsics) +FC_REFLECT(eosio::chain::protocol_state_object::activated_protocol_feature, + (feature_digest)(activation_block_num) + ) + +FC_REFLECT(eosio::chain::protocol_state_object, + (activated_protocol_features)(preactivated_protocol_features)(whitelisted_intrinsics) + ) + +FC_REFLECT(eosio::chain::dynamic_global_property_object, + (global_action_sequence) ) diff --git a/libraries/chain/include/eosio/chain/protocol_feature_manager.hpp b/libraries/chain/include/eosio/chain/protocol_feature_manager.hpp index 2ed374d05b7..1bfc440d99d 100644 --- a/libraries/chain/include/eosio/chain/protocol_feature_manager.hpp +++ b/libraries/chain/include/eosio/chain/protocol_feature_manager.hpp @@ -5,6 +5,7 @@ #pragma once #include +#include namespace eosio { namespace chain { @@ -73,7 +74,7 @@ class builtin_protocol_feature : public protocol_feature_base { builtin_protocol_feature_t get_codename()const { return _codename; } - friend class protocol_feature_manager; + friend class protocol_feature_set; public: std::string builtin_feature_codename; @@ -81,10 +82,37 @@ class builtin_protocol_feature : public protocol_feature_base { builtin_protocol_feature_t _codename; }; -class protocol_feature_manager { -public: +struct protocol_feature { + digest_type feature_digest; + digest_type description_digest; + flat_set dependencies; + time_point earliest_allowed_activation_time; + bool preactivation_required = false; + bool enabled = false; + optional builtin_feature; + + fc::variant to_variant( bool include_subjective_restrictions = true, + fc::mutable_variant_object* additional_fields = nullptr )const; + + friend bool operator <( const protocol_feature& lhs, const protocol_feature& rhs ) { + return lhs.feature_digest < rhs.feature_digest; + } + + friend bool operator <( const digest_type& lhs, const protocol_feature& rhs ) { + return lhs < rhs.feature_digest; + } + + friend bool operator <( const protocol_feature& lhs, const digest_type& rhs ) { + return lhs.feature_digest < rhs; + } +}; - protocol_feature_manager(); +class protocol_feature_set { +protected: + using protocol_feature_set_type = std::set< protocol_feature, std::less<> >; + +public: + protocol_feature_set(); enum class recognized_t { unrecognized, @@ -94,70 +122,241 @@ class protocol_feature_manager { ready }; - struct protocol_feature { - digest_type feature_digest; - digest_type description_digest; - flat_set dependencies; - time_point earliest_allowed_activation_time; - bool preactivation_required = false; - bool enabled = false; - optional builtin_feature; + recognized_t is_recognized( const digest_type& feature_digest, time_point now )const; + + optional get_builtin_digest( builtin_protocol_feature_t feature_codename )const; - fc::variant to_variant( bool include_subjective_restrictions = true )const; + const protocol_feature& get_protocol_feature( const digest_type& feature_digest )const; - friend bool operator <( const protocol_feature& lhs, const protocol_feature& rhs ) { - return lhs.feature_digest < rhs.feature_digest; + bool validate_dependencies( const digest_type& feature_digest, + const std::function& validator )const; + + builtin_protocol_feature make_default_builtin_protocol_feature( + builtin_protocol_feature_t codename, + const std::function& handle_dependency + )const; + + const protocol_feature& 
add_feature( const builtin_protocol_feature& f ); + + class const_iterator : public std::iterator { + protected: + protocol_feature_set_type::const_iterator _itr; + + protected: + explicit const_iterator( protocol_feature_set_type::const_iterator itr ) + :_itr(itr) + {} + + const protocol_feature* get_pointer()const { return &*_itr; } + + friend class protocol_feature_set; + + public: + const_iterator() = default; + + friend bool operator == ( const const_iterator& lhs, const const_iterator& rhs ) { + return (lhs._itr == rhs._itr); } - friend bool operator <( const digest_type& lhs, const protocol_feature& rhs ) { - return lhs < rhs.feature_digest; + friend bool operator != ( const const_iterator& lhs, const const_iterator& rhs ) { + return (lhs._itr != rhs._itr); } - friend bool operator <( const protocol_feature& lhs, const digest_type& rhs ) { - return lhs.feature_digest < rhs; + const protocol_feature& operator*()const { + return *get_pointer(); + } + + const protocol_feature* operator->()const { + return get_pointer(); + } + + const_iterator& operator++() { + ++_itr; + return *this; + } + + const_iterator& operator--() { + --_itr; + return *this; + } + + const_iterator operator++(int) { + const_iterator result(*this); + ++(*this); + return result; + } + + const_iterator operator--(int) { + const_iterator result(*this); + --(*this); + return result; } }; - using protocol_feature_set_type = std::set>; + using const_reverse_iterator = std::reverse_iterator; - const protocol_feature_set_type& get_protocol_feature_set()const { return _recognized_protocol_features; } + const_iterator cbegin()const { return const_iterator( _recognized_protocol_features.cbegin() ); } + const_iterator begin()const { return cbegin(); } - recognized_t is_recognized( const digest_type& feature_digest, time_point now )const; + const_iterator cend()const { return const_iterator( _recognized_protocol_features.cend() ); } + const_iterator end()const { return cend(); } - const protocol_feature& get_protocol_feature( const digest_type& feature_digest )const; + const_reverse_iterator crbegin()const { return std::make_reverse_iterator( cend() ); } + const_reverse_iterator rbegin()const { return crbegin(); } - bool is_builtin_activated( builtin_protocol_feature_t feature_codename, uint32_t current_block_num )const; + const_reverse_iterator crend()const { return std::make_reverse_iterator( cbegin() ); } + const_reverse_iterator rend()const { return crend(); } - optional get_builtin_digest( builtin_protocol_feature_t feature_codename )const; + bool empty()const { return _recognized_protocol_features.empty(); } + std::size_t size()const { return _recognized_protocol_features.size(); } + std::size_t max_size()const { return _recognized_protocol_features.max_size(); } - bool validate_dependencies( const digest_type& feature_digest, - const std::function& validator )const; + template + const_iterator find( const K& x )const { + return const_iterator( _recognized_protocol_features.find( x ) ); + } - builtin_protocol_feature make_default_builtin_protocol_feature( - builtin_protocol_feature_t codename, - const std::function& handle_dependency - )const; + template + const_iterator lower_bound( const K& x )const { + return const_iterator( _recognized_protocol_features.lower_bound( x ) ); + } + + template + const_iterator upper_bound( const K& x )const { + return const_iterator( _recognized_protocol_features.upper_bound( x ) ); + } + + friend class protocol_feature_manager; + +protected: + protocol_feature_set_type 
_recognized_protocol_features; + vector _recognized_builtin_protocol_features; +}; - void add_feature( const builtin_protocol_feature& f ); + +class protocol_feature_manager { +public: + + protocol_feature_manager( protocol_feature_set&& pfs ); + + class const_iterator : public std::iterator { + protected: + const protocol_feature_manager* _pfm = nullptr; + std::size_t _index = 0; + + protected: + static constexpr std::size_t end_index = std::numeric_limits::max(); + + explicit const_iterator( const protocol_feature_manager* pfm, std::size_t i = end_index ) + :_pfm(pfm) + ,_index(i) + {} + + const protocol_feature* get_pointer()const; + + friend class protocol_feature_manager; + + public: + const_iterator() = default; + + friend bool operator == ( const const_iterator& lhs, const const_iterator& rhs ) { + return std::tie( lhs._pfm, lhs._index ) == std::tie( rhs._pfm, rhs._index ); + } + + friend bool operator != ( const const_iterator& lhs, const const_iterator& rhs ) { + return !(lhs == rhs); + } + + uint32_t activation_ordinal()const; + + uint32_t activation_block_num()const; + + const protocol_feature& operator*()const { + return *get_pointer(); + } + + const protocol_feature* operator->()const { + return get_pointer(); + } + + const_iterator& operator++(); + + const_iterator& operator--(); + + const_iterator operator++(int) { + const_iterator result(*this); + ++(*this); + return result; + } + + const_iterator operator--(int) { + const_iterator result(*this); + --(*this); + return result; + } + }; + + friend class const_iterator; + + using const_reverse_iterator = std::reverse_iterator; + + void init( chainbase::database& db ); + + bool is_initialized()const { return _initialized; } + + const protocol_feature_set& get_protocol_feature_set()const { return _protocol_feature_set; } + + optional get_builtin_digest( builtin_protocol_feature_t feature_codename )const { + return _protocol_feature_set.get_builtin_digest( feature_codename ); + } + + // All methods below require is_initialized() as a precondition. 
+ + const_iterator cbegin()const; + const_iterator begin()const { return cbegin(); } + + const_iterator cend()const { return const_iterator( this ); } + const_iterator end()const { return cend(); } + + const_reverse_iterator crbegin()const { return std::make_reverse_iterator( cend() ); } + const_reverse_iterator rbegin()const { return crbegin(); } + + const_reverse_iterator crend()const { return std::make_reverse_iterator( cbegin() ); } + const_reverse_iterator rend()const { return crend(); } + + const_iterator at_activation_ordinal( uint32_t activation_ordinal )const; + + const_iterator lower_bound( uint32_t block_num )const; + + const_iterator upper_bound( uint32_t block_num )const; + + + bool is_builtin_activated( builtin_protocol_feature_t feature_codename, uint32_t current_block_num )const; void activate_feature( const digest_type& feature_digest, uint32_t current_block_num ); void popped_blocks_to( uint32_t block_num ); protected: + struct protocol_feature_entry { + protocol_feature_set::const_iterator iterator_to_protocol_feature; + uint32_t activation_block_num; + }; + struct builtin_protocol_feature_entry { - static constexpr uint32_t not_active = std::numeric_limits::max(); static constexpr size_t no_previous = std::numeric_limits::max(); + static constexpr uint32_t not_active = std::numeric_limits::max(); - protocol_feature_set_type::iterator iterator_to_protocol_feature; - uint32_t activation_block_num = not_active; - size_t previous = no_previous; + size_t previous = no_previous; + uint32_t activation_block_num = not_active; }; protected: - protocol_feature_set_type _recognized_protocol_features; + protocol_feature_set _protocol_feature_set; + vector _activated_protocol_features; vector _builtin_protocol_features; size_t _head_of_builtin_activation_list = builtin_protocol_feature_entry::no_previous; + bool _initialized = false; }; } } // namespace eosio::chain diff --git a/libraries/chain/include/eosio/chain/types.hpp b/libraries/chain/include/eosio/chain/types.hpp index 5ce7bf87550..a3332271fbe 100644 --- a/libraries/chain/include/eosio/chain/types.hpp +++ b/libraries/chain/include/eosio/chain/types.hpp @@ -189,6 +189,7 @@ namespace eosio { namespace chain { account_history_object_type, ///< Defined by history_plugin action_history_object_type, ///< Defined by history_plugin reversible_block_object_type, + protocol_state_object_type, OBJECT_TYPE_COUNT ///< Sentry value which contains the number of different object types }; diff --git a/libraries/chain/protocol_feature_manager.cpp b/libraries/chain/protocol_feature_manager.cpp index c58c4719a9f..b3f1bb01676 100644 --- a/libraries/chain/protocol_feature_manager.cpp +++ b/libraries/chain/protocol_feature_manager.cpp @@ -4,8 +4,11 @@ */ #include +#include #include +#include + #include #include @@ -128,13 +131,22 @@ Disallows linking an action to a non-existing permission. 
return enc.result(); } - fc::variant protocol_feature_manager::protocol_feature::to_variant( bool include_subjective_restrictions )const { + fc::variant protocol_feature::to_variant( bool include_subjective_restrictions, + fc::mutable_variant_object* additional_fields )const + { EOS_ASSERT( builtin_feature, protocol_feature_exception, "not a builtin protocol feature" ); fc::mutable_variant_object mvo; mvo( "feature_digest", feature_digest ); + if( additional_fields ) { + for( const auto& e : *additional_fields ) { + if( e.key().compare( "feature_digest" ) != 0 ) + mvo( e.key(), e.value() ); + } + } + if( include_subjective_restrictions ) { fc::mutable_variant_object subjective_restrictions; @@ -165,12 +177,14 @@ Disallows linking an action to a non-existing permission. return fc::variant( std::move(mvo) ); } - protocol_feature_manager::protocol_feature_manager() { - _builtin_protocol_features.reserve( builtin_protocol_feature_codenames.size() ); + protocol_feature_set::protocol_feature_set() + { + _recognized_builtin_protocol_features.reserve( builtin_protocol_feature_codenames.size() ); } - protocol_feature_manager::recognized_t - protocol_feature_manager::is_recognized( const digest_type& feature_digest, time_point now )const { + + protocol_feature_set::recognized_t + protocol_feature_set::is_recognized( const digest_type& feature_digest, time_point now )const { auto itr = _recognized_protocol_features.find( feature_digest ); if( itr == _recognized_protocol_features.end() ) @@ -188,8 +202,19 @@ Disallows linking an action to a non-existing permission. return recognized_t::ready; } - const protocol_feature_manager::protocol_feature& - protocol_feature_manager::get_protocol_feature( const digest_type& feature_digest )const { + optional protocol_feature_set::get_builtin_digest( builtin_protocol_feature_t feature_codename )const { + uint32_t indx = static_cast( feature_codename ); + + if( indx >= _recognized_builtin_protocol_features.size() ) + return {}; + + if( _recognized_builtin_protocol_features[indx] == _recognized_protocol_features.end() ) + return {}; + + return _recognized_builtin_protocol_features[indx]->feature_digest; + } + + const protocol_feature& protocol_feature_set::get_protocol_feature( const digest_type& feature_digest )const { auto itr = _recognized_protocol_features.find( feature_digest ); EOS_ASSERT( itr != _recognized_protocol_features.end(), protocol_feature_exception, @@ -200,31 +225,7 @@ Disallows linking an action to a non-existing permission. 
return *itr; } - bool protocol_feature_manager::is_builtin_activated( builtin_protocol_feature_t feature_codename, - uint32_t current_block_num )const - { - uint32_t indx = static_cast( feature_codename ); - - if( indx >= _builtin_protocol_features.size() ) return false; - - return (_builtin_protocol_features[indx].activation_block_num <= current_block_num); - } - - optional - protocol_feature_manager::get_builtin_digest( builtin_protocol_feature_t feature_codename )const - { - uint32_t indx = static_cast( feature_codename ); - - if( indx >= _builtin_protocol_features.size() ) - return {}; - - if( _builtin_protocol_features[indx].iterator_to_protocol_feature == _recognized_protocol_features.end() ) - return {}; - - return _builtin_protocol_features[indx].iterator_to_protocol_feature->feature_digest; - } - - bool protocol_feature_manager::validate_dependencies( + bool protocol_feature_set::validate_dependencies( const digest_type& feature_digest, const std::function& validator )const { @@ -240,9 +241,9 @@ Disallows linking an action to a non-existing permission. } builtin_protocol_feature - protocol_feature_manager::make_default_builtin_protocol_feature( + protocol_feature_set::make_default_builtin_protocol_feature( builtin_protocol_feature_t codename, - const std::function& handle_dependency + const std::function& handle_dependency )const { auto itr = builtin_protocol_feature_codenames.find( codename ); @@ -254,24 +255,13 @@ Disallows linking an action to a non-existing permission. dependencies.reserve( itr->second.builtin_dependencies.size() ); for( const auto& d : itr->second.builtin_dependencies ) { - handle_dependency( d ); - auto dependency_digest = get_builtin_digest( d ); - EOS_ASSERT( dependency_digest, protocol_feature_exception, - "cannot make default builtin protocol feature with codename '${codename}' since it has a dependency that has not been added yet: ${dependency_codename}", - ("codename", builtin_protocol_feature_codename(itr->first)) - ("dependency_codename", builtin_protocol_feature_codename(d)) - ); - dependencies.insert( *dependency_digest ); + dependencies.insert( handle_dependency( d ) ); } return {itr->first, itr->second.description_digest, std::move(dependencies), itr->second.subjective_restrictions}; } - void protocol_feature_manager::add_feature( const builtin_protocol_feature& f ) { - EOS_ASSERT( _head_of_builtin_activation_list == builtin_protocol_feature_entry::no_previous, - protocol_feature_exception, - "new builtin protocol features cannot be added after a protocol feature has already been activated" ); - + const protocol_feature& protocol_feature_set::add_feature( const builtin_protocol_feature& f ) { auto builtin_itr = builtin_protocol_feature_codenames.find( f._codename ); EOS_ASSERT( builtin_itr != builtin_protocol_feature_codenames.end(), protocol_feature_validation_exception, "Builtin protocol feature has unsupported builtin_protocol_feature_t: ${codename}", @@ -279,8 +269,8 @@ Disallows linking an action to a non-existing permission. 
uint32_t indx = static_cast( f._codename ); - if( indx < _builtin_protocol_features.size() ) { - EOS_ASSERT( _builtin_protocol_features[indx].iterator_to_protocol_feature == _recognized_protocol_features.end(), + if( indx < _recognized_builtin_protocol_features.size() ) { + EOS_ASSERT( _recognized_builtin_protocol_features[indx] == _recognized_protocol_features.end(), protocol_feature_exception, "builtin protocol feature with codename '${codename}' already added", ("codename", f.builtin_feature_codename) ); @@ -348,66 +338,221 @@ Disallows linking an action to a non-existing permission. "builtin protocol feature with codename '${codename}' has a digest of ${digest} but another protocol feature with the same digest has already been added", ("codename", f.builtin_feature_codename)("digest", feature_digest) ); - if( indx >= _builtin_protocol_features.size() ) { - for( auto i =_builtin_protocol_features.size(); i <= indx; ++i ) { - _builtin_protocol_features.push_back( builtin_protocol_feature_entry{ - _recognized_protocol_features.end(), - builtin_protocol_feature_entry::not_active - } ); + if( indx >= _recognized_builtin_protocol_features.size() ) { + for( auto i =_recognized_builtin_protocol_features.size(); i <= indx; ++i ) { + _recognized_builtin_protocol_features.push_back( _recognized_protocol_features.end() ); } } - _builtin_protocol_features[indx].iterator_to_protocol_feature = res.first; + _recognized_builtin_protocol_features[indx] = res.first; + return *res.first; + } + + + + protocol_feature_manager::protocol_feature_manager( protocol_feature_set&& pfs ) + :_protocol_feature_set( std::move(pfs) ) + { + _builtin_protocol_features.resize( _protocol_feature_set._recognized_builtin_protocol_features.size() ); + } + + void protocol_feature_manager::init( chainbase::database& db ) { + EOS_ASSERT( !is_initialized(), protocol_feature_exception, "cannot initialize protocol_feature_manager twice" ); + + + auto reset_initialized = fc::make_scoped_exit( [this]() { _initialized = false; } ); + _initialized = true; + + for( const auto& f : db.get().activated_protocol_features ) { + activate_feature( f.feature_digest, f.activation_block_num ); + } + + reset_initialized.cancel(); + } + + const protocol_feature* protocol_feature_manager::const_iterator::get_pointer()const { + //EOS_ASSERT( _pfm, protocol_feature_iterator_exception, "cannot dereference singular iterator" ); + //EOS_ASSERT( _index != end_index, protocol_feature_iterator_exception, "cannot dereference end iterator" ); + return &*(_pfm->_activated_protocol_features[_index].iterator_to_protocol_feature); + } + + uint32_t protocol_feature_manager::const_iterator::activation_ordinal()const { + EOS_ASSERT( _pfm, + protocol_feature_iterator_exception, + "called activation_ordinal() on singular iterator" + ); + EOS_ASSERT( _index != end_index, + protocol_feature_iterator_exception, + "called activation_ordinal() on end iterator" + ); + + return _index; + } + + uint32_t protocol_feature_manager::const_iterator::activation_block_num()const { + EOS_ASSERT( _pfm, + protocol_feature_iterator_exception, + "called activation_block_num() on singular iterator" + ); + EOS_ASSERT( _index != end_index, + protocol_feature_iterator_exception, + "called activation_block_num() on end iterator" + ); + + return _pfm->_activated_protocol_features[_index].activation_block_num; + } + + protocol_feature_manager::const_iterator& protocol_feature_manager::const_iterator::operator++() { + EOS_ASSERT( _pfm, protocol_feature_iterator_exception, "cannot increment 
singular iterator" ); + EOS_ASSERT( _index != end_index, protocol_feature_iterator_exception, "cannot increment end iterator" ); + + ++_index; + if( _index >= _pfm->_activated_protocol_features.size() ) { + _index = end_index; + } + + return *this; + } + + protocol_feature_manager::const_iterator& protocol_feature_manager::const_iterator::operator--() { + EOS_ASSERT( _pfm, protocol_feature_iterator_exception, "cannot decrement singular iterator" ); + if( _index == end_index ) { + EOS_ASSERT( _pfm->_activated_protocol_features.size() > 0, + protocol_feature_iterator_exception, + "cannot decrement end iterator when no protocol features have been activated" + ); + _index = _pfm->_activated_protocol_features.size() - 1; + } else { + EOS_ASSERT( _index > 0, + protocol_feature_iterator_exception, + "cannot decrement iterator at the beginning of protocol feature activation list" ) + ; + --_index; + } + return *this; + } + + protocol_feature_manager::const_iterator protocol_feature_manager::cbegin()const { + if( _activated_protocol_features.size() == 0 ) { + return cend(); + } else { + return const_iterator( this, 0 ); + } + } + + protocol_feature_manager::const_iterator + protocol_feature_manager::at_activation_ordinal( uint32_t activation_ordinal )const { + if( activation_ordinal >= _activated_protocol_features.size() ) { + return cend(); + } + + return const_iterator{this, static_cast(activation_ordinal)}; + } + + protocol_feature_manager::const_iterator + protocol_feature_manager::lower_bound( uint32_t block_num )const { + const auto begin = _activated_protocol_features.cbegin(); + const auto end = _activated_protocol_features.cend(); + auto itr = std::lower_bound( begin, end, block_num, []( const protocol_feature_entry& lhs, uint32_t rhs ) { + return lhs.activation_block_num < rhs; + } ); + + if( itr == end ) { + return cend(); + } + + return const_iterator{this, static_cast(itr - begin)}; + } + + protocol_feature_manager::const_iterator + protocol_feature_manager::upper_bound( uint32_t block_num )const { + const auto begin = _activated_protocol_features.cbegin(); + const auto end = _activated_protocol_features.cend(); + auto itr = std::upper_bound( begin, end, block_num, []( uint32_t lhs, const protocol_feature_entry& rhs ) { + return lhs < rhs.activation_block_num; + } ); + + if( itr == end ) { + return cend(); + } + + return const_iterator{this, static_cast(itr - begin)}; + } + + bool protocol_feature_manager::is_builtin_activated( builtin_protocol_feature_t feature_codename, + uint32_t current_block_num )const + { + uint32_t indx = static_cast( feature_codename ); + + if( indx >= _builtin_protocol_features.size() ) return false; + + return (_builtin_protocol_features[indx].activation_block_num <= current_block_num); } void protocol_feature_manager::activate_feature( const digest_type& feature_digest, uint32_t current_block_num ) { - auto itr = _recognized_protocol_features.find( feature_digest ); + EOS_ASSERT( is_initialized(), protocol_feature_exception, "protocol_feature_manager is not yet initialized" ); - EOS_ASSERT( itr != _recognized_protocol_features.end(), protocol_feature_exception, + auto itr = _protocol_feature_set.find( feature_digest ); + + EOS_ASSERT( itr != _protocol_feature_set.end(), protocol_feature_exception, "unrecognized protocol feature digest: ${digest}", ("digest", feature_digest) ); - if( itr->builtin_feature ) { - if( _head_of_builtin_activation_list != builtin_protocol_feature_entry::no_previous ) { - auto largest_block_num_of_activated_builtins = 
_builtin_protocol_features[_head_of_builtin_activation_list].activation_block_num;
-         EOS_ASSERT( largest_block_num_of_activated_builtins <= current_block_num,
-                     protocol_feature_exception,
-                     "trying to activate a builtin protocol feature with a current block number of "
-                     "${current_block_num} when the largest activation block number of all activated builtin "
-                     "protocol features is ${largest_block_num_of_activated_builtins}",
-                     ("current_block_num", current_block_num)
-                     ("largest_block_num_of_activated_builtins", largest_block_num_of_activated_builtins)
-         );
-      }
+      if( _activated_protocol_features.size() > 0 ) {
+         const auto& last = _activated_protocol_features.back();
+         EOS_ASSERT( last.activation_block_num <= current_block_num,
+                     protocol_feature_exception,
+                     "last protocol feature activation block num is ${last_activation_block_num} yet "
+                     "attempting to activate protocol feature with a current block num of ${current_block_num}",
+                     ("current_block_num", current_block_num)
+                     ("last_activation_block_num", last.activation_block_num)
+         );
+      }

-      uint32_t indx = static_cast<uint32_t>( *itr->builtin_feature );
+      EOS_ASSERT( itr->builtin_feature,
+                  protocol_feature_exception,
+                  "invariant failure: encountered non-builtin protocol feature which is not yet supported"
+      );

-      EOS_ASSERT( indx < _builtin_protocol_features.size() &&
-                  _builtin_protocol_features[indx].iterator_to_protocol_feature != _recognized_protocol_features.end(),
-                  protocol_feature_exception,
-                  "invariant failure: problem with activating builtin protocol feature with digest: ${digest}",
-                  ("digest", feature_digest) );
+      uint32_t indx = static_cast<uint32_t>( *itr->builtin_feature );

-      EOS_ASSERT( _builtin_protocol_features[indx].activation_block_num == builtin_protocol_feature_entry::not_active,
-                  protocol_feature_exception,
-                  "cannot activate already activated builtin feature with digest: ${digest}",
-                  ("digest", feature_digest) );
+      EOS_ASSERT( indx < _builtin_protocol_features.size(), protocol_feature_exception,
+                  "invariant failure while trying to activate feature with digest '${digest}': "
+                  "unsupported builtin_protocol_feature_t ${codename}",
+                  ("digest", feature_digest)
+                  ("codename", indx)
+      );

-      _builtin_protocol_features[indx].activation_block_num = current_block_num;
-      _builtin_protocol_features[indx].previous = _head_of_builtin_activation_list;
-      _head_of_builtin_activation_list = indx;
-   }
+      EOS_ASSERT( _builtin_protocol_features[indx].activation_block_num == builtin_protocol_feature_entry::not_active,
+                  protocol_feature_exception,
+                  "cannot activate already activated builtin feature with digest: ${digest}",
+                  ("digest", feature_digest)
+      );
+
+      _activated_protocol_features.push_back( protocol_feature_entry{itr, current_block_num} );
+      _builtin_protocol_features[indx].previous = _head_of_builtin_activation_list;
+      _builtin_protocol_features[indx].activation_block_num = current_block_num;
+      _head_of_builtin_activation_list = indx;
    }

    void protocol_feature_manager::popped_blocks_to( uint32_t block_num ) {
+      EOS_ASSERT( is_initialized(), protocol_feature_exception, "protocol_feature_manager is not yet initialized" );
+
       while( _head_of_builtin_activation_list != builtin_protocol_feature_entry::no_previous ) {
          auto& e = _builtin_protocol_features[_head_of_builtin_activation_list];
          if( e.activation_block_num <= block_num ) break;

          _head_of_builtin_activation_list = e.previous;
-         e.activation_block_num = builtin_protocol_feature_entry::not_active;
          e.previous = 
builtin_protocol_feature_entry::no_previous; + e.activation_block_num = builtin_protocol_feature_entry::not_active; + } + + while( _activated_protocol_features.size() > 0 + && block_num < _activated_protocol_features.back().activation_block_num ) + { + _activated_protocol_features.pop_back(); } } diff --git a/libraries/chain/wasm_interface.cpp b/libraries/chain/wasm_interface.cpp index 6a37bc0e407..1b4c546b52e 100644 --- a/libraries/chain/wasm_interface.cpp +++ b/libraries/chain/wasm_interface.cpp @@ -46,9 +46,9 @@ namespace eosio { namespace chain { wasm_validations::wasm_binary_validation validator(control, module); validator.validate(); - const auto& gpo = control.db().get(); + const auto& pso = control.db().get(); - root_resolver resolver( gpo.whitelisted_intrinsics ); + root_resolver resolver( pso.whitelisted_intrinsics ); LinkResult link_result = linkModule(module, resolver); //there are a couple opportunties for improvement here-- diff --git a/libraries/testing/include/eosio/testing/tester.hpp b/libraries/testing/include/eosio/testing/tester.hpp index 3ee3208a39a..b75196d7f78 100644 --- a/libraries/testing/include/eosio/testing/tester.hpp +++ b/libraries/testing/include/eosio/testing/tester.hpp @@ -75,7 +75,7 @@ namespace eosio { namespace testing { bool expect_assert_message(const fc::exception& ex, string expected); - protocol_feature_manager make_protocol_feature_manager(); + protocol_feature_set make_protocol_feature_set(); /** * @class tester @@ -94,11 +94,11 @@ namespace eosio { namespace testing { void init(const setup_policy policy = setup_policy::full, db_read_mode read_mode = db_read_mode::SPECULATIVE); void init(controller::config config, const snapshot_reader_ptr& snapshot = nullptr); - void init(controller::config config, protocol_feature_manager&& pfm, const snapshot_reader_ptr& snapshot = nullptr); + void init(controller::config config, protocol_feature_set&& pfs, const snapshot_reader_ptr& snapshot = nullptr); void execute_setup_policy(const setup_policy policy); void close(); - void open( protocol_feature_manager&& pfm, const snapshot_reader_ptr& snapshot); + void open( protocol_feature_set&& pfs, const snapshot_reader_ptr& snapshot); void open( const snapshot_reader_ptr& snapshot); bool is_same_chain( base_tester& other ); @@ -328,8 +328,8 @@ namespace eosio { namespace testing { init(config); } - tester(controller::config config, protocol_feature_manager&& pfm) { - init(config, std::move(pfm)); + tester(controller::config config, protocol_feature_set&& pfs) { + init(config, std::move(pfs)); } signed_block_ptr produce_block( fc::microseconds skip_time = fc::milliseconds(config::block_interval_ms), uint32_t skip_flag = 0/*skip_missed_block_penalty*/ )override { @@ -394,7 +394,7 @@ namespace eosio { namespace testing { vcfg.trusted_producers = trusted_producers; - validating_node = std::make_unique(vcfg, make_protocol_feature_manager()); + validating_node = std::make_unique(vcfg, make_protocol_feature_set()); validating_node->add_indices(); validating_node->startup( []() { return false; } ); @@ -409,7 +409,7 @@ namespace eosio { namespace testing { vcfg.blocks_dir = vcfg.blocks_dir.parent_path() / std::string("v_").append( vcfg.blocks_dir.filename().generic_string() ); vcfg.state_dir = vcfg.state_dir.parent_path() / std::string("v_").append( vcfg.state_dir.filename().generic_string() ); - validating_node = std::make_unique(vcfg, make_protocol_feature_manager()); + validating_node = std::make_unique(vcfg, make_protocol_feature_set()); validating_node->add_indices(); 
validating_node->startup( []() { return false; } ); @@ -459,7 +459,7 @@ namespace eosio { namespace testing { hbh.producer == vn_hbh.producer; validating_node.reset(); - validating_node = std::make_unique(vcfg, make_protocol_feature_manager()); + validating_node = std::make_unique(vcfg, make_protocol_feature_set()); validating_node->add_indices(); validating_node->startup( []() { return false; } ); diff --git a/libraries/testing/tester.cpp b/libraries/testing/tester.cpp index dce7adb616d..d946c010ca6 100644 --- a/libraries/testing/tester.cpp +++ b/libraries/testing/tester.cpp @@ -79,28 +79,37 @@ namespace eosio { namespace testing { memcpy( data.data(), obj.value.data(), obj.value.size() ); } - protocol_feature_manager make_protocol_feature_manager() { - protocol_feature_manager pfm; - - set visited_builtins; - - std::function add_builtins = - [&pfm, &visited_builtins, &add_builtins]( builtin_protocol_feature_t codename ) -> void { - auto res = visited_builtins.emplace( codename ); - if( !res.second ) return; + protocol_feature_set make_protocol_feature_set() { + protocol_feature_set pfs; + + map< builtin_protocol_feature_t, optional > visited_builtins; + + std::function add_builtins = + [&pfs, &visited_builtins, &add_builtins]( builtin_protocol_feature_t codename ) -> digest_type { + auto res = visited_builtins.emplace( codename, optional() ); + if( !res.second ) { + EOS_ASSERT( res.first->second, protocol_feature_exception, + "invariant failure: cycle found in builtin protocol feature dependencies" + ); + return *res.first->second; + } - auto f = pfm.make_default_builtin_protocol_feature( codename, [&add_builtins]( builtin_protocol_feature_t d ) { - add_builtins( d ); + auto f = pfs.make_default_builtin_protocol_feature( codename, + [&add_builtins]( builtin_protocol_feature_t d ) { + return add_builtins( d ); } ); - pfm.add_feature( f ); + const auto& pf = pfs.add_feature( f ); + res.first->second = pf.feature_digest; + + return pf.feature_digest; }; for( const auto& p : builtin_protocol_feature_codenames ) { add_builtins( p.first ); } - return pfm; + return pfs; } bool base_tester::is_same_chain( base_tester& other ) { @@ -136,9 +145,9 @@ namespace eosio { namespace testing { open(snapshot); } - void base_tester::init(controller::config config, protocol_feature_manager&& pfm, const snapshot_reader_ptr& snapshot) { + void base_tester::init(controller::config config, protocol_feature_set&& pfs, const snapshot_reader_ptr& snapshot) { cfg = config; - open(std::move(pfm), snapshot); + open(std::move(pfs), snapshot); } void base_tester::execute_setup_policy(const setup_policy policy) { @@ -186,11 +195,11 @@ namespace eosio { namespace testing { } void base_tester::open( const snapshot_reader_ptr& snapshot ) { - open( make_protocol_feature_manager(), snapshot ); + open( make_protocol_feature_set(), snapshot ); } - void base_tester::open( protocol_feature_manager&& pfm, const snapshot_reader_ptr& snapshot ) { - control.reset( new controller(cfg, std::move(pfm)) ); + void base_tester::open( protocol_feature_set&& pfs, const snapshot_reader_ptr& snapshot ) { + control.reset( new controller(cfg, std::move(pfs)) ); control->add_indices(); control->startup( []() { return false; }, snapshot); chain_transactions.clear(); @@ -962,6 +971,7 @@ namespace eosio { namespace testing { void base_tester::preactivate_all_builtin_protocol_features() { const auto& pfm = control->get_protocol_feature_manager(); + const auto& pfs = pfm.get_protocol_feature_set(); const auto current_block_num = 
control->head_block_num() + (control->is_building_block() ? 1 : 0); const auto current_block_time = ( control->is_building_block() ? control->pending_block_time() : control->head_block_time() + fc::milliseconds(config::block_interval_ms) ); @@ -970,9 +980,9 @@ namespace eosio { namespace testing { vector preactivations; std::function add_digests = - [&pfm, current_block_num, current_block_time, &preactivation_set, &preactivations, &add_digests] + [&pfm, &pfs, current_block_num, current_block_time, &preactivation_set, &preactivations, &add_digests] ( const digest_type& feature_digest ) { - const auto& pf = pfm.get_protocol_feature( feature_digest ); + const auto& pf = pfs.get_protocol_feature( feature_digest ); FC_ASSERT( pf.builtin_feature, "called add_digests on a non-builtin protocol feature" ); if( !pf.enabled || pf.earliest_allowed_activation_time > current_block_time || pfm.is_builtin_activated( *pf.builtin_feature, current_block_num ) ) return; @@ -988,7 +998,7 @@ namespace eosio { namespace testing { }; for( const auto& f : builtin_protocol_feature_codenames ) { - auto digest = pfm.get_builtin_digest( f.first ); + auto digest = pfs.get_builtin_digest( f.first ); if( !digest ) continue; add_digests( *digest ); } diff --git a/plugins/chain_api_plugin/chain_api_plugin.cpp b/plugins/chain_api_plugin/chain_api_plugin.cpp index 8b9fd3f843c..3d89a74b21e 100644 --- a/plugins/chain_api_plugin/chain_api_plugin.cpp +++ b/plugins/chain_api_plugin/chain_api_plugin.cpp @@ -84,6 +84,7 @@ void chain_api_plugin::plugin_startup() { _http_plugin.add_api({ CHAIN_RO_CALL(get_info, 200l), + CHAIN_RO_CALL(get_activated_protocol_features, 200), CHAIN_RO_CALL(get_block, 200), CHAIN_RO_CALL(get_block_header_state, 200), CHAIN_RO_CALL(get_account, 200), diff --git a/plugins/chain_plugin/chain_plugin.cpp b/plugins/chain_plugin/chain_plugin.cpp index c02c6be9e29..9e806094e66 100644 --- a/plugins/chain_plugin/chain_plugin.cpp +++ b/plugins/chain_plugin/chain_plugin.cpp @@ -366,10 +366,10 @@ optional read_builtin_protocol_feature( const fc::path return {}; } -protocol_feature_manager initialize_protocol_features( const fc::path& p, bool populate_missing_builtins = true ) { +protocol_feature_set initialize_protocol_features( const fc::path& p, bool populate_missing_builtins = true ) { using boost::filesystem::directory_iterator; - protocol_feature_manager pfm; + protocol_feature_set pfs; bool directory_exists = true; @@ -425,7 +425,7 @@ protocol_feature_manager initialize_protocol_features( const fc::path& p, bool p map found_builtin_protocol_features; map > builtin_protocol_features_to_add; // The bool in the pair is set to true if the builtin protocol feature has already been visited to add - set visited_builtins; + map< builtin_protocol_feature_t, optional > visited_builtins; // Read all builtin protocol features if( directory_exists ) { @@ -458,12 +458,12 @@ protocol_feature_manager initialize_protocol_features( const fc::path& p, bool p // Add builtin protocol features to the protocol feature manager in the right order (to satisfy dependencies) using itr_type = map>::iterator; std::function add_protocol_feature = - [&pfm, &builtin_protocol_features_to_add, &visited_builtins, &log_recognized_protocol_feature, &add_protocol_feature]( const itr_type& itr ) -> void { + [&pfs, &builtin_protocol_features_to_add, &visited_builtins, &log_recognized_protocol_feature, &add_protocol_feature]( const itr_type& itr ) -> void { if( itr->second.second ) { return; } else { itr->second.second = true; - visited_builtins.insert( 
itr->second.first.get_codename() ); + visited_builtins.emplace( itr->second.first.get_codename(), itr->first ); } for( const auto& d : itr->second.first.dependencies ) { @@ -473,7 +473,7 @@ protocol_feature_manager initialize_protocol_features( const fc::path& p, bool p } } - pfm.add_feature( itr->second.first ); + pfs.add_feature( itr->second.first ); log_recognized_protocol_feature( itr->second.first, itr->first ); }; @@ -509,28 +509,34 @@ protocol_feature_manager initialize_protocol_features( const fc::path& p, bool p ); }; - std::function add_missing_builtins = - [&pfm, &visited_builtins, &output_protocol_feature, &log_recognized_protocol_feature, &add_missing_builtins, populate_missing_builtins] - ( builtin_protocol_feature_t codename ) -> void { - auto res = visited_builtins.emplace( codename ); - if( !res.second ) return; + std::function add_missing_builtins = + [&pfs, &visited_builtins, &output_protocol_feature, &log_recognized_protocol_feature, &add_missing_builtins, populate_missing_builtins] + ( builtin_protocol_feature_t codename ) -> digest_type { + auto res = visited_builtins.emplace( codename, optional() ); + if( !res.second ) { + EOS_ASSERT( res.first->second, protocol_feature_exception, + "invariant failure: cycle found in builtin protocol feature dependencies" + ); + return *res.first->second; + } - auto f = pfm.make_default_builtin_protocol_feature( codename, + auto f = pfs.make_default_builtin_protocol_feature( codename, [&add_missing_builtins]( builtin_protocol_feature_t d ) { - add_missing_builtins( d ); + return add_missing_builtins( d ); } ); if( !populate_missing_builtins ) f.subjective_restrictions.enabled = false; - pfm.add_feature( f ); - - const auto feature_digest = f.digest(); + const auto& pf = pfs.add_feature( f ); + res.first->second = pf.feature_digest; - log_recognized_protocol_feature( f, feature_digest ); + log_recognized_protocol_feature( f, pf.feature_digest ); if( populate_missing_builtins ) - output_protocol_feature( f, feature_digest ); + output_protocol_feature( f, pf.feature_digest ); + + return pf.feature_digest; }; for( const auto& p : builtin_protocol_feature_codenames ) { @@ -540,7 +546,7 @@ protocol_feature_manager initialize_protocol_features( const fc::path& p, bool p add_missing_builtins( p.first ); } - return pfm; + return pfs; } void chain_plugin::plugin_initialize(const variables_map& options) { @@ -593,7 +599,7 @@ void chain_plugin::plugin_initialize(const variables_map& options) { my->blocks_dir = bld; } - protocol_feature_manager pfm; + protocol_feature_set pfs; { fc::path protocol_features_dir; auto pfd = options.at( "protocol-features-dir" ).as(); @@ -602,7 +608,7 @@ void chain_plugin::plugin_initialize(const variables_map& options) { else protocol_features_dir = pfd; - pfm = initialize_protocol_features( protocol_features_dir ); + pfs = initialize_protocol_features( protocol_features_dir ); } if( options.count("checkpoint") ) { @@ -880,7 +886,7 @@ void chain_plugin::plugin_initialize(const variables_map& options) { my->chain_config->db_hugepage_paths = options.at("database-hugepage-path").as>(); #endif - my->chain.emplace( *my->chain_config, std::move(pfm) ); + my->chain.emplace( *my->chain_config, std::move(pfs) ); my->chain_id.emplace( my->chain->get_chain_id()); // set up method providers @@ -1286,6 +1292,77 @@ read_only::get_info_results read_only::get_info(const read_only::get_info_params }; } +read_only::get_activated_protocol_features_results +read_only::get_activated_protocol_features( const 
read_only::get_activated_protocol_features_params& params )const { + read_only::get_activated_protocol_features_results result; + const auto& pfm = db.get_protocol_feature_manager(); + + uint32_t lower_bound_value = std::numeric_limits::lowest(); + uint32_t upper_bound_value = std::numeric_limits::max(); + + if( params.lower_bound ) { + lower_bound_value = *params.lower_bound; + } + + if( params.upper_bound ) { + upper_bound_value = *params.upper_bound; + } + + if( upper_bound_value < lower_bound_value ) + return result; + + auto walk_range = [&]( auto itr, auto end_itr, auto&& convert_iterator ) { + fc::mutable_variant_object mvo; + mvo( "activation_ordinal", 0 ); + mvo( "activation_block_num", 0 ); + + auto& activation_ordinal_value = mvo["activation_ordinal"]; + auto& activation_block_num_value = mvo["activation_block_num"]; + + auto cur_time = fc::time_point::now(); + auto end_time = cur_time + fc::microseconds(1000 * 10); /// 10ms max time + for( unsigned int count = 0; + cur_time <= end_time && count < params.limit && itr != end_itr; + ++itr, cur_time = fc::time_point::now() ) + { + const auto& conv_itr = convert_iterator( itr ); + activation_ordinal_value = conv_itr.activation_ordinal(); + activation_block_num_value = conv_itr.activation_block_num(); + + result.activated_protocol_features.emplace_back( conv_itr->to_variant( false, &mvo ) ); + ++count; + } + if( itr != end_itr ) { + result.more = convert_iterator( itr ).activation_ordinal() ; + } + }; + + auto get_next_if_not_end = [&pfm]( auto&& itr ) { + if( itr == pfm.cend() ) return itr; + + ++itr; + return itr; + }; + + wlog( "lower_bound_value = ${value}", ("value", lower_bound_value) ); + wlog( "upper_bound_value = ${value}", ("value", upper_bound_value) ); + + auto lower = ( params.search_by_block_num ? pfm.lower_bound( lower_bound_value ) + : pfm.at_activation_ordinal( lower_bound_value ) ); + + auto upper = ( params.search_by_block_num ? 
pfm.upper_bound( upper_bound_value )
+                                             : get_next_if_not_end( pfm.at_activation_ordinal( upper_bound_value ) ) );
+
+   if( params.reverse ) {
+      walk_range( std::make_reverse_iterator(upper), std::make_reverse_iterator(lower),
+                  []( auto&& ritr ) { return --(ritr.base()); } );
+   } else {
+      walk_range( lower, upper, []( auto&& itr ) { return itr; } );
+   }
+
+   return result;
+}
+
 uint64_t read_only::get_table_index_name(const read_only::get_table_rows_params& p, bool& primary) {
    using boost::algorithm::starts_with;
    // see multi_index packing of index name
diff --git a/plugins/chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp b/plugins/chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp
index 7abb15e8b90..733bc32825f 100644
--- a/plugins/chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp
+++ b/plugins/chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp
@@ -109,6 +109,21 @@ class read_only {
    };
    get_info_results get_info(const get_info_params&) const;

+   struct get_activated_protocol_features_params {
+      optional<uint32_t>  lower_bound;
+      optional<uint32_t>  upper_bound;
+      uint32_t            limit = 10;
+      bool                search_by_block_num = false;
+      bool                reverse = false;
+   };
+
+   struct get_activated_protocol_features_results {
+      fc::variants        activated_protocol_features;
+      optional<uint32_t>  more;
+   };
+
+   get_activated_protocol_features_results get_activated_protocol_features( const get_activated_protocol_features_params& params )const;
+
    struct producer_info {
       name producer_name;
    };
@@ -711,6 +726,8 @@ FC_REFLECT( eosio::chain_apis::permission, (perm_name)(parent)(required_auth) )
 FC_REFLECT(eosio::chain_apis::empty, )
 FC_REFLECT(eosio::chain_apis::read_only::get_info_results, (server_version)(chain_id)(head_block_num)(last_irreversible_block_num)(last_irreversible_block_id)(head_block_id)(head_block_time)(head_block_producer)(virtual_block_cpu_limit)(virtual_block_net_limit)(block_cpu_limit)(block_net_limit)(server_version_string)(fork_db_head_block_num)(fork_db_head_block_id) )
+FC_REFLECT(eosio::chain_apis::read_only::get_activated_protocol_features_params, (lower_bound)(upper_bound)(limit)(search_by_block_num)(reverse) )
+FC_REFLECT(eosio::chain_apis::read_only::get_activated_protocol_features_results, (activated_protocol_features)(more) )
 FC_REFLECT(eosio::chain_apis::read_only::get_block_params, (block_num_or_id))
 FC_REFLECT(eosio::chain_apis::read_only::get_block_header_state_params, (block_num_or_id))
diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp
index 717fb534158..c693898e19b 100644
--- a/plugins/producer_plugin/producer_plugin.cpp
+++ b/plugins/producer_plugin/producer_plugin.cpp
@@ -984,16 +984,15 @@ void producer_plugin::schedule_protocol_feature_activations( const scheduled_pro
 fc::variants producer_plugin::get_supported_protocol_features( const get_supported_protocol_features_params& params ) const {
    fc::variants results;
    const chain::controller& chain = my->chain_plug->chain();
-   const auto& pfm = chain.get_protocol_feature_manager();
-   const auto& pfs = pfm.get_protocol_feature_set();
+   const auto& pfs = chain.get_protocol_feature_manager().get_protocol_feature_set();
    const auto next_block_time = chain.head_block_time() + fc::milliseconds(config::block_interval_ms);

    flat_map<digest_type, bool>  visited_protocol_features;
    visited_protocol_features.reserve( pfs.size() );

-   std::function<bool(const protocol_feature_manager::protocol_feature& pf)> add_feature =
-      [&results, &pfm, &params, next_block_time, &visited_protocol_features, &add_feature]
-      ( const protocol_feature_manager::protocol_feature& pf ) -> bool {
+   std::function<bool(const protocol_feature& pf)> add_feature =
+      [&results, &pfs, &params, next_block_time, &visited_protocol_features, &add_feature]
+      ( const protocol_feature& pf ) -> bool {
       if( ( params.exclude_disabled || params.exclude_unactivatable ) && !pf.enabled ) return false;
       if( params.exclude_unactivatable && ( next_block_time < pf.earliest_allowed_activation_time ) ) return false;
@@ -1002,7 +1001,7 @@
       const auto original_size = results.size();

       for( const auto& dependency : pf.dependencies ) {
-         if( !add_feature( pfm.get_protocol_feature( dependency ) ) ) {
+         if( !add_feature( pfs.get_protocol_feature( dependency ) ) ) {
            results.resize( original_size );
            return false;
         }

From f23069d9895f335788d6bd4d9e41a598e62b6c9a Mon Sep 17 00:00:00 2001
From: arhag
Date: Fri, 15 Mar 2019 23:36:12 -0400
Subject: [PATCH 144/680] add protocol_feature_tests/activate_and_restart unit
 test

---
 unittests/protocol_feature_tests.cpp | 28 ++++++++++++++++++++++++++++
 1 file changed, 28 insertions(+)

diff --git a/unittests/protocol_feature_tests.cpp b/unittests/protocol_feature_tests.cpp
index 6d5fb9bd4bc..514acb56ae8 100644
--- a/unittests/protocol_feature_tests.cpp
+++ b/unittests/protocol_feature_tests.cpp
@@ -75,6 +75,34 @@ BOOST_AUTO_TEST_CASE( activate_preactivate_feature ) try {

 } FC_LOG_AND_RETHROW()

+BOOST_AUTO_TEST_CASE( activate_and_restart ) try {
+   tester c( setup_policy::none );
+   const auto& pfm = c.control->get_protocol_feature_manager();
+
+   auto pfs = pfm.get_protocol_feature_set(); // make copy of protocol feature set
+
+   auto d = pfm.get_builtin_digest( builtin_protocol_feature_t::preactivate_feature );
+   BOOST_REQUIRE( d );
+
+   BOOST_CHECK( !c.control->is_builtin_activated( builtin_protocol_feature_t::preactivate_feature ) );
+
+   // Activate PREACTIVATE_FEATURE.
+ c.schedule_protocol_features_wo_preactivation({ *d }); + c.produce_blocks(2); + + auto head_block_num = c.control->head_block_num(); + + BOOST_CHECK( c.control->is_builtin_activated( builtin_protocol_feature_t::preactivate_feature ) ); + + c.close(); + c.open( std::move( pfs ), nullptr ); + + BOOST_CHECK_EQUAL( head_block_num, c.control->head_block_num() ); + + BOOST_CHECK( c.control->is_builtin_activated( builtin_protocol_feature_t::preactivate_feature ) ); + +} FC_LOG_AND_RETHROW() + BOOST_AUTO_TEST_CASE( double_preactivation ) try { tester c( setup_policy::preactivate_feature_and_new_bios ); const auto& pfm = c.control->get_protocol_feature_manager(); From a15322a513b9525a217b34f2babb407921c5c5c7 Mon Sep 17 00:00:00 2001 From: arhag Date: Fri, 15 Mar 2019 23:45:25 -0400 Subject: [PATCH 145/680] port over @taokayan's producer_plugin fixes from EOSIO/eos#6949 --- plugins/producer_plugin/producer_plugin.cpp | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp index 717fb534158..3facbddceb0 100644 --- a/plugins/producer_plugin/producer_plugin.cpp +++ b/plugins/producer_plugin/producer_plugin.cpp @@ -143,6 +143,7 @@ class producer_plugin_impl : public std::enable_shared_from_this _protocol_features_to_activate; + bool _protocol_features_signaled = false; // to mark whether it has been signaled in start_block time_point _last_signed_block_time; time_point _start_time = fc::time_point::now(); @@ -979,6 +980,7 @@ void producer_plugin::schedule_protocol_feature_activations( const scheduled_pro invalid_protocol_features_to_activate, "duplicate digests" ); chain.validate_protocol_features( schedule.protocol_features_to_activate ); my->_protocol_features_to_activate = schedule.protocol_features_to_activate; + my->_protocol_features_signaled = false; } fc::variants producer_plugin::get_supported_protocol_features( const get_supported_protocol_features_params& params ) const { @@ -1209,6 +1211,7 @@ producer_plugin_impl::start_block_result producer_plugin_impl::start_block() { features_to_activate.clear(); } std::swap( features_to_activate, protocol_features_to_activate ); + _protocol_features_signaled = true; ilog( "signaling activation of the following protocol features in block ${num}: ${features_to_activate}", ("num", hbs->block_num + 1)("features_to_activate", features_to_activate) ); } @@ -1663,7 +1666,10 @@ void producer_plugin_impl::produce_block() { EOS_ASSERT(signature_provider_itr != _signature_providers.end(), producer_priv_key_not_found, "Attempting to produce a block for which we don't have the private key"); - _protocol_features_to_activate.clear(); // clear _protocol_features_to_activate as it is already set in pending_block + if (_protocol_features_signaled) { + _protocol_features_to_activate.clear(); // clear _protocol_features_to_activate as it is already set in pending_block + _protocol_features_signaled = false; + } //idump( (fc::time_point::now() - chain.pending_block_time()) ); chain.finalize_block( [&]( const digest_type& d ) { From 0d564cf47bd2aba50f1b82007f7869ac1bff53e7 Mon Sep 17 00:00:00 2001 From: Kayan Date: Mon, 18 Mar 2019 17:20:55 +0800 Subject: [PATCH 146/680] fix python tests --- tests/CMakeLists.txt | 1 + tests/prod_preactivation_test.py | 31 ++++++++++++------------------- 2 files changed, 13 insertions(+), 19 deletions(-) diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index d550a4cca43..cc1aa65b314 100644 --- a/tests/CMakeLists.txt +++ 
b/tests/CMakeLists.txt @@ -45,6 +45,7 @@ configure_file(${CMAKE_CURRENT_SOURCE_DIR}/consensus-validation-malicious-produc configure_file(${CMAKE_CURRENT_SOURCE_DIR}/validate-dirty-db.py ${CMAKE_CURRENT_BINARY_DIR}/validate-dirty-db.py COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/launcher_test.py ${CMAKE_CURRENT_BINARY_DIR}/launcher_test.py COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/db_modes_test.sh ${CMAKE_CURRENT_BINARY_DIR}/db_modes_test.sh COPYONLY) +configure_file(${CMAKE_CURRENT_SOURCE_DIR}/prod_preactivation_test.py ${CMAKE_CURRENT_BINARY_DIR}/prod_preactivation_test.py COPYONLY) #To run plugin_test with all log from blockchain displayed, put --verbose after --, i.e. plugin_test -- --verbose add_test(NAME plugin_test COMMAND plugin_test --report_level=detailed --color_output) diff --git a/tests/prod_preactivation_test.py b/tests/prod_preactivation_test.py index cef0d844380..2873d9b75f1 100755 --- a/tests/prod_preactivation_test.py +++ b/tests/prod_preactivation_test.py @@ -12,7 +12,7 @@ import time ############################################################### -# nodeos_run_test +# prod_preactivation_test # --dump-error-details # --keep-logs ############################################################### @@ -22,7 +22,7 @@ cmdError=Utils.cmdError from core_symbol import CORE_SYMBOL -args = TestHelper.parse_args({"--host","--port","--prod-count","--defproducera_prvt_key","--defproducerb_prvt_key","--mongodb" +args = TestHelper.parse_args({"--host","--port","--defproducera_prvt_key","--defproducerb_prvt_key","--mongodb" ,"--dump-error-details","--dont-launch","--keep-logs","-v","--leave-running","--only-bios","--clean-run" ,"--sanity-test","--p2p-plugin","--wallet-port"}) server=args.host @@ -35,7 +35,7 @@ keepLogs=args.keep_logs dontLaunch=args.dont_launch dontKill=args.leave_running -prodCount=2 # args.prod_count +prodCount=2 onlyBios=args.only_bios killAll=args.clean_run sanityTest=args.sanity_test @@ -53,8 +53,6 @@ WalletdName=Utils.EosWalletName ClientName="cleos" -timeout = .5 * 12 * 2 + 60 # time for finalization with 1 producer + 60 seconds padding -Utils.setIrreversibleTimeout(timeout) try: TestHelper.printSystemInfo("BEGIN prod_preactivation_test.py") @@ -69,7 +67,7 @@ cluster.killall(allInstances=killAll) cluster.cleanup() Print("Stand up cluster") - if cluster.launch(pnodes=prodCount, totalNodes=prodCount, prodCount=prodCount, onlyBios=onlyBios, dontBootstrap=dontBootstrap, p2pPlugin=p2pPlugin, extraNodeosArgs=" --plugin eosio::producer_api_plugin") is False: + if cluster.launch(pnodes=prodCount, totalNodes=prodCount, prodCount=prodCount, onlyBios=onlyBios, dontBootstrap=dontBootstrap, p2pPlugin=p2pPlugin, useBiosBootFile=False, extraNodeosArgs=" --plugin eosio::producer_api_plugin") is False: cmdError("launcher") errorExit("Failed to stand up eos cluster.") @@ -80,13 +78,11 @@ cmd = "curl %s/v1/producer/get_supported_protocol_features" % (node.endpointHttp) Print("try to get supported feature list from Node 0 with cmd: %s" % (cmd)) feature0=Utils.runCmdReturnJson(cmd) - #Print("feature list:", feature0) node = cluster.getNode(1) cmd = "curl %s/v1/producer/get_supported_protocol_features" % (node.endpointHttp) Print("try to get supported feature list from Node 1 with cmd: %s" % (cmd)) feature1=Utils.runCmdReturnJson(cmd) - #Print("feature list:", feature1) if feature0 != feature1: errorExit("feature list mismatch between node 0 and node 1") @@ -99,7 +95,7 @@ digest = "" for i in range(0, len(feature0)): feature = feature0[i] - if 
feature["specification"][i]["value"] != "PREACTIVATE_FEATURE": + if feature["specification"][0]["value"] != "PREACTIVATE_FEATURE": continue else: digest = feature["feature_digest"] @@ -118,23 +114,21 @@ Print("publish a new bios contract %s should fails because env.is_feature_activated unresolveable" % (contractDir)) retMap = node0.publishContract("eosio", contractDir, wasmFile, abiFile, True, shouldFail=True) - #Print(retMap) if retMap["output"].decode("utf-8").find("unresolveable") < 0: errorExit("bios contract not result in expected unresolveable error") - secwait = 60 - Print("Wait for defproducerb to produce...") + secwait = 30 + Print("Wait for defproducera to produce...") node = cluster.getNode(1) while secwait > 0: info = node.getInfo() - #Print("head producer:", info["head_block_producer"]) - if info["head_block_producer"] == "defproducerb": #defproducerb is in node0 + if info["head_block_producer"] == "defproducera": #defproducera is in node0 break time.sleep(1) secwait = secwait - 1 if secwait <= 0: - errorExit("No producer of defproducerb") + errorExit("No producer of defproducera") cmd = "curl --data-binary '{\"protocol_features_to_activate\":[\"%s\"]}' %s/v1/producer/schedule_protocol_feature_activations" % (digest, node.endpointHttp) @@ -153,11 +147,10 @@ errorExit("bios contract not result in expected unresolveable error") Print("now wait for node 1 produce a block....(take some minutes)...") - secwait = 480 # wait for node1 produce a block + secwait = 30 # wait for node1 produce a block while secwait > 0: info = node.getInfo() - #Print("head producer:", info["head_block_producer"]) - if info["head_block_producer"] >= "defproducerm" and info["head_block_producer"] <= "defproduceru": + if (info["head_block_producer"] == "defproducerl") or (info["head_block_producer"] == "defproducerm"): break time.sleep(1) secwait = secwait - 1 @@ -165,7 +158,7 @@ if secwait <= 0: errorExit("No blocks produced by node 1") - time.sleep(1) + time.sleep(0.6) retMap = node0.publishContract("eosio", contractDir, wasmFile, abiFile, True) Print("sucessfully set new contract with new intrinsic!!!") From 6c54979d75be46c9ef1457f1c350d8cde7ca628f Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Mon, 18 Mar 2019 12:14:05 -0500 Subject: [PATCH 147/680] Update appbase to shutdown fix --- libraries/appbase | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/appbase b/libraries/appbase index f97eaef38f0..be9285b9600 160000 --- a/libraries/appbase +++ b/libraries/appbase @@ -1 +1 @@ -Subproject commit f97eaef38f09d3e0a261540c6e0f5868b0bf61e9 +Subproject commit be9285b9600a109baa8704c310f2c3abaf595d2c From 9564d86f647090352494cb8b43f726a289f57a49 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Mon, 18 Mar 2019 13:01:42 -0500 Subject: [PATCH 148/680] Update to appbase master with shutdown fix --- libraries/appbase | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/appbase b/libraries/appbase index be9285b9600..013246f52f1 160000 --- a/libraries/appbase +++ b/libraries/appbase @@ -1 +1 @@ -Subproject commit be9285b9600a109baa8704c310f2c3abaf595d2c +Subproject commit 013246f52f13a7bc129193c3a64e6cd0cea44ac0 From a90364084f75b8f7d371a9f2a79b53d18febb051 Mon Sep 17 00:00:00 2001 From: arhag Date: Mon, 18 Mar 2019 21:52:13 -0400 Subject: [PATCH 149/680] fix bug in block_header_state::next regarding producer_to_last_produced; add unit test producer_schedule_tests/producer_watermark_test --- libraries/chain/block_header_state.cpp | 1 + 
unittests/producer_schedule_tests.cpp | 97 ++++++++++++++++++++++++++ 2 files changed, 98 insertions(+) diff --git a/libraries/chain/block_header_state.cpp b/libraries/chain/block_header_state.cpp index 543482863e0..f7dd7aba656 100644 --- a/libraries/chain/block_header_state.cpp +++ b/libraries/chain/block_header_state.cpp @@ -131,6 +131,7 @@ namespace eosio { namespace chain { } } } + new_producer_to_last_produced[prokey.producer_name] = result.block_num; result.producer_to_last_produced = std::move( new_producer_to_last_produced ); diff --git a/unittests/producer_schedule_tests.cpp b/unittests/producer_schedule_tests.cpp index 3f030c2f705..9003f8555bd 100644 --- a/unittests/producer_schedule_tests.cpp +++ b/unittests/producer_schedule_tests.cpp @@ -8,6 +8,8 @@ #include #include +#include "fork_test_utilities.hpp" + #ifdef NON_VALIDATING_TEST #define TESTER tester #else @@ -403,4 +405,99 @@ BOOST_FIXTURE_TEST_CASE( empty_producer_schedule_has_no_effect, tester ) try { BOOST_REQUIRE_EQUAL( validate(), true ); } FC_LOG_AND_RETHROW() +BOOST_AUTO_TEST_CASE( producer_watermark_test ) try { + tester c; + + c.create_accounts( {N(alice),N(bob),N(carol)} ); + c.produce_block(); + + auto compare_schedules = [&]( const vector& a, const producer_schedule_type& b ) { + return std::equal( a.begin(), a.end(), b.producers.begin(), b.producers.end() ); + }; + + auto res = c.set_producers( {N(alice),N(bob),N(carol)} ); + vector sch1 = { + {N(alice), c.get_public_key(N(alice), "active")}, + {N(bob), c.get_public_key(N(bob), "active")}, + {N(carol), c.get_public_key(N(carol), "active")} + }; + wlog("set producer schedule to [alice,bob,carol]"); + BOOST_REQUIRE_EQUAL( true, c.control->proposed_producers().valid() ); + BOOST_CHECK_EQUAL( true, compare_schedules( sch1, *c.control->proposed_producers() ) ); + BOOST_CHECK_EQUAL( c.control->pending_producers().version, 0u ); + c.produce_block(); // Starts new block which promotes the proposed schedule to pending + BOOST_CHECK_EQUAL( c.control->pending_producers().version, 1u ); + BOOST_CHECK_EQUAL( true, compare_schedules( sch1, c.control->pending_producers() ) ); + BOOST_CHECK_EQUAL( c.control->active_producers().version, 0u ); + c.produce_block(); + c.produce_block(); // Starts new block which promotes the pending schedule to active + BOOST_REQUIRE_EQUAL( c.control->active_producers().version, 1u ); + BOOST_CHECK_EQUAL( true, compare_schedules( sch1, c.control->active_producers() ) ); + + produce_empty_blocks_until( c, N(carol), N(alice) ); + c.produce_block(); + produce_empty_blocks_until( c, N(carol), N(alice) ); + + res = c.set_producers( {N(alice),N(bob)} ); + vector sch2 = { + {N(alice), c.get_public_key(N(alice), "active")}, + {N(bob), c.get_public_key(N(bob), "active")} + }; + wlog("set producer schedule to [alice,bob]"); + BOOST_REQUIRE_EQUAL( true, c.control->proposed_producers().valid() ); + BOOST_CHECK_EQUAL( true, compare_schedules( sch2, *c.control->proposed_producers() ) ); + + produce_empty_blocks_until( c, N(bob), N(carol) ); + produce_empty_blocks_until( c, N(alice), N(bob) ); + BOOST_CHECK_EQUAL( c.control->pending_producers().version, 2u ); + BOOST_CHECK_EQUAL( c.control->active_producers().version, 1u ); + + produce_empty_blocks_until( c, N(carol), N(alice) ); + BOOST_CHECK_EQUAL( c.control->pending_producers().version, 2u ); + BOOST_CHECK_EQUAL( c.control->active_producers().version, 1u ); + + produce_empty_blocks_until( c, N(bob), N(carol) ); + BOOST_CHECK_EQUAL( c.control->pending_block_producer(), N(carol) ); + BOOST_REQUIRE_EQUAL( 
c.control->active_producers().version, 2u ); + + auto carol_last_produced_block_num = c.control->head_block_num() + 1; + wdump((carol_last_produced_block_num)); + + c.produce_block(); + BOOST_CHECK( c.control->pending_block_producer() == N(alice) ); + + res = c.set_producers( {N(alice),N(bob),N(carol)} ); + wlog("set producer schedule to [alice,bob,carol]"); + BOOST_REQUIRE_EQUAL( true, c.control->proposed_producers().valid() ); + BOOST_CHECK_EQUAL( true, compare_schedules( sch1, *c.control->proposed_producers() ) ); + + produce_empty_blocks_until( c, N(bob), N(alice) ); + produce_empty_blocks_until( c, N(alice), N(bob) ); + BOOST_CHECK_EQUAL( c.control->pending_producers().version, 3u ); + BOOST_REQUIRE_EQUAL( c.control->active_producers().version, 2u ); + + produce_empty_blocks_until( c, N(bob), N(alice) ); + BOOST_REQUIRE_EQUAL( c.control->active_producers().version, 3u ); + + produce_empty_blocks_until( c, N(alice), N(bob) ); + c.produce_blocks(11); + BOOST_CHECK_EQUAL( c.control->pending_block_producer(), N(bob) ); + c.finish_block(); + + auto carol_block_num = c.control->head_block_num() + 1; + auto carol_block_time = c.control->head_block_time() + fc::milliseconds(config::block_interval_ms); + auto confirmed = carol_block_num - carol_last_produced_block_num - 1; + + c.control->start_block( carol_block_time, confirmed ); + BOOST_CHECK_EQUAL( c.control->pending_block_producer(), N(carol) ); + c.produce_block(); + auto h = c.control->head_block_header(); + + BOOST_CHECK_EQUAL( h.producer, N(carol) ); + BOOST_CHECK_EQUAL( h.confirmed, confirmed ); + + produce_empty_blocks_until( c, N(carol), N(alice) ); + +} FC_LOG_AND_RETHROW() + BOOST_AUTO_TEST_SUITE_END() From 21ec01fca471f992f74d7112963ecc237391e1dd Mon Sep 17 00:00:00 2001 From: Andrianto Lie Date: Tue, 19 Mar 2019 10:24:54 +0800 Subject: [PATCH 150/680] Add some helper functions for Cluster and Node --- tests/Cluster.py | 6 +++++ tests/Node.py | 69 ++++++++++++++++++++++++++++++++++-------------- 2 files changed, 55 insertions(+), 20 deletions(-) diff --git a/tests/Cluster.py b/tests/Cluster.py index 2d70e501416..035d483a291 100644 --- a/tests/Cluster.py +++ b/tests/Cluster.py @@ -354,6 +354,8 @@ def connectGroup(group, producerNodes, bridgeNodes) : if dontBootstrap: Utils.Print("Skipping bootstrap.") + self.biosNode=biosNode + self.discoverBiosNodePid() return True Utils.Print("Bootstrap cluster.") @@ -1595,3 +1597,7 @@ def stripValues(lowestMaxes,greaterThan): @staticmethod def getDataDir(nodeId): return os.path.abspath(os.path.join(Cluster.__dataDir, "node_%02d" % (nodeId))) + + @staticmethod + def getConfigDir(nodeId): + return os.path.abspath(os.path.join(Cluster.__configDir, "node_%02d" % (nodeId))) diff --git a/tests/Node.py b/tests/Node.py index 0a2f87f2d3d..62d9da38e4a 100644 --- a/tests/Node.py +++ b/tests/Node.py @@ -9,6 +9,7 @@ import signal import urllib.request import urllib.parse +from urllib.error import HTTPError import tempfile from core_symbol import CORE_SYMBOL @@ -1419,15 +1420,20 @@ def sendRpcApi(self, relativeUrl, data={}): try: response = urllib.request.urlopen(req, reqData) rpcApiResult = json.loads(response.read().decode("utf-8")) + except HTTPError as e: + Utils.Print("Fail to send RPC API to {} with data {} ({})".format(url, data, e.read())) + raise e except Exception as e: Utils.Print("Fail to send RPC API to {} with data {} ({})".format(url, data, e)) - raise + raise e return rpcApiResult + # Require producer_api_plugin def scheduleProtocolFeatureActivations(self, featureDigests=[]): 
self.sendRpcApi("v1/producer/schedule_protocol_feature_activations", {"protocol_features_to_activate": featureDigests}) - def getSupportedProtocolFeatures(self, excludeDisabled=True, excludeUnactivatable=True): + # Require producer_api_plugin + def getSupportedProtocolFeatures(self, excludeDisabled=False, excludeUnactivatable=False): param = { "exclude_disabled": excludeDisabled, "exclude_unactivatable": excludeUnactivatable @@ -1435,20 +1441,39 @@ def getSupportedProtocolFeatures(self, excludeDisabled=True, excludeUnactivatabl res = self.sendRpcApi("v1/producer/get_supported_protocol_features", param) return res - def waitForHeadToAdvance(self): + # This will return supported protocol feature digests as a dict, i.e. + # { + # "PREACTIVATE_FEATURE": "01234567", + # "ONLY_LINK_TO_EXISTING_PERMISSION": "01234567" + # } + # Require producer_api_plugin + def getSupportedProtocolFeatureDigestDict(self, excludeDisabled=False, excludeUnactivatable=False): + protocolFeatureDigestDict = {} + supportedProtocolFeatures = self.getSupportedProtocolFeatures(excludeDisabled, excludeUnactivatable) + for protocolFeature in supportedProtocolFeatures: + for spec in protocolFeature["specification"]: + if (spec["name"] == "builtin_feature_codename"): + codename = spec["value"] + protocolFeatureDigestDict[codename] = protocolFeature["feature_digest"] + break + return protocolFeatureDigestDict + + def waitForHeadToAdvance(self, timeout=6): currentHead = self.getHeadBlockNum() def isHeadAdvancing(): return self.getHeadBlockNum() > currentHead - Utils.waitForBool(isHeadAdvancing, 5) + Utils.waitForBool(isHeadAdvancing, timeout) + + def waitForLibToAdvance(self, timeout=6): + currentLib = self.getIrreversibleBlockNum() + def isLibAdvancing(): + return self.getIrreversibleBlockNum() > currentLib + Utils.waitForBool(isLibAdvancing, timeout) + # Require producer_api_plugin def activatePreactivateFeature(self): - def getPreactivateFeatureDigest(supportedProtocolFeatures): - for protocolFeature in supportedProtocolFeatures: - for spec in protocolFeature["specification"]: - if (spec["name"] == "builtin_feature_codename" and spec["value"] == "PREACTIVATE_FEATURE"): - return protocolFeature["feature_digest"] - return None - preactivateFeatureDigest = getPreactivateFeatureDigest(self.getSupportedProtocolFeatures()) + protocolFeatureDigestDict = self.getSupportedProtocolFeatureDigestDict() + preactivateFeatureDigest = protocolFeatureDigestDict["PREACTIVATE_FEATURE"] assert preactivateFeatureDigest self.scheduleProtocolFeatureActivations([preactivateFeatureDigest]) @@ -1456,17 +1481,15 @@ def getPreactivateFeatureDigest(supportedProtocolFeatures): # Wait for the next block to be produced so the scheduled protocol feature is activated self.waitForHeadToAdvance() + # Return an array of feature digests to be preactivated + # Require producer_api_plugin def getAllBuiltinFeatureDigestsToPreactivate(self): - allBuiltinProtocolFeatureDigests = [] - supportedProtocolFeatures = self.getSupportedProtocolFeatures() - for protocolFeature in supportedProtocolFeatures: - for spec in protocolFeature["specification"]: - if (spec["name"] == "builtin_feature_codename"): - if (spec["value"] != "PREACTIVATE_FEATURE"): - allBuiltinProtocolFeatureDigests.append(protocolFeature["feature_digest"]) - break - return allBuiltinProtocolFeatureDigests + protocolFeatureDigestDict = self.getSupportedProtocolFeatureDigestDict() + # Filter out "PREACTIVATE_FEATURE" + protocolFeatureDigestDict = {k: v for k, v in protocolFeatureDigestDict.items() if k != 
"PREACTIVATE_FEATURE"} + return list(protocolFeatureDigestDict.values()) + # Require PREACTIVATE_FEATURE to be activated and require eosio.bios with preactivate_feature def preactivateAllBuiltinProtocolFeature(self): allBuiltinProtocolFeatureDigests = self.getAllBuiltinFeatureDigestsToPreactivate() for digest in allBuiltinProtocolFeatureDigests: @@ -1478,3 +1501,9 @@ def preactivateAllBuiltinProtocolFeature(self): Utils.Print("ERROR: Failed to preactive digest {}".format(digest)) return None self.waitForHeadToAdvance() + + def getLatestBlockHeaderState(self): + headBlockNum = self.getHeadBlockNum() + cmdDesc = "get block {} --header-state".format(headBlockNum) + latestBlockHeaderState = self.processCleosCmd(cmdDesc, cmdDesc) + return latestBlockHeaderState From d85b4151ad1ab0b2e8387985db8f6df9d82a98f5 Mon Sep 17 00:00:00 2001 From: Andrianto Lie Date: Tue, 19 Mar 2019 10:38:38 +0800 Subject: [PATCH 151/680] Remove dependency on enum module --- tests/Cluster.py | 26 +++++++++++++++++--------- 1 file changed, 17 insertions(+), 9 deletions(-) diff --git a/tests/Cluster.py b/tests/Cluster.py index 035d483a291..df74c506588 100644 --- a/tests/Cluster.py +++ b/tests/Cluster.py @@ -18,16 +18,21 @@ from Node import BlockType from Node import Node from WalletMgr import WalletMgr -from enum import Enum # Protocol Feature Setup Policy -class PFSetupPolicy(Enum): +class PFSetupPolicy: NONE = 0 PREACTIVATE_FEATURE_ONLY = 1 FULL = 2 # This will only happen if the cluster is bootstrapped (i.e. dontBootstrap == False) - def hasPreactivateFeature(self): - return self == PFSetupPolicy.PREACTIVATE_FEATURE_ONLY or \ - self == PFSetupPolicy.FULL + @staticmethod + def hasPreactivateFeature(policy): + return policy == PFSetupPolicy.PREACTIVATE_FEATURE_ONLY or \ + policy == PFSetupPolicy.FULL + @staticmethod + def isValid(policy): + return policy == PFSetupPolicy.NONE or \ + policy == PFSetupPolicy.PREACTIVATE_FEATURE_ONLY or \ + policy == PFSetupPolicy.FULL # pylint: disable=too-many-instance-attributes # pylint: disable=too-many-public-methods @@ -129,6 +134,7 @@ def launch(self, pnodes=1, totalNodes=1, prodCount=1, topo="mesh", p2pPlugin="ne pfSetupPolicy: determine the protocol feature setup policy (none, preactivate_feature_only, or full) """ assert(isinstance(topo, str)) + assert PFSetupPolicy.isValid(pfSetupPolicy) if not self.localCluster: Utils.Print("WARNING: Cluster not local, not launching %s." % (Utils.EosServerName)) @@ -173,7 +179,7 @@ def launch(self, pnodes=1, totalNodes=1, prodCount=1, topo="mesh", p2pPlugin="ne nodeosArgs += extraNodeosArgs if Utils.Debug: nodeosArgs += " --contracts-console" - if pfSetupPolicy.hasPreactivateFeature(): + if PFSetupPolicy.hasPreactivateFeature(pfSetupPolicy): nodeosArgs += " --plugin eosio::producer_api_plugin" if nodeosArgs: @@ -348,7 +354,7 @@ def connectGroup(group, producerNodes, bridgeNodes) : Utils.Print("ERROR: Cluster doesn't seem to be in sync. 
Some nodes missing block 1") return False - if pfSetupPolicy.hasPreactivateFeature(): + if PFSetupPolicy.hasPreactivateFeature(pfSetupPolicy): Utils.Print("Activate Preactivate Feature.") biosNode.activatePreactivateFeature() @@ -858,6 +864,7 @@ def bios_bootstrap(self, biosNode, totalNodes, pfSetupPolicy, silent=False): """Bootstrap cluster using the bios_boot.sh script generated by eosio-launcher.""" Utils.Print("Starting cluster bootstrap.") + assert PFSetupPolicy.isValid(pfSetupPolicy) cmd="bash bios_boot.sh" if Utils.Debug: Utils.Print("cmd: %s" % (cmd)) @@ -865,7 +872,7 @@ def bios_bootstrap(self, biosNode, totalNodes, pfSetupPolicy, silent=False): "BIOS_CONTRACT_PATH": "unittests/contracts/old_versions/v1.6.0-rc3/eosio.bios", "FEATURE_DIGESTS": "" } - if pfSetupPolicy.hasPreactivateFeature(): + if PFSetupPolicy.hasPreactivateFeature(pfSetupPolicy): env["BIOS_CONTRACT_PATH"] = "unittests/contracts/eosio.bios" if pfSetupPolicy == PFSetupPolicy.FULL: @@ -946,6 +953,7 @@ def bootstrap(self, biosNode, totalNodes, prodCount, totalProducers, pfSetupPoli Ensure nodes are inter-connected prior to this call. One way to validate this will be to check if every node has block 1.""" Utils.Print("Starting cluster bootstrap.") + assert PFSetupPolicy.isValid(pfSetupPolicy) if totalProducers is None: totalProducers=totalNodes @@ -981,7 +989,7 @@ def bootstrap(self, biosNode, totalNodes, prodCount, totalProducers, pfSetupPoli contract="eosio.bios" contractDir="unittests/contracts/%s" % (contract) - if pfSetupPolicy.hasPreactivateFeature(): + if PFSetupPolicy.hasPreactivateFeature(pfSetupPolicy): contractDir="unittests/contracts/%s" % (contract) else: contractDir="unittests/contracts/old_versions/v1.6.0-rc3/%s" % (contract) From 2a93b336b7c9015be7fe5e7d59826ca9b020f8e9 Mon Sep 17 00:00:00 2001 From: Andrianto Lie Date: Tue, 19 Mar 2019 11:06:34 +0800 Subject: [PATCH 152/680] Remove wrong type checking --- tests/Cluster.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/Cluster.py b/tests/Cluster.py index df74c506588..909c270de3d 100644 --- a/tests/Cluster.py +++ b/tests/Cluster.py @@ -115,7 +115,7 @@ def setWalletMgr(self, walletMgr): # pylint: disable=too-many-statements def launch(self, pnodes=1, totalNodes=1, prodCount=1, topo="mesh", p2pPlugin="net", delay=1, onlyBios=False, dontBootstrap=False, totalProducers=None, extraNodeosArgs=None, useBiosBootFile=True, specificExtraNodeosArgs=None, - pfSetupPolicy:PFSetupPolicy = PFSetupPolicy.FULL): + pfSetupPolicy = PFSetupPolicy.FULL): """Launch cluster. 
pnodes: producer nodes count totalNodes: producer + non-producer nodes count From 43d8eb934c0a8c669713c30138312da7a38ff327 Mon Sep 17 00:00:00 2001 From: Kayan Date: Tue, 19 Mar 2019 11:25:24 +0800 Subject: [PATCH 153/680] fix python test that failed in some servers --- tests/prod_preactivation_test.py | 26 ++++++++++++++++++-------- 1 file changed, 18 insertions(+), 8 deletions(-) diff --git a/tests/prod_preactivation_test.py b/tests/prod_preactivation_test.py index 2873d9b75f1..c12ec412796 100755 --- a/tests/prod_preactivation_test.py +++ b/tests/prod_preactivation_test.py @@ -67,7 +67,7 @@ cluster.killall(allInstances=killAll) cluster.cleanup() Print("Stand up cluster") - if cluster.launch(pnodes=prodCount, totalNodes=prodCount, prodCount=prodCount, onlyBios=onlyBios, dontBootstrap=dontBootstrap, p2pPlugin=p2pPlugin, useBiosBootFile=False, extraNodeosArgs=" --plugin eosio::producer_api_plugin") is False: + if cluster.launch(pnodes=prodCount, totalNodes=prodCount, prodCount=1, onlyBios=onlyBios, dontBootstrap=dontBootstrap, p2pPlugin=p2pPlugin, useBiosBootFile=False, extraNodeosArgs=" --plugin eosio::producer_api_plugin") is False: cmdError("launcher") errorExit("Failed to stand up eos cluster.") @@ -118,17 +118,27 @@ errorExit("bios contract not result in expected unresolveable error") secwait = 30 - Print("Wait for defproducera to produce...") + Print("Wait for node 1 to produce...") node = cluster.getNode(1) while secwait > 0: info = node.getInfo() - if info["head_block_producer"] == "defproducera": #defproducera is in node0 + if info["head_block_producer"] >= "defproducerl" and info["head_block_producer"] <= "defproduceru": + break + time.sleep(1) + secwait = secwait - 1 + + secwait = 30 + Print("Waiting until node 0 start to produce...") + node = cluster.getNode(1) + while secwait > 0: + info = node.getInfo() + if info["head_block_producer"] >= "defproducera" and info["head_block_producer"] <= "defproducerk": break time.sleep(1) secwait = secwait - 1 if secwait <= 0: - errorExit("No producer of defproducera") + errorExit("No producer of node 0") cmd = "curl --data-binary '{\"protocol_features_to_activate\":[\"%s\"]}' %s/v1/producer/schedule_protocol_feature_activations" % (digest, node.endpointHttp) @@ -140,17 +150,17 @@ else: Print("feature PREACTIVATE_FEATURE (%s) preactivation success" % (digest)) - time.sleep(2) + time.sleep(0.6) Print("publish a new bios contract %s should fails because node1 is not producing block yet" % (contractDir)) retMap = node0.publishContract("eosio", contractDir, wasmFile, abiFile, True, shouldFail=True) if retMap["output"].decode("utf-8").find("unresolveable") < 0: errorExit("bios contract not result in expected unresolveable error") - Print("now wait for node 1 produce a block....(take some minutes)...") - secwait = 30 # wait for node1 produce a block + Print("now wait for node 1 produce a block...") + secwait = 30 # wait for node 1 produce a block while secwait > 0: info = node.getInfo() - if (info["head_block_producer"] == "defproducerl") or (info["head_block_producer"] == "defproducerm"): + if info["head_block_producer"] >= "defproducerl" and info["head_block_producer"] <= "defproduceru": break time.sleep(1) secwait = secwait - 1 From dfe276b71bf4bb3394c44cad19d4f996c4460f95 Mon Sep 17 00:00:00 2001 From: Andrianto Lie Date: Tue, 19 Mar 2019 16:55:23 +0800 Subject: [PATCH 154/680] Set PFSetupPolicy to be NONE for prod preactivate test --- tests/prod_preactivation_test.py | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff 
--git a/tests/prod_preactivation_test.py b/tests/prod_preactivation_test.py index c12ec412796..3fc31853524 100755 --- a/tests/prod_preactivation_test.py +++ b/tests/prod_preactivation_test.py @@ -1,7 +1,7 @@ #!/usr/bin/env python3 from testUtils import Utils -from Cluster import Cluster +from Cluster import Cluster, PFSetupPolicy from WalletMgr import WalletMgr from Node import Node from Node import ReturnType @@ -67,7 +67,9 @@ cluster.killall(allInstances=killAll) cluster.cleanup() Print("Stand up cluster") - if cluster.launch(pnodes=prodCount, totalNodes=prodCount, prodCount=1, onlyBios=onlyBios, dontBootstrap=dontBootstrap, p2pPlugin=p2pPlugin, useBiosBootFile=False, extraNodeosArgs=" --plugin eosio::producer_api_plugin") is False: + if cluster.launch(pnodes=prodCount, totalNodes=prodCount, prodCount=1, onlyBios=onlyBios, + dontBootstrap=dontBootstrap, p2pPlugin=p2pPlugin, useBiosBootFile=False, + pfSetupPolicy=PFSetupPolicy.NONE, extraNodeosArgs=" --plugin eosio::producer_api_plugin") is False: cmdError("launcher") errorExit("Failed to stand up eos cluster.") @@ -88,7 +90,7 @@ errorExit("feature list mismatch between node 0 and node 1") else: Print("feature list from node 0 matches with that from node 1") - + if len(feature0) == 0: errorExit("No supported feature list") @@ -99,9 +101,9 @@ continue else: digest = feature["feature_digest"] - + if len(digest) == 0: - errorExit("code name PREACTIVATE_FEATURE not found") + errorExit("code name PREACTIVATE_FEATURE not found") Print("found digest ", digest, " of PREACTIVATE_FEATURE") @@ -136,10 +138,10 @@ break time.sleep(1) secwait = secwait - 1 - + if secwait <= 0: errorExit("No producer of node 0") - + cmd = "curl --data-binary '{\"protocol_features_to_activate\":[\"%s\"]}' %s/v1/producer/schedule_protocol_feature_activations" % (digest, node.endpointHttp) Print("try to preactivate feature on node 1, cmd: %s" % (cmd)) From 59447e2dda0a4bbc511376531f9599441d304982 Mon Sep 17 00:00:00 2001 From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com> Date: Tue, 19 Mar 2019 11:24:29 -0400 Subject: [PATCH 155/680] Rename eosio-wat2wasm back to orginal name; don't install eosio-wat2wasm was really the Assemble command from WAVM and we used it for the old wasm build enviroment. It's no longer needed. 
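
For reference, a minimal standalone sketch of the command-line surface this
restores. Only the argument handling mirrors the diff below; the actual
WAST-to-WASM assembly is elided, and the sketch uses a plain main() (in WAVM
the entry point is commandMain, presumably wired up through CLI.h):

    // Illustrative sketch only -- not the WAVM implementation.
    #include <cstdlib>
    #include <iostream>
    #include <string>

    int main(int argc, char** argv)
    {
        if(argc < 3)
        {
            std::cerr << "Usage: Assemble in.wast out.wasm [switches]" << std::endl;
            std::cerr << "  -n|--omit-names\t\tOmits WAST function and local names from the output" << std::endl;
            return EXIT_FAILURE;
        }

        bool omitNames = false;
        for(int i = 3; i < argc; ++i)
        {
            const std::string arg = argv[i];
            if(arg == "-n" || arg == "--omit-names") { omitNames = true; }
        }

        // A real assembler would parse argv[1] as WAST and serialize a binary
        // module to argv[2], honoring omitNames when emitting the name section.
        std::cout << "would assemble " << argv[1] << " -> " << argv[2]
                  << (omitNames ? " (omitting names)" : "") << std::endl;
        return EXIT_SUCCESS;
    }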
From 59447e2dda0a4bbc511376531f9599441d304982 Mon Sep 17 00:00:00 2001
From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com>
Date: Tue, 19 Mar 2019 11:24:29 -0400
Subject: [PATCH 155/680] Rename eosio-wat2wasm back to original name; don't
 install

eosio-wat2wasm was really the Assemble command from WAVM and we used it for
the old wasm build environment. It's no longer needed. Remove the rename and
install changes, effectively reverting ae9388d and restoring this back to
upstream.

---
 libraries/wasm-jit/Source/Programs/Assemble.cpp   | 2 +-
 libraries/wasm-jit/Source/Programs/CMakeLists.txt | 7 +++----
 2 files changed, 4 insertions(+), 5 deletions(-)

diff --git a/libraries/wasm-jit/Source/Programs/Assemble.cpp b/libraries/wasm-jit/Source/Programs/Assemble.cpp
index a3328794ddd..60ca42cf0f9 100644
--- a/libraries/wasm-jit/Source/Programs/Assemble.cpp
+++ b/libraries/wasm-jit/Source/Programs/Assemble.cpp
@@ -7,7 +7,7 @@ int commandMain(int argc,char** argv)
 {
 	if(argc < 3)
 	{
-		std::cerr << "Usage: eosio-wast2wasm in.wast out.wasm [switches]" << std::endl;
+		std::cerr << "Usage: Assemble in.wast out.wasm [switches]" << std::endl;
 		std::cerr << "  -n|--omit-names\t\tOmits WAST function and local names from the output" << std::endl;
 		return EXIT_FAILURE;
 	}
diff --git a/libraries/wasm-jit/Source/Programs/CMakeLists.txt b/libraries/wasm-jit/Source/Programs/CMakeLists.txt
index 27a3aa427b4..260f4c1092c 100644
--- a/libraries/wasm-jit/Source/Programs/CMakeLists.txt
+++ b/libraries/wasm-jit/Source/Programs/CMakeLists.txt
@@ -1,7 +1,6 @@
-add_executable(eosio-wast2wasm Assemble.cpp CLI.h)
-target_link_libraries(eosio-wast2wasm Logging IR WAST WASM)
-set_target_properties(eosio-wast2wasm PROPERTIES FOLDER Programs)
-INSTALL(TARGETS eosio-wast2wasm DESTINATION ${CMAKE_INSTALL_BINDIR})
+add_executable(Assemble Assemble.cpp CLI.h)
+target_link_libraries(Assemble Logging IR WAST WASM)
+set_target_properties(Assemble PROPERTIES FOLDER Programs)

 add_executable(Disassemble Disassemble.cpp CLI.h)
 target_link_libraries(Disassemble Logging IR WAST WASM)

From 96f6ee165cbacfd9b598e1af248bd9eda2ec3733 Mon Sep 17 00:00:00 2001
From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com>
Date: Tue, 19 Mar 2019 11:56:50 -0400
Subject: [PATCH 156/680] Don't build WAVM tools any longer

Some of these don't work as intended due to changes in WAVM to support EOSIO.

---
 libraries/CMakeLists.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/libraries/CMakeLists.txt b/libraries/CMakeLists.txt
index a40355971a9..39d0398305d 100644
--- a/libraries/CMakeLists.txt
+++ b/libraries/CMakeLists.txt
@@ -2,7 +2,7 @@ add_subdirectory( fc )
 add_subdirectory( builtins )
 add_subdirectory( softfloat )
 add_subdirectory( chainbase )
-add_subdirectory( wasm-jit )
+add_subdirectory( wasm-jit EXCLUDE_FROM_ALL )
 add_subdirectory( appbase )
 add_subdirectory( chain )
 add_subdirectory( testing )
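For context on the one-word change above: EXCLUDE_FROM_ALL on add_subdirectory keeps every target declared under wasm-jit out of the default `all` target, so the WAVM libraries are still compiled when another target links against them, but the standalone tools no longer build by default. A minimal illustration with made-up target names (not from the EOSIO tree):

    # hypothetical example: tools/ declares add_library(toollib ...) and add_executable(toolexe ...)
    add_subdirectory( tools EXCLUDE_FROM_ALL )
    add_executable( app main.cpp )
    target_link_libraries( app toollib )
    # The default build now produces app (and toollib as its dependency) but not toolexe;
    # with a Makefile generator, `make toolexe` still builds it explicitly on demand.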
From ad8da6bda091cc51a0f0858a77679029c736b663 Mon Sep 17 00:00:00 2001
From: arhag
Date: Tue, 19 Mar 2019 12:37:28 -0400
Subject: [PATCH 157/680] enforce preactivation_required in
 controller_impl::start_block; add unit test
 protocol_feature_tests/require_preactivation_test to verify this

---
 libraries/chain/controller.cpp                 | 13 +++++++++----
 .../eosio/chain/protocol_feature_manager.hpp   |  1 -
 libraries/chain/protocol_feature_manager.cpp   |  3 ---
 unittests/protocol_feature_tests.cpp           | 17 ++++++++++++++++-
 4 files changed, 25 insertions(+), 9 deletions(-)

diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp
index 00252a9b47b..c7c71225d64 100644
--- a/libraries/chain/controller.cpp
+++ b/libraries/chain/controller.cpp
@@ -1363,11 +1363,18 @@ struct controller_impl {
          const auto& f = pfs.get_protocol_feature( feature_digest );

          auto res = activated_protocol_features.emplace( feature_digest, true );
-         if( !res.second ) {
+         if( res.second ) {
+            // feature_digest was not preactivated
+            EOS_ASSERT( !f.preactivation_required, protocol_feature_exception,
+                        "attempted to activate protocol feature without prior required preactivation: ${digest}",
+                        ("digest", feature_digest)
+            );
+         } else {
             EOS_ASSERT( !res.first->second, block_validate_exception,
                         "attempted duplicate activation within a single block: ${digest}",
-                        ("digest", res.first->first)
+                        ("digest", feature_digest)
             );
+            // feature_digest was preactivated
            res.first->second = true;
            ++num_preactivated_features_that_have_activated;
         }
@@ -1587,7 +1594,6 @@ struct controller_impl {
             EOS_THROW( protocol_feature_exception,
                        "${timestamp} is too early for the earliest allowed activation time of the protocol feature with digest '${digest}'",
                        ("digest", f)("timestamp", timestamp) );
          break;
-         case protocol_feature_set::recognized_t::ready_if_preactivated:
          case protocol_feature_set::recognized_t::ready:
          break;
          default:
@@ -2235,7 +2241,6 @@ void controller::preactivate_feature( const digest_type& feature_digest ) {
               "${timestamp} is too early for the earliest allowed activation time of the protocol feature with digest '${digest}'",
               ("digest", feature_digest)("timestamp", cur_time) );
          }
          break;
-      case protocol_feature_set::recognized_t::ready_if_preactivated:
       case protocol_feature_set::recognized_t::ready:
          break;
       default:
diff --git a/libraries/chain/include/eosio/chain/protocol_feature_manager.hpp b/libraries/chain/include/eosio/chain/protocol_feature_manager.hpp
index 1bfc440d99d..b93f58b6c6d 100644
--- a/libraries/chain/include/eosio/chain/protocol_feature_manager.hpp
+++ b/libraries/chain/include/eosio/chain/protocol_feature_manager.hpp
@@ -118,7 +118,6 @@ class protocol_feature_set {
          unrecognized,
          disabled,
          too_early,
-         ready_if_preactivated,
          ready
      };
diff --git a/libraries/chain/protocol_feature_manager.cpp b/libraries/chain/protocol_feature_manager.cpp
index b3f1bb01676..ded32d32cf8 100644
--- a/libraries/chain/protocol_feature_manager.cpp
+++ b/libraries/chain/protocol_feature_manager.cpp
@@ -196,9 +196,6 @@ Disallows linking an action to a non-existing permission.
if( itr->earliest_allowed_activation_time > now ) return recognized_t::too_early; - if( itr->preactivation_required ) - return recognized_t::ready_if_preactivated; - return recognized_t::ready; } diff --git a/unittests/protocol_feature_tests.cpp b/unittests/protocol_feature_tests.cpp index 514acb56ae8..24ff264e4df 100644 --- a/unittests/protocol_feature_tests.cpp +++ b/unittests/protocol_feature_tests.cpp @@ -163,6 +163,22 @@ BOOST_AUTO_TEST_CASE( double_activation ) try { } FC_LOG_AND_RETHROW() +BOOST_AUTO_TEST_CASE( require_preactivation_test ) try { + tester c( setup_policy::preactivate_feature_and_new_bios ); + const auto& pfm = c.control->get_protocol_feature_manager(); + + auto d = pfm.get_builtin_digest( builtin_protocol_feature_t::only_link_to_existing_permission ); + BOOST_REQUIRE( d ); + + BOOST_CHECK( !c.control->is_builtin_activated( builtin_protocol_feature_t::only_link_to_existing_permission ) ); + + c.schedule_protocol_features_wo_preactivation( {*d} ); + BOOST_CHECK_EXCEPTION( c.produce_block(), + protocol_feature_exception, + fc_exception_message_starts_with( "attempted to activate protocol feature without prior required preactivation:" ) + ); +} FC_LOG_AND_RETHROW() + BOOST_AUTO_TEST_CASE( only_link_to_existing_permission_test ) try { tester c( setup_policy::preactivate_feature_and_new_bios ); const auto& pfm = c.control->get_protocol_feature_manager(); @@ -222,5 +238,4 @@ BOOST_AUTO_TEST_CASE( only_link_to_existing_permission_test ) try { } FC_LOG_AND_RETHROW() - BOOST_AUTO_TEST_SUITE_END() From 568476a78bd2b1a5c8b7373e8c29eb951c9ea96c Mon Sep 17 00:00:00 2001 From: arhag Date: Tue, 19 Mar 2019 21:36:35 -0400 Subject: [PATCH 158/680] protocol_feature_set::make_default_builtin_protocol_feature can be static --- .../include/eosio/chain/protocol_feature_manager.hpp | 9 +++++---- libraries/chain/protocol_feature_manager.cpp | 2 +- libraries/testing/tester.cpp | 2 +- plugins/chain_plugin/chain_plugin.cpp | 2 +- 4 files changed, 8 insertions(+), 7 deletions(-) diff --git a/libraries/chain/include/eosio/chain/protocol_feature_manager.hpp b/libraries/chain/include/eosio/chain/protocol_feature_manager.hpp index b93f58b6c6d..2f6f6cb3630 100644 --- a/libraries/chain/include/eosio/chain/protocol_feature_manager.hpp +++ b/libraries/chain/include/eosio/chain/protocol_feature_manager.hpp @@ -130,10 +130,11 @@ class protocol_feature_set { bool validate_dependencies( const digest_type& feature_digest, const std::function& validator )const; - builtin_protocol_feature make_default_builtin_protocol_feature( - builtin_protocol_feature_t codename, - const std::function& handle_dependency - )const; + static builtin_protocol_feature + make_default_builtin_protocol_feature( + builtin_protocol_feature_t codename, + const std::function& handle_dependency + ); const protocol_feature& add_feature( const builtin_protocol_feature& f ); diff --git a/libraries/chain/protocol_feature_manager.cpp b/libraries/chain/protocol_feature_manager.cpp index ded32d32cf8..bc03e829218 100644 --- a/libraries/chain/protocol_feature_manager.cpp +++ b/libraries/chain/protocol_feature_manager.cpp @@ -241,7 +241,7 @@ Disallows linking an action to a non-existing permission. 
 protocol_feature_set::make_default_builtin_protocol_feature(
                                    builtin_protocol_feature_t codename,
                                    const std::function<digest_type(builtin_protocol_feature_t)>& handle_dependency
-                                 )const {
+                                 ) {
    auto itr = builtin_protocol_feature_codenames.find( codename );

    EOS_ASSERT( itr != builtin_protocol_feature_codenames.end(), protocol_feature_validation_exception,
diff --git a/libraries/testing/tester.cpp b/libraries/testing/tester.cpp
index d946c010ca6..f03c4f9e8bc 100644
--- a/libraries/testing/tester.cpp
+++ b/libraries/testing/tester.cpp
@@ -94,7 +94,7 @@ namespace eosio { namespace testing {
             return *res.first->second;
          }

-         auto f = pfs.make_default_builtin_protocol_feature( codename,
+         auto f = protocol_feature_set::make_default_builtin_protocol_feature( codename,
          [&add_builtins]( builtin_protocol_feature_t d ) {
             return add_builtins( d );
          } );
diff --git a/plugins/chain_plugin/chain_plugin.cpp b/plugins/chain_plugin/chain_plugin.cpp
index 9e806094e66..c5d22beec14 100644
--- a/plugins/chain_plugin/chain_plugin.cpp
+++ b/plugins/chain_plugin/chain_plugin.cpp
@@ -520,7 +520,7 @@ protocol_feature_set initialize_protocol_features( const fc::path& p, bool popul
       return *res.first->second;
    }

-      auto f = pfs.make_default_builtin_protocol_feature( codename,
+      auto f = protocol_feature_set::make_default_builtin_protocol_feature( codename,
       [&add_missing_builtins]( builtin_protocol_feature_t d ) {
          return add_missing_builtins( d );
       } );
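The pattern these call sites rely on: make_default_builtin_protocol_feature takes a handle_dependency callback so that a builtin's dependencies can be registered, recursively, before the feature itself. Condensed from the tester.cpp context above into a standalone C++ sketch (the visited-builtins cache and error handling used by the real testers are elided; pfs is a protocol_feature_set in scope):

    // Sketch of the recursive dependency-registration idiom enabled by the callback.
    std::function<digest_type(builtin_protocol_feature_t)> add_builtins =
       [&pfs, &add_builtins]( builtin_protocol_feature_t codename ) -> digest_type {
          auto f = protocol_feature_set::make_default_builtin_protocol_feature( codename,
             [&add_builtins]( builtin_protocol_feature_t d ) {
                return add_builtins( d );   // register any dependency first and return its digest
             } );
          return pfs.add_feature( f ).feature_digest;
       };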
From c4631a57b0e5f2b55c1fc79b03c21353b43db5a9 Mon Sep 17 00:00:00 2001
From: Andrianto Lie
Date: Tue, 19 Mar 2019 14:34:40 +0800
Subject: [PATCH 159/680] Add subjective restriction protocol feature test

---
 tests/Cluster.py                       |   2 -
 tests/Node.py                          |  14 +-
 tests/nodeos_protocol_feature_tests.py | 172 +++++++++++++++++++++++++
 3 files changed, 183 insertions(+), 5 deletions(-)
 create mode 100644 tests/nodeos_protocol_feature_tests.py

diff --git a/tests/Cluster.py b/tests/Cluster.py
index 909c270de3d..165cf6a431f 100644
--- a/tests/Cluster.py
+++ b/tests/Cluster.py
@@ -366,10 +366,8 @@ def connectGroup(group, producerNodes, bridgeNodes) :
         Utils.Print("Bootstrap cluster.")
         if onlyBios or not useBiosBootFile:
-            Utils.Print("NON BIOS BOOTSTRAP")
             self.biosNode=self.bootstrap(biosNode, totalNodes, prodCount, totalProducers, pfSetupPolicy, onlyBios)
         else:
-            Utils.Print(" BIOS BOOTSTRAP")
             self.useBiosBootFile=True
             self.biosNode=self.bios_bootstrap(biosNode, totalNodes, pfSetupPolicy)

diff --git a/tests/Node.py b/tests/Node.py
index 62d9da38e4a..75833b276bc 100644
--- a/tests/Node.py
+++ b/tests/Node.py
@@ -1490,9 +1490,8 @@ def getAllBuiltinFeatureDigestsToPreactivate(self):
         return list(protocolFeatureDigestDict.values())

     # Require PREACTIVATE_FEATURE to be activated and require eosio.bios with preactivate_feature
-    def preactivateAllBuiltinProtocolFeature(self):
-        allBuiltinProtocolFeatureDigests = self.getAllBuiltinFeatureDigestsToPreactivate()
-        for digest in allBuiltinProtocolFeatureDigests:
+    def preactivateProtocolFeatures(self, featureDigests:list):
+        for digest in featureDigests:
             Utils.Print("push preactivate action with digest {}".format(digest))
             data="{{\"feature_digest\":{}}}".format(digest)
             opts="--permission eosio@active"
@@ -1502,8 +1501,17 @@ def preactivateAllBuiltinProtocolFeature(self):
                 return None
         self.waitForHeadToAdvance()

+    # Require PREACTIVATE_FEATURE to be activated and require eosio.bios with preactivate_feature
+    def preactivateAllBuiltinProtocolFeature(self):
+        allBuiltinProtocolFeatureDigests = self.getAllBuiltinFeatureDigestsToPreactivate()
+        self.preactivateProtocolFeatures(allBuiltinProtocolFeatureDigests)
+
     def getLatestBlockHeaderState(self):
         headBlockNum = self.getHeadBlockNum()
         cmdDesc = "get block {} --header-state".format(headBlockNum)
         latestBlockHeaderState = self.processCleosCmd(cmdDesc, cmdDesc)
         return latestBlockHeaderState
+
+    def getActivatedProtocolFeatures(self):
+        latestBlockHeaderState = self.getLatestBlockHeaderState()
+        return latestBlockHeaderState["activated_protocol_features"]["protocol_features"]

diff --git a/tests/nodeos_protocol_feature_tests.py b/tests/nodeos_protocol_feature_tests.py
new file mode 100644
index 00000000000..68846d05f12
--- /dev/null
+++ b/tests/nodeos_protocol_feature_tests.py
@@ -0,0 +1,172 @@
+#!/usr/bin/env python3
+
+from testUtils import Utils
+from Cluster import Cluster, PFSetupPolicy
+from TestHelper import TestHelper
+from WalletMgr import WalletMgr
+from Node import Node
+
+import signal
+import json
+import time
+from os.path import join
+from datetime import datetime
+from urllib.error import HTTPError
+
+# Parse command line arguments
+args = TestHelper.parse_args({"-v","--clean-run","--dump-error-details","--leave-running","--keep-logs"})
+Utils.Debug = args.v
+killAll=args.clean_run
+dumpErrorDetails=args.dump_error_details
+dontKill=args.leave_running
+killEosInstances=not dontKill
+killWallet=not dontKill
+keepLogs=args.keep_logs
+
+def modifyPFSubjectiveRestriction(nodeId, jsonName, subjectiveRestrictionKey, newValue):
+    jsonPath = join(Cluster.getConfigDir(nodeId), "protocol_features", jsonName)
+    protocolFeatureJson = []
+    with open(jsonPath) as f:
+        protocolFeatureJson = json.load(f)
+    protocolFeatureJson["subjective_restrictions"][subjectiveRestrictionKey] = newValue
+    with open(jsonPath, "w") as f:
+        json.dump(protocolFeatureJson, f, indent=2)
+
+def modifyPreactFeatEarliestActivationTime(nodeId, newTime:datetime):
+    newTimeAsPosix = newTime.strftime('%Y-%m-%dT%H:%M:%S.%f')[:-3]
+    modifyPFSubjectiveRestriction(nodeId, "BUILTIN-PREACTIVATE_FEATURE.json", "earliest_allowed_activation_time", newTimeAsPosix)
+
+def enableOnlyLinkToExistingPermissionPFSpec(nodeId, enable=True):
+    modifyPFSubjectiveRestriction(nodeId, "BUILTIN-ONLY_LINK_TO_EXISTING_PERMISSION.json", "enabled", enable)
+
+def relaunchNode(node: Node, nodeId, chainArg="", addOrSwapFlags=None, relaunchAssertMessage="Fail to relaunch"):
+    relaunchTimeout=5
+    isRelaunchSuccess = node.relaunch(nodeId, chainArg=chainArg, addOrSwapFlags=addOrSwapFlags, timeout=relaunchTimeout, cachePopen=True)
+    time.sleep(1) # Give a second to replay or resync if needed
+    assert isRelaunchSuccess, relaunchAssertMessage
+    return isRelaunchSuccess
+
+def restartNode(node: Node, nodeId, chainArg="", addOrSwapFlags=None):
+    if not node.killed:
+        node.kill(signal.SIGTERM)
+    relaunchNode(node, nodeId, chainArg, addOrSwapFlags)
+
+def isFeatureActivated(node, featureDigest):
+    latestBlockHeaderState = node.getLatestBlockHeaderState()
+    activatedProcotolFeatures = latestBlockHeaderState["activated_protocol_features"]["protocol_features"]
+    return featureDigest in activatedProcotolFeatures
+
+# Setup cluster and it's wallet manager
+walletMgr=WalletMgr(True)
+cluster=Cluster(walletd=True)
+cluster.setWalletMgr(walletMgr)
+
+# List to contain the test result message
+testResultMsgs = []
+testSuccessful = False
+try:
+    TestHelper.printSystemInfo("BEGIN")
+    cluster.killall(allInstances=killAll)
+    cluster.cleanup()
+
+    # Start the cluster
+    cluster.launch(extraNodeosArgs=" --plugin eosio::producer_api_plugin ",
specificExtraNodeosArgs={0:" -e "}, + totalNodes=2, + prodCount=4, + useBiosBootFile=False, + pfSetupPolicy=PFSetupPolicy.NONE) + # We don't need bios node anymore, just kill it + cluster.biosNode.kill(signal.SIGTERM) + producingNodeId = 0 + nonProducingNodeId = 1 + producingNode = cluster.getNode(producingNodeId) + nonProducingNode = cluster.getNode(nonProducingNodeId) + + supportedProtocolFeatureDigestDict = producingNode.getSupportedProtocolFeatureDigestDict() + + def pauseBlockProduction(): + producingNode.sendRpcApi("v1/producer/pause") + + def resumeBlockProduction(): + producingNode.sendRpcApi("v1/producer/resume") + + ############################################################### + # The following test case is testing PREACTIVATE_FEATURE with + # earliest_allowed_activation_time subjective restriction + ############################################################### + + preactivateFeatureDigest = supportedProtocolFeatureDigestDict["PREACTIVATE_FEATURE"] + + # Kill the nodes and then modify the JSON of PREACTIVATE FEATURE so it's far in the future + modifyPreactFeatEarliestActivationTime(producingNodeId, datetime(2120, 1, 1)) + modifyPreactFeatEarliestActivationTime(nonProducingNodeId, datetime(2120, 1, 1)) + restartNode(producingNode, producingNodeId) + restartNode(nonProducingNode, nonProducingNodeId) + + # Activating PREACTIVATE_FEATURE should fail + try: + producingNode.activatePreactivateFeature() + Utils.errorExit("Preactivate Feature should Fail") + except Exception as e: + Utils.Print("Fail to activate PREACTIVATE_FEATURE as expected") + + # Modify back the earliest activation time for producing node + modifyPreactFeatEarliestActivationTime(producingNodeId, datetime(1970, 1, 1)) + restartNode(producingNode, producingNodeId) + + # This time PREACTIVATE_FEATURE should be successfully activated on the producing node + Utils.Print("Activating PREACTIVATE_FEATURE") + producingNode.activatePreactivateFeature() + producingNode.waitForHeadToAdvance() + assert isFeatureActivated(producingNode, preactivateFeatureDigest),\ + "PREACTIVATE_FEATURE is not activated in producing node" + + # However, it should fail on the non producing node, because its earliest act time is in the future + assert not isFeatureActivated(nonProducingNode, preactivateFeatureDigest),\ + "PREACTIVATE_FEATURE is activated on non producing node when it's supposed not to" + + # After modifying the earliest activation time, and it's syncing, it should be activated + modifyPreactFeatEarliestActivationTime(nonProducingNodeId, datetime(1970, 1, 1)) + restartNode(nonProducingNode, nonProducingNodeId) + assert isFeatureActivated(nonProducingNode, preactivateFeatureDigest),\ + "PREACTIVATE_FEATURE is not activated on non producing node" + + ##################################################################### + # The following test case is testing ONLY_LINK_TO_EXISTING_PERMISSION + # with enabled subjective restriction + ##################################################################### + + onlyLinkToExistPermFeatDigest = supportedProtocolFeatureDigestDict["ONLY_LINK_TO_EXISTING_PERMISSION"] + + # First publish eosio contract + trans = producingNode.publishContract("eosio", "unittests/contracts/eosio.bios", + "eosio.bios.wasm", "eosio.bios.abi", waitForTransBlock=True) + assert trans is not None, "Fail to publish eosio.bios contract" + + # Test ONLY_LINK_TO_EXISTING_PERMISSION when subjective_restrictions enabled is false + enableOnlyLinkToExistingPermissionPFSpec(producingNodeId, False) + restartNode(producingNode, 
producingNodeId)
+    producingNode.preactivateProtocolFeatures([onlyLinkToExistPermFeatDigest])
+    producingNode.waitForHeadToAdvance()
+    assert not isFeatureActivated(producingNode, onlyLinkToExistPermFeatDigest),\
+        "ONLY_LINK_TO_EXISTING_PERMISSION is activated on non producing node when it's supposed not to"
+
+    # Modify the subjective_restriction enabled to be true, now it should be activated on both nodes
+    enableOnlyLinkToExistingPermissionPFSpec(producingNodeId, True)
+    restartNode(producingNode, producingNodeId)
+    producingNode.preactivateProtocolFeatures([onlyLinkToExistPermFeatDigest])
+    producingNode.waitForHeadToAdvance()
+    assert isFeatureActivated(producingNode, onlyLinkToExistPermFeatDigest),\
+        "ONLY_LINK_TO_EXISTING_PERMISSION is not activated on producing node"
+    assert isFeatureActivated(nonProducingNode, onlyLinkToExistPermFeatDigest),\
+        "ONLY_LINK_TO_EXISTING_PERMISSION is not activated on non producing node"
+
+    testSuccessful = True
+finally:
+    TestHelper.shutdown(cluster, walletMgr, testSuccessful, killEosInstances, killWallet, keepLogs, killAll, dumpErrorDetails)
+    # Print test result
+    for msg in testResultMsgs: Utils.Print(msg)
+
+exitCode = 0 if testSuccessful else 1
+exit(exitCode)
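The helper functions in this new test edit the per-node protocol-feature JSON files under the node's config directory (protocol_features/BUILTIN-*.json). Pieced together from the keys this test and its later revision touch, the subjective_restrictions object in such a file is shaped roughly like the sketch below; any other fields the file may contain are omitted here:

    import json

    # The three keys are the ones exercised by the tests; the timestamp format
    # matches the strftime('%Y-%m-%dT%H:%M:%S.%f')[:-3] conversion used above.
    feature_json = {
        "subjective_restrictions": {
            "enabled": True,
            "preactivation_required": True,
            "earliest_allowed_activation_time": "1970-01-01T00:00:00.000"
        }
    }
    # e.g. what enableOnlyLinkToExistingPermissionPFSpec(nodeId, False) rewrites:
    feature_json["subjective_restrictions"]["enabled"] = False
    print(json.dumps(feature_json, indent=2))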
From a6bed123d54f78164cdaaf492f6e7d739c75978d Mon Sep 17 00:00:00 2001
From: Andrianto Lie
Date: Tue, 19 Mar 2019 16:42:12 +0800
Subject: [PATCH 160/680] Add test to CMakeLists.txt

---
 tests/CMakeLists.txt                                          | 4 ++++
 ...tocol_feature_tests.py => nodeos_protocol_feature_test.py} | 0
 2 files changed, 4 insertions(+)
 rename tests/{nodeos_protocol_feature_tests.py => nodeos_protocol_feature_test.py} (100%)

diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt
index cc1aa65b314..551f5975927 100644
--- a/tests/CMakeLists.txt
+++ b/tests/CMakeLists.txt
@@ -41,6 +41,7 @@ configure_file(${CMAKE_CURRENT_SOURCE_DIR}/nodeos_run_remote_test.py ${CMAKE_CUR
 configure_file(${CMAKE_CURRENT_SOURCE_DIR}/nodeos_under_min_avail_ram.py ${CMAKE_CURRENT_BINARY_DIR}/nodeos_under_min_avail_ram.py COPYONLY)
 configure_file(${CMAKE_CURRENT_SOURCE_DIR}/nodeos_voting_test.py ${CMAKE_CURRENT_BINARY_DIR}/nodeos_voting_test.py COPYONLY)
 configure_file(${CMAKE_CURRENT_SOURCE_DIR}/nodeos_irreversible_mode_test.py ${CMAKE_CURRENT_BINARY_DIR}/nodeos_irreversible_mode_test.py COPYONLY)
+configure_file(${CMAKE_CURRENT_SOURCE_DIR}/nodeos_protocol_feature_test.py ${CMAKE_CURRENT_BINARY_DIR}/nodeos_protocol_feature_test.py COPYONLY)
 configure_file(${CMAKE_CURRENT_SOURCE_DIR}/consensus-validation-malicious-producers.py ${CMAKE_CURRENT_BINARY_DIR}/consensus-validation-malicious-producers.py COPYONLY)
 configure_file(${CMAKE_CURRENT_SOURCE_DIR}/validate-dirty-db.py ${CMAKE_CURRENT_BINARY_DIR}/validate-dirty-db.py COPYONLY)
 configure_file(${CMAKE_CURRENT_SOURCE_DIR}/launcher_test.py ${CMAKE_CURRENT_BINARY_DIR}/launcher_test.py COPYONLY)
@@ -114,6 +115,9 @@
 set_property(TEST nodeos_under_min_avail_ram_lr_test PROPERTY LABELS long_running_tests)

 add_test(NAME nodeos_irreversible_mode_lr_test COMMAND tests/nodeos_irreversible_mode_test.py -v --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
 set_property(TEST nodeos_irreversible_mode_lr_test PROPERTY LABELS long_running_tests)

+add_test(NAME nodeos_protocol_feature_lr_test COMMAND tests/nodeos_protocol_feature_test.py -v --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
+set_property(TEST nodeos_protocol_feature_lr_test PROPERTY LABELS long_running_tests)
+
 if(ENABLE_COVERAGE_TESTING)

   set(Coverage_NAME ${PROJECT_NAME}_coverage)

diff --git a/tests/nodeos_protocol_feature_tests.py b/tests/nodeos_protocol_feature_test.py
similarity index 100%
rename from tests/nodeos_protocol_feature_tests.py
rename to tests/nodeos_protocol_feature_test.py

From ec1e2f18ddc2d6d33f882fd46188c74fb1501735 Mon Sep 17 00:00:00 2001
From: Andrianto Lie
Date: Wed, 20 Mar 2019 17:27:05 +0800
Subject: [PATCH 161/680] nodeos_protocol_feature_test now only tests JSON read
 feature

---
 tests/Cluster.py                      |  24 +++--
 tests/Node.py                         |  33 +++---
 tests/nodeos_protocol_feature_test.py | 140 ++++----------------
 3 files changed, 56 insertions(+), 141 deletions(-)

diff --git a/tests/Cluster.py b/tests/Cluster.py
index 165cf6a431f..e5d669ce6e8 100644
--- a/tests/Cluster.py
+++ b/tests/Cluster.py
@@ -340,8 +340,8 @@ def connectGroup(group, producerNodes, bridgeNodes) :

         self.nodes=nodes

-        biosNode=Node(Cluster.__BiosHost, Cluster.__BiosPort, walletMgr=self.walletMgr)
-        if not biosNode.checkPulse():
+        biosNode=self.discoverBiosNode()
+        if not biosNode or not biosNode.checkPulse():
             Utils.Print("ERROR: Bios node doesn't appear to be running...")
             return False
@@ -361,7 +361,6 @@
         if dontBootstrap:
             Utils.Print("Skipping bootstrap.")
             self.biosNode=biosNode
-            self.discoverBiosNodePid()
             return True
@@ -375,8 +374,6 @@
             Utils.Print("ERROR: Bootstrap failed.")
             return False

-        self.discoverBiosNodePid()
-
         # validate iniX accounts can be retrieved

         producerKeys=Cluster.parseClusterKeys(totalNodes)
@@ -1232,7 +1229,7 @@ def myFunc():
     @staticmethod
     def pgrepEosServerPattern(nodeInstance):
         dataLocation=Cluster.__dataDir + Cluster.nodeExtensionToName(nodeInstance)
-        return r"[\n]?(\d+) (.* --data-dir %s .*)\n" % (dataLocation)
+        return r"[\n]?(\d+) (.* --data-dir %s.*)\n" % (dataLocation)

     # Populates list of EosInstanceInfo objects, matched to actual running instances
     def discoverLocalNodes(self, totalNodes, timeout=None):
@@ -1261,15 +1258,16 @@ def discoverLocalNodes(self, totalNodes, timeout=None):
         if Utils.Debug: Utils.Print("Found %d nodes" % (len(nodes)))
         return nodes

-    def discoverBiosNodePid(self, timeout=None):
+    def discoverBiosNode(self, timeout=None):
         psOut=Cluster.pgrepEosServers(timeout=timeout)
         pattern=Cluster.pgrepEosServerPattern("bios")
         Utils.Print("pattern={\n%s\n}, psOut=\n%s\n" % (pattern,psOut))
         m=re.search(pattern, psOut, re.MULTILINE)
         if m is None:
             Utils.Print("ERROR: Failed to find %s pid. 
Pattern %s" % (Utils.EosServerName, pattern)) + return None else: - self.biosNode.pid=int(m.group(1)) + return Node(Cluster.__BiosHost, Cluster.__BiosPort, pid=int(m.group(1)), cmd=m.group(2), walletMgr=self.walletMgr) # Kills a percentange of Eos instances starting from the tail and update eosInstanceInfos state def killSomeEosInstances(self, killCount, killSignalStr=Utils.SigKillTag): @@ -1602,8 +1600,14 @@ def stripValues(lowestMaxes,greaterThan): @staticmethod def getDataDir(nodeId): - return os.path.abspath(os.path.join(Cluster.__dataDir, "node_%02d" % (nodeId))) + assert isinstance(nodeId, int) or (isinstance(nodeId, str) and nodeId == "bios"), "Invalid Node ID is passed" + extName = nodeId + if isinstance(nodeId, int): extName = "%02d" % (nodeId) + return os.path.abspath(os.path.join(Cluster.__dataDir, "node_{}".format(extName))) @staticmethod def getConfigDir(nodeId): - return os.path.abspath(os.path.join(Cluster.__configDir, "node_%02d" % (nodeId))) + assert isinstance(nodeId, int) or (isinstance(nodeId, str) and nodeId == "bios"), "Invalid Node ID is passed" + extName = nodeId + if isinstance(nodeId, int): extName = "%02d" % (nodeId) + return os.path.abspath(os.path.join(Cluster.__configDir, "node_{}".format(extName))) diff --git a/tests/Node.py b/tests/Node.py index 75833b276bc..cdfb6d4e476 100644 --- a/tests/Node.py +++ b/tests/Node.py @@ -1307,8 +1307,9 @@ def relaunch(self, nodeId, chainArg, newChain=False, timeout=Utils.systemWaitTim assert(self.pid is None) assert(self.killed) + assert isinstance(nodeId, int) or (isinstance(nodeId, str) and nodeId == "bios"), "Invalid Node ID is passed" - if Utils.Debug: Utils.Print("Launching node process, Id: %d" % (nodeId)) + if Utils.Debug: Utils.Print("Launching node process, Id: {}".format(nodeId)) cmdArr=[] myCmd=self.cmd @@ -1340,7 +1341,10 @@ def relaunch(self, nodeId, chainArg, newChain=False, timeout=Utils.systemWaitTim myCmd=" ".join(cmdArr) - dataDir="var/lib/node_%02d" % (nodeId) + if nodeId == "bios": + dataDir="var/lib/node_bios" + else: + dataDir="var/lib/node_%02d" % (nodeId) dt = datetime.datetime.now() dateStr="%d_%02d_%02d_%02d_%02d_%02d" % ( dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second) @@ -1441,20 +1445,20 @@ def getSupportedProtocolFeatures(self, excludeDisabled=False, excludeUnactivatab res = self.sendRpcApi("v1/producer/get_supported_protocol_features", param) return res - # This will return supported protocol feature digests as a dict, i.e. + # This will return supported protocol feature digests as a dict (feature codename as the key), i.e. 
# { - # "PREACTIVATE_FEATURE": "01234567", - # "ONLY_LINK_TO_EXISTING_PERMISSION": "01234567" + # "PREACTIVATE_FEATURE": {...}, + # "ONLY_LINK_TO_EXISTING_PERMISSION": {...}, # } # Require producer_api_plugin - def getSupportedProtocolFeatureDigestDict(self, excludeDisabled=False, excludeUnactivatable=False): + def getSupportedProtocolFeatureDict(self, excludeDisabled=False, excludeUnactivatable=False): protocolFeatureDigestDict = {} supportedProtocolFeatures = self.getSupportedProtocolFeatures(excludeDisabled, excludeUnactivatable) for protocolFeature in supportedProtocolFeatures: for spec in protocolFeature["specification"]: if (spec["name"] == "builtin_feature_codename"): codename = spec["value"] - protocolFeatureDigestDict[codename] = protocolFeature["feature_digest"] + protocolFeatureDigestDict[codename] = protocolFeature break return protocolFeatureDigestDict @@ -1472,8 +1476,8 @@ def isLibAdvancing(): # Require producer_api_plugin def activatePreactivateFeature(self): - protocolFeatureDigestDict = self.getSupportedProtocolFeatureDigestDict() - preactivateFeatureDigest = protocolFeatureDigestDict["PREACTIVATE_FEATURE"] + protocolFeatureDigestDict = self.getSupportedProtocolFeatureDict() + preactivateFeatureDigest = protocolFeatureDigestDict["PREACTIVATE_FEATURE"]["feature_digest"] assert preactivateFeatureDigest self.scheduleProtocolFeatureActivations([preactivateFeatureDigest]) @@ -1484,10 +1488,13 @@ def activatePreactivateFeature(self): # Return an array of feature digests to be preactivated # Require producer_api_plugin def getAllBuiltinFeatureDigestsToPreactivate(self): - protocolFeatureDigestDict = self.getSupportedProtocolFeatureDigestDict() - # Filter out "PREACTIVATE_FEATURE" - protocolFeatureDigestDict = {k: v for k, v in protocolFeatureDigestDict.items() if k != "PREACTIVATE_FEATURE"} - return list(protocolFeatureDigestDict.values()) + protocolFeatures = [] + protocolFeatureDict = self.getSupportedProtocolFeatureDict() + for k, v in protocolFeatureDict.items(): + # Filter out "PREACTIVATE_FEATURE" + if k != "PREACTIVATE_FEATURE": + protocolFeatures.append(v["feature_digest"]) + return protocolFeatures # Require PREACTIVATE_FEATURE to be activated and require eosio.bios with preactivate_feature def preactivateProtocolFeatures(self, featureDigests:list): diff --git a/tests/nodeos_protocol_feature_test.py b/tests/nodeos_protocol_feature_test.py index 68846d05f12..2c581b39dbd 100644 --- a/tests/nodeos_protocol_feature_test.py +++ b/tests/nodeos_protocol_feature_test.py @@ -8,10 +8,8 @@ import signal import json -import time from os.path import join from datetime import datetime -from urllib.error import HTTPError # Parse command line arguments args = TestHelper.parse_args({"-v","--clean-run","--dump-error-details","--leave-running","--keep-logs"}) @@ -23,150 +21,56 @@ killWallet=not dontKill keepLogs=args.keep_logs -def modifyPFSubjectiveRestriction(nodeId, jsonName, subjectiveRestrictionKey, newValue): - jsonPath = join(Cluster.getConfigDir(nodeId), "protocol_features", jsonName) +# The following test case will test the Protocol Feature JSON reader of the blockchain +def modifyPFSubjectiveRestrictions(nodeId, featureCodename, subjectiveRestrictions): + jsonPath = join(Cluster.getConfigDir(nodeId), "protocol_features", "BUILTIN-{}.json".format(featureCodename)) protocolFeatureJson = [] with open(jsonPath) as f: protocolFeatureJson = json.load(f) - protocolFeatureJson["subjective_restrictions"][subjectiveRestrictionKey] = newValue + 
protocolFeatureJson["subjective_restrictions"] = subjectiveRestrictions with open(jsonPath, "w") as f: json.dump(protocolFeatureJson, f, indent=2) -def modifyPreactFeatEarliestActivationTime(nodeId, newTime:datetime): - newTimeAsPosix = newTime.strftime('%Y-%m-%dT%H:%M:%S.%f')[:-3] - modifyPFSubjectiveRestriction(nodeId, "BUILTIN-PREACTIVATE_FEATURE.json", "earliest_allowed_activation_time", newTimeAsPosix) - -def enableOnlyLinkToExistingPermissionPFSpec(nodeId, enable=True): - modifyPFSubjectiveRestriction(nodeId, "BUILTIN-ONLY_LINK_TO_EXISTING_PERMISSION.json", "enabled", enable) - -def relaunchNode(node: Node, nodeId, chainArg="", addOrSwapFlags=None, relaunchAssertMessage="Fail to relaunch"): - relaunchTimeout=5 - isRelaunchSuccess = node.relaunch(nodeId, chainArg=chainArg, addOrSwapFlags=addOrSwapFlags, timeout=relaunchTimeout, cachePopen=True) - time.sleep(1) # Give a second to replay or resync if needed - assert isRelaunchSuccess, relaunchAssertMessage - return isRelaunchSuccess - -def restartNode(node: Node, nodeId, chainArg="", addOrSwapFlags=None): +def restartNode(node: Node, nodeId, chainArg=None, addOrSwapFlags=None): if not node.killed: node.kill(signal.SIGTERM) - relaunchNode(node, nodeId, chainArg, addOrSwapFlags) + isRelaunchSuccess = node.relaunch(nodeId, chainArg, addOrSwapFlags=addOrSwapFlags, timeout=5, cachePopen=True) + assert isRelaunchSuccess, "Fail to relaunch" -def isFeatureActivated(node, featureDigest): - latestBlockHeaderState = node.getLatestBlockHeaderState() - activatedProcotolFeatures = latestBlockHeaderState["activated_protocol_features"]["protocol_features"] - return featureDigest in activatedProcotolFeatures - -# Setup cluster and it's wallet manager walletMgr=WalletMgr(True) cluster=Cluster(walletd=True) cluster.setWalletMgr(walletMgr) # List to contain the test result message -testResultMsgs = [] testSuccessful = False try: TestHelper.printSystemInfo("BEGIN") cluster.killall(allInstances=killAll) cluster.cleanup() - - # Start the cluster cluster.launch(extraNodeosArgs=" --plugin eosio::producer_api_plugin ", - specificExtraNodeosArgs={0:" -e "}, - totalNodes=2, - prodCount=4, - useBiosBootFile=False, + dontBootstrap=True, pfSetupPolicy=PFSetupPolicy.NONE) - # We don't need bios node anymore, just kill it - cluster.biosNode.kill(signal.SIGTERM) - producingNodeId = 0 - nonProducingNodeId = 1 - producingNode = cluster.getNode(producingNodeId) - nonProducingNode = cluster.getNode(nonProducingNodeId) - - supportedProtocolFeatureDigestDict = producingNode.getSupportedProtocolFeatureDigestDict() - - def pauseBlockProduction(): - producingNode.sendRpcApi("v1/producer/pause") - - def resumeBlockProduction(): - producingNode.sendRpcApi("v1/producer/resume") - - ############################################################### - # The following test case is testing PREACTIVATE_FEATURE with - # earliest_allowed_activation_time subjective restriction - ############################################################### - - preactivateFeatureDigest = supportedProtocolFeatureDigestDict["PREACTIVATE_FEATURE"] - - # Kill the nodes and then modify the JSON of PREACTIVATE FEATURE so it's far in the future - modifyPreactFeatEarliestActivationTime(producingNodeId, datetime(2120, 1, 1)) - modifyPreactFeatEarliestActivationTime(nonProducingNodeId, datetime(2120, 1, 1)) - restartNode(producingNode, producingNodeId) - restartNode(nonProducingNode, nonProducingNodeId) - - # Activating PREACTIVATE_FEATURE should fail - try: - producingNode.activatePreactivateFeature() - 
Utils.errorExit("Preactivate Feature should Fail") - except Exception as e: - Utils.Print("Fail to activate PREACTIVATE_FEATURE as expected") - - # Modify back the earliest activation time for producing node - modifyPreactFeatEarliestActivationTime(producingNodeId, datetime(1970, 1, 1)) - restartNode(producingNode, producingNodeId) - - # This time PREACTIVATE_FEATURE should be successfully activated on the producing node - Utils.Print("Activating PREACTIVATE_FEATURE") - producingNode.activatePreactivateFeature() - producingNode.waitForHeadToAdvance() - assert isFeatureActivated(producingNode, preactivateFeatureDigest),\ - "PREACTIVATE_FEATURE is not activated in producing node" - - # However, it should fail on the non producing node, because its earliest act time is in the future - assert not isFeatureActivated(nonProducingNode, preactivateFeatureDigest),\ - "PREACTIVATE_FEATURE is activated on non producing node when it's supposed not to" - - # After modifying the earliest activation time, and it's syncing, it should be activated - modifyPreactFeatEarliestActivationTime(nonProducingNodeId, datetime(1970, 1, 1)) - restartNode(nonProducingNode, nonProducingNodeId) - assert isFeatureActivated(nonProducingNode, preactivateFeatureDigest),\ - "PREACTIVATE_FEATURE is not activated on non producing node" - - ##################################################################### - # The following test case is testing ONLY_LINK_TO_EXISTING_PERMISSION - # with enabled subjective restriction - ##################################################################### - - onlyLinkToExistPermFeatDigest = supportedProtocolFeatureDigestDict["ONLY_LINK_TO_EXISTING_PERMISSION"] + biosNode = cluster.biosNode - # First publish eosio contract - trans = producingNode.publishContract("eosio", "unittests/contracts/eosio.bios", - "eosio.bios.wasm", "eosio.bios.abi", waitForTransBlock=True) - assert trans is not None, "Fail to publish eosio.bios contract" + # Modify the JSON file and then restart the node so it updates the internal state + newSubjectiveRestrictions = { + "earliest_allowed_activation_time": "2030-01-01T00:00:00.000", + "preactivation_required": True, + "enabled": False + } + modifyPFSubjectiveRestrictions("bios", "PREACTIVATE_FEATURE", newSubjectiveRestrictions) + restartNode(biosNode, "bios") - # Test ONLY_LINK_TO_EXISTING_PERMISSION when subjective_restrictions enabled is false - enableOnlyLinkToExistingPermissionPFSpec(producingNodeId, False) - restartNode(producingNode, producingNodeId) - producingNode.preactivateProtocolFeatures([onlyLinkToExistPermFeatDigest]) - producingNode.waitForHeadToAdvance() - assert not isFeatureActivated(producingNode, onlyLinkToExistPermFeatDigest),\ - "ONLY_LINK_TO_EXISTING_PERMISSION is activated on non producing node when it's supposed not to" + supportedProtocolFeatureDict = biosNode.getSupportedProtocolFeatureDict() + preactivateFeatureSubjectiveRestrictions = supportedProtocolFeatureDict["PREACTIVATE_FEATURE"]["subjective_restrictions"] - # Modify the subjective_restriction enabled to be true, now it should be activated on both nodes - enableOnlyLinkToExistingPermissionPFSpec(producingNodeId, True) - restartNode(producingNode, producingNodeId) - producingNode.preactivateProtocolFeatures([onlyLinkToExistPermFeatDigest]) - producingNode.waitForHeadToAdvance() - assert isFeatureActivated(producingNode, onlyLinkToExistPermFeatDigest),\ - "ONLY_LINK_TO_EXISTING_PERMISSION is not activated on producing node" - assert isFeatureActivated(nonProducingNode, 
onlyLinkToExistPermFeatDigest),\
-        "ONLY_LINK_TO_EXISTING_PERMISSION is not activated on non producing node"

     testSuccessful = True
 finally:
     TestHelper.shutdown(cluster, walletMgr, testSuccessful, killEosInstances, killWallet, keepLogs, killAll, dumpErrorDetails)
-    # Print test result
-    for msg in testResultMsgs: Utils.Print(msg)

 exitCode = 0 if testSuccessful else 1
 exit(exitCode)

From a54dd46915e21c95b77fbcf3de4ab1bd8d6f03dd Mon Sep 17 00:00:00 2001
From: Andrianto Lie
Date: Wed, 20 Mar 2019 17:29:58 +0800
Subject: [PATCH 162/680] Label nodeos_protocol_feature_test as
 nonparallelizable_tests

---
 tests/CMakeLists.txt | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt
index 551f5975927..f357261b627 100644
--- a/tests/CMakeLists.txt
+++ b/tests/CMakeLists.txt
@@ -69,6 +69,8 @@ endif()
 add_test(NAME producer-preactivate-feature-test COMMAND tests/prod_preactivation_test.py --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
 set_property(TEST producer-preactivate-feature-test PROPERTY LABELS nonparallelizable_tests)

+add_test(NAME nodeos_protocol_feature_test COMMAND tests/nodeos_protocol_feature_test.py -v --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
+set_property(TEST nodeos_protocol_feature_test PROPERTY LABELS nonparallelizable_tests)

 add_test(NAME distributed-transactions-test COMMAND tests/distributed-transactions-test.py -d 2 -p 4 -n 6 -v --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
 set_property(TEST distributed-transactions-test PROPERTY LABELS nonparallelizable_tests)
@@ -115,9 +117,6 @@
 set_property(TEST nodeos_under_min_avail_ram_lr_test PROPERTY LABELS long_running_tests)

 add_test(NAME nodeos_irreversible_mode_lr_test COMMAND tests/nodeos_irreversible_mode_test.py -v --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
 set_property(TEST nodeos_irreversible_mode_lr_test PROPERTY LABELS long_running_tests)

-add_test(NAME nodeos_protocol_feature_lr_test COMMAND tests/nodeos_protocol_feature_test.py -v --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
-set_property(TEST nodeos_protocol_feature_lr_test PROPERTY LABELS long_running_tests)
-
 if(ENABLE_COVERAGE_TESTING)

   set(Coverage_NAME ${PROJECT_NAME}_coverage)

From ff870f5a82ee76a8400a43e67ac26f3c4daca2cb Mon Sep 17 00:00:00 2001
From: Kayan
Date: Wed, 20 Mar 2019 17:36:21 +0800
Subject: [PATCH 163/680] prevent producer plugin from scheduling a feature
 that requires preactivation

---
 plugins/producer_plugin/producer_plugin.cpp | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp
index ae289bfdcf5..cf39d0bb8df 100644
--- a/plugins/producer_plugin/producer_plugin.cpp
+++ b/plugins/producer_plugin/producer_plugin.cpp
@@ -979,6 +979,13 @@ void producer_plugin::schedule_protocol_feature_activations( const scheduled_pro
    EOS_ASSERT( set_of_features_to_activate.size() == schedule.protocol_features_to_activate.size(),
               invalid_protocol_features_to_activate, "duplicate digests" );
    chain.validate_protocol_features( schedule.protocol_features_to_activate );
+   const auto& pfs = chain.get_protocol_feature_manager().get_protocol_feature_set();
+   for (auto &feature_digest : set_of_features_to_activate) {
+      const auto& pf = pfs.get_protocol_feature(feature_digest);
+      EOS_ASSERT( !pf.preactivation_required, protocol_feature_exception,
+                  "protocol feature requires preactivation: ${digest}",
+                  ("digest", feature_digest));
+   }
    my->_protocol_features_to_activate = schedule.protocol_features_to_activate;
    my->_protocol_features_signaled = false;
 }

From ccb1a8c5818099502473d16ceaf5df4e39f14b7d Mon Sep 17 00:00:00 2001
From: Andrianto Lie
Date: Wed, 20 Mar 2019 17:58:12 +0800
Subject: [PATCH 164/680] Add unit test to test subjective restrictions

---
 .../testing/include/eosio/testing/tester.hpp |  5 +-
 libraries/testing/tester.cpp                 |  9 ++-
 unittests/protocol_feature_tests.cpp         | 59 +++++++++++++++++++
 3 files changed, 69 insertions(+), 4 deletions(-)

diff --git a/libraries/testing/include/eosio/testing/tester.hpp b/libraries/testing/include/eosio/testing/tester.hpp
index b75196d7f78..78defa2f862 100644
--- a/libraries/testing/include/eosio/testing/tester.hpp
+++ b/libraries/testing/include/eosio/testing/tester.hpp
@@ -75,7 +75,8 @@ namespace eosio { namespace testing {

    bool expect_assert_message(const fc::exception& ex, string expected);

-   protocol_feature_set make_protocol_feature_set();
+   using subjective_restriction_map = std::map<builtin_protocol_feature_t, protocol_feature_subjective_restrictions>;
+   protocol_feature_set make_protocol_feature_set(const subjective_restriction_map custom_subjective_restrictions = {});

    /**
     * @class tester
@@ -320,7 +321,7 @@ namespace eosio { namespace testing {
    class tester : public base_tester {
    public:
-      tester(setup_policy policy = setup_policy::full, db_read_mode read_mode = db_read_mode::SPECULATIVE ) {
+      tester(setup_policy policy = setup_policy::full, db_read_mode read_mode = db_read_mode::SPECULATIVE) {
         init(policy, read_mode);
      }

diff --git a/libraries/testing/tester.cpp b/libraries/testing/tester.cpp
index f03c4f9e8bc..8e2d6fc7239 100644
--- a/libraries/testing/tester.cpp
+++ b/libraries/testing/tester.cpp
@@ -79,13 +79,13 @@ namespace eosio { namespace testing {
       memcpy( data.data(), obj.value.data(), obj.value.size() );
    }

-   protocol_feature_set make_protocol_feature_set() {
+   protocol_feature_set make_protocol_feature_set(const subjective_restriction_map custom_subjective_restrictions) {
      protocol_feature_set pfs;

      map< builtin_protocol_feature_t, optional<digest_type> > visited_builtins;

      std::function<digest_type(builtin_protocol_feature_t)> add_builtins =
-     [&pfs, &visited_builtins, &add_builtins]( builtin_protocol_feature_t codename ) -> digest_type {
+     [&pfs, &visited_builtins, &add_builtins, &custom_subjective_restrictions]( builtin_protocol_feature_t codename ) -> digest_type {
         auto res = visited_builtins.emplace( codename, optional<digest_type>() );
         if( !res.second ) {
            EOS_ASSERT( res.first->second, protocol_feature_exception,
@@ -99,6 +99,11 @@ namespace eosio { namespace testing {
            return add_builtins( d );
         } );

+        const auto itr = custom_subjective_restrictions.find(codename);
+        if (itr != custom_subjective_restrictions.end()) {
+           f.subjective_restrictions = itr->second;
+        }
+
         const auto& pf = pfs.add_feature( f );
         res.first->second = pf.feature_digest;

diff --git a/unittests/protocol_feature_tests.cpp b/unittests/protocol_feature_tests.cpp
index 24ff264e4df..7d4206abd85 100644
--- a/unittests/protocol_feature_tests.cpp
+++ b/unittests/protocol_feature_tests.cpp
@@ -238,4 +238,63 @@

 } FC_LOG_AND_RETHROW()

+BOOST_AUTO_TEST_CASE( subjective_restrictions_test ) try {
+   tester c( setup_policy::none );
+ auto restart_with_new_pfs = [&](protocol_feature_set&& pfs) { + c.close(); + c.open(std::move(pfs), nullptr); + }; + auto pfm = c.control->get_protocol_feature_manager(); + auto preactivate_feature_digest = *pfm.get_builtin_digest(builtin_protocol_feature_t::preactivate_feature ); + auto only_link_to_existing_permission_digest = *pfm.get_builtin_digest(builtin_protocol_feature_t::only_link_to_existing_permission ); + + // First, test PREACTIVATE_FEATURE with invalid earliest allowed activation time + subjective_restriction_map custom_subjective_restrictions = { + { builtin_protocol_feature_t::preactivate_feature, {fc::time_point::from_iso_string( "2200-01-01T00:00:00"), false, true} } + }; + restart_with_new_pfs(make_protocol_feature_set(custom_subjective_restrictions)); + // When a block is produced, the protocol feature activation should fail and throws an error + c.schedule_protocol_features_wo_preactivation({ preactivate_feature_digest }); + BOOST_CHECK_EXCEPTION( c.produce_block(), + protocol_feature_exception, + fc_exception_message_starts_with( std::string("2020-01-01T00:00:00.000 is too early for the earliest ") + + std::string("allowed activation time of the protocol feature") + ) + ); + //Revert back to the valid earliest allowed activation time + custom_subjective_restrictions = { + { builtin_protocol_feature_t::preactivate_feature, {fc::time_point::from_iso_string( "1970-01-01T00:00:00"), false, true} } + }; + restart_with_new_pfs(make_protocol_feature_set(custom_subjective_restrictions)); + // Now it should be fine, the feature should be activated after the block is produced + BOOST_CHECK_NO_THROW( c.produce_block() ); + BOOST_CHECK( c.control->is_builtin_activated( builtin_protocol_feature_t::preactivate_feature ) ); + + c.set_bios_contract(); + c.produce_block(); + + // Second, test ONLY_LINK_TO_EXISTING_PERMISSION with subjective_restrictions enable == false + custom_subjective_restrictions = { + { builtin_protocol_feature_t::only_link_to_existing_permission, {fc::time_point{}, true, false} } + }; + restart_with_new_pfs(make_protocol_feature_set(custom_subjective_restrictions)); + // It should fail + BOOST_CHECK_EXCEPTION( c.preactivate_protocol_features({only_link_to_existing_permission_digest}), + subjective_block_production_exception, + fc_exception_message_starts_with( std::string("protocol feature with digest '") + + std::string(only_link_to_existing_permission_digest)+ + std::string("' is disabled") + ) + ); + // Revert back with subjective_restrictions enable == true + custom_subjective_restrictions = { + { builtin_protocol_feature_t::only_link_to_existing_permission, {fc::time_point{}, true, true} } + }; + restart_with_new_pfs(make_protocol_feature_set(custom_subjective_restrictions)); + // Should be fine now, and activated in the next block + BOOST_CHECK_NO_THROW( c.preactivate_protocol_features({only_link_to_existing_permission_digest}) ); + c.produce_block(); + BOOST_CHECK( c.control->is_builtin_activated( builtin_protocol_feature_t::only_link_to_existing_permission ) ); +} FC_LOG_AND_RETHROW() + BOOST_AUTO_TEST_SUITE_END() From 765d831a9595dad9415ffc9e060ecd9377dbf4bb Mon Sep 17 00:00:00 2001 From: Andrianto Lie Date: Wed, 20 Mar 2019 18:36:25 +0800 Subject: [PATCH 165/680] Add more coverage on the test case --- unittests/protocol_feature_tests.cpp | 50 +++++++++++++++++++--------- 1 file changed, 35 insertions(+), 15 deletions(-) diff --git a/unittests/protocol_feature_tests.cpp b/unittests/protocol_feature_tests.cpp index 7d4206abd85..b62b4b4bd03 100644 --- 
a/unittests/protocol_feature_tests.cpp +++ b/unittests/protocol_feature_tests.cpp @@ -248,23 +248,36 @@ BOOST_AUTO_TEST_CASE( subjective_restrictions_test ) try { auto preactivate_feature_digest = *pfm.get_builtin_digest(builtin_protocol_feature_t::preactivate_feature ); auto only_link_to_existing_permission_digest = *pfm.get_builtin_digest(builtin_protocol_feature_t::only_link_to_existing_permission ); - // First, test PREACTIVATE_FEATURE with invalid earliest allowed activation time + auto invalid_act_time = fc::time_point::from_iso_string( "2200-01-01T00:00:00"); + auto valid_act_time = fc::time_point{}; + + // First, test subjective_restrictions on feature that can be activated WITHOUT preactivation (PREACTIVATE_FEATURE) subjective_restriction_map custom_subjective_restrictions = { - { builtin_protocol_feature_t::preactivate_feature, {fc::time_point::from_iso_string( "2200-01-01T00:00:00"), false, true} } + { builtin_protocol_feature_t::preactivate_feature, {invalid_act_time, false, true} } }; restart_with_new_pfs(make_protocol_feature_set(custom_subjective_restrictions)); // When a block is produced, the protocol feature activation should fail and throws an error c.schedule_protocol_features_wo_preactivation({ preactivate_feature_digest }); BOOST_CHECK_EXCEPTION( c.produce_block(), protocol_feature_exception, - fc_exception_message_starts_with( std::string("2020-01-01T00:00:00.000 is too early for the earliest ") + + fc_exception_message_starts_with( std::string(c.control->head_block_time()) + + std::string(" is too early for the earliest ") + std::string("allowed activation time of the protocol feature") ) ); - //Revert back to the valid earliest allowed activation time - custom_subjective_restrictions = { - { builtin_protocol_feature_t::preactivate_feature, {fc::time_point::from_iso_string( "1970-01-01T00:00:00"), false, true} } - }; + // Revert to the valid earliest allowed activation time, however with enabled == false + custom_subjective_restrictions = {{ builtin_protocol_feature_t::preactivate_feature, {valid_act_time, false, false} }}; + restart_with_new_pfs(make_protocol_feature_set(custom_subjective_restrictions)); + // This should also fail, but with different exception + BOOST_CHECK_EXCEPTION( c.produce_block(), + protocol_feature_exception, + fc_exception_message_starts_with( std::string("protocol feature with digest '") + + std::string(preactivate_feature_digest) + + std::string("' is disabled") + ) + ); + // Revert to the valid earliest allowed activation time, however with subjective_restrictions enabled == true + custom_subjective_restrictions = {{ builtin_protocol_feature_t::preactivate_feature, {valid_act_time, false, true} }}; restart_with_new_pfs(make_protocol_feature_set(custom_subjective_restrictions)); // Now it should be fine, the feature should be activated after the block is produced BOOST_CHECK_NO_THROW( c.produce_block() ); @@ -273,12 +286,21 @@ BOOST_AUTO_TEST_CASE( subjective_restrictions_test ) try { c.set_bios_contract(); c.produce_block(); - // Second, test ONLY_LINK_TO_EXISTING_PERMISSION with subjective_restrictions enable == false - custom_subjective_restrictions = { - { builtin_protocol_feature_t::only_link_to_existing_permission, {fc::time_point{}, true, false} } - }; + // Second, test subjective_restrictions on feature that need to be activated WITH preactivation (ONLY_LINK_TO_EXISTING_PERMISSION) + custom_subjective_restrictions = {{ builtin_protocol_feature_t::only_link_to_existing_permission, {invalid_act_time, true, true} }}; 
restart_with_new_pfs(make_protocol_feature_set(custom_subjective_restrictions)); // It should fail + BOOST_CHECK_EXCEPTION( c.preactivate_protocol_features({only_link_to_existing_permission_digest}), + subjective_block_production_exception, + fc_exception_message_starts_with( std::string(c.control->head_block_time() + fc::milliseconds(config::block_interval_ms)) + + std::string(" is too early for the earliest ") + + std::string("allowed activation time of the protocol feature") + ) + ); + // Revert with valid time and subjective_restrictions enabled == false + custom_subjective_restrictions = {{ builtin_protocol_feature_t::only_link_to_existing_permission, {valid_act_time, true, false} }}; + restart_with_new_pfs(make_protocol_feature_set(custom_subjective_restrictions)); + // It should fail but with different exception BOOST_CHECK_EXCEPTION( c.preactivate_protocol_features({only_link_to_existing_permission_digest}), subjective_block_production_exception, fc_exception_message_starts_with( std::string("protocol feature with digest '") + @@ -286,10 +308,8 @@ BOOST_AUTO_TEST_CASE( subjective_restrictions_test ) try { std::string("' is disabled") ) ); - // Revert back with subjective_restrictions enable == true - custom_subjective_restrictions = { - { builtin_protocol_feature_t::only_link_to_existing_permission, {fc::time_point{}, true, true} } - }; + // Revert with valid time and subjective_restrictions enabled == true + custom_subjective_restrictions = {{ builtin_protocol_feature_t::only_link_to_existing_permission, {valid_act_time, true, true} }}; restart_with_new_pfs(make_protocol_feature_set(custom_subjective_restrictions)); // Should be fine now, and activated in the next block BOOST_CHECK_NO_THROW( c.preactivate_protocol_features({only_link_to_existing_permission_digest}) ); From de1565edce99ce66aa9e1b77523547e7be4c29b2 Mon Sep 17 00:00:00 2001 From: Andrianto Lie Date: Wed, 20 Mar 2019 18:39:21 +0800 Subject: [PATCH 166/680] Change permission of nodeos_protocol_feature_test to executable --- tests/nodeos_protocol_feature_test.py | 0 1 file changed, 0 insertions(+), 0 deletions(-) mode change 100644 => 100755 tests/nodeos_protocol_feature_test.py diff --git a/tests/nodeos_protocol_feature_test.py b/tests/nodeos_protocol_feature_test.py old mode 100644 new mode 100755 From 756f3c70585a33717151902b6b3a2b0304b6558e Mon Sep 17 00:00:00 2001 From: Zach <34947245+kj4ezj@users.noreply.github.com> Date: Wed, 20 Mar 2019 11:17:47 -0400 Subject: [PATCH 167/680] Created test scripts with xUnit, auto-scaling parallelism, and empty test detection; and added them to pipeline (#6963) * Created test scripts with xUnit, auto-scaling parallelism, and empty test detection, and added them to pipeline * Copy-pasta error * Added xUnit output and empty test detection to long-running tests * Removed escape character from copy-pasta * Suppress missing DartConfiguration.tcl file error * Increased the long-running test timeout from 60 min to 90 min * Removed line continuations from Buildkite yaml files * Deleted Buildkite yaml files from pipelines migrated to Buildkite repo --- .buildkite/coverage.yml | 31 --- .buildkite/debug.yml | 343 -------------------------- .buildkite/docker.yml | 101 -------- .buildkite/long_running_tests.yml | 210 ++++++---------- .buildkite/pipeline.yml | 391 +++++++++++------------------- .buildkite/sanitizers.yml | 155 ------------ CMakeLists.txt | 2 +- scripts/long-running-test.sh | 26 ++ scripts/parallel-test.sh | 28 +++ scripts/serial-test.sh | 26 ++ 10 files changed, 301 
insertions(+), 1012 deletions(-) delete mode 100644 .buildkite/coverage.yml delete mode 100644 .buildkite/debug.yml delete mode 100644 .buildkite/docker.yml delete mode 100644 .buildkite/sanitizers.yml create mode 100755 scripts/long-running-test.sh create mode 100755 scripts/parallel-test.sh create mode 100755 scripts/serial-test.sh diff --git a/.buildkite/coverage.yml b/.buildkite/coverage.yml deleted file mode 100644 index ded8b3651e5..00000000000 --- a/.buildkite/coverage.yml +++ /dev/null @@ -1,31 +0,0 @@ -steps: - - label: ":spiral_note_pad: Generate Report" - command: | - echo "--- :hammer: Ensuring lcov is installed" && apt-get install -y lcov && \ - echo "--- :hammer: Building" && \ - cmake -GNinja -DCMAKE_BUILD_TYPE=Debug -DCMAKE_CXX_COMPILER=clang++-4.0 -DCMAKE_C_COMPILER=clang-4.0 -DBOOST_ROOT="${BOOST_ROOT}" -DOPENSSL_ROOT_DIR="${OPENSSL_ROOT_DIR}" -DBUILD_MONGO_DB_PLUGIN=true -DENABLE_COVERAGE_TESTING=true -DBUILD_DOXYGEN=false && \ - ninja && \ - echo "--- :spiral_note_pad: Generating Code Coverage Report" && \ - ninja EOSIO_ut_coverage && \ - echo "--- :arrow_up: Publishing Code Coverage Report" && \ - buildkite-agent artifact upload "EOSIO_ut_coverage/**/*" s3://eos-coverage/$BUILDKITE_JOB_ID && \ - echo "+++ View Report" && \ - printf "\033]1339;url=https://eos-coverage.s3-us-west-2.amazonaws.com/$BUILDKITE_JOB_ID/EOSIO_ut_coverage/index.html;content=View Full Coverage Report\a\n" - agents: - queue: "automation-large-builder-fleet" - plugins: - ecr#v1.1.4: - login: true - account_ids: "436617320021" - no-include-email: true - region: "us-west-2" - docker#v3.0.1: - debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-1" - workdir: /data/job - environment: - - LCOV_PATH=/usr/bin/lcov - - BOOST_ROOT=/root/opt/boost - - OPENSSL_ROOT_DIR=/usr/include/openssl - - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/root/opt/mongodb/bin:~/opt/llvm/bin/ - timeout: 60 diff --git a/.buildkite/debug.yml b/.buildkite/debug.yml deleted file mode 100644 index 3cd6b16d23a..00000000000 --- a/.buildkite/debug.yml +++ /dev/null @@ -1,343 +0,0 @@ -steps: - - command: | - echo "--- Creating symbolic link to job directory :file_folder:" && \ - sleep 5 && ln -s "$(pwd)" /data/job && cd /data/job && \ - echo "+++ Building :hammer:" && \ - echo 1 | ./eosio_build.sh -o Debug && \ - echo "--- Compressing build directory :compression:" && \ - tar -pczf build.tar.gz build/ - label: ":darwin: Mojave Build" - agents: - - "role=builder-v2-1" - - "os=mojave" - artifact_paths: "build.tar.gz" - timeout: 60 - - - command: | - echo "+++ :hammer: Building" && \ - echo 1 | ./eosio_build.sh -o Debug && \ - echo "--- :compression: Compressing build directory" && \ - tar -pczf build.tar.gz build/ - label: ":ubuntu: 16.04 Build" - agents: - queue: "automation-large-builder-fleet" - artifact_paths: "build.tar.gz" - plugins: - ecr#v1.1.4: - login: true - account_ids: "436617320021" - no-include-email: true - region: "us-west-2" - docker#v2.1.0: - debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu16_2-1" - workdir: /data/job - timeout: 60 - - - command: | - echo "+++ :hammer: Building" && \ - echo 1 | ./eosio_build.sh -o Debug && \ - echo "--- :compression: Compressing build directory" && \ - tar -pczf build.tar.gz build/ - label: ":ubuntu: 18.04 Build" - agents: - queue: "automation-large-builder-fleet" - artifact_paths: "build.tar.gz" - plugins: - ecr#v1.1.4: - login: true - account_ids: "436617320021" - no-include-email: true - region: 
"us-west-2" - docker#v2.1.0: - debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-1" - workdir: /data/job - timeout: 60 - - - command: | - echo "+++ :hammer: Building" && \ - echo 1 | ./eosio_build.sh -o Debug && \ - echo "--- :compression: Compressing build directory" && \ - tar -pczf build.tar.gz build/ - label: ":fedora: 27 Build" - agents: - queue: "automation-large-builder-fleet" - artifact_paths: "build.tar.gz" - plugins: - ecr#v1.1.4: - login: true - account_ids: "436617320021" - no-include-email: true - region: "us-west-2" - docker#v2.1.0: - debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:fedora27_2-1" - workdir: /data/job - timeout: 60 - - - command: | - echo "+++ :hammer: Building" && \ - echo 1 | ./eosio_build.sh -o Debug && \ - echo "--- :compression: Compressing build directory" && \ - tar -pczf build.tar.gz build/ - label: ":centos: 7 Build" - agents: - queue: "automation-large-builder-fleet" - artifact_paths: "build.tar.gz" - plugins: - ecr#v1.1.4: - login: true - account_ids: "436617320021" - no-include-email: true - region: "us-west-2" - docker#v2.1.0: - debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-1" - workdir: /data/job - timeout: 60 - - - command: | - echo "+++ :hammer: Building" && \ - echo 1 | ./eosio_build.sh -o Debug && \ - echo "--- :compression: Compressing build directory" && \ - tar -pczf build.tar.gz build/ - label: ":aws: 1 Build" - agents: - queue: "automation-large-builder-fleet" - artifact_paths: "build.tar.gz" - plugins: - ecr#v1.1.4: - login: true - account_ids: "436617320021" - no-include-email: true - region: "us-west-2" - docker#v2.1.0: - debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux1_2-1" - workdir: /data/job - timeout: 60 - - - command: | - echo "+++ :hammer: Building" && \ - echo 1 | ./eosio_build.sh -o Debug && \ - echo "--- :compression: Compressing build directory" && \ - tar -pczf build.tar.gz build/ - label: ":aws: 2 Build" - agents: - queue: "automation-large-builder-fleet" - artifact_paths: "build.tar.gz" - plugins: - ecr#v1.1.4: - login: true - account_ids: "436617320021" - no-include-email: true - region: "us-west-2" - docker#v2.1.0: - debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-1" - workdir: /data/job - timeout: 60 - - - wait - - - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":darwin: Mojave Build" && \ - tar -zxf build.tar.gz && \ - echo "--- :m: Starting MongoDB" && \ - $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ - echo "+++ :microscope: Running tests" && \ - ln -s "$(pwd)" /data/job && cd /data/job/build && ctest -LE long_running_tests --output-on-failure - retry: - automatic: - limit: 1 - label: ":darwin: Mojave Tests" - agents: - - "role=builder-v2-1" - - "os=mojave" - artifact_paths: - - "mongod.log" - - "build/genesis.json" - - "build/config.ini" - timeout: 60 - - - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . 
--step ":ubuntu: 16.04 Build" && \ - tar -zxf build.tar.gz && \ - echo "--- :m: Starting MongoDB" && \ - $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ - echo "+++ :microscope: Running tests" && \ - cd /data/job/build && ctest -LE long_running_tests --output-on-failure - retry: - automatic: - limit: 1 - label: ":ubuntu: 16.04 Tests" - agents: - queue: "automation-large-builder-fleet" - artifact_paths: - - "mongod.log" - - "build/genesis.json" - - "build/config.ini" - plugins: - ecr#v1.1.4: - login: true - account_ids: "436617320021" - no-include-email: true - region: "us-west-2" - docker#v2.1.0: - debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu16_2-1" - workdir: /data/job - timeout: 60 - - - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":ubuntu: 18.04 Build" && \ - tar -zxf build.tar.gz && \ - echo "--- :m: Starting MongoDB" && \ - $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ - echo "+++ :microscope: Running tests" && \ - cd /data/job/build && ctest -LE long_running_tests --output-on-failure - retry: - automatic: - limit: 1 - label: ":ubuntu: 18.04 Tests" - agents: - queue: "automation-large-builder-fleet" - artifact_paths: - - "mongod.log" - - "build/genesis.json" - - "build/config.ini" - plugins: - ecr#v1.1.4: - login: true - account_ids: "436617320021" - no-include-email: true - region: "us-west-2" - docker#v2.1.0: - debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-1" - workdir: /data/job - timeout: 60 - - - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":fedora: 27 Build" && \ - tar -zxf build.tar.gz && \ - echo "--- :m: Starting MongoDB" && \ - $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ - echo "+++ :microscope: Running tests" && \ - cd /data/job/build && ctest -LE long_running_tests --output-on-failure - retry: - automatic: - limit: 1 - label: ":fedora: 27 Tests" - agents: - queue: "automation-large-builder-fleet" - artifact_paths: - - "mongod.log" - - "build/genesis.json" - - "build/config.ini" - plugins: - ecr#v1.1.4: - login: true - account_ids: "436617320021" - no-include-email: true - region: "us-west-2" - docker#v2.1.0: - debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:fedora27_2-1" - workdir: /data/job - timeout: 60 - - - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":centos: 7 Build" && \ - tar -zxf build.tar.gz && \ - echo "--- :m: Starting MongoDB" && \ - $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ - echo "+++ :microscope: Running tests" && \ - cd /data/job/build && ctest -LE long_running_tests --output-on-failure - retry: - automatic: - limit: 1 - label: ":centos: 7 Tests" - agents: - queue: "automation-large-builder-fleet" - artifact_paths: - - "mongod.log" - - "build/genesis.json" - - "build/config.ini" - plugins: - ecr#v1.1.4: - login: true - account_ids: "436617320021" - no-include-email: true - region: "us-west-2" - docker#v2.1.0: - debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-1" - workdir: /data/job - timeout: 60 - - - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . 
--step ":aws: 1 Build" && \ - tar -zxf build.tar.gz && \ - echo "--- :m: Starting MongoDB" && \ - $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ - echo "+++ :microscope: Running tests" && \ - cd /data/job/build && ctest -LE long_running_tests --output-on-failure - retry: - automatic: - limit: 1 - label: ":aws: 1 Tests" - agents: - queue: "automation-large-builder-fleet" - artifact_paths: - - "mongod.log" - - "build/genesis.json" - - "build/config.ini" - plugins: - ecr#v1.1.4: - login: true - account_ids: "436617320021" - no-include-email: true - region: "us-west-2" - docker#v2.1.0: - debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux1_2-1" - workdir: /data/job - timeout: 60 - - - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":aws: 2 Build" && \ - tar -zxf build.tar.gz && \ - echo "--- :m: Starting MongoDB" && \ - $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ - echo "+++ :microscope: Running tests" && \ - cd /data/job/build && ctest -LE long_running_tests --output-on-failure - retry: - automatic: - limit: 1 - label: ":aws: 2 Tests" - agents: - queue: "automation-large-builder-fleet" - artifact_paths: - - "mongod.log" - - "build/genesis.json" - - "build/config.ini" - plugins: - ecr#v1.1.4: - login: true - account_ids: "436617320021" - no-include-email: true - region: "us-west-2" - docker#v2.1.0: - debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-1" - workdir: /data/job - timeout: 60 \ No newline at end of file diff --git a/.buildkite/docker.yml b/.buildkite/docker.yml deleted file mode 100644 index 9be30a77cef..00000000000 --- a/.buildkite/docker.yml +++ /dev/null @@ -1,101 +0,0 @@ -steps: - - command: | - echo "AUTHENTICATING GOOGLE SERVICE ACCOUNT" && \ - gcloud --quiet auth activate-service-account b1-automation-svc@b1-automation-dev.iam.gserviceaccount.com --key-file=/etc/gcp-service-account.json && \ - docker-credential-gcr configure-docker && \ - echo "BUILDING BUILD IMAGE" && \ - cd Docker/builder && \ - docker build -t eosio/builder:latest -t eosio/builder:$BUILDKITE_COMMIT -t eosio/builder:$BUILDKITE_BRANCH . 
--build-arg branch=$BUILDKITE_COMMIT && \ - [[ "$BUILDKITE_TAG" != "" ]] && docker tag eosio/builder:latest eosio/builder:$BUILDKITE_TAG || : && \ - docker tag eosio/builder:$BUILDKITE_COMMIT gcr.io/b1-automation-dev/eosio/builder:$BUILDKITE_COMMIT && \ - docker tag eosio/builder:$BUILDKITE_BRANCH gcr.io/b1-automation-dev/eosio/builder:$BUILDKITE_BRANCH && \ - [[ "$BUILDKITE_TAG" != "" ]] && docker tag eosio/builder:$BUILDKITE_TAG gcr.io/b1-automation-dev/eosio/builder:$BUILDKITE_TAG || : && \ - docker tag eosio/builder:latest gcr.io/b1-automation-dev/eosio/builder:latest && \ - echo "PUSHING DOCKER IMAGES" && \ - docker push gcr.io/b1-automation-dev/eosio/builder:$BUILDKITE_COMMIT && \ - docker push gcr.io/b1-automation-dev/eosio/builder:$BUILDKITE_BRANCH && \ - [[ "$BUILDKITE_TAG" != "" ]] && docker push gcr.io/b1-automation-dev/eosio/builder:$BUILDKITE_TAG || : && \ - docker push gcr.io/b1-automation-dev/eosio/builder:latest && \ - echo "TRASHING OLD IMAGES" && \ - docker rmi eosio/builder:$BUILDKITE_COMMIT && \ - docker rmi eosio/builder:$BUILDKITE_BRANCH && \ - [[ "$BUILDKITE_TAG" != "" ]] && docker rmi eosio/builder:$BUILDKITE_TAG || : && \ - docker rmi eosio/builder:latest && \ - docker rmi gcr.io/b1-automation-dev/eosio/builder:$BUILDKITE_COMMIT && \ - docker rmi gcr.io/b1-automation-dev/eosio/builder:$BUILDKITE_BRANCH && \ - [[ "$BUILDKITE_TAG" != "" ]] && docker rmi gcr.io/b1-automation-dev/eosio/builder:$BUILDKITE_TAG || : && \ - docker rmi gcr.io/b1-automation-dev/eosio/builder:latest - label: "Docker build builder" - agents: - queue: "automation-docker-builder-fleet" - timeout: 300 - - - wait - - - command: | - echo "AUTHENTICATING GOOGLE SERVICE ACCOUNT" && \ - gcloud --quiet auth activate-service-account b1-automation-svc@b1-automation-dev.iam.gserviceaccount.com --key-file=/etc/gcp-service-account.json && \ - docker-credential-gcr configure-docker && \ - echo "BUILDING EOS IMAGE" && \ - docker pull gcr.io/b1-automation-dev/eosio/builder:$BUILDKITE_COMMIT && \ - cd Docker && \ - docker build -t eosio/eos:latest -t eosio/eos:$BUILDKITE_COMMIT -t eosio/eos:$BUILDKITE_BRANCH . 
--build-arg branch=$BUILDKITE_BRANCH && \ - [[ "$BUILDKITE_TAG" != "" ]] && docker tag eosio/eos:latest eosio/eos:$BUILDKITE_TAG || : && \ - docker tag eosio/eos:$BUILDKITE_COMMIT gcr.io/b1-automation-dev/eosio/eos:$BUILDKITE_COMMIT && \ - docker tag eosio/eos:$BUILDKITE_BRANCH gcr.io/b1-automation-dev/eosio/eos:$BUILDKITE_BRANCH && \ - [[ "$BUILDKITE_TAG" != "" ]] && docker tag eosio/eos:$BUILDKITE_TAG gcr.io/b1-automation-dev/eosio/eos:$BUILDKITE_TAG || : && \ - docker tag eosio/eos:latest gcr.io/b1-automation-dev/eosio/eos:latest && \ - echo "PUSHING DOCKER IMAGES" && \ - docker push gcr.io/b1-automation-dev/eosio/eos:$BUILDKITE_COMMIT && \ - docker push gcr.io/b1-automation-dev/eosio/eos:$BUILDKITE_BRANCH && \ - [[ "$BUILDKITE_TAG" != "" ]] && docker push gcr.io/b1-automation-dev/eosio/eos:$BUILDKITE_TAG || : && \ - docker push gcr.io/b1-automation-dev/eosio/eos:latest && \ - echo "TRASHING OLD IMAGES" && \ - docker rmi eosio/eos:$BUILDKITE_COMMIT && \ - docker rmi eosio/eos:$BUILDKITE_BRANCH && \ - [[ "$BUILDKITE_TAG" != "" ]] && docker rmi eosio/eos:$BUILDKITE_TAG || : && \ - docker rmi eosio/eos:latest && \ - docker rmi gcr.io/b1-automation-dev/eosio/eos:$BUILDKITE_COMMIT && \ - docker rmi gcr.io/b1-automation-dev/eosio/eos:$BUILDKITE_BRANCH && \ - [[ "$BUILDKITE_TAG" != "" ]] && docker rmi gcr.io/b1-automation-dev/eosio/eos:$BUILDKITE_TAG || : && \ - docker rmi gcr.io/b1-automation-dev/eosio/eos:latest && \ - docker rmi gcr.io/b1-automation-dev/eosio/builder:$BUILDKITE_COMMIT - label: "Docker build eos" - agents: - queue: "automation-docker-builder-fleet" - timeout: 300 - - - command: | - echo "AUTHENTICATING GOOGLE SERVICE ACCOUNT" && \ - gcloud --quiet auth activate-service-account b1-automation-svc@b1-automation-dev.iam.gserviceaccount.com --key-file=/etc/gcp-service-account.json && \ - docker-credential-gcr configure-docker && \ - echo "BUILDING EOS DEV IMAGE" && \ - docker pull gcr.io/b1-automation-dev/eosio/builder:$BUILDKITE_COMMIT && \ - cd Docker/dev && \ - docker build -t eosio/eos-dev:latest -t eosio/eos-dev:$BUILDKITE_COMMIT -t eosio/eos-dev:$BUILDKITE_BRANCH . 
--build-arg branch=$BUILDKITE_BRANCH && \ - [[ "$BUILDKITE_TAG" != "" ]] && docker tag eosio/eos-dev:latest eosio/eos-dev:$BUILDKITE_TAG || : && \ - docker tag eosio/eos-dev:$BUILDKITE_COMMIT gcr.io/b1-automation-dev/eosio/eos-dev:$BUILDKITE_COMMIT && \ - docker tag eosio/eos-dev:$BUILDKITE_BRANCH gcr.io/b1-automation-dev/eosio/eos-dev:$BUILDKITE_BRANCH && \ - [[ "$BUILDKITE_TAG" != "" ]] && docker tag eosio/eos-dev:$BUILDKITE_TAG gcr.io/b1-automation-dev/eosio/eos-dev:$BUILDKITE_TAG || : && \ - docker tag eosio/eos-dev:latest gcr.io/b1-automation-dev/eosio/eos-dev:latest && \ - echo "PUSHING DOCKER IMAGES" && \ - docker push gcr.io/b1-automation-dev/eosio/eos-dev:$BUILDKITE_COMMIT && \ - docker push gcr.io/b1-automation-dev/eosio/eos-dev:$BUILDKITE_BRANCH && \ - [[ "$BUILDKITE_TAG" != "" ]] && docker push gcr.io/b1-automation-dev/eosio/eos-dev:$BUILDKITE_TAG || : && \ - docker push gcr.io/b1-automation-dev/eosio/eos-dev:latest && \ - echo "TRASHING OLD IMAGES" && \ - docker rmi eosio/eos-dev:$BUILDKITE_COMMIT && \ - docker rmi eosio/eos-dev:$BUILDKITE_BRANCH && \ - [[ "$BUILDKITE_TAG" != "" ]] && docker rmi eosio/eos-dev:$BUILDKITE_TAG || : && \ - docker rmi eosio/eos-dev:latest && \ - docker rmi gcr.io/b1-automation-dev/eosio/eos-dev:$BUILDKITE_COMMIT && \ - docker rmi gcr.io/b1-automation-dev/eosio/eos-dev:$BUILDKITE_BRANCH && \ - [[ "$BUILDKITE_TAG" != "" ]] && docker rmi gcr.io/b1-automation-dev/eosio/eos-dev:$BUILDKITE_TAG || : && \ - docker rmi gcr.io/b1-automation-dev/eosio/eos-dev:latest && \ - docker rmi gcr.io/b1-automation-dev/eosio/builder:$BUILDKITE_COMMIT - label: "Docker build eos-dev" - agents: - queue: "automation-docker-builder-fleet" - timeout: 300 - - - wait diff --git a/.buildkite/long_running_tests.yml b/.buildkite/long_running_tests.yml index 0e6133019ce..6383f57c392 100644 --- a/.buildkite/long_running_tests.yml +++ b/.buildkite/long_running_tests.yml @@ -1,9 +1,9 @@ steps: - command: | - echo "+++ :hammer: Building" && \ - ./scripts/eosio_build.sh -y && \ - echo "--- :compression: Compressing build directory" && \ + echo "+++ :hammer: Building" + ./scripts/eosio_build.sh -y + echo "--- :compression: Compressing build directory" tar -pczf build.tar.gz build/ label: ":ubuntu: 16.04 Build" agents: @@ -22,9 +22,9 @@ steps: timeout: 60 - command: | - echo "+++ :hammer: Building" && \ - ./scripts/eosio_build.sh -y && \ - echo "--- :compression: Compressing build directory" && \ + echo "+++ :hammer: Building" + ./scripts/eosio_build.sh -y + echo "--- :compression: Compressing build directory" tar -pczf build.tar.gz build/ label: ":ubuntu: 18.04 Build" agents: @@ -43,9 +43,9 @@ steps: timeout: 60 - command: | - echo "+++ :hammer: Building" && \ - ./scripts/eosio_build.sh -y && \ - echo "--- :compression: Compressing build directory" && \ + echo "+++ :hammer: Building" + ./scripts/eosio_build.sh -y + echo "--- :compression: Compressing build directory" tar -pczf build.tar.gz build/ label: ":centos: 7 Build" agents: @@ -64,9 +64,9 @@ steps: timeout: 60 - command: | - echo "+++ :hammer: Building" && \ - ./scripts/eosio_build.sh -y && \ - echo "--- :compression: Compressing build directory" && \ + echo "+++ :hammer: Building" + ./scripts/eosio_build.sh -y + echo "--- :compression: Compressing build directory" tar -pczf build.tar.gz build/ label: ":aws: 1 Build" agents: @@ -85,9 +85,9 @@ steps: timeout: 60 - command: | - echo "+++ :hammer: Building" && \ - ./scripts/eosio_build.sh -y && \ - echo "--- :compression: Compressing build directory" && \ + echo "+++ :hammer: 
Building" + ./scripts/eosio_build.sh -y + echo "--- :compression: Compressing build directory" tar -pczf build.tar.gz build/ label: ":aws: 2 Build" agents: @@ -106,9 +106,9 @@ steps: timeout: 60 - command: | - echo "+++ :hammer: Building" && \ - ./scripts/eosio_build.sh -y && \ - echo "--- :compression: Compressing build directory" && \ + echo "+++ :hammer: Building" + ./scripts/eosio_build.sh -y + echo "--- :compression: Compressing build directory" tar -pczf build.tar.gz build/ label: ":fedora: 27 Build" agents: @@ -127,11 +127,11 @@ steps: timeout: 60 - command: | - echo "--- Creating symbolic link to job directory :file_folder:" && \ - sleep 5 && ln -s "$(pwd)" /data/job && cd /data/job && \ - echo "+++ Building :hammer:" && \ - ./scripts/eosio_build.sh -y && \ - echo "--- Compressing build directory :compression:" && \ + echo "--- Creating symbolic link to job directory :file_folder:" + sleep 5 && ln -s "$(pwd)" /data/job && cd /data/job + echo "+++ Building :hammer:" + ./scripts/eosio_build.sh -y + echo "--- Compressing build directory :compression:" tar -pczf build.tar.gz build/ label: ":darwin: Mojave Build" agents: @@ -141,11 +141,11 @@ steps: timeout: 60 - command: | - echo "--- Creating symbolic link to job directory :file_folder:" && \ - sleep 5 && ln -s "$(pwd)" /data/job && cd /data/job && \ - echo "+++ Building :hammer:" && \ - ./scripts/eosio_build.sh -y && \ - echo "--- Compressing build directory :compression:" && \ + echo "--- Creating symbolic link to job directory :file_folder:" + sleep 5 && ln -s "$(pwd)" /data/job && cd /data/job + echo "+++ Building :hammer:" + ./scripts/eosio_build.sh -y + echo "--- Compressing build directory :compression:" tar -pczf build.tar.gz build/ label: ":darwin: High Sierra Build" agents: @@ -156,21 +156,14 @@ steps: - wait - - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":ubuntu: 16.04 Build" && \ - tar -zxf build.tar.gz && \ - echo "--- :m: Starting MongoDB" && \ - ~/bin/mongod --fork --dbpath ~/data/mongodb -f ~/etc/mongod.conf --logpath "$(pwd)"/mongod.log && \ - echo "+++ :microscope: Running LR Tests" && \ - cd /data/job/build && PATH=\$PATH:~/opt/mongodb/bin ctest -L long_running_tests --output-on-failure + - command: | # Ubuntu 16.04 Tests + echo "--- :arrow_down: Downloading Build Directory" + buildkite-agent artifact download "build.tar.gz" . --step ":ubuntu: 16.04 Build" + echo "+++ :microscope: Running LR Tests" + ./scripts/long-running-test.sh label: ":ubuntu: 16.04 LR Tests" agents: queue: "automation-large-builder-fleet" - artifact_paths: - - "mongod.log" - - "build/genesis.json" - - "build/config.ini" plugins: ecr#v1.1.4: login: true @@ -181,23 +174,16 @@ steps: debug: true image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu16_2-1" workdir: /data/job - timeout: 60 + timeout: 90 - - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":ubuntu: 18.04 Build" && \ - tar -zxf build.tar.gz && \ - echo "--- :m: Starting MongoDB" && \ - ~/bin/mongod --fork --dbpath ~/data/mongodb -f ~/etc/mongod.conf --logpath "$(pwd)"/mongod.log && \ - echo "+++ :microscope: Running LR Tests" && \ - cd /data/job/build && PATH=\$PATH:~/opt/mongodb/bin ctest -L long_running_tests --output-on-failure + - command: | # Ubuntu 18.04 Tests + echo "--- :arrow_down: Downloading Build Directory" + buildkite-agent artifact download "build.tar.gz" . 
--step ":ubuntu: 18.04 Build" + echo "+++ :microscope: Running LR Tests" + ./scripts/long-running-test.sh label: ":ubuntu: 18.04 LR Tests" agents: queue: "automation-large-builder-fleet" - artifact_paths: - - "mongod.log" - - "build/genesis.json" - - "build/config.ini" plugins: ecr#v1.1.4: login: true @@ -208,23 +194,16 @@ steps: debug: true image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-1" workdir: /data/job - timeout: 60 + timeout: 90 - - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":centos: 7 Build" && \ - tar -zxf build.tar.gz && \ - echo "--- :m: Starting MongoDB" && \ - ~/bin/mongod --fork --dbpath ~/data/mongodb -f ~/etc/mongod.conf --logpath "$(pwd)"/mongod.log && \ - echo "+++ :microscope: Running LR Tests" && \ - cd /data/job/build && PATH=\$PATH:~/opt/mongodb/bin ctest -L long_running_tests --output-on-failure + - command: | # centOS Tests + echo "--- :arrow_down: Downloading Build Directory" + buildkite-agent artifact download "build.tar.gz" . --step ":centos: 7 Build" + echo "+++ :microscope: Running LR Tests" + ./scripts/long-running-test.sh label: ":centos: 7 LR Tests" agents: queue: "automation-large-builder-fleet" - artifact_paths: - - "mongod.log" - - "build/genesis.json" - - "build/config.ini" plugins: ecr#v1.1.4: login: true @@ -235,23 +214,16 @@ steps: debug: true image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-1" workdir: /data/job - timeout: 60 + timeout: 90 - - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":aws: 1 Build" && \ - tar -zxf build.tar.gz && \ - echo "--- :m: Starting MongoDB" && \ - ~/bin/mongod --fork --dbpath ~/data/mongodb -f ~/etc/mongod.conf --logpath "$(pwd)"/mongod.log && \ - echo "+++ :microscope: Running LR Tests" && \ - cd /data/job/build && PATH=\$PATH:~/opt/mongodb/bin ctest -L long_running_tests --output-on-failure + - command: | # Amazon AWS-1 Linux Tests + echo "--- :arrow_down: Downloading Build Directory" + buildkite-agent artifact download "build.tar.gz" . --step ":aws: 1 Build" + echo "+++ :microscope: Running LR Tests" + ./scripts/long-running-test.sh label: ":aws: 1 LR Tests" agents: queue: "automation-large-builder-fleet" - artifact_paths: - - "mongod.log" - - "build/genesis.json" - - "build/config.ini" plugins: ecr#v1.1.4: login: true @@ -262,23 +234,16 @@ steps: debug: true image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux1_2-1" workdir: /data/job - timeout: 60 + timeout: 90 - - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":aws: 2 Build" && \ - tar -zxf build.tar.gz && \ - echo "--- :m: Starting MongoDB" && \ - ~/bin/mongod --fork --dbpath ~/data/mongodb -f ~/etc/mongod.conf --logpath "$(pwd)"/mongod.log && \ - echo "+++ :microscope: Running LR Tests" && \ - cd /data/job/build && PATH=\$PATH:~/opt/mongodb/bin ctest -L long_running_tests --output-on-failure + - command: | # Amazon AWS-2 Linux Tests + echo "--- :arrow_down: Downloading Build Directory" + buildkite-agent artifact download "build.tar.gz" . 
--step ":aws: 2 Build" + echo "+++ :microscope: Running LR Tests" + ./scripts/long-running-test.sh label: ":aws: 2 LR Tests" agents: queue: "automation-large-builder-fleet" - artifact_paths: - - "mongod.log" - - "build/genesis.json" - - "build/config.ini" plugins: ecr#v1.1.4: login: true @@ -289,23 +254,16 @@ steps: debug: true image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-1" workdir: /data/job - timeout: 60 + timeout: 90 - - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":fedora: 27 Build" && \ - tar -zxf build.tar.gz && \ - echo "--- :m: Starting MongoDB" && \ - ~/bin/mongod --fork --dbpath ~/data/mongodb -f ~/etc/mongod.conf --logpath "$(pwd)"/mongod.log && \ - echo "+++ :microscope: Running LR Tests" && \ - cd /data/job/build && PATH=\$PATH:~/opt/mongodb/bin ctest -L long_running_tests --output-on-failure + - command: | # Fedora Tests + echo "--- :arrow_down: Downloading Build Directory" + buildkite-agent artifact download "build.tar.gz" . --step ":fedora: 27 Build" + echo "+++ :microscope: Running LR Tests" + ./scripts/long-running-test.sh label: ":fedora: 27 LR Tests" agents: queue: "automation-large-builder-fleet" - artifact_paths: - - "mongod.log" - - "build/genesis.json" - - "build/config.ini" plugins: ecr#v1.1.4: login: true @@ -316,42 +274,26 @@ steps: debug: true image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:fedora27_2-1" workdir: /data/job - timeout: 60 + timeout: 90 - - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":darwin: High Sierra Build" && \ - tar -zxf build.tar.gz && \ - echo "--- :m: Starting MongoDB" && \ - ~/bin/mongod --fork --dbpath ~/data/mongodb -f ~/etc/mongod.conf --logpath "$(pwd)"/mongod.log && \ - echo "+++ :microscope: Running LR Tests" && \ - ln -s "$(pwd)" /data/job && cd /data/job/build && PATH=\$PATH:~/opt/mongodb/bin ctest -L long_running_tests --output-on-failure + - command: | # High Sierra Tests + echo "--- :arrow_down: Downloading Build Directory" + buildkite-agent artifact download "build.tar.gz" . --step ":darwin: High Sierra Build" + echo "+++ :microscope: Running LR Tests" + ln -s "$(pwd)" /data/job && ./scripts/long-running-test.sh label: ":darwin: High Sierra LR Tests" agents: - "role=tester-v2-1" - "os=high-sierra" - artifact_paths: - - "mongod.log" - - "build/genesis.json" - - "build/config.ini" - timeout: 60 + timeout: 90 - - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":darwin: Mojave Build" && \ - tar -zxf build.tar.gz && \ - echo "--- :m: Starting MongoDB" && \ - ~/bin/mongod --fork --dbpath ~/data/mongodb -f ~/etc/mongod.conf --logpath "$(pwd)"/mongod.log && \ - echo "+++ :microscope: Running LR Tests" && \ - ln -s "$(pwd)" /data/job && cd /data/job/build && PATH=\$PATH:~/opt/mongodb/bin ctest -L long_running_tests --output-on-failure + - command: | # Mojave Tests + echo "--- :arrow_down: Downloading Build Directory" + buildkite-agent artifact download "build.tar.gz" . 
--step ":darwin: Mojave Build" + echo "+++ :microscope: Running LR Tests" + ln -s "$(pwd)" /data/job && ./scripts/long-running-test.sh label: ":darwin: Mojave LR Tests" agents: - "role=tester-v2-1" - "os=mojave" - artifact_paths: - - "mongod.log" - - "build/genesis.json" - - "build/config.ini" - timeout: 60 - - \ No newline at end of file + timeout: 90 diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml index 4e860734910..57ce31e5a6c 100644 --- a/.buildkite/pipeline.yml +++ b/.buildkite/pipeline.yml @@ -1,9 +1,9 @@ steps: - command: | - echo "+++ :hammer: Building" && \ - ./scripts/eosio_build.sh -y && \ - echo "--- :compression: Compressing build directory" && \ + echo "+++ :hammer: Building" + ./scripts/eosio_build.sh -y + echo "--- :compression: Compressing build directory" tar -pczf build.tar.gz build/ label: ":ubuntu: 16.04 Build" agents: @@ -22,9 +22,9 @@ steps: timeout: 60 - command: | - echo "+++ :hammer: Building" && \ - ./scripts/eosio_build.sh -y && \ - echo "--- :compression: Compressing build directory" && \ + echo "+++ :hammer: Building" + ./scripts/eosio_build.sh -y + echo "--- :compression: Compressing build directory" tar -pczf build.tar.gz build/ label: ":ubuntu: 18.04 Build" agents: @@ -43,9 +43,9 @@ steps: timeout: 60 - command: | - echo "+++ :hammer: Building" && \ - ./scripts/eosio_build.sh -y && \ - echo "--- :compression: Compressing build directory" && \ + echo "+++ :hammer: Building" + ./scripts/eosio_build.sh -y + echo "--- :compression: Compressing build directory" tar -pczf build.tar.gz build/ label: ":centos: 7 Build" agents: @@ -64,9 +64,9 @@ steps: timeout: 60 - command: | - echo "+++ :hammer: Building" && \ - ./scripts/eosio_build.sh -y && \ - echo "--- :compression: Compressing build directory" && \ + echo "+++ :hammer: Building" + ./scripts/eosio_build.sh -y + echo "--- :compression: Compressing build directory" tar -pczf build.tar.gz build/ label: ":aws: 1 Build" agents: @@ -85,9 +85,9 @@ steps: timeout: 60 - command: | - echo "+++ :hammer: Building" && \ - ./scripts/eosio_build.sh -y && \ - echo "--- :compression: Compressing build directory" && \ + echo "+++ :hammer: Building" + ./scripts/eosio_build.sh -y + echo "--- :compression: Compressing build directory" tar -pczf build.tar.gz build/ label: ":aws: 2 Build" agents: @@ -106,9 +106,9 @@ steps: timeout: 60 - command: | - echo "+++ :hammer: Building" && \ - ./scripts/eosio_build.sh -y && \ - echo "--- :compression: Compressing build directory" && \ + echo "+++ :hammer: Building" + ./scripts/eosio_build.sh -y + echo "--- :compression: Compressing build directory" tar -pczf build.tar.gz build/ label: ":fedora: 27 Build" agents: @@ -127,11 +127,11 @@ steps: timeout: 60 - command: | - echo "--- Creating symbolic link to job directory :file_folder:" && \ - sleep 5 && ln -s "$(pwd)" /data/job && cd /data/job && \ - echo "+++ Building :hammer:" && \ - ./scripts/eosio_build.sh -y && \ - echo "--- Compressing build directory :compression:" && \ + echo "--- Creating symbolic link to job directory :file_folder:" + sleep 5 && ln -s "$(pwd)" /data/job && cd /data/job + echo "+++ Building :hammer:" + ./scripts/eosio_build.sh -y + echo "--- Compressing build directory :compression:" tar -pczf build.tar.gz build/ label: ":darwin: Mojave Build" agents: @@ -141,11 +141,11 @@ steps: timeout: 60 - command: | - echo "--- Creating symbolic link to job directory :file_folder:" && \ - sleep 5 && ln -s "$(pwd)" /data/job && cd /data/job && \ - echo "+++ Building :hammer:" && \ - ./scripts/eosio_build.sh -y && \ - 
echo "--- Compressing build directory :compression:" && \ + echo "--- Creating symbolic link to job directory :file_folder:" + sleep 5 && ln -s "$(pwd)" /data/job && cd /data/job + echo "+++ Building :hammer:" + ./scripts/eosio_build.sh -y + echo "--- Compressing build directory :compression:" tar -pczf build.tar.gz build/ label: ":darwin: High Sierra Build" agents: @@ -156,21 +156,15 @@ steps: - wait + # Ubuntu 16.04 Tests - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":ubuntu: 16.04 Build" && \ - tar -zxf build.tar.gz && \ - echo "--- :m: Starting MongoDB" && \ - ~/bin/mongod --fork --dbpath ~/data/mongodb -f ~/etc/mongod.conf --logpath "$(pwd)"/mongod.log && \ - echo "+++ :microscope: Running tests" && \ - cd /data/job/build && PATH=\$PATH:~/opt/mongodb/bin ctest -j8 -LE _tests --output-on-failure + echo "--- :arrow_down: Downloading Build Directory" + buildkite-agent artifact download "build.tar.gz" . --step ":ubuntu: 16.04 Build" + echo "+++ :microscope: Running Tests" + ./scripts/parallel-test.sh label: ":ubuntu: 16.04 Tests" agents: queue: "automation-large-builder-fleet" - artifact_paths: - - "mongod.log" - - "build/genesis.json" - - "build/config.ini" plugins: ecr#v1.1.4: login: true @@ -184,20 +178,13 @@ steps: timeout: 60 - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":ubuntu: 16.04 Build" && \ - tar -zxf build.tar.gz && \ - echo "--- :m: Starting MongoDB" && \ - ~/bin/mongod --fork --dbpath ~/data/mongodb -f ~/etc/mongod.conf --logpath "$(pwd)"/mongod.log && \ - echo "+++ :microscope: Running tests" && \ - cd /data/job/build && PATH=\$PATH:~/opt/mongodb/bin ctest -L nonparallelizable_tests --output-on-failure + echo "--- :arrow_down: Downloading Build Directory" + buildkite-agent artifact download "build.tar.gz" . --step ":ubuntu: 16.04 Build" + echo "+++ :microscope: Running Tests" + ./scripts/serial-test.sh label: ":ubuntu: 16.04 NP Tests" agents: queue: "automation-large-builder-fleet" - artifact_paths: - - "mongod.log" - - "build/genesis.json" - - "build/config.ini" plugins: ecr#v1.1.4: login: true @@ -210,21 +197,15 @@ steps: workdir: /data/job timeout: 60 + # Ubuntu 18.04 Tests - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":ubuntu: 18.04 Build" && \ - tar -zxf build.tar.gz && \ - echo "--- :m: Starting MongoDB" && \ - ~/bin/mongod --fork --dbpath ~/data/mongodb -f ~/etc/mongod.conf --logpath "$(pwd)"/mongod.log && \ - echo "+++ :microscope: Running tests" && \ - cd /data/job/build && PATH=\$PATH:~/opt/mongodb/bin ctest -j8 -LE _tests --output-on-failure + echo "--- :arrow_down: Downloading Build Directory" + buildkite-agent artifact download "build.tar.gz" . --step ":ubuntu: 18.04 Build" + echo "+++ :microscope: Running Tests" + ./scripts/parallel-test.sh label: ":ubuntu: 18.04 Tests" agents: queue: "automation-large-builder-fleet" - artifact_paths: - - "mongod.log" - - "build/genesis.json" - - "build/config.ini" plugins: ecr#v1.1.4: login: true @@ -238,20 +219,13 @@ steps: timeout: 60 - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . 
--step ":ubuntu: 18.04 Build" && \ - tar -zxf build.tar.gz && \ - echo "--- :m: Starting MongoDB" && \ - ~/bin/mongod --fork --dbpath ~/data/mongodb -f ~/etc/mongod.conf --logpath "$(pwd)"/mongod.log && \ - echo "+++ :microscope: Running tests" && \ - cd /data/job/build && PATH=\$PATH:~/opt/mongodb/bin ctest -L nonparallelizable_tests --output-on-failure + echo "--- :arrow_down: Downloading Build Directory" + buildkite-agent artifact download "build.tar.gz" . --step ":ubuntu: 18.04 Build" + echo "+++ :microscope: Running Tests" + ./scripts/serial-test.sh label: ":ubuntu: 18.04 NP Tests" agents: queue: "automation-large-builder-fleet" - artifact_paths: - - "mongod.log" - - "build/genesis.json" - - "build/config.ini" plugins: ecr#v1.1.4: login: true @@ -264,22 +238,15 @@ steps: workdir: /data/job timeout: 60 - + # centOS Tests - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":centos: 7 Build" && \ - tar -zxf build.tar.gz && \ - echo "--- :m: Starting MongoDB" && \ - ~/bin/mongod --fork --dbpath ~/data/mongodb -f ~/etc/mongod.conf --logpath "$(pwd)"/mongod.log && \ - echo "+++ :microscope: Running tests" && \ - cd /data/job/build && PATH=\$PATH:~/opt/mongodb/bin ctest -j8 -LE _tests --output-on-failure + echo "--- :arrow_down: Downloading Build Directory" + buildkite-agent artifact download "build.tar.gz" . --step ":centos: 7 Build" + echo "+++ :microscope: Running Tests" + ./scripts/parallel-test.sh label: ":centos: 7 Tests" agents: queue: "automation-large-builder-fleet" - artifact_paths: - - "mongod.log" - - "build/genesis.json" - - "build/config.ini" plugins: ecr#v1.1.4: login: true @@ -293,20 +260,13 @@ steps: timeout: 60 - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":centos: 7 Build" && \ - tar -zxf build.tar.gz && \ - echo "--- :m: Starting MongoDB" && \ - ~/bin/mongod --fork --dbpath ~/data/mongodb -f ~/etc/mongod.conf --logpath "$(pwd)"/mongod.log && \ - echo "+++ :microscope: Running tests" && \ - cd /data/job/build && PATH=\$PATH:~/opt/mongodb/bin ctest -L nonparallelizable_tests --output-on-failure + echo "--- :arrow_down: Downloading Build Directory" + buildkite-agent artifact download "build.tar.gz" . --step ":centos: 7 Build" + echo "+++ :microscope: Running Tests" + ./scripts/serial-test.sh label: ":centos: 7 NP Tests" agents: queue: "automation-large-builder-fleet" - artifact_paths: - - "mongod.log" - - "build/genesis.json" - - "build/config.ini" plugins: ecr#v1.1.4: login: true @@ -319,21 +279,15 @@ steps: workdir: /data/job timeout: 60 + # Amazon AWS-1 Linux Tests - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":aws: 1 Build" && \ - tar -zxf build.tar.gz && \ - echo "--- :m: Starting MongoDB" && \ - ~/bin/mongod --fork --dbpath ~/data/mongodb -f ~/etc/mongod.conf --logpath "$(pwd)"/mongod.log && \ - echo "+++ :microscope: Running tests" && \ - cd /data/job/build && PATH=\$PATH:~/opt/mongodb/bin ctest -j8 -LE _tests --output-on-failure + echo "--- :arrow_down: Downloading Build Directory" + buildkite-agent artifact download "build.tar.gz" . 
--step ":aws: 1 Build" + echo "+++ :microscope: Running Tests" + ./scripts/parallel-test.sh label: ":aws: 1 Tests" agents: queue: "automation-large-builder-fleet" - artifact_paths: - - "mongod.log" - - "build/genesis.json" - - "build/config.ini" plugins: ecr#v1.1.4: login: true @@ -347,20 +301,13 @@ steps: timeout: 60 - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":aws: 1 Build" && \ - tar -zxf build.tar.gz && \ - echo "--- :m: Starting MongoDB" && \ - ~/bin/mongod --fork --dbpath ~/data/mongodb -f ~/etc/mongod.conf --logpath "$(pwd)"/mongod.log && \ - echo "+++ :microscope: Running tests" && \ - cd /data/job/build && PATH=\$PATH:~/opt/mongodb/bin ctest -L nonparallelizable_tests --output-on-failure + echo "--- :arrow_down: Downloading Build Directory" + buildkite-agent artifact download "build.tar.gz" . --step ":aws: 1 Build" + echo "+++ :microscope: Running Tests" + ./scripts/serial-test.sh label: ":aws: 1 NP Tests" agents: queue: "automation-large-builder-fleet" - artifact_paths: - - "mongod.log" - - "build/genesis.json" - - "build/config.ini" plugins: ecr#v1.1.4: login: true @@ -373,21 +320,15 @@ steps: workdir: /data/job timeout: 60 + # Amazon AWS-2 Linux Tests - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":aws: 2 Build" && \ - tar -zxf build.tar.gz && \ - echo "--- :m: Starting MongoDB" && \ - ~/bin/mongod --fork --dbpath ~/data/mongodb -f ~/etc/mongod.conf --logpath "$(pwd)"/mongod.log && \ - echo "+++ :microscope: Running tests" && \ - cd /data/job/build && PATH=\$PATH:~/opt/mongodb/bin ctest -j8 -LE _tests --output-on-failure + echo "--- :arrow_down: Downloading Build Directory" + buildkite-agent artifact download "build.tar.gz" . --step ":aws: 2 Build" + echo "+++ :microscope: Running Tests" + ./scripts/parallel-test.sh label: ":aws: 2 Tests" agents: queue: "automation-large-builder-fleet" - artifact_paths: - - "mongod.log" - - "build/genesis.json" - - "build/config.ini" plugins: ecr#v1.1.4: login: true @@ -401,20 +342,13 @@ steps: timeout: 60 - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":aws: 2 Build" && \ - tar -zxf build.tar.gz && \ - echo "--- :m: Starting MongoDB" && \ - ~/bin/mongod --fork --dbpath ~/data/mongodb -f ~/etc/mongod.conf --logpath "$(pwd)"/mongod.log && \ - echo "+++ :microscope: Running tests" && \ - cd /data/job/build && PATH=\$PATH:~/opt/mongodb/bin ctest -L nonparallelizable_tests --output-on-failure + echo "--- :arrow_down: Downloading Build Directory" + buildkite-agent artifact download "build.tar.gz" . --step ":aws: 2 Build" + echo "+++ :microscope: Running Tests" + ./scripts/serial-test.sh label: ":aws: 2 NP Tests" agents: queue: "automation-large-builder-fleet" - artifact_paths: - - "mongod.log" - - "build/genesis.json" - - "build/config.ini" plugins: ecr#v1.1.4: login: true @@ -427,21 +361,15 @@ steps: workdir: /data/job timeout: 60 + # Fedora Tests - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . 
--step ":fedora: 27 Build" && \ - tar -zxf build.tar.gz && \ - echo "--- :m: Starting MongoDB" && \ - ~/bin/mongod --fork --dbpath ~/data/mongodb -f ~/etc/mongod.conf --logpath "$(pwd)"/mongod.log && \ - echo "+++ :microscope: Running tests" && \ - cd /data/job/build && PATH=\$PATH:~/opt/mongodb/bin ctest -j8 -LE _tests --output-on-failure + echo "--- :arrow_down: Downloading Build Directory" + buildkite-agent artifact download "build.tar.gz" . --step ":fedora: 27 Build" + echo "+++ :microscope: Running Tests" + ./scripts/parallel-test.sh label: ":fedora: 27 Tests" agents: queue: "automation-large-builder-fleet" - artifact_paths: - - "mongod.log" - - "build/genesis.json" - - "build/config.ini" plugins: ecr#v1.1.4: login: true @@ -455,20 +383,13 @@ steps: timeout: 60 - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":fedora: 27 Build" && \ - tar -zxf build.tar.gz && \ - echo "--- :m: Starting MongoDB" && \ - ~/bin/mongod --fork --dbpath ~/data/mongodb -f ~/etc/mongod.conf --logpath "$(pwd)"/mongod.log && \ - echo "+++ :microscope: Running tests" && \ - cd /data/job/build && PATH=\$PATH:~/opt/mongodb/bin ctest -L nonparallelizable_tests --output-on-failure + echo "--- :arrow_down: Downloading Build Directory" + buildkite-agent artifact download "build.tar.gz" . --step ":fedora: 27 Build" + echo "+++ :microscope: Running Tests" + ./scripts/serial-test.sh label: ":fedora: 27 NP Tests" agents: queue: "automation-large-builder-fleet" - artifact_paths: - - "mongod.log" - - "build/genesis.json" - - "build/config.ini" plugins: ecr#v1.1.4: login: true @@ -481,85 +402,61 @@ steps: workdir: /data/job timeout: 60 + # High Sierra Tests - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":darwin: High Sierra Build" && \ - tar -zxf build.tar.gz && \ - echo "--- :m: Starting MongoDB" && \ - ~/bin/mongod --fork --dbpath ~/data/mongodb -f ~/etc/mongod.conf --logpath "$(pwd)"/mongod.log && \ - echo "+++ :microscope: Running tests" && \ - ln -s "$(pwd)" /data/job && cd /data/job/build && PATH=\$PATH:~/opt/mongodb/bin ctest -j8 -LE _tests --output-on-failure + echo "--- :arrow_down: Downloading Build Directory" + buildkite-agent artifact download "build.tar.gz" . --step ":darwin: High Sierra Build" + echo "+++ :microscope: Running Tests" + ln -s "$(pwd)" /data/job + ./scripts/parallel-test.sh label: ":darwin: High Sierra Tests" agents: - "role=tester-v2-1" - "os=high-sierra" - artifact_paths: - - "mongod.log" - - "build/genesis.json" - - "build/config.ini" timeout: 60 - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":darwin: High Sierra Build" && \ - tar -zxf build.tar.gz && \ - echo "--- :m: Starting MongoDB" && \ - ~/bin/mongod --fork --dbpath ~/data/mongodb -f ~/etc/mongod.conf --logpath "$(pwd)"/mongod.log && \ - echo "+++ :microscope: Running tests" && \ - ln -s "$(pwd)" /data/job && cd /data/job/build && PATH=\$PATH:~/opt/mongodb/bin ctest -L nonparallelizable_tests --output-on-failure + echo "--- :arrow_down: Downloading Build Directory" + buildkite-agent artifact download "build.tar.gz" . 
--step ":darwin: High Sierra Build" + echo "+++ :microscope: Running Tests" + ln -s "$(pwd)" /data/job && ./scripts/serial-test.sh label: ":darwin: High Sierra NP Tests" agents: - "role=tester-v2-1" - "os=high-sierra" - artifact_paths: - - "mongod.log" - - "build/genesis.json" - - "build/config.ini" timeout: 60 + # Mojave Tests - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":darwin: Mojave Build" && \ - tar -zxf build.tar.gz && \ - echo "--- :m: Starting MongoDB" && \ - ~/bin/mongod --fork --dbpath ~/data/mongodb -f ~/etc/mongod.conf --logpath "$(pwd)"/mongod.log && \ - echo "+++ :microscope: Running tests" && \ - ln -s "$(pwd)" /data/job && cd /data/job/build && PATH=\$PATH:~/opt/mongodb/bin ctest -j8 -LE _tests --output-on-failure + echo "--- :arrow_down: Downloading Build Directory" + buildkite-agent artifact download "build.tar.gz" . --step ":darwin: Mojave Build" + echo "+++ :microscope: Running Tests" + ln -s "$(pwd)" /data/job + ./scripts/parallel-test.sh label: ":darwin: Mojave Tests" agents: - "role=tester-v2-1" - "os=mojave" - artifact_paths: - - "mongod.log" - - "build/genesis.json" - - "build/config.ini" timeout: 60 - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":darwin: Mojave Build" && \ - tar -zxf build.tar.gz && \ - echo "--- :m: Starting MongoDB" && \ - ~/bin/mongod --fork --dbpath ~/data/mongodb -f ~/etc/mongod.conf --logpath "$(pwd)"/mongod.log && \ - echo "+++ :microscope: Running tests" && \ - ln -s "$(pwd)" /data/job && cd /data/job/build && PATH=\$PATH:~/opt/mongodb/bin ctest -L nonparallelizable_tests --output-on-failure + echo "--- :arrow_down: Downloading Build Directory" + buildkite-agent artifact download "build.tar.gz" . --step ":darwin: Mojave Build" + echo "+++ :microscope: Running Tests" + ln -s "$(pwd)" /data/job && ./scripts/serial-test.sh label: ":darwin: Mojave NP Tests" agents: - "role=tester-v2-1" - "os=mojave" - artifact_paths: - - "mongod.log" - - "build/genesis.json" - - "build/config.ini" timeout: 60 - wait - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":darwin: High Sierra Build" && \ - tar -zxf build.tar.gz && \ - echo "+++ :microscope: Starting package build" && \ + echo "--- :arrow_down: Downloading build directory" + buildkite-agent artifact download "build.tar.gz" . --step ":darwin: High Sierra Build" + tar -zxf build.tar.gz + echo "+++ :microscope: Starting package build" ln -s "$(pwd)" /data/job && cd /data/job/build/packages && bash generate_package.sh brew label: ":darwin: High Sierra Package Builder" agents: @@ -571,10 +468,10 @@ steps: timeout: 60 - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":darwin: Mojave Build" && \ - tar -zxf build.tar.gz && \ - echo "+++ :microscope: Starting package build" && \ + echo "--- :arrow_down: Downloading build directory" + buildkite-agent artifact download "build.tar.gz" . 
--step ":darwin: Mojave Build" + tar -zxf build.tar.gz + echo "+++ :microscope: Starting package build" ln -s "$(pwd)" /data/job && cd /data/job/build/packages && bash generate_package.sh brew label: ":darwin: Mojave Package Builder" agents: @@ -586,10 +483,10 @@ steps: timeout: 60 - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":ubuntu: 16.04 Build" && \ - tar -zxf build.tar.gz && \ - echo "+++ :microscope: Starting package build" && \ + echo "--- :arrow_down: Downloading build directory" + buildkite-agent artifact download "build.tar.gz" . --step ":ubuntu: 16.04 Build" + tar -zxf build.tar.gz + echo "+++ :microscope: Starting package build" cd /data/job/build/packages && bash generate_package.sh deb label: ":ubuntu: 16.04 Package builder" agents: @@ -612,10 +509,10 @@ steps: timeout: 60 - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":ubuntu: 18.04 Build" && \ - tar -zxf build.tar.gz && \ - echo "+++ :microscope: Starting package build" && \ + echo "--- :arrow_down: Downloading build directory" + buildkite-agent artifact download "build.tar.gz" . --step ":ubuntu: 18.04 Build" + tar -zxf build.tar.gz + echo "+++ :microscope: Starting package build" cd /data/job/build/packages && bash generate_package.sh deb label: ":ubuntu: 18.04 Package builder" agents: @@ -638,17 +535,17 @@ steps: timeout: 60 - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":fedora: 27 Build" && \ - tar -zxf build.tar.gz && \ - echo "+++ :microscope: Starting package build" && \ - yum install -y rpm-build && \ - mkdir -p /root/rpmbuild/BUILD && \ - mkdir -p /root/rpmbuild/BUILDROOT && \ - mkdir -p /root/rpmbuild/RPMS && \ - mkdir -p /root/rpmbuild/SOURCES && \ - mkdir -p /root/rpmbuild/SPECS && \ - mkdir -p /root/rpmbuild/SRPMS && \ + echo "--- :arrow_down: Downloading build directory" + buildkite-agent artifact download "build.tar.gz" . --step ":fedora: 27 Build" + tar -zxf build.tar.gz + echo "+++ :microscope: Starting package build" + yum install -y rpm-build + mkdir -p /root/rpmbuild/BUILD + mkdir -p /root/rpmbuild/BUILDROOT + mkdir -p /root/rpmbuild/RPMS + mkdir -p /root/rpmbuild/SOURCES + mkdir -p /root/rpmbuild/SPECS + mkdir -p /root/rpmbuild/SRPMS cd /data/job/build/packages && bash generate_package.sh rpm label: ":fedora: 27 Package builder" agents: @@ -671,17 +568,17 @@ steps: timeout: 60 - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":centos: 7 Build" && \ - tar -zxf build.tar.gz && \ - echo "+++ :microscope: Starting package build" && \ - yum install -y rpm-build && \ - mkdir -p /root/rpmbuild/BUILD && \ - mkdir -p /root/rpmbuild/BUILDROOT && \ - mkdir -p /root/rpmbuild/RPMS && \ - mkdir -p /root/rpmbuild/SOURCES && \ - mkdir -p /root/rpmbuild/SPECS && \ - mkdir -p /root/rpmbuild/SRPMS && \ + echo "--- :arrow_down: Downloading build directory" + buildkite-agent artifact download "build.tar.gz" . 
--step ":centos: 7 Build" + tar -zxf build.tar.gz + echo "+++ :microscope: Starting package build" + yum install -y rpm-build + mkdir -p /root/rpmbuild/BUILD + mkdir -p /root/rpmbuild/BUILDROOT + mkdir -p /root/rpmbuild/RPMS + mkdir -p /root/rpmbuild/SOURCES + mkdir -p /root/rpmbuild/SPECS + mkdir -p /root/rpmbuild/SRPMS cd /data/job/build/packages && bash generate_package.sh rpm label: ":centos: 7 Package builder" agents: @@ -706,9 +603,9 @@ steps: - wait - command: | - echo "--- :arrow_down: Downloading brew files" && \ - buildkite-agent artifact download "build/packages/eosio.rb" . --step ":darwin: High Sierra Package Builder" && \ - mv build/packages/eosio.rb build/packages/eosio_highsierra.rb && \ + echo "--- :arrow_down: Downloading brew files" + buildkite-agent artifact download "build/packages/eosio.rb" . --step ":darwin: High Sierra Package Builder" + mv build/packages/eosio.rb build/packages/eosio_highsierra.rb buildkite-agent artifact download "build/packages/eosio.rb" . --step ":darwin: Mojave Package Builder" label: ":darwin: Brew Updater" agents: @@ -716,4 +613,4 @@ steps: artifact_paths: - "build/packages/eosio_highsierra.rb" - "build/packages/eosio.rb" - timeout: 60 \ No newline at end of file + timeout: 60 diff --git a/.buildkite/sanitizers.yml b/.buildkite/sanitizers.yml deleted file mode 100644 index d49493eb5ee..00000000000 --- a/.buildkite/sanitizers.yml +++ /dev/null @@ -1,155 +0,0 @@ -steps: - - command: | - echo "--- :hammer: Building with Undefined Sanitizer" && \ - /usr/bin/cmake -GNinja \ - -DCMAKE_BUILD_TYPE=Debug \ - -DCMAKE_CXX_COMPILER=clang++-4.0 \ - -DCMAKE_C_COMPILER=clang-4.0 \ - -DBOOST_ROOT="${BOOST_ROOT}" \ - -DWASM_ROOT="${WASM_ROOT}" \ - -DOPENSSL_ROOT_DIR="${OPENSSL_ROOT_DIR}" \ - -DBUILD_MONGO_DB_PLUGIN=true \ - -DENABLE_COVERAGE_TESTING=true\ - -DBUILD_DOXYGEN=false -DCMAKE_CXX_FLAGS="-fsanitize=undefined -fsanitize-recover=all -g -fno-omit-frame-pointer" \ - -DCMAKE_C_FLAGS="-fsanitize=undefined -fsanitize-recover=all -g -fno-omit-frame-pointer" \ - -DCMAKE_EXE_LINKER_FLAGS="-fsanitize=undefined -fsanitize-recover=all -rtlib=compiler-rt -lgcc_s -pthread" \ - -DCMAKE_MODULE_LINKER_FLAGS="-fsanitize=undefined -fsanitize-recover=all -rtlib=compiler-rt -lgcc_s -pthread" && \ - echo "--- :shinto_shrine: Running ninja" && \ - /usr/bin/ninja | tee ninja.log && \ - echo "--- :compression: Compressing build directory" && \ - tar -pczf build.tar.gz * - echo "--- :beers: Done" - label: ":_: Undefined Sanitizer" - agents: - queue: "automation-large-builder-fleet" - artifact_paths: - - "build.tar.gz" - - "ninja.log" - plugins: - ecr#v1.1.4: - login: true - account_ids: "436617320021" - no-include-email: true - region: "us-west-2" - docker#v2.1.0: - debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-1" - workdir: /data/job - command: ["--privileged"] - mounts: - - /etc/buildkite-agent/config:/config - environment: - - BOOST_ROOT=/root/opt/boost - - OPENSSL_ROOT_DIR=/usr/include/openssl - - WASM_ROOT=/root/opt/wasm - - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/root/opt/wasm/bin - - CI=true - - UBSAN_OPTIONS=print_stacktrace=1 - timeout: 60 - - - command: | - echo "--- :hammer: Building with Address Sanitizer" && \ - /usr/bin/cmake -GNinja \ - -DCMAKE_BUILD_TYPE=Debug \ - -DCMAKE_CXX_COMPILER=clang++-4.0 \ - -DCMAKE_C_COMPILER=clang-4.0 \ - -DBOOST_ROOT="${BOOST_ROOT}" \ - -DWASM_ROOT="${WASM_ROOT}" \ - -DOPENSSL_ROOT_DIR="${OPENSSL_ROOT_DIR}" \ - -DBUILD_MONGO_DB_PLUGIN=true \ - 
-DENABLE_COVERAGE_TESTING=true \ - -DBUILD_DOXYGEN=false \ - -DCMAKE_CXX_FLAGS="-fsanitize=address -fsanitize-recover=all -O1 -g -fno-omit-frame-pointer" \ - -DCMAKE_C_FLAGS="-fsanitize=address -fsanitize-recover=all -O1 -g -fno-omit-frame-pointer" \ - -DCMAKE_EXE_LINKER_FLAGS="-fsanitize=address -fsanitize-recover=all -rtlib=compiler-rt -lgcc_s" \ - -DCMAKE_MODULE_LINKER_FLAGS="-fsanitize=address -fsanitize-recover=all -rtlib=compiler-rt -lgcc_s" - echo "--- :shinto_shrine: Running ninja" && \ - /usr/bin/ninja | tee ninja.log && \ - echo "--- :compression: Compressing build directory" && \ - tar -pczf build.tar.gz * - echo "--- :beers: Done" - label: ":_: Address Sanitizer" - agents: - queue: "automation-large-builder-fleet" - artifact_paths: - - "build.tar.gz" - - "ninja.log" - plugins: - ecr#v1.1.4: - login: true - account_ids: "436617320021" - no-include-email: true - region: "us-west-2" - docker#v2.1.0: - debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-1" - workdir: /data/job - command: ["--privileged"] - mounts: - - /etc/buildkite-agent/config:/config - environment: - - BOOST_ROOT=/root/opt/boost - - OPENSSL_ROOT_DIR=/usr/include/openssl - - WASM_ROOT=/root/opt/wasm - - PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/root/opt/wasm/bin - - CI=true - - ASAN_OPTIONS=fast_unwind_on_malloc=0:halt_on_error=0:detect_odr_violation=0:detect_leaks=0:symbolize=1:verbosity=1 - timeout: 60 - - - wait - - - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . --step ":_: Undefined Sanitizer" && \ - tar -zxf build.tar.gz --no-same-owner && \ - echo "--- :m: Starting MongoDB" && \ - $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ - echo "+++ :microscope: Running tests" && \ - ctest -j8 -LE _tests -V -O sanitizer.log || true - label: ":_: Undefined Sanitizer Tests" - agents: - queue: "automation-large-builder-fleet" - artifact_paths: - - "mongod.log" - - "sanitizer.log" - plugins: - ecr#v1.1.4: - login: true - account_ids: "436617320021" - no-include-email: true - region: "us-west-2" - docker#v2.1.0: - debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-1" - workdir: /data/job - mounts: - - /etc/buildkite-agent/config:/config - timeout: 120 - - - command: | - echo "--- :arrow_down: Downloading build directory" && \ - buildkite-agent artifact download "build.tar.gz" . 
--step ":_: Address Sanitizer" && \ - tar -zxf build.tar.gz --no-same-owner && \ - echo "--- :m: Starting MongoDB" && \ - $(which mongod) --fork --logpath "$(pwd)"/mongod.log && \ - echo "+++ :microscope: Running tests" && \ - ctest -j8 -LE _tests -V -O sanitizer.log || true - label: ":_: Address Sanitizer Tests" - agents: - queue: "automation-large-builder-fleet" - artifact_paths: - - "mongod.log" - - "sanitizer.log" - plugins: - ecr#v1.1.4: - login: true - account_ids: "436617320021" - no-include-email: true - region: "us-west-2" - docker#v2.1.0: - debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-1" - workdir: /data/job - mounts: - - /etc/buildkite-agent/config:/config - timeout: 120 \ No newline at end of file diff --git a/CMakeLists.txt b/CMakeLists.txt index 0d54f35526c..f9375f0f8b9 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1,7 +1,7 @@ cmake_minimum_required( VERSION 3.5 ) project( EOSIO ) - +include(CTest) # suppresses DartConfiguration.tcl error enable_testing() if (CMAKE_INSTALL_PREFIX_INITIALIZED_TO_DEFAULT) diff --git a/scripts/long-running-test.sh b/scripts/long-running-test.sh new file mode 100755 index 00000000000..60cae2d0b7f --- /dev/null +++ b/scripts/long-running-test.sh @@ -0,0 +1,26 @@ +#!/bin/bash +set -e # exit on failure of any "simple" command (excludes &&, ||, or | chains) +# prepare environment +PATH=$PATH:~/opt/mongodb/bin +echo "Extracting build directory..." +tar -zxf build.tar.gz +echo "Starting MongoDB..." +~/bin/mongod --fork --dbpath ~/data/mongodb -f ~/etc/mongod.conf --logpath "$(pwd)"/mongod.log +cd /data/job/build +# run tests +echo "Running tests..." +TEST_COUNT=$(ctest -N -L nonparallelizable_tests | grep -i 'Total Tests: ' | cut -d ':' -f 2 | awk '{print $1}') +[[ $TEST_COUNT > 0 ]] && echo "$TEST_COUNT tests found." || (echo "ERROR: No tests registered with ctest! Exiting..." && exit 1) +echo "$ ctest -L long_running_tests --output-on-failure -T Test" +ctest -L long_running_tests --output-on-failure -T Test +# upload artifacts +echo "Uploading artifacts..." +XML_FILENAME="test-results.xml" +mv $(pwd)/Testing/$(ls $(pwd)/Testing/ | grep '20' | tail -n 1)/Test.xml $XML_FILENAME +buildkite-agent artifact upload config.ini +buildkite-agent artifact upload genesis.json +cd .. +buildkite-agent artifact upload mongod.log +cd build +buildkite-agent artifact upload $XML_FILENAME +echo "Done uploading artifacts." diff --git a/scripts/parallel-test.sh b/scripts/parallel-test.sh new file mode 100755 index 00000000000..5174c454e2a --- /dev/null +++ b/scripts/parallel-test.sh @@ -0,0 +1,28 @@ +#!/bin/bash +set -e # exit on failure of any "simple" command (excludes &&, ||, or | chains) +# prepare environment +PATH=$PATH:~/opt/mongodb/bin +echo "Extracting build directory..." +tar -zxf build.tar.gz +echo "Starting MongoDB..." +~/bin/mongod --fork --dbpath ~/data/mongodb -f ~/etc/mongod.conf --logpath "$(pwd)"/mongod.log +cd /data/job/build +# run tests +echo "Running tests..." +CPU_CORES=$(getconf _NPROCESSORS_ONLN) +echo "$CPU_CORES cpu cores detected." +TEST_COUNT=$(ctest -N -LE _tests | grep -i 'Total Tests: ' | cut -d ':' -f 2 | awk '{print $1}') +[[ $TEST_COUNT > 0 ]] && echo "$TEST_COUNT tests found." || (echo "ERROR: No tests registered with ctest! Exiting..." && exit 1) +echo "$ ctest -j $CPU_CORES -LE _tests --output-on-failure -T Test" +ctest -j $CPU_CORES -LE _tests --output-on-failure -T Test +# upload artifacts +echo "Uploading artifacts..." 
+XML_FILENAME="test-results.xml" +mv $(pwd)/Testing/$(ls $(pwd)/Testing/ | grep '20' | tail -n 1)/Test.xml $XML_FILENAME +buildkite-agent artifact upload config.ini +buildkite-agent artifact upload genesis.json +cd .. +buildkite-agent artifact upload mongod.log +cd build +buildkite-agent artifact upload $XML_FILENAME +echo "Done uploading artifacts." diff --git a/scripts/serial-test.sh b/scripts/serial-test.sh new file mode 100755 index 00000000000..512229d6272 --- /dev/null +++ b/scripts/serial-test.sh @@ -0,0 +1,26 @@ +#!/bin/bash +set -e # exit on failure of any "simple" command (excludes &&, ||, or | chains) +# prepare environment +PATH=$PATH:~/opt/mongodb/bin +echo "Extracting build directory..." +tar -zxf build.tar.gz +echo "Starting MongoDB..." +~/bin/mongod --fork --dbpath ~/data/mongodb -f ~/etc/mongod.conf --logpath "$(pwd)"/mongod.log +cd /data/job/build +# run tests +echo "Running tests..." +TEST_COUNT=$(ctest -N -L nonparallelizable_tests | grep -i 'Total Tests: ' | cut -d ':' -f 2 | awk '{print $1}') +[[ $TEST_COUNT > 0 ]] && echo "$TEST_COUNT tests found." || (echo "ERROR: No tests registered with ctest! Exiting..." && exit 1) +echo "$ ctest -L nonparallelizable_tests --output-on-failure -T Test" +ctest -L nonparallelizable_tests --output-on-failure -T Test +# upload artifacts +echo "Uploading artifacts..." +XML_FILENAME="test-results.xml" +mv $(pwd)/Testing/$(ls $(pwd)/Testing/ | grep '20' | tail -n 1)/Test.xml $XML_FILENAME +buildkite-agent artifact upload config.ini +buildkite-agent artifact upload genesis.json +cd .. +buildkite-agent artifact upload mongod.log +cd build +buildkite-agent artifact upload $XML_FILENAME +echo "Done uploading artifacts." From eef306c09d0eb7779c947e27b598648b6abb4f0a Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Wed, 20 Mar 2019 10:41:43 -0500 Subject: [PATCH 168/680] Prevent txn_test_gen_plugin from calling back into http_plugin multiple times per request. 
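The fix works by capturing a shared atomic counter in the response lambda: every completion path increments it, and any completion after the first returns without touching http_plugin again. A minimal standalone sketch of that call-once guard follows; the `respond` callback and its integer status code are illustrative stand-ins, not the plugin's actual API.

   #include <atomic>
   #include <cstdint>
   #include <iostream>
   #include <memory>

   int main() {
      // Shared counter; every copy of the lambda sees the same count.
      auto times_called = std::make_shared<std::atomic<uint32_t>>(0);

      // Illustrative stand-in for the http_plugin response callback.
      auto respond = [](int code) { std::cout << "responded, code " << code << "\n"; };

      auto result_handler = [times_called, respond](bool err) {
         if( ++(*times_called) > 1 ) return;   // drop every call after the first
         respond( err ? 500 : 200 );
      };

      result_handler(false);   // first call reaches respond()
      result_handler(false);   // duplicate, silently dropped
      result_handler(true);    // duplicate, silently dropped
      return 0;
   }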
---
 plugins/txn_test_gen_plugin/txn_test_gen_plugin.cpp | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/plugins/txn_test_gen_plugin/txn_test_gen_plugin.cpp b/plugins/txn_test_gen_plugin/txn_test_gen_plugin.cpp
index 414664be32a..60383175387 100755
--- a/plugins/txn_test_gen_plugin/txn_test_gen_plugin.cpp
+++ b/plugins/txn_test_gen_plugin/txn_test_gen_plugin.cpp
@@ -69,7 +69,10 @@ using io_work_t = boost::asio::executor_work_guard<boost::asio::io_context::executor_type>
+         auto times_called = std::make_shared<std::atomic<uint32_t>>(0);\
+         auto result_handler = [times_called{std::move(times_called)}, cb, body](const fc::exception_ptr& e) mutable {\
+            if( ++(*times_called) > 1 ) return;\
             if (e) {\
                try {\
                   e->dynamic_rethrow_exception();\

From dde3b35158b06fa8d8e26c6bb73a346c9edea207 Mon Sep 17 00:00:00 2001
From: arhag
Date: Wed, 20 Mar 2019 15:19:53 -0400
Subject: [PATCH 169/680] pass custom_subjective_restrictions by reference into
 make_protocol_feature_set of tester

---
 libraries/testing/include/eosio/testing/tester.hpp | 3 ++-
 libraries/testing/tester.cpp                       | 7 ++++---
 tests/Node.py                                      | 2 +-
 3 files changed, 7 insertions(+), 5 deletions(-)

diff --git a/libraries/testing/include/eosio/testing/tester.hpp b/libraries/testing/include/eosio/testing/tester.hpp
index 78defa2f862..154ebef7410 100644
--- a/libraries/testing/include/eosio/testing/tester.hpp
+++ b/libraries/testing/include/eosio/testing/tester.hpp
@@ -76,7 +76,8 @@ namespace eosio { namespace testing {
    bool expect_assert_message(const fc::exception& ex, string expected);

    using subjective_restriction_map = std::map<builtin_protocol_feature_t, protocol_feature_subjective_restrictions>;
-   protocol_feature_set make_protocol_feature_set(const subjective_restriction_map custom_subjective_restrictions = {});
+
+   protocol_feature_set make_protocol_feature_set(const subjective_restriction_map& custom_subjective_restrictions = {});

    /**
     * @class tester
diff --git a/libraries/testing/tester.cpp b/libraries/testing/tester.cpp
index 8e2d6fc7239..93dc4b5f61f 100644
--- a/libraries/testing/tester.cpp
+++ b/libraries/testing/tester.cpp
@@ -79,13 +79,14 @@ namespace eosio { namespace testing {
       memcpy( data.data(), obj.value.data(), obj.value.size() );
    }

-   protocol_feature_set make_protocol_feature_set(const subjective_restriction_map custom_subjective_restrictions) {
+   protocol_feature_set make_protocol_feature_set(const subjective_restriction_map& custom_subjective_restrictions) {
      protocol_feature_set pfs;

      map< builtin_protocol_feature_t, optional<digest_type> > visited_builtins;

      std::function<digest_type(builtin_protocol_feature_t)> add_builtins =
-     [&pfs, &visited_builtins, &add_builtins, &custom_subjective_restrictions]( builtin_protocol_feature_t codename ) -> digest_type {
+     [&pfs, &visited_builtins, &add_builtins, &custom_subjective_restrictions]
+     ( builtin_protocol_feature_t codename ) -> digest_type {
         auto res = visited_builtins.emplace( codename, optional<digest_type>() );
         if( !res.second ) {
            EOS_ASSERT( res.first->second, protocol_feature_exception,
@@ -100,7 +101,7 @@ namespace eosio { namespace testing {
         } );

         const auto itr = custom_subjective_restrictions.find(codename);
-        if (itr != custom_subjective_restrictions.end()) {
+        if( itr != custom_subjective_restrictions.end() ) {
           f.subjective_restrictions = itr->second;
         }

diff --git a/tests/Node.py b/tests/Node.py
index cdfb6d4e476..cf65e2282f0 100644
--- a/tests/Node.py
+++ b/tests/Node.py
@@ -1445,7 +1445,7 @@ def getSupportedProtocolFeatures(self, excludeDisabled=False, excludeUnactivatab
         res = self.sendRpcApi("v1/producer/get_supported_protocol_features", param)
         return res

-    # This will return supported protocol feature digests as a dict (feature codename as the key), i.e.
+ # This will return supported protocol features in a dict (feature codename as the key), i.e. # { # "PREACTIVATE_FEATURE": {...}, # "ONLY_LINK_TO_EXISTING_PERMISSION": {...}, From d89e52540960c73593e3c125313a6df6d2946071 Mon Sep 17 00:00:00 2001 From: arhag Date: Wed, 20 Mar 2019 16:06:28 -0400 Subject: [PATCH 170/680] some cleanup in protocol_feature_tests/subjective_restrictions_test --- unittests/protocol_feature_tests.cpp | 125 +++++++++++++++++---------- 1 file changed, 78 insertions(+), 47 deletions(-) diff --git a/unittests/protocol_feature_tests.cpp b/unittests/protocol_feature_tests.cpp index b62b4b4bd03..10648136c0c 100644 --- a/unittests/protocol_feature_tests.cpp +++ b/unittests/protocol_feature_tests.cpp @@ -240,77 +240,108 @@ BOOST_AUTO_TEST_CASE( only_link_to_existing_permission_test ) try { BOOST_AUTO_TEST_CASE( subjective_restrictions_test ) try { tester c( setup_policy::none ); - auto restart_with_new_pfs = [&](protocol_feature_set&& pfs) { + const auto& pfm = c.control->get_protocol_feature_manager(); + + auto restart_with_new_pfs = [&c]( protocol_feature_set&& pfs ) { c.close(); c.open(std::move(pfs), nullptr); }; - auto pfm = c.control->get_protocol_feature_manager(); - auto preactivate_feature_digest = *pfm.get_builtin_digest(builtin_protocol_feature_t::preactivate_feature ); - auto only_link_to_existing_permission_digest = *pfm.get_builtin_digest(builtin_protocol_feature_t::only_link_to_existing_permission ); - auto invalid_act_time = fc::time_point::from_iso_string( "2200-01-01T00:00:00"); + auto get_builtin_digest = [&pfm]( builtin_protocol_feature_t codename ) -> digest_type { + auto res = pfm.get_builtin_digest( codename ); + BOOST_REQUIRE( res ); + return *res; + }; + + auto preactivate_feature_digest = get_builtin_digest( builtin_protocol_feature_t::preactivate_feature ); + auto only_link_to_existing_permission_digest = get_builtin_digest( builtin_protocol_feature_t::only_link_to_existing_permission ); + + auto invalid_act_time = fc::time_point::from_iso_string( "2200-01-01T00:00:00" ); auto valid_act_time = fc::time_point{}; // First, test subjective_restrictions on feature that can be activated WITHOUT preactivation (PREACTIVATE_FEATURE) + + c.schedule_protocol_features_wo_preactivation({ preactivate_feature_digest }); + // schedule PREACTIVATE_FEATURE activation (persists until next successful start_block) + subjective_restriction_map custom_subjective_restrictions = { { builtin_protocol_feature_t::preactivate_feature, {invalid_act_time, false, true} } }; - restart_with_new_pfs(make_protocol_feature_set(custom_subjective_restrictions)); - // When a block is produced, the protocol feature activation should fail and throws an error - c.schedule_protocol_features_wo_preactivation({ preactivate_feature_digest }); - BOOST_CHECK_EXCEPTION( c.produce_block(), - protocol_feature_exception, - fc_exception_message_starts_with( std::string(c.control->head_block_time()) + - std::string(" is too early for the earliest ") + - std::string("allowed activation time of the protocol feature") - ) - ); + restart_with_new_pfs( make_protocol_feature_set(custom_subjective_restrictions) ); + // When a block is produced, the protocol feature activation should fail and throw an error + BOOST_CHECK_EXCEPTION( c.produce_block(), + protocol_feature_exception, + fc_exception_message_starts_with( + std::string(c.control->head_block_time()) + + " is too early for the earliest allowed activation time of the protocol feature" + ) + ); + BOOST_CHECK_EQUAL( 
c.protocol_features_to_be_activated_wo_preactivation.size(), 1 ); + // Revert to the valid earliest allowed activation time, however with enabled == false - custom_subjective_restrictions = {{ builtin_protocol_feature_t::preactivate_feature, {valid_act_time, false, false} }}; - restart_with_new_pfs(make_protocol_feature_set(custom_subjective_restrictions)); + custom_subjective_restrictions = { + { builtin_protocol_feature_t::preactivate_feature, {valid_act_time, false, false} } + }; + restart_with_new_pfs( make_protocol_feature_set(custom_subjective_restrictions) ); // This should also fail, but with different exception - BOOST_CHECK_EXCEPTION( c.produce_block(), - protocol_feature_exception, - fc_exception_message_starts_with( std::string("protocol feature with digest '") + - std::string(preactivate_feature_digest) + - std::string("' is disabled") - ) - ); + BOOST_CHECK_EXCEPTION( c.produce_block(), + protocol_feature_exception, + fc_exception_message_is( + std::string("protocol feature with digest '") + + std::string(preactivate_feature_digest) + + "' is disabled" + ) + ); + BOOST_CHECK_EQUAL( c.protocol_features_to_be_activated_wo_preactivation.size(), 1 ); + // Revert to the valid earliest allowed activation time, however with subjective_restrictions enabled == true - custom_subjective_restrictions = {{ builtin_protocol_feature_t::preactivate_feature, {valid_act_time, false, true} }}; - restart_with_new_pfs(make_protocol_feature_set(custom_subjective_restrictions)); + custom_subjective_restrictions = { + { builtin_protocol_feature_t::preactivate_feature, {valid_act_time, false, true} } + }; + restart_with_new_pfs( make_protocol_feature_set(custom_subjective_restrictions) ); // Now it should be fine, the feature should be activated after the block is produced BOOST_CHECK_NO_THROW( c.produce_block() ); BOOST_CHECK( c.control->is_builtin_activated( builtin_protocol_feature_t::preactivate_feature ) ); + BOOST_CHECK_EQUAL( c.protocol_features_to_be_activated_wo_preactivation.size(), 0 ); + + // Second, test subjective_restrictions on feature that need to be activated WITH preactivation (ONLY_LINK_TO_EXISTING_PERMISSION) c.set_bios_contract(); c.produce_block(); - // Second, test subjective_restrictions on feature that need to be activated WITH preactivation (ONLY_LINK_TO_EXISTING_PERMISSION) - custom_subjective_restrictions = {{ builtin_protocol_feature_t::only_link_to_existing_permission, {invalid_act_time, true, true} }}; - restart_with_new_pfs(make_protocol_feature_set(custom_subjective_restrictions)); + custom_subjective_restrictions = { + { builtin_protocol_feature_t::only_link_to_existing_permission, {invalid_act_time, true, true} } + }; + restart_with_new_pfs( make_protocol_feature_set(custom_subjective_restrictions) ); // It should fail - BOOST_CHECK_EXCEPTION( c.preactivate_protocol_features({only_link_to_existing_permission_digest}), - subjective_block_production_exception, - fc_exception_message_starts_with( std::string(c.control->head_block_time() + fc::milliseconds(config::block_interval_ms)) + - std::string(" is too early for the earliest ") + - std::string("allowed activation time of the protocol feature") - ) - ); + BOOST_CHECK_EXCEPTION( c.preactivate_protocol_features({only_link_to_existing_permission_digest}), + subjective_block_production_exception, + fc_exception_message_starts_with( + std::string(c.control->head_block_time() + fc::milliseconds(config::block_interval_ms)) + + " is too early for the earliest allowed activation time of the protocol feature" + ) + ); + // 
Revert with valid time and subjective_restrictions enabled == false
-   custom_subjective_restrictions = {{ builtin_protocol_feature_t::only_link_to_existing_permission, {valid_act_time, true, false} }};
-   restart_with_new_pfs(make_protocol_feature_set(custom_subjective_restrictions));
+   custom_subjective_restrictions = {
+      { builtin_protocol_feature_t::only_link_to_existing_permission, {valid_act_time, true, false} }
+   };
+   restart_with_new_pfs( make_protocol_feature_set(custom_subjective_restrictions) );
    // It should fail but with different exception
-   BOOST_CHECK_EXCEPTION( c.preactivate_protocol_features({only_link_to_existing_permission_digest}),
-                          subjective_block_production_exception,
-                          fc_exception_message_starts_with( std::string("protocol feature with digest '") +
-                                                            std::string(only_link_to_existing_permission_digest)+
-                                                            std::string("' is disabled")
-                          )
-   );
+   BOOST_CHECK_EXCEPTION( c.preactivate_protocol_features({only_link_to_existing_permission_digest}),
+                          subjective_block_production_exception,
+                          fc_exception_message_is(
+                             std::string("protocol feature with digest '") +
+                             std::string(only_link_to_existing_permission_digest)+
+                             "' is disabled"
+                          )
+   );
+
    // Revert with valid time and subjective_restrictions enabled == true
-   custom_subjective_restrictions = {{ builtin_protocol_feature_t::only_link_to_existing_permission, {valid_act_time, true, true} }};
-   restart_with_new_pfs(make_protocol_feature_set(custom_subjective_restrictions));
+   custom_subjective_restrictions = {
+      { builtin_protocol_feature_t::only_link_to_existing_permission, {valid_act_time, true, true} }
+   };
+   restart_with_new_pfs( make_protocol_feature_set(custom_subjective_restrictions) );
    // Should be fine now, and activated in the next block
    BOOST_CHECK_NO_THROW( c.preactivate_protocol_features({only_link_to_existing_permission_digest}) );
    c.produce_block();

From 68f92dc122a34c7b42a3dbf685b2cf9b314c0739 Mon Sep 17 00:00:00 2001
From: Kevin Heifner
Date: Mon, 18 Mar 2019 10:25:56 -0500
Subject: [PATCH 171/680] Add strand to protect internals of asio

---
 plugins/net_plugin/net_plugin.cpp | 22 ++++++++++++++--------
 1 file changed, 14 insertions(+), 8 deletions(-)

diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp
index 320214ae933..a77a828032a 100644
--- a/plugins/net_plugin/net_plugin.cpp
+++ b/plugins/net_plugin/net_plugin.cpp
@@ -497,7 +497,8 @@ namespace eosio {
       transaction_state_index trx_state;
       optional<sync_state>    peer_requested;  // this peer is requesting info from us
       std::shared_ptr<boost::asio::io_context> server_ioc; // keep ioc alive
-      socket_ptr              socket;
+      boost::asio::io_context::strand           strand;
+      socket_ptr                                socket;

       fc::message_buffer<1024*1024>    pending_message_buffer;
       fc::optional<std::size_t>        outstanding_read_bytes;
@@ -730,6 +731,7 @@ namespace eosio {
         trx_state(),
         peer_requested(),
         server_ioc( my_impl->server_ioc ),
+        strand( *my_impl->server_ioc ),
         socket( std::make_shared<tcp::socket>( std::ref( *my_impl->server_ioc ))),
         node_id(),
         last_handshake_recv(),
@@ -755,6 +757,7 @@ namespace eosio {
         trx_state(),
         peer_requested(),
         server_ioc( my_impl->server_ioc ),
+        strand( *my_impl->server_ioc ),
         socket( s ),
         node_id(),
         last_handshake_recv(),
@@ -976,7 +979,8 @@ namespace eosio {
         std::vector<boost::asio::const_buffer> bufs;
         buffer_queue.fill_out_buffer( bufs );

-        boost::asio::async_write(*socket, bufs, [c, priority]( boost::system::error_code ec, std::size_t w ) {
+        boost::asio::async_write(*socket, bufs,
+           boost::asio::bind_executor(strand, [c, priority]( boost::system::error_code ec, std::size_t w ) {
            app().post(priority, [c, priority, ec, w]() {
               try {
                  auto
conn = c.lock(); @@ -1016,7 +1020,7 @@ namespace eosio { fc_elog( logger,"Exception in do_queue_write to ${p}", ("p",pname) ); } }); - }); + })); } void connection::cancel_sync(go_away_reason reason) { @@ -1859,7 +1863,7 @@ namespace eosio { connection_wptr weak_conn = c; // Note: need to add support for IPv6 too - resolver->async_resolve( query, + resolver->async_resolve( query, boost::asio::bind_executor( c->strand, [weak_conn, this]( const boost::system::error_code& err, tcp::resolver::iterator endpoint_itr ) { app().post( priority::low, [err, endpoint_itr, weak_conn, this]() { auto c = weak_conn.lock(); @@ -1871,7 +1875,7 @@ namespace eosio { ("peer_addr", c->peer_name())( "error", err.message()) ); } } ); - } ); + } ) ); } void net_plugin_impl::connect(const connection_ptr& c, tcp::resolver::iterator endpoint_itr) { @@ -1883,7 +1887,8 @@ namespace eosio { ++endpoint_itr; c->connecting = true; connection_wptr weak_conn = c; - c->socket->async_connect( current_endpoint, [weak_conn, endpoint_itr, this]( const boost::system::error_code& err ) { + c->socket->async_connect( current_endpoint, boost::asio::bind_executor( c->strand, + [weak_conn, endpoint_itr, this]( const boost::system::error_code& err ) { app().post( priority::low, [weak_conn, endpoint_itr, this, err]() { auto c = weak_conn.lock(); if( !c ) return; @@ -1902,7 +1907,7 @@ namespace eosio { } } } ); - } ); + } ) ); } bool net_plugin_impl::start_session(const connection_ptr& con) { @@ -2052,6 +2057,7 @@ namespace eosio { ++conn->reads_in_flight; boost::asio::async_read(*conn->socket, conn->pending_message_buffer.get_buffer_sequence_for_boost_async_read(), completion_handler, + boost::asio::bind_executor( conn->strand, [this,weak_conn]( boost::system::error_code ec, std::size_t bytes_transferred ) { app().post( priority::medium, [this,weak_conn, ec, bytes_transferred]() { auto conn = weak_conn.lock(); @@ -2133,7 +2139,7 @@ namespace eosio { close( conn ); } }); - }); + })); } catch (...) { string pname = conn ? 
conn->peer_name() : "no connection name"; fc_elog( logger, "Undefined exception handling reading ${p}",("p",pname) ); From c39d5ea3c90e13a73db0f657ff92e9037ef5a8ac Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Tue, 19 Mar 2019 15:52:04 -0500 Subject: [PATCH 172/680] Ensure that intermediate asio operations are on the same thread --- plugins/net_plugin/net_plugin.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index a77a828032a..a26353ab387 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -731,7 +731,7 @@ namespace eosio { trx_state(), peer_requested(), server_ioc( my_impl->server_ioc ), - strand( *my_impl->server_ioc ), + strand( app().get_io_service() ), socket( std::make_shared( std::ref( *my_impl->server_ioc ))), node_id(), last_handshake_recv(), @@ -757,7 +757,7 @@ namespace eosio { trx_state(), peer_requested(), server_ioc( my_impl->server_ioc ), - strand( *my_impl->server_ioc ), + strand( app().get_io_service() ), socket( s ), node_id(), last_handshake_recv(), From 6dfb043dcf9d663f536485c00a763857625245a5 Mon Sep 17 00:00:00 2001 From: arhag Date: Wed, 20 Mar 2019 18:59:06 -0400 Subject: [PATCH 173/680] allow is_feature_activated to be called by unprivileged contracts --- libraries/chain/controller.cpp | 2 ++ libraries/chain/wasm_interface.cpp | 20 ++++++++++---------- 2 files changed, 12 insertions(+), 10 deletions(-) diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp index c7c71225d64..c2cf5b3ffc4 100644 --- a/libraries/chain/controller.cpp +++ b/libraries/chain/controller.cpp @@ -177,6 +177,8 @@ struct pending_state { if( activated_features.find( feature_digest ) != activated_features.end() ) return true; + if( bb._num_new_protocol_features_that_have_activated == 0 ) return false; + auto end = bb._new_protocol_feature_activations.begin() + bb._num_new_protocol_features_that_have_activated; return (std::find( bb._new_protocol_feature_activations.begin(), end, feature_digest ) != end); } diff --git a/libraries/chain/wasm_interface.cpp b/libraries/chain/wasm_interface.cpp index 1b4c546b52e..62a09aeac1e 100644 --- a/libraries/chain/wasm_interface.cpp +++ b/libraries/chain/wasm_interface.cpp @@ -132,13 +132,6 @@ class privileged_api : public context_aware_api { EOS_ASSERT( false, unsupported_feature, "Unsupported Hardfork Detected" ); } - /** - * Returns true if the specified protocol feature is activated, false if not. - */ - bool is_feature_activated( const digest_type& feature_digest ) { - return context.control.is_protocol_feature_activated( feature_digest ); - } - /** * Pre-activates the specified protocol feature. * Fails if the feature is unrecognized, disabled, or not allowed to be activated at the current time. @@ -921,6 +914,13 @@ class system_api : public context_aware_api { return static_cast( context.trx_context.published.time_since_epoch().count() ); } + /** + * Returns true if the specified protocol feature is activated, false if not. 
+    */
+   bool is_feature_activated( const digest_type& feature_digest ) {
+      return context.control.is_protocol_feature_activated( feature_digest );
+   }
+
 };

 constexpr size_t max_assert_message = 1024;

@@ -1720,7 +1720,6 @@ REGISTER_INTRINSICS(privileged_api,
    (set_blockchain_parameters_packed, void(int,int) )
    (is_privileged,                    int(int64_t)  )
    (set_privileged,                   void(int64_t, int) )
-   (is_feature_activated,             int(int)      )
    (preactivate_feature,              void(int)     )
 );

@@ -1798,8 +1797,9 @@ REGISTER_INTRINSICS(permission_api,

 REGISTER_INTRINSICS(system_api,
-   (current_time, int64_t() )
-   (publication_time, int64_t() )
+   (current_time,         int64_t() )
+   (publication_time,     int64_t() )
+   (is_feature_activated, int(int)  )
 );

 REGISTER_INTRINSICS(context_free_system_api,

From 79c2be8dcda75ea1c361ac06f9a4d6b0ef03fb48 Mon Sep 17 00:00:00 2001
From: Kayan
Date: Thu, 21 Mar 2019 17:01:40 +0800
Subject: [PATCH 174/680] fix bios_boot script for feature_digest param

---
 testnet.template | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/testnet.template b/testnet.template
index 59b62f3c102..ab9051f0601 100644
--- a/testnet.template
+++ b/testnet.template
@@ -87,8 +87,7 @@ ecmd set contract eosio $bioscontractpath eosio.bios.wasm eosio.bios.abi
 # Preactivate all digests
 for digest in "${featuredigests[@]}";
 do
-echo "$index";
-ecmd push action eosio preactivate '["'$digest'"]' -p eosio
+ecmd push action eosio preactivate "{\"feature_digest\":\"$digest\"}" -p eosio
 done

 # Create required system accounts

From ab4a0ef38c1de08dd339dcd85d490aa4a3b06856 Mon Sep 17 00:00:00 2001
From: arhag
Date: Thu, 21 Mar 2019 22:07:48 -0400
Subject: [PATCH 175/680] store whitelisted_intrinsics in portable snapshot as
 a set of intrinsic names

---
 libraries/chain/CMakeLists.txt                     |  2 +-
 libraries/chain/controller.cpp                     |  1 +
 .../eosio/chain/global_property_object.hpp         | 49 ----------
 .../eosio/chain/protocol_state_object.hpp          | 89 +++++++++++++++++++
 .../eosio/chain/whitelisted_intrinsics.hpp         |  5 ++
 libraries/chain/protocol_feature_manager.cpp       |  2 +-
 libraries/chain/protocol_state_object.cpp          | 54 +++++++++++
 libraries/chain/wasm_interface.cpp                 |  1 +
 libraries/chain/whitelisted_intrinsics.cpp         | 25 ++++++
 libraries/fc                                       |  2 +-
 10 files changed, 178 insertions(+), 52 deletions(-)
 create mode 100644 libraries/chain/include/eosio/chain/protocol_state_object.hpp
 create mode 100644 libraries/chain/protocol_state_object.cpp

diff --git a/libraries/chain/CMakeLists.txt b/libraries/chain/CMakeLists.txt
index 820fbcc63f0..15e8bfb0802 100644
--- a/libraries/chain/CMakeLists.txt
+++ b/libraries/chain/CMakeLists.txt
@@ -41,12 +41,12 @@ add_library( eosio_chain
            webassembly/wabt.cpp

 #          get_config.cpp
-#          global_property_object.cpp
 #
 #          contracts/chain_initializer.cpp

            transaction_metadata.cpp
+           protocol_state_object.cpp
            protocol_feature_activation.cpp
            protocol_feature_manager.cpp
            genesis_intrinsics.cpp
diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp
index c2cf5b3ffc4..279964752e1 100644
--- a/libraries/chain/controller.cpp
+++ b/libraries/chain/controller.cpp
@@ -9,6 +9,7 @@
 #include
 #include
 #include
+#include <eosio/chain/protocol_state_object.hpp>
 #include
 #include
 #include
diff --git a/libraries/chain/include/eosio/chain/global_property_object.hpp b/libraries/chain/include/eosio/chain/global_property_object.hpp
index cbfe7308bd4..14ed594c0bd 100644
--- a/libraries/chain/include/eosio/chain/global_property_object.hpp
+++ b/libraries/chain/include/eosio/chain/global_property_object.hpp
@@ -11,7 +11,6 @@
 #include
 #include
 #include
-#include <eosio/chain/whitelisted_intrinsics.hpp>
 #include
 #include "multi_index_includes.hpp"

@@ -44,45 +43,6 @@ namespace eosio { namespace chain {
       >
    >;

-   /**
-    * @class protocol_state_object
-    * @brief Maintains global state information about consensus protocol rules
-    * @ingroup object
-    * @ingroup implementation
-    */
-   class protocol_state_object : public chainbase::object<protocol_state_object_type, protocol_state_object>
-   {
-      OBJECT_CTOR(protocol_state_object, (activated_protocol_features)(preactivated_protocol_features)(whitelisted_intrinsics))
-
-   public:
-      struct activated_protocol_feature {
-         digest_type feature_digest;
-         uint32_t    activation_block_num = 0;
-
-         activated_protocol_feature() = default;
-
-         activated_protocol_feature( const digest_type& feature_digest, uint32_t activation_block_num )
-         :feature_digest( feature_digest )
-         ,activation_block_num( activation_block_num )
-         {}
-      };
-
-   public:
-      id_type                                   id;
-      shared_vector<activated_protocol_feature> activated_protocol_features;
-      shared_vector<digest_type>                preactivated_protocol_features;
-      whitelisted_intrinsics_type               whitelisted_intrinsics;
-   };
-
-   using protocol_state_multi_index = chainbase::shared_multi_index_container<
-      protocol_state_object,
-      indexed_by<
-         ordered_unique<tag<by_id>,
-            BOOST_MULTI_INDEX_MEMBER(protocol_state_object, protocol_state_object::id_type, id)
-         >
-      >
-   >;
-
    /**
     * @class dynamic_global_property_object
    * @brief Maintains global state information that frequently change
@@ -109,7 +69,6 @@ namespace eosio { namespace chain {
 }}

 CHAINBASE_SET_INDEX_TYPE(eosio::chain::global_property_object, eosio::chain::global_property_multi_index)
-CHAINBASE_SET_INDEX_TYPE(eosio::chain::protocol_state_object, eosio::chain::protocol_state_multi_index)
 CHAINBASE_SET_INDEX_TYPE(eosio::chain::dynamic_global_property_object,
                          eosio::chain::dynamic_global_property_multi_index)

@@ -117,14 +76,6 @@ FC_REFLECT(eosio::chain::global_property_object,
            (proposed_schedule_block_num)(proposed_schedule)(configuration)
           )

-FC_REFLECT(eosio::chain::protocol_state_object::activated_protocol_feature,
-           (feature_digest)(activation_block_num)
-          )
-
-FC_REFLECT(eosio::chain::protocol_state_object,
-           (activated_protocol_features)(preactivated_protocol_features)(whitelisted_intrinsics)
-          )
-
 FC_REFLECT(eosio::chain::dynamic_global_property_object,
            (global_action_sequence)
           )
diff --git a/libraries/chain/include/eosio/chain/protocol_state_object.hpp b/libraries/chain/include/eosio/chain/protocol_state_object.hpp
new file mode 100644
index 00000000000..6be252a2638
--- /dev/null
+++ b/libraries/chain/include/eosio/chain/protocol_state_object.hpp
@@ -0,0 +1,89 @@
+/**
+ *  @file
+ *  @copyright defined in eos/LICENSE
+ */
+#pragma once
+
+#include <eosio/chain/types.hpp>
+#include <eosio/chain/database_utils.hpp>
+#include <eosio/chain/whitelisted_intrinsics.hpp>
+#include <eosio/chain/snapshot.hpp>
+#include "multi_index_includes.hpp"
+
+namespace eosio { namespace chain {
+
+   /**
+    * @class protocol_state_object
+    * @brief Maintains global state information about consensus protocol rules
+    * @ingroup object
+    * @ingroup implementation
+    */
+   class protocol_state_object : public chainbase::object<protocol_state_object_type, protocol_state_object>
+   {
+      OBJECT_CTOR(protocol_state_object, (activated_protocol_features)(preactivated_protocol_features)(whitelisted_intrinsics))
+
+   public:
+      struct activated_protocol_feature {
+         digest_type feature_digest;
+         uint32_t    activation_block_num = 0;
+
+         activated_protocol_feature() = default;
+
+         activated_protocol_feature( const digest_type& feature_digest, uint32_t activation_block_num )
+         :feature_digest( feature_digest )
+         ,activation_block_num( activation_block_num )
+         {}
+      };
+
+   public:
+      id_type                                   id;
+      shared_vector<activated_protocol_feature> activated_protocol_features;
+      shared_vector<digest_type>                preactivated_protocol_features;
+      whitelisted_intrinsics_type               whitelisted_intrinsics;
+   };
+
+   using protocol_state_multi_index = chainbase::shared_multi_index_container<
+      protocol_state_object,
+      indexed_by<
+         ordered_unique<tag<by_id>,
+            BOOST_MULTI_INDEX_MEMBER(protocol_state_object, protocol_state_object::id_type, id)
+         >
+      >
+   >;
+
+   struct snapshot_protocol_state_object {
+      vector<protocol_state_object::activated_protocol_feature> activated_protocol_features;
+      vector<digest_type>                                        preactivated_protocol_features;
+      std::set<std::string>                                      whitelisted_intrinsics;
+   };
+
+   namespace detail {
+      template<>
+      struct snapshot_row_traits<protocol_state_object> {
+         using value_type = protocol_state_object;
+         using snapshot_type = snapshot_protocol_state_object;
+
+         static snapshot_protocol_state_object to_snapshot_row( const protocol_state_object& value,
+                                                                const chainbase::database& db );
+
+         static void from_snapshot_row( snapshot_protocol_state_object&& row,
+                                        protocol_state_object& value,
+                                        chainbase::database& db );
+      };
+   }
+
+}}
+
+CHAINBASE_SET_INDEX_TYPE(eosio::chain::protocol_state_object, eosio::chain::protocol_state_multi_index)
+
+FC_REFLECT(eosio::chain::protocol_state_object::activated_protocol_feature,
+           (feature_digest)(activation_block_num)
+          )
+
+FC_REFLECT(eosio::chain::protocol_state_object,
+           (activated_protocol_features)(preactivated_protocol_features)(whitelisted_intrinsics)
+          )
+
+FC_REFLECT(eosio::chain::snapshot_protocol_state_object,
+           (activated_protocol_features)(preactivated_protocol_features)(whitelisted_intrinsics)
+          )
diff --git a/libraries/chain/include/eosio/chain/whitelisted_intrinsics.hpp b/libraries/chain/include/eosio/chain/whitelisted_intrinsics.hpp
index d9466114afe..96fbf2e2195 100644
--- a/libraries/chain/include/eosio/chain/whitelisted_intrinsics.hpp
+++ b/libraries/chain/include/eosio/chain/whitelisted_intrinsics.hpp
@@ -19,4 +19,9 @@ namespace eosio { namespace chain {
    void remove_intrinsic_from_whitelist( whitelisted_intrinsics_type& whitelisted_intrinsics,
                                          const std::string& name );

+   void reset_intrinsic_whitelist( whitelisted_intrinsics_type& whitelisted_intrinsics,
+                                   const std::set<std::string>& s );
+
+   std::set<std::string> convert_intrinsic_whitelist_to_set( const whitelisted_intrinsics_type& whitelisted_intrinsics );
+
 } } // namespace eosio::chain
diff --git a/libraries/chain/protocol_feature_manager.cpp b/libraries/chain/protocol_feature_manager.cpp
index bc03e829218..a7fe5ecb1ad 100644
--- a/libraries/chain/protocol_feature_manager.cpp
+++ b/libraries/chain/protocol_feature_manager.cpp
@@ -4,7 +4,7 @@
  */

 #include
-#include <eosio/chain/global_property_object.hpp>
+#include <eosio/chain/protocol_state_object.hpp>
 #include
 #include
diff --git a/libraries/chain/protocol_state_object.cpp b/libraries/chain/protocol_state_object.cpp
new file mode 100644
index 00000000000..8a860248a3b
--- /dev/null
+++ b/libraries/chain/protocol_state_object.cpp
@@ -0,0 +1,54 @@
+/**
+ *  @file
+ *  @copyright defined in eos/LICENSE
+ */
+#include <eosio/chain/protocol_state_object.hpp>
+
+namespace eosio { namespace chain {
+
+   namespace detail {
+
+      snapshot_protocol_state_object
+      snapshot_row_traits<protocol_state_object>::to_snapshot_row( const protocol_state_object& value,
+                                                                   const chainbase::database& db )
+      {
+         snapshot_protocol_state_object res;
+
+         res.activated_protocol_features.reserve( value.activated_protocol_features.size() );
+         for( const auto& v : value.activated_protocol_features ) {
+            res.activated_protocol_features.emplace_back( v );
+         }
+
+         res.preactivated_protocol_features.reserve( value.preactivated_protocol_features.size() );
+         for( const auto& v : value.preactivated_protocol_features ) {
+            res.preactivated_protocol_features.emplace_back( v );
+         }
+
+         res.whitelisted_intrinsics = convert_intrinsic_whitelist_to_set( value.whitelisted_intrinsics );
+
+         return res;
+      }
+
+      void
+      snapshot_row_traits<protocol_state_object>::from_snapshot_row( snapshot_protocol_state_object&& row,
+                                                                     protocol_state_object& value,
+                                                                     chainbase::database& db )
+      {
+         value.activated_protocol_features.clear();
+         value.activated_protocol_features.reserve( row.activated_protocol_features.size() );
+         for( const auto& v : row.activated_protocol_features ) {
+            value.activated_protocol_features.emplace_back( v );
+         }
+
+         value.preactivated_protocol_features.clear();
+         value.preactivated_protocol_features.reserve( row.preactivated_protocol_features.size() );
+         for( const auto& v : row.preactivated_protocol_features ) {
+            value.preactivated_protocol_features.emplace_back( v );
+         }
+
+         reset_intrinsic_whitelist( value.whitelisted_intrinsics, row.whitelisted_intrinsics );
+      }
+
+   }
+
+}}
diff --git a/libraries/chain/wasm_interface.cpp b/libraries/chain/wasm_interface.cpp
index 62a09aeac1e..98d82bbd335 100644
--- a/libraries/chain/wasm_interface.cpp
+++ b/libraries/chain/wasm_interface.cpp
@@ -11,6 +11,7 @@
 #include
 #include
 #include
+#include <eosio/chain/protocol_state_object.hpp>
 #include
 #include
 #include
diff --git a/libraries/chain/whitelisted_intrinsics.cpp b/libraries/chain/whitelisted_intrinsics.cpp
index 7b5ca67b86d..6a4756bf502 100644
--- a/libraries/chain/whitelisted_intrinsics.cpp
+++ b/libraries/chain/whitelisted_intrinsics.cpp
@@ -80,4 +80,29 @@ namespace eosio { namespace chain {
       whitelisted_intrinsics.erase( itr );
    }

+   void reset_intrinsic_whitelist( whitelisted_intrinsics_type& whitelisted_intrinsics,
+                                   const std::set<std::string>& s )
+   {
+      whitelisted_intrinsics.clear();
+
+      for( const auto& name : s ) {
+         uint64_t h = static_cast<uint64_t>( std::hash<std::string>{}( name ) );
+         whitelisted_intrinsics.emplace( std::piecewise_construct,
+                                         std::forward_as_tuple( h ),
+                                         std::forward_as_tuple( name.c_str(), name.size(),
+                                                                whitelisted_intrinsics.get_allocator() )
+         );
+      }
+   }
+
+   std::set<std::string> convert_intrinsic_whitelist_to_set( const whitelisted_intrinsics_type& whitelisted_intrinsics ) {
+      std::set<std::string> s;
+
+      for( const auto& p : whitelisted_intrinsics ) {
+         s.emplace( p.second.c_str(), p.second.size() );
+      }
+
+      return s;
+   }
+
 } }
diff --git a/libraries/fc b/libraries/fc
index 0c348cc9af4..2295a0d05f0 160000
--- a/libraries/fc
+++ b/libraries/fc
@@ -1 +1 @@
-Subproject commit 0c348cc9af47d71af57e6926fd64848594a78658
+Subproject commit 2295a0d05f07058c5dd27a1682294fcf428e3346

From a8cd499b9b1ad884d9ca582231a1f5a02bab16f2 Mon Sep 17 00:00:00 2001
From: arhag
Date: Thu, 21 Mar 2019 23:16:20 -0400
Subject: [PATCH 176/680] remove wlogs from get_activated_protocol_features

---
 plugins/chain_plugin/chain_plugin.cpp | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/plugins/chain_plugin/chain_plugin.cpp b/plugins/chain_plugin/chain_plugin.cpp
index c5d22beec14..aca508db776 100644
--- a/plugins/chain_plugin/chain_plugin.cpp
+++ b/plugins/chain_plugin/chain_plugin.cpp
@@ -1344,9 +1344,6 @@ read_only::get_activated_protocol_features( const read_only::get_activated_proto
       return itr;
    };

-   wlog( "lower_bound_value = ${value}", ("value", lower_bound_value) );
-   wlog( "upper_bound_value = ${value}", ("value", upper_bound_value) );
-
   auto lower = ( params.search_by_block_num ?
pfm.lower_bound( lower_bound_value ) : pfm.at_activation_ordinal( lower_bound_value ) ); From b364446dde4533c08345c6f7b6c0bd29f5f019a7 Mon Sep 17 00:00:00 2001 From: Nathan Pierce Date: Fri, 22 Mar 2019 09:56:32 -0700 Subject: [PATCH 177/680] Issue 6940 (#6978) --- README.md | 14 +++++++++++++- scripts/eosio_install.sh | 2 +- 2 files changed, 14 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index c36b2c6d1a5..e22a2b2cebc 100644 --- a/README.md +++ b/README.md @@ -26,7 +26,11 @@ Block.one is neither launching nor operating any initial public blockchains base There is no public testnet running currently. -**If you have previously installed EOSIO, please run the `eosio_uninstall` script (it is in the directory where you cloned EOSIO) before downloading and using the binary releases.** +--- + +**If you used our build scripts to install eosio, [please be sure to uninstall](#build-script-uninstall) before using our packages.** + +--- #### Mac OS X Brew Install ```sh @@ -37,6 +41,7 @@ $ brew install eosio ```sh $ brew remove eosio ``` + #### Ubuntu 18.04 Debian Package Install ```sh $ wget https://github.com/eosio/eos/releases/download/v1.6.0/eosio_1.7.0-rc1-ubuntu-18.04_amd64.deb @@ -70,6 +75,13 @@ $ sudo yum install ./eosio-1.7.0-rc1.fc27.x86_64.rpm $ sudo yum remove eosio.cdt ``` +#### Build Script Uninstall + +If you have previously installed EOSIO using build scripts, you have two options (neither impact your data directory by default): + +1. `./scripts/eosio_uninstall.sh` - Will uninstall eosio, yet leave dependencies (you can use --full to delete your data directory). +2. `./scripts/full_uninstaller.sh` - Will uninstall eosio and dependencies (can be forced; see script). + ## Supported Operating Systems EOSIO currently supports the following operating systems: 1. 
Amazon 2017.09 and higher
diff --git a/scripts/eosio_install.sh b/scripts/eosio_install.sh
index ac5a731f2fd..a858fd63430 100755
--- a/scripts/eosio_install.sh
+++ b/scripts/eosio_install.sh
@@ -78,7 +78,7 @@ printf " \\__\\/     \\__\\/     \\__\\/     \\__\\/
 printf "==============================================================================================\\n"
 printf "EOSIO has been installed into ${OPT_LOCATION}/eosio/bin!\\n"
-printf "If you need to, you can fully uninstall using eosio_uninstall.sh && scripts/clean_old_install.sh.\\n"
+printf "If you need to, you can uninstall using: ./scripts/full_uninstaller.sh (it will leave your data directory).\\n"
 printf "==============================================================================================\\n\\n"
 printf "EOSIO website: https://eos.io\\n"

From b0b4fdae2f3514b325a49d5a2494e784f2fb66ad Mon Sep 17 00:00:00 2001
From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com>
Date: Fri, 22 Mar 2019 13:40:05 -0400
Subject: [PATCH 178/680] Remove setting CMAKE_OSX_SYSROOT

Setting CMAKE_OSX_SYSROOT has been shown to cause build failures on fresh
macos 10.13 installs
---
 CMakeLists.txt | 10 ++--------
 1 file changed, 2 insertions(+), 8 deletions(-)

diff --git a/CMakeLists.txt b/CMakeLists.txt
index f9375f0f8b9..00258c4b86d 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -14,14 +14,8 @@ endif()
 list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/libraries/fc/CMakeModules")
 list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/CMakeModules")

-if (UNIX)
-   if (APPLE)
-      execute_process(COMMAND xcrun --show-sdk-path
-         OUTPUT_VARIABLE CMAKE_OSX_SYSROOT
-         OUTPUT_STRIP_TRAILING_WHITESPACE)
-      list(APPEND CMAKE_PREFIX_PATH "/usr/local/opt/llvm@4")
-      list(APPEND CMAKE_PREFIX_PATH "/usr/local/opt/gettext")
-   endif()
+if (UNIX AND APPLE)
+   list(APPEND CMAKE_PREFIX_PATH "/usr/local/opt/llvm@4" "/usr/local/opt/gettext")
 endif()

 include( GNUInstallDirs )

From cb98d8f756098547b8a35186c49cd8f138cbeee1 Mon Sep 17 00:00:00 2001
From: Brian Johnson
Date: Fri, 22 Feb 2019 08:10:05 -0600
Subject: [PATCH 179/680] Added ability to configure nodes that are not
 launched immediately.
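The mechanism, visible in the `launcher_def::launch` hunks below, is that a node flagged with `dont_start` is fully configured but never spawned; the launcher instead writes the assembled command line to a `start.cmd` file in the node's data directory so a test harness can start the node on demand. A simplified sketch of that branch, condensed from this patch (remote-host handling and PID bookkeeping omitted):

   #include <boost/filesystem.hpp>
   #include <boost/filesystem/fstream.hpp>
   #include <iostream>
   #include <string>

   namespace bfs = boost::filesystem;

   // Either spawn the configured nodeos command or persist it for later use.
   void launch_or_defer( const std::string& eosdcmd, const bfs::path& data_dir, bool dont_start ) {
      if( !dont_start ) {
         std::cerr << "spawning child, " << eosdcmd << std::endl;
         // the real launcher runs: bp::child c(eosdcmd, ...); c.detach();
      } else {
         std::cerr << "not spawning child, " << eosdcmd << std::endl;
         bfs::ofstream sf( data_dir / "start.cmd" );
         sf << eosdcmd << std::endl;   // harness reads this file to start the node later
      }
   }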
---
 programs/eosio-launcher/main.cpp | 53 +++++++++++++++++++++++++++++---
 1 file changed, 49 insertions(+), 4 deletions(-)

diff --git a/programs/eosio-launcher/main.cpp b/programs/eosio-launcher/main.cpp
index 8a3a75a721b..066305e4122 100644
--- a/programs/eosio-launcher/main.cpp
+++ b/programs/eosio-launcher/main.cpp
@@ -247,6 +247,7 @@ class tn_node_def {
   vector<string>  producers;
   eosd_def*       instance;
   string          gelf_endpoint;
+  bool            dont_start;
 };

 void
@@ -390,6 +391,8 @@ string producer_names::producer_name(unsigned int producer_number) {
 struct launcher_def {
   bool force_overwrite;
   size_t total_nodes;
+  size_t unstarted_nodes;
+  size_t total_nodes;
   size_t prod_nodes;
   size_t producers;
   size_t next_node;
@@ -481,6 +484,7 @@ launcher_def::set_options (bpo::options_description &cfg) {
   cfg.add_options()
     ("force,f", bpo::bool_switch(&force_overwrite)->default_value(false), "Force overwrite of existing configuration files and erase blockchain")
     ("nodes,n",bpo::value<size_t>(&total_nodes)->default_value(1),"total number of nodes to configure and launch")
+    ("unstarted-nodes",bpo::value<size_t>(&unstarted_nodes)->default_value(0),"total number of nodes to configure, but not launch")
     ("pnodes,p",bpo::value<size_t>(&prod_nodes)->default_value(1),"number of nodes that contain one or more producers")
     ("producers",bpo::value<size_t>(&producers)->default_value(21),"total number of non-bios producer instances in this network")
     ("mode,m",bpo::value<vector<string>>()->multitoken()->default_value({"any"}, "any"),"connection mode, combination of \"any\", \"producers\", \"specified\", \"none\"")
@@ -634,7 +638,31 @@ launcher_def::initialize (const variables_map &vmap) {
   if (prod_nodes > (producers + 1))
     prod_nodes = producers;
   if (prod_nodes > total_nodes)
-    total_nodes = prod_nodes;
+    total_nodes = prod_nodes + unstarted_nodes;
+  else if (total_nodes < prod_nodes + unstarted_nodes) {
+    cerr << "ERROR: if provided, \"--nodes\" must be equal or greater than the number of nodes indicated by \"--pnodes\" and \"--unstarted-nodes\"."
<< endl;
+    exit (-1);
+  }
+
+  if (vmap.count("specific-num")) {
+    const auto specific_nums = vmap["specific-num"].as<vector<uint>>();
+    const auto specific_args = vmap["specific-nodeos"].as<vector<string>>();
+    if (specific_nums.size() != specific_args.size()) {
+      cerr << "ERROR: every specific-num argument must be paired with a specific-nodeos argument" << endl;
+      exit (-1);
+    }
+    // don't include bios
+    const auto allowed_nums = total_nodes - 1;
+    for(uint i = 0; i < specific_nums.size(); ++i)
+    {
+      const auto& num = specific_nums[i];
+      if (num >= allowed_nums) {
+        cerr << "\"--specific-num\" provided value= " << num << " is higher than \"--nodes\" provided value=" << total_nodes << endl;
+        exit (-1);
+      }
+      specific_nodeos_args[num] = specific_args[i];
+    }
+  }

   char* erd_env_var = getenv ("EOSIO_HOME");
   if (erd_env_var == nullptr || std::string(erd_env_var).empty()) {
@@ -733,7 +761,7 @@ launcher_def::generate () {
   write_dot_file ();

   if (!output.empty()) {
-   bfs::path savefile = output;
+    bfs::path savefile = output;
    {
      bfs::ofstream sf (savefile);
      sf << fc::json::to_pretty_string (network) << endl;
@@ -754,6 +782,7 @@ launcher_def::generate () {
     }
     return false;
   }
+
   return true;
 }

@@ -864,6 +893,7 @@ launcher_def::bind_nodes () {
   int extra = producers % non_bios;
   unsigned int i = 0;
   unsigned int producer_number = 0;
+  const auto to_not_start_node = total_nodes - unstarted_nodes - 1;
   for (auto &h : bindings) {
     for (auto &inst : h.instances) {
       bool is_bios = inst.name == "bios";
@@ -894,6 +924,7 @@ launcher_def::bind_nodes () {
             ++producer_number;
           }
         }
+        node.dont_start = i >= to_not_start_node;
       }
       node.gelf_endpoint = gelf_endpoint;
       network.nodes[node.name] = move(node);
@@ -1564,6 +1595,10 @@ launcher_def::launch (eosd_def &instance, string &gts) {
   }

   if (!host->is_local()) {
+    if (instance.node->dont_start) {
+      cerr << "Unable to use \"unstarted-nodes\" with a remote host" << endl;
+      exit (-1);
+    }
     string cmdl ("cd ");
     cmdl += host->eosio_home + "; nohup " + eosdcmd + " > "
       + reout.string() + " 2> " + reerr.string() + "& echo $! > " + pidf.string()
@@ -1578,7 +1613,7 @@ launcher_def::launch (eosd_def &instance, string &gts) {
     string cmd = "cd " + host->eosio_home + "; kill -15 $(cat " + pidf.string() + ")";
     format_ssh (cmd, host->host_name, info.kill_cmd);
   }
-  else {
+  else if (!instance.node->dont_start) {
     cerr << "spawning child, " << eosdcmd << endl;

     bp::child c(eosdcmd, bp::std_out > reout, bp::std_err > reerr );
@@ -1600,6 +1635,16 @@ launcher_def::launch (eosd_def &instance, string &gts) {
     }
     c.detach();
   }
+  else {
+    cerr << "not spawning child, " << eosdcmd << endl;
+
+    const bfs::path dd = instance.data_dir_name;
+    const bfs::path start_file = dd / "start.cmd";
+    bfs::ofstream sf (start_file);
+
+    sf << eosdcmd << endl;
+    sf.close();
+  }
   last_run.running_nodes.emplace_back (move(info));
 }

@@ -2046,7 +2091,7 @@ FC_REFLECT( eosd_def,
             (p2p_endpoint) )

 // @ignore instance, gelf_endpoint
-FC_REFLECT( tn_node_def, (name)(keys)(peers)(producers) )
+FC_REFLECT( tn_node_def, (name)(keys)(peers)(producers)(dont_start) )

 FC_REFLECT( testnet_def, (name)(ssh_helper)(nodes) )

From 328473c9af665c780bf00163cb58e03e85e5d7b4 Mon Sep 17 00:00:00 2001
From: Brian Johnson
Date: Fri, 22 Feb 2019 11:42:31 -0600
Subject: [PATCH 180/680] Cleanup of scripts.
--- tests/Cluster.py | 63 ++++++++++--------- tests/Node.py | 3 +- ...onsensus-validation-malicious-producers.py | 2 +- tests/distributed-transactions-test.py | 2 +- tests/testUtils.py | 22 ++++++- 5 files changed, 58 insertions(+), 34 deletions(-) diff --git a/tests/Cluster.py b/tests/Cluster.py index d41d8e8731d..2c2486d48bf 100644 --- a/tests/Cluster.py +++ b/tests/Cluster.py @@ -31,7 +31,6 @@ class Cluster(object): __LauncherCmdArr=[] __bootlog="eosio-ignition-wd/bootlog.txt" __configDir="etc/eosio/" - __dataDir="var/lib/" # pylint: disable=too-many-arguments # walletd [True|False] Is keosd running. If not load the wallet plugin @@ -128,11 +127,12 @@ def setAlternateVersionLabels(self, file): # pylint: disable=too-many-return-statements # pylint: disable=too-many-branches # pylint: disable=too-many-statements - def launch(self, pnodes=1, totalNodes=1, prodCount=1, topo="mesh", p2pPlugin="net", delay=1, onlyBios=False, dontBootstrap=False, + def launch(self, pnodes=1, unstartedNodes=0, totalNodes=1, prodCount=1, topo="mesh", p2pPlugin="net", delay=1, onlyBios=False, dontBootstrap=False, totalProducers=None, extraNodeosArgs=None, useBiosBootFile=True, specificExtraNodeosArgs=None, alternateVersionLabelsFile=None, associatedNodeLabels=None): """Launch cluster. pnodes: producer nodes count + unstartedNodes: non-producer nodes that are configured into the launch, but not started totalNodes: producer + non-producer nodes count prodCount: producers per producer node count topo: cluster topology (as defined by launcher, and "bridge" shape that is specific to this launch method) @@ -169,6 +169,8 @@ def launch(self, pnodes=1, totalNodes=1, prodCount=1, topo="mesh", p2pPlugin="ne if pnodes > totalNodes: raise RuntimeError("totalNodes (%d) must be equal to or greater than pnodes(%d)." % (totalNodes, pnodes)) + if pnodes + unstartedNodes > totalNodes: + raise RuntimeError("totalNodes (%d) must be equal to or greater than pnodes(%d) + unstartedNodes(%d)." % (totalNodes, pnodes, unstartedNodes)) if self.walletMgr is None: self.walletMgr=WalletMgr(True) @@ -806,15 +808,6 @@ def nodeNameToId(name): m=re.search(r"node_([\d]+)", name) return int(m.group(1)) - @staticmethod - def nodeExtensionToName(ext): - r"""Convert node extension (bios, 0, 1, etc) to node name. """ - prefix="node_" - if ext == "bios": - return prefix + ext - - return "node_%02d" % (ext) - @staticmethod def parseProducerKeys(configFile, nodeName): """Parse node config file for producer keys. Returns dictionary. (Keys: account name; Values: dictionary objects (Keys: ["name", "node", "private","public"]; Values: account name, node id returned by nodeNameToId(nodeName), private key(string)and public key(string))).""" @@ -852,7 +845,7 @@ def parseProducerKeys(configFile, nodeName): def parseProducers(nodeNum): """Parse node config file for producers.""" - configFile=Cluster.__configDir + Cluster.nodeExtensionToName(nodeNum) + "/config.ini" + configFile=Cluster.__configDir + Utils.nodeExtensionToName(nodeNum) + "/config.ini" if Utils.Debug: Utils.Print("Parsing config file %s" % configFile) configStr=None with open(configFile, 'r') as f: @@ -870,7 +863,7 @@ def parseProducers(nodeNum): def parseClusterKeys(totalNodes): """Parse cluster config file. 
Updates producer keys data members.""" - nodeName=Cluster.nodeExtensionToName("bios") + nodeName=Utils.nodeExtensionToName("bios") configFile=Cluster.__configDir + nodeName + "/config.ini" if Utils.Debug: Utils.Print("Parsing config file %s" % configFile) producerKeys=Cluster.parseProducerKeys(configFile, nodeName) @@ -879,7 +872,7 @@ def parseClusterKeys(totalNodes): return None for i in range(0, totalNodes): - nodeName=Cluster.nodeExtensionToName(i) + nodeName=Utils.nodeExtensionToName(i) configFile=Cluster.__configDir + nodeName + "/config.ini" if Utils.Debug: Utils.Print("Parsing config file %s" % configFile) @@ -1254,7 +1247,7 @@ def myFunc(): @staticmethod def pgrepEosServerPattern(nodeInstance): - dataLocation=Cluster.__dataDir + Cluster.nodeExtensionToName(nodeInstance) + dataLocation=Utils.getNodeDataDir(nodeInstance) return r"[\n]?(\d+) (.* --data-dir %s .*)\n" % (dataLocation) # Populates list of EosInstanceInfo objects, matched to actual running instances @@ -1272,18 +1265,30 @@ def discoverLocalNodes(self, totalNodes, timeout=None): psOutDisplay=psOut[:6660]+"..." if Utils.Debug: Utils.Print("pgrep output: \"%s\"" % psOutDisplay) for i in range(0, totalNodes): - pattern=Cluster.pgrepEosServerPattern(i) - m=re.search(pattern, psOut, re.MULTILINE) - if m is None: - Utils.Print("ERROR: Failed to find %s pid. Pattern %s" % (Utils.EosServerName, pattern)) + instance=self.discoverLocalNode(i, psOut) + if instance is None: break - instance=Node(self.host, self.port + i, pid=int(m.group(1)), cmd=m.group(2), walletMgr=self.walletMgr, enableMongo=self.enableMongo, mongoHost=self.mongoHost, mongoPort=self.mongoPort, mongoDb=self.mongoDb) - if Utils.Debug: Utils.Print("Node>", instance) nodes.append(instance) if Utils.Debug: Utils.Print("Found %d nodes" % (len(nodes))) return nodes + # Populate a node matched to actual running instance + def discoverLocalNode(self, nodeNum, psOut=None): + if psOut is None: + psOut=Cluster.pgrepEosServers(timeout) + if psOut is None: + Utils.Print("ERROR: No nodes discovered.") + return nodes + pattern=Cluster.pgrepEosServerPattern(nodeNum) + m=re.search(pattern, psOut, re.MULTILINE) + if m is None: + Utils.Print("ERROR: Failed to find %s pid. 
Pattern %s" % (Utils.EosServerName, pattern)) + return None + instance=Node(self.host, self.port + nodeNum, pid=int(m.group(1)), cmd=m.group(2), walletMgr=self.walletMgr, enableMongo=self.enableMongo, mongoHost=self.mongoHost, mongoPort=self.mongoPort, mongoDb=self.mongoDb) + if Utils.Debug: Utils.Print("Node>", instance) + return instance + def discoverBiosNodePid(self, timeout=None): psOut=Cluster.pgrepEosServers(timeout=timeout) pattern=Cluster.pgrepEosServerPattern("bios") @@ -1348,20 +1353,20 @@ def __findFiles(path): return files def dumpErrorDetails(self): - fileName=os.path.join(Cluster.__configDir + Cluster.nodeExtensionToName("bios"), "config.ini") + fileName=os.path.join(Cluster.__configDir + Utils.nodeExtensionToName("bios"), "config.ini") Cluster.dumpErrorDetailImpl(fileName) - path=Cluster.__dataDir + Cluster.nodeExtensionToName("bios") + path=Utils.getNodeDataDir("bios") fileNames=Cluster.__findFiles(path) for fileName in fileNames: Cluster.dumpErrorDetailImpl(fileName) for i in range(0, len(self.nodes)): - configLocation=Cluster.__configDir + Cluster.nodeExtensionToName(i) + configLocation=Cluster.__configDir + Utils.nodeExtensionToName(i) fileName=os.path.join(configLocation, "config.ini") Cluster.dumpErrorDetailImpl(fileName) fileName=os.path.join(configLocation, "genesis.json") Cluster.dumpErrorDetailImpl(fileName) - path=Cluster.__dataDir + Cluster.nodeExtensionToName(i) + path=Utils.getNodeDataDir(i) fileNames=Cluster.__findFiles(path) for fileName in fileNames: Cluster.dumpErrorDetailImpl(fileName) @@ -1435,7 +1440,7 @@ def waitForNextBlock(self, timeout=None): return node.waitForNextBlock(timeout) def cleanup(self): - for f in glob.glob(Cluster.__dataDir + "node_*"): + for f in glob.glob(Utils.DataDir + "node_*"): shutil.rmtree(f) for f in glob.glob(Cluster.__configDir + "node_*"): shutil.rmtree(f) @@ -1510,7 +1515,7 @@ def printBlockLogIfNeeded(self): self.printBlockLog() def getBlockLog(self, nodeExtension): - blockLogDir=Cluster.__dataDir + Cluster.nodeExtensionToName(nodeExtension) + "/blocks/" + blockLogDir=os.path.join(Utils.getNodeDataDir(nodeExtension), "blocks", "") return Utils.getBlockLog(blockLogDir, exitOnError=False) def printBlockLog(self): @@ -1600,8 +1605,8 @@ def compareCommon(blockLogs, blockNameExtensions, first, last): if Utils.Debug: Utils.Print("context=%s" % (context)) ret=Utils.compare(commonBlockLogs[0], commonBlockLogs[i], context) if ret is not None: - blockLogDir1=Cluster.__dataDir + Cluster.nodeExtensionToName(commonBlockNameExtensions[0]) + "/blocks/" - blockLogDir2=Cluster.__dataDir + Cluster.nodeExtensionToName(commonBlockNameExtensions[i]) + "/blocks/" + blockLogDir1=Utils.DataDir + Utils.nodeExtensionToName(commonBlockNameExtensions[0]) + "/blocks/" + blockLogDir2=Utils.DataDir + Utils.nodeExtensionToName(commonBlockNameExtensions[i]) + "/blocks/" Utils.Print(Utils.FileDivider) Utils.Print("Block log from %s:\n%s" % (blockLogDir1, json.dumps(commonBlockLogs[0], indent=1))) Utils.Print(Utils.FileDivider) diff --git a/tests/Node.py b/tests/Node.py index 1c01893ceca..ab0859c7b0d 100644 --- a/tests/Node.py +++ b/tests/Node.py @@ -1334,8 +1334,7 @@ def relaunch(self, nodeId, chainArg, newChain=False, timeout=Utils.systemWaitTim dataDir="var/lib/node_%02d" % (nodeId) dt = datetime.datetime.now() - dateStr="%d_%02d_%02d_%02d_%02d_%02d" % ( - dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second) + dateStr=Utils.getDateString(dt) stdoutFile="%s/stdout.%s.txt" % (dataDir, dateStr) stderrFile="%s/stderr.%s.txt" % (dataDir, dateStr) with 
open(stdoutFile, 'w') as sout, open(stderrFile, 'w') as serr: diff --git a/tests/consensus-validation-malicious-producers.py b/tests/consensus-validation-malicious-producers.py index 971228854d9..e3c6d7fe50e 100755 --- a/tests/consensus-validation-malicious-producers.py +++ b/tests/consensus-validation-malicious-producers.py @@ -246,7 +246,7 @@ def myTest(transWillEnterBlock): topo="mesh" delay=0 Print("Stand up cluster") - if cluster.launch(pnodes, total_nodes, topo, delay) is False: + if cluster.launch(pnodes=pnodes, total_nodes=total_nodes, topo=topo, delay=delay) is False: error("Failed to stand up eos cluster.") return False diff --git a/tests/distributed-transactions-test.py b/tests/distributed-transactions-test.py index 5b302dcf141..c3b794b89c0 100755 --- a/tests/distributed-transactions-test.py +++ b/tests/distributed-transactions-test.py @@ -63,7 +63,7 @@ (pnodes, total_nodes-pnodes, topo, delay)) Print("Stand up cluster") - if cluster.launch(pnodes, total_nodes, topo=topo, delay=delay, p2pPlugin=p2pPlugin) is False: + if cluster.launch(pnodes=pnodes, total_nodes=total_nodes, topo=topo, delay=delay, p2pPlugin=p2pPlugin) is False: errorExit("Failed to stand up eos cluster.") Print ("Wait for Cluster stabilization") diff --git a/tests/testUtils.py b/tests/testUtils.py index 9e7e9c604be..38719fb8455 100755 --- a/tests/testUtils.py +++ b/tests/testUtils.py @@ -36,6 +36,7 @@ class Utils: EosBlockLogPath="programs/eosio-blocklog/eosio-blocklog" FileDivider="=================================================================" + DataDir="var/lib/" @staticmethod def Print(*args, **kwargs): @@ -65,6 +66,24 @@ def setIrreversibleTimeout(timeout): def setSystemWaitTimeout(timeout): Utils.systemWaitTimeout=timeout + @staticmethod + def getDateString(dt): + return "%d_%02d_%02d_%02d_%02d_%02d" % ( + dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second) + + @staticmethod + def nodeExtensionToName(ext): + r"""Convert node extension (bios, 0, 1, etc) to node name. """ + prefix="node_" + if ext == "bios": + return prefix + ext + + return "node_%02d" % (ext) + + @staticmethod + def getNodeDataDir(ext): + return os.path.join(Utils.DataDir, Utils.nodeExtensionToName(ext)) + @staticmethod def getChainStrategies(): chainSyncStrategies={} @@ -180,7 +199,8 @@ def runCmdArrReturnJson(cmdArr, trace=False, silentErrors=True): @staticmethod def runCmdReturnStr(cmd, trace=False): - retStr=Utils.checkOutput(cmd.split()) + cmdArr=shlex.split(cmd) + retStr=Utils.checkOutput(cmdArr) if trace: Utils.Print ("RAW > %s" % (retStr)) return retStr From 5aa5835e8897fd38fd0fdffc6434bf037af7d3cc Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Mon, 25 Feb 2019 08:36:08 -0600 Subject: [PATCH 181/680] Added config dir and data dir utils methods. --- tests/Cluster.py | 19 +++++++++---------- tests/testUtils.py | 19 +++++++++++++++++-- 2 files changed, 26 insertions(+), 12 deletions(-) diff --git a/tests/Cluster.py b/tests/Cluster.py index 2c2486d48bf..0e16c803f05 100644 --- a/tests/Cluster.py +++ b/tests/Cluster.py @@ -30,7 +30,6 @@ class Cluster(object): __BiosPort=8788 __LauncherCmdArr=[] __bootlog="eosio-ignition-wd/bootlog.txt" - __configDir="etc/eosio/" # pylint: disable=too-many-arguments # walletd [True|False] Is keosd running. 
If not load the wallet plugin @@ -845,7 +844,7 @@ def parseProducerKeys(configFile, nodeName): def parseProducers(nodeNum): """Parse node config file for producers.""" - configFile=Cluster.__configDir + Utils.nodeExtensionToName(nodeNum) + "/config.ini" + configFile=Utils.getNodeConfigDir(nodeNum, "config.ini") if Utils.Debug: Utils.Print("Parsing config file %s" % configFile) configStr=None with open(configFile, 'r') as f: @@ -863,19 +862,19 @@ def parseProducers(nodeNum): def parseClusterKeys(totalNodes): """Parse cluster config file. Updates producer keys data members.""" - nodeName=Utils.nodeExtensionToName("bios") - configFile=Cluster.__configDir + nodeName + "/config.ini" + configFile=Utils.getNodeConfigDir("bios", "config.ini") if Utils.Debug: Utils.Print("Parsing config file %s" % configFile) + nodeName=Utils.nodeExtensionToName("bios") producerKeys=Cluster.parseProducerKeys(configFile, nodeName) if producerKeys is None: Utils.Print("ERROR: Failed to parse eosio private keys from cluster config files.") return None for i in range(0, totalNodes): - nodeName=Utils.nodeExtensionToName(i) - configFile=Cluster.__configDir + nodeName + "/config.ini" + configFile=Utils.getNodeConfigDir(i, "config.ini") if Utils.Debug: Utils.Print("Parsing config file %s" % configFile) + nodeName=Utils.nodeExtensionToName(i) keys=Cluster.parseProducerKeys(configFile, nodeName) if keys is not None: producerKeys.update(keys) @@ -1353,7 +1352,7 @@ def __findFiles(path): return files def dumpErrorDetails(self): - fileName=os.path.join(Cluster.__configDir + Utils.nodeExtensionToName("bios"), "config.ini") + fileName=Utils.getNodeConfigDir("bios", "config.ini") Cluster.dumpErrorDetailImpl(fileName) path=Utils.getNodeDataDir("bios") fileNames=Cluster.__findFiles(path) @@ -1361,7 +1360,7 @@ def dumpErrorDetails(self): Cluster.dumpErrorDetailImpl(fileName) for i in range(0, len(self.nodes)): - configLocation=Cluster.__configDir + Utils.nodeExtensionToName(i) + configLocation=Utils.getNodeConfigDir(i) fileName=os.path.join(configLocation, "config.ini") Cluster.dumpErrorDetailImpl(fileName) fileName=os.path.join(configLocation, "genesis.json") @@ -1442,7 +1441,7 @@ def waitForNextBlock(self, timeout=None): def cleanup(self): for f in glob.glob(Utils.DataDir + "node_*"): shutil.rmtree(f) - for f in glob.glob(Cluster.__configDir + "node_*"): + for f in glob.glob(Utils.ConfigDir + "node_*"): shutil.rmtree(f) for f in self.filesToCleanup: @@ -1515,7 +1514,7 @@ def printBlockLogIfNeeded(self): self.printBlockLog() def getBlockLog(self, nodeExtension): - blockLogDir=os.path.join(Utils.getNodeDataDir(nodeExtension), "blocks", "") + blockLogDir=Utils.getNodeDataDir(nodeExtension, "blocks") return Utils.getBlockLog(blockLogDir, exitOnError=False) def printBlockLog(self): diff --git a/tests/testUtils.py b/tests/testUtils.py index 38719fb8455..107be3f087a 100755 --- a/tests/testUtils.py +++ b/tests/testUtils.py @@ -37,6 +37,7 @@ class Utils: FileDivider="=================================================================" DataDir="var/lib/" + ConfigDir="etc/eosio/" @staticmethod def Print(*args, **kwargs): @@ -81,8 +82,22 @@ def nodeExtensionToName(ext): return "node_%02d" % (ext) @staticmethod - def getNodeDataDir(ext): - return os.path.join(Utils.DataDir, Utils.nodeExtensionToName(ext)) + def getNodeDataDir(ext, relativeDir=None, trailingSlash=False): + path=os.path.join(Utils.DataDir, Utils.nodeExtensionToName(ext)) + if relativeDir is not None: + path=os.path.join(path, relativeDir) + if trailingSlash: + 
path=os.path.join(path, "") + return path + + @staticmethod + def getNodeConfigDir(ext, relativeDir=None, trailingSlash=False): + path=os.path.join(Utils.ConfigDir, Utils.nodeExtensionToName(ext)) + if relativeDir is not None: + path=os.path.join(path, relativeDir) + if trailingSlash: + path=os.path.join(path, "") + return path @staticmethod def getChainStrategies(): From 50ea21fa9a6ddfc1feb46926fbcfa438975583ba Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Tue, 26 Feb 2019 08:44:31 -0600 Subject: [PATCH 182/680] Refactoring relaunch logic to allow for a general launch via a command line. --- tests/Node.py | 29 ++++++++++++++++------------- 1 file changed, 16 insertions(+), 13 deletions(-) diff --git a/tests/Node.py b/tests/Node.py index ab0859c7b0d..802aa35e9df 100644 --- a/tests/Node.py +++ b/tests/Node.py @@ -1332,19 +1332,8 @@ def relaunch(self, nodeId, chainArg, newChain=False, timeout=Utils.systemWaitTim myCmd=" ".join(cmdArr) - dataDir="var/lib/node_%02d" % (nodeId) - dt = datetime.datetime.now() - dateStr=Utils.getDateString(dt) - stdoutFile="%s/stdout.%s.txt" % (dataDir, dateStr) - stderrFile="%s/stderr.%s.txt" % (dataDir, dateStr) - with open(stdoutFile, 'w') as sout, open(stderrFile, 'w') as serr: - cmd=myCmd + ("" if chainArg is None else (" " + chainArg)) - Utils.Print("cmd: %s" % (cmd)) - popen=subprocess.Popen(cmd.split(), stdout=sout, stderr=serr) - if cachePopen: - self.popenProc=popen - self.pid=popen.pid - if Utils.Debug: Utils.Print("restart Node host=%s, port=%s, pid=%s, cmd=%s" % (self.host, self.port, self.pid, self.cmd)) + cmd=myCmd + ("" if chainArg is None else (" " + chainArg)) + self.launchCmd(cmd, nodeId) def isNodeAlive(): """wait for node to be responsive.""" @@ -1366,6 +1355,20 @@ def isNodeAlive(): self.killed=False return True + def launchCmd(self, cmd, nodeId): + dataDir=Utils.getNodeDataDir(nodeId) + dt = datetime.datetime.now() + dateStr=Utils.getDateString(dt) + stdoutFile="%s/stdout.%s.txt" % (dataDir, dateStr) + stderrFile="%s/stderr.%s.txt" % (dataDir, dateStr) + with open(stdoutFile, 'w') as sout, open(stderrFile, 'w') as serr: + Utils.Print("cmd: %s" % (cmd)) + popen=subprocess.Popen(cmd.split(), stdout=sout, stderr=serr) + if cachePopen: + self.popenProc=popen + self.pid=popen.pid + if Utils.Debug: Utils.Print("restart Node host=%s, port=%s, pid=%s, cmd=%s" % (self.host, self.port, self.pid, self.cmd)) + def trackCmdTransaction(self, trans, ignoreNonTrans=False): if trans is None: if Utils.Debug: Utils.Print(" cmd returned transaction: %s" % (trans)) From 7206f767e3a08d4ada52c86c89816e0a4f679783 Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Wed, 6 Mar 2019 13:40:27 -0600 Subject: [PATCH 183/680] Fixed initialization of bios node and fixed merge error. 
--- programs/eosio-launcher/main.cpp | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/programs/eosio-launcher/main.cpp b/programs/eosio-launcher/main.cpp index 066305e4122..51a0808103b 100644 --- a/programs/eosio-launcher/main.cpp +++ b/programs/eosio-launcher/main.cpp @@ -247,7 +247,7 @@ class tn_node_def { vector producers; eosd_def* instance; string gelf_endpoint; - bool dont_start; + bool dont_start = false; }; void @@ -392,7 +392,6 @@ struct launcher_def { bool force_overwrite; size_t total_nodes; size_t unstarted_nodes; - size_t total_nodes; size_t prod_nodes; size_t producers; size_t next_node; @@ -893,7 +892,7 @@ launcher_def::bind_nodes () { int extra = producers % non_bios; unsigned int i = 0; unsigned int producer_number = 0; - const auto to_not_start_node = total_nodes - unstarted_nodes - 1; + const auto to_not_start_node = total_nodes - unstarted_nodes; for (auto &h : bindings) { for (auto &inst : h.instances) { bool is_bios = inst.name == "bios"; From 5c71b5d5e75eeb30bcbb5cf1906570b248b1f4a3 Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Wed, 6 Mar 2019 13:42:33 -0600 Subject: [PATCH 184/680] Fixed error in launchCmd refactor. GH #6727 --- tests/Node.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/Node.py b/tests/Node.py index 802aa35e9df..8f15ba5fece 100644 --- a/tests/Node.py +++ b/tests/Node.py @@ -1333,7 +1333,7 @@ def relaunch(self, nodeId, chainArg, newChain=False, timeout=Utils.systemWaitTim myCmd=" ".join(cmdArr) cmd=myCmd + ("" if chainArg is None else (" " + chainArg)) - self.launchCmd(cmd, nodeId) + self.launchCmd(cmd, nodeId, cachePopen) def isNodeAlive(): """wait for node to be responsive.""" @@ -1355,7 +1355,7 @@ def isNodeAlive(): self.killed=False return True - def launchCmd(self, cmd, nodeId): + def launchCmd(self, cmd, nodeId, cachePopen=False): dataDir=Utils.getNodeDataDir(nodeId) dt = datetime.datetime.now() dateStr=Utils.getDateString(dt) From 5f86a9d20bc8ba4014a0397531104e7deb6bae57 Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Wed, 6 Mar 2019 13:43:25 -0600 Subject: [PATCH 185/680] Fixed errors from previous attempt to explicitly set parameters. 
GH #6727 --- tests/consensus-validation-malicious-producers.py | 2 +- tests/distributed-transactions-test.py | 2 +- tests/restart-scenarios-test.py | 2 +- tests/validate-dirty-db.py | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/consensus-validation-malicious-producers.py b/tests/consensus-validation-malicious-producers.py index e3c6d7fe50e..6a3ac94d511 100755 --- a/tests/consensus-validation-malicious-producers.py +++ b/tests/consensus-validation-malicious-producers.py @@ -246,7 +246,7 @@ def myTest(transWillEnterBlock): topo="mesh" delay=0 Print("Stand up cluster") - if cluster.launch(pnodes=pnodes, total_nodes=total_nodes, topo=topo, delay=delay) is False: + if cluster.launch(pnodes=pnodes, totalNodes=total_nodes, topo=topo, delay=delay) is False: error("Failed to stand up eos cluster.") return False diff --git a/tests/distributed-transactions-test.py b/tests/distributed-transactions-test.py index c3b794b89c0..2ea4edfe462 100755 --- a/tests/distributed-transactions-test.py +++ b/tests/distributed-transactions-test.py @@ -63,7 +63,7 @@ (pnodes, total_nodes-pnodes, topo, delay)) Print("Stand up cluster") - if cluster.launch(pnodes=pnodes, total_nodes=total_nodes, topo=topo, delay=delay, p2pPlugin=p2pPlugin) is False: + if cluster.launch(pnodes=pnodes, totalNodes=total_nodes, topo=topo, delay=delay, p2pPlugin=p2pPlugin) is False: errorExit("Failed to stand up eos cluster.") Print ("Wait for Cluster stabilization") diff --git a/tests/restart-scenarios-test.py b/tests/restart-scenarios-test.py index 6b3c217d75d..894a7d0d271 100755 --- a/tests/restart-scenarios-test.py +++ b/tests/restart-scenarios-test.py @@ -66,7 +66,7 @@ pnodes, topo, delay, chainSyncStrategyStr)) Print("Stand up cluster") - if cluster.launch(pnodes, total_nodes, topo=topo, delay=delay, p2pPlugin=p2pPlugin) is False: + if cluster.launch(pnodes=pnodes, totalNodes=total_nodes, topo=topo, delay=delay, p2pPlugin=p2pPlugin) is False: errorExit("Failed to stand up eos cluster.") Print ("Wait for Cluster stabilization") diff --git a/tests/validate-dirty-db.py b/tests/validate-dirty-db.py index ac7520bc353..afcf2767b73 100755 --- a/tests/validate-dirty-db.py +++ b/tests/validate-dirty-db.py @@ -74,7 +74,7 @@ def runNodeosAndGetOutput(myTimeout=3): pnodes, topo, delay, chainSyncStrategyStr)) Print("Stand up cluster") - if cluster.launch(pnodes, total_nodes, topo=topo, delay=delay, dontBootstrap=True) is False: + if cluster.launch(pnodes=pnodes, totalNodes=total_nodes, topo=topo, delay=delay, dontBootstrap=True) is False: errorExit("Failed to stand up eos cluster.") node=cluster.getNode(0) From b6852154701c309c389de6501b9ca8f7cf8f213d Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Sat, 9 Mar 2019 07:52:30 -0600 Subject: [PATCH 186/680] Cleanup. --- tests/nodeos_forked_chain_test.py | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/nodeos_forked_chain_test.py b/tests/nodeos_forked_chain_test.py index 4ef22ab082f..a7f2c777e3c 100755 --- a/tests/nodeos_forked_chain_test.py +++ b/tests/nodeos_forked_chain_test.py @@ -7,7 +7,6 @@ from WalletMgr import WalletMgr from Node import BlockType from Node import Node -from TestHelper import AppArgs from TestHelper import TestHelper import decimal From 87e9e61cc48c80505f650bf8cf3c54fe12de7bf1 Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Sat, 9 Mar 2019 07:53:25 -0600 Subject: [PATCH 187/680] Added support for adding true flag. 
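A usage sketch of the new helper, with a hypothetical flag name (not part of this patch), and assuming TestHelper.parse_args forwards the stored action through to argparse the same way it handles the typed arguments:

    appArgs=AppArgs()
    appArgs.add(flag="--catchup-count", type=int, help="How many catchup-nodes to launch", default=10)
    appArgs.add_bool(flag="--extra-verbose", help="Enable extra-verbose output")   # hypothetical flag
    args=TestHelper.parse_args({"-v"}, applicationSpecificArgs=appArgs)
    extraVerbose=args.extra_verbose   # store_true yields False unless the switch was passed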
--- tests/TestHelper.py | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/tests/TestHelper.py b/tests/TestHelper.py
index a9920a731c1..768fccef890 100644
--- a/tests/TestHelper.py
+++ b/tests/TestHelper.py
@@ -22,6 +22,11 @@ def add(self, flag, type, help, default, choices=None):
         arg=self.AppArg(flag, type, help, default, choices)
         self.args.append(arg)

+
+    def add_bool(self, flag, help, action='store_true'):
+        arg=self.AppArg(flag=flag, help=help, action=action)
+        self.args.append(arg)
+
 # pylint: disable=too-many-instance-attributes
 class TestHelper(object):
     LOCAL_HOST="localhost"

From a5ab5ba9ec1007f02d57c7357ca7abf420748ba4 Mon Sep 17 00:00:00 2001
From: Brian Johnson
Date: Sat, 9 Mar 2019 08:00:40 -0600
Subject: [PATCH 188/680] Fixing logic for launching started and unstarted nodes.

---
 tests/Cluster.py | 25 +++++++++++++------------
 1 file changed, 13 insertions(+), 12 deletions(-)

diff --git a/tests/Cluster.py b/tests/Cluster.py
index 0e16c803f05..258532331f1 100644
--- a/tests/Cluster.py
+++ b/tests/Cluster.py
@@ -131,8 +131,8 @@ def launch(self, pnodes=1, unstartedNodes=0, totalNodes=1, prodCount=1, topo="me
                associatedNodeLabels=None):
         """Launch cluster.
         pnodes: producer nodes count
-        unstartedNodes: non-producer nodes that are configured into the launch, but not started
-        totalNodes: producer + non-producer nodes count
+        unstartedNodes: non-producer nodes that are configured into the launch, but not started. Should be included in totalNodes.
+        totalNodes: producer + non-producer nodes + unstarted non-producer nodes count
         prodCount: producers per producer node count
         topo: cluster topology (as defined by launcher, and "bridge" shape that is specific to this launch method)
         delay: delay between individual nodes launch (as defined by launcher)
@@ -189,14 +189,14 @@ def launch(self, pnodes=1, unstartedNodes=0, totalNodes=1, prodCount=1, topo="me
             tries = tries - 1
             time.sleep(2)

-        cmd="%s -p %s -n %s -d %s -i %s -f --p2p-plugin %s %s" % (
+        cmd="%s -p %s -n %s -d %s -i %s -f --p2p-plugin %s %s --unstarted-nodes %s" % (
             Utils.EosLauncherPath, pnodes, totalNodes, delay, datetime.datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S.%f")[:-3],
-            p2pPlugin, producerFlag)
+            p2pPlugin, producerFlag, unstartedNodes)
         cmdArr=cmd.split()
         if self.staging:
             cmdArr.append("--nogen")

-        nodeosArgs="--max-transaction-time -1 --abi-serializer-max-time-ms 990000 --filter-on * --p2p-max-nodes-per-host %d" % (totalNodes)
+        nodeosArgs="--max-transaction-time -1 --abi-serializer-max-time-ms 990000 --filter-on \"*\" --p2p-max-nodes-per-host %d" % (totalNodes)
         if not self.walletd:
             nodeosArgs += " --plugin eosio::wallet_api_plugin"
         if self.enableMongo:
@@ -262,7 +262,7 @@ def launch(self, pnodes=1, unstartedNodes=0, totalNodes=1, prodCount=1, topo="me
             # of two entries - [ <name>, <node definition> ] with first being the name and second being the node definition
             shapeFileNodes = shapeFileObject["nodes"]

-            numProducers=totalProducers if totalProducers is not None else totalNodes
+            numProducers=totalProducers if totalProducers is not None else (totalNodes - unstartedNodes)
             maxProducers=ord('z')-ord('a')+1
             assert numProducers<maxProducers

Date: Sat, 9 Mar 2019 08:04:35 -0600
Subject: [PATCH 189/680] Fixed txn_test_gen_plugin to allow using different prefixes for the test accounts.
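The three generator accounts are now derived from a configurable prefix instead of the hardcoded txn.test.a/b/t names. A sketch of the resulting node arguments (the txntestacct prefix is the value the catchup test later in this series passes through specificExtraNodeosArgs):

    nodeos ... --plugin eosio::txn_test_gen_plugin --txn-test-gen-account-prefix txntestacct
    # plugin_initialize builds: newaccountA=txntestaccta, newaccountB=txntestacctb, newaccountT=txntestacctt
    # the default prefix "txn.test." keeps the old txn.test.a / txn.test.b / txn.test.t names

With distinct prefixes, separate generator setups can target their own account triples on the same chain rather than all contending for the txn.test.* accounts.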
--- .../txn_test_gen_plugin.cpp | 82 +++++++++++-------- 1 file changed, 50 insertions(+), 32 deletions(-) diff --git a/plugins/txn_test_gen_plugin/txn_test_gen_plugin.cpp b/plugins/txn_test_gen_plugin/txn_test_gen_plugin.cpp index 60383175387..deea09ace55 100755 --- a/plugins/txn_test_gen_plugin/txn_test_gen_plugin.cpp +++ b/plugins/txn_test_gen_plugin/txn_test_gen_plugin.cpp @@ -101,6 +101,9 @@ struct txn_test_gen_plugin_impl { uint16_t thread_pool_size; optional thread_pool; std::shared_ptr timer; + name newaccountA; + name newaccountB; + name newaccountT; void push_next_transaction(const std::shared_ptr>& trxs, const std::function& next ) { chain_plugin& cp = app().get_plugin(); @@ -131,9 +134,6 @@ struct txn_test_gen_plugin_impl { trxs.reserve(2); try { - name newaccountA("txn.test.a"); - name newaccountB("txn.test.b"); - name newaccountC("txn.test.t"); name creator(init_name); abi_def currency_abi_def = fc::json::from_string(contracts::eosio_token_abi().data()).as(); @@ -170,12 +170,12 @@ struct txn_test_gen_plugin_impl { trx.actions.emplace_back(vector{{creator,"active"}}, newaccount{creator, newaccountB, owner_auth, active_auth}); } - //create "txn.test.t" account + //create "T" account { auto owner_auth = eosio::chain::authority{1, {{txn_text_receiver_C_pub_key, 1}}, {}}; auto active_auth = eosio::chain::authority{1, {{txn_text_receiver_C_pub_key, 1}}, {}}; - trx.actions.emplace_back(vector{{creator,"active"}}, newaccount{creator, newaccountC, owner_auth, active_auth}); + trx.actions.emplace_back(vector{{creator,"active"}}, newaccount{creator, newaccountT, owner_auth, active_auth}); } trx.expiration = cc.head_block_time() + fc::seconds(30); @@ -184,55 +184,67 @@ struct txn_test_gen_plugin_impl { trxs.emplace_back(std::move(trx)); } - //set txn.test.t contract to eosio.token & initialize it + //set newaccountT contract to eosio.token & initialize it { signed_transaction trx; vector wasm = contracts::eosio_token_wasm(); setcode handler; - handler.account = newaccountC; + handler.account = newaccountT; handler.code.assign(wasm.begin(), wasm.end()); - trx.actions.emplace_back( vector{{newaccountC,"active"}}, handler); + trx.actions.emplace_back( vector{{newaccountT,"active"}}, handler); { setabi handler; - handler.account = newaccountC; + handler.account = newaccountT; handler.abi = fc::raw::pack(json::from_string(contracts::eosio_token_abi().data()).as()); - trx.actions.emplace_back( vector{{newaccountC,"active"}}, handler); + trx.actions.emplace_back( vector{{newaccountT,"active"}}, handler); } { action act; - act.account = N(txn.test.t); + act.account = newaccountT; act.name = N(create); - act.authorization = vector{{newaccountC,config::active_name}}; - act.data = eosio_token_serializer.variant_to_binary("create", fc::json::from_string("{\"issuer\":\"txn.test.t\",\"maximum_supply\":\"1000000000.0000 CUR\"}}"), abi_serializer_max_time); + act.authorization = vector{{newaccountT,config::active_name}}; + act.data = eosio_token_serializer.variant_to_binary("create", + fc::json::from_string(fc::format_string("{\"issuer\":\"${issuer}\",\"maximum_supply\":\"1000000000.0000 CUR\"}}", + fc::mutable_variant_object()("issuer",newaccountT.to_string()))), + abi_serializer_max_time); trx.actions.push_back(act); } { action act; - act.account = N(txn.test.t); + act.account = newaccountT; act.name = N(issue); - act.authorization = vector{{newaccountC,config::active_name}}; - act.data = eosio_token_serializer.variant_to_binary("issue", 
fc::json::from_string("{\"to\":\"txn.test.t\",\"quantity\":\"60000.0000 CUR\",\"memo\":\"\"}"), abi_serializer_max_time); + act.authorization = vector{{newaccountT,config::active_name}}; + act.data = eosio_token_serializer.variant_to_binary("issue", + fc::json::from_string(fc::format_string("{\"to\":\"${to}\",\"quantity\":\"60000.0000 CUR\",\"memo\":\"\"}", + fc::mutable_variant_object()("to",newaccountT.to_string()))), + abi_serializer_max_time); trx.actions.push_back(act); } { action act; - act.account = N(txn.test.t); + act.account = newaccountT; act.name = N(transfer); - act.authorization = vector{{newaccountC,config::active_name}}; - act.data = eosio_token_serializer.variant_to_binary("transfer", fc::json::from_string("{\"from\":\"txn.test.t\",\"to\":\"txn.test.a\",\"quantity\":\"20000.0000 CUR\",\"memo\":\"\"}"), abi_serializer_max_time); + act.authorization = vector{{newaccountT,config::active_name}}; + act.data = eosio_token_serializer.variant_to_binary("transfer", + fc::json::from_string(fc::format_string("{\"from\":\"${from}\",\"to\":\"${to}\",\"quantity\":\"20000.0000 CUR\",\"memo\":\"\"}", + fc::mutable_variant_object()("from",newaccountT.to_string())("to",newaccountA.to_string()))), + abi_serializer_max_time); trx.actions.push_back(act); } { action act; - act.account = N(txn.test.t); + act.account = newaccountT; act.name = N(transfer); - act.authorization = vector{{newaccountC,config::active_name}}; - act.data = eosio_token_serializer.variant_to_binary("transfer", fc::json::from_string("{\"from\":\"txn.test.t\",\"to\":\"txn.test.b\",\"quantity\":\"20000.0000 CUR\",\"memo\":\"\"}"), abi_serializer_max_time); + act.authorization = vector{{newaccountT,config::active_name}}; + act.data = eosio_token_serializer.variant_to_binary("transfer", + fc::json::from_string(fc::format_string("{\"from\":\"${from}\",\"to\":\"${to}\",\"quantity\":\"20000.0000 CUR\",\"memo\":\"\"}", + fc::mutable_variant_object()("from",newaccountT.to_string())("to",newaccountB.to_string()))), + abi_serializer_max_time); trx.actions.push_back(act); } @@ -266,20 +278,20 @@ struct txn_test_gen_plugin_impl { auto abi_serializer_max_time = app().get_plugin().get_abi_serializer_max_time(); abi_serializer eosio_token_serializer{fc::json::from_string(contracts::eosio_token_abi().data()).as(), abi_serializer_max_time}; //create the actions here - act_a_to_b.account = N(txn.test.t); + act_a_to_b.account = newaccountT; act_a_to_b.name = N(transfer); - act_a_to_b.authorization = vector{{name("txn.test.a"),config::active_name}}; - act_a_to_b.data = eosio_token_serializer.variant_to_binary("transfer", - fc::json::from_string(fc::format_string("{\"from\":\"txn.test.a\",\"to\":\"txn.test.b\",\"quantity\":\"1.0000 CUR\",\"memo\":\"${l}\"}", - fc::mutable_variant_object()("l", salt))), + act_a_to_b.authorization = vector{{newaccountA,config::active_name}}; + act_a_to_b.data = eosio_token_serializer.variant_to_binary("transfer", + fc::json::from_string(fc::format_string("{\"from\":\"${from}\",\"to\":\"{to}\",\"quantity\":\"1.0000 CUR\",\"memo\":\"${l}\"}", + fc::mutable_variant_object()("from",newaccountA.to_string())("to",newaccountB.to_string())("l", salt))), abi_serializer_max_time); - act_b_to_a.account = N(txn.test.t); + act_b_to_a.account = newaccountT; act_b_to_a.name = N(transfer); - act_b_to_a.authorization = vector{{name("txn.test.b"),config::active_name}}; - act_b_to_a.data = eosio_token_serializer.variant_to_binary("transfer", - 
fc::json::from_string(fc::format_string("{\"from\":\"txn.test.b\",\"to\":\"txn.test.a\",\"quantity\":\"1.0000 CUR\",\"memo\":\"${l}\"}", - fc::mutable_variant_object()("l", salt))), + act_b_to_a.authorization = vector{{newaccountB,config::active_name}}; + act_b_to_a.data = eosio_token_serializer.variant_to_binary("transfer", + fc::json::from_string(fc::format_string("{\"from\":\"${from}\",\"to\":\"${to}\",\"quantity\":\"1.0000 CUR\",\"memo\":\"${l}\"}", + fc::mutable_variant_object()("from",newaccountB.to_string())("to",newaccountA.to_string())("l", salt))), abi_serializer_max_time); timer_timeout = period; @@ -371,6 +383,7 @@ struct txn_test_gen_plugin_impl { next(e.dynamic_copy_exception()); } + ilog("send ${c} transactions", ("c",trxs.size())); push_transactions(std::move(trxs), next); } @@ -414,6 +427,7 @@ void txn_test_gen_plugin::set_program_options(options_description&, options_desc cfg.add_options() ("txn-reference-block-lag", bpo::value()->default_value(0), "Lag in number of blocks from the head block when selecting the reference block for transactions (-1 means Last Irreversible Block)") ("txn-test-gen-threads", bpo::value()->default_value(2), "Number of worker threads in txn_test_gen thread pool") + ("txn-test-gen-account-prefix", bpo::value()->default_value("txn.test."), "Prefix to use for accounts generated and used by this plugin") ; } @@ -422,6 +436,10 @@ void txn_test_gen_plugin::plugin_initialize(const variables_map& options) { my.reset( new txn_test_gen_plugin_impl ); my->txn_reference_block_lag = options.at( "txn-reference-block-lag" ).as(); my->thread_pool_size = options.at( "txn-test-gen-threads" ).as(); + const std::string thread_pool_account_prefix = options.at( "txn-test-gen-account-prefix" ).as(); + my->newaccountA = thread_pool_account_prefix + "a"; + my->newaccountB = thread_pool_account_prefix + "b"; + my->newaccountT = thread_pool_account_prefix + "t"; EOS_ASSERT( my->thread_pool_size > 0, chain::plugin_config_exception, "txn-test-gen-threads ${num} must be greater than 0", ("num", my->thread_pool_size) ); } FC_LOG_AND_RETHROW() From 52ac5788faea3696af3aea745ffcdc8e48dfad0a Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Sat, 9 Mar 2019 08:08:23 -0600 Subject: [PATCH 190/680] Pulled out curl processing into its own function and added functions for interacting with the test accounts. 
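A sketch of the intended call sequence from a test script (argument values mirror the catchup test added later in this series; the salt, period, and batch size are forwarded unmodified to the plugin's start_generation):

    node=cluster.getNode(txnGenNodeNum)
    node.txnGenCreateTestAccounts(cluster.eosioAccount.name, cluster.eosioAccount.activePrivateKey)
    node.txnGenStart("salt0", 1500, 150)   # salt, period, batchSize

Each helper builds its JSON payload and hands it to the shared processCurlCmd, which POSTs it to the node's /v1/txn_test_gen/<command> endpoint and decodes the reply.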
--- tests/Node.py | 38 ++++++++++++++++++++++++++++++++++++-- 1 file changed, 36 insertions(+), 2 deletions(-) diff --git a/tests/Node.py b/tests/Node.py index 8f15ba5fece..77c3157b5dc 100644 --- a/tests/Node.py +++ b/tests/Node.py @@ -1075,8 +1075,12 @@ def killNodeOnProducer(self, producer, whereInSequence, blockType=BlockType.head assert(isinstance(blockType, BlockType)) assert(isinstance(returnType, ReturnType)) basedOnLib="true" if blockType==BlockType.lib else "false" - cmd="curl %s/v1/test_control/kill_node_on_producer -d '{ \"producer\":\"%s\", \"where_in_sequence\":%d, \"based_on_lib\":\"%s\" }' -X POST -H \"Content-Type: application/json\"" % \ - (self.endpointHttp, producer, whereInSequence, basedOnLib) + payload="{ \"producer\":\"%s\", \"where_in_sequence\":%d, \"based_on_lib\":\"%s\" }" % (producer, whereInSequence, basedOnLib) + return self.processCurlCmd("test_control", "kill_node_on_producer", payload, silentErrors=silentErrors, exitOnError=exitOnError, exitMsg=exitMsg, returnType=returnType) + + def processCurlCmd(self, resource, command, payload, silentErrors=True, exitOnError=False, exitMsg=None, returnType=ReturnType.json): + cmd="curl %s/v1/%s/%s -d '%s' -X POST -H \"Content-Type: application/json\"" % \ + (self.endpointHttp, resource, command, payload) if Utils.Debug: Utils.Print("cmd: %s" % (cmd)) rtn=None start=time.perf_counter() @@ -1113,6 +1117,23 @@ def killNodeOnProducer(self, producer, whereInSequence, blockType=BlockType.head return rtn + def txnGenCreateTestAccounts(self, genAccount, genKey, silentErrors=True, exitOnError=False, exitMsg=None, returnType=ReturnType.json): + assert(isinstance(genAccount, str)) + assert(isinstance(genKey, str)) + assert(isinstance(returnType, ReturnType)) + + payload="[ \"%s\", \"%s\" ]" % (genAccount, genKey) + return self.processCurlCmd("txn_test_gen", "create_test_accounts", payload, silentErrors=silentErrors, exitOnError=exitOnError, exitMsg=exitMsg, returnType=returnType) + + def txnGenStart(self, salt, period, batchSize, silentErrors=True, exitOnError=False, exitMsg=None, returnType=ReturnType.json): + assert(isinstance(salt, str)) + assert(isinstance(period, int)) + assert(isinstance(batchSize, int)) + assert(isinstance(returnType, ReturnType)) + + payload="[ \"%s\", %d, %d ]" % (salt, period, batchSize) + return self.processCurlCmd("txn_test_gen", "start_generation", payload, silentErrors=silentErrors, exitOnError=exitOnError, exitMsg=exitMsg, returnType=returnType) + def waitForTransBlockIfNeeded(self, trans, waitForTransBlock, exitOnError=False): if not waitForTransBlock: return trans @@ -1355,6 +1376,19 @@ def isNodeAlive(): self.killed=False return True + def launchUnstarted(self, nodeId, cachePopen=False): + startFile=Utils.getNodeDataDir(nodeId, "start.cmd") + if not os.path.exists(startFile): + Utils.Print("Cannot launch unstarted process since %s file does not exist" % startFile) + return False + + with open(startFile, 'r') as file: + cmd=file.read() + Utils.Print("launchUnstarted cmd: %s" % (cmd)) + + self.launchCmd(cmd, nodeId, cachePopen) + return True + def launchCmd(self, cmd, nodeId, cachePopen=False): dataDir=Utils.getNodeDataDir(nodeId) dt = datetime.datetime.now() From 277df3c0c5319d7ad38e0e44b7e3e2244a0a9b7e Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Mon, 11 Mar 2019 21:59:07 -0500 Subject: [PATCH 191/680] Fix error in variable substitution. 
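The previous commit templated the transfer payloads but dropped the $ on one placeholder, so fc::format_string left the literal {to} in the generated JSON instead of the destination account name. A rough Python analogy of the ${var} substitution (string.Template happens to use the same placeholder syntax; this illustrates the bug, it is not the fc implementation):

    from string import Template
    vals={"from": "txn.test.a", "to": "txn.test.b"}
    Template('{"from":"${from}","to":"${to}"}').substitute(vals)
    # -> '{"from":"txn.test.a","to":"txn.test.b"}'
    Template('{"from":"${from}","to":"{to}"}').substitute(vals)
    # -> '{"from":"txn.test.a","to":"{to}"}'   the missing $ leaves the placeholder behind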
--- plugins/txn_test_gen_plugin/txn_test_gen_plugin.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/plugins/txn_test_gen_plugin/txn_test_gen_plugin.cpp b/plugins/txn_test_gen_plugin/txn_test_gen_plugin.cpp index deea09ace55..780127efc15 100755 --- a/plugins/txn_test_gen_plugin/txn_test_gen_plugin.cpp +++ b/plugins/txn_test_gen_plugin/txn_test_gen_plugin.cpp @@ -130,6 +130,7 @@ struct txn_test_gen_plugin_impl { } void create_test_accounts(const std::string& init_name, const std::string& init_priv_key, const std::function& next) { + ilog("create_test_accounts"); std::vector trxs; trxs.reserve(2); @@ -282,7 +283,7 @@ struct txn_test_gen_plugin_impl { act_a_to_b.name = N(transfer); act_a_to_b.authorization = vector{{newaccountA,config::active_name}}; act_a_to_b.data = eosio_token_serializer.variant_to_binary("transfer", - fc::json::from_string(fc::format_string("{\"from\":\"${from}\",\"to\":\"{to}\",\"quantity\":\"1.0000 CUR\",\"memo\":\"${l}\"}", + fc::json::from_string(fc::format_string("{\"from\":\"${from}\",\"to\":\"${to}\",\"quantity\":\"1.0000 CUR\",\"memo\":\"${l}\"}", fc::mutable_variant_object()("from",newaccountA.to_string())("to",newaccountB.to_string())("l", salt))), abi_serializer_max_time); From 3089f7039010ad6ba438cde1cb4055b2d80f1d5a Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Mon, 11 Mar 2019 22:01:36 -0500 Subject: [PATCH 192/680] Add option to not load system contract. GH #6727 --- tests/Cluster.py | 30 +++++++++++++++++------------- 1 file changed, 17 insertions(+), 13 deletions(-) diff --git a/tests/Cluster.py b/tests/Cluster.py index 258532331f1..8665deffb32 100644 --- a/tests/Cluster.py +++ b/tests/Cluster.py @@ -128,7 +128,7 @@ def setAlternateVersionLabels(self, file): # pylint: disable=too-many-statements def launch(self, pnodes=1, unstartedNodes=0, totalNodes=1, prodCount=1, topo="mesh", p2pPlugin="net", delay=1, onlyBios=False, dontBootstrap=False, totalProducers=None, extraNodeosArgs=None, useBiosBootFile=True, specificExtraNodeosArgs=None, alternateVersionLabelsFile=None, - associatedNodeLabels=None): + associatedNodeLabels=None, loadSystemContract=True): """Launch cluster. pnodes: producer nodes count unstartedNodes: non-producer nodes that are configured into the launch, but not started. Should be included in totalNodes. @@ -147,6 +147,7 @@ def launch(self, pnodes=1, unstartedNodes=0, totalNodes=1, prodCount=1, topo="me --specific-nodeos flags on launcher), example: { "5" : "--plugin eosio::test_control_api_plugin" } alternateVersionLabelsFile: Supply an alternate version labels file to use with associatedNodeLabels. associatedNodeLabels: Supply a dictionary of node numbers to use an alternate label for a specific node. 
+ loadSystemContract: indicate whether the eosio.system contract should be loaded (setting this to False causes useBiosBootFile to be treated as False) """ assert(isinstance(topo, str)) if alternateVersionLabelsFile is not None: @@ -397,8 +398,10 @@ def connectGroup(group, producerNodes, bridgeNodes) : return True Utils.Print("Bootstrap cluster.") + if not loadSystemContract: + useBiosBootFile=False #ensure we use Cluster.bootstrap if onlyBios or not useBiosBootFile: - self.biosNode=Cluster.bootstrap(startedNodes, prodCount, totalProducers, Cluster.__BiosHost, Cluster.__BiosPort, self.walletMgr, onlyBios) + self.biosNode=Cluster.bootstrap(startedNodes, prodCount, totalProducers, Cluster.__BiosHost, Cluster.__BiosPort, self.walletMgr, onlyBios, loadSystemContract) if self.biosNode is None: Utils.Print("ERROR: Bootstrap failed.") return False @@ -965,7 +968,7 @@ def bios_bootstrap(totalNodes, biosHost, biosPort, walletMgr, silent=False): return biosNode @staticmethod - def bootstrap(totalNodes, prodCount, totalProducers, biosHost, biosPort, walletMgr, onlyBios=False): + def bootstrap(totalNodes, prodCount, totalProducers, biosHost, biosPort, walletMgr, onlyBios=False, loadSystemContract=True): """Create 'prodCount' init accounts and deposits 10000000000 SYS in each. If prodCount is -1 will initialize all possible producers. Ensure nodes are inter-connected prior to this call. One way to validate this will be to check if every node has block 1.""" @@ -1187,17 +1190,18 @@ def bootstrap(totalNodes, prodCount, totalProducers, biosHost, biosPort, walletM (expectedAmount, actualAmount)) return None - contract="eosio.system" - contractDir="unittests/contracts/%s" % (contract) - wasmFile="%s.wasm" % (contract) - abiFile="%s.abi" % (contract) - Utils.Print("Publish %s contract" % (contract)) - trans=biosNode.publishContract(eosioAccount.name, contractDir, wasmFile, abiFile, waitForTransBlock=True) - if trans is None: - Utils.Print("ERROR: Failed to publish contract %s." % (contract)) - return None + if loadSystemContract: + contract="eosio.system" + contractDir="unittests/contracts/%s" % (contract) + wasmFile="%s.wasm" % (contract) + abiFile="%s.abi" % (contract) + Utils.Print("Publish %s contract" % (contract)) + trans=biosNode.publishContract(eosioAccount.name, contractDir, wasmFile, abiFile, waitForTransBlock=True) + if trans is None: + Utils.Print("ERROR: Failed to publish contract %s." % (contract)) + return None - Node.validateTransaction(trans) + Node.validateTransaction(trans) initialFunds="1000000.0000 {0}".format(CORE_SYMBOL) Utils.Print("Transfer initial fund %s to individual accounts." % (initialFunds)) From 0532e5c787372453c44814a645cc522c3e6e6020 Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Mon, 11 Mar 2019 22:07:10 -0500 Subject: [PATCH 193/680] Add test to ensure catchup lockup does not occur. 
GH #6727 --- tests/nodeos_startup_catchup.py | 97 +++++++++++++++++++++++++++++++++ 1 file changed, 97 insertions(+) create mode 100755 tests/nodeos_startup_catchup.py diff --git a/tests/nodeos_startup_catchup.py b/tests/nodeos_startup_catchup.py new file mode 100755 index 00000000000..da75a72b23b --- /dev/null +++ b/tests/nodeos_startup_catchup.py @@ -0,0 +1,97 @@ +#!/usr/bin/env python3 + +from testUtils import Utils +import testUtils +import time +from Cluster import Cluster +from WalletMgr import WalletMgr +from Node import Node +from TestHelper import AppArgs +from TestHelper import TestHelper + +import decimal +import math +import re + +############################################################### +# nodeos_startup_catchup +# Test configures a producing node and <--txn-plugins count> non-producing nodes with the +# txn_test_gen_plugin. Each non-producing node starts generating transactions and sending them +# to the producing node. +# 1) After 10 seconds a new node is started. +# 2) 10 seconds later, that node is checked to see if it has caught up to the producing node and +# that node is killed and a new node is started. +# 3) Repeat step 2, <--catchup-count - 1> more times +############################################################### + +Print=Utils.Print +errorExit=Utils.errorExit + +from core_symbol import CORE_SYMBOL + +appArgs=AppArgs() +extraArgs = appArgs.add(flag="--catchup-count", type=int, help="How many catchup-nodes to launch", default=10) +extraArgs = appArgs.add(flag="--txn-gen-nodes", type=int, help="How many transaction generator nodes", default=4) +args = TestHelper.parse_args({"--prod-count","--dump-error-details","--keep-logs","-v","--leave-running","--clean-run", + "-p","--p2p-plugin","--wallet-port"}, applicationSpecificArgs=appArgs) +Utils.Debug=args.v +pnodes=args.p if args.p > 0 else 1 +startedNonProdNodes = args.txn_gen_nodes if args.txn_gen_nodes >= 2 else 2 +cluster=Cluster(walletd=True) +dumpErrorDetails=args.dump_error_details +keepLogs=args.keep_logs +dontKill=args.leave_running +prodCount=args.prod_count if args.prod_count > 1 else 2 +killAll=args.clean_run +p2pPlugin=args.p2p_plugin +walletPort=args.wallet_port +catchupCount=args.catchup_count +totalNodes=startedNonProdNodes+pnodes+catchupCount + +walletMgr=WalletMgr(True, port=walletPort) +testSuccessful=False +killEosInstances=not dontKill +killWallet=not dontKill + +WalletdName=Utils.EosWalletName +ClientName="cleos" + +try: + TestHelper.printSystemInfo("BEGIN") + cluster.setWalletMgr(walletMgr) + + cluster.killall(allInstances=killAll) + cluster.cleanup() + specificExtraNodeosArgs={} + txnGenNodeNum=pnodes # next node after producer nodes + for nodeNum in range(txnGenNodeNum, txnGenNodeNum+startedNonProdNodes): + specificExtraNodeosArgs[nodeNum]="--plugin eosio::txn_test_gen_plugin --txn-test-gen-account-prefix txntestacct" + Print("Stand up cluster") + if cluster.launch(prodCount=prodCount, onlyBios=False, pnodes=pnodes, totalNodes=totalNodes, totalProducers=pnodes*prodCount, p2pPlugin=p2pPlugin, + useBiosBootFile=False, specificExtraNodeosArgs=specificExtraNodeosArgs, unstartedNodes=catchupCount, loadSystemContract=False) is False: + Utils.cmdError("launcher") + Utils.errorExit("Failed to stand up eos cluster.") + + Print("Validating system accounts after bootstrap") + cluster.validateAccounts(None) + + txnGenNodes=[] + for nodeNum in range(txnGenNodeNum, txnGenNodeNum+startedNonProdNodes): + txnGenNodes.append(cluster.getNode(nodeNum)) + + 
txnGenNodes[0].txnGenCreateTestAccounts(cluster.eosioAccount.name, cluster.eosioAccount.activePrivateKey) + time.sleep(20) + + for genNum in range(0, len(txnGenNodes)): + salt="%d" % genNum + txnGenNodes[genNum].txnGenStart(salt, 1000, 200) + + time.sleep(10) + + + testSuccessful=True + +finally: + TestHelper.shutdown(cluster, walletMgr, testSuccessful=testSuccessful, killEosInstances=killEosInstances, killWallet=killWallet, keepLogs=keepLogs, cleanRun=killAll, dumpErrorDetails=dumpErrorDetails) + +exit(0) From 97f777bbb073316680a2e5214ede61a1503c397c Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Fri, 22 Mar 2019 22:57:01 -0500 Subject: [PATCH 194/680] Fixed launcher setup of unstarted nodes. GH #6727. --- programs/eosio-launcher/main.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/programs/eosio-launcher/main.cpp b/programs/eosio-launcher/main.cpp index 51a0808103b..35f12b94e75 100644 --- a/programs/eosio-launcher/main.cpp +++ b/programs/eosio-launcher/main.cpp @@ -892,7 +892,7 @@ launcher_def::bind_nodes () { int extra = producers % non_bios; unsigned int i = 0; unsigned int producer_number = 0; - const auto to_not_start_node = total_nodes - unstarted_nodes; + const auto to_not_start_node = total_nodes - unstarted_nodes - 1; for (auto &h : bindings) { for (auto &inst : h.instances) { bool is_bios = inst.name == "bios"; From dd4d3a476cad1ffe8b13e09f154ffb5a5607f4de Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Fri, 22 Mar 2019 23:01:01 -0500 Subject: [PATCH 195/680] Added python script handling for unstarted nodes. GH #6727. --- tests/Cluster.py | 31 +++++++++++++++++++++++++++++++ tests/Node.py | 27 ++++++++++++++------------- 2 files changed, 45 insertions(+), 13 deletions(-) diff --git a/tests/Cluster.py b/tests/Cluster.py index 8665deffb32..debfa1464cd 100644 --- a/tests/Cluster.py +++ b/tests/Cluster.py @@ -50,6 +50,7 @@ def __init__(self, walletd=False, localCluster=True, host="localhost", port=8888 """ self.accounts={} self.nodes={} + self.unstartedNodes=[] self.localCluster=localCluster self.wallet=None self.walletd=walletd @@ -379,6 +380,9 @@ def connectGroup(group, producerNodes, bridgeNodes) : self.nodes=nodes + if unstartedNodes > 0: + self.unstartedNodes=self.discoverUnstartedLocalNodes(unstartedNodes, totalNodes) + if onlyBios: biosNode=Node(Cluster.__BiosHost, Cluster.__BiosPort, walletMgr=self.walletMgr) if not biosNode.checkPulse(): @@ -645,6 +649,16 @@ def getNode(self, nodeId=0, exitOnError=True): def getNodes(self): return self.nodes + def launchUnstarted(self, numToLaunch=1, cachePopen=False): + assert(isinstance(numToLaunch, int)) + assert(numToLaunch>0) + launchList=self.unstartedNodes[:numToLaunch] + del self.unstartedNodes[:numToLaunch] + for node in launchList: + # the node number is indexed off of the started nodes list + node.launchUnstarted(len(self.nodes), cachePopen=cachePopen) + self.nodes.append(node) + # Spread funds across accounts with transactions spread through cluster nodes. 
# Validate transactions are synchronized on root node def spreadFunds(self, source, accounts, amount=1): @@ -1485,6 +1499,23 @@ def createAccounts(self, creator, waitForTransBlock=True, stakedDeposit=1000): return True + def discoverUnstartedLocalNodes(self, unstartedNodes, totalNodes): + unstarted=[] + firstUnstartedNode=totalNodes-unstartedNodes + for nodeId in range(firstUnstartedNode, totalNodes): + unstarted.append(self.discoverUnstartedLocalNode(nodeId)) + return unstarted + + def discoverUnstartedLocalNode(self, nodeId): + startFile=Node.unstartedFile(nodeId) + with open(startFile, 'r') as file: + cmd=file.read() + Utils.Print("unstarted local node cmd: %s" % (cmd)) + p=re.compile(r'^\s*(\w+)\s*=\s*([^\s](?:.*[^\s])?)\s*$') + instance=Node(self.host, port=self.port+nodeId, pid=None, cmd=cmd, walletMgr=self.walletMgr, enableMongo=self.enableMongo, mongoHost=self.mongoHost, mongoPort=self.mongoPort, mongoDb=self.mongoDb) + if Utils.Debug: Utils.Print("Unstarted Node>", instance) + return instance + def getInfos(self, silentErrors=False, exitOnError=False): infos=[] for node in self.nodes: diff --git a/tests/Node.py b/tests/Node.py index 77c3157b5dc..7b3259ece53 100644 --- a/tests/Node.py +++ b/tests/Node.py @@ -62,7 +62,7 @@ def eosClientArgs(self): def __str__(self): #return "Host: %s, Port:%d, Pid:%s, Cmd:\"%s\"" % (self.host, self.port, self.pid, self.cmd) - return "Host: %s, Port:%d" % (self.host, self.port) + return "Host: %s, Port:%d, Pid:%s" % (self.host, self.port, self.pid) @staticmethod def validateTransaction(trans): @@ -1095,6 +1095,8 @@ def processCurlCmd(self, resource, command, payload, silentErrors=True, exitOnEr if Utils.Debug: end=time.perf_counter() Utils.Print("cmd Duration: %.3f sec" % (end-start)) + printReturn=json.dumps(rtn) if returnType==ReturnType.json else rtn + Utils.Print("cmd returned: %s" % (printReturn)) except subprocess.CalledProcessError as ex: if not silentErrors: end=time.perf_counter() @@ -1241,12 +1243,12 @@ def myFunc(): self.killed=True return True - def interruptAndVerifyExitStatus(self): + def interruptAndVerifyExitStatus(self, timeout=15): if Utils.Debug: Utils.Print("terminating node: %s" % (self.cmd)) assert self.popenProc is not None, "node: \"%s\" does not have a popenProc, this may be because it is only set after a relaunch." 
% (self.cmd) self.popenProc.send_signal(signal.SIGINT) try: - outs, _ = self.popenProc.communicate(timeout=15) + outs, _ = self.popenProc.communicate(timeout=timeout) assert self.popenProc.returncode == 0, "Expected terminating \"%s\" to have an exit status of 0, but got %d" % (self.cmd, self.popenProc.returncode) except subprocess.TimeoutExpired: Utils.errorExit("Terminate call failed on node: %s" % (self.cmd)) @@ -1376,18 +1378,17 @@ def isNodeAlive(): self.killed=False return True - def launchUnstarted(self, nodeId, cachePopen=False): + @staticmethod + def unstartedFile(nodeId): + assert(isinstance(nodeId, int)) startFile=Utils.getNodeDataDir(nodeId, "start.cmd") if not os.path.exists(startFile): - Utils.Print("Cannot launch unstarted process since %s file does not exist" % startFile) - return False - - with open(startFile, 'r') as file: - cmd=file.read() - Utils.Print("launchUnstarted cmd: %s" % (cmd)) + Utils.errorExit("Cannot find unstarted node since %s file does not exist" % startFile) + return startFile - self.launchCmd(cmd, nodeId, cachePopen) - return True + def launchUnstarted(self, nodeId, cachePopen=False): + Utils.Print("launchUnstarted cmd: %s" % (self.cmd)) + self.launchCmd(self.cmd, nodeId, cachePopen) def launchCmd(self, cmd, nodeId, cachePopen=False): dataDir=Utils.getNodeDataDir(nodeId) @@ -1401,7 +1402,7 @@ def launchCmd(self, cmd, nodeId, cachePopen=False): if cachePopen: self.popenProc=popen self.pid=popen.pid - if Utils.Debug: Utils.Print("restart Node host=%s, port=%s, pid=%s, cmd=%s" % (self.host, self.port, self.pid, self.cmd)) + if Utils.Debug: Utils.Print("start Node host=%s, port=%s, pid=%s, cmd=%s" % (self.host, self.port, self.pid, self.cmd)) def trackCmdTransaction(self, trans, ignoreNonTrans=False): if trans is None: From 03c2eaa624c45de9616d522add7be8fc9a13e3e0 Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Fri, 22 Mar 2019 23:11:05 -0500 Subject: [PATCH 196/680] Added starting up unstarted nodes and verifying catchup. GH #6727. 
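The rewritten test below replaces fixed sleeps and retry counters with waitForBlock deadlines sized in production rounds. A worked version of the constant it introduces, assuming the standard chain limits of 21 active producers, 12 consecutive blocks per producer, and 0.5 second blocks (these constants are assumptions about the configured chain, not values set by this patch):

    twoRounds=21*2*12     # blocks in two full production rounds: 21 producers * 12 blocks each * 2 = 504
    timeout=twoRounds/2   # 504 blocks at 2 blocks per second is roughly 252 seconds

Waiting on BlockType.lib with timeouts derived this way gives the whole schedule up to two rounds to advance irreversibility before the test concludes a node is stuck.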
--- tests/nodeos_startup_catchup.py | 78 ++++++++++++++++++++++++++++++--- 1 file changed, 73 insertions(+), 5 deletions(-) diff --git a/tests/nodeos_startup_catchup.py b/tests/nodeos_startup_catchup.py index da75a72b23b..bc73392c702 100755 --- a/tests/nodeos_startup_catchup.py +++ b/tests/nodeos_startup_catchup.py @@ -5,6 +5,7 @@ import time from Cluster import Cluster from WalletMgr import WalletMgr +from Node import BlockType from Node import Node from TestHelper import AppArgs from TestHelper import TestHelper @@ -31,7 +32,7 @@ appArgs=AppArgs() extraArgs = appArgs.add(flag="--catchup-count", type=int, help="How many catchup-nodes to launch", default=10) -extraArgs = appArgs.add(flag="--txn-gen-nodes", type=int, help="How many transaction generator nodes", default=4) +extraArgs = appArgs.add(flag="--txn-gen-nodes", type=int, help="How many transaction generator nodes", default=2) args = TestHelper.parse_args({"--prod-count","--dump-error-details","--keep-logs","-v","--leave-running","--clean-run", "-p","--p2p-plugin","--wallet-port"}, applicationSpecificArgs=appArgs) Utils.Debug=args.v @@ -45,7 +46,7 @@ killAll=args.clean_run p2pPlugin=args.p2p_plugin walletPort=args.wallet_port -catchupCount=args.catchup_count +catchupCount=args.catchup_count if args.catchup_count > 0 else 1 totalNodes=startedNonProdNodes+pnodes+catchupCount walletMgr=WalletMgr(True, port=walletPort) @@ -69,7 +70,6 @@ Print("Stand up cluster") if cluster.launch(prodCount=prodCount, onlyBios=False, pnodes=pnodes, totalNodes=totalNodes, totalProducers=pnodes*prodCount, p2pPlugin=p2pPlugin, useBiosBootFile=False, specificExtraNodeosArgs=specificExtraNodeosArgs, unstartedNodes=catchupCount, loadSystemContract=False) is False: - Utils.cmdError("launcher") Utils.errorExit("Failed to stand up eos cluster.") Print("Validating system accounts after bootstrap") @@ -84,11 +84,79 @@ for genNum in range(0, len(txnGenNodes)): salt="%d" % genNum - txnGenNodes[genNum].txnGenStart(salt, 1000, 200) + txnGenNodes[genNum].txnGenStart(salt, 1500, 150) + time.sleep(1) + + node0=cluster.getNode(0) + + def lib(node): + return node.getBlockNum(BlockType.lib) + + def head(node): + return node.getBlockNum(BlockType.head) time.sleep(10) + retryCountMax=100 + for catchup_num in range(0, catchupCount): + lastLibNum=lib(node0) + lastHeadNum=head(node0) + lastCatchupLibNum=None + + cluster.launchUnstarted(cachePopen=True) + retryCount=0 + # verify that production node is advancing (sanity check) + while lib(node0)<=lastLibNum: + time.sleep(4) + retryCount+=1 + # give it some more time if the head is still moving forward + if retryCount>=20 or head(node0)<=lastHeadNum: + Utils.errorExit("Node 0 failing to advance lib. Was %s, now %s." % (lastLibNum, lib(node0))) + if Utils.Debug: Utils.Print("Node 0 head was %s, now %s. Waiting for lib to advance" % (lastLibNum, lib(node0))) + lastHeadNum=head(node0) + + catchupNode=cluster.getNodes()[-1] + time.sleep(9) + lastCatchupLibNum=lib(catchupNode) + lastCatchupHeadNum=head(catchupNode) + retryCount=0 + while lib(catchupNode)<=lastCatchupLibNum: + time.sleep(5) + retryCount+=1 + # give it some more time if the head is still moving forward + if retryCount>=100 or head(catchupNode)<=lastCatchupHeadNum: + Utils.errorExit("Catchup Node %s failing to advance lib. Was %s, now %s." % + (cluster.getNodes().index(catchupNode), lastCatchupLibNum, lib(catchupNode))) + if Utils.Debug: Utils.Print("Catchup Node %s head was %s, now %s. 
Waiting for lib to advance" % (cluster.getNodes().index(catchupNode), lastCatchupLibNum, lib(catchupNode))) + lastCatchupHeadNum=head(catchupNode) + + retryCount=0 + lastLibNum=lib(node0) + trailingLibNum=lastLibNum-lib(catchupNode) + lastHeadNum=head(node0) + libNotMovingCount=0 + while trailingLibNum>0: + delay=5 + time.sleep(delay) + libMoving=lib(catchupNode)>lastCatchupLibNum + if libMoving: + trailingLibNum=lastLibNum-lib(catchupNode) + libNotMovingCount=0 + else: + libNotMovingCount+=1 + if Utils.Debug and libNotMovingCount%10==0: + Utils.Print("Catchup node %s lib has not moved for %s seconds, lib is %s" % + (cluster.getNodes().index(catchupNode), (delay*libNotMovingCount), lib(catchupNode))) + retryCount+=1 + # give it some more time if the head is still moving forward + if retryCount>=retryCountMax or head(catchupNode)<=lastCatchupHeadNum or libNotMovingCount>100: + Utils.errorExit("Catchup Node %s failing to advance lib along with node 0. Catchup node lib is %s, node 0 lib is %s." % + (cluster.getNodes().index(catchupNode), lib(catchupNode), lastLibNum)) + if Utils.Debug: Utils.Print("Catchup Node %s head is %s, node 0 head is %s. Waiting for lib to advance from %s to %s" % (cluster.getNodes().index(catchupNode), head(catchupNode), head(node0), lib(catchupNode), lastLibNum)) + lastCatchupHeadNum=head(catchupNode) + + catchupNode.interruptAndVerifyExitStatus(60) + retryCountMax*=3 - testSuccessful=True finally: From fef0d2acb7e19193678d28692a9943cc95267f15 Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Fri, 22 Mar 2019 23:15:08 -0500 Subject: [PATCH 197/680] Changed api to return a json status to indicate what happened. GH #6727. --- .../txn_test_gen_plugin.cpp | 25 ++++++++++++------- 1 file changed, 16 insertions(+), 9 deletions(-) diff --git a/plugins/txn_test_gen_plugin/txn_test_gen_plugin.cpp b/plugins/txn_test_gen_plugin/txn_test_gen_plugin.cpp index 780127efc15..670114ea85c 100755 --- a/plugins/txn_test_gen_plugin/txn_test_gen_plugin.cpp +++ b/plugins/txn_test_gen_plugin/txn_test_gen_plugin.cpp @@ -28,9 +28,13 @@ using namespace eosio::testing; namespace eosio { namespace detail { struct txn_test_gen_empty {}; + struct txn_test_gen_status { + string status; + }; }} FC_REFLECT(eosio::detail::txn_test_gen_empty, ); +FC_REFLECT(eosio::detail::txn_test_gen_status, (status)); namespace eosio { @@ -53,8 +57,8 @@ using io_work_t = boost::asio::executor_work_guard(); \ - api_handle->call_name(vs.at(0).as(), vs.at(1).as(), vs.at(2).as()); \ - eosio::detail::txn_test_gen_empty result; + auto status = api_handle->call_name(vs.at(0).as(), vs.at(1).as(), vs.at(2).as()); \ + eosio::detail::txn_test_gen_status result = { status }; #define INVOKE_V_R_R(api_handle, call_name, in_param0, in_param1) \ const auto& vs = fc::json::json::from_string(body).as(); \ @@ -179,7 +183,7 @@ struct txn_test_gen_plugin_impl { trx.actions.emplace_back(vector{{creator,"active"}}, newaccount{creator, newaccountT, owner_auth, active_auth}); } - trx.expiration = cc.head_block_time() + fc::seconds(30); + trx.expiration = cc.head_block_time() + fc::seconds(180); trx.set_reference_block(cc.head_block_id()); trx.sign(creator_priv_key, chainid); trxs.emplace_back(std::move(trx)); @@ -249,7 +253,7 @@ struct txn_test_gen_plugin_impl { trx.actions.push_back(act); } - trx.expiration = cc.head_block_time() + fc::seconds(30); + trx.expiration = cc.head_block_time() + fc::seconds(180); trx.set_reference_block(cc.head_block_id()); trx.max_net_usage_words = 5000; trx.sign(txn_test_receiver_C_priv_key, chainid); @@ 
-263,15 +267,17 @@ struct txn_test_gen_plugin_impl { push_transactions(std::move(trxs), next); } - void start_generation(const std::string& salt, const uint64_t& period, const uint64_t& batch_size) { + string start_generation(const std::string& salt, const uint64_t& period, const uint64_t& batch_size) { + ilog("Starting transaction test plugin"); if(running) - throw fc::exception(fc::invalid_operation_exception_code); + return "start_generation already running"; if(period < 1 || period > 2500) - throw fc::exception(fc::invalid_operation_exception_code); + return "period must be between 1 and 2500"; if(batch_size < 1 || batch_size > 250) - throw fc::exception(fc::invalid_operation_exception_code); + return "batch_size must be between 1 and 250"; if(batch_size & 1) - throw fc::exception(fc::invalid_operation_exception_code); + return "batch_size must be even"; + ilog("Starting transaction test plugin valid"); running = true; @@ -312,6 +318,7 @@ struct txn_test_gen_plugin_impl { boost::asio::post( *gen_ioc, [this]() { arm_timer(boost::asio::high_resolution_timer::clock_type::now()); }); + return "success"; } void arm_timer(boost::asio::high_resolution_timer::time_point s) { From 640257efc443e55a17984807cda4710fb34ce96e Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Sat, 23 Mar 2019 00:03:42 -0500 Subject: [PATCH 198/680] Added nodeos_startup_catchup to long running tests. GH #6727. --- tests/CMakeLists.txt | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index ae9b36bcd68..0eea67cbce3 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -35,6 +35,7 @@ configure_file(${CMAKE_CURRENT_SOURCE_DIR}/distributed-transactions-test.py ${CM configure_file(${CMAKE_CURRENT_SOURCE_DIR}/distributed-transactions-remote-test.py ${CMAKE_CURRENT_BINARY_DIR}/distributed-transactions-remote-test.py COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/sample-cluster-map.json ${CMAKE_CURRENT_BINARY_DIR}/sample-cluster-map.json COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/restart-scenarios-test.py ${CMAKE_CURRENT_BINARY_DIR}/restart-scenarios-test.py COPYONLY) +configure_file(${CMAKE_CURRENT_SOURCE_DIR}/nodeos_startup_catchup.py ${CMAKE_CURRENT_BINARY_DIR}/nodeos_startup_catchup.py COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/nodeos_forked_chain_test.py ${CMAKE_CURRENT_BINARY_DIR}/nodeos_forked_chain_test.py COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/nodeos_run_test.py ${CMAKE_CURRENT_BINARY_DIR}/nodeos_run_test.py COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/nodeos_run_remote_test.py ${CMAKE_CURRENT_BINARY_DIR}/nodeos_run_remote_test.py COPYONLY) @@ -106,6 +107,8 @@ set_property(TEST nodeos_voting_bnet_lr_test PROPERTY LABELS long_running_tests) add_test(NAME nodeos_under_min_avail_ram_lr_test COMMAND tests/nodeos_under_min_avail_ram.py -v --wallet-port 9904 --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST nodeos_under_min_avail_ram_lr_test PROPERTY LABELS long_running_tests) +add_test(NAME nodeos_startup_catchup_lr_test COMMAND tests/nodeos_startup_catchup.py -v --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +set_property(TEST nodeos_startup_catchup_lr_test PROPERTY LABELS long_running_tests) if(ENABLE_COVERAGE_TESTING) From fcd01c82b01574f474ceaac068d84882a9cff4bb Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Sat, 23 Mar 2019 15:55:56 -0500 Subject: [PATCH 199/680] Fixed interruptAndVerifyExitStatus to track that it was killed. GH #6727. 
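Without this fix, a node stopped through interruptAndVerifyExitStatus kept its stale pid, so a later restart tripped the assert(self.pid is None) guard at the top of relaunch(). A sketch of the stop/restart cycle the catchup test performs (both calls appear verbatim in the test):

    catchupNode.interruptAndVerifyExitStatus(60)   # SIGINT, wait up to 60s for a clean exit, now also marks the node killed
    catchupNode.relaunch(catchupNodeNum)           # passes the pid-is-None / killed guards and restarts the node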
--- tests/Node.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/tests/Node.py b/tests/Node.py index 7b3259ece53..3e31c396d5f 100644 --- a/tests/Node.py +++ b/tests/Node.py @@ -1253,6 +1253,10 @@ def interruptAndVerifyExitStatus(self, timeout=15): except subprocess.TimeoutExpired: Utils.errorExit("Terminate call failed on node: %s" % (self.cmd)) + # mark node as killed + self.pid=None + self.killed=True + def verifyAlive(self, silent=False): if not silent and Utils.Debug: Utils.Print("Checking if node(pid=%s) is alive(killed=%s): %s" % (self.pid, self.killed, self.cmd)) if self.killed or self.pid is None: @@ -1318,7 +1322,7 @@ def getNextCleanProductionCycle(self, trans): # TBD: make nodeId an internal property # pylint: disable=too-many-locals - def relaunch(self, nodeId, chainArg, newChain=False, timeout=Utils.systemWaitTimeout, addOrSwapFlags=None, cachePopen=False): + def relaunch(self, nodeId, chainArg=None, newChain=False, timeout=Utils.systemWaitTimeout, addOrSwapFlags=None, cachePopen=False): assert(self.pid is None) assert(self.killed) From 2790d66b81dd021b335249dcb360ddf3eea5d2ba Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Sat, 23 Mar 2019 16:00:06 -0500 Subject: [PATCH 200/680] Added catchup after relaunching the catchup node and refactored test using framework methods. GH #6727. --- tests/nodeos_startup_catchup.py | 104 ++++++++++++-------------------- 1 file changed, 39 insertions(+), 65 deletions(-) diff --git a/tests/nodeos_startup_catchup.py b/tests/nodeos_startup_catchup.py index bc73392c702..c7f1fa80ae4 100755 --- a/tests/nodeos_startup_catchup.py +++ b/tests/nodeos_startup_catchup.py @@ -20,9 +20,11 @@ # txn_test_gen_plugin. Each non-producing node starts generating transactions and sending them # to the producing node. # 1) After 10 seconds a new node is started. -# 2) 10 seconds later, that node is checked to see if it has caught up to the producing node and -# that node is killed and a new node is started. 
-# 3) Repeat step 2, <--catchup-count - 1> more times +# 2) the node is allowed to catch up to the producing node +# 3) that node is killed +# 4) restart the node +# 5) the node is allowed to catch up to the producing node +# 3) Repeat steps 2-5, <--catchup-count - 1> more times ############################################################### Print=Utils.Print @@ -80,14 +82,6 @@ txnGenNodes.append(cluster.getNode(nodeNum)) txnGenNodes[0].txnGenCreateTestAccounts(cluster.eosioAccount.name, cluster.eosioAccount.activePrivateKey) - time.sleep(20) - - for genNum in range(0, len(txnGenNodes)): - salt="%d" % genNum - txnGenNodes[genNum].txnGenStart(salt, 1500, 150) - time.sleep(1) - - node0=cluster.getNode(0) def lib(node): return node.getBlockNum(BlockType.lib) @@ -95,67 +89,47 @@ def lib(node): def head(node): return node.getBlockNum(BlockType.head) - time.sleep(10) - retryCountMax=100 - for catchup_num in range(0, catchupCount): - lastLibNum=lib(node0) - lastHeadNum=head(node0) - lastCatchupLibNum=None + node0=cluster.getNode(0) + blockNum=head(node0) + node0.waitForBlock(blockNum, blockType=BlockType.lib) + + for genNum in range(0, len(txnGenNodes)): + salt="%d" % genNum + txnGenNodes[genNum].txnGenStart(salt, 1500, 150) + time.sleep(1) + + blockNum=head(node0) + node0.waitForBlock(blockNum+20) + + twoRounds=21*2*12 + for catchup_num in range(0, catchupCount): cluster.launchUnstarted(cachePopen=True) - retryCount=0 - # verify that production node is advancing (sanity check) - while lib(node0)<=lastLibNum: - time.sleep(4) - retryCount+=1 - # give it some more time if the head is still moving forward - if retryCount>=20 or head(node0)<=lastHeadNum: - Utils.errorExit("Node 0 failing to advance lib. Was %s, now %s." % (lastLibNum, lib(node0))) - if Utils.Debug: Utils.Print("Node 0 head was %s, now %s. Waiting for lib to advance" % (lastLibNum, lib(node0))) - lastHeadNum=head(node0) + lastLibNum=lib(node0) + # verify producer lib is still advancing + node0.waitForBlock(lastLibNum+1, timeout=twoRounds/2, blockType=BlockType.lib) catchupNode=cluster.getNodes()[-1] - time.sleep(9) + catchupNodeNum=cluster.getNodes().index(catchupNode) lastCatchupLibNum=lib(catchupNode) - lastCatchupHeadNum=head(catchupNode) - retryCount=0 - while lib(catchupNode)<=lastCatchupLibNum: - time.sleep(5) - retryCount+=1 - # give it some more time if the head is still moving forward - if retryCount>=100 or head(catchupNode)<=lastCatchupHeadNum: - Utils.errorExit("Catchup Node %s failing to advance lib. Was %s, now %s." % - (cluster.getNodes().index(catchupNode), lastCatchupLibNum, lib(catchupNode))) - if Utils.Debug: Utils.Print("Catchup Node %s head was %s, now %s. 
Waiting for lib to advance" % (cluster.getNodes().index(catchupNode), lastCatchupLibNum, lib(catchupNode))) - lastCatchupHeadNum=head(catchupNode) - - retryCount=0 - lastLibNum=lib(node0) - trailingLibNum=lastLibNum-lib(catchupNode) - lastHeadNum=head(node0) - libNotMovingCount=0 - while trailingLibNum>0: - delay=5 - time.sleep(delay) - libMoving=lib(catchupNode)>lastCatchupLibNum - if libMoving: - trailingLibNum=lastLibNum-lib(catchupNode) - libNotMovingCount=0 - else: - libNotMovingCount+=1 - if Utils.Debug and libNotMovingCount%10==0: - Utils.Print("Catchup node %s lib has not moved for %s seconds, lib is %s" % - (cluster.getNodes().index(catchupNode), (delay*libNotMovingCount), lib(catchupNode))) - retryCount+=1 - # give it some more time if the head is still moving forward - if retryCount>=retryCountMax or head(catchupNode)<=lastCatchupHeadNum or libNotMovingCount>100: - Utils.errorExit("Catchup Node %s failing to advance lib along with node 0. Catchup node lib is %s, node 0 lib is %s." % - (cluster.getNodes().index(catchupNode), lib(catchupNode), lastLibNum)) - if Utils.Debug: Utils.Print("Catchup Node %s head is %s, node 0 head is %s. Waiting for lib to advance from %s to %s" % (cluster.getNodes().index(catchupNode), head(catchupNode), head(node0), lib(catchupNode), lastLibNum)) - lastCatchupHeadNum=head(catchupNode) + # verify lib is advancing (before we wait for it to have to catchup with producer) + catchupNode.waitForBlock(lastCatchupLibNum+1, timeout=twoRounds/2, blockType=BlockType.lib) + + numBlocksToCatchup=(lastLibNum-lastCatchupLibNum-1)+twoRounds + catchupNode.waitForBlock(lastLibNum, timeout=(numBlocksToCatchup)/2, blockType=BlockType.lib) catchupNode.interruptAndVerifyExitStatus(60) - retryCountMax*=3 + + catchupNode.relaunch(catchupNodeNum) + lastCatchupLibNum=lib(catchupNode) + # verify catchup node is advancing to producer + catchupNode.waitForBlock(lastCatchupLibNum+1, timeout=twoRounds/2, blockType=BlockType.lib) + + lastLibNum=lib(node0) + # verify producer lib is still advancing + node0.waitForBlock(lastLibNum+1, timeout=twoRounds/2, blockType=BlockType.lib) + # verify catchup node is advancing to producer + catchupNode.waitForBlock(lastLibNum, timeout=(numBlocksToCatchup)/2, blockType=BlockType.lib) testSuccessful=True From 722ac062fc3cc1469328bfc273c6b0a2964790a6 Mon Sep 17 00:00:00 2001 From: UMU Date: Mon, 25 Mar 2019 15:31:23 +0800 Subject: [PATCH 201/680] Improve for MongoDB sharding --- plugins/mongo_db_plugin/mongo_db_plugin.cpp | 29 ++++++++++++--------- 1 file changed, 17 insertions(+), 12 deletions(-) diff --git a/plugins/mongo_db_plugin/mongo_db_plugin.cpp b/plugins/mongo_db_plugin/mongo_db_plugin.cpp index 8131b6a2bb2..2ba100bdc84 100644 --- a/plugins/mongo_db_plugin/mongo_db_plugin.cpp +++ b/plugins/mongo_db_plugin/mongo_db_plugin.cpp @@ -1462,39 +1462,44 @@ void mongo_db_plugin_impl::init() { } try { + // Due to the vast amounts of data, we suggest MongoDB administrators: + // 1. enableSharding database (default to EOS) + // 2. shardCollection: blocks, action_traces, transaction_traces, especially action_traces + // 3. Use compound index with shard key (default to _id), to improve query performance. 
+ // blocks indexes auto blocks = mongo_conn[db_name][blocks_col]; - blocks.create_index( bsoncxx::from_json( R"xxx({ "block_num" : 1 })xxx" )); - blocks.create_index( bsoncxx::from_json( R"xxx({ "block_id" : 1 })xxx" )); + blocks.create_index( bsoncxx::from_json( R"xxx({ "block_num" : 1, "_id" : 1 })xxx" )); + blocks.create_index( bsoncxx::from_json( R"xxx({ "block_id" : 1, "_id" : 1 })xxx" )); auto block_states = mongo_conn[db_name][block_states_col]; - block_states.create_index( bsoncxx::from_json( R"xxx({ "block_num" : 1 })xxx" )); - block_states.create_index( bsoncxx::from_json( R"xxx({ "block_id" : 1 })xxx" )); + block_states.create_index( bsoncxx::from_json( R"xxx({ "block_num" : 1, "_id" : 1 })xxx" )); + block_states.create_index( bsoncxx::from_json( R"xxx({ "block_id" : 1, "_id" : 1 })xxx" )); // accounts indexes - accounts.create_index( bsoncxx::from_json( R"xxx({ "name" : 1 })xxx" )); + accounts.create_index( bsoncxx::from_json( R"xxx({ "name" : 1, "_id" : 1 })xxx" )); // transactions indexes auto trans = mongo_conn[db_name][trans_col]; - trans.create_index( bsoncxx::from_json( R"xxx({ "trx_id" : 1 })xxx" )); + trans.create_index( bsoncxx::from_json( R"xxx({ "trx_id" : 1, "_id" : 1 })xxx" )); auto trans_trace = mongo_conn[db_name][trans_traces_col]; - trans_trace.create_index( bsoncxx::from_json( R"xxx({ "id" : 1 })xxx" )); + trans_trace.create_index( bsoncxx::from_json( R"xxx({ "id" : 1, "_id" : 1 })xxx" )); // action traces indexes auto action_traces = mongo_conn[db_name][action_traces_col]; - action_traces.create_index( bsoncxx::from_json( R"xxx({ "block_num" : 1 })xxx" )); + action_traces.create_index( bsoncxx::from_json( R"xxx({ "block_num" : 1, "_id" : 1 })xxx" )); // pub_keys indexes auto pub_keys = mongo_conn[db_name][pub_keys_col]; - pub_keys.create_index( bsoncxx::from_json( R"xxx({ "account" : 1, "permission" : 1 })xxx" )); - pub_keys.create_index( bsoncxx::from_json( R"xxx({ "public_key" : 1 })xxx" )); + pub_keys.create_index( bsoncxx::from_json( R"xxx({ "account" : 1, "permission" : 1, "_id" : 1 })xxx" )); + pub_keys.create_index( bsoncxx::from_json( R"xxx({ "public_key" : 1, "_id" : 1 })xxx" )); // account_controls indexes auto account_controls = mongo_conn[db_name][account_controls_col]; account_controls.create_index( - bsoncxx::from_json( R"xxx({ "controlled_account" : 1, "controlled_permission" : 1 })xxx" )); - account_controls.create_index( bsoncxx::from_json( R"xxx({ "controlling_account" : 1 })xxx" )); + bsoncxx::from_json( R"xxx({ "controlled_account" : 1, "controlled_permission" : 1, "_id" : 1 })xxx" )); + account_controls.create_index( bsoncxx::from_json( R"xxx({ "controlling_account" : 1, "_id" : 1 })xxx" )); } catch (...) 
{ handle_mongo_exception( "create indexes", __LINE__ ); From 77f519f20bade5917ff7a9e4663ac38c648b7e78 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Tue, 26 Mar 2019 07:17:51 -0500 Subject: [PATCH 202/680] Update fc to fc with set_os_thread_name --- libraries/fc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/fc b/libraries/fc index 0c348cc9af4..73f2d256ed0 160000 --- a/libraries/fc +++ b/libraries/fc @@ -1 +1 @@ -Subproject commit 0c348cc9af47d71af57e6926fd64848594a78658 +Subproject commit 73f2d256ed04d6ad0e4b2ac2507b0e7981c51803 From d2c8a7e076e23b9f1f5cbe77383a6d90a5bb137f Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Tue, 26 Mar 2019 10:17:17 -0500 Subject: [PATCH 203/680] Name bnet threads --- plugins/bnet_plugin/bnet_plugin.cpp | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/plugins/bnet_plugin/bnet_plugin.cpp b/plugins/bnet_plugin/bnet_plugin.cpp index b788d833503..08d2091040f 100644 --- a/plugins/bnet_plugin/bnet_plugin.cpp +++ b/plugins/bnet_plugin/bnet_plugin.cpp @@ -51,6 +51,7 @@ #include #include +#include #include #include @@ -1398,7 +1399,13 @@ namespace eosio { my->_socket_threads.reserve( my->_num_threads ); for( auto i = 0; i < my->_num_threads; ++i ) { - my->_socket_threads.emplace_back( [&ioc]{ wlog( "start thread" ); ioc.run(); wlog( "end thread" ); } ); + my->_socket_threads.emplace_back( [&ioc, i]{ + std::string tn = "bnet-" + std::to_string( i ); + fc::set_os_thread_name( tn ); + wlog( "start thread" ); + ioc.run(); + wlog( "end thread" ); + } ); } for( const auto& peer : my->_connect_to_peers ) { From f4889220e68ad1df90ece9976d362c3e2f5ecf4b Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Tue, 26 Mar 2019 10:17:34 -0500 Subject: [PATCH 204/680] Name http threads --- plugins/http_plugin/http_plugin.cpp | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/plugins/http_plugin/http_plugin.cpp b/plugins/http_plugin/http_plugin.cpp index 7e205736874..fe2b31472e7 100644 --- a/plugins/http_plugin/http_plugin.cpp +++ b/plugins/http_plugin/http_plugin.cpp @@ -522,7 +522,11 @@ namespace eosio { my->server_ioc = std::make_shared(); my->server_ioc_work.emplace( boost::asio::make_work_guard(*my->server_ioc) ); for( uint16_t i = 0; i < my->thread_pool_size; ++i ) { - boost::asio::post( *my->thread_pool, [ioc = my->server_ioc]() { ioc->run(); } ); + boost::asio::post( *my->thread_pool, [ioc = my->server_ioc, i]() { + std::string tn = "http-" + std::to_string( i ); + fc::set_os_thread_name( tn ); + ioc->run(); + } ); } if(my->listen_endpoint) { From ae9c3c9d3e1cc7d4001a4bd142d710cab6d5760e Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Tue, 26 Mar 2019 10:17:40 -0500 Subject: [PATCH 205/680] Name mongo_db_plugin consume thread --- plugins/mongo_db_plugin/mongo_db_plugin.cpp | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/plugins/mongo_db_plugin/mongo_db_plugin.cpp b/plugins/mongo_db_plugin/mongo_db_plugin.cpp index 8131b6a2bb2..767d3b4f558 100644 --- a/plugins/mongo_db_plugin/mongo_db_plugin.cpp +++ b/plugins/mongo_db_plugin/mongo_db_plugin.cpp @@ -10,6 +10,7 @@ #include #include +#include #include #include @@ -1523,7 +1524,10 @@ void mongo_db_plugin_impl::init() { ilog("starting db plugin thread"); - consume_thread = boost::thread([this] { consume_blocks(); }); + consume_thread = boost::thread([this] { + fc::set_os_thread_name( "mongodb" ); + consume_blocks(); + }); startup = false; } From df67f7d68670538303266dc49f20cd7a1b139778 Mon Sep 17 00:00:00 2001 From: Kevin 
Heifner Date: Tue, 26 Mar 2019 10:18:27 -0500 Subject: [PATCH 206/680] Name main application thread --- programs/nodeos/main.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/programs/nodeos/main.cpp b/programs/nodeos/main.cpp index 7034a03858a..403b5c2b317 100644 --- a/programs/nodeos/main.cpp +++ b/programs/nodeos/main.cpp @@ -84,6 +84,7 @@ enum return_codes { int main(int argc, char** argv) { try { + fc::set_os_thread_name( "main" ); app().set_version(eosio::nodeos::config::version); auto root = fc::app_path(); From 804da252887981bb988c18abada21546d5e90151 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Tue, 26 Mar 2019 10:20:04 -0500 Subject: [PATCH 207/680] Name net_plugin server_ioc threads --- plugins/net_plugin/net_plugin.cpp | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index a26353ab387..c8e7bf20a6f 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -18,6 +18,7 @@ #include #include #include +#include #include #include #include @@ -3016,7 +3017,11 @@ namespace eosio { my->server_ioc_work.emplace( boost::asio::make_work_guard( *my->server_ioc ) ); // currently thread_pool only used for server_ioc for( uint16_t i = 0; i < my->thread_pool_size; ++i ) { - boost::asio::post( *my->thread_pool, [ioc = my->server_ioc]() { ioc->run(); } ); + boost::asio::post( *my->thread_pool, [ioc = my->server_ioc, i]() { + std::string tn = "net-" + std::to_string( i ); + fc::set_os_thread_name( tn ); + ioc->run(); + } ); } my->resolver = std::make_shared( std::ref( *my->server_ioc )); From 6c79f2ee270484c3ff352d7146ba41194f3b5e69 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Tue, 26 Mar 2019 10:20:39 -0500 Subject: [PATCH 208/680] Name all threads in chain controller thread pool --- libraries/chain/controller.cpp | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp index f3b0a841981..9ee8626a77f 100644 --- a/libraries/chain/controller.cpp +++ b/libraries/chain/controller.cpp @@ -21,6 +21,7 @@ #include #include +#include #include #include @@ -1727,7 +1728,25 @@ void controller::add_indices() { my->add_indices(); } +void set_thread_name( boost::asio::thread_pool& tp, uint16_t i, uint16_t sz ) { + std::string tn = "chain-" + std::to_string( i ); + fc::set_os_thread_name( tn ); + ++i; + if( i < sz ) { + // post recursively so we consume all the threads + auto fut = eosio::chain::async_thread_pool( tp, [&tp, i, sz]() { + set_thread_name( tp, i, sz ); + }); + fut.wait(); + } +} + void controller::startup( std::function shutdown, const snapshot_reader_ptr& snapshot ) { + // name threads in thread pool for logger + boost::asio::post( get_thread_pool(), [&tp = get_thread_pool(), sz = my->conf.thread_pool_size]() { + set_thread_name( tp, 0, sz ); + }); + my->head = my->fork_db.head(); if( snapshot ) { ilog( "Starting initialization from snapshot, this may take a significant amount of time" ); From 9a34ba52a216fb34a37f95a223fe943eb1dfb48e Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Tue, 26 Mar 2019 10:21:16 -0500 Subject: [PATCH 209/680] Name all threads in producer thread pool --- plugins/producer_plugin/producer_plugin.cpp | 24 +++++++++++++++++++-- 1 file changed, 22 insertions(+), 2 deletions(-) diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp index a35fa34a9c5..0f9fc79ccea 100644 --- 
a/plugins/producer_plugin/producer_plugin.cpp +++ b/plugins/producer_plugin/producer_plugin.cpp @@ -8,9 +8,11 @@ #include #include #include +#include #include #include +#include #include #include @@ -620,6 +622,19 @@ make_keosd_signature_provider(const std::shared_ptr& impl, }; } +void set_thread_name( boost::asio::thread_pool& tp, uint16_t i, uint16_t sz ) { + std::string tn = "prod-" + std::to_string( i ); + fc::set_os_thread_name( tn ); + ++i; + if( i < sz ) { + // post recursively so we consume all the threads + auto fut = eosio::chain::async_thread_pool( tp, [&tp, i, sz]() { + set_thread_name( tp, i, sz ); + }); + fut.wait(); + } +} + void producer_plugin::plugin_initialize(const boost::program_options::variables_map& options) { try { my->chain_plug = app().find_plugin(); @@ -690,6 +705,11 @@ void producer_plugin::plugin_initialize(const boost::program_options::variables_ "producer-threads ${num} must be greater than 0", ("num", thread_pool_size)); my->_thread_pool.emplace( thread_pool_size ); + // name threads in thread pool for logger + boost::asio::post( *my->_thread_pool, [&tp = *my->_thread_pool, sz = thread_pool_size]() { + set_thread_name( tp, 0, sz ); + }); + if( options.count( "snapshots-dir" )) { auto sd = options.at( "snapshots-dir" ).as(); if( sd.is_relative()) { @@ -738,10 +758,10 @@ void producer_plugin::plugin_initialize(const boost::program_options::variables_ void producer_plugin::plugin_startup() { try { - handle_sighup(); // Sets loggers - ilog("producer plugin: plugin_startup() begin"); + handle_sighup(); // Sets loggers + chain::controller& chain = my->chain_plug->chain(); EOS_ASSERT( my->_producers.empty() || chain.get_read_mode() == chain::db_read_mode::SPECULATIVE, plugin_config_exception, "node cannot have any producer-name configured because block production is impossible when read_mode is not \"speculative\"" ); From bf1bae2822d8ebb017d40926d7a34b04a1d959f2 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Tue, 26 Mar 2019 10:49:03 -0500 Subject: [PATCH 210/680] Revert move of ilog message --- plugins/producer_plugin/producer_plugin.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp index 0f9fc79ccea..0754d1248c1 100644 --- a/plugins/producer_plugin/producer_plugin.cpp +++ b/plugins/producer_plugin/producer_plugin.cpp @@ -758,10 +758,10 @@ void producer_plugin::plugin_initialize(const boost::program_options::variables_ void producer_plugin::plugin_startup() { try { - ilog("producer plugin: plugin_startup() begin"); - handle_sighup(); // Sets loggers + ilog("producer plugin: plugin_startup() begin"); + chain::controller& chain = my->chain_plug->chain(); EOS_ASSERT( my->_producers.empty() || chain.get_read_mode() == chain::db_read_mode::SPECULATIVE, plugin_config_exception, "node cannot have any producer-name configured because block production is impossible when read_mode is not \"speculative\"" ); From 32540b3eea84928e5f45740ee7ef51943f85d3a6 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Tue, 26 Mar 2019 12:10:59 -0500 Subject: [PATCH 211/680] Fix for tests which were destroying controller before all set_thread_name finished causing deadlock. 
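Before the diffstat, a self-contained sketch of the naming pattern that patches 208, 209, and 211 converge on: the first task names its thread, then recursively posts and blocks so each successive worker is occupied while it names itself, and the original caller waits on the outermost future so the pool cannot be torn down mid-naming (the deadlock this patch fixes). Here `async_pool` and the local `set_os_thread_name` stub are assumptions standing in for `eosio::chain::async_thread_pool` and `fc::set_os_thread_name`; the sketch does not link against fc or eosio::chain.

```
#include <boost/asio/thread_pool.hpp>
#include <boost/asio/post.hpp>
#include <cstdint>
#include <future>
#include <iostream>
#include <memory>
#include <string>

void set_os_thread_name( const std::string& name ) {
   std::cout << "naming thread: " << name << std::endl; // stand-in for the real OS call
}

template<typename F>
std::future<void> async_pool( boost::asio::thread_pool& pool, F&& f ) {
   // mirrors the shape of eosio::chain::async_thread_pool: post work, return a future
   auto task = std::make_shared<std::packaged_task<void()>>( std::forward<F>( f ) );
   auto fut  = task->get_future();
   boost::asio::post( pool, [task]() { (*task)(); } );
   return fut;
}

void name_threads( boost::asio::thread_pool& pool, uint16_t i, uint16_t sz ) {
   set_os_thread_name( "pool-" + std::to_string( i ) );
   if( ++i < sz ) {
      // post recursively so every worker is occupied exactly once; blocking on
      // the future parks this (already named) thread until the rest are named
      auto fut = async_pool( pool, [&pool, i, sz]() { name_threads( pool, i, sz ); } );
      fut.wait();
   }
}

int main() {
   constexpr uint16_t sz = 4;
   boost::asio::thread_pool pool( sz );
   // waiting on the outermost future is the essence of the patch 211 fix: the
   // pool (or the controller owning it) must not be destroyed while naming is in flight
   auto fut = async_pool( pool, [&pool]() { name_threads( pool, 0, sz ); } );
   fut.wait();
   pool.join();
}
```

The blocking wait is what guarantees each of the `sz` recursive posts lands on a distinct worker thread, so every thread in the pool gets named exactly once.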
--- libraries/chain/controller.cpp | 3 ++- plugins/producer_plugin/producer_plugin.cpp | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp index 9ee8626a77f..fc71d06a42a 100644 --- a/libraries/chain/controller.cpp +++ b/libraries/chain/controller.cpp @@ -1743,9 +1743,10 @@ void set_thread_name( boost::asio::thread_pool& tp, uint16_t i, uint16_t sz ) { void controller::startup( std::function shutdown, const snapshot_reader_ptr& snapshot ) { // name threads in thread pool for logger - boost::asio::post( get_thread_pool(), [&tp = get_thread_pool(), sz = my->conf.thread_pool_size]() { + auto fut = eosio::chain::async_thread_pool( get_thread_pool(), [&tp = get_thread_pool(), sz = my->conf.thread_pool_size]() { set_thread_name( tp, 0, sz ); }); + fut.wait(); my->head = my->fork_db.head(); if( snapshot ) { diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp index 0754d1248c1..84fb3866012 100644 --- a/plugins/producer_plugin/producer_plugin.cpp +++ b/plugins/producer_plugin/producer_plugin.cpp @@ -706,9 +706,10 @@ void producer_plugin::plugin_initialize(const boost::program_options::variables_ my->_thread_pool.emplace( thread_pool_size ); // name threads in thread pool for logger - boost::asio::post( *my->_thread_pool, [&tp = *my->_thread_pool, sz = thread_pool_size]() { + auto fut = eosio::chain::async_thread_pool( *my->_thread_pool, [&tp = *my->_thread_pool, sz = thread_pool_size]() { set_thread_name( tp, 0, sz ); }); + fut.wait(); if( options.count( "snapshots-dir" )) { auto sd = options.at( "snapshots-dir" ).as(); From bb0646b62d2fbdc17db38c986b364250e880ff52 Mon Sep 17 00:00:00 2001 From: Joseph J Guerra <8146030+josephjguerra@users.noreply.github.com> Date: Tue, 26 Mar 2019 14:06:52 -0400 Subject: [PATCH 212/680] Create CONTRIBUTING.md --- CONTRIBUTING.md | 148 ++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 148 insertions(+) create mode 100644 CONTRIBUTING.md diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 00000000000..40ecbf9cea8 --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,148 @@ +# Contributing to eos + +Interested in contributing? That's awesome! Here are some guidelines to get started quickly and easily: + +- [Reporting An Issue](#reporting-an-issue) + - [Bug Reports](#bug-reports) + - [Feature Requests](#feature-requests) + - [Change Requests](#change-requests) +- [Working on eos](#working-on-eos) + - [Feature Branches](#feature-branches) + - [Submitting Pull Requests](#submitting-pull-requests) + - [Testing and Quality Assurance](#testing-and-quality-assurance) +- [Conduct](#conduct) +- [Contributor License & Acknowledgments](#contributor-license--acknowledgments) +- [References](#references) + +## Reporting An Issue + +If you're about to raise an issue because you think you've found a problem with eos, or you'd like to make a request for a new feature in the codebase, or any other reason… please read this first. + +The GitHub issue tracker is the preferred channel for [bug reports](#bug-reports), [feature requests](#feature-requests), and [submitting pull requests](#submitting-pull-requests), but please respect the following restrictions: + +* Please **search for existing issues**. Help us keep duplicate issues to a minimum by checking to see if someone has already reported your problem or requested your idea. + +* Please **be civil**. Keep the discussion on topic and respect the opinions of others. 
See also our [Contributor Code of Conduct](#conduct). + +### Bug Reports + +A bug is a _demonstrable problem_ that is caused by the code in the repository. Good bug reports are extremely helpful - thank you! + +Guidelines for bug reports: + +1. **Use the GitHub issue search** — check if the issue has already been + reported. + +1. **Check if the issue has been fixed** — look for [closed issues in the + current milestone](https://github.com/EOSIO/eos/issues?q=is%3Aissue+is%3Aclosed) or try to reproduce it + using the latest `develop` branch. + +A good bug report shouldn't leave others needing to chase you up for more information. Be sure to include the details of your environment and relevant tests that demonstrate the failure. + +[Report a bug](https://github.com/EOSIO/eos/issues/new?title=Bug%3A) + +### Feature Requests + +Feature requests are welcome. Before you submit one be sure to have: + +1. **Use the GitHub search** and check the feature hasn't already been requested. +1. Take a moment to think about whether your idea fits with the scope and aims of the project. +1. Remember, it's up to *you* to make a strong case to convince the project's leaders of the merits of this feature. Please provide as much detail and context as possible, this means explaining the use case and why it is likely to be common. + +### Change Requests + +Change requests cover both architectural and functional changes to how eos works. If you have an idea for a new or different dependency, a refactor, or an improvement to a feature, etc - please be sure to: + +1. **Use the GitHub search** and check someone else didn't get there first +1. Take a moment to think about the best way to make a case for, and explain what you're thinking. Are you sure this shouldn't really be + a [bug report](#bug-reports) or a [feature request](#feature-requests)? Is it really one idea or is it many? What's the context? What problem are you solving? Why is what you are suggesting better than what's already there? + +## Working on eos + +Code contributions are welcome and encouraged! If you are looking for a good place to start, check out the [good first issue](https://github.com/EOSIO/eos/labels/good%20first%20issue) label in GitHub issues. + +Also, please follow these guidelines when submitting code: + +### Feature Branches + +To get it out of the way: + +- **[develop](https://github.com/EOSIO/eos/tree/develop)** is the development branch. All work on the next release happens here so you should generally branch off `develop`. Do **NOT** use this branch for a production site. +- **[master](https://github.com/EOSIO/eos/tree/master)** contains the latest release of eos. This branch may be used in production. Do **NOT** use this branch to work on eos's source. + +### Submitting Pull Requests + +Pull requests are awesome. If you're looking to raise a PR for something which doesn't have an open issue, please think carefully about [raising an issue](#reporting-an-issue) which your PR can close, especially if you're fixing a bug. This makes it more likely that there will be enough information available for your PR to be properly tested and merged. + +### Testing and Quality Assurance + +Never underestimate just how useful quality assurance is. If you're looking to get involved with the code base and don't know where to start, checking out and testing a pull request is one of the most useful things you could do. 
+ +Essentially, [check out the latest develop branch](#working-on-eos), take it for a spin, and if you find anything odd, please follow the [bug report guidelines](#bug-reports) and let us know! + +## Conduct + +While contributing, please be respectful and constructive, so that participation in our project is a positive experience for everyone. + +Examples of behavior that contributes to creating a positive environment include: +- Using welcoming and inclusive language +- Being respectful of differing viewpoints and experiences +- Gracefully accepting constructive criticism +- Focusing on what is best for the community +- Showing empathy towards other community members + +Examples of unacceptable behavior include: +- The use of sexualized language or imagery and unwelcome sexual attention or advances +- Trolling, insulting/derogatory comments, and personal or political attacks +- Public or private harassment +- Publishing others’ private information, such as a physical or electronic address, without explicit permission +- Other conduct which could reasonably be considered inappropriate in a professional setting + +## Contributor License & Acknowledgments + +Whenever you make a contribution to this project, you license your contribution under the same terms as set out in LICENSE, and you represent and warrant that you have the right to license your contribution under those terms. Whenever you make a contribution to this project, you also certify in the terms of the Developer’s Certificate of Origin set out below: + +``` +Developer Certificate of Origin +Version 1.1 + +Copyright (C) 2004, 2006 The Linux Foundation and its contributors. +1 Letterman Drive +Suite D4700 +San Francisco, CA, 94129 + +Everyone is permitted to copy and distribute verbatim copies of this +license document, but changing it is not allowed. + + +Developer's Certificate of Origin 1.1 + +By making a contribution to this project, I certify that: + +(a) The contribution was created in whole or in part by me and I + have the right to submit it under the open source license + indicated in the file; or + +(b) The contribution is based upon previous work that, to the best + of my knowledge, is covered under an appropriate open source + license and I have the right under that license to submit that + work with modifications, whether created in whole or in part + by me, under the same open source license (unless I am + permitted to submit under a different license), as indicated + in the file; or + +(c) The contribution was provided directly to me by some other + person who certified (a), (b) or (c) and I have not modified + it. + +(d) I understand and agree that this project and the contribution + are public and that a record of the contribution (including all + personal information I submit with it, including my sign-off) is + maintained indefinitely and may be redistributed consistent with + this project or the open source license(s) involved. 
+``` + +## References + +* Overall CONTRIB adapted from https://github.com/mathjax/MathJax/blob/master/CONTRIBUTING.md +* Conduct section adapted from the Contributor Covenant, version 1.4, available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html From 7faca6c888a2e90c8cf0bf74bec41852a342d3f1 Mon Sep 17 00:00:00 2001 From: Joseph J Guerra <8146030+josephjguerra@users.noreply.github.com> Date: Tue, 26 Mar 2019 14:07:20 -0400 Subject: [PATCH 213/680] Update LICENSE --- LICENSE | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/LICENSE b/LICENSE index 1516b96cbdf..31dee1d933c 100644 --- a/LICENSE +++ b/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2019 Respective Authors all rights reserved. +Copyright (c) 2017-2019 block.one all rights reserved. The MIT License From cee6dea42fce72c3d3f313a4a7438acfe8d9dc43 Mon Sep 17 00:00:00 2001 From: Joseph J Guerra <8146030+josephjguerra@users.noreply.github.com> Date: Tue, 26 Mar 2019 14:10:05 -0400 Subject: [PATCH 214/680] Update README.md --- README.md | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/README.md b/README.md index e22a2b2cebc..7bce246fbc3 100644 --- a/README.md +++ b/README.md @@ -105,3 +105,17 @@ EOSIO currently supports the following operating systems: ## Getting Started Instructions detailing the process of getting the software, building it, running a simple test network that produces blocks, account creation and uploading a sample contract to the blockchain can be found in [Getting Started](https://developers.eos.io/eosio-home/docs) on the [EOSIO Developer Portal](https://developers.eos.io). + +## Contributing + +[Contributing Guide](./CONTRIBUTING.md) + +[Code of Conduct](./CONTRIBUTING.md#conduct) + +## License + +[MIT](./LICENSE) + +## Important + +See LICENSE for copyright and license terms. Block.one makes its contribution on a voluntary basis as a member of the EOSIO community and is not responsible for ensuring the overall performance of the software or any related applications. We make no representation, warranty, guarantee or undertaking in respect of the software or any related documentation, whether expressed or implied, including but not limited to the warranties or merchantability, fitness for a particular purpose and noninfringement. In no event shall we be liable for any claim, damages or other liability, whether in an action of contract, tort or otherwise, arising from, out of or in connection with the software or documentation or the use or other dealings in the software or documentation. Any test results or performance figures are indicative and will not reflect performance under all conditions. Any reference to any third party or third-party product, service or other resource is not an endorsement or recommendation by Block.one. We are not responsible, and disclaim any and all responsibility and liability, for your use of or reliance on any of these resources. Third-party resources may be updated, changed or terminated at any time, so the information here may be out of date or inaccurate. 
From 8460f2567125a554276d1b96ef3c83cfb23a1812 Mon Sep 17 00:00:00 2001 From: arhag Date: Mon, 25 Mar 2019 16:56:39 -0400 Subject: [PATCH 215/680] implement REPLACE_DEFERRED protocol feature --- libraries/chain/apply_context.cpp | 55 +++++++++++-------- libraries/chain/controller.cpp | 34 ++++++++++++ .../include/eosio/chain/account_object.hpp | 22 +++++++- .../chain/include/eosio/chain/controller.hpp | 2 + .../eosio/chain/protocol_feature_manager.hpp | 3 +- libraries/chain/include/eosio/chain/types.hpp | 1 + libraries/chain/protocol_feature_manager.cpp | 12 ++++ 7 files changed, 104 insertions(+), 25 deletions(-) diff --git a/libraries/chain/apply_context.cpp b/libraries/chain/apply_context.cpp index 2e8113c8ec7..069f79eb3cf 100644 --- a/libraries/chain/apply_context.cpp +++ b/libraries/chain/apply_context.cpp @@ -363,35 +363,44 @@ void apply_context::schedule_deferred_transaction( const uint128_t& sender_id, a if ( auto ptr = db.find(boost::make_tuple(receiver, sender_id)) ) { EOS_ASSERT( replace_existing, deferred_tx_duplicate, "deferred transaction with the same sender_id and payer already exists" ); - // TODO: Remove the following subjective check when the deferred trx replacement RAM bug has been fixed with a hard fork. - EOS_ASSERT( !control.is_producing_block(), subjective_block_production_exception, + bool replace_deferred_activated = control.is_builtin_activated(builtin_protocol_feature_t::replace_deferred); + + EOS_ASSERT( replace_deferred_activated || !control.is_producing_block(), + subjective_block_production_exception, "Replacing a deferred transaction is temporarily disabled." ); - // TODO: The logic of the next line needs to be incorporated into the next hard fork. - // add_ram_usage( ptr->payer, -(config::billable_size_v + ptr->packed_trx.size()) ); + uint64_t orig_trx_ram_bytes = config::billable_size_v + ptr->packed_trx.size(); + if( replace_deferred_activated ) { + add_ram_usage( ptr->payer, -static_cast( orig_trx_ram_bytes ) ); + } else { + control.add_to_ram_correction( ptr->payer, orig_trx_ram_bytes ); + } db.modify( *ptr, [&]( auto& gtx ) { - gtx.sender = receiver; - gtx.sender_id = sender_id; - gtx.payer = payer; - gtx.published = control.pending_block_time(); - gtx.delay_until = gtx.published + delay; - gtx.expiration = gtx.delay_until + fc::seconds(control.get_global_properties().configuration.deferred_trx_expiration_window); - - trx_size = gtx.set( trx ); - }); + if( replace_deferred_activated ) { + gtx.trx_id = trx.id(); + } + gtx.sender = receiver; + gtx.sender_id = sender_id; + gtx.payer = payer; + gtx.published = control.pending_block_time(); + gtx.delay_until = gtx.published + delay; + gtx.expiration = gtx.delay_until + fc::seconds(control.get_global_properties().configuration.deferred_trx_expiration_window); + + trx_size = gtx.set( trx ); + } ); } else { db.create( [&]( auto& gtx ) { - gtx.trx_id = trx.id(); - gtx.sender = receiver; - gtx.sender_id = sender_id; - gtx.payer = payer; - gtx.published = control.pending_block_time(); - gtx.delay_until = gtx.published + delay; - gtx.expiration = gtx.delay_until + fc::seconds(control.get_global_properties().configuration.deferred_trx_expiration_window); - - trx_size = gtx.set( trx ); - }); + gtx.trx_id = trx.id(); + gtx.sender = receiver; + gtx.sender_id = sender_id; + gtx.payer = payer; + gtx.published = control.pending_block_time(); + gtx.delay_until = gtx.published + delay; + gtx.expiration = gtx.delay_until + fc::seconds(control.get_global_properties().configuration.deferred_trx_expiration_window); + + 
trx_size = gtx.set( trx ); + } ); } EOS_ASSERT( control.is_ram_billing_in_notify_allowed() || (receiver == act.account) || (receiver == payer) || privileged, diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp index 279964752e1..c93b147f052 100644 --- a/libraries/chain/controller.cpp +++ b/libraries/chain/controller.cpp @@ -35,6 +35,7 @@ using resource_limits::resource_limits_manager; using controller_index_set = index_set< account_index, account_sequence_index, + account_ram_correction_index, global_property_multi_index, protocol_state_multi_index, dynamic_global_property_multi_index, @@ -306,6 +307,7 @@ struct controller_impl { ); set_activation_handler(); + set_activation_handler(); #define SET_APP_HANDLER( receiver, contract, action) \ @@ -2946,6 +2948,20 @@ const flat_set &controller::get_resource_greylist() const { return my->conf.resource_greylist; } + +void controller::add_to_ram_correction( account_name account, uint64_t ram_bytes ) { + if( auto ptr = my->db.find( account ) ) { + my->db.modify( *ptr, [&]( auto& rco ) { + rco.ram_correction += ram_bytes; + } ); + } else { + my->db.create( [&]( auto& rco ) { + rco.name = account; + rco.ram_correction = ram_bytes; + } ); + } +} + /// Protocol feature activation handlers: template<> @@ -2956,6 +2972,24 @@ void controller_impl::on_activation +void controller_impl::on_activation() { + const auto& indx = db.get_index(); + auto itr = indx.begin(); + for( auto itr = indx.begin(); itr != indx.end(); itr = indx.begin() ) { + int64_t current_ram_usage = resource_limits.get_account_ram_usage( itr->name ); + int64_t ram_delta = -static_cast(itr->ram_correction); + if( itr->ram_correction > static_cast(current_ram_usage) ) { + ram_delta = -current_ram_usage; + elog( "account ${name} was to be reduced by ${adjust} bytes of RAM despite only using ${current} bytes of RAM", + ("name", itr->name)("adjust", itr->ram_correction)("current", current_ram_usage) ); + } + + resource_limits.add_pending_ram_usage( itr->name, ram_delta ); + db.remove( *itr ); + } +} + /// End of protocol feature activation handlers } } /// eosio::chain diff --git a/libraries/chain/include/eosio/chain/account_object.hpp b/libraries/chain/include/eosio/chain/account_object.hpp index b995a8508a1..ca314b23146 100644 --- a/libraries/chain/include/eosio/chain/account_object.hpp +++ b/libraries/chain/include/eosio/chain/account_object.hpp @@ -75,11 +75,31 @@ namespace eosio { namespace chain { > >; + class account_ram_correction_object : public chainbase::object + { + OBJECT_CTOR(account_ram_correction_object); + + id_type id; + account_name name; + uint64_t ram_correction = 0; + }; + + struct by_name; + using account_ram_correction_index = chainbase::shared_multi_index_container< + account_ram_correction_object, + indexed_by< + ordered_unique, member>, + ordered_unique, member> + > + >; + } } // eosio::chain CHAINBASE_SET_INDEX_TYPE(eosio::chain::account_object, eosio::chain::account_index) CHAINBASE_SET_INDEX_TYPE(eosio::chain::account_sequence_object, eosio::chain::account_sequence_index) +CHAINBASE_SET_INDEX_TYPE(eosio::chain::account_ram_correction_object, eosio::chain::account_ram_correction_index) FC_REFLECT(eosio::chain::account_object, (name)(vm_type)(vm_version)(privileged)(last_code_update)(code_version)(creation_date)(code)(abi)) -FC_REFLECT(eosio::chain::account_sequence_object, (name)(recv_sequence)(auth_sequence)(code_sequence)(abi_sequence)) \ No newline at end of file +FC_REFLECT(eosio::chain::account_sequence_object, 
(name)(recv_sequence)(auth_sequence)(code_sequence)(abi_sequence)) +FC_REFLECT(eosio::chain::account_ram_correction_object, (name)(ram_correction)) diff --git a/libraries/chain/include/eosio/chain/controller.hpp b/libraries/chain/include/eosio/chain/controller.hpp index 6b1c82faea4..c4f9b6ed5ee 100644 --- a/libraries/chain/include/eosio/chain/controller.hpp +++ b/libraries/chain/include/eosio/chain/controller.hpp @@ -271,6 +271,8 @@ namespace eosio { namespace chain { void set_subjective_cpu_leeway(fc::microseconds leeway); + void add_to_ram_correction( account_name account, uint64_t ram_bytes ); + signal pre_accepted_block; signal accepted_block_header; signal accepted_block; diff --git a/libraries/chain/include/eosio/chain/protocol_feature_manager.hpp b/libraries/chain/include/eosio/chain/protocol_feature_manager.hpp index 2f6f6cb3630..4a0292ae3a7 100644 --- a/libraries/chain/include/eosio/chain/protocol_feature_manager.hpp +++ b/libraries/chain/include/eosio/chain/protocol_feature_manager.hpp @@ -15,7 +15,8 @@ enum class protocol_feature_t : uint32_t { enum class builtin_protocol_feature_t : uint32_t { preactivate_feature, - only_link_to_existing_permission + only_link_to_existing_permission, + replace_deferred }; struct protocol_feature_subjective_restrictions { diff --git a/libraries/chain/include/eosio/chain/types.hpp b/libraries/chain/include/eosio/chain/types.hpp index a3332271fbe..1cea911d9d9 100644 --- a/libraries/chain/include/eosio/chain/types.hpp +++ b/libraries/chain/include/eosio/chain/types.hpp @@ -190,6 +190,7 @@ namespace eosio { namespace chain { action_history_object_type, ///< Defined by history_plugin reversible_block_object_type, protocol_state_object_type, + account_ram_correction_object_type, OBJECT_TYPE_COUNT ///< Sentry value which contains the number of different object types }; diff --git a/libraries/chain/protocol_feature_manager.cpp b/libraries/chain/protocol_feature_manager.cpp index a7fe5ecb1ad..92605d035d5 100644 --- a/libraries/chain/protocol_feature_manager.cpp +++ b/libraries/chain/protocol_feature_manager.cpp @@ -38,6 +38,18 @@ Pre-activated protocol features must be activated in the next block. Builtin protocol feature: ONLY_LINK_TO_EXISTING_PERMISSION Disallows linking an action to a non-existing permission. +*/ + {} + } ) + ( builtin_protocol_feature_t::replace_deferred, builtin_protocol_feature_spec{ + "REPLACE_DEFERRED", + fc::variant("9908b3f8413c8474ab2a6be149d3f4f6d0421d37886033f27d4759c47a26d944").as(), + // SHA256 hash of the raw message below within the comment delimiters (do not modify message below). +/* +Builtin protocol feature: REPLACE_DEFERRED + +Fix the problems associated with replacing an existing deferred transaction. +Also corrects the RAM usage of accounts affected by the replace deferred transaction bug. */ {} } ) From 7401beb0b6fb0f5cdaf570d0cc90d80e12959273 Mon Sep 17 00:00:00 2001 From: arhag Date: Mon, 25 Mar 2019 19:30:36 -0400 Subject: [PATCH 216/680] Added protocol_features_tests/replace_deferred_test unit test to test the REPLACE_DEFERRED protocol feature. Updated the deferred_test test contract to make it possible to selectively replace existing deferred transaction, which was needed for the above unit test. Added disable_all_subjective_mitigations configuration option to controller to make it possible to write the above unit test. Finally, enabled the part of api_tests/deferred_transaction_tests that deals with replacing deferred transactions. 
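As background for the diff below, a minimal eosio.cdt-style sketch of the behavior under test: scheduling a deferred transaction and replacing it through the `replace_existing` argument of `transaction::send`. The contract and action names (`sched`, `ping`, `defer`) are hypothetical; only the `send( sender_id, payer, replace_existing )` call shape comes from the patches themselves.

```
// Hypothetical contract sketch (names invented; API per eosio.cdt headers).
#include <eosio/eosio.hpp>
#include <eosio/transaction.hpp>

class [[eosio::contract]] sched : public eosio::contract {
public:
   using contract::contract;

   [[eosio::action]]
   void ping( uint64_t payload ) { /* the deferred action eventually lands here */ }

   [[eosio::action]]
   void defer( eosio::name payer, uint64_t sender_id, uint64_t payload ) {
      eosio::transaction trx;
      trx.actions.emplace_back(
         eosio::permission_level{ get_self(), "active"_n },
         get_self(), "ping"_n, payload );
      trx.delay_sec = 5;
      // replace_existing = true: a later call with the same sender_id/payer
      // replaces the pending transaction instead of failing as a duplicate
      trx.send( sender_id, payer, true );
   }
};
```

A second `defer` call with the same `sender_id` and `payer` exercises the replacement path; prior to REPLACE_DEFERRED it left the payer's RAM double-billed and retained the old transaction id, which is exactly what the new unit test checks below.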
--- libraries/chain/apply_context.cpp | 3 +- libraries/chain/controller.cpp | 4 + .../chain/include/eosio/chain/controller.hpp | 2 + unittests/api_tests.cpp | 3 - unittests/protocol_feature_tests.cpp | 139 +++++++++++++++++- unittests/test-contracts/README.md | 2 + .../deferred_test/deferred_test.cpp | 3 +- .../deferred_test/deferred_test.wasm | Bin 8204 -> 8216 bytes 8 files changed, 150 insertions(+), 6 deletions(-) diff --git a/libraries/chain/apply_context.cpp b/libraries/chain/apply_context.cpp index 069f79eb3cf..37c78ff7c7b 100644 --- a/libraries/chain/apply_context.cpp +++ b/libraries/chain/apply_context.cpp @@ -365,7 +365,8 @@ void apply_context::schedule_deferred_transaction( const uint128_t& sender_id, a bool replace_deferred_activated = control.is_builtin_activated(builtin_protocol_feature_t::replace_deferred); - EOS_ASSERT( replace_deferred_activated || !control.is_producing_block(), + EOS_ASSERT( replace_deferred_activated || !control.is_producing_block() + || control.all_subjective_mitigations_disabled(), subjective_block_production_exception, "Replacing a deferred transaction is temporarily disabled." ); diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp index c93b147f052..90b8f05d47e 100644 --- a/libraries/chain/controller.cpp +++ b/libraries/chain/controller.cpp @@ -2962,6 +2962,10 @@ void controller::add_to_ram_correction( account_name account, uint64_t ram_bytes } } +bool controller::all_subjective_mitigations_disabled()const { + return my->conf.disable_all_subjective_mitigations; +} + /// Protocol feature activation handlers: template<> diff --git a/libraries/chain/include/eosio/chain/controller.hpp b/libraries/chain/include/eosio/chain/controller.hpp index c4f9b6ed5ee..c3d20bb4ea0 100644 --- a/libraries/chain/include/eosio/chain/controller.hpp +++ b/libraries/chain/include/eosio/chain/controller.hpp @@ -76,6 +76,7 @@ namespace eosio { namespace chain { bool disable_replay_opts = false; bool contracts_console = false; bool allow_ram_billing_in_notify = false; + bool disable_all_subjective_mitigations = false; //< for testing purposes only genesis_state genesis; wasm_interface::vm_type wasm_runtime = chain::config::default_wasm_runtime; @@ -272,6 +273,7 @@ namespace eosio { namespace chain { void set_subjective_cpu_leeway(fc::microseconds leeway); void add_to_ram_correction( account_name account, uint64_t ram_bytes ); + bool all_subjective_mitigations_disabled()const; signal pre_accepted_block; signal accepted_block_header; diff --git a/unittests/api_tests.cpp b/unittests/api_tests.cpp index 1d4fedc0e7c..889effc9cb3 100644 --- a/unittests/api_tests.cpp +++ b/unittests/api_tests.cpp @@ -1164,8 +1164,6 @@ BOOST_FIXTURE_TEST_CASE(deferred_transaction_tests, TESTER) { try { produce_blocks(10); -#warning re-enable deferred transaction replacement test after bug has been fixed - #if 0 //schedule twice with replace_existing flag (second deferred transaction should replace first one) { transaction_trace_ptr trace; @@ -1186,7 +1184,6 @@ BOOST_FIXTURE_TEST_CASE(deferred_transaction_tests, TESTER) { try { BOOST_CHECK_EQUAL( 1, trace->action_traces.size() ); c.disconnect(); } - #endif produce_blocks(10); diff --git a/unittests/protocol_feature_tests.cpp b/unittests/protocol_feature_tests.cpp index 10648136c0c..2a7a466f556 100644 --- a/unittests/protocol_feature_tests.cpp +++ b/unittests/protocol_feature_tests.cpp @@ -3,7 +3,7 @@ * @copyright defined in eos/LICENSE.txt */ #include -#include +#include #include #include @@ -348,4 +348,141 @@ 
BOOST_AUTO_TEST_CASE( subjective_restrictions_test ) try { BOOST_CHECK( c.control->is_builtin_activated( builtin_protocol_feature_t::only_link_to_existing_permission ) ); } FC_LOG_AND_RETHROW() +BOOST_AUTO_TEST_CASE( replace_deferred_test ) try { + tester c( setup_policy::preactivate_feature_and_new_bios ); + + c.create_accounts( {N(alice), N(bob), N(test)} ); + c.set_code( N(test), contracts::deferred_test_wasm() ); + c.set_abi( N(test), contracts::deferred_test_abi().data() ); + c.produce_block(); + + auto alice_ram_usage0 = c.control->get_resource_limits_manager().get_account_ram_usage( N(alice) ); + + c.push_action( N(test), N(defercall), N(alice), fc::mutable_variant_object() + ("payer", "alice") + ("sender_id", 42) + ("contract", "test") + ("payload", 100) + ); + + auto alice_ram_usage1 = c.control->get_resource_limits_manager().get_account_ram_usage( N(alice) ); + + // Verify subjective mitigation is in place + BOOST_CHECK_EXCEPTION( + c.push_action( N(test), N(defercall), N(alice), fc::mutable_variant_object() + ("payer", "alice") + ("sender_id", 42) + ("contract", "test") + ("payload", 101 ) + ), + subjective_block_production_exception, + fc_exception_message_is( "Replacing a deferred transaction is temporarily disabled." ) + ); + + BOOST_CHECK_EQUAL( c.control->get_resource_limits_manager().get_account_ram_usage( N(alice) ), alice_ram_usage1 ); + + c.control->abort_block(); + + c.close(); + auto cfg = c.get_config(); + cfg.disable_all_subjective_mitigations = true; + c.init( cfg, nullptr ); + + BOOST_CHECK_EQUAL( c.control->get_resource_limits_manager().get_account_ram_usage( N(alice) ), alice_ram_usage0 ); + + c.push_action( N(test), N(defercall), N(alice), fc::mutable_variant_object() + ("payer", "alice") + ("sender_id", 42) + ("contract", "test") + ("payload", 100) + ); + + BOOST_CHECK_EQUAL( c.control->get_resource_limits_manager().get_account_ram_usage( N(alice) ), alice_ram_usage1 ); + auto dtrxs = c.get_scheduled_transactions(); + BOOST_CHECK_EQUAL( dtrxs.size(), 1 ); + auto first_dtrx_id = dtrxs[0]; + + // With the subjective mitigation disabled, replacing the deferred transaction is allowed. + c.push_action( N(test), N(defercall), N(alice), fc::mutable_variant_object() + ("payer", "alice") + ("sender_id", 42) + ("contract", "test") + ("payload", 101) + ); + + auto alice_ram_usage2 = c.control->get_resource_limits_manager().get_account_ram_usage( N(alice) ); + BOOST_CHECK_EQUAL( alice_ram_usage2, alice_ram_usage1 + (alice_ram_usage1 - alice_ram_usage0) ); + + dtrxs = c.get_scheduled_transactions(); + BOOST_CHECK_EQUAL( dtrxs.size(), 1 ); + BOOST_CHECK_EQUAL( first_dtrx_id, dtrxs[0] ); // Incorrectly kept as the old transaction ID. 
+ + c.produce_block(); + + auto alice_ram_usage3 = c.control->get_resource_limits_manager().get_account_ram_usage( N(alice) ); + BOOST_CHECK_EQUAL( alice_ram_usage3, alice_ram_usage1 ); + + dtrxs = c.get_scheduled_transactions(); + BOOST_CHECK_EQUAL( dtrxs.size(), 0 ); + + c.produce_block(); + + c.close(); + cfg.disable_all_subjective_mitigations = false; + c.init( cfg, nullptr ); + + const auto& pfm = c.control->get_protocol_feature_manager(); + + auto d = pfm.get_builtin_digest( builtin_protocol_feature_t::replace_deferred ); + BOOST_REQUIRE( d ); + + c.preactivate_protocol_features( {*d} ); + c.produce_block(); + + BOOST_CHECK_EQUAL( c.control->get_resource_limits_manager().get_account_ram_usage( N(alice) ), alice_ram_usage0 ); + + c.push_action( N(test), N(defercall), N(alice), fc::mutable_variant_object() + ("payer", "alice") + ("sender_id", 42) + ("contract", "test") + ("payload", 100) + ); + + BOOST_CHECK_EQUAL( c.control->get_resource_limits_manager().get_account_ram_usage( N(alice) ), alice_ram_usage1 ); + + dtrxs = c.get_scheduled_transactions(); + BOOST_CHECK_EQUAL( dtrxs.size(), 1 ); + auto first_dtrx_id2 = dtrxs[0]; + + // With REPLACE_DEFERRED activated, replacing the deferred transaction is allowed and now should work properly. + c.push_action( N(test), N(defercall), N(alice), fc::mutable_variant_object() + ("payer", "alice") + ("sender_id", 42) + ("contract", "test") + ("payload", 101) + ); + + BOOST_CHECK_EQUAL( c.control->get_resource_limits_manager().get_account_ram_usage( N(alice) ), alice_ram_usage1 ); + + dtrxs = c.get_scheduled_transactions(); + BOOST_CHECK_EQUAL( dtrxs.size(), 1 ); + BOOST_CHECK( first_dtrx_id2 != dtrxs[0] ); + + // Replace again with a deferred transaction identical to the first one + c.push_action( N(test), N(defercall), N(alice), fc::mutable_variant_object() + ("payer", "alice") + ("sender_id", 42) + ("contract", "test") + ("payload", 100), + 100 // Needed to make this input transaction unique + ); + + BOOST_CHECK_EQUAL( c.control->get_resource_limits_manager().get_account_ram_usage( N(alice) ), alice_ram_usage1 ); + + dtrxs = c.get_scheduled_transactions(); + BOOST_CHECK_EQUAL( dtrxs.size(), 1 ); + BOOST_CHECK_EQUAL( first_dtrx_id2, dtrxs[0] ); + +} FC_LOG_AND_RETHROW() + BOOST_AUTO_TEST_SUITE_END() diff --git a/unittests/test-contracts/README.md b/unittests/test-contracts/README.md index 8b03cc131cd..157455c7202 100644 --- a/unittests/test-contracts/README.md +++ b/unittests/test-contracts/README.md @@ -2,4 +2,6 @@ test_ram_limit contract was compiled with eosio.cdt v1.4.1 That contract was ported to compile with eosio.cdt v1.5.0, but the test that uses it is very sensitive to stdlib/eosiolib changes, compilation flags and linker flags. +deferred_test contract was compiled with eosio.cdt v1.6.1 + The remaining contracts have been ported to compile with eosio.cdt v1.6.x. They were compiled with a patched version of eosio.cdt v1.6.0-rc1 (commit 1c9180ff5a1e431385180ce459e11e6a1255c1a4). 
diff --git a/unittests/test-contracts/deferred_test/deferred_test.cpp b/unittests/test-contracts/deferred_test/deferred_test.cpp index 54c02ed4a95..4ee7465537c 100644 --- a/unittests/test-contracts/deferred_test/deferred_test.cpp +++ b/unittests/test-contracts/deferred_test/deferred_test.cpp @@ -15,7 +15,8 @@ void deferred_test::defercall( name payer, uint64_t sender_id, name contract, ui transaction trx; deferfunc_action a( contract, {get_self(), "active"_n} ); trx.actions.emplace_back( a.to_action( payload ) ); - trx.send( (static_cast(payer.value) << 64) | sender_id, payer ); + bool replace_existing = (payload >= 100); + trx.send( (static_cast(payer.value) << 64) | sender_id, payer, replace_existing ); } void deferred_test::deferfunc( uint64_t payload ) { diff --git a/unittests/test-contracts/deferred_test/deferred_test.wasm b/unittests/test-contracts/deferred_test/deferred_test.wasm index eea70b8dc6ef0738a6e760c5705d1b7c5590f8af..fbfdaf14f0841a352371938b55345e21362908ff 100755 GIT binary patch delta 2578 zcmai0U2L0I89wKHKX!5)`#5o2$8nnDFF$oclBIuXDH`?Z8metKtYRQRMdI3Dhrg!w z>Udj=AP!qE5DIK?Hf@3-ae-hPlMoDUhLBo;#2D)BG$F(dS4gXnKto&s&pDrywIvd< ze7^I&f9HAM_j!-AQ+=iS!sJ=j>ydS**Xz;VI`;i_Yn@Q@HL@+}x3;iSqxE{&8xdT{ zy4&k*^~ksD7xY6km~@@6^>iw1Z#LU&wav}2(=Mt0s6W-|gstvcw|ONbN&RbD?D(!m9gU;*>;VDK2d@q$^%=cZFCD~Bg&7@qs z_yT{v54j#<%ZZqJe7Dlpi#h9@D@vNR0BtnNF^e5?P@?) z*o~@w;^5Q>tf`_=m8jmiA&SuIHU0gAM+!IKCt~QHA;|e4EBxS%0aL|tjROA9OL>$B~ z!?MC&sRqpWvMe!~LyMF+mjnk(B8ogsj5maM8|AUI#4jW26cWB1uxX+nEKN*}!Gu*! zU>g)3IM4)TDiuj89WkcATADq7@cxKNM8?UIdqfNpzDVg7LzG4GYSmVE&iO$V$)pTk zMNdI&YIFipfJB1d(tl+KkKVdT1eLB}>OH%o!cp(rodemw`!c4UyJhT{aK)59<*d`9 z{+@G=&q_~yKqBdhL;8+0wp!vy+N&^O%K>FBGHo$6C~@h{IbWw`M|zT;aFTei6AppV zJ0^lj^~_($XGKMycE<*dxlxEYebH^;xZk;-qzUbNUweEWXAmVLbV;IUrGQ*kiAOA< z?f|8aCP+smBb5}1B5a*KJmAy{%f(-xr6oO+oGuzvWdN1lIn!+u+;rFxC0$P*&VLNYLFSqReWP%tFXr_d$)j{Z|2esY zC_@9E!Fq9E4(rZ973=MRX{>*@lG5Nul4l2v&6|U5BwymEX`nCgODMr_IOK2L;lE<^ zsD3^D#ez**`Y1=)0GwV9!W zR4qs1n4TAh=&U|3UZwN;Z{icHuw)gs9Dtw|c1)7XB8`+|kx7}E8bgkT$MWfz3k<>c zxS-qFB9i+?c9!1JKg;Hk);rn5qj4sP6^|kSp@ljoGHOmd@yCq_Q8#AC6Q2(sHxx#R%;k)(?Yu zi19gdPO|K)7wyOqjN$8c=L2-_{ek#r+Pukb8g%y^|}Z?+veik&`QX zz(pUpDT;{7%oau5SC3@Ba(Zxd3S?Xw{ZwHQhM230xdbu75_dORk1^O8Jv8yZ{n?!> zCLMh>S6nbvuqDVm8eijgVH9YQ`r}^i@9BL0c%Q&Txn}C^Jq<#*AV)oav`-tr=?{RX z7&Jf0&)w%{r^B&sALKvRS7KQE0r%!6UC8UJ2PQzx=MNZa{^-D`_c88IP`YqqJm&2# zy8aOM_1W=~{(a%vk=Q0-LE?5vrmUH7pcZ998iit!y(^`UJi+zNv6uVj-aXuVlUvZI ziot#CoJu+jqirNM`JHk{qyn4D*rJ{;ivZ|;Y*il&5f`j zyKPz9*l5>l-B3PRdm`KqCi2qM5O`KT)9ii;r(EA?*BbIc;J^D|`k4R# delta 2381 zcmZuzU2IfE6rP#;x9#k9hqk-j-Tv(C(w5r-rKP`s%1(n4V<2KQ`e106-CNu%?3V4) zN@8{kFZ#fPWkii3TEm0U2P6;^B2noBhCobwFd74i@a97wz(C@oXYOu)(sYxZJ9}o% zH{UtuJ9GE5)cMp$@m{S`so=^asZ`J;s!qJIjFb(R!C0Qu%H=XzI2_h7>|MybZJ8&X zlbPu=3lE!{49&^U(itb8oyg@+I<@4_oGuofd}*STJL#Z$Woka|lsu$aXJWco$ezwP z#TjI&CBNOj|HD#kP!H75Mt_9eUKm-ApO?mJzY9tBk}-A zMmmh~`T6|GA#8q#mf$fi)jfnf+j4+)I%5ifNcgxG>!LATD#4>WZ!NMfP4V$zj(Tf2D zR_<~%LJ&vHmO(2fY|0IeM>QWj$f%~a+)<7k1Dlqr^Vo{>b8F7vrZ%#OoVBuW>V4}4 zY{;1JERM=czS!Q|ONf~iV%3Dt6+ULf086t8t72)2MYCc-VF!0RQBX~o%lhPFUoO<0 zAbtV~Nf_`Yehf&g?|Q!OF2=UpLpxzSN+U4N(SdLbumN3H=%xx&=O&BFpXeZNmH*N{ z*p~3W4&$u92gXHz3dTo%1!4x2r9ZG{`NXr_3KSry{)YXyPfj;XL1=$Az|QM9c$46y z{Bg_Ch!-I@ZGsXyg(Hb*a0Oco_T8(BKoeswq^Al9SP4T_OT2d~ zX$l`CLLjlavmdnQk$MzreZFQTTz3;X$OD4)7J-+DRAEdJ*?<5FuOjRTAHdDB9JV(j zc_VBC$w%R)l|w-ORfbxR=1Z7j*=sBW&2|=|z@R9-+KRy491pza(Y_!DT6S&*Ki#qm z;1^qB4Ye3*@bXRzA6X5St&4}b3F4)NZHUKL%bo0qYz0k@M_!7w!A9sMJ4w8z=0sJ8 zKu0e|wr$4duSnO9x)|3I2BbasN&rJ=t)yHLV^C^w%?ncnd`0#~he5cLQ6=2D=wPU- 
z4mh%=bwd!)&Ck)E4WjZIK@&PNF2vrftwCGXG_~3xh1lQ*$C~C;`}`1lbra8EwYLhf zupDd613B6e1#(NRAGcP?SsppKEqs2Dh3Qe8ZVTg1`9a&K>-sPv54R`b_bIP0KWsm@ z4y{d!j*(ipYprd0iag(;yuRyDN%^J2eq#)i3KRkf%u#C+v!NskAT0$HK+!h&O{nr< zPPQO*9R;{+&ZfxH=07jBhP#z5Ifg=EI4ZiH4vkS5_OjNeF@30^k{5%5_>=#gIs{6%6X8oew)Z|G@B2pxd?k} zLAXo2I~9ovTvi;r(AQ72_wlgI>qC{D7?%mK+ld|spej(0_E3O(P(?2AUN8$_G744* zcOGL6619TpIjG2W>Y~ K5!L?#xc>onUfHz( From fb84366f0c94c2ce72e146dd4ec1a6832a3f0480 Mon Sep 17 00:00:00 2001 From: arhag Date: Tue, 26 Mar 2019 14:54:07 -0400 Subject: [PATCH 217/680] upgrade chain snapshot version to 2; also add num_supported_key_types field to protocol_state_object --- libraries/chain/controller.cpp | 1 + libraries/chain/include/eosio/chain/chain_snapshot.hpp | 9 ++++++--- .../chain/include/eosio/chain/protocol_state_object.hpp | 6 ++++-- libraries/chain/protocol_state_object.cpp | 4 ++++ 4 files changed, 15 insertions(+), 5 deletions(-) diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp index 90b8f05d47e..8bec114d6be 100644 --- a/libraries/chain/controller.cpp +++ b/libraries/chain/controller.cpp @@ -887,6 +887,7 @@ struct controller_impl { }); db.create([&](auto& pso ){ + pso.num_supported_key_types = 2; for( const auto& i : genesis_intrinsics ) { add_intrinsic_to_whitelist( pso.whitelisted_intrinsics, i ); } diff --git a/libraries/chain/include/eosio/chain/chain_snapshot.hpp b/libraries/chain/include/eosio/chain/chain_snapshot.hpp index 3b3e64f264f..5546b301999 100644 --- a/libraries/chain/include/eosio/chain/chain_snapshot.hpp +++ b/libraries/chain/include/eosio/chain/chain_snapshot.hpp @@ -12,10 +12,13 @@ struct chain_snapshot_header { /** * Version history * 1: initial version + * 2: Updated chain snapshot for v1.8.0 initial protocol features release: + * - Incompatible with version 1. 
+ * - Adds new indices for: protocol_state_object and account_ram_correction_object */ - static constexpr uint32_t minimum_compatible_version = 1; - static constexpr uint32_t current_version = 1; + static constexpr uint32_t minimum_compatible_version = 2; + static constexpr uint32_t current_version = 2; uint32_t version = current_version; @@ -31,4 +34,4 @@ struct chain_snapshot_header { } } -FC_REFLECT(eosio::chain::chain_snapshot_header,(version)) \ No newline at end of file +FC_REFLECT(eosio::chain::chain_snapshot_header,(version)) diff --git a/libraries/chain/include/eosio/chain/protocol_state_object.hpp b/libraries/chain/include/eosio/chain/protocol_state_object.hpp index 6be252a2638..91fc47b08c4 100644 --- a/libraries/chain/include/eosio/chain/protocol_state_object.hpp +++ b/libraries/chain/include/eosio/chain/protocol_state_object.hpp @@ -40,6 +40,7 @@ namespace eosio { namespace chain { shared_vector activated_protocol_features; shared_vector preactivated_protocol_features; whitelisted_intrinsics_type whitelisted_intrinsics; + uint32_t num_supported_key_types = 0; }; using protocol_state_multi_index = chainbase::shared_multi_index_container< @@ -55,6 +56,7 @@ namespace eosio { namespace chain { vector activated_protocol_features; vector preactivated_protocol_features; std::set whitelisted_intrinsics; + uint32_t num_supported_key_types = 0; }; namespace detail { @@ -81,9 +83,9 @@ FC_REFLECT(eosio::chain::protocol_state_object::activated_protocol_feature, ) FC_REFLECT(eosio::chain::protocol_state_object, - (activated_protocol_features)(preactivated_protocol_features)(whitelisted_intrinsics) + (activated_protocol_features)(preactivated_protocol_features)(whitelisted_intrinsics)(num_supported_key_types) ) FC_REFLECT(eosio::chain::snapshot_protocol_state_object, - (activated_protocol_features)(preactivated_protocol_features)(whitelisted_intrinsics) + (activated_protocol_features)(preactivated_protocol_features)(whitelisted_intrinsics)(num_supported_key_types) ) diff --git a/libraries/chain/protocol_state_object.cpp b/libraries/chain/protocol_state_object.cpp index 8a860248a3b..7009ee57874 100644 --- a/libraries/chain/protocol_state_object.cpp +++ b/libraries/chain/protocol_state_object.cpp @@ -26,6 +26,8 @@ namespace eosio { namespace chain { res.whitelisted_intrinsics = convert_intrinsic_whitelist_to_set( value.whitelisted_intrinsics ); + res.num_supported_key_types = value.num_supported_key_types; + return res; } @@ -47,6 +49,8 @@ namespace eosio { namespace chain { } reset_intrinsic_whitelist( value.whitelisted_intrinsics, row.whitelisted_intrinsics ); + + value.num_supported_key_types = row.num_supported_key_types; } } From 2cee787e29b74c873e1c00c294e2b249cbda484a Mon Sep 17 00:00:00 2001 From: arhag Date: Tue, 26 Mar 2019 21:06:28 -0400 Subject: [PATCH 218/680] implement FIX_LINKAUTH_RESTRICTION protocol feature --- libraries/chain/authorization_manager.cpp | 24 +++++++++++-------- .../eosio/chain/protocol_feature_manager.hpp | 3 ++- libraries/chain/protocol_feature_manager.cpp | 12 ++++++++++ 3 files changed, 28 insertions(+), 11 deletions(-) diff --git a/libraries/chain/authorization_manager.cpp b/libraries/chain/authorization_manager.cpp index e69f7129121..83988c16657 100644 --- a/libraries/chain/authorization_manager.cpp +++ b/libraries/chain/authorization_manager.cpp @@ -331,16 +331,20 @@ namespace eosio { namespace chain { EOS_ASSERT( auth.actor == link.account, irrelevant_auth_exception, "the owner of the linked permission needs to be the actor of the declared authorization" 
); - EOS_ASSERT( link.type != updateauth::get_name(), action_validate_exception, - "Cannot link eosio::updateauth to a minimum permission" ); - EOS_ASSERT( link.type != deleteauth::get_name(), action_validate_exception, - "Cannot link eosio::deleteauth to a minimum permission" ); - EOS_ASSERT( link.type != linkauth::get_name(), action_validate_exception, - "Cannot link eosio::linkauth to a minimum permission" ); - EOS_ASSERT( link.type != unlinkauth::get_name(), action_validate_exception, - "Cannot link eosio::unlinkauth to a minimum permission" ); - EOS_ASSERT( link.type != canceldelay::get_name(), action_validate_exception, - "Cannot link eosio::canceldelay to a minimum permission" ); + if( link.code == config::system_account_name + || !_control.is_builtin_activated( builtin_protocol_feature_t::fix_linkauth_restriction ) ) + { + EOS_ASSERT( link.type != updateauth::get_name(), action_validate_exception, + "Cannot link eosio::updateauth to a minimum permission" ); + EOS_ASSERT( link.type != deleteauth::get_name(), action_validate_exception, + "Cannot link eosio::deleteauth to a minimum permission" ); + EOS_ASSERT( link.type != linkauth::get_name(), action_validate_exception, + "Cannot link eosio::linkauth to a minimum permission" ); + EOS_ASSERT( link.type != unlinkauth::get_name(), action_validate_exception, + "Cannot link eosio::unlinkauth to a minimum permission" ); + EOS_ASSERT( link.type != canceldelay::get_name(), action_validate_exception, + "Cannot link eosio::canceldelay to a minimum permission" ); + } const auto linked_permission_name = lookup_minimum_permission(link.account, link.code, link.type); diff --git a/libraries/chain/include/eosio/chain/protocol_feature_manager.hpp b/libraries/chain/include/eosio/chain/protocol_feature_manager.hpp index 4a0292ae3a7..4c7bcf6d95a 100644 --- a/libraries/chain/include/eosio/chain/protocol_feature_manager.hpp +++ b/libraries/chain/include/eosio/chain/protocol_feature_manager.hpp @@ -16,7 +16,8 @@ enum class protocol_feature_t : uint32_t { enum class builtin_protocol_feature_t : uint32_t { preactivate_feature, only_link_to_existing_permission, - replace_deferred + replace_deferred, + fix_linkauth_restriction }; struct protocol_feature_subjective_restrictions { diff --git a/libraries/chain/protocol_feature_manager.cpp b/libraries/chain/protocol_feature_manager.cpp index 92605d035d5..94d42cfad1d 100644 --- a/libraries/chain/protocol_feature_manager.cpp +++ b/libraries/chain/protocol_feature_manager.cpp @@ -50,6 +50,18 @@ Builtin protocol feature: REPLACE_DEFERRED Fix the problems associated with replacing an existing deferred transaction. Also corrects the RAM usage of accounts affected by the replace deferred transaction bug. +*/ + {} + } ) + ( builtin_protocol_feature_t::fix_linkauth_restriction, builtin_protocol_feature_spec{ + "FIX_LINKAUTH_RESTRICTION", + fc::variant("a98241c83511dc86c857221b9372b4aa7cea3aaebc567a48604e1d3db3557050").as(), + // SHA256 hash of the raw message below within the comment delimiters (do not modify message below). +/* +Builtin protocol feature: FIX_LINKAUTH_RESTRICTION + +Removes the restriction on eosio::linkauth for non-native actions named one of the five special action names: +updateauth, deleteauth, linkauth, unlinkauth, or canceldelay. 
*/ {} } ) From b4f8d70b89b1309943dbaee551da32234bd9e392 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Tue, 26 Mar 2019 20:28:13 -0500 Subject: [PATCH 219/680] Do not name main thread since some tests expect it to be nodeos --- programs/nodeos/main.cpp | 1 - 1 file changed, 1 deletion(-) diff --git a/programs/nodeos/main.cpp b/programs/nodeos/main.cpp index 403b5c2b317..7034a03858a 100644 --- a/programs/nodeos/main.cpp +++ b/programs/nodeos/main.cpp @@ -84,7 +84,6 @@ enum return_codes { int main(int argc, char** argv) { try { - fc::set_os_thread_name( "main" ); app().set_version(eosio::nodeos::config::version); auto root = fc::app_path(); From 0380b93619e29aa955d221d5485f39bc27d0feb7 Mon Sep 17 00:00:00 2001 From: Andrianto Lie Date: Fri, 22 Mar 2019 15:17:37 +0800 Subject: [PATCH 220/680] Add multiple version protocol feature test with cmake Add additional case for multi version protocol feature activation test --- tests/CMakeLists.txt | 4 + tests/Cluster.py | 11 +- tests/Node.py | 31 +++- tests/TestHelper.py | 4 +- ..._multiple_version_protocol_feature_test.py | 174 ++++++++++++++++++ tests/nodeos_protocol_feature_test.py | 10 +- tests/testUtils.py | 7 +- 7 files changed, 214 insertions(+), 27 deletions(-) create mode 100755 tests/nodeos_multiple_version_protocol_feature_test.py diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index fb34062c900..d635f2083dc 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -42,6 +42,7 @@ configure_file(${CMAKE_CURRENT_SOURCE_DIR}/nodeos_under_min_avail_ram.py ${CMAKE configure_file(${CMAKE_CURRENT_SOURCE_DIR}/nodeos_voting_test.py ${CMAKE_CURRENT_BINARY_DIR}/nodeos_voting_test.py COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/nodeos_irreversible_mode_test.py ${CMAKE_CURRENT_BINARY_DIR}/nodeos_irreversible_mode_test.py COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/nodeos_protocol_feature_test.py ${CMAKE_CURRENT_BINARY_DIR}/nodeos_protocol_feature_test.py COPYONLY) +configure_file(${CMAKE_CURRENT_SOURCE_DIR}/nodeos_multiple_version_protocol_feature_test.py ${CMAKE_CURRENT_BINARY_DIR}/nodeos_multiple_version_protocol_feature_test.py COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/consensus-validation-malicious-producers.py ${CMAKE_CURRENT_BINARY_DIR}/consensus-validation-malicious-producers.py COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/validate-dirty-db.py ${CMAKE_CURRENT_BINARY_DIR}/validate-dirty-db.py COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/launcher_test.py ${CMAKE_CURRENT_BINARY_DIR}/launcher_test.py COPYONLY) @@ -105,6 +106,9 @@ set_property(TEST nodeos_remote_lr_test PROPERTY LABELS long_running_tests) add_test(NAME nodeos_forked_chain_lr_test COMMAND tests/nodeos_forked_chain_test.py -v --wallet-port 9901 --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST nodeos_forked_chain_lr_test PROPERTY LABELS long_running_tests) +add_test(NAME nodeos_multiple_version_protocol_feature_lr_test COMMAND tests/nodeos_multiple_version_protocol_feature_test.py -v --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +set_property(TEST nodeos_multiple_version_protocol_feature_lr_test PROPERTY LABELS long_running_tests) + add_test(NAME nodeos_voting_lr_test COMMAND tests/nodeos_voting_test.py -v --wallet-port 9902 --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST nodeos_voting_lr_test PROPERTY LABELS long_running_tests) diff --git a/tests/Cluster.py b/tests/Cluster.py index 13d1a2e6abe..3476bf9de03 100644 --- 
a/tests/Cluster.py +++ b/tests/Cluster.py @@ -144,8 +144,8 @@ def setAlternateVersionLabels(self, file): # pylint: disable=too-many-branches # pylint: disable=too-many-statements def launch(self, pnodes=1, totalNodes=1, prodCount=1, topo="mesh", p2pPlugin="net", delay=1, onlyBios=False, dontBootstrap=False, - totalProducers=None, extraNodeosArgs=None, useBiosBootFile=True, specificExtraNodeosArgs=None, pfSetupPolicy = PFSetupPolicy.FULL, alternateVersionLabelsFile=None, - associatedNodeLabels=None): + totalProducers=None, extraNodeosArgs=None, useBiosBootFile=True, specificExtraNodeosArgs=None, onlySetProds=False, + pfSetupPolicy=PFSetupPolicy.FULL, alternateVersionLabelsFile=None, associatedNodeLabels=None): """Launch cluster. pnodes: producer nodes count totalNodes: producer + non-producer nodes count @@ -161,6 +161,7 @@ def launch(self, pnodes=1, totalNodes=1, prodCount=1, topo="mesh", p2pPlugin="ne A value of false uses manual bootstrapping in this script, which does not do things like stake votes for producers. specificExtraNodeosArgs: dictionary of arguments to pass to a specific node (via --specific-num and --specific-nodeos flags on launcher), example: { "5" : "--plugin eosio::test_control_api_plugin" } + onlySetProds: Stop the bootstrap process after setting the producers (only if useBiosBootFile is false) pfSetupPolicy: determine the protocol feature setup policy (none, preactivate_feature_only, or full) alternateVersionLabelsFile: Supply an alternate version labels file to use with associatedNodeLabels. associatedNodeLabels: Supply a dictionary of node numbers to use an alternate label for a specific node. @@ -420,7 +421,7 @@ def connectGroup(group, producerNodes, bridgeNodes) : Utils.Print("Bootstrap cluster.") if onlyBios or not useBiosBootFile: - self.biosNode=self.bootstrap(biosNode, totalNodes, prodCount, totalProducers, pfSetupPolicy, onlyBios) + self.biosNode=self.bootstrap(biosNode, totalNodes, prodCount, totalProducers, pfSetupPolicy, onlyBios, onlySetProds) else: self.useBiosBootFile=True self.biosNode=self.bios_bootstrap(biosNode, totalNodes, pfSetupPolicy) @@ -998,7 +999,7 @@ def bios_bootstrap(self, biosNode, totalNodes, pfSetupPolicy, silent=False): return biosNode - def bootstrap(self, biosNode, totalNodes, prodCount, totalProducers, pfSetupPolicy, onlyBios=False): + def bootstrap(self, biosNode, totalNodes, prodCount, totalProducers, pfSetupPolicy, onlyBios=False, onlySetProds=False): """Create 'prodCount' init accounts and deposits 10000000000 SYS in each. If prodCount is -1 will initialize all possible producers. Ensure nodes are inter-connected prior to this call. 
One way to validate this will be to check if every node has block 1.""" @@ -1134,6 +1135,8 @@ def bootstrap(self, biosNode, totalNodes, prodCount, totalProducers, pfSetupPoli Utils.Print("ERROR: Block production handover failed.") return None + if onlySetProds: return biosNode + eosioTokenAccount=copy.deepcopy(eosioAccount) eosioTokenAccount.name="eosio.token" trans=biosNode.createAccount(eosioTokenAccount, eosioAccount, 0) diff --git a/tests/Node.py b/tests/Node.py index cf65e2282f0..1ac81c5ebaa 100644 --- a/tests/Node.py +++ b/tests/Node.py @@ -1303,7 +1303,8 @@ def getNextCleanProductionCycle(self, trans): # TBD: make nodeId an internal property # pylint: disable=too-many-locals - def relaunch(self, nodeId, chainArg, newChain=False, timeout=Utils.systemWaitTimeout, addOrSwapFlags=None, cachePopen=False): + # If nodeosPath is equal to None, it will use the existing nodeos path + def relaunch(self, nodeId, chainArg, newChain=False, timeout=Utils.systemWaitTimeout, addOrSwapFlags=None, cachePopen=False, nodeosPath=None): assert(self.pid is None) assert(self.killed) @@ -1312,12 +1313,14 @@ def relaunch(self, nodeId, chainArg, newChain=False, timeout=Utils.systemWaitTim if Utils.Debug: Utils.Print("Launching node process, Id: {}".format(nodeId)) cmdArr=[] - myCmd=self.cmd + splittedCmd=self.cmd.split() + if nodeosPath: splittedCmd[0] = nodeosPath + myCmd="".join(splittedCmd) toAddOrSwap=copy.deepcopy(addOrSwapFlags) if addOrSwapFlags is not None else {} if not newChain: skip=False swapValue=None - for i in self.cmd.split(): + for i in splittedCmd: Utils.Print("\"%s\"" % (i)) if skip: skip=False @@ -1338,9 +1341,7 @@ def relaunch(self, nodeId, chainArg, newChain=False, timeout=Utils.systemWaitTim for k,v in toAddOrSwap.items(): cmdArr.append(k) cmdArr.append(v) - myCmd=" ".join(cmdArr) - if nodeId == "bios": dataDir="var/lib/node_bios" else: @@ -1466,13 +1467,13 @@ def waitForHeadToAdvance(self, timeout=6): currentHead = self.getHeadBlockNum() def isHeadAdvancing(): return self.getHeadBlockNum() > currentHead - Utils.waitForBool(isHeadAdvancing, timeout) + return Utils.waitForBool(isHeadAdvancing, timeout) - def waitForLibToAdvance(self, timeout=6): + def waitForLibToAdvance(self, timeout=30): currentLib = self.getIrreversibleBlockNum() def isLibAdvancing(): return self.getIrreversibleBlockNum() > currentLib - Utils.waitForBool(isLibAdvancing, timeout) + return Utils.waitForBool(isLibAdvancing, timeout) # Require producer_api_plugin def activatePreactivateFeature(self): @@ -1522,3 +1523,17 @@ def getLatestBlockHeaderState(self): def getActivatedProtocolFeatures(self): latestBlockHeaderState = self.getLatestBlockHeaderState() return latestBlockHeaderState["activated_protocol_features"]["protocol_features"] + + def modifyBuiltinPFSubjRestrictions(self, nodeId, featureCodename, subjectiveRestriction={}): + from Cluster import Cluster + jsonPath = os.path.join(Cluster.getConfigDir(nodeId), + "protocol_features", + "BUILTIN-{}.json".format(featureCodename)) + protocolFeatureJson = [] + with open(jsonPath) as f: + protocolFeatureJson = json.load(f) + protocolFeatureJson["subjective_restrictions"] = { + **protocolFeatureJson["subjective_restrictions"], **subjectiveRestriction + } + with open(jsonPath, "w") as f: + json.dump(protocolFeatureJson, f, indent=2) diff --git a/tests/TestHelper.py b/tests/TestHelper.py index a9920a731c1..2c606263420 100644 --- a/tests/TestHelper.py +++ b/tests/TestHelper.py @@ -105,8 +105,8 @@ def parse_args(includeArgs, applicationSpecificArgs=AppArgs()): 
parser.add_argument("--clean-run", help="Kill all nodeos and kleos instances", action='store_true') if "--sanity-test" in includeArgs: parser.add_argument("--sanity-test", help="Validates nodeos and kleos are in path and can be started up.", action='store_true') - if "--alternate-versions-labels-file" in includeArgs: - parser.add_argument("--alternate-versions-labels-file", type=str, help="Provide a file to define the labels that can be used in the test and the path to the version installation associated with that.") + if "--alternate-version-labels-file" in includeArgs: + parser.add_argument("--alternate-version-labels-file", type=str, help="Provide a file to define the labels that can be used in the test and the path to the version installation associated with that.") for arg in applicationSpecificArgs.args: parser.add_argument(arg.flag, type=arg.type, help=arg.help, choices=arg.choices, default=arg.default) diff --git a/tests/nodeos_multiple_version_protocol_feature_test.py b/tests/nodeos_multiple_version_protocol_feature_test.py new file mode 100755 index 00000000000..8b70ff68ab1 --- /dev/null +++ b/tests/nodeos_multiple_version_protocol_feature_test.py @@ -0,0 +1,174 @@ +#!/usr/bin/env python3 + +from testUtils import Utils +from Cluster import Cluster, PFSetupPolicy +from TestHelper import TestHelper +from WalletMgr import WalletMgr +from Node import Node + +import signal +import json +import time +from os.path import join +from datetime import datetime + +# Parse command line arguments +args = TestHelper.parse_args({"-v","--clean-run","--dump-error-details","--leave-running", + "--keep-logs", "--alternate-version-labels-file"}) +Utils.Debug=args.v +killAll=args.clean_run +dumpErrorDetails=args.dump_error_details +dontKill=args.leave_running +killEosInstances=not dontKill +killWallet=not dontKill +keepLogs=args.keep_logs +alternateVersionLabelsFile=args.alternate_version_labels_file + +walletMgr=WalletMgr(True) +cluster=Cluster(walletd=True) +cluster.setWalletMgr(walletMgr) + +def restartNode(node: Node, nodeId, chainArg=None, addOrSwapFlags=None, nodeosPath=None): + if not node.killed: + node.kill(signal.SIGTERM) + isRelaunchSuccess = node.relaunch(nodeId, chainArg, addOrSwapFlags=addOrSwapFlags, + timeout=5, cachePopen=True, nodeosPath=nodeosPath) + assert isRelaunchSuccess, "Fail to relaunch" + +def shouldNodeContainPreactivateFeature(node): + preactivateFeatureDigest = node.getSupportedProtocolFeatureDict()["PREACTIVATE_FEATURE"]["feature_digest"] + assert preactivateFeatureDigest + blockHeaderState = node.getLatestBlockHeaderState() + activatedProtocolFeatures = blockHeaderState["activated_protocol_features"]["protocol_features"] + return preactivateFeatureDigest in activatedProtocolFeatures + +def waitUntilBeginningOfProdTurn(node, producerName, timeout=30, sleepTime=0.5): + def isDesiredProdTurn(): + headBlockNum = node.getHeadBlockNum() + res = node.getBlock(headBlockNum)["producer"] == producerName and \ + node.getBlock(headBlockNum-1)["producer"] != producerName + return res + Utils.waitForBool(isDesiredProdTurn, timeout, sleepTime) + +def waitForOneRound(): + time.sleep(24) # We have 4 producers for this test + +def setValidityOfActTimeSubjRestriction(node, nodeId, codename, valid): + invalidActTimeSubjRestriction = { + "earliest_allowed_activation_time": "2030-01-01T00:00:00.000", + } + validActTimeSubjRestriction = { + "earliest_allowed_activation_time": "1970-01-01T00:00:00.000", + } + actTimeSubjRestriction = validActTimeSubjRestriction if valid else 
invalidActTimeSubjRestriction + node.modifyBuiltinPFSubjRestrictions(nodeId, codename, actTimeSubjRestriction) + restartNode(node, nodeId) + +# Flag to indicate the test result +testSuccessful = False +try: + TestHelper.printSystemInfo("BEGIN") + cluster.killall(allInstances=killAll) + cluster.cleanup() + + # Create a cluster of 4 nodes, each node has 1 producer. The first 3 nodes use the latest version, + # While the 4th node uses the version that doesn't support protocol feature activation (i.e. 1.7.0) + if not alternateVersionLabelsFile: + alternateVersionLabelsFile="/Users/andrianto/alternate-version-labels-file.txt" + associatedNodeLabels = { + "3": "170" + } + cluster.launch(pnodes=4, totalNodes=4, prodCount=1, totalProducers=4, + extraNodeosArgs=" --plugin eosio::producer_api_plugin ", + useBiosBootFile=False, + onlySetProds=True, + pfSetupPolicy=PFSetupPolicy.NONE, + alternateVersionLabelsFile=alternateVersionLabelsFile, + associatedNodeLabels=associatedNodeLabels) + + def pauseBlockProduction(): + for node in cluster.nodes: + node.sendRpcApi("v1/producer/pause") + + def resumeBlockProduction(): + for node in cluster.nodes: + node.sendRpcApi("v1/producer/resume") + + def shouldNodesBeInSync(nodes:[Node]): + # Pause all block production to ensure the head is not moving + pauseBlockProduction() + time.sleep(1) # Wait for some time to ensure all blocks are propagated + headBlockIds = [] + for node in nodes: + headBlockId = node.getInfo()["head_block_id"] + headBlockIds.append(headBlockId) + resumeBlockProduction() + return len(set(headBlockIds)) == 1 + + newNodeIds = [0, 1, 2] + oldNodeId = 3 + newNodes = list(map(lambda id: cluster.getNode(id), newNodeIds)) + oldNode = cluster.getNode(oldNodeId) + allNodes = [*newNodes, oldNode] + + # Before everything starts, all nodes (new version and old version) should be in sync + assert shouldNodesBeInSync(allNodes), "Nodes are not in sync before preactivation" + + # First, we are going to test the case where: + # - 1st node has valid earliest_allowed_activation_time + # - While 2nd and 3rd node have invalid earliest_allowed_activation_time + # The producer in the 1st node is going to activate PREACTIVATE_FEATURE during its turn + # Immediately, in the next block PREACTIVATE_FEATURE should be active on the 1st node, but not on the 2nd and 3rd + # Therefore, the 1st node will be out of sync with the 2nd, 3rd, and 4th node + # After a round has passed though, the 1st node will realize it is on a minority fork and then rejoin the other nodes + # Hence, the PREACTIVATE_FEATURE that was previously activated will be dropped and all of the nodes should be in sync + setValidityOfActTimeSubjRestriction(newNodes[1], newNodeIds[1], "PREACTIVATE_FEATURE", False) + setValidityOfActTimeSubjRestriction(newNodes[2], newNodeIds[2], "PREACTIVATE_FEATURE", False) + + waitUntilBeginningOfProdTurn(newNodes[0], "defproducera") + newNodes[0].activatePreactivateFeature() + assert shouldNodeContainPreactivateFeature(newNodes[0]), "1st node should contain PREACTIVATE_FEATURE" + assert not (shouldNodeContainPreactivateFeature(newNodes[1]) or shouldNodeContainPreactivateFeature(newNodes[2])), \ + "2nd and 3rd node should not contain PREACTIVATE_FEATURE" + assert shouldNodesBeInSync([newNodes[1], newNodes[2], oldNode]), "2nd, 3rd and 4th node should be in sync" + assert not shouldNodesBeInSync(allNodes), "1st node should be out of sync with the rest of the nodes" + + waitForOneRound() + + assert not shouldNodeContainPreactivateFeature(newNodes[0]), "PREACTIVATE_FEATURE should be dropped"
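+ # Why waitForOneRound() above is enough: assuming the stock EOSIO timing constants + # (0.5s block interval, 12 consecutive blocks per producer turn), one full round with + # 4 producers lasts 4 * 12 * 0.5s = 24 seconds, which is exactly the sleep it uses; + # by this point every producer, including the one that was on the minority fork, has + # had a complete turn, so the fork should have resolved.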
+ assert shouldNodesBeInSync(allNodes), "All nodes should be in sync" + + # Then we set the earliest_allowed_activation_time of 2nd node and 3rd node with valid value + # Once the 1st node activates PREACTIVATE_FEATURE, all of them should have PREACTIVATE_FEATURE activated in the next block + # They will be in sync and their LIB will advance since they control > 2/3 of the producers + # However, the 4th node will be out of sync with them, and its LIB will never advance + setValidityOfActTimeSubjRestriction(newNodes[1], newNodeIds[1], "PREACTIVATE_FEATURE", True) + setValidityOfActTimeSubjRestriction(newNodes[2], newNodeIds[2], "PREACTIVATE_FEATURE", True) + + waitUntilBeginningOfProdTurn(newNodes[0], "defproducera") + newNodes[0].activatePreactivateFeature() + + assert shouldNodesBeInSync(newNodes), "New nodes should be in sync" + assert not shouldNodesBeInSync(allNodes), "Nodes should not be in sync after preactivation" + for node in newNodes: assert shouldNodeContainPreactivateFeature(node), "New node should contain PREACTIVATE_FEATURE" + assert newNodes[0].waitForLibToAdvance(), "1st node LIB should advance" + newNode0Lib = newNodes[0].getIrreversibleBlockNum() + assert newNodes[1].getIrreversibleBlockNum() >= newNode0Lib and \ + newNodes[1].getIrreversibleBlockNum() >= newNode0Lib, "2nd and 3rd node LIB should also advance" + assert not oldNode.waitForLibToAdvance(), "4th node LIB should not advance" + + # Restart old node with newest version + # Finally, when we restart the 4th node with the version of nodeos that supports protocol features, + # all nodes should be in sync, and the 4th node will also contain PREACTIVATE_FEATURE + restartNode(oldNode, oldNodeId, chainArg=" --replay ", nodeosPath="programs/nodeos/nodeos") + time.sleep(2) # Give some time to replay + + assert shouldNodesBeInSync(allNodes), "All nodes should be in sync" + assert shouldNodeContainPreactivateFeature(oldNode), "4th node should contain PREACTIVATE_FEATURE" + + testSuccessful = True +finally: + TestHelper.shutdown(cluster, walletMgr, testSuccessful, killEosInstances, killWallet, keepLogs, killAll, dumpErrorDetails) + +exitCode = 0 if testSuccessful else 1 +exit(exitCode) diff --git a/tests/nodeos_protocol_feature_test.py b/tests/nodeos_protocol_feature_test.py index 2c581b39dbd..e42f934b2f4 100755 --- a/tests/nodeos_protocol_feature_test.py +++ b/tests/nodeos_protocol_feature_test.py @@ -22,14 +22,6 @@ keepLogs=args.keep_logs # The following test case will test the Protocol Feature JSON reader of the blockchain -def modifyPFSubjectiveRestrictions(nodeId, featureCodename, subjectiveRestrictions): - jsonPath = join(Cluster.getConfigDir(nodeId), "protocol_features", "BUILTIN-{}.json".format(featureCodename)) - protocolFeatureJson = [] - with open(jsonPath) as f: - protocolFeatureJson = json.load(f) - protocolFeatureJson["subjective_restrictions"] = subjectiveRestrictions - with open(jsonPath, "w") as f: - json.dump(protocolFeatureJson, f, indent=2) def restartNode(node: Node, nodeId, chainArg=None, addOrSwapFlags=None): if not node.killed: @@ -58,7 +50,7 @@ def restartNode(node: Node, nodeId, chainArg=None, addOrSwapFlags=None): "preactivation_required": True, "enabled": False } - modifyPFSubjectiveRestrictions("bios", "PREACTIVATE_FEATURE", newSubjectiveRestrictions) + biosNode.modifyBuiltinPFSubjRestrictions("bios", "PREACTIVATE_FEATURE", newSubjectiveRestrictions) restartNode(biosNode, "bios") supportedProtocolFeatureDict = biosNode.getSupportedProtocolFeatureDict() diff --git a/tests/testUtils.py
index 9e7e9c604be..351b56537b1 100755 --- a/tests/testUtils.py +++ b/tests/testUtils.py @@ -108,7 +108,7 @@ def cmdError(name, cmdCode=0): Utils.Print(msg) @staticmethod - def waitForObj(lam, timeout=None): + def waitForObj(lam, timeout=None, sleepTime=3): if timeout is None: timeout=60 @@ -119,7 +119,6 @@ def waitForObj(lam, timeout=None): ret=lam() if ret is not None: return ret - sleepTime=3 if Utils.Debug: Utils.Print("cmd: sleep %d seconds, remaining time: %d seconds" % (sleepTime, endTime - time.time())) @@ -135,9 +134,9 @@ def waitForObj(lam, timeout=None): return None @staticmethod - def waitForBool(lam, timeout=None): + def waitForBool(lam, timeout=None, sleepTime=3): myLam = lambda: True if lam() else None - ret=Utils.waitForObj(myLam, timeout) + ret=Utils.waitForObj(myLam, timeout, sleepTime) return False if ret is None else ret @staticmethod From 1433583b44b479a3f9b270b243dea726a88822af Mon Sep 17 00:00:00 2001 From: Andrianto Lie Date: Wed, 27 Mar 2019 10:13:28 +0800 Subject: [PATCH 221/680] Change label of nodeos_multiple_version_protocol_feature_test --- tests/CMakeLists.txt | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index d635f2083dc..78eaed27be6 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -106,9 +106,6 @@ set_property(TEST nodeos_remote_lr_test PROPERTY LABELS long_running_tests) add_test(NAME nodeos_forked_chain_lr_test COMMAND tests/nodeos_forked_chain_test.py -v --wallet-port 9901 --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST nodeos_forked_chain_lr_test PROPERTY LABELS long_running_tests) -add_test(NAME nodeos_multiple_version_protocol_feature_lr_test COMMAND tests/nodeos_multiple_version_protocol_feature_test.py -v --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) -set_property(TEST nodeos_multiple_version_protocol_feature_lr_test PROPERTY LABELS long_running_tests) - add_test(NAME nodeos_voting_lr_test COMMAND tests/nodeos_voting_test.py -v --wallet-port 9902 --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST nodeos_voting_lr_test PROPERTY LABELS long_running_tests) @@ -121,6 +118,9 @@ set_property(TEST nodeos_under_min_avail_ram_lr_test PROPERTY LABELS long_runnin add_test(NAME nodeos_irreversible_mode_lr_test COMMAND tests/nodeos_irreversible_mode_test.py -v --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST nodeos_irreversible_mode_lr_test PROPERTY LABELS long_running_tests) +add_test(NAME nodeos_multiple_version_protocol_feature_mv_test COMMAND tests/nodeos_multiple_version_protocol_feature_test.py -v --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +set_property(TEST nodeos_multiple_version_protocol_feature_mv_test PROPERTY LABELS mixed_version_tests) + if(ENABLE_COVERAGE_TESTING) set(Coverage_NAME ${PROJECT_NAME}_coverage) From dec455460339d572b086233f4c22e8b1da852763 Mon Sep 17 00:00:00 2001 From: Andrianto Lie Date: Wed, 27 Mar 2019 10:55:54 +0800 Subject: [PATCH 222/680] Move alternate version label file to cmake --- tests/CMakeLists.txt | 4 +++- tests/nodeos_multiple_version_protocol_feature_test.py | 5 ++--- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index 78eaed27be6..71f1146a413 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -118,7 +118,9 @@ set_property(TEST nodeos_under_min_avail_ram_lr_test PROPERTY LABELS long_runnin 
add_test(NAME nodeos_irreversible_mode_lr_test COMMAND tests/nodeos_irreversible_mode_test.py -v --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST nodeos_irreversible_mode_lr_test PROPERTY LABELS long_running_tests) -add_test(NAME nodeos_multiple_version_protocol_feature_mv_test COMMAND tests/nodeos_multiple_version_protocol_feature_test.py -v --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +set(ALTERNATE_VERSION_LABELS_FILE "/Users/andrianto/alternate-version-labels-file.txt") +add_test(NAME nodeos_multiple_version_protocol_feature_mv_test COMMAND tests/nodeos_multiple_version_protocol_feature_test.py + -v --clean-run --dump-error-detail --alternate-version-labels-file ${ALTERNATE_VERSION_LABELS_FILE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST nodeos_multiple_version_protocol_feature_mv_test PROPERTY LABELS mixed_version_tests) if(ENABLE_COVERAGE_TESTING) diff --git a/tests/nodeos_multiple_version_protocol_feature_test.py b/tests/nodeos_multiple_version_protocol_feature_test.py index 8b70ff68ab1..213685d39ad 100755 --- a/tests/nodeos_multiple_version_protocol_feature_test.py +++ b/tests/nodeos_multiple_version_protocol_feature_test.py @@ -9,7 +9,7 @@ import signal import json import time -from os.path import join +from os.path import join, exists from datetime import datetime @@ -73,11 +73,10 @@ # Create a cluster of 4 nodes, each node has 1 producer. The first 3 nodes use the latest version, - # While the 4th node uses the version that doesn't support protocol feature activation (i.e. 1.7.0) - if not alternateVersionLabelsFile: - alternateVersionLabelsFile="/Users/andrianto/alternate-version-labels-file.txt" associatedNodeLabels = { "3": "170" } + assert exists(alternateVersionLabelsFile), "Alternate version labels file does not exist" cluster.launch(pnodes=4, totalNodes=4, prodCount=1, totalProducers=4, extraNodeosArgs=" --plugin eosio::producer_api_plugin ", useBiosBootFile=False, From b8dd9d4a1dbd9a70ffb7828508d0ed891f704f95 Mon Sep 17 00:00:00 2001 From: arhag Date: Tue, 26 Mar 2019 23:33:51 -0400 Subject: [PATCH 223/680] remove redundant line --- libraries/chain/controller.cpp | 1 - 1 file changed, 1 deletion(-) diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp index 8bec114d6be..d2faef7bde0 100644 --- a/libraries/chain/controller.cpp +++ b/libraries/chain/controller.cpp @@ -2980,7 +2980,6 @@ void controller_impl::on_activation void controller_impl::on_activation() { const auto& indx = db.get_index(); - auto itr = indx.begin(); for( auto itr = indx.begin(); itr != indx.end(); itr = indx.begin() ) { int64_t current_ram_usage = resource_limits.get_account_ram_usage( itr->name ); int64_t ram_delta = -static_cast(itr->ram_correction); From 1d1e48c9fbc81c9d9133891e14cde0a821ea9d48 Mon Sep 17 00:00:00 2001 From: Kayan Date: Wed, 27 Mar 2019 16:27:44 +0800 Subject: [PATCH 224/680] add test case for issue 6672 --- unittests/protocol_feature_tests.cpp | 74 ++++++++++++++++++++++++++++ 1 file changed, 74 insertions(+) diff --git a/unittests/protocol_feature_tests.cpp b/unittests/protocol_feature_tests.cpp index 2a7a466f556..881746adb71 100644 --- a/unittests/protocol_feature_tests.cpp +++ b/unittests/protocol_feature_tests.cpp @@ -485,4 +485,78 @@ BOOST_AUTO_TEST_CASE( replace_deferred_test ) try { } FC_LOG_AND_RETHROW() +BOOST_AUTO_TEST_CASE( fix_linkauth_restriction ) { try { + tester chain(
setup_policy::preactivate_feature_and_new_bios ); + + const auto& tester_account = N(tester); + std::vector ids; + + chain.produce_blocks(); + chain.create_account(N(currency)); + + chain.produce_blocks(); + chain.create_account(N(tester)); + chain.create_account(N(tester2)); + chain.produce_blocks(); + + chain.push_action(config::system_account_name, updateauth::get_name(), tester_account, fc::mutable_variant_object() + ("account", "tester") + ("permission", "first") + ("parent", "active") + ("auth", authority(chain.get_public_key(tester_account, "first"), 5)) + ); + + auto validate_disallow = [&] (const char *code, const char *type) { + BOOST_REQUIRE_EXCEPTION( + chain.push_action(config::system_account_name, linkauth::get_name(), tester_account, fc::mutable_variant_object() + ("account", "tester") + ("code", code) + ("type", type) + ("requirement", "first")), + action_validate_exception, + fc_exception_message_is(std::string("Cannot link eosio::") + std::string(type) + std::string(" to a minimum permission")) + ); + }; + + validate_disallow("eosio", "linkauth"); + validate_disallow("eosio", "unlinkauth"); + validate_disallow("eosio", "deleteauth"); + validate_disallow("eosio", "updateauth"); + validate_disallow("eosio", "canceldelay"); + + validate_disallow("currency", "linkauth"); + validate_disallow("currency", "unlinkauth"); + validate_disallow("currency", "deleteauth"); + validate_disallow("currency", "updateauth"); + validate_disallow("currency", "canceldelay"); + + const auto& pfm = chain.control->get_protocol_feature_manager(); + auto d = pfm.get_builtin_digest( builtin_protocol_feature_t::fix_linkauth_restriction ); + BOOST_REQUIRE( d ); + + chain.preactivate_protocol_features( {*d} ); + chain.produce_block(); + + auto validate_allowed = [&] (const char *code, const char *type) { + chain.push_action(config::system_account_name, linkauth::get_name(), tester_account, fc::mutable_variant_object() + ("account", "tester") + ("code", code) + ("type", type) + ("requirement", "first")); + }; + + validate_disallow("eosio", "linkauth"); + validate_disallow("eosio", "unlinkauth"); + validate_disallow("eosio", "deleteauth"); + validate_disallow("eosio", "updateauth"); + validate_disallow("eosio", "canceldelay"); + + validate_allowed("currency", "linkauth"); + validate_allowed("currency", "unlinkauth"); + validate_allowed("currency", "deleteauth"); + validate_allowed("currency", "updateauth"); + validate_allowed("currency", "canceldelay"); + +} FC_LOG_AND_RETHROW() } + BOOST_AUTO_TEST_SUITE_END() From ccd6e53dc44b76c5351424d1896955574e230248 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Wed, 27 Mar 2019 12:19:03 -0500 Subject: [PATCH 225/680] Attempt to make comment clearer --- plugins/mongo_db_plugin/mongo_db_plugin.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/plugins/mongo_db_plugin/mongo_db_plugin.cpp b/plugins/mongo_db_plugin/mongo_db_plugin.cpp index 2ba100bdc84..0adb1670068 100644 --- a/plugins/mongo_db_plugin/mongo_db_plugin.cpp +++ b/plugins/mongo_db_plugin/mongo_db_plugin.cpp @@ -1462,10 +1462,10 @@ void mongo_db_plugin_impl::init() { } try { - // Due to the vast amounts of data, we suggest MongoDB administrators: + // MongoDB administrators (to enable sharding) : // 1. enableSharding database (default to EOS) // 2. shardCollection: blocks, action_traces, transaction_traces, especially action_traces - // 3. Use compound index with shard key (default to _id), to improve query performance. + // 3. 
Compound index with shard key (default to _id below), to improve query performance. // blocks indexes auto blocks = mongo_conn[db_name][blocks_col]; From 85403df3a1ba476a708a79c3d20a39cac44bf0c2 Mon Sep 17 00:00:00 2001 From: arhag Date: Wed, 27 Mar 2019 17:29:14 -0400 Subject: [PATCH 226/680] add multiversion.conf file needed by multiversion tests pipeline; ALTERNATE_VERSION_LABELS_FILE path now points to where the autogenerated multiversion_paths.conf file will be located --- tests/CMakeLists.txt | 3 ++- tests/multiversion.conf | 2 ++ 2 files changed, 4 insertions(+), 1 deletion(-) create mode 100644 tests/multiversion.conf diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index 71f1146a413..96d51701655 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -118,7 +118,8 @@ set_property(TEST nodeos_under_min_avail_ram_lr_test PROPERTY LABELS long_runnin add_test(NAME nodeos_irreversible_mode_lr_test COMMAND tests/nodeos_irreversible_mode_test.py -v --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST nodeos_irreversible_mode_lr_test PROPERTY LABELS long_running_tests) -set(ALTERNATE_VERSION_LABELS_FILE "/Users/andrianto/alternate-version-labels-file.txt") +set(ALTERNATE_VERSION_LABELS_FILE "${CMAKE_BINARY_DIR}/tests/multiversion_paths.conf") + add_test(NAME nodeos_multiple_version_protocol_feature_mv_test COMMAND tests/nodeos_multiple_version_protocol_feature_test.py -v --clean-run --dump-error-detail --alternate-version-labels-file ${ALTERNATE_VERSION_LABELS_FILE} WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST nodeos_multiple_version_protocol_feature_mv_test PROPERTY LABELS mixed_version_tests) diff --git a/tests/multiversion.conf b/tests/multiversion.conf new file mode 100644 index 00000000000..544263173a9 --- /dev/null +++ b/tests/multiversion.conf @@ -0,0 +1,2 @@ +[eosio] +170=v1.7.0 From c62b7ae40eb4b2a6ce2282f526403501c819fb1e Mon Sep 17 00:00:00 2001 From: arhag Date: Wed, 27 Mar 2019 20:49:27 -0400 Subject: [PATCH 227/680] bug fixes in nodeos_multiple_version_protocol_feature_test.py and Node.py --- tests/Node.py | 6 ++---- tests/nodeos_multiple_version_protocol_feature_test.py | 4 ++-- 2 files changed, 4 insertions(+), 6 deletions(-) diff --git a/tests/Node.py b/tests/Node.py index 1ac81c5ebaa..9621186c9f9 100644 --- a/tests/Node.py +++ b/tests/Node.py @@ -1315,7 +1315,7 @@ def relaunch(self, nodeId, chainArg, newChain=False, timeout=Utils.systemWaitTim cmdArr=[] splittedCmd=self.cmd.split() if nodeosPath: splittedCmd[0] = nodeosPath - myCmd="".join(splittedCmd) + myCmd=" ".join(splittedCmd) toAddOrSwap=copy.deepcopy(addOrSwapFlags) if addOrSwapFlags is not None else {} if not newChain: skip=False @@ -1532,8 +1532,6 @@ def modifyBuiltinPFSubjRestrictions(self, nodeId, featureCodename, subjectiveRes protocolFeatureJson = [] with open(jsonPath) as f: protocolFeatureJson = json.load(f) - protocolFeatureJson["subjective_restrictions"] = { - **protocolFeatureJson["subjective_restrictions"], **subjectiveRestriction - } + protocolFeatureJson["subjective_restrictions"].update(subjectiveRestriction) with open(jsonPath, "w") as f: json.dump(protocolFeatureJson, f, indent=2) diff --git a/tests/nodeos_multiple_version_protocol_feature_test.py b/tests/nodeos_multiple_version_protocol_feature_test.py index 213685d39ad..83ec622b7a6 100755 --- a/tests/nodeos_multiple_version_protocol_feature_test.py +++ b/tests/nodeos_multiple_version_protocol_feature_test.py @@ -42,7 +42,7 @@ def shouldNodeContainPreactivateFeature(node): 
activatedProtocolFeatures = blockHeaderState["activated_protocol_features"]["protocol_features"] return preactivateFeatureDigest in activatedProtocolFeatures -def waitUntilBeginningOfProdTurn(node, producerName, timeout=30, sleepTime=0.5): +def waitUntilBeginningOfProdTurn(node, producerName, timeout=30, sleepTime=0.4): def isDesiredProdTurn(): headBlockNum = node.getHeadBlockNum() res = node.getBlock(headBlockNum)["producer"] == producerName and \ @@ -153,7 +153,7 @@ def shouldNodesBeInSync(nodes:[Node]): assert newNodes[0].waitForLibToAdvance(), "1st node LIB should advance" newNode0Lib = newNodes[0].getIrreversibleBlockNum() assert newNodes[1].getIrreversibleBlockNum() >= newNode0Lib and \ - newNodes[1].getIrreversibleBlockNum() >= newNode0Lib, "2nd and 3rd node LIB should also advance" + newNodes[2].getIrreversibleBlockNum() >= newNode0Lib, "2nd and 3rd node LIB should also advance" assert not oldNode.waitForLibToAdvance(), "4th node LIB should not advance" # Restart old node with newest version From 560efeb446cb3b7ca7786f434eca0531c6587140 Mon Sep 17 00:00:00 2001 From: Kayan Date: Thu, 28 Mar 2019 14:05:29 +0800 Subject: [PATCH 228/680] fix test case --- unittests/protocol_feature_tests.cpp | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/unittests/protocol_feature_tests.cpp b/unittests/protocol_feature_tests.cpp index 881746adb71..eeb26def023 100644 --- a/unittests/protocol_feature_tests.cpp +++ b/unittests/protocol_feature_tests.cpp @@ -489,18 +489,14 @@ BOOST_AUTO_TEST_CASE( fix_linkauth_restriction ) { try { tester chain( setup_policy::preactivate_feature_and_new_bios ); const auto& tester_account = N(tester); - std::vector ids; chain.produce_blocks(); chain.create_account(N(currency)); - - chain.produce_blocks(); - chain.create_account(N(tester)); - chain.create_account(N(tester2)); + chain.create_account(tester_account); chain.produce_blocks(); chain.push_action(config::system_account_name, updateauth::get_name(), tester_account, fc::mutable_variant_object() - ("account", "tester") + ("account", name(tester_account).to_string()) ("permission", "first") ("parent", "active") ("auth", authority(chain.get_public_key(tester_account, "first"), 5)) @@ -509,7 +505,7 @@ BOOST_AUTO_TEST_CASE( fix_linkauth_restriction ) { try { auto validate_disallow = [&] (const char *code, const char *type) { BOOST_REQUIRE_EXCEPTION( chain.push_action(config::system_account_name, linkauth::get_name(), tester_account, fc::mutable_variant_object() - ("account", "tester") + ("account", name(tester_account).to_string()) ("code", code) ("type", type) ("requirement", "first")), @@ -539,7 +535,7 @@ BOOST_AUTO_TEST_CASE( fix_linkauth_restriction ) { try { auto validate_allowed = [&] (const char *code, const char *type) { chain.push_action(config::system_account_name, linkauth::get_name(), tester_account, fc::mutable_variant_object() - ("account", "tester") + ("account", name(tester_account).to_string()) ("code", code) ("type", type) ("requirement", "first")); From b8dd9d4a1dbd9a70ffb7828508d0ed891f704f95 Mon Sep 17 00:00:00 2001 From: Andrianto Lie Date: Thu, 28 Mar 2019 15:11:33 +0800 Subject: [PATCH 229/680] Use portable format of reversible blocks when switching nodeos version ---
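Note: the reversible blocks store is essentially a chainbase shared-memory database, and its layout need not be stable across nodeos versions, which is why the diff below round-trips it through the portable format before switching binaries. A minimal sketch of that hand-off, reusing only the helpers, flags, and dump path that appear in the diff itself:

    portableRevBlkPath = join(Cluster.getDataDir(oldNodeId), "rev_blk_portable_format")
    oldNode.kill(signal.SIGTERM)
    # old binary: write the portable dump of its reversible blocks, then exit immediately
    oldNode.relaunch(oldNodeId, chainArg="--export-reversible-blocks {}".format(portableRevBlkPath), timeout=1)
    # new binary: rebuild its reversible blocks database from the dump, then exit
    oldNode.relaunch(oldNodeId, chainArg="--import-reversible-blocks {}".format(portableRevBlkPath),
                     timeout=1, nodeosPath="programs/nodeos/nodeos")
    os.remove(portableRevBlkPath)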
..._multiple_version_protocol_feature_test.py | 22 ++++++++++++++----- 1 file changed, 16 insertions(+), 6 deletions(-) diff --git a/tests/nodeos_multiple_version_protocol_feature_test.py b/tests/nodeos_multiple_version_protocol_feature_test.py index 83ec622b7a6..42906334564 100755 --- a/tests/nodeos_multiple_version_protocol_feature_test.py +++ b/tests/nodeos_multiple_version_protocol_feature_test.py @@ -9,6 +9,7 @@ import signal import json import time +import os from os.path import join, exists from datetime import datetime @@ -140,7 +141,7 @@ def shouldNodesBeInSync(nodes:[Node]): # Then we set the earliest_allowed_activation_time of 2nd node and 3rd node with valid value # Once the 1st node activates PREACTIVATE_FEATURE, all of them should have PREACTIVATE_FEATURE activated in the next block # They will be in sync and their LIB will advance since they control > 2/3 of the producers - # However, the 4th node will be out of sync with them, and its LIB will never advance + # However, the 4th node will be out of sync with them, and its LIB will be stuck setValidityOfActTimeSubjRestriction(newNodes[1], newNodeIds[1], "PREACTIVATE_FEATURE", True) setValidityOfActTimeSubjRestriction(newNodes[2], newNodeIds[2], "PREACTIVATE_FEATURE", True) @@ -151,15 +152,24 @@ def shouldNodesBeInSync(nodes:[Node]): assert newNodes[0].waitForLibToAdvance(), "1st node LIB should advance" - newNode0Lib = newNodes[0].getIrreversibleBlockNum() - assert newNodes[1].getIrreversibleBlockNum() >= newNode0Lib and \ - newNodes[2].getIrreversibleBlockNum() >= newNode0Lib, "2nd and 3rd node LIB should also advance" - assert not oldNode.waitForLibToAdvance(), "4th node LIB should not advance" + nextLibAfterPreactivateFeature = newNodes[0].getIrreversibleBlockNum() + assert newNodes[1].getIrreversibleBlockNum() >= nextLibAfterPreactivateFeature and \ + newNodes[2].getIrreversibleBlockNum() >= nextLibAfterPreactivateFeature, "2nd and 3rd node LIB should also advance" + assert oldNode.getIrreversibleBlockNum() < nextLibAfterPreactivateFeature, "4th node LIB should be stuck" # Restart old node with newest version + # Before migrating to the new version, run --export-reversible-blocks with the old version + # and --import-reversible-blocks with the new version to ensure compatibility of the reversible blocks # Finally, when we restart the 4th node with the version of nodeos that supports protocol features, # all nodes should be in sync, and the 4th node will also contain PREACTIVATE_FEATURE - restartNode(oldNode, oldNodeId, chainArg=" --replay ", nodeosPath="programs/nodeos/nodeos") + portableRevBlkPath = join(Cluster.getDataDir(oldNodeId), "rev_blk_portable_format") + oldNode.kill(signal.SIGTERM) + # Note: these relaunches will exit immediately (expected behavior of export/import), so the chainArg will not replace the old cmd + oldNode.relaunch(oldNodeId, chainArg="--export-reversible-blocks {}".format(portableRevBlkPath), timeout=1) + oldNode.relaunch(oldNodeId, chainArg="--import-reversible-blocks {}".format(portableRevBlkPath), timeout=1, nodeosPath="programs/nodeos/nodeos") + os.remove(portableRevBlkPath) + + restartNode(oldNode, oldNodeId, chainArg="--replay", nodeosPath="programs/nodeos/nodeos") time.sleep(2) # Give some time to replay assert shouldNodesBeInSync(allNodes), "All nodes should be in sync" From 33f61ccf1483a8b9134ac8a70762dded2c2ec458 Mon Sep 17 00:00:00 2001 From: Andrianto Lie Date: Thu, 28 Mar 2019 16:18:46 +0800 Subject: [PATCH 230/680] Ensure that LIB can advance past block which contains PREACTIVATE_FEATURE ---
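Note: LIB merely advancing once is not a sufficient check, since it can tick forward while still trailing the block that carried the activation. The stronger invariant this patch pins down is roughly the following (a sketch reusing the helpers the diff below introduces; names are as in the test):

    libBeforePreactivation = newNodes[0].getIrreversibleBlockNum()
    newNodes[0].activatePreactivateFeature()
    activatedBlockNum = newNodes[0].getHeadBlockNum()  # activation happened at or before this block
    # upgraded nodes must finalize past the activation block...
    assert waitUntilBlockBecomeIrr(newNodes[0], activatedBlockNum)
    # ...while the old node's LIB stays pinned at its pre-activation value
    assert oldNode.getIrreversibleBlockNum() <= libBeforePreactivation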
..._multiple_version_protocol_feature_test.py | 21 ++++++++++++++----- 1 file changed, 16 insertions(+), 5 deletions(-) diff --git a/tests/nodeos_multiple_version_protocol_feature_test.py b/tests/nodeos_multiple_version_protocol_feature_test.py index 42906334564..67037663ead 100755 --- a/tests/nodeos_multiple_version_protocol_feature_test.py +++ b/tests/nodeos_multiple_version_protocol_feature_test.py @@ -65,6 +65,11 @@ def setValidityOfActTimeSubjRestriction(node, nodeId, codename, valid): node.modifyBuiltinPFSubjRestrictions(nodeId, codename, actTimeSubjRestriction) restartNode(node, nodeId) +def waitUntilBlockBecomeIrr(node, blockNum, timeout=60): + def hasBlockBecomeIrr(): + return node.getIrreversibleBlockNum() >= blockNum + return Utils.waitForBool(hasBlockBecomeIrr, timeout) + # Flag to indicate the test result testSuccessful = False try: @@ -146,21 +146,27 @@ def shouldNodesBeInSync(nodes:[Node]): # Then we set the earliest_allowed_activation_time of 2nd node and 3rd node with valid value # Once the 1st node activates PREACTIVATE_FEATURE, all of them should have PREACTIVATE_FEATURE activated in the next block # They will be in sync and their LIB will advance since they control > 2/3 of the producers + # Also the LIB should be able to advance past the block that contains PREACTIVATE_FEATURE # However, the 4th node will be out of sync with them, and its LIB will be stuck setValidityOfActTimeSubjRestriction(newNodes[1], newNodeIds[1], "PREACTIVATE_FEATURE", True) setValidityOfActTimeSubjRestriction(newNodes[2], newNodeIds[2], "PREACTIVATE_FEATURE", True) waitUntilBeginningOfProdTurn(newNodes[0], "defproducera") + libBeforePreactivation = newNodes[0].getIrreversibleBlockNum() newNodes[0].activatePreactivateFeature() assert shouldNodesBeInSync(newNodes), "New nodes should be in sync" assert not shouldNodesBeInSync(allNodes), "Nodes should not be in sync after preactivation" for node in newNodes: assert shouldNodeContainPreactivateFeature(node), "New node should contain PREACTIVATE_FEATURE" - assert newNodes[0].waitForLibToAdvance(), "1st node LIB should advance" - nextLibAfterPreactivateFeature = newNodes[0].getIrreversibleBlockNum() - assert newNodes[1].getIrreversibleBlockNum() >= nextLibAfterPreactivateFeature and \ - newNodes[2].getIrreversibleBlockNum() >= nextLibAfterPreactivateFeature, "2nd and 3rd node LIB should also advance" - assert oldNode.getIrreversibleBlockNum() < nextLibAfterPreactivateFeature, "4th node LIB should be stuck" + + activatedBlockNum = newNodes[0].getHeadBlockNum() # The PREACTIVATE_FEATURE should have been activated before or at this block num + assert waitUntilBlockBecomeIrr(newNodes[0], activatedBlockNum), \ + "1st node LIB should be able to advance past the block that contains PREACTIVATE_FEATURE" + assert newNodes[1].getIrreversibleBlockNum() >= activatedBlockNum and \ + newNodes[2].getIrreversibleBlockNum() >= activatedBlockNum, \ + "2nd and 3rd node LIB should also be able to advance past the block that contains PREACTIVATE_FEATURE" + assert oldNode.getIrreversibleBlockNum() <= libBeforePreactivation, \ + "4th node LIB should be stuck on the LIB before PREACTIVATE_FEATURE is activated" # Restart old node with newest version From 7030d718fb33dfe2cf85b97b1b710f51d9e1c0eb Mon Sep 17 00:00:00 2001 From: arhag Date: Thu, 28 Mar 2019 11:29:30 -0400 Subject: [PATCH 231/680] improve protocol_feature_tests/require_preactivation_test --- unittests/protocol_feature_tests.cpp | 25
+++++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/unittests/protocol_feature_tests.cpp b/unittests/protocol_feature_tests.cpp index 2a7a466f556..6fe418ca602 100644 --- a/unittests/protocol_feature_tests.cpp +++ b/unittests/protocol_feature_tests.cpp @@ -177,6 +177,31 @@ BOOST_AUTO_TEST_CASE( require_preactivation_test ) try { protocol_feature_exception, fc_exception_message_starts_with( "attempted to activate protocol feature without prior required preactivation:" ) ); + + c.protocol_features_to_be_activated_wo_preactivation.clear(); + + BOOST_CHECK( !c.control->is_builtin_activated( builtin_protocol_feature_t::only_link_to_existing_permission ) ); + + c.preactivate_protocol_features( {*d} ); + c.finish_block(); + + BOOST_CHECK( !c.control->is_builtin_activated( builtin_protocol_feature_t::only_link_to_existing_permission ) ); + + BOOST_CHECK_EXCEPTION( c.control->start_block( + c.control->head_block_time() + fc::milliseconds(config::block_interval_ms), + 0, + {} + ), + block_validate_exception, + fc_exception_message_is( "There are pre-activated protocol features that were not activated at the start of this block" ) + ); + + BOOST_CHECK( !c.control->is_builtin_activated( builtin_protocol_feature_t::only_link_to_existing_permission ) ); + + c.produce_block(); + + BOOST_CHECK( c.control->is_builtin_activated( builtin_protocol_feature_t::only_link_to_existing_permission ) ); + } FC_LOG_AND_RETHROW() BOOST_AUTO_TEST_CASE( only_link_to_existing_permission_test ) try { From 3e9ca96d539a8a65743bf4d9f34aa0171a1872f4 Mon Sep 17 00:00:00 2001 From: arhag Date: Thu, 28 Mar 2019 11:55:45 -0400 Subject: [PATCH 232/680] implement DISALLOW_EMPTY_PRODUCER_SCHEDULE protocol feature --- libraries/chain/controller.cpp | 4 ++++ .../include/eosio/chain/protocol_feature_manager.hpp | 3 ++- libraries/chain/protocol_feature_manager.cpp | 11 +++++++++++ libraries/chain/wasm_interface.cpp | 7 +++++++ 4 files changed, 24 insertions(+), 1 deletion(-) diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp index 8bec114d6be..01caab76083 100644 --- a/libraries/chain/controller.cpp +++ b/libraries/chain/controller.cpp @@ -2670,6 +2670,10 @@ int64_t controller::set_proposed_producers( vector producers ) { const auto& gpo = get_global_properties(); auto cur_block_num = head_block_num() + 1; + if( producers.size() == 0 && is_builtin_activated( builtin_protocol_feature_t::disallow_empty_producer_schedule ) ) { + return -1; + } + if( gpo.proposed_schedule_block_num.valid() ) { if( *gpo.proposed_schedule_block_num != cur_block_num ) return -1; // there is already a proposed schedule set in a previous block, wait for it to become pending diff --git a/libraries/chain/include/eosio/chain/protocol_feature_manager.hpp b/libraries/chain/include/eosio/chain/protocol_feature_manager.hpp index 4c7bcf6d95a..f9f55dffb7a 100644 --- a/libraries/chain/include/eosio/chain/protocol_feature_manager.hpp +++ b/libraries/chain/include/eosio/chain/protocol_feature_manager.hpp @@ -17,7 +17,8 @@ enum class builtin_protocol_feature_t : uint32_t { preactivate_feature, only_link_to_existing_permission, replace_deferred, - fix_linkauth_restriction + fix_linkauth_restriction, + disallow_empty_producer_schedule }; struct protocol_feature_subjective_restrictions { diff --git a/libraries/chain/protocol_feature_manager.cpp b/libraries/chain/protocol_feature_manager.cpp index 94d42cfad1d..78c84ff9410 100644 --- a/libraries/chain/protocol_feature_manager.cpp +++ b/libraries/chain/protocol_feature_manager.cpp @@ 
-62,6 +62,17 @@ Builtin protocol feature: FIX_LINKAUTH_RESTRICTION Removes the restriction on eosio::linkauth for non-native actions named one of the five special action names: updateauth, deleteauth, linkauth, unlinkauth, or canceldelay. +*/ + {} + } ) + ( builtin_protocol_feature_t::disallow_empty_producer_schedule, builtin_protocol_feature_spec{ + "DISALLOW_EMPTY_PRODUCER_SCHEDULE", + fc::variant("2853617cec3eabd41881eb48882e6fc5e81a0db917d375057864b3befbe29acd").as(), + // SHA256 hash of the raw message below within the comment delimiters (do not modify message below). +/* +Builtin protocol feature: DISALLOW_EMPTY_PRODUCER_SCHEDULE + +Disallows proposing an empty producer schedule. */ {} } ) diff --git a/libraries/chain/wasm_interface.cpp b/libraries/chain/wasm_interface.cpp index 98d82bbd335..3414d9dc972 100644 --- a/libraries/chain/wasm_interface.cpp +++ b/libraries/chain/wasm_interface.cpp @@ -168,6 +168,13 @@ class privileged_api : public context_aware_api { datastream ds( packed_producer_schedule, datalen ); vector producers; fc::raw::unpack(ds, producers); + EOS_ASSERT( producers.size() > 0 + || !context.control.is_builtin_activated( + builtin_protocol_feature_t::disallow_empty_producer_schedule + ), + wasm_execution_error, + "Producer schedule cannot be empty" + ); EOS_ASSERT(producers.size() <= config::max_producers, wasm_execution_error, "Producer schedule exceeds the maximum producer count for this chain"); // check that producers are unique std::set unique_producers; From e019da51863d3d8dc53f46918f3e5fa9abaf10ae Mon Sep 17 00:00:00 2001 From: Joseph J Guerra <8146030+josephjguerra@users.noreply.github.com> Date: Thu, 28 Mar 2019 13:28:47 -0400 Subject: [PATCH 233/680] Update LICENSE --- LICENSE | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/LICENSE b/LICENSE index 31dee1d933c..22d36d65db1 100644 --- a/LICENSE +++ b/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2017-2019 block.one all rights reserved. +Copyright (c) 2017-2019 block.one and its contributors. All rights reserved. 
The MIT License From cd28dafad1a7a5290370c50dd26288301f046da0 Mon Sep 17 00:00:00 2001 From: arhag Date: Thu, 28 Mar 2019 14:59:58 -0400 Subject: [PATCH 234/680] fix producer_schedule_tests/empty_producer_schedule_has_no_effect --- unittests/producer_schedule_tests.cpp | 83 ++++++++++++++------------- 1 file changed, 43 insertions(+), 40 deletions(-) diff --git a/unittests/producer_schedule_tests.cpp b/unittests/producer_schedule_tests.cpp index 9003f8555bd..2a91f06a7c1 100644 --- a/unittests/producer_schedule_tests.cpp +++ b/unittests/producer_schedule_tests.cpp @@ -328,81 +328,84 @@ BOOST_FIXTURE_TEST_CASE( producer_schedule_reduction, tester ) try { BOOST_REQUIRE_EQUAL( validate(), true ); } FC_LOG_AND_RETHROW() -BOOST_FIXTURE_TEST_CASE( empty_producer_schedule_has_no_effect, tester ) try { - create_accounts( {N(alice),N(bob),N(carol)} ); - while (control->head_block_num() < 3) { - produce_block(); +BOOST_AUTO_TEST_CASE( empty_producer_schedule_has_no_effect ) try { + validating_tester c( validating_tester::default_config() ); + c.execute_setup_policy( setup_policy::preactivate_feature_and_new_bios ); + + c.create_accounts( {N(alice),N(bob),N(carol)} ); + while (c.control->head_block_num() < 3) { + c.produce_block(); } auto compare_schedules = [&]( const vector& a, const producer_schedule_type& b ) { return std::equal( a.begin(), a.end(), b.producers.begin(), b.producers.end() ); }; - auto res = set_producers( {N(alice),N(bob)} ); + auto res = c.set_producers( {N(alice),N(bob)} ); vector sch1 = { {N(alice), get_public_key(N(alice), "active")}, {N(bob), get_public_key(N(bob), "active")} }; wlog("set producer schedule to [alice,bob]"); - BOOST_REQUIRE_EQUAL( true, control->proposed_producers().valid() ); - BOOST_CHECK_EQUAL( true, compare_schedules( sch1, *control->proposed_producers() ) ); - BOOST_CHECK_EQUAL( control->pending_producers().producers.size(), 0u ); + BOOST_REQUIRE_EQUAL( true, c.control->proposed_producers().valid() ); + BOOST_CHECK_EQUAL( true, compare_schedules( sch1, *c.control->proposed_producers() ) ); + BOOST_CHECK_EQUAL( c.control->pending_producers().producers.size(), 0u ); // Start a new block which promotes the proposed schedule to pending - produce_block(); - BOOST_CHECK_EQUAL( control->pending_producers().version, 1u ); - BOOST_CHECK_EQUAL( true, compare_schedules( sch1, control->pending_producers() ) ); - BOOST_CHECK_EQUAL( control->active_producers().version, 0u ); + c.produce_block(); + BOOST_CHECK_EQUAL( c.control->pending_producers().version, 1u ); + BOOST_CHECK_EQUAL( true, compare_schedules( sch1, c.control->pending_producers() ) ); + BOOST_CHECK_EQUAL( c.control->active_producers().version, 0u ); // Start a new block which promotes the pending schedule to active - produce_block(); - BOOST_CHECK_EQUAL( control->active_producers().version, 1u ); - BOOST_CHECK_EQUAL( true, compare_schedules( sch1, control->active_producers() ) ); - produce_blocks(6); + c.produce_block(); + BOOST_CHECK_EQUAL( c.control->active_producers().version, 1u ); + BOOST_CHECK_EQUAL( true, compare_schedules( sch1, c.control->active_producers() ) ); + c.produce_blocks(6); - res = set_producers( {} ); + res = c.set_producers( {} ); wlog("set producer schedule to []"); - BOOST_REQUIRE_EQUAL( true, control->proposed_producers().valid() ); - BOOST_CHECK_EQUAL( control->proposed_producers()->producers.size(), 0u ); - BOOST_CHECK_EQUAL( control->proposed_producers()->version, 2u ); + BOOST_REQUIRE_EQUAL( true, c.control->proposed_producers().valid() ); + BOOST_CHECK_EQUAL( 
c.control->proposed_producers()->producers.size(), 0u ); + BOOST_CHECK_EQUAL( c.control->proposed_producers()->version, 2u ); - produce_blocks(12); - BOOST_CHECK_EQUAL( control->pending_producers().version, 1u ); + c.produce_blocks(12); + BOOST_CHECK_EQUAL( c.control->pending_producers().version, 1u ); // Empty producer schedule does get promoted from proposed to pending - produce_block(); - BOOST_CHECK_EQUAL( control->pending_producers().version, 2u ); - BOOST_CHECK_EQUAL( false, control->proposed_producers().valid() ); + c.produce_block(); + BOOST_CHECK_EQUAL( c.control->pending_producers().version, 2u ); + BOOST_CHECK_EQUAL( false, c.control->proposed_producers().valid() ); // However it should not get promoted from pending to active - produce_blocks(24); - BOOST_CHECK_EQUAL( control->active_producers().version, 1u ); - BOOST_CHECK_EQUAL( control->pending_producers().version, 2u ); + c.produce_blocks(24); + BOOST_CHECK_EQUAL( c.control->active_producers().version, 1u ); + BOOST_CHECK_EQUAL( c.control->pending_producers().version, 2u ); // Setting a new producer schedule should still use version 2 - res = set_producers( {N(alice),N(bob),N(carol)} ); + res = c.set_producers( {N(alice),N(bob),N(carol)} ); vector sch2 = { {N(alice), get_public_key(N(alice), "active")}, {N(bob), get_public_key(N(bob), "active")}, {N(carol), get_public_key(N(carol), "active")} }; wlog("set producer schedule to [alice,bob,carol]"); - BOOST_REQUIRE_EQUAL( true, control->proposed_producers().valid() ); - BOOST_CHECK_EQUAL( true, compare_schedules( sch2, *control->proposed_producers() ) ); - BOOST_CHECK_EQUAL( control->proposed_producers()->version, 2u ); + BOOST_REQUIRE_EQUAL( true, c.control->proposed_producers().valid() ); + BOOST_CHECK_EQUAL( true, compare_schedules( sch2, *c.control->proposed_producers() ) ); + BOOST_CHECK_EQUAL( c.control->proposed_producers()->version, 2u ); // Produce enough blocks to promote the proposed schedule to pending, which it can do because the existing pending has zero producers - produce_blocks(24); - BOOST_CHECK_EQUAL( control->active_producers().version, 1u ); - BOOST_CHECK_EQUAL( control->pending_producers().version, 2u ); - BOOST_CHECK_EQUAL( true, compare_schedules( sch2, control->pending_producers() ) ); + c.produce_blocks(24); + BOOST_CHECK_EQUAL( c.control->active_producers().version, 1u ); + BOOST_CHECK_EQUAL( c.control->pending_producers().version, 2u ); + BOOST_CHECK_EQUAL( true, compare_schedules( sch2, c.control->pending_producers() ) ); // Produce enough blocks to promote the pending schedule to active - produce_blocks(24); - BOOST_CHECK_EQUAL( control->active_producers().version, 2u ); - BOOST_CHECK_EQUAL( true, compare_schedules( sch2, control->active_producers() ) ); + c.produce_blocks(24); + BOOST_CHECK_EQUAL( c.control->active_producers().version, 2u ); + BOOST_CHECK_EQUAL( true, compare_schedules( sch2, c.control->active_producers() ) ); - BOOST_REQUIRE_EQUAL( validate(), true ); + BOOST_REQUIRE_EQUAL( c.validate(), true ); } FC_LOG_AND_RETHROW() BOOST_AUTO_TEST_CASE( producer_watermark_test ) try { From 00e96e1711c2c397dff5ccb70dca542a450d431f Mon Sep 17 00:00:00 2001 From: Nathan Pierce Date: Thu, 28 Mar 2019 17:15:50 -0400 Subject: [PATCH 235/680] Python 36 for centos7 and amazonlinux1 (#7005) --- .buildkite/pipeline.yml | 44 +++++++++++++++++------------------ scripts/eosio_build_amazon.sh | 6 ++--- scripts/eosio_build_centos.sh | 12 +++++----- 3 files changed, 31 insertions(+), 31 deletions(-) diff --git a/.buildkite/pipeline.yml 
b/.buildkite/pipeline.yml index 57ce31e5a6c..f83249df044 100644 --- a/.buildkite/pipeline.yml +++ b/.buildkite/pipeline.yml @@ -17,7 +17,7 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu16_2-1" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu16_2-2" workdir: /data/job timeout: 60 @@ -38,7 +38,7 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-1" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-2" workdir: /data/job timeout: 60 @@ -59,7 +59,7 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-1" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-2" workdir: /data/job timeout: 60 @@ -80,7 +80,7 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux1_2-1" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux1_2-2" workdir: /data/job timeout: 60 @@ -101,7 +101,7 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-1" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-2" workdir: /data/job timeout: 60 @@ -122,7 +122,7 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:fedora27_2-1" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:fedora27_2-2" workdir: /data/job timeout: 60 @@ -173,7 +173,7 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu16_2-1" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu16_2-2" workdir: /data/job timeout: 60 @@ -193,7 +193,7 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu16_2-1" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu16_2-2" workdir: /data/job timeout: 60 @@ -214,7 +214,7 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-1" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-2" workdir: /data/job timeout: 60 @@ -234,7 +234,7 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-1" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-2" workdir: /data/job timeout: 60 @@ -255,7 +255,7 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-1" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-2" workdir: /data/job timeout: 60 @@ -275,7 +275,7 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-1" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-2" workdir: /data/job timeout: 60 @@ -296,7 +296,7 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux1_2-1" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux1_2-2" workdir: /data/job timeout: 60 @@ -316,7 +316,7 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux1_2-1" + image: 
"436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux1_2-2" workdir: /data/job timeout: 60 @@ -337,7 +337,7 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-1" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-2" workdir: /data/job timeout: 60 @@ -357,7 +357,7 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-1" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-2" workdir: /data/job timeout: 60 @@ -378,7 +378,7 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:fedora27_2-1" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:fedora27_2-2" workdir: /data/job timeout: 60 @@ -398,7 +398,7 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:fedora27_2-1" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:fedora27_2-2" workdir: /data/job timeout: 60 @@ -501,7 +501,7 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu16_2-1" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu16_2-2" workdir: /data/job env: OS: "ubuntu-16.04" @@ -527,7 +527,7 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-1" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-2" workdir: /data/job env: OS: "ubuntu-18.04" @@ -560,7 +560,7 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:fedora27_2-1" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:fedora27_2-2" workdir: /data/job env: OS: "fc27" @@ -593,7 +593,7 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-1" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-2" workdir: /data/job env: OS: "el7" diff --git a/scripts/eosio_build_amazon.sh b/scripts/eosio_build_amazon.sh index 7a16e4486e9..ff655496a7b 100755 --- a/scripts/eosio_build_amazon.sh +++ b/scripts/eosio_build_amazon.sh @@ -8,13 +8,13 @@ DISK_AVAIL_KB=$( df . 
| tail -1 | awk '{print $4}' ) DISK_TOTAL=$(( DISK_TOTAL_KB / 1048576 )) DISK_AVAIL=$(( DISK_AVAIL_KB / 1048576 )) -if [[ "${OS_NAME}" == "Amazon Linux AMI" ]]; then +if [[ "${OS_NAME}" == "Amazon Linux AMI" ]]; then # Amazonlinux1 DEP_ARRAY=( sudo procps util-linux which gcc72 gcc72-c++ autoconf automake libtool make doxygen graphviz \ - bzip2 bzip2-devel openssl-devel gmp gmp-devel libstdc++72 python27 python27-devel python34 python34-devel \ + bzip2 bzip2-devel openssl-devel gmp gmp-devel libstdc++72 python27 python27-devel python36 python36-devel \ libedit-devel ncurses-devel swig wget file libcurl-devel libusb1-devel ) -else +else # Amazonlinux2 DEP_ARRAY=( git procps-ng util-linux gcc gcc-c++ autoconf automake libtool make bzip2 \ bzip2-devel openssl-devel gmp-devel libstdc++ libcurl-devel libusbx-devel \ diff --git a/scripts/eosio_build_centos.sh b/scripts/eosio_build_centos.sh index 1c1e97b2fab..8e7044001ab 100755 --- a/scripts/eosio_build_centos.sh +++ b/scripts/eosio_build_centos.sh @@ -121,7 +121,7 @@ printf "\\n" DEP_ARRAY=( git autoconf automake libtool make bzip2 doxygen graphviz \ bzip2-devel openssl-devel gmp-devel \ - ocaml libicu-devel python python-devel python33 \ + ocaml libicu-devel python python-devel rh-python36 \ gettext-devel file sudo libusbx-devel libcurl-devel ) COUNT=1 @@ -160,10 +160,10 @@ else printf " - No required YUM dependencies to install.\\n\\n" fi -if [ -d /opt/rh/python33 ]; then - printf "Enabling python33...\\n" - source /opt/rh/python33/enable || exit 1 - printf " - Python33 successfully enabled!\\n" +if [ -d /opt/rh/rh-python36 ]; then + printf "Enabling python36...\\n" + source /opt/rh/rh-python36/enable || exit 1 + printf " - Python36 successfully enabled!\\n" fi printf "\\n" @@ -190,7 +190,7 @@ if [ $? 
-ne 0 ]; then exit -1; fi printf "\\n" -export CPATH="$CPATH:/opt/rh/python33/root/usr/include/python3.3m" # m on the end causes problems with boost finding python3 +export CPATH="$CPATH:/opt/rh/rh-python36/root/usr/include/python3.6m" # m on the end causes problems with boost finding python3 printf "Checking Boost library (${BOOST_VERSION}) installation...\\n" BOOSTVERSION=$( grep "#define BOOST_VERSION" "$HOME/opt/boost/include/boost/version.hpp" 2>/dev/null | tail -1 | tr -s ' ' | cut -d\ -f3 ) if [ "${BOOSTVERSION}" != "${BOOST_VERSION_MAJOR}0${BOOST_VERSION_MINOR}0${BOOST_VERSION_PATCH}" ]; then From c33196dcdeb83e82ff14338bad7a52651b2c9544 Mon Sep 17 00:00:00 2001 From: Nathan Pierce Date: Thu, 28 Mar 2019 20:25:48 -0400 Subject: [PATCH 236/680] long-running image version bump (#7011) --- .buildkite/long_running_tests.yml | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/.buildkite/long_running_tests.yml b/.buildkite/long_running_tests.yml index 6383f57c392..dd0d6cbee9d 100644 --- a/.buildkite/long_running_tests.yml +++ b/.buildkite/long_running_tests.yml @@ -17,7 +17,7 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu16_2-1" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu16_2-2" workdir: /data/job timeout: 60 @@ -38,7 +38,7 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-1" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-2" workdir: /data/job timeout: 60 @@ -59,7 +59,7 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-1" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-2" workdir: /data/job timeout: 60 @@ -80,7 +80,7 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux1_2-1" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux1_2-2" workdir: /data/job timeout: 60 @@ -101,7 +101,7 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-1" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-2" workdir: /data/job timeout: 60 @@ -122,7 +122,7 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:fedora27_2-1" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:fedora27_2-2" workdir: /data/job timeout: 60 @@ -172,7 +172,7 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu16_2-1" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu16_2-2" workdir: /data/job timeout: 90 @@ -192,7 +192,7 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-1" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:ubuntu18_2-2" workdir: /data/job timeout: 90 @@ -212,7 +212,7 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-1" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:centos7_2-2" workdir: /data/job timeout: 90 @@ -232,7 +232,7 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux1_2-1" + image: 
"436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux1_2-2" workdir: /data/job timeout: 90 @@ -252,7 +252,7 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-1" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux2_2-2" workdir: /data/job timeout: 90 @@ -272,7 +272,7 @@ steps: region: "us-west-2" docker#v2.1.0: debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:fedora27_2-1" + image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:fedora27_2-2" workdir: /data/job timeout: 90 From 65ca26684299634739f3ff6b950f12bfcde51c0c Mon Sep 17 00:00:00 2001 From: Andrianto Lie Date: Fri, 29 Mar 2019 14:57:32 +0800 Subject: [PATCH 237/680] Add unit test for disallow empty producer schedule protocol feature --- unittests/protocol_feature_tests.cpp | 29 +++++++++++++++++++++++++++- 1 file changed, 28 insertions(+), 1 deletion(-) diff --git a/unittests/protocol_feature_tests.cpp b/unittests/protocol_feature_tests.cpp index eeb26def023..10d9e96e718 100644 --- a/unittests/protocol_feature_tests.cpp +++ b/unittests/protocol_feature_tests.cpp @@ -494,7 +494,7 @@ BOOST_AUTO_TEST_CASE( fix_linkauth_restriction ) { try { chain.create_account(N(currency)); chain.create_account(tester_account); chain.produce_blocks(); - + chain.push_action(config::system_account_name, updateauth::get_name(), tester_account, fc::mutable_variant_object() ("account", name(tester_account).to_string()) ("permission", "first") @@ -555,4 +555,31 @@ BOOST_AUTO_TEST_CASE( fix_linkauth_restriction ) { try { } FC_LOG_AND_RETHROW() } +BOOST_AUTO_TEST_CASE( disallow_empty_producer_schedule_test ) { try { + tester c( setup_policy::preactivate_feature_and_new_bios ); + + const auto& pfm = c.control->get_protocol_feature_manager(); + const auto& d = pfm.get_builtin_digest( builtin_protocol_feature_t::disallow_empty_producer_schedule ); + BOOST_REQUIRE( d ); + + // Before activation, it is allowed to set empty producer schedule + c.set_producers( {} ); + + // After activation, it should not be allowed + c.preactivate_protocol_features( {*d} ); + c.produce_block(); + BOOST_REQUIRE_EXCEPTION( c.set_producers( {} ), + wasm_execution_error, + fc_exception_message_is( "Producer schedule cannot be empty" ) ); + + // Setting non empty producer schedule should still be fine + vector producer_names = {N(alice),N(bob),N(carol)}; + c.create_accounts( producer_names ); + c.set_producers( producer_names ); + c.produce_blocks(2); + const auto& schedule = c.get_producer_keys( producer_names ); + BOOST_CHECK( std::equal( schedule.begin(), schedule.end(), c.control->active_producers().producers.begin()) ); + +} FC_LOG_AND_RETHROW() } + BOOST_AUTO_TEST_SUITE_END() From cc0eb5b65c808ddaa444a1b4b87cb4dc1e90d2a7 Mon Sep 17 00:00:00 2001 From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com> Date: Fri, 29 Mar 2019 14:20:44 -0400 Subject: [PATCH 238/680] Remove boost::thread usage from mongo plugin boost::thread is problematic on some new compiler + old boost combos --- plugins/mongo_db_plugin/mongo_db_plugin.cpp | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) diff --git a/plugins/mongo_db_plugin/mongo_db_plugin.cpp b/plugins/mongo_db_plugin/mongo_db_plugin.cpp index 0adb1670068..25dea46d546 100644 --- a/plugins/mongo_db_plugin/mongo_db_plugin.cpp +++ b/plugins/mongo_db_plugin/mongo_db_plugin.cpp @@ -16,11 +16,10 @@ #include #include #include -#include -#include -#include #include +#include +#include 
#include #include @@ -164,9 +163,9 @@ class mongo_db_plugin_impl { std::deque block_state_process_queue; std::deque irreversible_block_state_queue; std::deque irreversible_block_state_process_queue; - boost::mutex mtx; - boost::condition_variable condition; - boost::thread consume_thread; + std::mutex mtx; + std::condition_variable condition; + std::thread consume_thread; std::atomic_bool done{false}; std::atomic_bool startup{true}; fc::optional chain_id; @@ -292,7 +291,7 @@ bool mongo_db_plugin_impl::filter_include( const transaction& trx ) const template void mongo_db_plugin_impl::queue( Queue& queue, const Entry& e ) { - boost::mutex::scoped_lock lock( mtx ); + std::unique_lock lock( mtx ); auto queue_size = queue.size(); if( queue_size > max_queue_size ) { lock.unlock(); @@ -300,7 +299,7 @@ void mongo_db_plugin_impl::queue( Queue& queue, const Entry& e ) { queue_sleep_time += 10; if( queue_sleep_time > 1000 ) wlog("queue size: ${q}", ("q", queue_size)); - boost::this_thread::sleep_for( boost::chrono::milliseconds( queue_sleep_time )); + std::this_thread::sleep_for( std::chrono::milliseconds( queue_sleep_time )); lock.lock(); } else { queue_sleep_time -= 10; @@ -408,7 +407,7 @@ void mongo_db_plugin_impl::consume_blocks() { _account_controls = mongo_conn[db_name][account_controls_col]; while (true) { - boost::mutex::scoped_lock lock(mtx); + std::unique_lock lock(mtx); while ( transaction_metadata_queue.empty() && transaction_trace_queue.empty() && block_state_queue.empty() && @@ -1528,7 +1527,7 @@ void mongo_db_plugin_impl::init() { ilog("starting db plugin thread"); - consume_thread = boost::thread([this] { consume_blocks(); }); + consume_thread = std::thread([this] { consume_blocks(); }); startup = false; } From d4b9cfdc76a4ba50d9a45c638f85bc0541693f09 Mon Sep 17 00:00:00 2001 From: arhag Date: Fri, 29 Mar 2019 14:22:46 -0400 Subject: [PATCH 239/680] small improvements in error handling if cluster fails to launch --- ..._multiple_version_protocol_feature_test.py | 28 +++++++++---------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/tests/nodeos_multiple_version_protocol_feature_test.py b/tests/nodeos_multiple_version_protocol_feature_test.py index 67037663ead..2b8523aa1bd 100755 --- a/tests/nodeos_multiple_version_protocol_feature_test.py +++ b/tests/nodeos_multiple_version_protocol_feature_test.py @@ -83,31 +83,31 @@ def hasBlockBecomeIrr(): "3": "170" } assert exists(alternateVersionLabelsFile), "Alternate version labels file does not exist" - cluster.launch(pnodes=4, totalNodes=4, prodCount=1, totalProducers=4, - extraNodeosArgs=" --plugin eosio::producer_api_plugin ", - useBiosBootFile=False, - onlySetProds=True, - pfSetupPolicy=PFSetupPolicy.NONE, - alternateVersionLabelsFile=alternateVersionLabelsFile, - associatedNodeLabels=associatedNodeLabels) - - def pauseBlockProduction(): - for node in cluster.nodes: + assert cluster.launch(pnodes=4, totalNodes=4, prodCount=1, totalProducers=4, + extraNodeosArgs=" --plugin eosio::producer_api_plugin ", + useBiosBootFile=False, + onlySetProds=True, + pfSetupPolicy=PFSetupPolicy.NONE, + alternateVersionLabelsFile=alternateVersionLabelsFile, + associatedNodeLabels=associatedNodeLabels), "Unable to launch cluster" + + def pauseBlockProduction(nodes:[Node]): + for node in nodes: node.sendRpcApi("v1/producer/pause") - def resumeBlockProduction(): - for node in cluster.nodes: + def resumeBlockProduction(nodes:[Node]): + for node in nodes: node.sendRpcApi("v1/producer/resume") def shouldNodesBeInSync(nodes:[Node]): # Pause all 
block production to ensure the head is not moving - pauseBlockProduction() + pauseBlockProduction(nodes) time.sleep(1) # Wait for some time to ensure all blocks are propagated headBlockIds = [] for node in nodes: headBlockId = node.getInfo()["head_block_id"] headBlockIds.append(headBlockId) - resumeBlockProduction() + resumeBlockProduction(nodes) return len(set(headBlockIds)) == 1 newNodeIds = [0, 1, 2] From e718320c7304dddb1cc417a6229ad1a61364231e Mon Sep 17 00:00:00 2001 From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com> Date: Fri, 29 Mar 2019 14:28:25 -0400 Subject: [PATCH 240/680] disable asio's experimental string_view usage on macos Newer stdlibc++s can #error in experimental string_view --- CMakeLists.txt | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/CMakeLists.txt b/CMakeLists.txt index 00258c4b86d..694f0814aaa 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -116,6 +116,11 @@ FIND_PACKAGE(Boost 1.67 REQUIRED COMPONENTS locale iostreams) +# Some new stdlibc++s will #error on ; a problem for boost pre-1.69 +if( APPLE AND UNIX ) + add_definitions(-DBOOST_ASIO_DISABLE_STD_EXPERIMENTAL_STRING_VIEW) +endif() + if( WIN32 ) message( STATUS "Configuring EOSIO on WIN32") From 0ce2bfbaa6e387a2986b5188ce5d9fc485f06fee Mon Sep 17 00:00:00 2001 From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com> Date: Fri, 29 Mar 2019 14:46:22 -0400 Subject: [PATCH 241/680] fc sync - Remove fc::shared_ptr & refactor logging code to not use it --- libraries/fc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/fc b/libraries/fc index 0c348cc9af4..1f62ef7f68e 160000 --- a/libraries/fc +++ b/libraries/fc @@ -1 +1 @@ -Subproject commit 0c348cc9af47d71af57e6926fd64848594a78658 +Subproject commit 1f62ef7f68efdaa1240bf99b382d4785fd1afcbc From 5636b81b08a0cad8a276f0769fe6144c33015345 Mon Sep 17 00:00:00 2001 From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com> Date: Fri, 29 Mar 2019 15:34:56 -0400 Subject: [PATCH 242/680] chainbase sync - Remove boost thread include and unused typedef --- libraries/chainbase | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/chainbase b/libraries/chainbase index a2563660f08..eb2d0c28bc1 160000 --- a/libraries/chainbase +++ b/libraries/chainbase @@ -1 +1 @@ -Subproject commit a2563660f082622ab7a18778f5b91cc91f51c0c3 +Subproject commit eb2d0c28bc1f1328e8a5fc899291336ad487b084 From f6c9d81858fb5bd3e101c0ad72476c30f348c8cd Mon Sep 17 00:00:00 2001 From: Nathan Pierce Date: Fri, 29 Mar 2019 16:57:36 -0400 Subject: [PATCH 243/680] New disk space requirements (#7023) --- scripts/eosio_build.sh | 4 ++-- scripts/eosio_build_centos.sh | 9 +++++---- 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/scripts/eosio_build.sh b/scripts/eosio_build.sh index d3128903097..a97ceaa5058 100755 --- a/scripts/eosio_build.sh +++ b/scripts/eosio_build.sh @@ -30,9 +30,8 @@ # https://github.com/EOSIO/eos/blob/master/LICENSE ########################################################################## -VERSION=2.1 # Build script version +VERSION=2.2 # Build script version CMAKE_BUILD_TYPE=Release -export DISK_MIN=20 DOXYGEN=false ENABLE_COVERAGE_TESTING=false CORE_SYMBOL_NAME="SYS" @@ -75,6 +74,7 @@ export LLVM_DIR=${LLVM_ROOT}/lib/cmake/llvm export DOXYGEN_VERSION=1_8_14 export DOXYGEN_ROOT=${SRC_LOCATION}/doxygen-${DOXYGEN_VERSION} export TINI_VERSION=0.18.0 +export DISK_MIN=5 # Setup directories mkdir -p $SRC_LOCATION diff --git a/scripts/eosio_build_centos.sh 
b/scripts/eosio_build_centos.sh index 8e7044001ab..621001d0a97 100755 --- a/scripts/eosio_build_centos.sh +++ b/scripts/eosio_build_centos.sh @@ -160,9 +160,10 @@ else printf " - No required YUM dependencies to install.\\n\\n" fi -if [ -d /opt/rh/rh-python36 ]; then +export PYTHON3PATH="/opt/rh/rh-python36" +if [ -d $PYTHON3PATH ]; then printf "Enabling python36...\\n" - source /opt/rh/rh-python36/enable || exit 1 + source $PYTHON3PATH/enable || exit 1 printf " - Python36 successfully enabled!\\n" fi @@ -190,7 +191,7 @@ if [ $? -ne 0 ]; then exit -1; fi printf "\\n" -export CPATH="$CPATH:/opt/rh/rh-python36/root/usr/include/python3.6m" # m on the end causes problems with boost finding python3 +export CPATH="${CPATH}:${PYTHON3PATH}/root/usr/include/python3.6m" # m on the end causes problems with boost finding python3 printf "Checking Boost library (${BOOST_VERSION}) installation...\\n" BOOSTVERSION=$( grep "#define BOOST_VERSION" "$HOME/opt/boost/include/boost/version.hpp" 2>/dev/null | tail -1 | tr -s ' ' | cut -d\ -f3 ) if [ "${BOOSTVERSION}" != "${BOOST_VERSION_MAJOR}0${BOOST_VERSION_MINOR}0${BOOST_VERSION_PATCH}" ]; then @@ -299,7 +300,7 @@ cd .. printf "\\n" function print_instructions() { - printf "source /opt/rh/python33/enable\\n" + printf "source ${PYTHON3PATH}/enable\\n" printf "source /opt/rh/devtoolset-7/enable\\n" return 0 } From 50635a6867c4ec605ac5104441f72d8aa6aaf530 Mon Sep 17 00:00:00 2001 From: arhag Date: Fri, 29 Mar 2019 23:39:22 -0400 Subject: [PATCH 244/680] initial attempt at restructuring transaction traces #6897 --- libraries/chain/CMakeLists.txt | 2 +- libraries/chain/apply_context.cpp | 47 +++++++------ libraries/chain/controller.cpp | 19 +++--- .../include/eosio/chain/abi_serializer.hpp | 1 - .../include/eosio/chain/apply_context.hpp | 8 ++- libraries/chain/include/eosio/chain/trace.hpp | 58 ++++++++-------- .../eosio/chain/transaction_context.hpp | 18 +++-- libraries/chain/trace.cpp | 41 ++++++++++++ libraries/chain/transaction_context.cpp | 67 ++++++++++++++++--- unittests/api_tests.cpp | 66 +++++++++--------- unittests/wasm_tests.cpp | 2 +- 11 files changed, 214 insertions(+), 115 deletions(-) create mode 100644 libraries/chain/trace.cpp diff --git a/libraries/chain/CMakeLists.txt b/libraries/chain/CMakeLists.txt index 15e8bfb0802..f12bdabe70e 100644 --- a/libraries/chain/CMakeLists.txt +++ b/libraries/chain/CMakeLists.txt @@ -44,7 +44,7 @@ add_library( eosio_chain # # contracts/chain_initializer.cpp - + trace.cpp transaction_metadata.cpp protocol_state_object.cpp protocol_feature_activation.cpp diff --git a/libraries/chain/apply_context.cpp b/libraries/chain/apply_context.cpp index 37c78ff7c7b..962e138dfa1 100644 --- a/libraries/chain/apply_context.cpp +++ b/libraries/chain/apply_context.cpp @@ -37,13 +37,6 @@ void apply_context::exec_one( action_trace& trace ) r.receiver = receiver; r.act_digest = digest_type::hash(act); - trace.trx_id = trx_context.id; - trace.block_num = control.head_block_num() + 1; - trace.block_time = control.pending_block_time(); - trace.producer_block_id = control.pending_producer_block_id(); - trace.act = act; - trace.context_free = context_free; - const auto& cfg = control.get_global_properties().configuration; try { try { @@ -71,7 +64,6 @@ void apply_context::exec_one( action_trace& trace ) } } FC_RETHROW_EXCEPTIONS( warn, "pending console output: ${console}", ("console", _pending_console_output) ) } catch( fc::exception& e ) { - trace.receipt = r; // fill with known data trace.except = e; finalize_trace( trace, start ); throw; @@ 
-90,7 +82,7 @@ void apply_context::exec_one( action_trace& trace ) trace.receipt = r; - trx_context.executed.emplace_back( move(r) ); + trx_context.executed.emplace_back( std::move(r) ); finalize_trace( trace, start ); @@ -112,12 +104,11 @@ void apply_context::finalize_trace( action_trace& trace, const fc::time_point& s void apply_context::exec( action_trace& trace ) { - _notified.push_back(receiver); + _notified.emplace_back( receiver, action_ordinal ); exec_one( trace ); for( uint32_t i = 1; i < _notified.size(); ++i ) { - receiver = _notified[i]; - trace.inline_traces.emplace_back( ); - exec_one( trace.inline_traces.back() ); + std::tie( receiver, action_ordinal ) = _notified[i]; + exec_one( trx_context.get_action_trace( action_ordinal ) ); } if( _cfa_inline_actions.size() > 0 || _inline_actions.size() > 0 ) { @@ -125,14 +116,12 @@ void apply_context::exec( action_trace& trace ) transaction_exception, "max inline action depth per transaction reached" ); } - for( const auto& inline_action : _cfa_inline_actions ) { - trace.inline_traces.emplace_back(); - trx_context.dispatch_action( trace.inline_traces.back(), inline_action, inline_action.account, true, recurse_depth + 1 ); + for( int32_t ordinal : _cfa_inline_actions ) { + trx_context.execute_action( ordinal, recurse_depth + 1 ); } - for( const auto& inline_action : _inline_actions ) { - trace.inline_traces.emplace_back(); - trx_context.dispatch_action( trace.inline_traces.back(), inline_action, inline_action.account, false, recurse_depth + 1 ); + for( int32_t ordinal : _inline_actions ) { + trx_context.execute_action( ordinal, recurse_depth + 1 ); } } /// exec() @@ -172,15 +161,18 @@ void apply_context::require_authorization(const account_name& account, } bool apply_context::has_recipient( account_name code )const { - for( auto a : _notified ) - if( a == code ) + for( const auto& p : _notified ) + if( p.first == code ) return true; return false; } void apply_context::require_recipient( account_name recipient ) { if( !has_recipient(recipient) ) { - _notified.push_back(recipient); + _notified.emplace_back( + recipient, + trx_context.schedule_action( act, recipient, false, action_ordinal, first_receiver_action_ordinal ) + ); } } @@ -271,7 +263,10 @@ void apply_context::execute_inline( action&& a ) { } } - _inline_actions.emplace_back( move(a) ); + auto inline_receiver = a.account; + _inline_actions.emplace_back( + trx_context.schedule_action( std::move(a), inline_receiver, false, action_ordinal, first_receiver_action_ordinal ) + ); } void apply_context::execute_context_free_inline( action&& a ) { @@ -282,7 +277,11 @@ void apply_context::execute_context_free_inline( action&& a ) { EOS_ASSERT( a.authorization.size() == 0, action_validate_exception, "context-free actions cannot have authorizations" ); - _cfa_inline_actions.emplace_back( move(a) ); + + auto inline_receiver = a.account; + _cfa_inline_actions.emplace_back( + trx_context.schedule_action( std::move(a), inline_receiver, true, action_ordinal, first_receiver_action_ordinal ) + ); } diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp index 2e732c869dc..a32a4ce1b2c 100644 --- a/libraries/chain/controller.cpp +++ b/libraries/chain/controller.cpp @@ -966,8 +966,7 @@ struct controller_impl { try { trx_context.init_for_implicit_trx(); trx_context.published = gtrx.published; - trx_context.trace->action_traces.emplace_back(); - trx_context.dispatch_action( trx_context.trace->action_traces.back(), etrx.actions.back(), gtrx.sender ); + trx_context.execute_action( 
trx_context.schedule_action( etrx.actions.back(), gtrx.sender ) ); trx_context.finalize(); // Automatically rounds up network and CPU usage in trace and bills payers if successful auto restore = make_block_restore_point(); @@ -988,14 +987,13 @@ struct controller_impl { return trace; } - void remove_scheduled_transaction( const generated_transaction_object& gto ) { - resource_limits.add_pending_ram_usage( - gto.payer, - -(config::billable_size_v + gto.packed_trx.size()) - ); + int64_t remove_scheduled_transaction( const generated_transaction_object& gto ) { + int64_t ram_delta = -(config::billable_size_v + gto.packed_trx.size()); + resource_limits.add_pending_ram_usage( gto.payer, ram_delta ); // No need to verify_account_ram_usage since we are only reducing memory db.remove( gto ); + return ram_delta; } bool failure_is_subjective( const fc::exception& e ) const { @@ -1042,7 +1040,7 @@ struct controller_impl { // // IF the transaction FAILs in a subjective way, `undo_session` should expire without being squashed // resulting in the GTO being restored and available for a future block to retire. - remove_scheduled_transaction(gto); + int64_t trx_removal_ram_delta = remove_scheduled_transaction(gto); fc::datastream ds( gtrx.packed_trx.data(), gtrx.packed_trx.size() ); @@ -1064,6 +1062,7 @@ struct controller_impl { trace->producer_block_id = self.pending_producer_block_id(); trace->scheduled = true; trace->receipt = push_receipt( gtrx.trx_id, transaction_receipt::expired, billed_cpu_time_us, 0 ); // expire the transaction + trace->account_ram_delta = account_delta( gtrx.payer, trx_removal_ram_delta ); emit( self.accepted_transaction, trx ); emit( self.applied_transaction, trace ); undo_session.squash(); @@ -1103,6 +1102,8 @@ struct controller_impl { fc::move_append( pending->_block_stage.get()._actions, move(trx_context.executed) ); + trace->account_ram_delta = account_delta( gtrx.payer, trx_removal_ram_delta ); + emit( self.accepted_transaction, trx ); emit( self.applied_transaction, trace ); @@ -1133,6 +1134,7 @@ struct controller_impl { error_trace->failed_dtrx_trace = trace; trace = error_trace; if( !trace->except_ptr ) { + trace->account_ram_delta = account_delta( gtrx.payer, trx_removal_ram_delta ); emit( self.accepted_transaction, trx ); emit( self.applied_transaction, trace ); undo_session.squash(); @@ -1169,6 +1171,7 @@ struct controller_impl { block_timestamp_type(self.pending_block_time()).slot ); // Should never fail trace->receipt = push_receipt(gtrx.trx_id, transaction_receipt::hard_fail, cpu_time_to_bill_us, 0); + trace->account_ram_delta = account_delta( gtrx.payer, trx_removal_ram_delta ); emit( self.accepted_transaction, trx ); emit( self.applied_transaction, trace ); diff --git a/libraries/chain/include/eosio/chain/abi_serializer.hpp b/libraries/chain/include/eosio/chain/abi_serializer.hpp index 398f219ced8..1e87cd26ef1 100644 --- a/libraries/chain/include/eosio/chain/abi_serializer.hpp +++ b/libraries/chain/include/eosio/chain/abi_serializer.hpp @@ -248,7 +248,6 @@ namespace impl { std::is_same::value || std::is_same::value || std::is_same::value || - std::is_same::value || std::is_same::value || std::is_same::value || std::is_same::value || diff --git a/libraries/chain/include/eosio/chain/apply_context.hpp b/libraries/chain/include/eosio/chain/apply_context.hpp index 951422cb753..49721cb0cfe 100644 --- a/libraries/chain/include/eosio/chain/apply_context.hpp +++ b/libraries/chain/include/eosio/chain/apply_context.hpp @@ -570,6 +570,8 @@ class apply_context { account_name 
receiver; ///< the code that is currently running vector used_authorizations; ///< Parallel to act.authorization; tracks which permissions have been used while processing the message uint32_t recurse_depth; ///< how deep inline actions can recurse + int32_t first_receiver_action_ordinal = -1; + int32_t action_ordinal = -1; bool privileged = false; bool context_free = false; bool used_context_free_api = false; @@ -583,9 +585,9 @@ class apply_context { private: iterator_cache keyval_cache; - vector _notified; ///< keeps track of new accounts to be notifed of current message - vector _inline_actions; ///< queued inline messages - vector _cfa_inline_actions; ///< queued inline messages + vector< std::pair > _notified; ///< keeps track of new accounts to be notifed of current message + vector _inline_actions; ///< action_ordinals of queued inline actions + vector _cfa_inline_actions; ///< action_ordinals of queued inline context-free actions std::string _pending_console_output; flat_set _account_ram_deltas; ///< flat_set of account_delta so json is an array of objects diff --git a/libraries/chain/include/eosio/chain/trace.hpp b/libraries/chain/include/eosio/chain/trace.hpp index 58de120bdd8..07dbbd0fdeb 100644 --- a/libraries/chain/include/eosio/chain/trace.hpp +++ b/libraries/chain/include/eosio/chain/trace.hpp @@ -20,33 +20,34 @@ namespace eosio { namespace chain { friend bool operator<( const account_delta& lhs, const account_delta& rhs ) { return lhs.account < rhs.account; } }; - struct base_action_trace { - base_action_trace( const action_receipt& r ):receipt(r){} - base_action_trace(){} - - action_receipt receipt; - action act; - bool context_free = false; - fc::microseconds elapsed; - string console; + struct transaction_trace; + using transaction_trace_ptr = std::shared_ptr; - transaction_id_type trx_id; ///< the transaction that generated this action - uint32_t block_num = 0; - block_timestamp_type block_time; + struct action_trace { + action_trace( const transaction_trace& trace, const action& act, account_name receiver, bool context_free, + int32_t action_ordinal, int32_t creator_action_ordinal, int32_t parent_action_ordinal ); + action_trace( const transaction_trace& trace, action&& act, account_name receiver, bool context_free, + int32_t action_ordinal, int32_t creator_action_ordinal, int32_t parent_action_ordinal ); + //action_trace( const action_receipt& r ):receipt(r){} + action_trace(){} + + int32_t action_ordinal = 0; + int32_t creator_action_ordinal = -1; + int32_t parent_action_ordinal = -1; + fc::optional receipt; + action_name receiver; + action act; + bool context_free = false; + fc::microseconds elapsed; + string console; + transaction_id_type trx_id; ///< the transaction that generated this action + uint32_t block_num = 0; + block_timestamp_type block_time; fc::optional producer_block_id; flat_set account_ram_deltas; fc::optional except; }; - struct action_trace : public base_action_trace { - using base_action_trace::base_action_trace; - - vector inline_traces; - }; - - struct transaction_trace; - using transaction_trace_ptr = std::shared_ptr; - struct transaction_trace { transaction_id_type id; uint32_t block_num = 0; @@ -56,7 +57,8 @@ namespace eosio { namespace chain { fc::microseconds elapsed; uint64_t net_usage = 0; bool scheduled = false; - vector action_traces; ///< disposable + vector action_traces; + fc::optional account_ram_delta; transaction_trace_ptr failed_dtrx_trace; fc::optional except; @@ -68,13 +70,11 @@ namespace eosio { namespace chain { FC_REFLECT( 
eosio::chain::account_delta, (account)(delta) ) -FC_REFLECT( eosio::chain::base_action_trace, - (receipt)(act)(context_free)(elapsed)(console)(trx_id) - (block_num)(block_time)(producer_block_id)(account_ram_deltas)(except) ) - -FC_REFLECT_DERIVED( eosio::chain::action_trace, - (eosio::chain::base_action_trace), (inline_traces) ) +FC_REFLECT( eosio::chain::action_trace, + (action_ordinal)(creator_action_ordinal)(parent_action_ordinal)(receipt) + (receiver)(act)(context_free)(elapsed)(console)(trx_id)(block_num)(block_time) + (producer_block_id)(account_ram_deltas)(except) ) FC_REFLECT( eosio::chain::transaction_trace, (id)(block_num)(block_time)(producer_block_id) (receipt)(elapsed)(net_usage)(scheduled) - (action_traces)(failed_dtrx_trace)(except) ) + (action_traces)(account_ram_delta)(failed_dtrx_trace)(except) ) diff --git a/libraries/chain/include/eosio/chain/transaction_context.hpp b/libraries/chain/include/eosio/chain/transaction_context.hpp index b0327dafb18..0182f5a05f0 100644 --- a/libraries/chain/include/eosio/chain/transaction_context.hpp +++ b/libraries/chain/include/eosio/chain/transaction_context.hpp @@ -64,10 +64,20 @@ namespace eosio { namespace chain { void add_ram_usage( account_name account, int64_t ram_delta ); - void dispatch_action( action_trace& trace, const action& a, account_name receiver, bool context_free = false, uint32_t recurse_depth = 0 ); - inline void dispatch_action( action_trace& trace, const action& a, bool context_free = false ) { - dispatch_action(trace, a, a.account, context_free); - }; + int32_t schedule_action( const action& act, account_name receiver, bool context_free = false, + int32_t creator_action_ordinal = -1, int32_t parent_action_ordinal = -1 ); + + int32_t schedule_action( action&& act, account_name receiver, bool context_free = false, + int32_t creator_action_ordinal = -1, int32_t parent_action_ordinal = -1 ); + + action_trace& get_action_trace( int32_t action_ordinal ); + const action_trace& get_action_trace( int32_t action_ordinal )const; + + void execute_action( action_trace& act_trace, uint32_t recurse_depth ); + inline void execute_action( int32_t action_ordinal, uint32_t recurse_depth = 0 ) { + execute_action( get_action_trace( action_ordinal ), recurse_depth ); + } + void schedule_transaction(); void record_transaction( const transaction_id_type& id, fc::time_point_sec expire ); diff --git a/libraries/chain/trace.cpp b/libraries/chain/trace.cpp new file mode 100644 index 00000000000..0b379c89e00 --- /dev/null +++ b/libraries/chain/trace.cpp @@ -0,0 +1,41 @@ +/** + * @file + * @copyright defined in eos/LICENSE + */ +#include + +namespace eosio { namespace chain { + +action_trace::action_trace( + const transaction_trace& trace, const action& act, account_name receiver, bool context_free, + int32_t action_ordinal, int32_t creator_action_ordinal, int32_t parent_action_ordinal +) +:action_ordinal( action_ordinal ) +,creator_action_ordinal( creator_action_ordinal ) +,parent_action_ordinal( parent_action_ordinal ) +,receiver( receiver ) +,act( act ) +,context_free( context_free ) +,trx_id( trace.id ) +,block_num( trace.block_num ) +,block_time( trace.block_time ) +,producer_block_id( trace.producer_block_id ) +{} + +action_trace::action_trace( + const transaction_trace& trace, action&& act, account_name receiver, bool context_free, + int32_t action_ordinal, int32_t creator_action_ordinal, int32_t parent_action_ordinal +) +:action_ordinal( action_ordinal ) +,creator_action_ordinal( creator_action_ordinal ) +,parent_action_ordinal( 
parent_action_ordinal ) +,receiver( receiver ) +,act( std::move(act) ) +,context_free( context_free ) +,trx_id( trace.id ) +,block_num( trace.block_num ) +,block_time( trace.block_time ) +,producer_block_id( trace.producer_block_id ) +{} + +} } // eosio::chain diff --git a/libraries/chain/transaction_context.cpp b/libraries/chain/transaction_context.cpp index 226e6863a16..da99fd4dbe2 100644 --- a/libraries/chain/transaction_context.cpp +++ b/libraries/chain/transaction_context.cpp @@ -333,17 +333,23 @@ namespace bacc = boost::accumulators; if( apply_context_free ) { for( const auto& act : trx.context_free_actions ) { - trace->action_traces.emplace_back(); - dispatch_action( trace->action_traces.back(), act, true ); + schedule_action( act, act.account, true ); } } if( delay == fc::microseconds() ) { for( const auto& act : trx.actions ) { - trace->action_traces.emplace_back(); - dispatch_action( trace->action_traces.back(), act ); + schedule_action( act, act.account ); } - } else { + } + + auto& action_traces = trace->action_traces; + int32_t num_original_actions_to_execute = action_traces.size(); + for( int32_t i = 0; i < num_original_actions_to_execute; ++i ) { + execute_action( action_traces[i], 0 ); + } + + if( delay != fc::microseconds() ) { schedule_transaction(); } } @@ -566,14 +572,51 @@ namespace bacc = boost::accumulators; return std::make_tuple(account_net_limit, account_cpu_limit, greylisted_net, greylisted_cpu); } - void transaction_context::dispatch_action( action_trace& trace, const action& a, account_name receiver, bool context_free, uint32_t recurse_depth ) { - apply_context acontext( control, *this, a, recurse_depth ); - acontext.context_free = context_free; - acontext.receiver = receiver; + int32_t transaction_context::schedule_action( const action& act, account_name receiver, bool context_free, + int32_t creator_action_ordinal, int32_t parent_action_ordinal ) + { + int32_t action_ordinal = trace->action_traces.size(); + + trace->action_traces.emplace_back( *trace, act, receiver, context_free, + action_ordinal, creator_action_ordinal, parent_action_ordinal ); + + return action_ordinal; + } + + int32_t transaction_context::schedule_action( action&& act, account_name receiver, bool context_free, + int32_t creator_action_ordinal, int32_t parent_action_ordinal ) + { + int32_t action_ordinal = trace->action_traces.size(); + + trace->action_traces.emplace_back( *trace, std::move(act), receiver, context_free, + action_ordinal, creator_action_ordinal, parent_action_ordinal ); + + return action_ordinal; + } + + action_trace& transaction_context::get_action_trace( int32_t action_ordinal ) { + EOS_ASSERT( 0 <= action_ordinal && action_ordinal < trace->action_traces.size() , + transaction_exception, "invalid action_ordinal" ); + return trace->action_traces[action_ordinal]; + } - acontext.exec( trace ); + const action_trace& transaction_context::get_action_trace( int32_t action_ordinal )const { + EOS_ASSERT( 0 <= action_ordinal && action_ordinal < trace->action_traces.size() , + transaction_exception, "invalid action_ordinal" ); + return trace->action_traces[action_ordinal]; } + void transaction_context::execute_action( action_trace& act_trace, uint32_t recurse_depth ) { + apply_context acontext( control, *this, act_trace.act, recurse_depth ); + acontext.receiver = act_trace.receiver; + acontext.first_receiver_action_ordinal = act_trace.action_ordinal; + acontext.action_ordinal = act_trace.action_ordinal; + acontext.context_free = act_trace.context_free; + + acontext.exec( act_trace ); 
+ } + + void transaction_context::schedule_transaction() { // Charge ahead of time for the additional net usage needed to retire the delayed transaction // whether that be by successfully executing, soft failure, hard failure, or expiration. @@ -597,7 +640,9 @@ namespace bacc = boost::accumulators; trx_size = gto.set( trx ); }); - add_ram_usage( cgto.payer, (config::billable_size_v + trx_size) ); + int64_t ram_delta = (config::billable_size_v + trx_size); + add_ram_usage( cgto.payer, ram_delta ); + trace->account_ram_delta = account_delta( cgto.payer, ram_delta ); } void transaction_context::record_transaction( const transaction_id_type& id, fc::time_point_sec expire ) { diff --git a/unittests/api_tests.cpp b/unittests/api_tests.cpp index 889effc9cb3..83f2f20e618 100644 --- a/unittests/api_tests.cpp +++ b/unittests/api_tests.cpp @@ -330,25 +330,25 @@ BOOST_FIXTURE_TEST_CASE(action_receipt_tests, TESTER) { try { auto result = push_reqauth( config::system_account_name, "active" ); BOOST_REQUIRE_EQUAL( result->receipt->status, transaction_receipt::executed ); - BOOST_REQUIRE( result->action_traces[0].receipt.auth_sequence.find( config::system_account_name ) - != result->action_traces[0].receipt.auth_sequence.end() ); - auto base_global_sequence_num = result->action_traces[0].receipt.global_sequence; - auto base_system_recv_seq_num = result->action_traces[0].receipt.recv_sequence; - auto base_system_auth_seq_num = result->action_traces[0].receipt.auth_sequence[config::system_account_name]; - auto base_system_code_seq_num = result->action_traces[0].receipt.code_sequence.value; - auto base_system_abi_seq_num = result->action_traces[0].receipt.abi_sequence.value; + BOOST_REQUIRE( result->action_traces[0].receipt->auth_sequence.find( config::system_account_name ) + != result->action_traces[0].receipt->auth_sequence.end() ); + auto base_global_sequence_num = result->action_traces[0].receipt->global_sequence; + auto base_system_recv_seq_num = result->action_traces[0].receipt->recv_sequence; + auto base_system_auth_seq_num = result->action_traces[0].receipt->auth_sequence[config::system_account_name]; + auto base_system_code_seq_num = result->action_traces[0].receipt->code_sequence.value; + auto base_system_abi_seq_num = result->action_traces[0].receipt->abi_sequence.value; uint64_t base_test_recv_seq_num = 0; uint64_t base_test_auth_seq_num = 0; call_doit_and_check( N(test), N(test), [&]( const transaction_trace_ptr& res ) { BOOST_CHECK_EQUAL( res->receipt->status, transaction_receipt::executed ); - BOOST_CHECK_EQUAL( res->action_traces[0].receipt.global_sequence, base_global_sequence_num + 1 ); - BOOST_CHECK_EQUAL( res->action_traces[0].receipt.code_sequence.value, 1 ); - BOOST_CHECK_EQUAL( res->action_traces[0].receipt.abi_sequence.value, 0 ); - base_test_recv_seq_num = res->action_traces[0].receipt.recv_sequence; + BOOST_CHECK_EQUAL( res->action_traces[0].receipt->global_sequence, base_global_sequence_num + 1 ); + BOOST_CHECK_EQUAL( res->action_traces[0].receipt->code_sequence.value, 1 ); + BOOST_CHECK_EQUAL( res->action_traces[0].receipt->abi_sequence.value, 0 ); + base_test_recv_seq_num = res->action_traces[0].receipt->recv_sequence; BOOST_CHECK( base_test_recv_seq_num > 0 ); base_test_recv_seq_num--; - const auto& m = res->action_traces[0].receipt.auth_sequence; + const auto& m = res->action_traces[0].receipt->auth_sequence; BOOST_CHECK_EQUAL( m.size(), 1 ); BOOST_CHECK_EQUAL( m.begin()->first.to_string(), "test" ); base_test_auth_seq_num = m.begin()->second; @@ -361,11 +361,11 @@ 
BOOST_FIXTURE_TEST_CASE(action_receipt_tests, TESTER) { try { call_provereset_and_check( N(test), N(test), [&]( const transaction_trace_ptr& res ) { BOOST_CHECK_EQUAL( res->receipt->status, transaction_receipt::executed ); - BOOST_CHECK_EQUAL( res->action_traces[0].receipt.global_sequence, base_global_sequence_num + 4 ); - BOOST_CHECK_EQUAL( res->action_traces[0].receipt.recv_sequence, base_test_recv_seq_num + 2 ); - BOOST_CHECK_EQUAL( res->action_traces[0].receipt.code_sequence.value, 2 ); - BOOST_CHECK_EQUAL( res->action_traces[0].receipt.abi_sequence.value, 0 ); - const auto& m = res->action_traces[0].receipt.auth_sequence; + BOOST_CHECK_EQUAL( res->action_traces[0].receipt->global_sequence, base_global_sequence_num + 4 ); + BOOST_CHECK_EQUAL( res->action_traces[0].receipt->recv_sequence, base_test_recv_seq_num + 2 ); + BOOST_CHECK_EQUAL( res->action_traces[0].receipt->code_sequence.value, 2 ); + BOOST_CHECK_EQUAL( res->action_traces[0].receipt->abi_sequence.value, 0 ); + const auto& m = res->action_traces[0].receipt->auth_sequence; BOOST_CHECK_EQUAL( m.size(), 1 ); BOOST_CHECK_EQUAL( m.begin()->first.to_string(), "test" ); BOOST_CHECK_EQUAL( m.begin()->second, base_test_auth_seq_num + 3 ); @@ -377,11 +377,11 @@ BOOST_FIXTURE_TEST_CASE(action_receipt_tests, TESTER) { try { call_doit_and_check( config::system_account_name, N(test), [&]( const transaction_trace_ptr& res ) { BOOST_CHECK_EQUAL( res->receipt->status, transaction_receipt::executed ); - BOOST_CHECK_EQUAL( res->action_traces[0].receipt.global_sequence, base_global_sequence_num + 6 ); - BOOST_CHECK_EQUAL( res->action_traces[0].receipt.recv_sequence, base_system_recv_seq_num + 4 ); - BOOST_CHECK_EQUAL( res->action_traces[0].receipt.code_sequence.value, base_system_code_seq_num + 1 ); - BOOST_CHECK_EQUAL( res->action_traces[0].receipt.abi_sequence.value, base_system_abi_seq_num ); - const auto& m = res->action_traces[0].receipt.auth_sequence; + BOOST_CHECK_EQUAL( res->action_traces[0].receipt->global_sequence, base_global_sequence_num + 6 ); + BOOST_CHECK_EQUAL( res->action_traces[0].receipt->recv_sequence, base_system_recv_seq_num + 4 ); + BOOST_CHECK_EQUAL( res->action_traces[0].receipt->code_sequence.value, base_system_code_seq_num + 1 ); + BOOST_CHECK_EQUAL( res->action_traces[0].receipt->abi_sequence.value, base_system_abi_seq_num ); + const auto& m = res->action_traces[0].receipt->auth_sequence; BOOST_CHECK_EQUAL( m.size(), 1 ); BOOST_CHECK_EQUAL( m.begin()->first.to_string(), "test" ); BOOST_CHECK_EQUAL( m.begin()->second, base_test_auth_seq_num + 4 ); @@ -395,11 +395,11 @@ BOOST_FIXTURE_TEST_CASE(action_receipt_tests, TESTER) { try { call_doit_and_check( N(test), N(test), [&]( const transaction_trace_ptr& res ) { BOOST_CHECK_EQUAL( res->receipt->status, transaction_receipt::executed); - BOOST_CHECK_EQUAL( res->action_traces[0].receipt.global_sequence, base_global_sequence_num + 11 ); - BOOST_CHECK_EQUAL( res->action_traces[0].receipt.recv_sequence, base_test_recv_seq_num + 3 ); - BOOST_CHECK_EQUAL( res->action_traces[0].receipt.code_sequence.value, 4 ); - BOOST_CHECK_EQUAL( res->action_traces[0].receipt.abi_sequence.value, 1 ); - const auto& m = res->action_traces[0].receipt.auth_sequence; + BOOST_CHECK_EQUAL( res->action_traces[0].receipt->global_sequence, base_global_sequence_num + 11 ); + BOOST_CHECK_EQUAL( res->action_traces[0].receipt->recv_sequence, base_test_recv_seq_num + 3 ); + BOOST_CHECK_EQUAL( res->action_traces[0].receipt->code_sequence.value, 4 ); + BOOST_CHECK_EQUAL( 
res->action_traces[0].receipt->abi_sequence.value, 1 ); + const auto& m = res->action_traces[0].receipt->auth_sequence; BOOST_CHECK_EQUAL( m.size(), 1 ); BOOST_CHECK_EQUAL( m.begin()->first.to_string(), "test" ); BOOST_CHECK_EQUAL( m.begin()->second, base_test_auth_seq_num + 8 ); @@ -701,12 +701,12 @@ BOOST_FIXTURE_TEST_CASE(cf_action_tests, TESTER) { try { // test send context free action auto ttrace = CALL_TEST_FUNCTION( *this, "test_transaction", "send_cf_action", {} ); - BOOST_CHECK_EQUAL(ttrace->action_traces.size(), 1); - BOOST_CHECK_EQUAL(ttrace->action_traces[0].inline_traces.size(), 1); - BOOST_CHECK_EQUAL(ttrace->action_traces[0].inline_traces[0].receipt.receiver, account_name("dummy")); - BOOST_CHECK_EQUAL(ttrace->action_traces[0].inline_traces[0].act.account, account_name("dummy")); - BOOST_CHECK_EQUAL(ttrace->action_traces[0].inline_traces[0].act.name, account_name("event1")); - BOOST_CHECK_EQUAL(ttrace->action_traces[0].inline_traces[0].act.authorization.size(), 0); + BOOST_REQUIRE_EQUAL(ttrace->action_traces.size(), 2); + BOOST_CHECK_EQUAL(ttrace->action_traces[1].creator_action_ordinal, 0); + BOOST_CHECK_EQUAL(ttrace->action_traces[1].receiver, account_name("dummy")); + BOOST_CHECK_EQUAL(ttrace->action_traces[1].act.account, account_name("dummy")); + BOOST_CHECK_EQUAL(ttrace->action_traces[1].act.name, account_name("event1")); + BOOST_CHECK_EQUAL(ttrace->action_traces[1].act.authorization.size(), 0); BOOST_CHECK_EXCEPTION( CALL_TEST_FUNCTION( *this, "test_transaction", "send_cf_action_fail", {} ), eosio_assert_message_exception, diff --git a/unittests/wasm_tests.cpp b/unittests/wasm_tests.cpp index 5335ee037c4..c29ec8da8c4 100644 --- a/unittests/wasm_tests.cpp +++ b/unittests/wasm_tests.cpp @@ -92,7 +92,7 @@ BOOST_FIXTURE_TEST_CASE( basic_test, TESTER ) try { auto result = push_transaction( trx ); BOOST_CHECK_EQUAL(result->receipt->status, transaction_receipt::executed); BOOST_CHECK_EQUAL(result->action_traces.size(), 1u); - BOOST_CHECK_EQUAL(result->action_traces.at(0).receipt.receiver.to_string(), name(N(asserter)).to_string() ); + BOOST_CHECK_EQUAL(result->action_traces.at(0).receiver.to_string(), name(N(asserter)).to_string() ); BOOST_CHECK_EQUAL(result->action_traces.at(0).act.account.to_string(), name(N(asserter)).to_string() ); BOOST_CHECK_EQUAL(result->action_traces.at(0).act.name.to_string(), name(N(procassert)).to_string() ); BOOST_CHECK_EQUAL(result->action_traces.at(0).act.authorization.size(), 1u ); From ed8b7b41c0c80e275b911268ca846113a32c5f16 Mon Sep 17 00:00:00 2001 From: arhag Date: Fri, 29 Mar 2019 23:41:57 -0400 Subject: [PATCH 245/680] fix plugins #6897 Now compiles, but the tests fail due to bugs. 
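[Editor's note] PATCH 244 above replaces the recursive inline_traces tree with a single flat action_traces vector: each action_trace now records its own action_ordinal plus the creator_action_ordinal and parent_action_ordinal that link it back to the action that spawned it, with -1 meaning "none". The plugin fixes in this patch follow from that layout: consumers iterate the flat vector and filter on atrace.receiver instead of recursing into inline_traces. The short Python sketch below is illustrative only and not part of any patch; it assumes traces arrive as parsed-JSON dicts using the field names shown in the diffs, and that action_ordinal equals the trace's index in the vector (which the schedule_action code above guarantees, since the ordinal is taken from action_traces.size() just before emplacing). It rebuilds the old nested view from the ordinals:

from collections import defaultdict

def build_action_tree(action_traces):
    """Map each creator ordinal to the ordinals of the actions it created."""
    children = defaultdict(list)
    for trace in action_traces:
        children[trace["creator_action_ordinal"]].append(trace["action_ordinal"])
    return children

def print_action_tree(action_traces, children=None, ordinal=-1, depth=0):
    """Depth-first walk that mimics the removed recursive inline_traces view."""
    if children is None:
        children = build_action_tree(action_traces)
    for child in children.get(ordinal, []):
        act = action_traces[child]["act"]
        print("  " * depth + "#%d %s::%s" % (child, act["account"], act["name"]))
        print_action_tree(action_traces, children, child, depth + 1)
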
--- plugins/history_plugin/history_plugin.cpp | 48 +++++++++---------- plugins/mongo_db_plugin/mongo_db_plugin.cpp | 14 ++---- .../state_history_serialization.hpp | 15 +++++- .../state_history_plugin_abi.cpp | 7 ++- 4 files changed, 45 insertions(+), 39 deletions(-) diff --git a/plugins/history_plugin/history_plugin.cpp b/plugins/history_plugin/history_plugin.cpp index a44321ca0bc..6cdcffcb546 100644 --- a/plugins/history_plugin/history_plugin.cpp +++ b/plugins/history_plugin/history_plugin.cpp @@ -148,34 +148,34 @@ namespace eosio { if (bypass_filter) { pass_on = true; } - if (filter_on.find({ act.receipt.receiver, 0, 0 }) != filter_on.end()) { + if (filter_on.find({ act.receiver, 0, 0 }) != filter_on.end()) { pass_on = true; } - if (filter_on.find({ act.receipt.receiver, act.act.name, 0 }) != filter_on.end()) { + if (filter_on.find({ act.receiver, act.act.name, 0 }) != filter_on.end()) { pass_on = true; } for (const auto& a : act.act.authorization) { - if (filter_on.find({ act.receipt.receiver, 0, a.actor }) != filter_on.end()) { + if (filter_on.find({ act.receiver, 0, a.actor }) != filter_on.end()) { pass_on = true; } - if (filter_on.find({ act.receipt.receiver, act.act.name, a.actor }) != filter_on.end()) { + if (filter_on.find({ act.receiver, act.act.name, a.actor }) != filter_on.end()) { pass_on = true; } } if (!pass_on) { return false; } - if (filter_out.find({ act.receipt.receiver, 0, 0 }) != filter_out.end()) { + if (filter_out.find({ act.receiver, 0, 0 }) != filter_out.end()) { return false; } - if (filter_out.find({ act.receipt.receiver, act.act.name, 0 }) != filter_out.end()) { + if (filter_out.find({ act.receiver, act.act.name, 0 }) != filter_out.end()) { return false; } for (const auto& a : act.act.authorization) { - if (filter_out.find({ act.receipt.receiver, 0, a.actor }) != filter_out.end()) { + if (filter_out.find({ act.receiver, 0, a.actor }) != filter_out.end()) { return false; } - if (filter_out.find({ act.receipt.receiver, act.act.name, a.actor }) != filter_out.end()) { + if (filter_out.find({ act.receiver, act.act.name, a.actor }) != filter_out.end()) { return false; } } @@ -186,17 +186,17 @@ namespace eosio { set account_set( const action_trace& act ) { set result; - result.insert( act.receipt.receiver ); + result.insert( act.receiver ); for( const auto& a : act.act.authorization ) { if( bypass_filter || - filter_on.find({ act.receipt.receiver, 0, 0}) != filter_on.end() || - filter_on.find({ act.receipt.receiver, 0, a.actor}) != filter_on.end() || - filter_on.find({ act.receipt.receiver, act.act.name, 0}) != filter_on.end() || - filter_on.find({ act.receipt.receiver, act.act.name, a.actor }) != filter_on.end() ) { - if ((filter_out.find({ act.receipt.receiver, 0, 0 }) == filter_out.end()) && - (filter_out.find({ act.receipt.receiver, 0, a.actor }) == filter_out.end()) && - (filter_out.find({ act.receipt.receiver, act.act.name, 0 }) == filter_out.end()) && - (filter_out.find({ act.receipt.receiver, act.act.name, a.actor }) == filter_out.end())) { + filter_on.find({ act.receiver, 0, 0}) != filter_on.end() || + filter_on.find({ act.receiver, 0, a.actor}) != filter_on.end() || + filter_on.find({ act.receiver, act.act.name, 0}) != filter_on.end() || + filter_on.find({ act.receiver, act.act.name, a.actor }) != filter_on.end() ) { + if ((filter_out.find({ act.receiver, 0, 0 }) == filter_out.end()) && + (filter_out.find({ act.receiver, 0, a.actor }) == filter_out.end()) && + (filter_out.find({ act.receiver, act.act.name, 0 }) == filter_out.end()) && + (filter_out.find({ 
act.receiver, act.act.name, a.actor }) == filter_out.end())) { result.insert( a.actor ); } } @@ -204,7 +204,7 @@ namespace eosio { return result; } - void record_account_action( account_name n, const base_action_trace& act ) { + void record_account_action( account_name n, const action_trace& act ) { auto& chain = chain_plug->chain(); chainbase::database& db = const_cast( chain.db() ); // Override read-only access to state DB (highly unrecommended practice!) @@ -216,13 +216,11 @@ namespace eosio { if( itr->account == n ) asn = itr->account_sequence_num + 1; - //idump((n)(act.receipt.global_sequence)(asn)); const auto& a = db.create( [&]( auto& aho ) { aho.account = n; - aho.action_sequence_num = act.receipt.global_sequence; + aho.action_sequence_num = act.receipt->global_sequence; aho.account_sequence_num = asn; }); - //idump((a.account)(a.action_sequence_num)(a.action_sequence_num)); } void on_system_action( const action_trace& at ) { @@ -263,7 +261,7 @@ namespace eosio { aho.packed_action_trace.resize(ps); datastream ds( aho.packed_action_trace.data(), ps ); fc::raw::pack( ds, at ); - aho.action_sequence_num = at.receipt.global_sequence; + aho.action_sequence_num = at.receipt->global_sequence; aho.block_num = chain.head_block_num() + 1; aho.block_time = chain.pending_block_time(); aho.trx_id = at.trx_id; @@ -274,11 +272,8 @@ namespace eosio { record_account_action( a, at ); } } - if( at.receipt.receiver == chain::config::system_account_name ) + if( at.receiver == chain::config::system_account_name ) on_system_action( at ); - for( const auto& iline : at.inline_traces ) { - on_action_trace( iline ); - } } void on_applied_transaction( const transaction_trace_ptr& trace ) { @@ -286,6 +281,7 @@ namespace eosio { trace->receipt->status != transaction_receipt_header::soft_fail) ) return; for( const auto& atrace : trace->action_traces ) { + if( !atrace.receipt ) continue; on_action_trace( atrace ); } } diff --git a/plugins/mongo_db_plugin/mongo_db_plugin.cpp b/plugins/mongo_db_plugin/mongo_db_plugin.cpp index 8131b6a2bb2..3b3b39f4f84 100644 --- a/plugins/mongo_db_plugin/mongo_db_plugin.cpp +++ b/plugins/mongo_db_plugin/mongo_db_plugin.cpp @@ -751,7 +751,7 @@ void mongo_db_plugin_impl::_process_accepted_transaction( const chain::transacti const signed_transaction& trx = t->packed_trx->get_signed_transaction(); if( !filter_include( trx ) ) return; - + auto trans_doc = bsoncxx::builder::basic::document{}; auto now = std::chrono::duration_cast( @@ -830,22 +830,20 @@ mongo_db_plugin_impl::add_action_trace( mongocxx::bulk_write& bulk_action_traces using namespace bsoncxx::types; using bsoncxx::builder::basic::kvp; - if( executed && atrace.receipt.receiver == chain::config::system_account_name ) { + if( executed && atrace.receiver == chain::config::system_account_name ) { update_account( atrace.act ); } bool added = false; const bool in_filter = (store_action_traces || store_transaction_traces) && start_block_reached && - filter_include( atrace.receipt.receiver, atrace.act.name, atrace.act.authorization ); + filter_include( atrace.receiver, atrace.act.name, atrace.act.authorization ); write_ttrace |= in_filter; if( start_block_reached && store_action_traces && in_filter ) { auto action_traces_doc = bsoncxx::builder::basic::document{}; - const chain::base_action_trace& base = atrace; // without inline action traces - // improve data distributivity when using mongodb sharding action_traces_doc.append( kvp( "_id", make_custom_oid() ) ); - auto v = to_variant_with_abi( base ); + auto v = 
to_variant_with_abi( atrace ); string json = fc::json::to_string( v ); try { const auto& value = bsoncxx::from_json( json ); @@ -871,10 +869,6 @@ mongo_db_plugin_impl::add_action_trace( mongocxx::bulk_write& bulk_action_traces added = true; } - for( const auto& iline_atrace : atrace.inline_traces ) { - added |= add_action_trace( bulk_action_traces, iline_atrace, t, executed, now, write_ttrace ); - } - return added; } diff --git a/plugins/state_history_plugin/include/eosio/state_history_plugin/state_history_serialization.hpp b/plugins/state_history_plugin/include/eosio/state_history_plugin/state_history_serialization.hpp index 37c817dd9cc..473e5720ec4 100644 --- a/plugins/state_history_plugin/include/eosio/state_history_plugin/state_history_serialization.hpp +++ b/plugins/state_history_plugin/include/eosio/state_history_plugin/state_history_serialization.hpp @@ -480,7 +480,14 @@ datastream& operator<<(datastream& ds, const history_serial_wrapper datastream& operator<<(datastream& ds, const history_serial_wrapper& obj) { fc::raw::pack(ds, fc::unsigned_int(0)); - fc::raw::pack(ds, make_history_serial_wrapper(obj.db, as_type(obj.obj.receipt))); + fc::raw::pack(ds, as_type(obj.obj.action_ordinal)); + fc::raw::pack(ds, as_type(obj.obj.creator_action_ordinal)); + fc::raw::pack(ds, as_type(obj.obj.parent_action_ordinal)); + fc::raw::pack(ds, bool(obj.obj.receipt)); + if (obj.obj.receipt) { + fc::raw::pack(ds, make_history_serial_wrapper(obj.db, as_type(*obj.obj.receipt))); + } + fc::raw::pack(ds, as_type(obj.obj.receiver.value)); fc::raw::pack(ds, make_history_serial_wrapper(obj.db, as_type(obj.obj.act))); fc::raw::pack(ds, as_type(obj.obj.context_free)); fc::raw::pack(ds, as_type(obj.obj.elapsed.count())); @@ -492,7 +499,6 @@ datastream& operator<<(datastream& ds, const history_serial_wrapperto_string(); fc::raw::pack(ds, as_type>(e)); - history_serialize_container(ds, obj.db, as_type>(obj.obj.inline_traces)); return ds; } @@ -519,6 +525,11 @@ datastream& operator<<(datastream& fc::raw::pack(ds, as_type(obj.obj.scheduled)); history_serialize_container(ds, obj.db, as_type>(obj.obj.action_traces)); + fc::raw::pack(ds, bool(obj.obj.account_ram_delta)); + if (obj.obj.account_ram_delta) { + fc::raw::pack(ds, make_history_serial_wrapper(obj.db, as_type(*obj.obj.account_ram_delta))); + } + fc::optional e; if (obj.obj.except) e = obj.obj.except->to_string(); diff --git a/plugins/state_history_plugin/state_history_plugin_abi.cpp b/plugins/state_history_plugin/state_history_plugin_abi.cpp index bdedcc81cd9..af2afb8e1bf 100644 --- a/plugins/state_history_plugin/state_history_plugin_abi.cpp +++ b/plugins/state_history_plugin/state_history_plugin_abi.cpp @@ -93,7 +93,11 @@ extern const char* const state_history_plugin_abi = R"({ }, { "name": "action_trace_v0", "fields": [ - { "name": "receipt", "type": "action_receipt" }, + { "name": "action_ordinal", "type": "int32" }, + { "name": "creator_action_ordinal", "type": "int32" }, + { "name": "parent_action_ordinal", "type": "int32" }, + { "name": "receipt", "type": "action_receipt?" }, + { "name": "receiver", "type": "name" }, { "name": "act", "type": "action" }, { "name": "context_free", "type": "bool" }, { "name": "elapsed", "type": "int64" }, @@ -113,6 +117,7 @@ extern const char* const state_history_plugin_abi = R"({ { "name": "net_usage", "type": "uint64" }, { "name": "scheduled", "type": "bool" }, { "name": "action_traces", "type": "action_trace[]" }, + { "name": "account_ram_delta", "type": "account_delta?" }, { "name": "except", "type": "string?" 
}, { "name": "failed_dtrx_trace", "type": "transaction_trace?" } ] From 6692f2f39986a3d6a96ec9dddc01133e4c2f7ca4 Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Sun, 31 Mar 2019 12:30:45 -0500 Subject: [PATCH 246/680] Report better info while trying to identify cluster sync. GH #7034 --- tests/Cluster.py | 20 +++++++++++++++----- 1 file changed, 15 insertions(+), 5 deletions(-) diff --git a/tests/Cluster.py b/tests/Cluster.py index debfa1464cd..77012324a66 100644 --- a/tests/Cluster.py +++ b/tests/Cluster.py @@ -517,18 +517,28 @@ def waitOnClusterBlockNumSync(self, targetBlockNum, timeout=None, blockType=Bloc """Wait for all nodes to have targetBlockNum finalized.""" assert(self.nodes) - def doNodesHaveBlockNum(nodes, targetBlockNum, blockType): + def doNodesHaveBlockNum(nodes, targetBlockNum, blockType, printCount): + ret=True for node in nodes: try: if (not node.killed) and (not node.isBlockPresent(targetBlockNum, blockType=blockType)): - return False + ret=False + break except (TypeError) as _: # This can happen if client connects before server is listening - return False + ret=False + break - return True + printCount+=1 + if Utils.Debug and not ret and printCount%5==0: + blockNums=[] + for i in range(0, len(nodes)): + blockNums.append(nodes[i].getBlockNum()) + Utils.Print("Cluster still not in sync, head blocks for nodes: [ %s ]" % (", ".join(blockNums))) + return ret - lam = lambda: doNodesHaveBlockNum(self.nodes, targetBlockNum, blockType) + printCount=0 + lam = lambda: doNodesHaveBlockNum(self.nodes, targetBlockNum, blockType, printCount) ret=Utils.waitForBool(lam, timeout) return ret From 522c69a766ea4778caeea0fdffa95c5f32e4351b Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Sun, 31 Mar 2019 12:33:53 -0500 Subject: [PATCH 247/680] Fix test and add verifying that the txn_test_gen_plugin is producing. 
GH #7034 --- tests/nodeos_startup_catchup.py | 43 ++++++++++++++++++++++++++++++--- 1 file changed, 40 insertions(+), 3 deletions(-) diff --git a/tests/nodeos_startup_catchup.py b/tests/nodeos_startup_catchup.py index c7f1fa80ae4..e75fe165230 100755 --- a/tests/nodeos_startup_catchup.py +++ b/tests/nodeos_startup_catchup.py @@ -77,10 +77,12 @@ Print("Validating system accounts after bootstrap") cluster.validateAccounts(None) + Print("Create txn generate nodes") txnGenNodes=[] for nodeNum in range(txnGenNodeNum, txnGenNodeNum+startedNonProdNodes): txnGenNodes.append(cluster.getNode(nodeNum)) + Print("Create accounts for generated txns") txnGenNodes[0].txnGenCreateTestAccounts(cluster.eosioAccount.name, cluster.eosioAccount.activePrivateKey) def lib(node): @@ -91,43 +93,78 @@ def head(node): node0=cluster.getNode(0) + Print("Wait for account creation to be irreversible") blockNum=head(node0) node0.waitForBlock(blockNum, blockType=BlockType.lib) + Print("Startup txn generation") + period=1500 + transPerPeriod=150 for genNum in range(0, len(txnGenNodes)): salt="%d" % genNum - txnGenNodes[genNum].txnGenStart(salt, 1500, 150) + txnGenNodes[genNum].txnGenStart(salt, period, transPerPeriod) time.sleep(1) blockNum=head(node0) - node0.waitForBlock(blockNum+20) - + timePerBlock=500 + blocksPerPeriod=period/timePerBlock + transactionsPerBlock=transPerPeriod/blocksPerPeriod + steadyStateWait=20 + startBlockNum=blockNum+steadyStateWait + numBlocks=20 + endBlockNum=startBlockNum+numBlocks + node0.waitForBlock(endBlockNum) + transactions=0 + avg=0 + for blockNum in range(startBlockNum, endBlockNum): + block=node0.getBlock(blockNum) + transactions+=len(block["transactions"]) + + avg=transactions / (blockNum - startBlockNum + 1) + + Print("Validate transactions are generating") + minRequiredTransactions=transactionsPerBlock + assert avg>minRequiredTransactions, "Expected to at least receive %s transactions per block, but only getting %s" % (minRequiredTransactions, avg) + + Print("Cycle through catchup scenarios") twoRounds=21*2*12 for catchup_num in range(0, catchupCount): + Print("Start catchup node") cluster.launchUnstarted(cachePopen=True) lastLibNum=lib(node0) + time.sleep(2) # verify producer lib is still advancing node0.waitForBlock(lastLibNum+1, timeout=twoRounds/2, blockType=BlockType.lib) catchupNode=cluster.getNodes()[-1] catchupNodeNum=cluster.getNodes().index(catchupNode) lastCatchupLibNum=lib(catchupNode) + + Print("Verify catchup node %s's LIB is advancing" % (catchupNodeNum)) # verify lib is advancing (before we wait for it to have to catchup with producer) catchupNode.waitForBlock(lastCatchupLibNum+1, timeout=twoRounds/2, blockType=BlockType.lib) + Print("Verify catchup node is advancing to producer") numBlocksToCatchup=(lastLibNum-lastCatchupLibNum-1)+twoRounds catchupNode.waitForBlock(lastLibNum, timeout=(numBlocksToCatchup)/2, blockType=BlockType.lib) + Print("Shutdown catchup node and validate exit code") catchupNode.interruptAndVerifyExitStatus(60) + Print("Restart catchup node") catchupNode.relaunch(catchupNodeNum) lastCatchupLibNum=lib(catchupNode) + + Print("Verify catchup node is advancing") # verify catchup node is advancing to producer catchupNode.waitForBlock(lastCatchupLibNum+1, timeout=twoRounds/2, blockType=BlockType.lib) + Print("Verify producer is still advancing LIB") lastLibNum=lib(node0) # verify producer lib is still advancing node0.waitForBlock(lastLibNum+1, timeout=twoRounds/2, blockType=BlockType.lib) + + Print("Verify catchup node is advancing to producer") # verify 
catchup node is advancing to producer catchupNode.waitForBlock(lastLibNum, timeout=(numBlocksToCatchup)/2, blockType=BlockType.lib) From a560fd391ce54936345a8a204484b76b30db5acb Mon Sep 17 00:00:00 2001 From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com> Date: Sun, 31 Mar 2019 15:48:57 -0400 Subject: [PATCH 248/680] Remove final remnants of boost thread usage from cmake Because this could be the last boost thread reference, we need to tell cmake to still pass thread compiler flags --- CMakeLists.txt | 8 ++++---- libraries/appbase | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 694f0814aaa..17c3df72451 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -104,16 +104,12 @@ IF( WIN32 ) set(BOOST_ALL_DYN_LINK OFF) # force dynamic linking for all libraries ENDIF(WIN32) FIND_PACKAGE(Boost 1.67 REQUIRED COMPONENTS - thread date_time filesystem system program_options - serialization chrono unit_test_framework - context - locale iostreams) # Some new stdlibc++s will #error on ; a problem for boost pre-1.69 @@ -121,6 +117,10 @@ if( APPLE AND UNIX ) add_definitions(-DBOOST_ASIO_DISABLE_STD_EXPERIMENTAL_STRING_VIEW) endif() +set(THREADS_PREFER_PTHREAD_FLAG 1) +find_package(Threads) +link_libraries(Threads::Threads) + if( WIN32 ) message( STATUS "Configuring EOSIO on WIN32") diff --git a/libraries/appbase b/libraries/appbase index 013246f52f1..b6b55f5ff99 160000 --- a/libraries/appbase +++ b/libraries/appbase @@ -1 +1 @@ -Subproject commit 013246f52f13a7bc129193c3a64e6cd0cea44ac0 +Subproject commit b6b55f5ff993f4be954d2aa556538636fbdaabb4 From 8ea813a6abef9e251c90a8ac68aeaa9e3a5c66cf Mon Sep 17 00:00:00 2001 From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com> Date: Sun, 31 Mar 2019 15:50:20 -0400 Subject: [PATCH 249/680] When building boost on macos, only build the libraries needed by eosio --- scripts/eosio_build_darwin.sh | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/scripts/eosio_build_darwin.sh b/scripts/eosio_build_darwin.sh index e418be9a717..224b0839f1d 100755 --- a/scripts/eosio_build_darwin.sh +++ b/scripts/eosio_build_darwin.sh @@ -171,7 +171,8 @@ if [ "${BOOSTVERSION}" != "${BOOST_VERSION_MAJOR}0${BOOST_VERSION_MINOR}0${BOOST && tar -xjf boost_$BOOST_VERSION.tar.bz2 \ && cd $BOOST_ROOT \ && ./bootstrap.sh --prefix=$BOOST_ROOT \ - && ./b2 -q -j$(sysctl -in machdep.cpu.core_count) install \ + && ./b2 -q -j$(sysctl -in machdep.cpu.core_count) --with-iostreams --with-date_time --with-filesystem \ + --with-system --with-program_options --with-chrono --with-test install \ && cd .. 
\ && rm -f boost_$BOOST_VERSION.tar.bz2 \ && rm -rf $BOOST_LINK_LOCATION \ From 1ad1f742b62757d69ffb01d79369791581dd42d3 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Fri, 29 Mar 2019 15:59:30 -0500 Subject: [PATCH 250/680] Fix for close() called while async_read in-flight --- plugins/net_plugin/net_plugin.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index a26353ab387..9b79ae6bf70 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -826,7 +826,6 @@ namespace eosio { fc_dlog(logger, "canceling wait on ${p}", ("p",peer_name())); cancel_wait(); if( read_delay_timer ) read_delay_timer->cancel(); - pending_message_buffer.reset(); } void connection::txn_send_pending(const vector& ids) { @@ -1886,6 +1885,7 @@ namespace eosio { auto current_endpoint = *endpoint_itr; ++endpoint_itr; c->connecting = true; + c->pending_message_buffer.reset(); connection_wptr weak_conn = c; c->socket->async_connect( current_endpoint, boost::asio::bind_executor( c->strand, [weak_conn, endpoint_itr, this]( const boost::system::error_code& err ) { @@ -2061,7 +2061,7 @@ namespace eosio { [this,weak_conn]( boost::system::error_code ec, std::size_t bytes_transferred ) { app().post( priority::medium, [this,weak_conn, ec, bytes_transferred]() { auto conn = weak_conn.lock(); - if (!conn) { + if (!conn || !conn->connected()) { return; } From cc4d83bcd0e87179baea2a312c1686b1034e61c5 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Fri, 29 Mar 2019 17:36:31 -0500 Subject: [PATCH 251/680] Can't call connected(); it checks a flag that is only set after the first read --- plugins/net_plugin/net_plugin.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 9b79ae6bf70..268ca1e3359 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -2061,7 +2061,7 @@ namespace eosio { [this,weak_conn]( boost::system::error_code ec, std::size_t bytes_transferred ) { app().post( priority::medium, [this,weak_conn, ec, bytes_transferred]() { auto conn = weak_conn.lock(); - if (!conn || !conn->connected()) { + if (!conn || !conn->socket || !conn->socket->is_open()) { return; } From 0f7d853a4a8bea8612b92ce35115368b8f0d79d6 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Tue, 26 Mar 2019 10:17:17 -0500 Subject: [PATCH 252/680] Name bnet threads --- plugins/bnet_plugin/bnet_plugin.cpp | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/plugins/bnet_plugin/bnet_plugin.cpp b/plugins/bnet_plugin/bnet_plugin.cpp index b788d833503..08d2091040f 100644 --- a/plugins/bnet_plugin/bnet_plugin.cpp +++ b/plugins/bnet_plugin/bnet_plugin.cpp @@ -51,6 +51,7 @@ #include #include +#include #include #include @@ -1398,7 +1399,13 @@ namespace eosio { my->_socket_threads.reserve( my->_num_threads ); for( auto i = 0; i < my->_num_threads; ++i ) { - my->_socket_threads.emplace_back( [&ioc]{ wlog( "start thread" ); ioc.run(); wlog( "end thread" ); } ); + my->_socket_threads.emplace_back( [&ioc, i]{ + std::string tn = "bnet-" + std::to_string( i ); + fc::set_os_thread_name( tn ); + wlog( "start thread" ); + ioc.run(); + wlog( "end thread" ); + } ); } for( const auto& peer : my->_connect_to_peers ) { From 92ebd6a55c3d5ce4c1505b590df325d95931f1da Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Tue, 26 Mar 2019 10:17:34 -0500 Subject: [PATCH 253/680] Name http threads ---
plugins/http_plugin/http_plugin.cpp | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/plugins/http_plugin/http_plugin.cpp b/plugins/http_plugin/http_plugin.cpp index 7e205736874..fe2b31472e7 100644 --- a/plugins/http_plugin/http_plugin.cpp +++ b/plugins/http_plugin/http_plugin.cpp @@ -522,7 +522,11 @@ namespace eosio { my->server_ioc = std::make_shared(); my->server_ioc_work.emplace( boost::asio::make_work_guard(*my->server_ioc) ); for( uint16_t i = 0; i < my->thread_pool_size; ++i ) { - boost::asio::post( *my->thread_pool, [ioc = my->server_ioc]() { ioc->run(); } ); + boost::asio::post( *my->thread_pool, [ioc = my->server_ioc, i]() { + std::string tn = "http-" + std::to_string( i ); + fc::set_os_thread_name( tn ); + ioc->run(); + } ); } if(my->listen_endpoint) { From 9015d3063075b5678c972a9f51da478b40f23980 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Tue, 26 Mar 2019 10:17:40 -0500 Subject: [PATCH 254/680] Name mongo_db_plugin consume thread --- plugins/mongo_db_plugin/mongo_db_plugin.cpp | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/plugins/mongo_db_plugin/mongo_db_plugin.cpp b/plugins/mongo_db_plugin/mongo_db_plugin.cpp index 25dea46d546..72a43caf418 100644 --- a/plugins/mongo_db_plugin/mongo_db_plugin.cpp +++ b/plugins/mongo_db_plugin/mongo_db_plugin.cpp @@ -10,6 +10,7 @@ #include #include +#include #include #include @@ -1527,7 +1528,10 @@ void mongo_db_plugin_impl::init() { ilog("starting db plugin thread"); - consume_thread = std::thread([this] { consume_blocks(); }); + consume_thread = std::thread([this] { + fc::set_os_thread_name( "mongodb" ); + consume_blocks(); + }); startup = false; } From dc4026e711d307164cc59b25d7525941568b05e4 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Tue, 26 Mar 2019 10:18:27 -0500 Subject: [PATCH 255/680] Name main application thread --- programs/nodeos/main.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/programs/nodeos/main.cpp b/programs/nodeos/main.cpp index 7034a03858a..403b5c2b317 100644 --- a/programs/nodeos/main.cpp +++ b/programs/nodeos/main.cpp @@ -84,6 +84,7 @@ enum return_codes { int main(int argc, char** argv) { try { + fc::set_os_thread_name( "main" ); app().set_version(eosio::nodeos::config::version); auto root = fc::app_path(); From d6bf0b0447d30ba7b875bea2f5ad765b61fdd7b1 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Tue, 26 Mar 2019 10:20:04 -0500 Subject: [PATCH 256/680] Name net_plugin server_ioc threads --- plugins/net_plugin/net_plugin.cpp | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index a26353ab387..c8e7bf20a6f 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -18,6 +18,7 @@ #include #include #include +#include #include #include #include @@ -3016,7 +3017,11 @@ namespace eosio { my->server_ioc_work.emplace( boost::asio::make_work_guard( *my->server_ioc ) ); // currently thread_pool only used for server_ioc for( uint16_t i = 0; i < my->thread_pool_size; ++i ) { - boost::asio::post( *my->thread_pool, [ioc = my->server_ioc]() { ioc->run(); } ); + boost::asio::post( *my->thread_pool, [ioc = my->server_ioc, i]() { + std::string tn = "net-" + std::to_string( i ); + fc::set_os_thread_name( tn ); + ioc->run(); + } ); } my->resolver = std::make_shared( std::ref( *my->server_ioc )); From 5dac7047901ab5e408fd9779e353244cce861d14 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Tue, 26 Mar 2019 10:20:39 -0500 Subject: [PATCH 257/680] 
Name all threads in chain controller thread pool --- libraries/chain/controller.cpp | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp index f3b0a841981..9ee8626a77f 100644 --- a/libraries/chain/controller.cpp +++ b/libraries/chain/controller.cpp @@ -21,6 +21,7 @@ #include #include +#include #include #include @@ -1727,7 +1728,25 @@ void controller::add_indices() { my->add_indices(); } +void set_thread_name( boost::asio::thread_pool& tp, uint16_t i, uint16_t sz ) { + std::string tn = "chain-" + std::to_string( i ); + fc::set_os_thread_name( tn ); + ++i; + if( i < sz ) { + // post recursively so we consume all the threads + auto fut = eosio::chain::async_thread_pool( tp, [&tp, i, sz]() { + set_thread_name( tp, i, sz ); + }); + fut.wait(); + } +} + void controller::startup( std::function shutdown, const snapshot_reader_ptr& snapshot ) { + // name threads in thread pool for logger + boost::asio::post( get_thread_pool(), [&tp = get_thread_pool(), sz = my->conf.thread_pool_size]() { + set_thread_name( tp, 0, sz ); + }); + my->head = my->fork_db.head(); if( snapshot ) { ilog( "Starting initialization from snapshot, this may take a significant amount of time" ); From c2a07d274c476aa1f4fdbde57d846e0c70bc74f4 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Tue, 26 Mar 2019 10:21:16 -0500 Subject: [PATCH 258/680] Name all threads in producer thread pool --- plugins/producer_plugin/producer_plugin.cpp | 24 +++++++++++++++++++-- 1 file changed, 22 insertions(+), 2 deletions(-) diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp index a35fa34a9c5..0f9fc79ccea 100644 --- a/plugins/producer_plugin/producer_plugin.cpp +++ b/plugins/producer_plugin/producer_plugin.cpp @@ -8,9 +8,11 @@ #include #include #include +#include #include #include +#include #include #include @@ -620,6 +622,19 @@ make_keosd_signature_provider(const std::shared_ptr& impl, }; } +void set_thread_name( boost::asio::thread_pool& tp, uint16_t i, uint16_t sz ) { + std::string tn = "prod-" + std::to_string( i ); + fc::set_os_thread_name( tn ); + ++i; + if( i < sz ) { + // post recursively so we consume all the threads + auto fut = eosio::chain::async_thread_pool( tp, [&tp, i, sz]() { + set_thread_name( tp, i, sz ); + }); + fut.wait(); + } +} + void producer_plugin::plugin_initialize(const boost::program_options::variables_map& options) { try { my->chain_plug = app().find_plugin(); @@ -690,6 +705,11 @@ void producer_plugin::plugin_initialize(const boost::program_options::variables_ "producer-threads ${num} must be greater than 0", ("num", thread_pool_size)); my->_thread_pool.emplace( thread_pool_size ); + // name threads in thread pool for logger + boost::asio::post( *my->_thread_pool, [&tp = *my->_thread_pool, sz = thread_pool_size]() { + set_thread_name( tp, 0, sz ); + }); + if( options.count( "snapshots-dir" )) { auto sd = options.at( "snapshots-dir" ).as(); if( sd.is_relative()) { @@ -738,10 +758,10 @@ void producer_plugin::plugin_initialize(const boost::program_options::variables_ void producer_plugin::plugin_startup() { try { - handle_sighup(); // Sets loggers - ilog("producer plugin: plugin_startup() begin"); + handle_sighup(); // Sets loggers + chain::controller& chain = my->chain_plug->chain(); EOS_ASSERT( my->_producers.empty() || chain.get_read_mode() == chain::db_read_mode::SPECULATIVE, plugin_config_exception, "node cannot have any producer-name configured because block production is impossible 
when read_mode is not \"speculative\"" ); From e0ecb3f398166f7f7e6962735ca8f7739fff7c7a Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Tue, 26 Mar 2019 10:49:03 -0500 Subject: [PATCH 259/680] Revert move of ilog message --- plugins/producer_plugin/producer_plugin.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp index 0f9fc79ccea..0754d1248c1 100644 --- a/plugins/producer_plugin/producer_plugin.cpp +++ b/plugins/producer_plugin/producer_plugin.cpp @@ -758,10 +758,10 @@ void producer_plugin::plugin_initialize(const boost::program_options::variables_ void producer_plugin::plugin_startup() { try { - ilog("producer plugin: plugin_startup() begin"); - handle_sighup(); // Sets loggers + ilog("producer plugin: plugin_startup() begin"); + chain::controller& chain = my->chain_plug->chain(); EOS_ASSERT( my->_producers.empty() || chain.get_read_mode() == chain::db_read_mode::SPECULATIVE, plugin_config_exception, "node cannot have any producer-name configured because block production is impossible when read_mode is not \"speculative\"" ); From 34a43e20996a726fec0995f6f05558ead7c9d9f9 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Tue, 26 Mar 2019 12:10:59 -0500 Subject: [PATCH 260/680] Fix for tests which were destroying controller before all set_thread_name finished causing deadlock. --- libraries/chain/controller.cpp | 3 ++- plugins/producer_plugin/producer_plugin.cpp | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp index 9ee8626a77f..fc71d06a42a 100644 --- a/libraries/chain/controller.cpp +++ b/libraries/chain/controller.cpp @@ -1743,9 +1743,10 @@ void set_thread_name( boost::asio::thread_pool& tp, uint16_t i, uint16_t sz ) { void controller::startup( std::function shutdown, const snapshot_reader_ptr& snapshot ) { // name threads in thread pool for logger - boost::asio::post( get_thread_pool(), [&tp = get_thread_pool(), sz = my->conf.thread_pool_size]() { + auto fut = eosio::chain::async_thread_pool( get_thread_pool(), [&tp = get_thread_pool(), sz = my->conf.thread_pool_size]() { set_thread_name( tp, 0, sz ); }); + fut.wait(); my->head = my->fork_db.head(); if( snapshot ) { diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp index 0754d1248c1..84fb3866012 100644 --- a/plugins/producer_plugin/producer_plugin.cpp +++ b/plugins/producer_plugin/producer_plugin.cpp @@ -706,9 +706,10 @@ void producer_plugin::plugin_initialize(const boost::program_options::variables_ my->_thread_pool.emplace( thread_pool_size ); // name threads in thread pool for logger - boost::asio::post( *my->_thread_pool, [&tp = *my->_thread_pool, sz = thread_pool_size]() { + auto fut = eosio::chain::async_thread_pool( *my->_thread_pool, [&tp = *my->_thread_pool, sz = thread_pool_size]() { set_thread_name( tp, 0, sz ); }); + fut.wait(); if( options.count( "snapshots-dir" )) { auto sd = options.at( "snapshots-dir" ).as(); From 79f507a1b9292020292884fd2bb2c60cabc11a68 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Tue, 26 Mar 2019 20:28:13 -0500 Subject: [PATCH 261/680] Do not name main thread since some tests expect it to be nodeos --- programs/nodeos/main.cpp | 1 - 1 file changed, 1 deletion(-) diff --git a/programs/nodeos/main.cpp b/programs/nodeos/main.cpp index 403b5c2b317..7034a03858a 100644 --- a/programs/nodeos/main.cpp +++ b/programs/nodeos/main.cpp @@ -84,7 +84,6 @@ 
enum return_codes { int main(int argc, char** argv) { try { - fc::set_os_thread_name( "main" ); app().set_version(eosio::nodeos::config::version); auto root = fc::app_path(); From 7b3f8014bd02dc2209c312f7cedacdec291ed362 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Mon, 1 Apr 2019 10:55:06 -0400 Subject: [PATCH 262/680] Update to latest fc with set_os_thread_name --- libraries/fc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/fc b/libraries/fc index 1f62ef7f68e..809c8b7434e 160000 --- a/libraries/fc +++ b/libraries/fc @@ -1 +1 @@ -Subproject commit 1f62ef7f68efdaa1240bf99b382d4785fd1afcbc +Subproject commit 809c8b7434e6797efa8dd1bfba546b551e4d830e From f0e42dae564c523daa89041cff52d1bdf152c73c Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Mon, 1 Apr 2019 14:41:24 -0400 Subject: [PATCH 263/680] Use io_context in thread_pool and set thread name before run --- libraries/chain/controller.cpp | 33 ++++++++----------- .../chain/include/eosio/chain/controller.hpp | 2 +- plugins/chain_plugin/chain_plugin.cpp | 2 -- 3 files changed, 14 insertions(+), 23 deletions(-) diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp index fc71d06a42a..b697f5238b6 100644 --- a/libraries/chain/controller.cpp +++ b/libraries/chain/controller.cpp @@ -134,6 +134,7 @@ struct controller_impl { bool trusted_producer_light_validation = false; uint32_t snapshot_head_block = 0; boost::asio::thread_pool thread_pool; + boost::asio::io_context ioc; typedef pair handler_key; map< account_name, map > apply_handlers; @@ -404,6 +405,9 @@ struct controller_impl { } ~controller_impl() { + ioc.stop(); + thread_pool.stop(); + thread_pool.join(); pending.reset(); } @@ -1728,25 +1732,14 @@ void controller::add_indices() { my->add_indices(); } -void set_thread_name( boost::asio::thread_pool& tp, uint16_t i, uint16_t sz ) { - std::string tn = "chain-" + std::to_string( i ); - fc::set_os_thread_name( tn ); - ++i; - if( i < sz ) { - // post recursively so we consume all the threads - auto fut = eosio::chain::async_thread_pool( tp, [&tp, i, sz]() { - set_thread_name( tp, i, sz ); - }); - fut.wait(); - } -} - void controller::startup( std::function shutdown, const snapshot_reader_ptr& snapshot ) { - // name threads in thread pool for logger - auto fut = eosio::chain::async_thread_pool( get_thread_pool(), [&tp = get_thread_pool(), sz = my->conf.thread_pool_size]() { - set_thread_name( tp, 0, sz ); - }); - fut.wait(); + for( uint16_t i = 0; i < my->conf.thread_pool_size; ++i ) { + boost::asio::post( my->thread_pool, [&ioc = my->ioc, i]() { + std::string tn = "chain-" + std::to_string( i ); + fc::set_os_thread_name( tn ); + ioc.run(); + } ); + } my->head = my->fork_db.head(); if( snapshot ) { @@ -1799,8 +1792,8 @@ void controller::abort_block() { my->abort_block(); } -boost::asio::thread_pool& controller::get_thread_pool() { - return my->thread_pool; +boost::asio::io_context& controller::get_thread_pool() { + return my->ioc; } std::future controller::create_block_state_future( const signed_block_ptr& b ) { diff --git a/libraries/chain/include/eosio/chain/controller.hpp b/libraries/chain/include/eosio/chain/controller.hpp index 2aab3179668..9249a5cf226 100644 --- a/libraries/chain/include/eosio/chain/controller.hpp +++ b/libraries/chain/include/eosio/chain/controller.hpp @@ -140,7 +140,7 @@ namespace eosio { namespace chain { std::future create_block_state_future( const signed_block_ptr& b ); void push_block( std::future& block_state_future ); - boost::asio::thread_pool& get_thread_pool(); +
boost::asio::io_context& get_thread_pool(); const chainbase::database& db()const; diff --git a/plugins/chain_plugin/chain_plugin.cpp b/plugins/chain_plugin/chain_plugin.cpp index dbff9e03cbc..2b8b493392e 100644 --- a/plugins/chain_plugin/chain_plugin.cpp +++ b/plugins/chain_plugin/chain_plugin.cpp @@ -770,8 +770,6 @@ void chain_plugin::plugin_shutdown() { my->irreversible_block_connection.reset(); my->accepted_transaction_connection.reset(); my->applied_transaction_connection.reset(); - my->chain->get_thread_pool().stop(); - my->chain->get_thread_pool().join(); my->chain.reset(); } From b347269cb5086f9f1e72f3605a848011377828fa Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Mon, 1 Apr 2019 16:19:42 -0400 Subject: [PATCH 264/680] Use io_context for thread pool and name threads before run --- plugins/producer_plugin/producer_plugin.cpp | 35 +++++++++------------ 1 file changed, 15 insertions(+), 20 deletions(-) diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp index 84fb3866012..ca50c024d01 100644 --- a/plugins/producer_plugin/producer_plugin.cpp +++ b/plugins/producer_plugin/producer_plugin.cpp @@ -62,6 +62,7 @@ static appbase::abstract_plugin& _producer_plugin = app().register_plugin; namespace { bool failure_is_subjective(const fc::exception& e, bool deadline_is_subjective) { @@ -135,6 +136,8 @@ class producer_plugin_impl : public std::enable_shared_from_this _thread_pool; + boost::asio::io_context _ioc; + fc::optional _ioc_work; int32_t _max_transaction_time_ms; fc::microseconds _max_irreversible_block_age_us; @@ -353,8 +356,8 @@ class producer_plugin_impl : public std::enable_shared_from_this next) { chain::controller& chain = chain_plug->chain(); const auto& cfg = chain.get_global_properties().configuration; - transaction_metadata::create_signing_keys_future( trx, *_thread_pool, chain.get_chain_id(), fc::microseconds( cfg.max_transaction_cpu_usage ) ); - boost::asio::post( *_thread_pool, [self = this, trx, persist_until_expired, next]() { + transaction_metadata::create_signing_keys_future( trx, _ioc, chain.get_chain_id(), fc::microseconds( cfg.max_transaction_cpu_usage ) ); + boost::asio::post( _ioc, [self = this, trx, persist_until_expired, next]() { if( trx->signing_keys_future.valid() ) trx->signing_keys_future.wait(); app().post(priority::low, [self, trx, persist_until_expired, next]() { @@ -622,19 +625,6 @@ make_keosd_signature_provider(const std::shared_ptr& impl, }; } -void set_thread_name( boost::asio::thread_pool& tp, uint16_t i, uint16_t sz ) { - std::string tn = "prod-" + std::to_string( i ); - fc::set_os_thread_name( tn ); - ++i; - if( i < sz ) { - // post recursively so we consume all the threads - auto fut = eosio::chain::async_thread_pool( tp, [&tp, i, sz]() { - set_thread_name( tp, i, sz ); - }); - fut.wait(); - } -} - void producer_plugin::plugin_initialize(const boost::program_options::variables_map& options) { try { my->chain_plug = app().find_plugin(); @@ -705,11 +695,14 @@ void producer_plugin::plugin_initialize(const boost::program_options::variables_ "producer-threads ${num} must be greater than 0", ("num", thread_pool_size)); my->_thread_pool.emplace( thread_pool_size ); - // name threads in thread pool for logger - auto fut = eosio::chain::async_thread_pool( *my->_thread_pool, [&tp = *my->_thread_pool, sz = thread_pool_size]() { - set_thread_name( tp, 0, sz ); - }); - fut.wait(); + my->_ioc_work.emplace( boost::asio::make_work_guard( my->_ioc ) ); + for( uint16_t i = 0; i < thread_pool_size; ++i ) { + 
boost::asio::post( *my->_thread_pool, [&ioc = my->_ioc, i]() { + std::string tn = "prod-" + std::to_string( i ); + fc::set_os_thread_name( tn ); + ioc.run(); + } ); + } if( options.count( "snapshots-dir" )) { auto sd = options.at( "snapshots-dir" ).as(); @@ -805,6 +798,8 @@ void producer_plugin::plugin_shutdown() { edump((e.to_detail_string())); } + my->_ioc_work.reset(); + my->_ioc.stop(); if( my->_thread_pool ) { my->_thread_pool->join(); my->_thread_pool->stop(); From 85bbc90d1d7a64ce355722aaac391b19ffb43b2f Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Mon, 1 Apr 2019 16:20:27 -0400 Subject: [PATCH 265/680] Use ioc work to prevent io_context::run from exiting --- libraries/chain/controller.cpp | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp index b697f5238b6..2bf1015f8d9 100644 --- a/libraries/chain/controller.cpp +++ b/libraries/chain/controller.cpp @@ -28,6 +28,7 @@ namespace eosio { namespace chain { using resource_limits::resource_limits_manager; +using ioc_work_t = boost::asio::executor_work_guard; using controller_index_set = index_set< account_index, @@ -135,6 +136,7 @@ struct controller_impl { uint32_t snapshot_head_block = 0; boost::asio::thread_pool thread_pool; boost::asio::io_context ioc; + fc::optional ioc_work; typedef pair handler_key; map< account_name, map > apply_handlers; @@ -405,6 +407,7 @@ struct controller_impl { } ~controller_impl() { + ioc_work.reset(); ioc.stop(); thread_pool.stop(); thread_pool.join(); @@ -1199,7 +1202,7 @@ struct controller_impl { auto& pt = receipt.trx.get(); auto mtrx = std::make_shared( std::make_shared( pt ) ); if( !self.skip_auth_check() ) { - transaction_metadata::create_signing_keys_future( mtrx, thread_pool, chain_id, microseconds::maximum() ); + transaction_metadata::create_signing_keys_future( mtrx, ioc, chain_id, microseconds::maximum() ); } packed_transactions.emplace_back( std::move( mtrx ) ); } @@ -1277,7 +1280,7 @@ struct controller_impl { auto prev = fork_db.get_block( b->previous ); EOS_ASSERT( prev, unlinkable_block_exception, "unlinkable block ${id}", ("id", id)("previous", b->previous) ); - return async_thread_pool( thread_pool, [b, prev]() { + return async_thread_pool( ioc, [b, prev]() { const bool skip_validate_signee = false; return std::make_shared( *prev, move( b ), skip_validate_signee ); } ); @@ -1733,8 +1736,9 @@ void controller::add_indices() { } void controller::startup( std::function shutdown, const snapshot_reader_ptr& snapshot ) { + my->ioc_work.emplace( boost::asio::make_work_guard( my->ioc ) ); for( uint16_t i = 0; i < my->conf.thread_pool_size; ++i ) { - boost::asio::post( my->ioc, [&ioc = my->ioc, i]() { + boost::asio::post( my->thread_pool, [&ioc = my->ioc, i]() { std::string tn = "chain-" + std::to_string( i ); fc::set_os_thread_name( tn ); ioc.run(); From d1b6eb93ee0e3711e25f58adbb6f98e799ef1856 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Mon, 1 Apr 2019 16:21:04 -0400 Subject: [PATCH 266/680] Use io_context instead of thread_pool --- libraries/chain/include/eosio/chain/thread_utils.hpp | 2 +- libraries/chain/include/eosio/chain/transaction_metadata.hpp | 2 +- libraries/chain/transaction_metadata.cpp | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/libraries/chain/include/eosio/chain/thread_utils.hpp b/libraries/chain/include/eosio/chain/thread_utils.hpp index 31b32cbd91f..bf5932fdf0f 100644 --- a/libraries/chain/include/eosio/chain/thread_utils.hpp +++ 
b/libraries/chain/include/eosio/chain/thread_utils.hpp @@ -13,7 +13,7 @@ namespace eosio { namespace chain { // async on thread_pool and return future template - auto async_thread_pool( boost::asio::thread_pool& thread_pool, F&& f ) { + auto async_thread_pool( boost::asio::io_context& thread_pool, F&& f ) { auto task = std::make_shared>( std::forward( f ) ); boost::asio::post( thread_pool, [task]() { (*task)(); } ); return task->get_future(); diff --git a/libraries/chain/include/eosio/chain/transaction_metadata.hpp b/libraries/chain/include/eosio/chain/transaction_metadata.hpp index 6136580fa44..ce7189204cb 100644 --- a/libraries/chain/include/eosio/chain/transaction_metadata.hpp +++ b/libraries/chain/include/eosio/chain/transaction_metadata.hpp @@ -52,7 +52,7 @@ class transaction_metadata { const flat_set& recover_keys( const chain_id_type& chain_id ); - static void create_signing_keys_future( const transaction_metadata_ptr& mtrx, boost::asio::thread_pool& thread_pool, + static void create_signing_keys_future( const transaction_metadata_ptr& mtrx, boost::asio::io_context& thread_pool, const chain_id_type& chain_id, fc::microseconds time_limit ); }; diff --git a/libraries/chain/transaction_metadata.cpp b/libraries/chain/transaction_metadata.cpp index 482b3c488f7..9935270b037 100644 --- a/libraries/chain/transaction_metadata.cpp +++ b/libraries/chain/transaction_metadata.cpp @@ -24,7 +24,7 @@ const flat_set& transaction_metadata::recover_keys( const chain } void transaction_metadata::create_signing_keys_future( const transaction_metadata_ptr& mtrx, - boost::asio::thread_pool& thread_pool, const chain_id_type& chain_id, fc::microseconds time_limit ) { + boost::asio::io_context& thread_pool, const chain_id_type& chain_id, fc::microseconds time_limit ) { if( mtrx->signing_keys_future.valid() || mtrx->signing_keys.valid() ) // already created return; From d955af19f06cba7e10c31178d17c71e291dcbc44 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Mon, 1 Apr 2019 16:21:37 -0400 Subject: [PATCH 267/680] Update test to run on io_context like in producer_plugin and controller --- unittests/misc_tests.cpp | 25 ++++++++++++++++++++----- 1 file changed, 20 insertions(+), 5 deletions(-) diff --git a/unittests/misc_tests.cpp b/unittests/misc_tests.cpp index bfaeca76727..0b988452ac2 100644 --- a/unittests/misc_tests.cpp +++ b/unittests/misc_tests.cpp @@ -10,6 +10,7 @@ #include #include +#include #include #include @@ -829,20 +830,30 @@ BOOST_AUTO_TEST_CASE(transaction_metadata_test) { try { BOOST_CHECK_EQUAL(trx.id(), mtrx->id); BOOST_CHECK_EQUAL(trx.id(), mtrx2->id); - boost::asio::thread_pool thread_pool(5); + using ioc_work_t = boost::asio::executor_work_guard; + const int num_threads = 5; + boost::asio::thread_pool thread_pool( num_threads ); + boost::asio::io_context ioc; + fc::optional ioc_work( boost::asio::make_work_guard( ioc ) ); + for( int i = 0; i < num_threads; ++i) { + boost::asio::post( thread_pool, [&ioc]() { + fc::set_os_thread_name( "misc_test" ); + ioc.run(); + } ); + } BOOST_CHECK( !mtrx->signing_keys_future.valid() ); BOOST_CHECK( !mtrx2->signing_keys_future.valid() ); - transaction_metadata::create_signing_keys_future( mtrx, thread_pool, test.control->get_chain_id(), fc::microseconds::maximum() ); - transaction_metadata::create_signing_keys_future( mtrx2, thread_pool, test.control->get_chain_id(), fc::microseconds::maximum() ); + transaction_metadata::create_signing_keys_future( mtrx, ioc, test.control->get_chain_id(), fc::microseconds::maximum() ); + 
transaction_metadata::create_signing_keys_future( mtrx2, ioc, test.control->get_chain_id(), fc::microseconds::maximum() ); BOOST_CHECK( mtrx->signing_keys_future.valid() ); BOOST_CHECK( mtrx2->signing_keys_future.valid() ); // no-op - transaction_metadata::create_signing_keys_future( mtrx, thread_pool, test.control->get_chain_id(), fc::microseconds::maximum() ); - transaction_metadata::create_signing_keys_future( mtrx2, thread_pool, test.control->get_chain_id(), fc::microseconds::maximum() ); + transaction_metadata::create_signing_keys_future( mtrx, ioc, test.control->get_chain_id(), fc::microseconds::maximum() ); + transaction_metadata::create_signing_keys_future( mtrx2, ioc, test.control->get_chain_id(), fc::microseconds::maximum() ); auto keys = mtrx->recover_keys( test.control->get_chain_id() ); BOOST_CHECK_EQUAL(1u, keys.size()); @@ -857,6 +868,10 @@ BOOST_AUTO_TEST_CASE(transaction_metadata_test) { try { BOOST_CHECK_EQUAL(1u, keys.size()); BOOST_CHECK_EQUAL(public_key, *keys.begin()); + ioc_work.reset(); + ioc.stop(); + thread_pool.stop(); + thread_pool.join(); } FC_LOG_AND_RETHROW() } From fe396671fa9e17d1918a3361c57ade0fa9b46538 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Fri, 29 Mar 2019 16:54:49 -0500 Subject: [PATCH 268/680] Use shared_future instead of future since accessed across threads --- .../include/eosio/chain/transaction_metadata.hpp | 11 +++++++---- libraries/chain/transaction_metadata.cpp | 9 ++++++--- plugins/producer_plugin/producer_plugin.cpp | 10 ++++++---- 3 files changed, 19 insertions(+), 11 deletions(-) diff --git a/libraries/chain/include/eosio/chain/transaction_metadata.hpp b/libraries/chain/include/eosio/chain/transaction_metadata.hpp index 6136580fa44..923e5d42f14 100644 --- a/libraries/chain/include/eosio/chain/transaction_metadata.hpp +++ b/libraries/chain/include/eosio/chain/transaction_metadata.hpp @@ -15,6 +15,7 @@ namespace eosio { namespace chain { class transaction_metadata; using transaction_metadata_ptr = std::shared_ptr; +using signing_keys_future_type = std::shared_future>>; /** * This data structure should store context-free cached data about a transaction such as * packed/unpacked/compressed and recovered keys @@ -26,8 +27,7 @@ class transaction_metadata { packed_transaction_ptr packed_trx; fc::microseconds sig_cpu_usage; optional>> signing_keys; - std::future>> - signing_keys_future; + signing_keys_future_type signing_keys_future; bool accepted = false; bool implicit = false; bool scheduled = false; @@ -52,8 +52,11 @@ class transaction_metadata { const flat_set& recover_keys( const chain_id_type& chain_id ); - static void create_signing_keys_future( const transaction_metadata_ptr& mtrx, boost::asio::thread_pool& thread_pool, - const chain_id_type& chain_id, fc::microseconds time_limit ); + // must be called from main application thread + // signing_keys_future should only be accessed by main application thread + static signing_keys_future_type + create_signing_keys_future( const transaction_metadata_ptr& mtrx, boost::asio::thread_pool& thread_pool, + const chain_id_type& chain_id, fc::microseconds time_limit ); }; diff --git a/libraries/chain/transaction_metadata.cpp b/libraries/chain/transaction_metadata.cpp index 482b3c488f7..cbeda6cbec5 100644 --- a/libraries/chain/transaction_metadata.cpp +++ b/libraries/chain/transaction_metadata.cpp @@ -23,10 +23,11 @@ const flat_set& transaction_metadata::recover_keys( const chain return signing_keys->second; } -void transaction_metadata::create_signing_keys_future( const 
transaction_metadata_ptr& mtrx, - boost::asio::thread_pool& thread_pool, const chain_id_type& chain_id, fc::microseconds time_limit ) { +signing_keys_future_type transaction_metadata::create_signing_keys_future( const transaction_metadata_ptr& mtrx, + boost::asio::thread_pool& thread_pool, const chain_id_type& chain_id, fc::microseconds time_limit ) +{ if( mtrx->signing_keys_future.valid() || mtrx->signing_keys.valid() ) // already created - return; + return mtrx->signing_keys_future; std::weak_ptr mtrx_wp = mtrx; mtrx->signing_keys_future = async_thread_pool( thread_pool, [time_limit, chain_id, mtrx_wp]() { @@ -41,6 +42,8 @@ void transaction_metadata::create_signing_keys_future( const transaction_metadat } return std::make_tuple( chain_id, cpu_usage, std::move( recovered_pub_keys )); } ); + + return mtrx->signing_keys_future; } diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp index a35fa34a9c5..28714d31597 100644 --- a/plugins/producer_plugin/producer_plugin.cpp +++ b/plugins/producer_plugin/producer_plugin.cpp @@ -351,10 +351,12 @@ class producer_plugin_impl : public std::enable_shared_from_this next) { chain::controller& chain = chain_plug->chain(); const auto& cfg = chain.get_global_properties().configuration; - transaction_metadata::create_signing_keys_future( trx, *_thread_pool, chain.get_chain_id(), fc::microseconds( cfg.max_transaction_cpu_usage ) ); - boost::asio::post( *_thread_pool, [self = this, trx, persist_until_expired, next]() { - if( trx->signing_keys_future.valid() ) - trx->signing_keys_future.wait(); + signing_keys_future_type future = + transaction_metadata::create_signing_keys_future( trx, *_thread_pool, chain.get_chain_id(), + fc::microseconds( cfg.max_transaction_cpu_usage ) ); + boost::asio::post( *_thread_pool, [self = this, future, trx, persist_until_expired, next]() { + if( future.valid() ) + future.wait(); app().post(priority::low, [self, trx, persist_until_expired, next]() { self->process_incoming_transaction_async( trx, persist_until_expired, next ); }); From 5b5a0a7d6940251d686ee8debca7b9dfd5b58ae8 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Sat, 30 Mar 2019 08:53:58 -0500 Subject: [PATCH 269/680] Simplify key recovery future logic --- libraries/chain/controller.cpp | 7 ++-- .../eosio/chain/transaction_metadata.hpp | 15 +++++---- libraries/chain/transaction_metadata.cpp | 33 +++++++++---------- libraries/testing/tester.cpp | 15 +++++++-- plugins/mongo_db_plugin/mongo_db_plugin.cpp | 4 +-- plugins/producer_plugin/producer_plugin.cpp | 5 ++- unittests/misc_tests.cpp | 24 +++++++------- 7 files changed, 56 insertions(+), 47 deletions(-) diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp index f3b0a841981..63f5e740229 100644 --- a/libraries/chain/controller.cpp +++ b/libraries/chain/controller.cpp @@ -994,9 +994,10 @@ struct controller_impl { auto start = fc::time_point::now(); const bool check_auth = !self.skip_auth_check() && !trx->implicit; // call recover keys so that trx->sig_cpu_usage is set correctly - const flat_set& recovered_keys = check_auth ? trx->recover_keys( chain_id ) : flat_set(); + const fc::microseconds sig_cpu_usage = check_auth ? std::get<0>( trx->recover_keys( chain_id ) ) : fc::microseconds(); + const flat_set& recovered_keys = check_auth ? 
std::get<1>( trx->recover_keys( chain_id ) ) : flat_set(); if( !explicit_billed_cpu_time ) { - fc::microseconds already_consumed_time( EOS_PERCENT(trx->sig_cpu_usage.count(), conf.sig_cpu_bill_pct) ); + fc::microseconds already_consumed_time( EOS_PERCENT(sig_cpu_usage.count(), conf.sig_cpu_bill_pct) ); if( start.time_since_epoch() < already_consumed_time ) { start = fc::time_point(); @@ -1194,7 +1195,7 @@ struct controller_impl { auto& pt = receipt.trx.get(); auto mtrx = std::make_shared( std::make_shared( pt ) ); if( !self.skip_auth_check() ) { - transaction_metadata::create_signing_keys_future( mtrx, thread_pool, chain_id, microseconds::maximum() ); + transaction_metadata::start_recover_keys( mtrx, thread_pool, chain_id, microseconds::maximum() ); } packed_transactions.emplace_back( std::move( mtrx ) ); } diff --git a/libraries/chain/include/eosio/chain/transaction_metadata.hpp b/libraries/chain/include/eosio/chain/transaction_metadata.hpp index 923e5d42f14..9d0c01e0a8c 100644 --- a/libraries/chain/include/eosio/chain/transaction_metadata.hpp +++ b/libraries/chain/include/eosio/chain/transaction_metadata.hpp @@ -16,6 +16,8 @@ namespace eosio { namespace chain { class transaction_metadata; using transaction_metadata_ptr = std::shared_ptr; using signing_keys_future_type = std::shared_future>>; +using recovery_keys_type = std::pair&>; + /** * This data structure should store context-free cached data about a transaction such as * packed/unpacked/compressed and recovered keys @@ -25,8 +27,6 @@ class transaction_metadata { transaction_id_type id; transaction_id_type signed_id; packed_transaction_ptr packed_trx; - fc::microseconds sig_cpu_usage; - optional>> signing_keys; signing_keys_future_type signing_keys_future; bool accepted = false; bool implicit = false; @@ -50,13 +50,14 @@ class transaction_metadata { signed_id = digest_type::hash(*packed_trx); } - const flat_set& recover_keys( const chain_id_type& chain_id ); - // must be called from main application thread - // signing_keys_future should only be accessed by main application thread static signing_keys_future_type - create_signing_keys_future( const transaction_metadata_ptr& mtrx, boost::asio::thread_pool& thread_pool, - const chain_id_type& chain_id, fc::microseconds time_limit ); + start_recover_keys( const transaction_metadata_ptr& mtrx, boost::asio::thread_pool& thread_pool, + const chain_id_type& chain_id, fc::microseconds time_limit ); + + // start_recover_keys must be called first + recovery_keys_type recover_keys( const chain_id_type& chain_id ); + }; diff --git a/libraries/chain/transaction_metadata.cpp b/libraries/chain/transaction_metadata.cpp index cbeda6cbec5..ded655c8d79 100644 --- a/libraries/chain/transaction_metadata.cpp +++ b/libraries/chain/transaction_metadata.cpp @@ -4,35 +4,32 @@ namespace eosio { namespace chain { - -const flat_set& transaction_metadata::recover_keys( const chain_id_type& chain_id ) { +recovery_keys_type transaction_metadata::recover_keys( const chain_id_type& chain_id ) { // Unlikely for more than one chain_id to be used in one nodeos instance - if( !signing_keys || signing_keys->first != chain_id ) { - if( signing_keys_future.valid() ) { - std::tuple> sig_keys = signing_keys_future.get(); - if( std::get<0>( sig_keys ) == chain_id ) { - sig_cpu_usage = std::get<1>( sig_keys ); - signing_keys.emplace( std::get<0>( sig_keys ), std::move( std::get<2>( sig_keys ))); - return signing_keys->second; - } + if( signing_keys_future.valid() ) { + const std::tuple>& sig_keys = signing_keys_future.get(); + 
if( std::get<0>( sig_keys ) == chain_id ) { + return std::make_pair( std::get<1>( sig_keys ), std::cref( std::get<2>( sig_keys ) ) ); } - flat_set recovered_pub_keys; - sig_cpu_usage = packed_trx->get_signed_transaction().get_signature_keys( chain_id, fc::time_point::maximum(), recovered_pub_keys ); - signing_keys.emplace( chain_id, std::move( recovered_pub_keys )); + EOS_ASSERT( false, chain_id_type_exception, "chain id ${cid} does not match start_recover_keys ${sid}", + ("cid", chain_id)( "sid", std::get<0>( sig_keys ) ) ); } - return signing_keys->second; + + EOS_ASSERT( false, chain_id_type_exception, "start_recover_keys for ${cid} is required", ("cid", chain_id) ); } -signing_keys_future_type transaction_metadata::create_signing_keys_future( const transaction_metadata_ptr& mtrx, - boost::asio::thread_pool& thread_pool, const chain_id_type& chain_id, fc::microseconds time_limit ) +signing_keys_future_type transaction_metadata::start_recover_keys( const transaction_metadata_ptr& mtrx, + boost::asio::thread_pool& thread_pool, + const chain_id_type& chain_id, + fc::microseconds time_limit ) { - if( mtrx->signing_keys_future.valid() || mtrx->signing_keys.valid() ) // already created + if( mtrx->signing_keys_future.valid() && std::get<0>( mtrx->signing_keys_future.get() ) == chain_id ) // already created return mtrx->signing_keys_future; std::weak_ptr mtrx_wp = mtrx; mtrx->signing_keys_future = async_thread_pool( thread_pool, [time_limit, chain_id, mtrx_wp]() { fc::time_point deadline = time_limit == fc::microseconds::maximum() ? - fc::time_point::maximum() : fc::time_point::now() + time_limit; + fc::time_point::maximum() : fc::time_point::now() + time_limit; auto mtrx = mtrx_wp.lock(); fc::microseconds cpu_usage; flat_set recovered_pub_keys; diff --git a/libraries/testing/tester.cpp b/libraries/testing/tester.cpp index a6a77ff2998..63a0788931f 100644 --- a/libraries/testing/tester.cpp +++ b/libraries/testing/tester.cpp @@ -346,7 +346,13 @@ namespace eosio { namespace testing { { try { if( !control->pending_block_state() ) _start_block(control->head_block_time() + fc::microseconds(config::block_interval_us)); - auto r = control->push_transaction( std::make_shared(std::make_shared(trx)), deadline, billed_cpu_time_us ); + + auto mtrx = std::make_shared( std::make_shared(trx) ); + auto time_limit = deadline == fc::time_point::maximum() ? + fc::microseconds::maximum() : + fc::microseconds( deadline - fc::time_point::now() ); + transaction_metadata::start_recover_keys( mtrx, control->get_thread_pool(), control->get_chain_id(), time_limit ); + auto r = control->push_transaction( mtrx, deadline, billed_cpu_time_us ); if( r->except_ptr ) std::rethrow_exception( r->except_ptr ); if( r->except ) throw *r->except; return r; @@ -365,7 +371,12 @@ namespace eosio { namespace testing { c = packed_transaction::zlib; } - auto r = control->push_transaction( std::make_shared(trx,c), deadline, billed_cpu_time_us ); + auto time_limit = deadline == fc::time_point::maximum() ? 
+ fc::microseconds::maximum() : + fc::microseconds( deadline - fc::time_point::now() ); + auto mtrx = std::make_shared(trx, c); + transaction_metadata::start_recover_keys( mtrx, control->get_thread_pool(), control->get_chain_id(), time_limit ); + auto r = control->push_transaction( mtrx, deadline, billed_cpu_time_us ); if( r->except_ptr ) std::rethrow_exception( r->except_ptr ); if( r->except) throw *r->except; return r; diff --git a/plugins/mongo_db_plugin/mongo_db_plugin.cpp b/plugins/mongo_db_plugin/mongo_db_plugin.cpp index 25dea46d546..e8148850a5a 100644 --- a/plugins/mongo_db_plugin/mongo_db_plugin.cpp +++ b/plugins/mongo_db_plugin/mongo_db_plugin.cpp @@ -780,8 +780,8 @@ void mongo_db_plugin_impl::_process_accepted_transaction( const chain::transacti } string signing_keys_json; - if( t->signing_keys.valid() ) { - signing_keys_json = fc::json::to_string( t->signing_keys->second ); + if( t->signing_keys_future.valid() ) { + signing_keys_json = fc::json::to_string( std::get<2>( t->signing_keys_future.get() ) ); } else { flat_set keys; trx.get_signature_keys( *chain_id, fc::time_point::maximum(), keys, false ); diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp index 28714d31597..05c909c55df 100644 --- a/plugins/producer_plugin/producer_plugin.cpp +++ b/plugins/producer_plugin/producer_plugin.cpp @@ -351,9 +351,8 @@ class producer_plugin_impl : public std::enable_shared_from_this next) { chain::controller& chain = chain_plug->chain(); const auto& cfg = chain.get_global_properties().configuration; - signing_keys_future_type future = - transaction_metadata::create_signing_keys_future( trx, *_thread_pool, chain.get_chain_id(), - fc::microseconds( cfg.max_transaction_cpu_usage ) ); + signing_keys_future_type future = transaction_metadata::start_recover_keys( trx, *_thread_pool, + chain.get_chain_id(), fc::microseconds( cfg.max_transaction_cpu_usage ) ); boost::asio::post( *_thread_pool, [self = this, future, trx, persist_until_expired, next]() { if( future.valid() ) future.wait(); diff --git a/unittests/misc_tests.cpp b/unittests/misc_tests.cpp index bfaeca76727..847ced59872 100644 --- a/unittests/misc_tests.cpp +++ b/unittests/misc_tests.cpp @@ -834,28 +834,28 @@ BOOST_AUTO_TEST_CASE(transaction_metadata_test) { try { BOOST_CHECK( !mtrx->signing_keys_future.valid() ); BOOST_CHECK( !mtrx2->signing_keys_future.valid() ); - transaction_metadata::create_signing_keys_future( mtrx, thread_pool, test.control->get_chain_id(), fc::microseconds::maximum() ); - transaction_metadata::create_signing_keys_future( mtrx2, thread_pool, test.control->get_chain_id(), fc::microseconds::maximum() ); + transaction_metadata::start_recover_keys( mtrx, thread_pool, test.control->get_chain_id(), fc::microseconds::maximum() ); + transaction_metadata::start_recover_keys( mtrx2, thread_pool, test.control->get_chain_id(), fc::microseconds::maximum() ); BOOST_CHECK( mtrx->signing_keys_future.valid() ); BOOST_CHECK( mtrx2->signing_keys_future.valid() ); // no-op - transaction_metadata::create_signing_keys_future( mtrx, thread_pool, test.control->get_chain_id(), fc::microseconds::maximum() ); - transaction_metadata::create_signing_keys_future( mtrx2, thread_pool, test.control->get_chain_id(), fc::microseconds::maximum() ); + transaction_metadata::start_recover_keys( mtrx, thread_pool, test.control->get_chain_id(), fc::microseconds::maximum() ); + transaction_metadata::start_recover_keys( mtrx2, thread_pool, test.control->get_chain_id(), fc::microseconds::maximum() ); auto 
keys = mtrx->recover_keys( test.control->get_chain_id() );
-   BOOST_CHECK_EQUAL(1u, keys.size());
-   BOOST_CHECK_EQUAL(public_key, *keys.begin());
+   BOOST_CHECK_EQUAL(1u, keys.second.size());
+   BOOST_CHECK_EQUAL(public_key, *keys.second.begin());
    // again
-   keys = mtrx->recover_keys( test.control->get_chain_id() );
-   BOOST_CHECK_EQUAL(1u, keys.size());
-   BOOST_CHECK_EQUAL(public_key, *keys.begin());
+   auto keys2 = mtrx->recover_keys( test.control->get_chain_id() );
+   BOOST_CHECK_EQUAL(1u, keys2.second.size());
+   BOOST_CHECK_EQUAL(public_key, *keys2.second.begin());
-   auto keys2 = mtrx2->recover_keys( test.control->get_chain_id() );
-   BOOST_CHECK_EQUAL(1u, keys.size());
-   BOOST_CHECK_EQUAL(public_key, *keys.begin());
+   auto keys3 = mtrx2->recover_keys( test.control->get_chain_id() );
+   BOOST_CHECK_EQUAL(1u, keys3.second.size());
+   BOOST_CHECK_EQUAL(public_key, *keys3.second.begin());
 } FC_LOG_AND_RETHROW() }

From 3a2d2a2e3c8a63ab5949b8bedf6232104bd8948c Mon Sep 17 00:00:00 2001
From: Kevin Heifner
Date: Mon, 1 Apr 2019 09:25:02 -0400
Subject: [PATCH 270/680] Calculate recovery keys instead of asserting if
 start_recover_keys was not called or was called with a different chain_id.
 Restores old behavior.

---
 .../include/eosio/chain/transaction_metadata.hpp |  3 ++-
 libraries/chain/transaction_metadata.cpp         | 13 ++++++++++---
 2 files changed, 12 insertions(+), 4 deletions(-)

diff --git a/libraries/chain/include/eosio/chain/transaction_metadata.hpp b/libraries/chain/include/eosio/chain/transaction_metadata.hpp
index 9d0c01e0a8c..0847159e6de 100644
--- a/libraries/chain/include/eosio/chain/transaction_metadata.hpp
+++ b/libraries/chain/include/eosio/chain/transaction_metadata.hpp
@@ -15,7 +15,8 @@ namespace eosio { namespace chain {
 class transaction_metadata;
 using transaction_metadata_ptr = std::shared_ptr<transaction_metadata>;
-using signing_keys_future_type = std::shared_future<std::tuple<chain_id_type, fc::microseconds, flat_set<public_key_type>>>;
+using signing_keys_future_value_type = std::tuple<chain_id_type, fc::microseconds, flat_set<public_key_type>>;
+using signing_keys_future_type = std::shared_future<signing_keys_future_value_type>;
 using recovery_keys_type = std::pair<fc::microseconds, const flat_set<public_key_type>&>;

 /**
diff --git a/libraries/chain/transaction_metadata.cpp b/libraries/chain/transaction_metadata.cpp
index ded655c8d79..9c33121a5a6 100644
--- a/libraries/chain/transaction_metadata.cpp
+++ b/libraries/chain/transaction_metadata.cpp
@@ -11,11 +11,18 @@ recovery_keys_type transaction_metadata::recover_keys( const chain_id_type& chai
       if( std::get<0>( sig_keys ) == chain_id ) {
          return std::make_pair( std::get<1>( sig_keys ), std::cref( std::get<2>( sig_keys ) ) );
       }
-      EOS_ASSERT( false, chain_id_type_exception, "chain id ${cid} does not match start_recover_keys ${sid}",
-                  ("cid", chain_id)( "sid", std::get<0>( sig_keys ) ) );
    }
-   EOS_ASSERT( false, chain_id_type_exception, "start_recover_keys for ${cid} is required", ("cid", chain_id) );
+   // signing_keys_future was not created, or was created for a different chain_id
+   std::promise<signing_keys_future_value_type> p;
+   flat_set<public_key_type> recovered_pub_keys;
+   const signed_transaction& trn = packed_trx->get_signed_transaction();
+   fc::microseconds cpu_usage = trn.get_signature_keys( chain_id, fc::time_point::maximum(), recovered_pub_keys );
+   p.set_value( std::make_tuple( chain_id, cpu_usage, std::move( recovered_pub_keys ) ) );
+   signing_keys_future = p.get_future().share();
+
+   const std::tuple<chain_id_type, fc::microseconds, flat_set<public_key_type>>& sig_keys = signing_keys_future.get();
+   return std::make_pair( std::get<1>( sig_keys ), std::cref( std::get<2>( sig_keys ) ) );
 }

 signing_keys_future_type transaction_metadata::start_recover_keys( const transaction_metadata_ptr& mtrx,

From bdce0ded8c869bc87b05b09a18c887c2adf4fb93 Mon Sep 17 00:00:00 2001
From: Kevin Heifner
Date: Mon, 1 Apr 2019 14:57:54
-0400 Subject: [PATCH 271/680] Add test for recover_keys without start_recover_keys --- unittests/misc_tests.cpp | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/unittests/misc_tests.cpp b/unittests/misc_tests.cpp index 847ced59872..f8a49d71cb3 100644 --- a/unittests/misc_tests.cpp +++ b/unittests/misc_tests.cpp @@ -857,6 +857,17 @@ BOOST_AUTO_TEST_CASE(transaction_metadata_test) { try { BOOST_CHECK_EQUAL(1u, keys3.second.size()); BOOST_CHECK_EQUAL(public_key, *keys3.second.begin()); + // recover keys without first calling start_recover_keys + transaction_metadata_ptr mtrx4 = std::make_shared( std::make_shared( trx, packed_transaction::none) ); + transaction_metadata_ptr mtrx5 = std::make_shared( std::make_shared( trx, packed_transaction::zlib) ); + + auto keys4 = mtrx4->recover_keys( test.control->get_chain_id() ); + BOOST_CHECK_EQUAL(1u, keys4.second.size()); + BOOST_CHECK_EQUAL(public_key, *keys4.second.begin()); + + auto keys5 = mtrx5->recover_keys( test.control->get_chain_id() ); + BOOST_CHECK_EQUAL(1u, keys5.second.size()); + BOOST_CHECK_EQUAL(public_key, *keys5.second.begin()); } FC_LOG_AND_RETHROW() } From 943522e39762c6800373441f943dc44830ba264e Mon Sep 17 00:00:00 2001 From: arhag Date: Mon, 1 Apr 2019 17:38:20 -0400 Subject: [PATCH 272/680] fix dangling reference issues #6897 --- libraries/chain/apply_context.cpp | 91 +++++++++++++------ libraries/chain/eosio_contract.cpp | 18 ++-- .../include/eosio/chain/apply_context.hpp | 40 ++++---- .../eosio/chain/transaction_context.hpp | 17 ++-- libraries/chain/transaction_context.cpp | 42 ++++++--- libraries/chain/wasm_interface.cpp | 15 ++- libraries/chain/webassembly/wabt.cpp | 10 +- libraries/chain/webassembly/wavm.cpp | 6 +- 8 files changed, 145 insertions(+), 94 deletions(-) diff --git a/libraries/chain/apply_context.cpp b/libraries/chain/apply_context.cpp index 962e138dfa1..22b583704e4 100644 --- a/libraries/chain/apply_context.cpp +++ b/libraries/chain/apply_context.cpp @@ -29,34 +29,53 @@ static inline void print_debug(account_name receiver, const action_trace& ar) { } } -void apply_context::exec_one( action_trace& trace ) +apply_context::apply_context(controller& con, transaction_context& trx_ctx, uint32_t action_ordinal, uint32_t depth) +:control(con) +,db(con.mutable_db()) +,trx_context(trx_ctx) +,recurse_depth(depth) +,first_receiver_action_ordinal(action_ordinal) +,action_ordinal(action_ordinal) +,idx64(*this) +,idx128(*this) +,idx256(*this) +,idx_double(*this) +,idx_long_double(*this) +{ + action_trace& trace = trx_ctx.get_action_trace(action_ordinal); + act = &trace.act; + receiver = trace.receiver; + context_free = trace.context_free; +} + +void apply_context::exec_one() { auto start = fc::time_point::now(); action_receipt r; r.receiver = receiver; - r.act_digest = digest_type::hash(act); + r.act_digest = digest_type::hash(*act); const auto& cfg = control.get_global_properties().configuration; try { try { const auto& a = control.get_account( receiver ); privileged = a.privileged; - auto native = control.find_apply_handler( receiver, act.account, act.name ); + auto native = control.find_apply_handler( receiver, act->account, act->name ); if( native ) { if( trx_context.enforce_whiteblacklist && control.is_producing_block() ) { control.check_contract_list( receiver ); - control.check_action_list( act.account, act.name ); + control.check_action_list( act->account, act->name ); } (*native)( *this ); } if( a.code.size() > 0 - && !(act.account == config::system_account_name && act.name == N( setcode ) && + && 
!(act->account == config::system_account_name && act->name == N( setcode ) && receiver == config::system_account_name) ) { if( trx_context.enforce_whiteblacklist && control.is_producing_block() ) { control.check_contract_list( receiver ); - control.check_action_list( act.account, act.name ); + control.check_action_list( act->account, act->name ); } try { control.get_wasm_interface().apply( a.code_version, a.code, *this ); @@ -64,6 +83,7 @@ void apply_context::exec_one( action_trace& trace ) } } FC_RETHROW_EXCEPTIONS( warn, "pending console output: ${console}", ("console", _pending_console_output) ) } catch( fc::exception& e ) { + action_trace& trace = trx_context.get_action_trace( action_ordinal ); trace.except = e; finalize_trace( trace, start ); throw; @@ -72,14 +92,15 @@ void apply_context::exec_one( action_trace& trace ) r.global_sequence = next_global_sequence(); r.recv_sequence = next_recv_sequence( receiver ); - const auto& account_sequence = db.get(act.account); + const auto& account_sequence = db.get(act->account); r.code_sequence = account_sequence.code_sequence; // could be modified by action execution above r.abi_sequence = account_sequence.abi_sequence; // could be modified by action execution above - for( const auto& auth : act.authorization ) { + for( const auto& auth : act->authorization ) { r.auth_sequence[auth.actor] = next_auth_sequence( auth.actor ); } + action_trace& trace = trx_context.get_action_trace( action_ordinal ); trace.receipt = r; trx_context.executed.emplace_back( std::move(r) ); @@ -102,13 +123,13 @@ void apply_context::finalize_trace( action_trace& trace, const fc::time_point& s trace.elapsed = fc::time_point::now() - start; } -void apply_context::exec( action_trace& trace ) +void apply_context::exec() { _notified.emplace_back( receiver, action_ordinal ); - exec_one( trace ); + exec_one(); for( uint32_t i = 1; i < _notified.size(); ++i ) { std::tie( receiver, action_ordinal ) = _notified[i]; - exec_one( trx_context.get_action_trace( action_ordinal ) ); + exec_one(); } if( _cfa_inline_actions.size() > 0 || _inline_actions.size() > 0 ) { @@ -131,9 +152,8 @@ bool apply_context::is_account( const account_name& account )const { } void apply_context::require_authorization( const account_name& account ) { - for( uint32_t i=0; i < act.authorization.size(); i++ ) { - if( act.authorization[i].actor == account ) { - used_authorizations[i] = true; + for( uint32_t i=0; i < act->authorization.size(); i++ ) { + if( act->authorization[i].actor == account ) { return; } } @@ -141,7 +161,7 @@ void apply_context::require_authorization( const account_name& account ) { } bool apply_context::has_authorization( const account_name& account )const { - for( const auto& auth : act.authorization ) + for( const auto& auth : act->authorization ) if( auth.actor == account ) return true; return false; @@ -149,10 +169,9 @@ bool apply_context::has_authorization( const account_name& account )const { void apply_context::require_authorization(const account_name& account, const permission_name& permission) { - for( uint32_t i=0; i < act.authorization.size(); i++ ) - if( act.authorization[i].actor == account ) { - if( act.authorization[i].permission == permission ) { - used_authorizations[i] = true; + for( uint32_t i=0; i < act->authorization.size(); i++ ) + if( act->authorization[i].actor == account ) { + if( act->authorization[i].permission == permission ) { return; } } @@ -171,7 +190,7 @@ void apply_context::require_recipient( account_name recipient ) { if( !has_recipient(recipient) ) { 
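// A minimal sketch (kept in comments so the surrounding hunk still reads as
// one diff) of why the schedule_action wrappers below re-fetch the cached
// `act` pointer, assuming a plain std::vector for trace->action_traces
// (names here are illustrative only):
//
//    std::vector<action_trace> traces;        // owned by transaction_trace
//    const action* act = &traces[i].act;      // cached raw pointer
//    traces.emplace_back( /* new trace */ );  // may reallocate the storage
//    act = &traces[i].act;                    // pointer must be re-fetched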
_notified.emplace_back( recipient, - trx_context.schedule_action( act, recipient, false, action_ordinal, first_receiver_action_ordinal ) + schedule_action( action_ordinal, recipient, false, action_ordinal, first_receiver_action_ordinal ) ); } } @@ -202,7 +221,7 @@ void apply_context::execute_inline( action&& a ) { bool disallow_send_to_self_bypass = false; // eventually set to whether the appropriate protocol feature has been activated bool send_to_self = (a.account == receiver); - bool inherit_parent_authorizations = (!disallow_send_to_self_bypass && send_to_self && (receiver == act.account) && control.is_producing_block()); + bool inherit_parent_authorizations = (!disallow_send_to_self_bypass && send_to_self && (receiver == act->account) && control.is_producing_block()); flat_set inherited_authorizations; if( inherit_parent_authorizations ) { @@ -219,7 +238,7 @@ void apply_context::execute_inline( action&& a ) { if( enforce_actor_whitelist_blacklist ) actors.insert( auth.actor ); - if( inherit_parent_authorizations && std::find(act.authorization.begin(), act.authorization.end(), auth) != act.authorization.end() ) { + if( inherit_parent_authorizations && std::find(act->authorization.begin(), act->authorization.end(), auth) != act->authorization.end() ) { inherited_authorizations.insert( auth ); } } @@ -265,7 +284,7 @@ void apply_context::execute_inline( action&& a ) { auto inline_receiver = a.account; _inline_actions.emplace_back( - trx_context.schedule_action( std::move(a), inline_receiver, false, action_ordinal, first_receiver_action_ordinal ) + schedule_action( std::move(a), inline_receiver, false, action_ordinal, first_receiver_action_ordinal ) ); } @@ -280,7 +299,7 @@ void apply_context::execute_context_free_inline( action&& a ) { auto inline_receiver = a.account; _cfa_inline_actions.emplace_back( - trx_context.schedule_action( std::move(a), inline_receiver, true, action_ordinal, first_receiver_action_ordinal ) + schedule_action( std::move(a), inline_receiver, true, action_ordinal, first_receiver_action_ordinal ) ); } @@ -403,7 +422,7 @@ void apply_context::schedule_deferred_transaction( const uint128_t& sender_id, a } ); } - EOS_ASSERT( control.is_ram_billing_in_notify_allowed() || (receiver == act.account) || (receiver == payer) || privileged, + EOS_ASSERT( control.is_ram_billing_in_notify_allowed() || (receiver == act->account) || (receiver == payer) || privileged, subjective_block_production_exception, "Cannot charge RAM to other accounts during notify." 
); add_ram_usage( payer, (config::billable_size_v + trx_size) ); } @@ -418,6 +437,26 @@ bool apply_context::cancel_deferred_transaction( const uint128_t& sender_id, acc return gto; } +int32_t apply_context::schedule_action( int32_t ordinal_of_action_to_schedule, account_name receiver, bool context_free, + int32_t creator_action_ordinal, int32_t parent_action_ordinal ) +{ + int32_t scheduled_action_ordinal = trx_context.schedule_action( ordinal_of_action_to_schedule, receiver, context_free, + creator_action_ordinal, parent_action_ordinal ); + + act = &trx_context.get_action_trace( action_ordinal ).act; + return scheduled_action_ordinal; +} + +int32_t apply_context::schedule_action( action&& act_to_schedule, account_name receiver, bool context_free, + int32_t creator_action_ordinal, int32_t parent_action_ordinal ) +{ + int32_t scheduled_action_ordinal = trx_context.schedule_action( std::move(act_to_schedule), receiver, context_free, + creator_action_ordinal, parent_action_ordinal ); + + act = &trx_context.get_action_trace( action_ordinal ).act; + return scheduled_action_ordinal; +} + const table_id_object* apply_context::find_table( name code, name scope, name table ) { return db.find(boost::make_tuple(code, scope, table)); } @@ -461,7 +500,7 @@ bytes apply_context::get_packed_transaction() { void apply_context::update_db_usage( const account_name& payer, int64_t delta ) { if( delta > 0 ) { if( !(privileged || payer == account_name(receiver)) ) { - EOS_ASSERT( control.is_ram_billing_in_notify_allowed() || (receiver == act.account), + EOS_ASSERT( control.is_ram_billing_in_notify_allowed() || (receiver == act->account), subjective_block_production_exception, "Cannot charge RAM to other accounts during notify." ); require_authorization( payer ); } diff --git a/libraries/chain/eosio_contract.cpp b/libraries/chain/eosio_contract.cpp index cc303bb50cd..026efefb4a2 100644 --- a/libraries/chain/eosio_contract.cpp +++ b/libraries/chain/eosio_contract.cpp @@ -68,7 +68,7 @@ void validate_authority_precondition( const apply_context& context, const author * This method is called assuming precondition_system_newaccount succeeds a */ void apply_eosio_newaccount(apply_context& context) { - auto create = context.act.data_as(); + auto create = context.get_action().data_as(); try { context.require_authorization(create.creator); // context.require_write_lock( config::eosio_auth_scope ); @@ -129,7 +129,7 @@ void apply_eosio_setcode(apply_context& context) { const auto& cfg = context.control.get_global_properties().configuration; auto& db = context.db; - auto act = context.act.data_as(); + auto act = context.get_action().data_as(); context.require_authorization(act.account); EOS_ASSERT( act.vmtype == 0, invalid_contract_vm_type, "code should be 0" ); @@ -174,7 +174,7 @@ void apply_eosio_setcode(apply_context& context) { void apply_eosio_setabi(apply_context& context) { auto& db = context.db; - auto act = context.act.data_as(); + auto act = context.get_action().data_as(); context.require_authorization(act.account); @@ -205,7 +205,7 @@ void apply_eosio_setabi(apply_context& context) { void apply_eosio_updateauth(apply_context& context) { - auto update = context.act.data_as(); + auto update = context.get_action().data_as(); context.require_authorization(update.account); // only here to mark the single authority on this action as used auto& authorization = context.control.get_mutable_authorization_manager(); @@ -270,7 +270,7 @@ void apply_eosio_updateauth(apply_context& context) { void 
apply_eosio_deleteauth(apply_context& context) { // context.require_write_lock( config::eosio_auth_scope ); - auto remove = context.act.data_as(); + auto remove = context.get_action().data_as(); context.require_authorization(remove.account); // only here to mark the single authority on this action as used EOS_ASSERT(remove.permission != config::active_name, action_validate_exception, "Cannot delete active authority"); @@ -301,7 +301,7 @@ void apply_eosio_deleteauth(apply_context& context) { void apply_eosio_linkauth(apply_context& context) { // context.require_write_lock( config::eosio_auth_scope ); - auto requirement = context.act.data_as(); + auto requirement = context.get_action().data_as(); try { EOS_ASSERT(!requirement.requirement.empty(), action_validate_exception, "Required permission cannot be empty"); @@ -318,7 +318,7 @@ void apply_eosio_linkauth(apply_context& context) { const permission_object* permission = nullptr; if( context.control.is_builtin_activated( builtin_protocol_feature_t::only_link_to_existing_permission ) ) { permission = db.find( - boost::make_tuple( requirement.account, requirement.requirement ) + boost::make_tuple( requirement.account, requirement.requirement ) ); } else { permission = db.find(requirement.requirement); @@ -358,7 +358,7 @@ void apply_eosio_unlinkauth(apply_context& context) { // context.require_write_lock( config::eosio_auth_scope ); auto& db = context.db; - auto unlink = context.act.data_as(); + auto unlink = context.get_action().data_as(); context.require_authorization(unlink.account); // only here to mark the single authority on this action as used @@ -374,7 +374,7 @@ void apply_eosio_unlinkauth(apply_context& context) { } void apply_eosio_canceldelay(apply_context& context) { - auto cancel = context.act.data_as(); + auto cancel = context.get_action().data_as(); context.require_authorization(cancel.canceling_auth.actor); // only here to mark the single authority on this action as used const auto& trx_id = cancel.trx_id; diff --git a/libraries/chain/include/eosio/chain/apply_context.hpp b/libraries/chain/include/eosio/chain/apply_context.hpp index 49721cb0cfe..bfbe726da29 100644 --- a/libraries/chain/include/eosio/chain/apply_context.hpp +++ b/libraries/chain/include/eosio/chain/apply_context.hpp @@ -452,34 +452,25 @@ class apply_context { /// Constructor public: - apply_context(controller& con, transaction_context& trx_ctx, const action& a, uint32_t depth=0) - :control(con) - ,db(con.mutable_db()) - ,trx_context(trx_ctx) - ,act(a) - ,receiver(act.account) - ,used_authorizations(act.authorization.size(), false) - ,recurse_depth(depth) - ,idx64(*this) - ,idx128(*this) - ,idx256(*this) - ,idx_double(*this) - ,idx_long_double(*this) - { - } - + apply_context(controller& con, transaction_context& trx_ctx, uint32_t action_ordinal, uint32_t depth=0); /// Execution methods: public: - void exec_one( action_trace& trace ); - void exec( action_trace& trace ); + void exec_one(); + void exec(); void execute_inline( action&& a ); void execute_context_free_inline( action&& a ); void schedule_deferred_transaction( const uint128_t& sender_id, account_name payer, transaction&& trx, bool replace_existing ); bool cancel_deferred_transaction( const uint128_t& sender_id, account_name sender ); bool cancel_deferred_transaction( const uint128_t& sender_id ) { return cancel_deferred_transaction(sender_id, receiver); } + protected: + int32_t schedule_action( int32_t ordinal_of_action_to_schedule, account_name receiver, bool context_free = false, + int32_t 
creator_action_ordinal = -1, int32_t parent_action_ordinal = -1 ); + int32_t schedule_action( action&& act_to_schedule, account_name receiver, bool context_free = false, + int32_t creator_action_ordinal = -1, int32_t parent_action_ordinal = -1 ); + /// Authorization methods: public: @@ -560,22 +551,29 @@ class apply_context { void add_ram_usage( account_name account, int64_t ram_delta ); void finalize_trace( action_trace& trace, const fc::time_point& start ); + bool is_context_free()const { return context_free; } + bool is_privileged()const { return privileged; } + action_name get_receiver()const { return receiver; } + const action& get_action()const { return *act; } + /// Fields: public: controller& control; chainbase::database& db; ///< database where state is stored transaction_context& trx_context; ///< transaction context in which the action is running - const action& act; ///< message being applied + + private: + const action* act = nullptr; ///< action being applied + // act pointer may be invalidated on call to trx_context.schedule_action account_name receiver; ///< the code that is currently running - vector used_authorizations; ///< Parallel to act.authorization; tracks which permissions have been used while processing the message uint32_t recurse_depth; ///< how deep inline actions can recurse int32_t first_receiver_action_ordinal = -1; int32_t action_ordinal = -1; bool privileged = false; bool context_free = false; - bool used_context_free_api = false; + public: generic_index idx64; generic_index idx128; generic_index idx256; diff --git a/libraries/chain/include/eosio/chain/transaction_context.hpp b/libraries/chain/include/eosio/chain/transaction_context.hpp index 0182f5a05f0..f27a718523b 100644 --- a/libraries/chain/include/eosio/chain/transaction_context.hpp +++ b/libraries/chain/include/eosio/chain/transaction_context.hpp @@ -64,19 +64,22 @@ namespace eosio { namespace chain { void add_ram_usage( account_name account, int64_t ram_delta ); + action_trace& get_action_trace( int32_t action_ordinal ); + const action_trace& get_action_trace( int32_t action_ordinal )const; + + /** invalidates any action_trace references returned by get_action_trace */ int32_t schedule_action( const action& act, account_name receiver, bool context_free = false, int32_t creator_action_ordinal = -1, int32_t parent_action_ordinal = -1 ); - int32_t schedule_action( action&& act, account_name receiver, bool context_free = false, + /** invalidates any action_trace references returned by get_action_trace */ + int32_t schedule_action( action&& act, account_name receiver, bool context_free = false, int32_t creator_action_ordinal = -1, int32_t parent_action_ordinal = -1 ); - action_trace& get_action_trace( int32_t action_ordinal ); - const action_trace& get_action_trace( int32_t action_ordinal )const; + /** invalidates any action_trace references returned by get_action_trace */ + int32_t schedule_action( int32_t action_ordinal, account_name receiver, bool context_free = false, + int32_t creator_action_ordinal = -1, int32_t parent_action_ordinal = -1 ); - void execute_action( action_trace& act_trace, uint32_t recurse_depth ); - inline void execute_action( int32_t action_ordinal, uint32_t recurse_depth = 0 ) { - execute_action( get_action_trace( action_ordinal ), recurse_depth ); - } + void execute_action( int32_t action_ordinal, uint32_t recurse_depth = 0 ); void schedule_transaction(); void record_transaction( const transaction_id_type& id, fc::time_point_sec expire ); diff --git 
a/libraries/chain/transaction_context.cpp b/libraries/chain/transaction_context.cpp index da99fd4dbe2..138006715b5 100644 --- a/libraries/chain/transaction_context.cpp +++ b/libraries/chain/transaction_context.cpp @@ -346,7 +346,7 @@ namespace bacc = boost::accumulators; auto& action_traces = trace->action_traces; int32_t num_original_actions_to_execute = action_traces.size(); for( int32_t i = 0; i < num_original_actions_to_execute; ++i ) { - execute_action( action_traces[i], 0 ); + execute_action( i, 0 ); } if( delay != fc::microseconds() ) { @@ -575,23 +575,40 @@ namespace bacc = boost::accumulators; int32_t transaction_context::schedule_action( const action& act, account_name receiver, bool context_free, int32_t creator_action_ordinal, int32_t parent_action_ordinal ) { - int32_t action_ordinal = trace->action_traces.size(); + int32_t new_action_ordinal = trace->action_traces.size(); trace->action_traces.emplace_back( *trace, act, receiver, context_free, - action_ordinal, creator_action_ordinal, parent_action_ordinal ); + new_action_ordinal, creator_action_ordinal, parent_action_ordinal ); - return action_ordinal; + return new_action_ordinal; } int32_t transaction_context::schedule_action( action&& act, account_name receiver, bool context_free, int32_t creator_action_ordinal, int32_t parent_action_ordinal ) { - int32_t action_ordinal = trace->action_traces.size(); + int32_t new_action_ordinal = trace->action_traces.size(); trace->action_traces.emplace_back( *trace, std::move(act), receiver, context_free, - action_ordinal, creator_action_ordinal, parent_action_ordinal ); + new_action_ordinal, creator_action_ordinal, parent_action_ordinal ); - return action_ordinal; + return new_action_ordinal; + } + + int32_t transaction_context::schedule_action( int32_t action_ordinal, account_name receiver, bool context_free, + int32_t creator_action_ordinal, int32_t parent_action_ordinal ) + { + int32_t new_action_ordinal = trace->action_traces.size(); + + trace->action_traces.reserve( new_action_ordinal + 1 ); + + const action& provided_action = get_action_trace( action_ordinal ).act; + + // The reserve above is required so that the emplace_back below does not invalidate the provided_action reference. 
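// A minimal sketch of the reallocation hazard the reserve() above guards
// against, assuming a plain std::vector (illustrative only, not patch code):
//
//    std::vector<action_trace> traces(1);
//    const action& provided = traces[0].act;  // reference into the vector
//    traces.emplace_back( provided );         // without a prior reserve():
//                                             // possible reallocation while
//                                             // copying, leaving `provided`
//                                             // dangling mid-copy
//
// Reserving capacity for the new element first guarantees the emplace_back
// cannot reallocate, so the reference from get_action_trace stays valid.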
+ + trace->action_traces.emplace_back( *trace, provided_action, receiver, context_free, + new_action_ordinal, creator_action_ordinal, parent_action_ordinal ); + + return new_action_ordinal; } action_trace& transaction_context::get_action_trace( int32_t action_ordinal ) { @@ -606,14 +623,9 @@ namespace bacc = boost::accumulators; return trace->action_traces[action_ordinal]; } - void transaction_context::execute_action( action_trace& act_trace, uint32_t recurse_depth ) { - apply_context acontext( control, *this, act_trace.act, recurse_depth ); - acontext.receiver = act_trace.receiver; - acontext.first_receiver_action_ordinal = act_trace.action_ordinal; - acontext.action_ordinal = act_trace.action_ordinal; - acontext.context_free = act_trace.context_free; - - acontext.exec( act_trace ); + void transaction_context::execute_action( int32_t action_ordinal, uint32_t recurse_depth ) { + apply_context acontext( control, *this, action_ordinal, recurse_depth ); + acontext.exec(); } diff --git a/libraries/chain/wasm_interface.cpp b/libraries/chain/wasm_interface.cpp index 3414d9dc972..004a7326e83 100644 --- a/libraries/chain/wasm_interface.cpp +++ b/libraries/chain/wasm_interface.cpp @@ -77,9 +77,8 @@ class context_aware_api { context_aware_api(apply_context& ctx, bool context_free = false ) :context(ctx) { - if( context.context_free ) + if( context.is_context_free() ) EOS_ASSERT( context_free, unaccessible_api, "only context free api's can be used in this context" ); - context.used_context_free_api |= !context_free; } void checktime() { @@ -96,7 +95,7 @@ class context_free_api : public context_aware_api { context_free_api( apply_context& ctx ) :context_aware_api(ctx, true) { /* the context_free_data is not available during normal application because it is prunable */ - EOS_ASSERT( context.context_free, unaccessible_api, "this API may only be called from context_free apply" ); + EOS_ASSERT( context.is_context_free(), unaccessible_api, "this API may only be called from context_free apply" ); } int get_context_free_data( uint32_t index, array_ptr buffer, size_t buffer_size )const { @@ -109,7 +108,7 @@ class privileged_api : public context_aware_api { privileged_api( apply_context& ctx ) :context_aware_api(ctx) { - EOS_ASSERT( context.privileged, unaccessible_api, "${code} does not have permission to call this API", ("code",context.receiver) ); + EOS_ASSERT( context.is_privileged(), unaccessible_api, "${code} does not have permission to call this API", ("code",context.get_receiver()) ); } /** @@ -978,21 +977,21 @@ class action_api : public context_aware_api { :context_aware_api(ctx,true){} int read_action_data(array_ptr memory, size_t buffer_size) { - auto s = context.act.data.size(); + auto s = context.get_action().data.size(); if( buffer_size == 0 ) return s; auto copy_size = std::min( buffer_size, s ); - memcpy( memory, context.act.data.data(), copy_size ); + memcpy( memory, context.get_action().data.data(), copy_size ); return copy_size; } int action_data_size() { - return context.act.data.size(); + return context.get_action().data.size(); } name current_receiver() { - return context.receiver; + return context.get_receiver(); } }; diff --git a/libraries/chain/webassembly/wabt.cpp b/libraries/chain/webassembly/wabt.cpp index 2d45fa4ee01..a23919e0ec6 100644 --- a/libraries/chain/webassembly/wabt.cpp +++ b/libraries/chain/webassembly/wabt.cpp @@ -28,7 +28,7 @@ class wabt_instantiated_module : public wasm_instantiated_module_interface { continue; _initial_globals.emplace_back(_env->GetGlobal(i), 
_env->GetGlobal(i)->typed_value); } - + if(_env->GetMemoryCount()) _initial_memory_configuration = _env->GetMemory(0)->page_limits; } @@ -50,9 +50,9 @@ class wabt_instantiated_module : public wasm_instantiated_module_interface { memcpy(memory->data.data(), _initial_memory.data(), _initial_memory.size()); } - _params[0].set_i64(uint64_t(context.receiver)); - _params[1].set_i64(uint64_t(context.act.account)); - _params[2].set_i64(uint64_t(context.act.name)); + _params[0].set_i64(uint64_t(context.get_receiver())); + _params[1].set_i64(uint64_t(context.get_action().account)); + _params[2].set_i64(uint64_t(context.get_action().name)); ExecResult res = _executor.RunStartFunction(_instatiated_module); EOS_ASSERT( res.result == interp::Result::Ok, wasm_execution_error, "wabt start function failure (${s})", ("s", ResultToString(res.result)) ); @@ -92,7 +92,7 @@ std::unique_ptr wabt_runtime::instantiate_mo wabt::Result res = ReadBinaryInterp(env.get(), code_bytes, code_size, read_binary_options, &errors, &instantiated_module); EOS_ASSERT( Succeeded(res), wasm_execution_error, "Error building wabt interp: ${e}", ("e", wabt::FormatErrorsToString(errors, Location::Type::Binary)) ); - + return std::make_unique(std::move(env), initial_memory, instantiated_module); } diff --git a/libraries/chain/webassembly/wavm.cpp b/libraries/chain/webassembly/wavm.cpp index e614398c74e..a4e519a14e0 100644 --- a/libraries/chain/webassembly/wavm.cpp +++ b/libraries/chain/webassembly/wavm.cpp @@ -30,9 +30,9 @@ class wavm_instantiated_module : public wasm_instantiated_module_interface { {} void apply(apply_context& context) override { - vector args = {Value(uint64_t(context.receiver)), - Value(uint64_t(context.act.account)), - Value(uint64_t(context.act.name))}; + vector args = {Value(uint64_t(context.get_receiver())), + Value(uint64_t(context.get_action().account)), + Value(uint64_t(context.get_action().name))}; call("apply", args, context); } From 28377ed6d5d1285da6b0b00fce3e2a476549b9e8 Mon Sep 17 00:00:00 2001 From: arhag Date: Mon, 1 Apr 2019 18:20:07 -0400 Subject: [PATCH 273/680] update cleos to support new transaction trace structure without inline_traces #6897 Also correct state history ABI for action_trace_v0 to reflect that inline_traces are no longer included. --- .../state_history_plugin/state_history_plugin_abi.cpp | 1 - programs/cleos/main.cpp | 11 +++++++---- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/plugins/state_history_plugin/state_history_plugin_abi.cpp b/plugins/state_history_plugin/state_history_plugin_abi.cpp index af2afb8e1bf..3f564e9043c 100644 --- a/plugins/state_history_plugin/state_history_plugin_abi.cpp +++ b/plugins/state_history_plugin/state_history_plugin_abi.cpp @@ -104,7 +104,6 @@ extern const char* const state_history_plugin_abi = R"({ { "name": "console", "type": "string" }, { "name": "account_ram_deltas", "type": "account_delta[]" }, { "name": "except", "type": "string?" 
}, - { "name": "inline_traces", "type": "action_trace[]" } ] }, { diff --git a/programs/cleos/main.cpp b/programs/cleos/main.cpp index caa24ae5ccf..00c99d17261 100644 --- a/programs/cleos/main.cpp +++ b/programs/cleos/main.cpp @@ -443,9 +443,11 @@ bytes json_or_file_to_bin( const account_name& account, const action_name& actio void print_action_tree( const fc::variant& action ) { print_action( action ); - const auto& inline_traces = action["inline_traces"].get_array(); - for( const auto& t : inline_traces ) { - print_action_tree( t ); + if( action.get_object().contains( "inline_traces" ) ) { + const auto& inline_traces = action["inline_traces"].get_array(); + for( const auto& t : inline_traces ) { + print_action_tree( t ); + } } } @@ -453,12 +455,13 @@ void print_result( const fc::variant& result ) { try { if (result.is_object() && result.get_object().contains("processed")) { const auto& processed = result["processed"]; const auto& transaction_id = processed["id"].as_string(); - string status = processed["receipt"].is_object() ? processed["receipt"]["status"].as_string() : "failed"; + string status = "failed"; int64_t net = -1; int64_t cpu = -1; if( processed.get_object().contains( "receipt" )) { const auto& receipt = processed["receipt"]; if( receipt.is_object()) { + status = receipt["status"].as_string(); net = receipt["net_usage_words"].as_int64() * 8; cpu = receipt["cpu_usage_us"].as_int64(); } From 0d7ec43a61b4f0c95e54868de72fb7cade5a770d Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Tue, 2 Apr 2019 11:04:35 -0400 Subject: [PATCH 274/680] Fix merge issue --- plugins/producer_plugin/producer_plugin.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp index 599a3ec2cf4..37115f17c4a 100644 --- a/plugins/producer_plugin/producer_plugin.cpp +++ b/plugins/producer_plugin/producer_plugin.cpp @@ -358,7 +358,7 @@ class producer_plugin_impl : public std::enable_shared_from_this Date: Tue, 2 Apr 2019 11:57:58 -0400 Subject: [PATCH 275/680] switch ordinal types to fc::unsigned_int and use 1-based indexing #6897 --- libraries/chain/apply_context.cpp | 26 +++++----- .../include/eosio/chain/apply_context.hpp | 16 +++--- libraries/chain/include/eosio/chain/trace.hpp | 10 ++-- .../eosio/chain/transaction_context.hpp | 18 +++---- libraries/chain/trace.cpp | 6 +-- libraries/chain/transaction_context.cpp | 50 +++++++++---------- unittests/api_tests.cpp | 2 +- 7 files changed, 63 insertions(+), 65 deletions(-) diff --git a/libraries/chain/apply_context.cpp b/libraries/chain/apply_context.cpp index 22b583704e4..46ea155aee8 100644 --- a/libraries/chain/apply_context.cpp +++ b/libraries/chain/apply_context.cpp @@ -137,11 +137,11 @@ void apply_context::exec() transaction_exception, "max inline action depth per transaction reached" ); } - for( int32_t ordinal : _cfa_inline_actions ) { + for( uint32_t ordinal : _cfa_inline_actions ) { trx_context.execute_action( ordinal, recurse_depth + 1 ); } - for( int32_t ordinal : _inline_actions ) { + for( uint32_t ordinal : _inline_actions ) { trx_context.execute_action( ordinal, recurse_depth + 1 ); } @@ -190,7 +190,7 @@ void apply_context::require_recipient( account_name recipient ) { if( !has_recipient(recipient) ) { _notified.emplace_back( recipient, - schedule_action( action_ordinal, recipient, false, action_ordinal, first_receiver_action_ordinal ) + schedule_action( action_ordinal, recipient, false ) ); } } @@ -284,7 +284,7 @@ void 
apply_context::execute_inline( action&& a ) { auto inline_receiver = a.account; _inline_actions.emplace_back( - schedule_action( std::move(a), inline_receiver, false, action_ordinal, first_receiver_action_ordinal ) + schedule_action( std::move(a), inline_receiver, false ) ); } @@ -299,7 +299,7 @@ void apply_context::execute_context_free_inline( action&& a ) { auto inline_receiver = a.account; _cfa_inline_actions.emplace_back( - schedule_action( std::move(a), inline_receiver, true, action_ordinal, first_receiver_action_ordinal ) + schedule_action( std::move(a), inline_receiver, true ) ); } @@ -437,21 +437,21 @@ bool apply_context::cancel_deferred_transaction( const uint128_t& sender_id, acc return gto; } -int32_t apply_context::schedule_action( int32_t ordinal_of_action_to_schedule, account_name receiver, bool context_free, - int32_t creator_action_ordinal, int32_t parent_action_ordinal ) +uint32_t apply_context::schedule_action( uint32_t ordinal_of_action_to_schedule, account_name receiver, bool context_free ) { - int32_t scheduled_action_ordinal = trx_context.schedule_action( ordinal_of_action_to_schedule, receiver, context_free, - creator_action_ordinal, parent_action_ordinal ); + uint32_t scheduled_action_ordinal = trx_context.schedule_action( ordinal_of_action_to_schedule, + receiver, context_free, + action_ordinal, first_receiver_action_ordinal ); act = &trx_context.get_action_trace( action_ordinal ).act; return scheduled_action_ordinal; } -int32_t apply_context::schedule_action( action&& act_to_schedule, account_name receiver, bool context_free, - int32_t creator_action_ordinal, int32_t parent_action_ordinal ) +uint32_t apply_context::schedule_action( action&& act_to_schedule, account_name receiver, bool context_free ) { - int32_t scheduled_action_ordinal = trx_context.schedule_action( std::move(act_to_schedule), receiver, context_free, - creator_action_ordinal, parent_action_ordinal ); + uint32_t scheduled_action_ordinal = trx_context.schedule_action( std::move(act_to_schedule), + receiver, context_free, + action_ordinal, first_receiver_action_ordinal ); act = &trx_context.get_action_trace( action_ordinal ).act; return scheduled_action_ordinal; diff --git a/libraries/chain/include/eosio/chain/apply_context.hpp b/libraries/chain/include/eosio/chain/apply_context.hpp index bfbe726da29..805bb74e485 100644 --- a/libraries/chain/include/eosio/chain/apply_context.hpp +++ b/libraries/chain/include/eosio/chain/apply_context.hpp @@ -466,10 +466,8 @@ class apply_context { bool cancel_deferred_transaction( const uint128_t& sender_id ) { return cancel_deferred_transaction(sender_id, receiver); } protected: - int32_t schedule_action( int32_t ordinal_of_action_to_schedule, account_name receiver, bool context_free = false, - int32_t creator_action_ordinal = -1, int32_t parent_action_ordinal = -1 ); - int32_t schedule_action( action&& act_to_schedule, account_name receiver, bool context_free = false, - int32_t creator_action_ordinal = -1, int32_t parent_action_ordinal = -1 ); + uint32_t schedule_action( uint32_t ordinal_of_action_to_schedule, account_name receiver, bool context_free = false ); + uint32_t schedule_action( action&& act_to_schedule, account_name receiver, bool context_free = false ); /// Authorization methods: @@ -568,8 +566,8 @@ class apply_context { // act pointer may be invalidated on call to trx_context.schedule_action account_name receiver; ///< the code that is currently running uint32_t recurse_depth; ///< how deep inline actions can recurse - int32_t 
first_receiver_action_ordinal = -1;
-      int32_t                       action_ordinal = -1;
+      uint32_t                      first_receiver_action_ordinal = 0;
+      uint32_t                      action_ordinal = 0;
       bool                          privileged   = false;
       bool                          context_free = false;
@@ -583,9 +581,9 @@ class apply_context {
    private:

       iterator_cache<key_value_object>            keyval_cache;
-      vector< std::pair<account_name, int32_t> >  _notified; ///< keeps track of new accounts to be notified of current message
-      vector<int32_t>                             _inline_actions; ///< action_ordinals of queued inline actions
-      vector<int32_t>                             _cfa_inline_actions; ///< action_ordinals of queued inline context-free actions
+      vector< std::pair<account_name, uint32_t> > _notified; ///< keeps track of new accounts to be notified of current message
+      vector<uint32_t>                            _inline_actions; ///< action_ordinals of queued inline actions
+      vector<uint32_t>                            _cfa_inline_actions; ///< action_ordinals of queued inline context-free actions
       std::string                                 _pending_console_output;
       flat_set<account_delta>                     _account_ram_deltas; ///< flat_set of account_delta so json is an array of objects

diff --git a/libraries/chain/include/eosio/chain/trace.hpp b/libraries/chain/include/eosio/chain/trace.hpp
index 07dbbd0fdeb..67e7f62a7ee 100644
--- a/libraries/chain/include/eosio/chain/trace.hpp
+++ b/libraries/chain/include/eosio/chain/trace.hpp
@@ -25,15 +25,15 @@ namespace eosio { namespace chain {
 struct action_trace {
    action_trace( const transaction_trace& trace, const action& act, account_name receiver, bool context_free,
-                 int32_t action_ordinal, int32_t creator_action_ordinal, int32_t parent_action_ordinal );
+                 uint32_t action_ordinal, uint32_t creator_action_ordinal, uint32_t parent_action_ordinal );
    action_trace( const transaction_trace& trace, action&& act, account_name receiver, bool context_free,
-                 int32_t action_ordinal, int32_t creator_action_ordinal, int32_t parent_action_ordinal );
+                 uint32_t action_ordinal, uint32_t creator_action_ordinal, uint32_t parent_action_ordinal );
    //action_trace( const action_receipt& r ):receipt(r){}
    action_trace(){}

-   int32_t              action_ordinal = 0;
-   int32_t              creator_action_ordinal = -1;
-   int32_t              parent_action_ordinal = -1;
+   fc::unsigned_int     action_ordinal;
+   fc::unsigned_int     creator_action_ordinal;
+   fc::unsigned_int     parent_action_ordinal;
    fc::optional<action_receipt> receipt;
    action_name          receiver;
    action               act;

diff --git a/libraries/chain/include/eosio/chain/transaction_context.hpp b/libraries/chain/include/eosio/chain/transaction_context.hpp
index f27a718523b..636b3a87c82 100644
--- a/libraries/chain/include/eosio/chain/transaction_context.hpp
+++ b/libraries/chain/include/eosio/chain/transaction_context.hpp
@@ -64,22 +64,22 @@ namespace eosio { namespace chain {
      void add_ram_usage( account_name account, int64_t ram_delta );

-      action_trace& get_action_trace( int32_t action_ordinal );
-      const action_trace& get_action_trace( int32_t action_ordinal )const;
+      action_trace& get_action_trace( uint32_t action_ordinal );
+      const action_trace& get_action_trace( uint32_t action_ordinal )const;

       /** invalidates any action_trace references returned by get_action_trace */
-      int32_t schedule_action( const action& act, account_name receiver, bool context_free = false,
-                               int32_t creator_action_ordinal = -1, int32_t parent_action_ordinal = -1 );
+      uint32_t schedule_action( const action& act, account_name receiver, bool context_free = false,
+                                uint32_t creator_action_ordinal = 0, uint32_t parent_action_ordinal = 0 );

       /** invalidates any action_trace references returned by get_action_trace */
-      int32_t schedule_action( action&& act, account_name receiver, bool context_free = false,
-                               int32_t creator_action_ordinal = -1, int32_t parent_action_ordinal = -1 );
+      uint32_t
schedule_action( action&& act, account_name receiver, bool context_free = false, + uint32_t creator_action_ordinal = 0, uint32_t parent_action_ordinal = 0 ); /** invalidates any action_trace references returned by get_action_trace */ - int32_t schedule_action( int32_t action_ordinal, account_name receiver, bool context_free = false, - int32_t creator_action_ordinal = -1, int32_t parent_action_ordinal = -1 ); + uint32_t schedule_action( uint32_t action_ordinal, account_name receiver, bool context_free = false, + uint32_t creator_action_ordinal = 0, uint32_t parent_action_ordinal = 0 ); - void execute_action( int32_t action_ordinal, uint32_t recurse_depth = 0 ); + void execute_action( uint32_t action_ordinal, uint32_t recurse_depth = 0 ); void schedule_transaction(); void record_transaction( const transaction_id_type& id, fc::time_point_sec expire ); diff --git a/libraries/chain/trace.cpp b/libraries/chain/trace.cpp index 0b379c89e00..2379018518f 100644 --- a/libraries/chain/trace.cpp +++ b/libraries/chain/trace.cpp @@ -8,7 +8,7 @@ namespace eosio { namespace chain { action_trace::action_trace( const transaction_trace& trace, const action& act, account_name receiver, bool context_free, - int32_t action_ordinal, int32_t creator_action_ordinal, int32_t parent_action_ordinal + uint32_t action_ordinal, uint32_t creator_action_ordinal, uint32_t parent_action_ordinal ) :action_ordinal( action_ordinal ) ,creator_action_ordinal( creator_action_ordinal ) @@ -23,8 +23,8 @@ action_trace::action_trace( {} action_trace::action_trace( - const transaction_trace& trace, action&& act, account_name receiver, bool context_free, - int32_t action_ordinal, int32_t creator_action_ordinal, int32_t parent_action_ordinal + const transaction_trace& trace, action&& act, account_name receiver, bool context_free, + uint32_t action_ordinal, uint32_t creator_action_ordinal, uint32_t parent_action_ordinal ) :action_ordinal( action_ordinal ) ,creator_action_ordinal( creator_action_ordinal ) diff --git a/libraries/chain/transaction_context.cpp b/libraries/chain/transaction_context.cpp index 138006715b5..730658538e7 100644 --- a/libraries/chain/transaction_context.cpp +++ b/libraries/chain/transaction_context.cpp @@ -344,8 +344,8 @@ namespace bacc = boost::accumulators; } auto& action_traces = trace->action_traces; - int32_t num_original_actions_to_execute = action_traces.size(); - for( int32_t i = 0; i < num_original_actions_to_execute; ++i ) { + uint32_t num_original_actions_to_execute = action_traces.size(); + for( uint32_t i = 1; i <= num_original_actions_to_execute; ++i ) { execute_action( i, 0 ); } @@ -572,10 +572,22 @@ namespace bacc = boost::accumulators; return std::make_tuple(account_net_limit, account_cpu_limit, greylisted_net, greylisted_cpu); } - int32_t transaction_context::schedule_action( const action& act, account_name receiver, bool context_free, - int32_t creator_action_ordinal, int32_t parent_action_ordinal ) + action_trace& transaction_context::get_action_trace( uint32_t action_ordinal ) { + EOS_ASSERT( 0 < action_ordinal && action_ordinal <= trace->action_traces.size() , + transaction_exception, "invalid action_ordinal" ); + return trace->action_traces[action_ordinal-1]; + } + + const action_trace& transaction_context::get_action_trace( uint32_t action_ordinal )const { + EOS_ASSERT( 0 < action_ordinal && action_ordinal <= trace->action_traces.size() , + transaction_exception, "invalid action_ordinal" ); + return trace->action_traces[action_ordinal-1]; + } + + uint32_t transaction_context::schedule_action( 
const action& act, account_name receiver, bool context_free, + uint32_t creator_action_ordinal, uint32_t parent_action_ordinal ) { - int32_t new_action_ordinal = trace->action_traces.size(); + uint32_t new_action_ordinal = trace->action_traces.size() + 1; trace->action_traces.emplace_back( *trace, act, receiver, context_free, new_action_ordinal, creator_action_ordinal, parent_action_ordinal ); @@ -583,10 +595,10 @@ namespace bacc = boost::accumulators; return new_action_ordinal; } - int32_t transaction_context::schedule_action( action&& act, account_name receiver, bool context_free, - int32_t creator_action_ordinal, int32_t parent_action_ordinal ) + uint32_t transaction_context::schedule_action( action&& act, account_name receiver, bool context_free, + uint32_t creator_action_ordinal, uint32_t parent_action_ordinal ) { - int32_t new_action_ordinal = trace->action_traces.size(); + uint32_t new_action_ordinal = trace->action_traces.size() + 1; trace->action_traces.emplace_back( *trace, std::move(act), receiver, context_free, new_action_ordinal, creator_action_ordinal, parent_action_ordinal ); @@ -594,12 +606,12 @@ namespace bacc = boost::accumulators; return new_action_ordinal; } - int32_t transaction_context::schedule_action( int32_t action_ordinal, account_name receiver, bool context_free, - int32_t creator_action_ordinal, int32_t parent_action_ordinal ) + uint32_t transaction_context::schedule_action( uint32_t action_ordinal, account_name receiver, bool context_free, + uint32_t creator_action_ordinal, uint32_t parent_action_ordinal ) { - int32_t new_action_ordinal = trace->action_traces.size(); + uint32_t new_action_ordinal = trace->action_traces.size() + 1; - trace->action_traces.reserve( new_action_ordinal + 1 ); + trace->action_traces.reserve( new_action_ordinal ); const action& provided_action = get_action_trace( action_ordinal ).act; @@ -611,19 +623,7 @@ namespace bacc = boost::accumulators; return new_action_ordinal; } - action_trace& transaction_context::get_action_trace( int32_t action_ordinal ) { - EOS_ASSERT( 0 <= action_ordinal && action_ordinal < trace->action_traces.size() , - transaction_exception, "invalid action_ordinal" ); - return trace->action_traces[action_ordinal]; - } - - const action_trace& transaction_context::get_action_trace( int32_t action_ordinal )const { - EOS_ASSERT( 0 <= action_ordinal && action_ordinal < trace->action_traces.size() , - transaction_exception, "invalid action_ordinal" ); - return trace->action_traces[action_ordinal]; - } - - void transaction_context::execute_action( int32_t action_ordinal, uint32_t recurse_depth ) { + void transaction_context::execute_action( uint32_t action_ordinal, uint32_t recurse_depth ) { apply_context acontext( control, *this, action_ordinal, recurse_depth ); acontext.exec(); } diff --git a/unittests/api_tests.cpp b/unittests/api_tests.cpp index 83f2f20e618..b64a97feb7d 100644 --- a/unittests/api_tests.cpp +++ b/unittests/api_tests.cpp @@ -702,7 +702,7 @@ BOOST_FIXTURE_TEST_CASE(cf_action_tests, TESTER) { try { auto ttrace = CALL_TEST_FUNCTION( *this, "test_transaction", "send_cf_action", {} ); BOOST_REQUIRE_EQUAL(ttrace->action_traces.size(), 2); - BOOST_CHECK_EQUAL(ttrace->action_traces[1].creator_action_ordinal, 0); + BOOST_CHECK_EQUAL(ttrace->action_traces[1].creator_action_ordinal.value, 1); BOOST_CHECK_EQUAL(ttrace->action_traces[1].receiver, account_name("dummy")); BOOST_CHECK_EQUAL(ttrace->action_traces[1].act.account, account_name("dummy")); BOOST_CHECK_EQUAL(ttrace->action_traces[1].act.name, 
account_name("event1")); From c49af23c06344f42682275a4ec4d41462165af24 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Tue, 2 Apr 2019 11:58:12 -0400 Subject: [PATCH 276/680] Add named_thread_pool to reduce duplicated code --- libraries/chain/CMakeLists.txt | 4 +- libraries/chain/controller.cpp | 25 +++--------- .../include/eosio/chain/thread_utils.hpp | 29 ++++++++++++++ libraries/chain/thread_utils.cpp | 40 +++++++++++++++++++ plugins/producer_plugin/producer_plugin.cpp | 23 ++--------- unittests/misc_tests.cpp | 25 +++--------- 6 files changed, 86 insertions(+), 60 deletions(-) create mode 100644 libraries/chain/thread_utils.cpp diff --git a/libraries/chain/CMakeLists.txt b/libraries/chain/CMakeLists.txt index 2c430fecea0..f2bc4806b30 100644 --- a/libraries/chain/CMakeLists.txt +++ b/libraries/chain/CMakeLists.txt @@ -44,9 +44,9 @@ add_library( eosio_chain # global_property_object.cpp # # contracts/chain_initializer.cpp - - + transaction_metadata.cpp + thread_utils.cpp ${HEADERS} ) diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp index 176e743d32c..ac2fddbf159 100644 --- a/libraries/chain/controller.cpp +++ b/libraries/chain/controller.cpp @@ -28,7 +28,6 @@ namespace eosio { namespace chain { using resource_limits::resource_limits_manager; -using ioc_work_t = boost::asio::executor_work_guard; using controller_index_set = index_set< account_index, @@ -134,9 +133,7 @@ struct controller_impl { optional subjective_cpu_leeway; bool trusted_producer_light_validation = false; uint32_t snapshot_head_block = 0; - boost::asio::thread_pool thread_pool; - boost::asio::io_context ioc; - fc::optional ioc_work; + named_thread_pool thread_pool; typedef pair handler_key; map< account_name, map > apply_handlers; @@ -188,7 +185,7 @@ struct controller_impl { conf( cfg ), chain_id( cfg.genesis.compute_chain_id() ), read_mode( cfg.read_mode ), - thread_pool( cfg.thread_pool_size ) + thread_pool( "chain", cfg.thread_pool_size ) { #define SET_APP_HANDLER( receiver, contract, action) \ @@ -407,10 +404,7 @@ struct controller_impl { } ~controller_impl() { - ioc_work.reset(); - ioc.stop(); thread_pool.stop(); - thread_pool.join(); pending.reset(); } @@ -1203,7 +1197,7 @@ struct controller_impl { auto& pt = receipt.trx.get(); auto mtrx = std::make_shared( std::make_shared( pt ) ); if( !self.skip_auth_check() ) { - transaction_metadata::start_recover_keys( mtrx, ioc, chain_id, microseconds::maximum() ); + transaction_metadata::start_recover_keys( mtrx, thread_pool.get_executor(), chain_id, microseconds::maximum() ); } packed_transactions.emplace_back( std::move( mtrx ) ); } @@ -1281,7 +1275,7 @@ struct controller_impl { auto prev = fork_db.get_block( b->previous ); EOS_ASSERT( prev, unlinkable_block_exception, "unlinkable block ${id}", ("id", id)("previous", b->previous) ); - return async_thread_pool( ioc, [b, prev]() { + return async_thread_pool( thread_pool.get_executor(), [b, prev]() { const bool skip_validate_signee = false; return std::make_shared( *prev, move( b ), skip_validate_signee ); } ); @@ -1737,15 +1731,6 @@ void controller::add_indices() { } void controller::startup( std::function shutdown, const snapshot_reader_ptr& snapshot ) { - my->ioc_work.emplace( boost::asio::make_work_guard( my->ioc ) ); - for( uint16_t i = 0; i < my->conf.thread_pool_size; ++i ) { - boost::asio::post( my->thread_pool, [&ioc = my->ioc, i]() { - std::string tn = "chain-" + std::to_string( i ); - fc::set_os_thread_name( tn ); - ioc.run(); - } ); - } - my->head = my->fork_db.head(); if( snapshot ) { 
ilog( "Starting initialization from snapshot, this may take a significant amount of time" ); @@ -1798,7 +1783,7 @@ void controller::abort_block() { } boost::asio::io_context& controller::get_thread_pool() { - return my->ioc; + return my->thread_pool.get_executor(); } std::future controller::create_block_state_future( const signed_block_ptr& b ) { diff --git a/libraries/chain/include/eosio/chain/thread_utils.hpp b/libraries/chain/include/eosio/chain/thread_utils.hpp index bf5932fdf0f..b3aea3085f5 100644 --- a/libraries/chain/include/eosio/chain/thread_utils.hpp +++ b/libraries/chain/include/eosio/chain/thread_utils.hpp @@ -4,6 +4,8 @@ */ #pragma once +#include +#include #include #include #include @@ -11,6 +13,33 @@ namespace eosio { namespace chain { + /** + * Wrapper class for boost asio thread pool and io_context run. + * Also names threads so that tools like htop can see thread name. + */ + class named_thread_pool { + public: + // name_prefix is name appended with -## of thread. + // short name_prefix (6 chars or under) is recommended as console_appender uses 9 chars for thread name + named_thread_pool( std::string name_prefix, size_t num_threads ); + + // calls stop() + ~named_thread_pool(); + + boost::asio::io_context& get_executor() { return _ioc; } + + // destroy work guard, stop io_context, join thread_pool, and stop thread_pool + void stop(); + + private: + using ioc_work_t = boost::asio::executor_work_guard; + + boost::asio::thread_pool _thread_pool; + boost::asio::io_context _ioc; + fc::optional _ioc_work; + }; + + // async on thread_pool and return future template auto async_thread_pool( boost::asio::io_context& thread_pool, F&& f ) { diff --git a/libraries/chain/thread_utils.cpp b/libraries/chain/thread_utils.cpp new file mode 100644 index 00000000000..1d8a2707c14 --- /dev/null +++ b/libraries/chain/thread_utils.cpp @@ -0,0 +1,40 @@ +/** + * @file + * @copyright defined in eos/LICENSE.txt + */ + +#include +#include + +namespace eosio { namespace chain { + + +// +// named_thread_pool +// +named_thread_pool::named_thread_pool( std::string name_prefix, size_t num_threads ) +: _thread_pool( num_threads ) +{ + _ioc_work.emplace( boost::asio::make_work_guard( _ioc ) ); + for( size_t i = 0; i < num_threads; ++i ) { + boost::asio::post( _thread_pool, [&ioc = _ioc, name_prefix, i]() { + std::string tn = name_prefix + "-" + std::to_string( i ); + fc::set_os_thread_name( tn ); + ioc.run(); + } ); + } +} + +named_thread_pool::~named_thread_pool() { + stop(); +} + +void named_thread_pool::stop() { + _ioc_work.reset(); + _ioc.stop(); + _thread_pool.join(); + _thread_pool.stop(); +} + + +} } // eosio::chain \ No newline at end of file diff --git a/plugins/producer_plugin/producer_plugin.cpp b/plugins/producer_plugin/producer_plugin.cpp index 37115f17c4a..67a90d0887c 100644 --- a/plugins/producer_plugin/producer_plugin.cpp +++ b/plugins/producer_plugin/producer_plugin.cpp @@ -62,7 +62,6 @@ static appbase::abstract_plugin& _producer_plugin = app().register_plugin; namespace { bool failure_is_subjective(const fc::exception& e, bool deadline_is_subjective) { @@ -135,9 +134,7 @@ class producer_plugin_impl : public std::enable_shared_from_this _producer_watermarks; pending_block_mode _pending_block_mode; transaction_id_with_expiry_index _persistent_transactions; - fc::optional _thread_pool; - boost::asio::io_context _ioc; - fc::optional _ioc_work; + fc::optional _thread_pool; int32_t _max_transaction_time_ms; fc::microseconds _max_irreversible_block_age_us; @@ -356,9 +353,9 @@ class 
producer_plugin_impl : public std::enable_shared_from_this next) { chain::controller& chain = chain_plug->chain(); const auto& cfg = chain.get_global_properties().configuration; - signing_keys_future_type future = transaction_metadata::start_recover_keys( trx, _ioc, + signing_keys_future_type future = transaction_metadata::start_recover_keys( trx, _thread_pool->get_executor(), chain.get_chain_id(), fc::microseconds( cfg.max_transaction_cpu_usage ) ); - boost::asio::post( _ioc, [self = this, future, trx, persist_until_expired, next]() { + boost::asio::post( _thread_pool->get_executor(), [self = this, future, trx, persist_until_expired, next]() { if( future.valid() ) future.wait(); app().post(priority::low, [self, trx, persist_until_expired, next]() { @@ -694,16 +691,7 @@ void producer_plugin::plugin_initialize(const boost::program_options::variables_ auto thread_pool_size = options.at( "producer-threads" ).as(); EOS_ASSERT( thread_pool_size > 0, plugin_config_exception, "producer-threads ${num} must be greater than 0", ("num", thread_pool_size)); - my->_thread_pool.emplace( thread_pool_size ); - - my->_ioc_work.emplace( boost::asio::make_work_guard( my->_ioc ) ); - for( uint16_t i = 0; i < thread_pool_size; ++i ) { - boost::asio::post( *my->_thread_pool, [&ioc = my->_ioc, i]() { - std::string tn = "prod-" + std::to_string( i ); - fc::set_os_thread_name( tn ); - ioc.run(); - } ); - } + my->_thread_pool.emplace( "prod", thread_pool_size ); if( options.count( "snapshots-dir" )) { auto sd = options.at( "snapshots-dir" ).as(); @@ -799,10 +787,7 @@ void producer_plugin::plugin_shutdown() { edump((e.to_detail_string())); } - my->_ioc_work.reset(); - my->_ioc.stop(); if( my->_thread_pool ) { - my->_thread_pool->join(); my->_thread_pool->stop(); } my->_accepted_block_connection.reset(); diff --git a/unittests/misc_tests.cpp b/unittests/misc_tests.cpp index 48bfb9ed229..611d9f1f40e 100644 --- a/unittests/misc_tests.cpp +++ b/unittests/misc_tests.cpp @@ -7,13 +7,13 @@ #include #include #include +#include #include #include #include #include -#include #include #ifdef NON_VALIDATING_TEST @@ -830,30 +830,20 @@ BOOST_AUTO_TEST_CASE(transaction_metadata_test) { try { BOOST_CHECK_EQUAL(trx.id(), mtrx->id); BOOST_CHECK_EQUAL(trx.id(), mtrx2->id); - using ioc_work_t = boost::asio::executor_work_guard; - const int num_threads = 5; - boost::asio::thread_pool thread_pool( num_threads ); - boost::asio::io_context ioc; - fc::optional ioc_work( boost::asio::make_work_guard( ioc ) ); - for( int i = 0; i < num_threads; ++i) { - boost::asio::post( thread_pool, [&ioc]() { - fc::set_os_thread_name( "misc_test" ); - ioc.run(); - } ); - } + named_thread_pool thread_pool( "misc", 5 ); BOOST_CHECK( !mtrx->signing_keys_future.valid() ); BOOST_CHECK( !mtrx2->signing_keys_future.valid() ); - transaction_metadata::start_recover_keys( mtrx, ioc, test.control->get_chain_id(), fc::microseconds::maximum() ); - transaction_metadata::start_recover_keys( mtrx2, ioc, test.control->get_chain_id(), fc::microseconds::maximum() ); + transaction_metadata::start_recover_keys( mtrx, thread_pool.get_executor(), test.control->get_chain_id(), fc::microseconds::maximum() ); + transaction_metadata::start_recover_keys( mtrx2, thread_pool.get_executor(), test.control->get_chain_id(), fc::microseconds::maximum() ); BOOST_CHECK( mtrx->signing_keys_future.valid() ); BOOST_CHECK( mtrx2->signing_keys_future.valid() ); // no-op - transaction_metadata::start_recover_keys( mtrx, ioc, test.control->get_chain_id(), fc::microseconds::maximum() ); - 
transaction_metadata::start_recover_keys( mtrx2, ioc, test.control->get_chain_id(), fc::microseconds::maximum() ); + transaction_metadata::start_recover_keys( mtrx, thread_pool.get_executor(), test.control->get_chain_id(), fc::microseconds::maximum() ); + transaction_metadata::start_recover_keys( mtrx2, thread_pool.get_executor(), test.control->get_chain_id(), fc::microseconds::maximum() ); auto keys = mtrx->recover_keys( test.control->get_chain_id() ); BOOST_CHECK_EQUAL(1u, keys.second.size()); @@ -880,10 +870,7 @@ BOOST_AUTO_TEST_CASE(transaction_metadata_test) { try { BOOST_CHECK_EQUAL(1u, keys5.second.size()); BOOST_CHECK_EQUAL(public_key, *keys5.second.begin()); - ioc_work.reset(); - ioc.stop(); thread_pool.stop(); - thread_pool.join(); } FC_LOG_AND_RETHROW() } From 01d2dbcb68414bfafb3a1eef970906fcd11477e3 Mon Sep 17 00:00:00 2001 From: arhag Date: Tue, 2 Apr 2019 13:56:03 -0400 Subject: [PATCH 277/680] state_history_plugin changes to reflect ordinal type change to fc::unsigned_int #6897 --- .../state_history_plugin/state_history_serialization.hpp | 6 +++--- plugins/state_history_plugin/state_history_plugin_abi.cpp | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/plugins/state_history_plugin/include/eosio/state_history_plugin/state_history_serialization.hpp b/plugins/state_history_plugin/include/eosio/state_history_plugin/state_history_serialization.hpp index 473e5720ec4..3251abef7fb 100644 --- a/plugins/state_history_plugin/include/eosio/state_history_plugin/state_history_serialization.hpp +++ b/plugins/state_history_plugin/include/eosio/state_history_plugin/state_history_serialization.hpp @@ -480,9 +480,9 @@ datastream& operator<<(datastream& ds, const history_serial_wrapper datastream& operator<<(datastream& ds, const history_serial_wrapper& obj) { fc::raw::pack(ds, fc::unsigned_int(0)); - fc::raw::pack(ds, as_type(obj.obj.action_ordinal)); - fc::raw::pack(ds, as_type(obj.obj.creator_action_ordinal)); - fc::raw::pack(ds, as_type(obj.obj.parent_action_ordinal)); + fc::raw::pack(ds, as_type(obj.obj.action_ordinal)); + fc::raw::pack(ds, as_type(obj.obj.creator_action_ordinal)); + fc::raw::pack(ds, as_type(obj.obj.parent_action_ordinal)); fc::raw::pack(ds, bool(obj.obj.receipt)); if (obj.obj.receipt) { fc::raw::pack(ds, make_history_serial_wrapper(obj.db, as_type(*obj.obj.receipt))); diff --git a/plugins/state_history_plugin/state_history_plugin_abi.cpp b/plugins/state_history_plugin/state_history_plugin_abi.cpp index 3f564e9043c..95affaf57c2 100644 --- a/plugins/state_history_plugin/state_history_plugin_abi.cpp +++ b/plugins/state_history_plugin/state_history_plugin_abi.cpp @@ -93,9 +93,9 @@ extern const char* const state_history_plugin_abi = R"({ }, { "name": "action_trace_v0", "fields": [ - { "name": "action_ordinal", "type": "int32" }, - { "name": "creator_action_ordinal", "type": "int32" }, - { "name": "parent_action_ordinal", "type": "int32" }, + { "name": "action_ordinal", "type": "varuint32" }, + { "name": "creator_action_ordinal", "type": "varuint32" }, + { "name": "parent_action_ordinal", "type": "varuint32" }, { "name": "receipt", "type": "action_receipt?" 
}, { "name": "receiver", "type": "name" }, { "name": "act", "type": "action" },

From cb224ed1cb9daba38a0d0ff3a3f010abefb5b617 Mon Sep 17 00:00:00 2001
From: Kevin Heifner
Date: Tue, 2 Apr 2019 14:04:52 -0400
Subject: [PATCH 278/680] Simplify by using named_thread_pool

---
 plugins/http_plugin/http_plugin.cpp | 36 ++++++++---------------------
 1 file changed, 10 insertions(+), 26 deletions(-)

diff --git a/plugins/http_plugin/http_plugin.cpp b/plugins/http_plugin/http_plugin.cpp
index fe2b31472e7..befa9686287 100644
--- a/plugins/http_plugin/http_plugin.cpp
+++ b/plugins/http_plugin/http_plugin.cpp
@@ -5,6 +5,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
@@ -123,7 +124,6 @@ namespace eosio {
    using websocket_local_server_type = websocketpp::server;
    using websocket_server_tls_type =  websocketpp::server>;
    using ssl_context_ptr =  websocketpp::lib::shared_ptr;
-   using io_work_t = boost::asio::executor_work_guard;
    static bool verbose_http_errors = false;
@@ -140,9 +140,7 @@ namespace eosio {
       websocket_server_type    server;
       uint16_t                                    thread_pool_size = 2;
-      optional                                    thread_pool;
-      std::shared_ptr                             server_ioc;
-      optional                                    server_ioc_work;
+      optional                                    thread_pool;
       std::atomic                                 bytes_in_flight{0};
       size_t                                      max_bytes_in_flight = 0;
@@ -301,12 +299,12 @@ namespace eosio {
                con->defer_http_response();
                bytes_in_flight += body.size();
                app().post( appbase::priority::low,
-                           [ioc = this->server_ioc, &bytes_in_flight = this->bytes_in_flight, handler_itr,
+                           [&ioc = thread_pool->get_executor(), &bytes_in_flight = this->bytes_in_flight, handler_itr,
                             resource{std::move( resource )}, body{std::move( body )}, con]() {
                   try {
                      handler_itr->second( resource, body,
-                           [ioc{std::move(ioc)}, &bytes_in_flight, con]( int code, fc::variant response_body ) {
-                        boost::asio::post( *ioc, [ioc, response_body{std::move( response_body )}, &bytes_in_flight, con, code]() mutable {
+                           [&ioc, &bytes_in_flight, con]( int code, fc::variant response_body ) {
+                        boost::asio::post( ioc, [&ioc, response_body{std::move( response_body )}, &bytes_in_flight, con, code]() mutable {
                            std::string json = fc::json::to_string( response_body );
                            response_body.clear();
                            const size_t json_size = json.size();
@@ -340,11 +338,11 @@ namespace eosio {
       void create_server_for_endpoint(const tcp::endpoint& ep, websocketpp::server>& ws) {
          try {
             ws.clear_access_channels(websocketpp::log::alevel::all);
-            ws.init_asio(&(*server_ioc));
+            ws.init_asio( &thread_pool->get_executor() );
             ws.set_reuse_addr(true);
             ws.set_max_http_body_size(max_body_size);
             // capture server_ioc shared_ptr in http handler to keep it alive while in use
-            ws.set_http_handler([&, ioc = this->server_ioc](connection_hdl hdl) {
+            ws.set_http_handler([&](connection_hdl hdl) {
                handle_http_request>(ws.get_con_from_hdl(hdl));
             });
          } catch ( const fc::exception& e ){
@@ -518,16 +516,7 @@ namespace eosio {

    void http_plugin::plugin_startup() {

-      my->thread_pool.emplace( my->thread_pool_size );
-      my->server_ioc = std::make_shared();
-      my->server_ioc_work.emplace( boost::asio::make_work_guard(*my->server_ioc) );
-      for( uint16_t i = 0; i < my->thread_pool_size; ++i ) {
-         boost::asio::post( *my->thread_pool, [ioc = my->server_ioc, i]() {
-            std::string tn = "http-" + std::to_string( i );
-            fc::set_os_thread_name( tn );
-            ioc->run();
-         } );
-      }
+      my->thread_pool.emplace( "http", my->thread_pool_size );

       if(my->listen_endpoint) {
          try {
@@ -551,10 +540,10 @@ namespace eosio {
       if(my->unix_endpoint) {
          try {
             my->unix_server.clear_access_channels(websocketpp::log::alevel::all);
- 
my->unix_server.init_asio(&(*my->server_ioc)); + my->unix_server.init_asio( &my->thread_pool->get_executor() ); my->unix_server.set_max_http_body_size(my->max_body_size); my->unix_server.listen(*my->unix_endpoint); - my->unix_server.set_http_handler([&, ioc = my->server_ioc](connection_hdl hdl) { + my->unix_server.set_http_handler([&, &ioc = my->thread_pool->get_executor()](connection_hdl hdl) { my->handle_http_request( my->unix_server.get_con_from_hdl(hdl)); }); my->unix_server.start_accept(); @@ -614,12 +603,7 @@ namespace eosio { if(my->unix_server.is_listening()) my->unix_server.stop_listening(); - if( my->server_ioc_work ) - my->server_ioc_work->reset(); - if( my->server_ioc ) - my->server_ioc->stop(); if( my->thread_pool ) { - my->thread_pool->join(); my->thread_pool->stop(); } } From 1c27590109abc5ec0d412c3da23d462c88dda88e Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Tue, 2 Apr 2019 14:20:12 -0400 Subject: [PATCH 279/680] Use named_thread_pool to simplify code --- plugins/net_plugin/net_plugin.cpp | 53 +++++++++++-------------------- 1 file changed, 18 insertions(+), 35 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 58a47c8c0c5..9341191beb6 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -10,6 +10,7 @@ #include #include #include +#include #include #include @@ -158,11 +159,8 @@ namespace eosio { channels::transaction_ack::channel_type::handle incoming_transaction_ack_subscription; - uint16_t thread_pool_size = 1; // currently used by server_ioc - optional thread_pool; - std::shared_ptr server_ioc; - optional server_ioc_work; - + uint16_t thread_pool_size = 1; + optional thread_pool; void connect(const connection_ptr& c); void connect(const connection_ptr& c, tcp::resolver::iterator endpoint_itr); @@ -497,7 +495,7 @@ namespace eosio { peer_block_state_index blk_state; transaction_state_index trx_state; optional peer_requested; // this peer is requesting info from us - std::shared_ptr server_ioc; // keep ioc alive + boost::asio::io_context& server_ioc; boost::asio::io_context::strand strand; socket_ptr socket; @@ -731,9 +729,9 @@ namespace eosio { : blk_state(), trx_state(), peer_requested(), - server_ioc( my_impl->server_ioc ), + server_ioc( my_impl->thread_pool->get_executor() ), strand( app().get_io_service() ), - socket( std::make_shared( std::ref( *my_impl->server_ioc ))), + socket( std::make_shared( my_impl->thread_pool->get_executor() ) ), node_id(), last_handshake_recv(), last_handshake_sent(), @@ -757,7 +755,7 @@ namespace eosio { : blk_state(), trx_state(), peer_requested(), - server_ioc( my_impl->server_ioc ), + server_ioc( my_impl->thread_pool->get_executor() ), strand( app().get_io_service() ), socket( s ), node_id(), @@ -784,8 +782,8 @@ namespace eosio { void connection::initialize() { auto *rnd = node_id.data(); rnd[0] = 0; - response_expected.reset(new boost::asio::steady_timer( *my_impl->server_ioc )); - read_delay_timer.reset(new boost::asio::steady_timer( *my_impl->server_ioc )); + response_expected.reset(new boost::asio::steady_timer( my_impl->thread_pool->get_executor() )); + read_delay_timer.reset(new boost::asio::steady_timer( my_impl->thread_pool->get_executor() )); } bool connection::connected() { @@ -1933,9 +1931,9 @@ namespace eosio { void net_plugin_impl::start_listen_loop() { - auto socket = std::make_shared( std::ref( *server_ioc ) ); - acceptor->async_accept( *socket, [socket, this, ioc = server_ioc]( boost::system::error_code ec ) { - app().post( 
priority::low, [socket, this, ec, ioc{std::move(ioc)}]() { + auto socket = std::make_shared( my_impl->thread_pool->get_executor() ); + acceptor->async_accept( *socket, [socket, this]( boost::system::error_code ec ) { + app().post( priority::low, [socket, this, ec]() { if( !ec ) { uint32_t visitors = 0; uint32_t from_addr = 0; @@ -2662,8 +2660,8 @@ namespace eosio { } void net_plugin_impl::start_monitors() { - connector_check.reset(new boost::asio::steady_timer( *server_ioc )); - transaction_check.reset(new boost::asio::steady_timer( *server_ioc )); + connector_check.reset(new boost::asio::steady_timer( my_impl->thread_pool->get_executor() )); + transaction_check.reset(new boost::asio::steady_timer( my_impl->thread_pool->get_executor() )); start_conn_timer(connector_period, std::weak_ptr()); start_txn_timer(); } @@ -3012,19 +3010,10 @@ namespace eosio { void net_plugin::plugin_startup() { my->producer_plug = app().find_plugin(); - my->thread_pool.emplace( my->thread_pool_size ); - my->server_ioc = std::make_shared(); - my->server_ioc_work.emplace( boost::asio::make_work_guard( *my->server_ioc ) ); // currently thread_pool only used for server_ioc - for( uint16_t i = 0; i < my->thread_pool_size; ++i ) { - boost::asio::post( *my->thread_pool, [ioc = my->server_ioc, i]() { - std::string tn = "net-" + std::to_string( i ); - fc::set_os_thread_name( tn ); - ioc->run(); - } ); - } + my->thread_pool.emplace( "net", my->thread_pool_size ); - my->resolver = std::make_shared( std::ref( *my->server_ioc )); + my->resolver = std::make_shared( my->thread_pool->get_executor() ); if( my->p2p_address.size() > 0 ) { auto host = my->p2p_address.substr( 0, my->p2p_address.find( ':' )); auto port = my->p2p_address.substr( host.size() + 1, my->p2p_address.size()); @@ -3033,7 +3022,7 @@ namespace eosio { my->listen_endpoint = *my->resolver->resolve( query ); - my->acceptor.reset( new tcp::acceptor( *my->server_ioc ) ); + my->acceptor.reset( new tcp::acceptor( my_impl->thread_pool->get_executor() ) ); if( !my->p2p_server_address.empty() ) { my->p2p_address = my->p2p_server_address; @@ -3053,7 +3042,7 @@ namespace eosio { } } - my->keepalive_timer.reset( new boost::asio::steady_timer( *my->server_ioc ) ); + my->keepalive_timer.reset( new boost::asio::steady_timer( my->thread_pool->get_executor() ) ); my->ticker(); if( my->acceptor ) { @@ -3098,9 +3087,6 @@ namespace eosio { void net_plugin::plugin_shutdown() { try { fc_ilog( logger, "shutdown.." ); - if( my->server_ioc_work ) - my->server_ioc_work->reset(); - if( my->connector_check ) my->connector_check->cancel(); if( my->transaction_check ) @@ -3122,10 +3108,7 @@ namespace eosio { my->connections.clear(); } - if( my->server_ioc ) - my->server_ioc->stop(); if( my->thread_pool ) { - my->thread_pool->join(); my->thread_pool->stop(); } fc_ilog( logger, "exit shutdown" ); From ef84f7c9e331f29bd8f1942360dc9594139ad6df Mon Sep 17 00:00:00 2001 From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com> Date: Tue, 2 Apr 2019 17:20:06 -0400 Subject: [PATCH 280/680] Remove unwinding frames from WAVM code generation MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit WAVM, via LLVM, generates unwinding frames for each wasm that is instantiated. This allows exceptions to unwind through the JITed code. Unfortunately when hundreds or thousands of these unwinding frames are registered exception unwinding becomes exceptionally slow. This change removes generation of the unwinding frames. 
Because exceptions can no longer unwind through WAVM’s compiled code, all exceptions must be caught before unwinding reaches the compiled code. This means none of the intrinsics — either the ones we register or the ones wavm registers itself — can allow an exception to escape. Instead, the exception_ptr is stored and we longjmp through the compiled code. --- .../include/eosio/chain/webassembly/wavm.hpp | 14 +++++++++-- libraries/chain/webassembly/wavm.cpp | 2 +- .../wasm-jit/Include/Platform/Platform.h | 2 +- libraries/wasm-jit/Source/Platform/POSIX.cpp | 8 +++++- .../wasm-jit/Source/Platform/Windows.cpp | 4 +++ libraries/wasm-jit/Source/Runtime/LLVMJIT.cpp | 6 ----- .../Source/Runtime/WAVMIntrinsics.cpp | 25 ++++++++++++------- 7 files changed, 41 insertions(+), 20 deletions(-) diff --git a/libraries/chain/include/eosio/chain/webassembly/wavm.hpp b/libraries/chain/include/eosio/chain/webassembly/wavm.hpp index f619e318b3f..23df2f04e08 100644 --- a/libraries/chain/include/eosio/chain/webassembly/wavm.hpp +++ b/libraries/chain/include/eosio/chain/webassembly/wavm.hpp @@ -312,7 +312,12 @@ struct intrinsic_invoker_impl, std::tuple> { template static native_to_wasm_t invoke(Translated... translated) { - return convert_native_to_wasm(the_running_instance_context, Method(the_running_instance_context, translated...)); + try { + return convert_native_to_wasm(the_running_instance_context, Method(the_running_instance_context, translated...)); + } + catch(...) { + Platform::immediately_exit(std::current_exception()); + } } template @@ -331,7 +336,12 @@ struct intrinsic_invoker_impl, std::tuple template static void invoke(Translated... translated) { - Method(the_running_instance_context, translated...); + try { + Method(the_running_instance_context, translated...); + } + catch(...) 
{ + Platform::immediately_exit(std::current_exception()); + } } template diff --git a/libraries/chain/webassembly/wavm.cpp b/libraries/chain/webassembly/wavm.cpp index e614398c74e..febd2ec8b07 100644 --- a/libraries/chain/webassembly/wavm.cpp +++ b/libraries/chain/webassembly/wavm.cpp @@ -131,7 +131,7 @@ void wavm_runtime::immediately_exit_currently_running_module() { #ifdef _WIN32 throw wasm_exit(); #else - Platform::immediately_exit(); + Platform::immediately_exit(nullptr); #endif } diff --git a/libraries/wasm-jit/Include/Platform/Platform.h b/libraries/wasm-jit/Include/Platform/Platform.h index 8d8769d4834..a646778ae8a 100644 --- a/libraries/wasm-jit/Include/Platform/Platform.h +++ b/libraries/wasm-jit/Include/Platform/Platform.h @@ -134,7 +134,7 @@ namespace Platform Uptr& outTrapOperand, const std::function& thunk ); - PLATFORM_API void immediately_exit(); + PLATFORM_API void immediately_exit(std::exception_ptr except) __attribute__((noreturn)); // // Threading diff --git a/libraries/wasm-jit/Source/Platform/POSIX.cpp b/libraries/wasm-jit/Source/Platform/POSIX.cpp index 4305381b39f..b6810e97480 100644 --- a/libraries/wasm-jit/Source/Platform/POSIX.cpp +++ b/libraries/wasm-jit/Source/Platform/POSIX.cpp @@ -176,6 +176,7 @@ namespace Platform THREAD_LOCAL Uptr* signalOperand = nullptr; THREAD_LOCAL bool isReentrantSignal = false; THREAD_LOCAL bool isCatchingSignals = false; + thread_local std::exception_ptr thrown_exception; void signalHandler(int signalNumber,siginfo_t* signalInfo,void*) { @@ -252,6 +253,7 @@ namespace Platform jmp_buf oldSignalReturnEnv; memcpy(&oldSignalReturnEnv,&signalReturnEnv,sizeof(jmp_buf)); const bool oldIsCatchingSignals = isCatchingSignals; + thrown_exception = nullptr; // Use setjmp to allow signals to jump back to this point. 
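      // In miniature, the rescue path this patch builds works like the sketch
      // below (illustrative names only, not the actual WAVM sources): an
      // intrinsic that catches a C++ exception stashes it and longjmps straight
      // over the JITed frames, and the sigsetjmp landing point rethrows it once
      // ordinary C++ frames are back on the stack.
      //
      //    thread_local std::exception_ptr pending;  // what immediately_exit() stashes
      //    thread_local sigjmp_buf rescue;           // armed before entering JIT code
      //
      //    [[noreturn]] void immediately_exit(std::exception_ptr e) {
      //       pending = e;
      //       siglongjmp(rescue, 1);                 // no unwinding through JIT frames
      //    }
      //
      //    if (sigsetjmp(rescue, 1)) {               // re-entered via siglongjmp
      //       if (pending) std::rethrow_exception(pending);
      //    }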
bool isReturningFromSignalHandler = sigsetjmp(signalReturnEnv,1); @@ -273,10 +275,14 @@ namespace Platform signalCallStack = nullptr; signalOperand = nullptr; + if(thrown_exception) + std::rethrow_exception(thrown_exception); + return signalType; } - void immediately_exit() { + void immediately_exit(std::exception_ptr except) { + thrown_exception = except; siglongjmp(signalReturnEnv,1); } diff --git a/libraries/wasm-jit/Source/Platform/Windows.cpp b/libraries/wasm-jit/Source/Platform/Windows.cpp index bc3c30fc46a..2c34c613b64 100644 --- a/libraries/wasm-jit/Source/Platform/Windows.cpp +++ b/libraries/wasm-jit/Source/Platform/Windows.cpp @@ -359,6 +359,10 @@ namespace Platform { errorUnless(SetEvent(reinterpret_cast(event))); } + + void immediately_exit(std::exception_ptr except) { + std::rethrow_exception(except); + } } #endif diff --git a/libraries/wasm-jit/Source/Runtime/LLVMJIT.cpp b/libraries/wasm-jit/Source/Runtime/LLVMJIT.cpp index ba5354c5d22..98fbe2fac9e 100644 --- a/libraries/wasm-jit/Source/Runtime/LLVMJIT.cpp +++ b/libraries/wasm-jit/Source/Runtime/LLVMJIT.cpp @@ -112,15 +112,9 @@ namespace LLVMJIT void registerEHFrames(U8* addr, U64 loadAddr,uintptr_t numBytes) override { - llvm::RTDyldMemoryManager::registerEHFrames(addr,loadAddr,numBytes); - hasRegisteredEHFrames = true; - ehFramesAddr = addr; - ehFramesLoadAddr = loadAddr; - ehFramesNumBytes = numBytes; } void deregisterEHFrames(U8* addr, U64 loadAddr,uintptr_t numBytes) override { - llvm::RTDyldMemoryManager::deregisterEHFrames(addr,loadAddr,numBytes); } virtual bool needsToReserveAllocationSpace() override { return true; } diff --git a/libraries/wasm-jit/Source/Runtime/WAVMIntrinsics.cpp b/libraries/wasm-jit/Source/Runtime/WAVMIntrinsics.cpp index a8d1fe80c71..a85d0e21fcd 100644 --- a/libraries/wasm-jit/Source/Runtime/WAVMIntrinsics.cpp +++ b/libraries/wasm-jit/Source/Runtime/WAVMIntrinsics.cpp @@ -8,6 +8,11 @@ namespace Runtime { + static void causeIntrensicException(Exception::Cause cause) { + Platform::immediately_exit(std::make_exception_ptr(Exception{cause, std::vector()})); + __builtin_unreachable(); + } + template Float quietNaN(Float value) { @@ -104,11 +109,11 @@ namespace Runtime { if(sourceValue != sourceValue) { - causeException(Exception::Cause::invalidFloatOperation); + causeIntrensicException(Exception::Cause::invalidFloatOperation); } else if(sourceValue >= maxValue || (isMinInclusive ? 
sourceValue <= minValue : sourceValue < minValue)) { - causeException(Exception::Cause::integerDivideByZeroOrIntegerOverflow); + causeIntrensicException(Exception::Cause::integerDivideByZeroOrIntegerOverflow); } return (Dest)sourceValue; } @@ -125,17 +130,17 @@ namespace Runtime DEFINE_INTRINSIC_FUNCTION0(wavmIntrinsics,divideByZeroOrIntegerOverflowTrap,divideByZeroOrIntegerOverflowTrap,none) { - causeException(Exception::Cause::integerDivideByZeroOrIntegerOverflow); + causeIntrensicException(Exception::Cause::integerDivideByZeroOrIntegerOverflow); } DEFINE_INTRINSIC_FUNCTION0(wavmIntrinsics,unreachableTrap,unreachableTrap,none) { - causeException(Exception::Cause::reachedUnreachable); + causeIntrensicException(Exception::Cause::reachedUnreachable); } DEFINE_INTRINSIC_FUNCTION0(wavmIntrinsics,accessViolationTrap,accessViolationTrap,none) { - causeException(Exception::Cause::accessViolation); + causeIntrensicException(Exception::Cause::accessViolation); } DEFINE_INTRINSIC_FUNCTION3(wavmIntrinsics,indirectCallSignatureMismatch,indirectCallSignatureMismatch,none,i32,index,i64,expectedSignatureBits,i64,tableBits) @@ -152,18 +157,19 @@ namespace Runtime actualSignature ? asString(actualSignature).c_str() : "nullptr", ipDescription.c_str() ); - causeException(elementValue == nullptr ? Exception::Cause::undefinedTableElement : Exception::Cause::indirectCallSignatureMismatch); + causeIntrensicException(elementValue == nullptr ? Exception::Cause::undefinedTableElement : Exception::Cause::indirectCallSignatureMismatch); } DEFINE_INTRINSIC_FUNCTION0(wavmIntrinsics,indirectCallIndexOutOfBounds,indirectCallIndexOutOfBounds,none) { - causeException(Exception::Cause::undefinedTableElement); + causeIntrensicException(Exception::Cause::undefinedTableElement); } DEFINE_INTRINSIC_FUNCTION2(wavmIntrinsics,_growMemory,growMemory,i32,i32,deltaPages,i64,memoryBits) { MemoryInstance* memory = reinterpret_cast(memoryBits); - WAVM_ASSERT_THROW(memory); + if(!memory) + causeIntrensicException(Exception::Cause::outOfMemory); const Iptr numPreviousMemoryPages = growMemory(memory,(Uptr)deltaPages); if(numPreviousMemoryPages + (Uptr)deltaPages > IR::maxMemoryPages) { return -1; } else { return (I32)numPreviousMemoryPages; } @@ -172,7 +178,8 @@ namespace Runtime DEFINE_INTRINSIC_FUNCTION1(wavmIntrinsics,_currentMemory,currentMemory,i32,i64,memoryBits) { MemoryInstance* memory = reinterpret_cast(memoryBits); - WAVM_ASSERT_THROW(memory); + if(!memory) + causeIntrensicException(Exception::Cause::outOfMemory); Uptr numMemoryPages = getMemoryNumPages(memory); if(numMemoryPages > UINT32_MAX) { numMemoryPages = UINT32_MAX; } return (U32)numMemoryPages; From c2bcec21df1f6bbe18e3c64a1c74b1509bb5c070 Mon Sep 17 00:00:00 2001 From: arhag Date: Wed, 3 Apr 2019 11:00:08 -0400 Subject: [PATCH 281/680] implement RESTRICT_ACTION_TO_SELF protocol feature --- libraries/chain/apply_context.cpp | 2 +- .../eosio/chain/protocol_feature_manager.hpp | 3 ++- libraries/chain/protocol_feature_manager.cpp | 13 +++++++++++++ 3 files changed, 16 insertions(+), 2 deletions(-) diff --git a/libraries/chain/apply_context.cpp b/libraries/chain/apply_context.cpp index 37c78ff7c7b..153582429cf 100644 --- a/libraries/chain/apply_context.cpp +++ b/libraries/chain/apply_context.cpp @@ -318,7 +318,7 @@ void apply_context::schedule_deferred_transaction( const uint128_t& sender_id, a // So, the deferred transaction must always go through the authorization checking if it is not sent by a privileged contract. 
// However, the old logic must still be considered because it cannot objectively change until a consensus protocol upgrade. - bool disallow_send_to_self_bypass = false; // eventually set to whether the appropriate protocol feature has been activated + bool disallow_send_to_self_bypass = control.is_builtin_activated( builtin_protocol_feature_t::restrict_action_to_self ); auto is_sending_only_to_self = [&trx]( const account_name& self ) { bool send_to_self = true; diff --git a/libraries/chain/include/eosio/chain/protocol_feature_manager.hpp b/libraries/chain/include/eosio/chain/protocol_feature_manager.hpp index f9f55dffb7a..773546eead0 100644 --- a/libraries/chain/include/eosio/chain/protocol_feature_manager.hpp +++ b/libraries/chain/include/eosio/chain/protocol_feature_manager.hpp @@ -18,7 +18,8 @@ enum class builtin_protocol_feature_t : uint32_t { only_link_to_existing_permission, replace_deferred, fix_linkauth_restriction, - disallow_empty_producer_schedule + disallow_empty_producer_schedule, + restrict_action_to_self }; struct protocol_feature_subjective_restrictions { diff --git a/libraries/chain/protocol_feature_manager.cpp b/libraries/chain/protocol_feature_manager.cpp index 78c84ff9410..67dc76e36ef 100644 --- a/libraries/chain/protocol_feature_manager.cpp +++ b/libraries/chain/protocol_feature_manager.cpp @@ -73,6 +73,19 @@ updateauth, deleteauth, linkauth, unlinkauth, or canceldelay. Builtin protocol feature: DISALLOW_EMPTY_PRODUCER_SCHEDULE Disallows proposing an empty producer schedule. +*/ + {} + } ) + ( builtin_protocol_feature_t::restrict_action_to_self, builtin_protocol_feature_spec{ + "RESTRICT_ACTION_TO_SELF", + fc::variant("e71b6712188391994c78d8c722c1d42c477cf091e5601b5cf1befd05721a57f3").as(), + // SHA256 hash of the raw message below within the comment delimiters (do not modify message below). +/* +Builtin protocol feature: RESTRICT_ACTION_TO_SELF + +Disallows bypass of authorization checks by unprivileged contracts when sending inline actions or deferred transactions. +The original protocol rules allow a bypass of authorization checks for actions sent by a contract to itself. +This protocol feature removes that bypass. */ {} } ) From d62825b681ad183d3ba76e91e906f05e4d7466b2 Mon Sep 17 00:00:00 2001 From: arhag Date: Wed, 3 Apr 2019 11:45:57 -0400 Subject: [PATCH 282/680] update api_tests/deferred_transaction_tests to reflect the fact that protocol features RESTRICT_ACTION_TO_SELF and REPLACE_DEFERRED have been activated --- unittests/api_tests.cpp | 5 +++-- unittests/test-contracts/test_api/test_transaction.cpp | 1 - 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/unittests/api_tests.cpp b/unittests/api_tests.cpp index 889effc9cb3..cc03ce721ea 100644 --- a/unittests/api_tests.cpp +++ b/unittests/api_tests.cpp @@ -1263,7 +1263,8 @@ BOOST_FIXTURE_TEST_CASE(deferred_transaction_tests, TESTER) { try { CALL_TEST_FUNCTION(*this, "test_transaction", "send_deferred_tx_with_dtt_action", fc::raw::pack(dtt_act2)); // If the deferred tx receiver == this tx receiver, the authorization checking would originally be bypassed. - // But not anymore. Now it should subjectively fail because testapi@additional permission is not unilaterally satisfied by testapi@eosio.code. + // But not anymore. With the RESTRICT_ACTION_TO_SELF protocol feature activated, it should now objectively + // fail because testapi@additional permission is not unilaterally satisfied by testapi@eosio.code. 
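   // (Terminology, as these tests use it: a "subjective" failure is node-local
   // block-production policy and may vary from node to node, while an "objective"
   // failure is enforced by the consensus rules themselves, so once the protocol
   // feature is active every validating node rejects the transaction identically.)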
dtt_action dtt_act3;
    dtt_act3.deferred_account = N(testapi);
    dtt_act3.permission_name = N(additional);
@@ -1272,7 +1273,7 @@ BOOST_FIXTURE_TEST_CASE(deferred_transaction_tests, TESTER) { try {
       ("code", name(dtt_act3.deferred_account))
       ("type", name(dtt_act3.deferred_action))
       ("requirement", name(dtt_act3.permission_name)));
-   BOOST_CHECK_THROW(CALL_TEST_FUNCTION(*this, "test_transaction", "send_deferred_tx_with_dtt_action", fc::raw::pack(dtt_act3)), subjective_block_production_exception);
+   BOOST_CHECK_THROW(CALL_TEST_FUNCTION(*this, "test_transaction", "send_deferred_tx_with_dtt_action", fc::raw::pack(dtt_act3)), unsatisfied_authorization);

    // But it should again work if the deferred transaction has a sufficient delay.
    dtt_act3.delay_sec = 10;
diff --git a/unittests/test-contracts/test_api/test_transaction.cpp b/unittests/test-contracts/test_api/test_transaction.cpp
index 065828ea307..535f9896e9b 100644
--- a/unittests/test-contracts/test_api/test_transaction.cpp
+++ b/unittests/test-contracts/test_api/test_transaction.cpp
@@ -283,7 +283,6 @@ void test_transaction::send_deferred_tx_with_dtt_action() {
    auto trx = transaction();
    trx.actions.emplace_back(deferred_act);
    trx.delay_sec = dtt_act.delay_sec;
-   cancel_deferred( 0xffffffffffffffff ); // TODO: Remove this line after fixing deferred trx replacement RAM bug
    trx.send( 0xffffffffffffffff, name{dtt_act.payer}, true );
 }

From d433884a1dcef19e795b36aafba7c65e7d3bf94c Mon Sep 17 00:00:00 2001
From: Kyle Morgan
Date: Wed, 3 Apr 2019 13:09:44 -0400
Subject: [PATCH 283/680] Add git submodule regression check

* Add submodule_check.sh, which checks whether the HEAD of any submodule on a
  pull-requested branch is older than it was previously.
* Add the corresponding buildkite pipeline to run submodule_check.sh.
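The rule the script enforces reduces each submodule to the timestamp of its last
commit (git log -1 --format=%ct) on both branches and fails if any path moved
backwards. A minimal C++ sketch of just that comparison, with illustrative paths
and timestamps rather than data from a real checkout:

   #include <cstdint>
   #include <iostream>
   #include <map>
   #include <string>

   int main() {
      // submodule path -> last-commit timestamp, one snapshot per branch
      std::map<std::string, int64_t> base{ {"libraries/fc", 1554200000} };
      std::map<std::string, int64_t> pr  { {"libraries/fc", 1554100000} }; // regressed

      int rc = 0;
      for( const auto& [path, base_ts] : base ) {
         auto it = pr.find( path );
         if( it != pr.end() && it->second < base_ts ) { // same test as the script's (( pr_ts < base_ts ))
            std::cerr << "ERROR: " << path << " is older on the PR branch\n";
            rc = 1;
         }
      }
      return rc;
   }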
--- .buildkite/pipeline.yml | 8 ++++++++ scripts/submodule_check.sh | 35 +++++++++++++++++++++++++++++++++++ 2 files changed, 43 insertions(+) create mode 100755 scripts/submodule_check.sh diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml index f83249df044..c1500cd77ee 100644 --- a/.buildkite/pipeline.yml +++ b/.buildkite/pipeline.yml @@ -614,3 +614,11 @@ steps: - "build/packages/eosio_highsierra.rb" - "build/packages/eosio.rb" timeout: 60 + + - command: | + echo "+++ :microscope: Running git submodule regression check" && \ + ./scripts/submodule_check.sh + label: "Git submodule regression check" + agents: + queue: "automation-large-builder-fleet" + timeout: 240 diff --git a/scripts/submodule_check.sh b/scripts/submodule_check.sh new file mode 100755 index 00000000000..16aace418bf --- /dev/null +++ b/scripts/submodule_check.sh @@ -0,0 +1,35 @@ +#!/bin/bash + +REPO_DIR=`mktemp -d` +git clone "$BUILDKITE_REPO" "$REPO_DIR" +git submodule update --init --recursive +cd "$REPO_DIR" + +declare -A PR_MAP +declare -A BASE_MAP + +echo "getting submodule info for $BUILDKITE_BRANCH" +git checkout "$BUILDKITE_BRANCH" &> /dev/null +git submodule update --init &> /dev/null +while read -r a b; do + PR_MAP[$a]=$b +done < <(git submodule --quiet foreach --recursive 'echo $path `git log -1 --format=%ct`') + +echo "getting submodule info for $BUILDKITE_PULL_REQUEST_BASE_BRANCH" +git checkout "$BUILDKITE_PULL_REQUEST_BASE_BRANCH" &> /dev/null +git submodule update --init &> /dev/null +while read -r a b; do + BASE_MAP[$a]=$b +done < <(git submodule --quiet foreach --recursive 'echo $path `git log -1 --format=%ct`') + +for k in "${!BASE_MAP[@]}"; do + base_ts=${BASE_MAP[$k]} + pr_ts=${PR_MAP[$k]} + echo "submodule $k" + echo " timestamp on $BUILDKITE_BRANCH: $pr_ts" + echo " timestamp on $BUILDKITE_PULL_REQUEST_BASE_BRANCH: $base_ts" + if (( $pr_ts < $base_ts)); then + echo "ERROR: $k is older on $BUILDKITE_BRANCH than $BUILDKITE_PULL_REQUEST_BASE_BRANCH" + exit 1 + fi +done From 8aa24ca6617daecede9c2ea7dc7b6282558db6b8 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Wed, 3 Apr 2019 18:13:54 -0400 Subject: [PATCH 284/680] Add back in Docker Hub deprecation that was accidentally removed --- Docker/README.md | 40 +--------------------------------------- 1 file changed, 1 insertion(+), 39 deletions(-) diff --git a/Docker/README.md b/Docker/README.md index 1aa0513cca9..6eade280f9b 100644 --- a/Docker/README.md +++ b/Docker/README.md @@ -133,45 +133,7 @@ docker volume rm keosd-data-volume ### Docker Hub -Docker Hub image available from [docker hub](https://hub.docker.com/r/eosio/eos/). 
-Create a new `docker-compose.yaml` file with the content below - -```bash -version: "3" - -services: - nodeosd: - image: eosio/eos:latest - command: /opt/eosio/bin/nodeosd.sh --data-dir /opt/eosio/bin/data-dir -e --http-alias=nodeosd:8888 --http-alias=127.0.0.1:8888 --http-alias=localhost:8888 - hostname: nodeosd - ports: - - 8888:8888 - - 9876:9876 - expose: - - "8888" - volumes: - - nodeos-data-volume:/opt/eosio/bin/data-dir - - keosd: - image: eosio/eos:latest - command: /opt/eosio/bin/keosd --wallet-dir /opt/eosio/bin/data-dir --http-server-address=127.0.0.1:8900 --http-alias=localhost:8900 --http-alias=keosd:8900 - hostname: keosd - links: - - nodeosd - volumes: - - keosd-data-volume:/opt/eosio/bin/data-dir - -volumes: - nodeos-data-volume: - keosd-data-volume: - -``` - -*NOTE:* the default version is the latest, you can change it to what you want - -run `docker pull eosio/eos:latest` - -run `docker-compose up` +Docker Hub images are now deprecated. New build images were discontinued on January 1st, 2019. The existing old images will be removed on June 1st, 2019. ### EOSIO Testnet From 7a0ffb3ccca7d91c6791f00fb257d45c81ff180f Mon Sep 17 00:00:00 2001 From: Kayan Date: Thu, 4 Apr 2019 15:56:02 +0800 Subject: [PATCH 285/680] add test cases for 6897-restructure-traces --- unittests/api_tests.cpp | 412 +++++++++++++++++- .../test-contracts/test_api/test_action.cpp | 83 ++++ .../test-contracts/test_api/test_api.cpp | 6 + .../test-contracts/test_api/test_api.hpp | 6 + .../test-contracts/test_api/test_api.wasm | Bin 67533 -> 69149 bytes 5 files changed, 506 insertions(+), 1 deletion(-) diff --git a/unittests/api_tests.cpp b/unittests/api_tests.cpp index b64a97feb7d..367b2c6bcf5 100644 --- a/unittests/api_tests.cpp +++ b/unittests/api_tests.cpp @@ -245,9 +245,42 @@ transaction_trace_ptr CallFunction(TESTER& test, T ac, const vector& data, } } +template +transaction_trace_ptr CallFunctionExpectFail(TESTER& test, T ac, const vector& data, const vector& scope = {N(testapi)}) { + { + signed_transaction trx; + + auto pl = vector{{scope[0], config::active_name}}; + if (scope.size() > 1) + for (unsigned int i=1; i < scope.size(); i++) + pl.push_back({scope[i], config::active_name}); + + action act(pl, ac); + act.data = data; + act.authorization = {{N(testapi), config::active_name}}; + trx.actions.push_back(act); + + test.set_transaction_headers(trx, test.DEFAULT_EXPIRATION_DELTA); + auto sigs = trx.sign(test.get_private_key(scope[0], "active"), test.control->get_chain_id()); + + flat_set keys; + trx.get_signature_keys(test.control->get_chain_id(), fc::time_point::maximum(), keys); + + auto c = packed_transaction::none; + + if( fc::raw::pack_size(trx) > 1000 ) { + c = packed_transaction::zlib; + } + + auto res = test.control->push_transaction(std::make_shared(trx,c), fc::time_point::maximum(), 100); + return res; + } +} + #define CALL_TEST_FUNCTION(_TESTER, CLS, MTH, DATA) CallFunction(_TESTER, test_api_action{}, DATA) #define CALL_TEST_FUNCTION_SYSTEM(_TESTER, CLS, MTH, DATA) CallFunction(_TESTER, test_chain_action{}, DATA, {config::system_account_name} ) #define CALL_TEST_FUNCTION_SCOPE(_TESTER, CLS, MTH, DATA, ACCOUNT) CallFunction(_TESTER, test_api_action{}, DATA, ACCOUNT) +#define CALL_TEST_FUNCTION_SCOPE_EXPECT_FAIL(_TESTER, CLS, MTH, DATA, ACCOUNT) CallFunctionExpectFail(_TESTER, test_api_action{}, DATA, ACCOUNT) #define CALL_TEST_FUNCTION_AND_CHECK_EXCEPTION(_TESTER, CLS, MTH, DATA, EXC, EXC_MESSAGE) \ BOOST_CHECK_EXCEPTION( \ CALL_TEST_FUNCTION( _TESTER, CLS, MTH, DATA), \ @@ -702,7 +735,7 
@@ BOOST_FIXTURE_TEST_CASE(cf_action_tests, TESTER) { try { auto ttrace = CALL_TEST_FUNCTION( *this, "test_transaction", "send_cf_action", {} ); BOOST_REQUIRE_EQUAL(ttrace->action_traces.size(), 2); - BOOST_CHECK_EQUAL(ttrace->action_traces[1].creator_action_ordinal.value, 1); + BOOST_CHECK_EQUAL((int)(ttrace->action_traces[1].creator_action_ordinal), 0); BOOST_CHECK_EQUAL(ttrace->action_traces[1].receiver, account_name("dummy")); BOOST_CHECK_EQUAL(ttrace->action_traces[1].act.account, account_name("dummy")); BOOST_CHECK_EQUAL(ttrace->action_traces[1].act.name, account_name("event1")); @@ -2060,4 +2093,381 @@ BOOST_FIXTURE_TEST_CASE(eosio_assert_code_tests, TESTER) { try { BOOST_REQUIRE_EQUAL( validate(), true ); } FC_LOG_AND_RETHROW() } +/************************************************************************************* ++ * action_ordinal_test test cases ++ *************************************************************************************/ +BOOST_FIXTURE_TEST_CASE(action_ordinal_test, TESTER) { try { + + produce_blocks(1); + create_account(N(testapi) ); + set_code( N(testapi), contracts::test_api_wasm() ); + produce_blocks(1); + create_account(N(bob) ); + set_code( N(bob), contracts::test_api_wasm() ); + produce_blocks(1); + create_account(N(charlie) ); + set_code( N(charlie), contracts::test_api_wasm() ); + produce_blocks(1); + create_account(N(david) ); + set_code( N(david), contracts::test_api_wasm() ); + produce_blocks(1); + create_account(N(erin) ); + set_code( N(erin), contracts::test_api_wasm() ); + produce_blocks(1); + + transaction_trace_ptr txn_trace = CALL_TEST_FUNCTION_SCOPE( *this, "test_action", "test_action_ordinal1", + {}, vector{ N(testapi)}); + + BOOST_REQUIRE_EQUAL( validate(), true ); + + BOOST_REQUIRE_EQUAL( txn_trace != nullptr, true); + BOOST_REQUIRE_EQUAL( txn_trace->action_traces.size(), 11); + + auto &atrace = txn_trace->action_traces; + BOOST_REQUIRE_EQUAL((int)atrace[0].action_ordinal, 1); + BOOST_REQUIRE_EQUAL((int)atrace[0].creator_action_ordinal, 0); + BOOST_REQUIRE_EQUAL((int)atrace[0].parent_action_ordinal, 0); + BOOST_REQUIRE_EQUAL(atrace[0].receipt.valid(), true); + int start_gseq = atrace[0].receipt->global_sequence; + + BOOST_REQUIRE_EQUAL((int)atrace[1].action_ordinal,2); + BOOST_REQUIRE_EQUAL((int)atrace[1].creator_action_ordinal, 1); + BOOST_REQUIRE_EQUAL((int)atrace[1].parent_action_ordinal, 1); + BOOST_REQUIRE_EQUAL(atrace[1].receipt.valid(), true); + BOOST_REQUIRE_EQUAL(atrace[1].receipt->global_sequence, start_gseq + 1); + + BOOST_REQUIRE_EQUAL((int)atrace[2].action_ordinal, 3); + BOOST_REQUIRE_EQUAL((int)atrace[2].creator_action_ordinal, 1); + BOOST_REQUIRE_EQUAL((int)atrace[2].parent_action_ordinal, 1); + BOOST_REQUIRE_EQUAL(atrace[2].receipt.valid(), true); + BOOST_REQUIRE_EQUAL(atrace[2].receipt->global_sequence, start_gseq + 4); + + BOOST_REQUIRE_EQUAL((int)atrace[3].action_ordinal, 4); + BOOST_REQUIRE_EQUAL((int)atrace[3].creator_action_ordinal, 1); + BOOST_REQUIRE_EQUAL((int)atrace[3].parent_action_ordinal, 1); + BOOST_REQUIRE_EQUAL(atrace[3].receipt.valid(), true); + BOOST_REQUIRE_EQUAL(atrace[3].receipt->global_sequence, start_gseq + 8); + + BOOST_REQUIRE_EQUAL((int)atrace[4].action_ordinal, 5); + BOOST_REQUIRE_EQUAL((int)atrace[4].creator_action_ordinal, 1); + BOOST_REQUIRE_EQUAL((int)atrace[4].parent_action_ordinal, 1); + BOOST_REQUIRE_EQUAL(atrace[4].receipt.valid(), true); + BOOST_REQUIRE_EQUAL(atrace[4].receipt->global_sequence, start_gseq + 2); + + BOOST_REQUIRE_EQUAL((int)atrace[5].action_ordinal, 6); + 
BOOST_REQUIRE_EQUAL((int)atrace[5].creator_action_ordinal, 2); + BOOST_REQUIRE_EQUAL((int)atrace[5].parent_action_ordinal, 1); + BOOST_REQUIRE_EQUAL(atrace[5].receipt.valid(), true); + BOOST_REQUIRE_EQUAL(atrace[5].receipt->global_sequence, start_gseq + 9); + + BOOST_REQUIRE_EQUAL((int)atrace[6].action_ordinal, 7); + BOOST_REQUIRE_EQUAL((int)atrace[6].creator_action_ordinal,2); + BOOST_REQUIRE_EQUAL((int)atrace[6].parent_action_ordinal, 1); + BOOST_REQUIRE_EQUAL(atrace[6].receipt.valid(), true); + BOOST_REQUIRE_EQUAL(atrace[6].receipt->global_sequence, start_gseq + 3); + + BOOST_REQUIRE_EQUAL((int)atrace[7].action_ordinal, 8); + BOOST_REQUIRE_EQUAL((int)atrace[7].creator_action_ordinal, 5); + BOOST_REQUIRE_EQUAL((int)atrace[7].parent_action_ordinal, 1); + BOOST_REQUIRE_EQUAL(atrace[7].receipt.valid(), true); + BOOST_REQUIRE_EQUAL(atrace[7].receipt->global_sequence, start_gseq + 10); + + BOOST_REQUIRE_EQUAL((int)atrace[8].action_ordinal, 9); + BOOST_REQUIRE_EQUAL((int)atrace[8].creator_action_ordinal, 3); + BOOST_REQUIRE_EQUAL((int)atrace[8].parent_action_ordinal, 3); + BOOST_REQUIRE_EQUAL(atrace[8].receipt.valid(), true); + BOOST_REQUIRE_EQUAL(atrace[8].receipt->global_sequence, start_gseq + 5); + + BOOST_REQUIRE_EQUAL((int)atrace[9].action_ordinal, 10); + BOOST_REQUIRE_EQUAL((int)atrace[9].creator_action_ordinal, 3); + BOOST_REQUIRE_EQUAL((int)atrace[9].parent_action_ordinal, 3); + BOOST_REQUIRE_EQUAL(atrace[9].receipt.valid(), true); + BOOST_REQUIRE_EQUAL(atrace[9].receipt->global_sequence, start_gseq + 6); + + BOOST_REQUIRE_EQUAL((int)atrace[10].action_ordinal, 11); + BOOST_REQUIRE_EQUAL((int)atrace[10].creator_action_ordinal, 3); + BOOST_REQUIRE_EQUAL((int)atrace[10].parent_action_ordinal, 3); + BOOST_REQUIRE_EQUAL(atrace[10].receipt.valid(), true); + BOOST_REQUIRE_EQUAL(atrace[10].receipt->global_sequence, start_gseq + 7); +} FC_LOG_AND_RETHROW() } + + +/************************************************************************************* ++ * action_ordinal_failtest1 test cases ++ *************************************************************************************/ +BOOST_FIXTURE_TEST_CASE(action_ordinal_failtest1, TESTER) { try { + + produce_blocks(1); + create_account(N(testapi) ); + set_code( N(testapi), contracts::test_api_wasm() ); + produce_blocks(1); + create_account(N(bob) ); + set_code( N(bob), contracts::test_api_wasm() ); + produce_blocks(1); + create_account(N(charlie) ); + set_code( N(charlie), contracts::test_api_wasm() ); + produce_blocks(1); + create_account(N(david) ); + set_code( N(david), contracts::test_api_wasm() ); + produce_blocks(1); + create_account(N(erin) ); + set_code( N(erin), contracts::test_api_wasm() ); + produce_blocks(1); + + create_account(N(fail1) ); // <- make first action fails in the middle + produce_blocks(1); + + transaction_trace_ptr txn_trace = + CALL_TEST_FUNCTION_SCOPE_EXPECT_FAIL( *this, "test_action", "test_action_ordinal1", + {}, vector{ N(testapi)}); + + BOOST_REQUIRE_EQUAL( validate(), true ); + + BOOST_REQUIRE_EQUAL( txn_trace != nullptr, true); + BOOST_REQUIRE_EQUAL( txn_trace->action_traces.size(), 3); + + auto &atrace = txn_trace->action_traces; + + // fails here after creating one notify action and one inline action + BOOST_REQUIRE_EQUAL((int)atrace[0].action_ordinal, 1); + BOOST_REQUIRE_EQUAL((int)atrace[0].creator_action_ordinal, 0); + BOOST_REQUIRE_EQUAL((int)atrace[0].parent_action_ordinal, 0); + BOOST_REQUIRE_EQUAL(atrace[0].receipt.valid(), false); + BOOST_REQUIRE_EQUAL(atrace[0].except.valid(), true); + 
BOOST_REQUIRE_EQUAL(atrace[0].except->code(), 3050003); + + // not executed + BOOST_REQUIRE_EQUAL((int)atrace[1].action_ordinal, 2); + BOOST_REQUIRE_EQUAL((int)atrace[1].creator_action_ordinal, 1); + BOOST_REQUIRE_EQUAL((int)atrace[1].parent_action_ordinal, 1); + BOOST_REQUIRE_EQUAL(atrace[1].receipt.valid(), false); + BOOST_REQUIRE_EQUAL(atrace[1].except.valid(), false); + + // not executed + BOOST_REQUIRE_EQUAL((int)atrace[2].action_ordinal, 3); + BOOST_REQUIRE_EQUAL((int)atrace[2].creator_action_ordinal, 1); + BOOST_REQUIRE_EQUAL((int)atrace[2].parent_action_ordinal, 1); + BOOST_REQUIRE_EQUAL(atrace[2].receipt.valid(), false); + BOOST_REQUIRE_EQUAL(atrace[2].except.valid(), false); + +} FC_LOG_AND_RETHROW() } + +/************************************************************************************* ++ * action_ordinal_failtest2 test cases ++ *************************************************************************************/ +BOOST_FIXTURE_TEST_CASE(action_ordinal_failtest2, TESTER) { try { + + produce_blocks(1); + create_account(N(testapi) ); + set_code( N(testapi), contracts::test_api_wasm() ); + produce_blocks(1); + create_account(N(bob) ); + set_code( N(bob), contracts::test_api_wasm() ); + produce_blocks(1); + create_account(N(charlie) ); + set_code( N(charlie), contracts::test_api_wasm() ); + produce_blocks(1); + create_account(N(david) ); + set_code( N(david), contracts::test_api_wasm() ); + produce_blocks(1); + create_account(N(erin) ); + set_code( N(erin), contracts::test_api_wasm() ); + produce_blocks(1); + + create_account(N(fail3) ); // <- make action 3 fails in the middle + produce_blocks(1); + + transaction_trace_ptr txn_trace = + CALL_TEST_FUNCTION_SCOPE_EXPECT_FAIL( *this, "test_action", "test_action_ordinal1", + {}, vector{ N(testapi)}); + + BOOST_REQUIRE_EQUAL( validate(), true ); + + BOOST_REQUIRE_EQUAL( txn_trace != nullptr, true); + BOOST_REQUIRE_EQUAL( txn_trace->action_traces.size(), 8); + + auto &atrace = txn_trace->action_traces; + + // executed + BOOST_REQUIRE_EQUAL((int)atrace[0].action_ordinal, 1); + BOOST_REQUIRE_EQUAL((int)atrace[0].creator_action_ordinal, 0); + BOOST_REQUIRE_EQUAL((int)atrace[0].parent_action_ordinal, 0); + BOOST_REQUIRE_EQUAL(atrace[0].receipt.valid(), true); + BOOST_REQUIRE_EQUAL(atrace[0].except.valid(), false); + int start_gseq = atrace[0].receipt->global_sequence; + + // executed + BOOST_REQUIRE_EQUAL((int)atrace[1].action_ordinal,2); + BOOST_REQUIRE_EQUAL((int)atrace[1].creator_action_ordinal, 1); + BOOST_REQUIRE_EQUAL((int)atrace[1].parent_action_ordinal, 1); + BOOST_REQUIRE_EQUAL(atrace[1].receipt.valid(), true); + BOOST_REQUIRE_EQUAL(atrace[1].receipt->global_sequence, start_gseq + 1); + + // not executed + BOOST_REQUIRE_EQUAL((int)atrace[2].action_ordinal, 3); + BOOST_REQUIRE_EQUAL((int)atrace[2].creator_action_ordinal, 1); + BOOST_REQUIRE_EQUAL((int)atrace[2].parent_action_ordinal, 1); + BOOST_REQUIRE_EQUAL(atrace[2].receipt.valid(), false); + BOOST_REQUIRE_EQUAL(atrace[2].except.valid(), false); + + // not executed + BOOST_REQUIRE_EQUAL((int)atrace[3].action_ordinal, 4); + BOOST_REQUIRE_EQUAL((int)atrace[3].creator_action_ordinal, 1); + BOOST_REQUIRE_EQUAL((int)atrace[3].parent_action_ordinal, 1); + BOOST_REQUIRE_EQUAL(atrace[3].receipt.valid(), false); + BOOST_REQUIRE_EQUAL(atrace[3].except.valid(), false); + + // hey exception is here + BOOST_REQUIRE_EQUAL((int)atrace[4].action_ordinal, 5); + BOOST_REQUIRE_EQUAL((int)atrace[4].creator_action_ordinal, 1); + BOOST_REQUIRE_EQUAL((int)atrace[4].parent_action_ordinal, 1); + 
BOOST_REQUIRE_EQUAL(atrace[4].receipt.valid(), false); + BOOST_REQUIRE_EQUAL(atrace[4].except.valid(), true); + BOOST_REQUIRE_EQUAL(atrace[4].except->code(), 3050003); + + // not executed + BOOST_REQUIRE_EQUAL((int)atrace[5].action_ordinal, 6); + BOOST_REQUIRE_EQUAL((int)atrace[5].creator_action_ordinal, 2); + BOOST_REQUIRE_EQUAL((int)atrace[5].parent_action_ordinal, 1); + BOOST_REQUIRE_EQUAL(atrace[5].receipt.valid(), false); + BOOST_REQUIRE_EQUAL(atrace[5].except.valid(), false); + + // not executed + BOOST_REQUIRE_EQUAL((int)atrace[6].action_ordinal, 7); + BOOST_REQUIRE_EQUAL((int)atrace[6].creator_action_ordinal,2); + BOOST_REQUIRE_EQUAL((int)atrace[6].parent_action_ordinal, 1); + BOOST_REQUIRE_EQUAL(atrace[6].receipt.valid(), false); + BOOST_REQUIRE_EQUAL(atrace[6].except.valid(), false); + + // not executed + BOOST_REQUIRE_EQUAL((int)atrace[7].action_ordinal, 8); + BOOST_REQUIRE_EQUAL((int)atrace[7].creator_action_ordinal, 5); + BOOST_REQUIRE_EQUAL((int)atrace[7].parent_action_ordinal, 1); + BOOST_REQUIRE_EQUAL(atrace[7].receipt.valid(), false); + BOOST_REQUIRE_EQUAL(atrace[7].except.valid(), false); + +} FC_LOG_AND_RETHROW() } + +/************************************************************************************* ++ * action_ordinal_failtest3 test cases ++ *************************************************************************************/ +BOOST_FIXTURE_TEST_CASE(action_ordinal_failtest3, TESTER) { try { + + produce_blocks(1); + create_account(N(testapi) ); + set_code( N(testapi), contracts::test_api_wasm() ); + produce_blocks(1); + create_account(N(bob) ); + set_code( N(bob), contracts::test_api_wasm() ); + produce_blocks(1); + create_account(N(charlie) ); + set_code( N(charlie), contracts::test_api_wasm() ); + produce_blocks(1); + create_account(N(david) ); + set_code( N(david), contracts::test_api_wasm() ); + produce_blocks(1); + create_account(N(erin) ); + set_code( N(erin), contracts::test_api_wasm() ); + produce_blocks(1); + + create_account(N(failnine) ); // <- make action 9 fails in the middle + produce_blocks(1); + + transaction_trace_ptr txn_trace = + CALL_TEST_FUNCTION_SCOPE_EXPECT_FAIL( *this, "test_action", "test_action_ordinal1", + {}, vector{ N(testapi)}); + + for (auto &at : txn_trace->action_traces) { + std::cout << "\n\n"; + std::cout << FC_LOG_MESSAGE(info, "${t}", ("t", at)).get_message(); + } + + BOOST_REQUIRE_EQUAL( validate(), true ); + + BOOST_REQUIRE_EQUAL( txn_trace != nullptr, true); + BOOST_REQUIRE_EQUAL( txn_trace->action_traces.size(), 11); + + auto &atrace = txn_trace->action_traces; + + // executed + BOOST_REQUIRE_EQUAL((int)atrace[0].action_ordinal, 1); + BOOST_REQUIRE_EQUAL((int)atrace[0].creator_action_ordinal, 0); + BOOST_REQUIRE_EQUAL((int)atrace[0].parent_action_ordinal, 0); + BOOST_REQUIRE_EQUAL(atrace[0].receipt.valid(), true); + BOOST_REQUIRE_EQUAL(atrace[0].except.valid(), false); + int start_gseq = atrace[0].receipt->global_sequence; + + // executed + BOOST_REQUIRE_EQUAL((int)atrace[1].action_ordinal,2); + BOOST_REQUIRE_EQUAL((int)atrace[1].creator_action_ordinal, 1); + BOOST_REQUIRE_EQUAL((int)atrace[1].parent_action_ordinal, 1); + BOOST_REQUIRE_EQUAL(atrace[1].receipt.valid(), true); + BOOST_REQUIRE_EQUAL(atrace[1].receipt->global_sequence, start_gseq + 1); + + // executed + BOOST_REQUIRE_EQUAL((int)atrace[2].action_ordinal, 3); + BOOST_REQUIRE_EQUAL((int)atrace[2].creator_action_ordinal, 1); + BOOST_REQUIRE_EQUAL((int)atrace[2].parent_action_ordinal, 1); + BOOST_REQUIRE_EQUAL(atrace[2].receipt.valid(), true); + 
BOOST_REQUIRE_EQUAL(atrace[2].receipt->global_sequence, start_gseq + 4); + + // fails here + BOOST_REQUIRE_EQUAL((int)atrace[3].action_ordinal, 4); + BOOST_REQUIRE_EQUAL((int)atrace[3].creator_action_ordinal, 1); + BOOST_REQUIRE_EQUAL((int)atrace[3].parent_action_ordinal, 1); + BOOST_REQUIRE_EQUAL(atrace[3].receipt.valid(), false); + BOOST_REQUIRE_EQUAL(atrace[3].except.valid(), true); + BOOST_REQUIRE_EQUAL(atrace[3].except->code(), 3050003); + + // executed + BOOST_REQUIRE_EQUAL((int)atrace[4].action_ordinal, 5); + BOOST_REQUIRE_EQUAL((int)atrace[4].creator_action_ordinal, 1); + BOOST_REQUIRE_EQUAL((int)atrace[4].parent_action_ordinal, 1); + BOOST_REQUIRE_EQUAL(atrace[4].receipt.valid(), true); + BOOST_REQUIRE_EQUAL(atrace[4].receipt->global_sequence, start_gseq + 2); + + // not executed + BOOST_REQUIRE_EQUAL((int)atrace[5].action_ordinal, 6); + BOOST_REQUIRE_EQUAL((int)atrace[5].creator_action_ordinal, 2); + BOOST_REQUIRE_EQUAL((int)atrace[5].parent_action_ordinal, 1); + BOOST_REQUIRE_EQUAL(atrace[5].receipt.valid(), false); + BOOST_REQUIRE_EQUAL(atrace[5].except.valid(), false); + + // executed + BOOST_REQUIRE_EQUAL((int)atrace[6].action_ordinal, 7); + BOOST_REQUIRE_EQUAL((int)atrace[6].creator_action_ordinal,2); + BOOST_REQUIRE_EQUAL((int)atrace[6].parent_action_ordinal, 1); + BOOST_REQUIRE_EQUAL(atrace[6].receipt.valid(), true); + BOOST_REQUIRE_EQUAL(atrace[6].receipt->global_sequence, start_gseq + 3); + + // not executed + BOOST_REQUIRE_EQUAL((int)atrace[7].action_ordinal, 8); + BOOST_REQUIRE_EQUAL((int)atrace[7].creator_action_ordinal, 5); + BOOST_REQUIRE_EQUAL((int)atrace[7].parent_action_ordinal, 1); + BOOST_REQUIRE_EQUAL(atrace[7].receipt.valid(), false); + BOOST_REQUIRE_EQUAL(atrace[7].except.valid(), false); + + // executed + BOOST_REQUIRE_EQUAL((int)atrace[8].action_ordinal, 9); + BOOST_REQUIRE_EQUAL((int)atrace[8].creator_action_ordinal, 3); + BOOST_REQUIRE_EQUAL((int)atrace[8].parent_action_ordinal, 3); + BOOST_REQUIRE_EQUAL(atrace[8].receipt.valid(), true); + BOOST_REQUIRE_EQUAL(atrace[8].receipt->global_sequence, start_gseq + 5); + + // executed + BOOST_REQUIRE_EQUAL((int)atrace[9].action_ordinal, 10); + BOOST_REQUIRE_EQUAL((int)atrace[9].creator_action_ordinal, 3); + BOOST_REQUIRE_EQUAL((int)atrace[9].parent_action_ordinal, 3); + BOOST_REQUIRE_EQUAL(atrace[9].receipt.valid(), true); + BOOST_REQUIRE_EQUAL(atrace[9].receipt->global_sequence, start_gseq + 6); + + // executed + BOOST_REQUIRE_EQUAL((int)atrace[10].action_ordinal, 11); + BOOST_REQUIRE_EQUAL((int)atrace[10].creator_action_ordinal, 3); + BOOST_REQUIRE_EQUAL((int)atrace[10].parent_action_ordinal, 3); + BOOST_REQUIRE_EQUAL(atrace[10].receipt.valid(), true); + BOOST_REQUIRE_EQUAL(atrace[10].receipt->global_sequence, start_gseq + 7); + +} FC_LOG_AND_RETHROW() } + BOOST_AUTO_TEST_SUITE_END() diff --git a/unittests/test-contracts/test_api/test_action.cpp b/unittests/test-contracts/test_api/test_action.cpp index bf1985ae3ef..6424d05f513 100644 --- a/unittests/test-contracts/test_api/test_action.cpp +++ b/unittests/test-contracts/test_api/test_action.cpp @@ -259,3 +259,86 @@ void test_action::test_ram_billing_in_notify( uint64_t receiver, uint64_t code, db_store_i64( "notifytest"_n.value, "notifytest"_n.value, payer, "notifytest"_n.value, &to_notify, sizeof(to_notify) ); } } + +void test_action::test_action_ordinal1(uint64_t receiver, uint64_t code, uint64_t action) { + uint64_t _self = receiver; + if (receiver == "testapi"_n.value) { + print("exec 1"); + eosio::require_recipient( "bob"_n ); //-> exec2 + + eosio::action 
act1({name(_self), "active"_n}, name(_self), + name(WASM_TEST_ACTION("test_action", "test_action_ordinal2")), + std::tuple<>()); + act1.send(); // -> exec 5, 6, 7 + + if (is_account("fail1"_n)) { + eosio_assert(false, "fail at point 1"); + } + + eosio::action act2({name(_self), "active"_n}, name(_self), + name(WASM_TEST_ACTION("test_action", "test_action_ordinal3")), + std::tuple<>()); + act2.send(); // -> exec 9 + + eosio::require_recipient( "charlie"_n ); // -> exec 3 + + } else if (receiver == "bob"_n.value) { + print("exec 2"); + eosio::action act1({name(_self), "active"_n}, name(_self), + name(WASM_TEST_ACTION("test_action", "test_action_ordinal_foo")), + std::tuple<>()); + act1.send(); + + eosio::require_recipient( "david"_n ); // -> exec 4 + } else if (receiver == "charlie"_n.value) { + print("exec 3"); + eosio::action act1({name(_self), "active"_n}, name(_self), + name(WASM_TEST_ACTION("test_action", "test_action_ordinal_bar")), + std::tuple<>()); // exec 11 + act1.send(); + + if (is_account("fail3"_n)) { + eosio_assert(false, "fail at point 3"); + } + + } else if (receiver == "david"_n.value) { + print("exec 4"); + } else { + eosio_assert(false, "assert failed at test_action::test_action_ordinal1"); + } +} +void test_action::test_action_ordinal2(uint64_t receiver, uint64_t code, uint64_t action) { + uint64_t _self = receiver; + if (receiver == "testapi"_n.value) { + print("exec 5"); + eosio::require_recipient( "david"_n ); + eosio::require_recipient( "erin"_n ); + + eosio::action act1({name(_self), "active"_n}, name(_self), + name(WASM_TEST_ACTION("test_action", "test_action_ordinal4")), + std::tuple<>()); + act1.send(); // -> exec 8 + } else if (receiver == "david"_n.value) { + print("exec 6"); + } else if (receiver == "erin"_n.value) { + print("exec 7"); + } else { + eosio_assert(false, "assert failed at test_action::test_action_ordinal2"); + } +} +void test_action::test_action_ordinal4(uint64_t receiver, uint64_t code, uint64_t action) { + print("exec 8"); +} +void test_action::test_action_ordinal3(uint64_t receiver, uint64_t code, uint64_t action) { + print("exec 9"); + + if (is_account("failnine"_n)) { + eosio_assert(false, "fail at point 9"); + } +} +void test_action::test_action_ordinal_foo(uint64_t receiver, uint64_t code, uint64_t action) { + print("exec 10"); +} +void test_action::test_action_ordinal_bar(uint64_t receiver, uint64_t code, uint64_t action) { + print("exec 11"); +} diff --git a/unittests/test-contracts/test_api/test_api.cpp b/unittests/test-contracts/test_api/test_api.cpp index 598990dc1a7..241d4762a00 100644 --- a/unittests/test-contracts/test_api/test_api.cpp +++ b/unittests/test-contracts/test_api/test_api.cpp @@ -64,6 +64,12 @@ extern "C" { WASM_TEST_HANDLER ( test_action, test_publication_time ); WASM_TEST_HANDLER ( test_action, test_assert_code ); WASM_TEST_HANDLER_EX( test_action, test_ram_billing_in_notify ); + WASM_TEST_HANDLER_EX( test_action, test_action_ordinal1 ); + WASM_TEST_HANDLER_EX( test_action, test_action_ordinal2 ); + WASM_TEST_HANDLER_EX( test_action, test_action_ordinal3 ); + WASM_TEST_HANDLER_EX( test_action, test_action_ordinal4 ); + WASM_TEST_HANDLER_EX( test_action, test_action_ordinal_foo ); + WASM_TEST_HANDLER_EX( test_action, test_action_ordinal_bar ); // test named actions // We enforce action name matches action data type name, so name mangling will not work for these tests. 
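One consistent reading of the assertions above: action_ordinal numbers traces in
creation order, creator_action_ordinal records the action whose execution
scheduled this trace, and parent_action_ordinal skips over require_recipient
notifications to the closest proper ancestor. A self-contained sketch of that
reading, checked against the eleven traces of test_action_ordinal1 (trace_row
and the table are illustrative, not the chain's actual structures):

   #include <cassert>
   #include <cstdint>
   #include <vector>

   struct trace_row { uint32_t creator; bool is_notify; }; // index i holds ordinal i+1

   // parent = nearest ancestor through creator links that is not a notification;
   // 0 means a top-level (input) action
   uint32_t parent_of( const std::vector<trace_row>& t, uint32_t ordinal ) {
      uint32_t c = t[ordinal - 1].creator;
      while( c != 0 && t[c - 1].is_notify )
         c = t[c - 1].creator;
      return c;
   }

   int main() {
      // creator links and notification flags for ordinals 1..11 above
      std::vector<trace_row> t = {
         {0,false}, {1,true}, {1,false}, {1,false}, {1,true},  // 1-5
         {2,false}, {2,true}, {5,false}, {3,true},  {3,true},  // 6-10
         {3,false}                                             // 11
      };
      assert( parent_of( t, 2 ) == 1 ); // bob notify hangs off the input action
      assert( parent_of( t, 8 ) == 1 ); // bar: created by notify 5, parent skips to 1
      assert( parent_of( t, 9 ) == 3 ); // created by inline action 3, a proper parent
      return 0;
   }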
diff --git a/unittests/test-contracts/test_api/test_api.hpp b/unittests/test-contracts/test_api/test_api.hpp
index 865923fcfb2..bbcf9965352 100644
--- a/unittests/test-contracts/test_api/test_api.hpp
+++ b/unittests/test-contracts/test_api/test_api.hpp
@@ -69,6 +69,12 @@ struct test_action {
   static void test_publication_time();
   static void test_assert_code();
   static void test_ram_billing_in_notify(uint64_t receiver, uint64_t code, uint64_t action);
+  static void test_action_ordinal1(uint64_t receiver, uint64_t code, uint64_t action);
+  static void test_action_ordinal2(uint64_t receiver, uint64_t code, uint64_t action);
+  static void test_action_ordinal3(uint64_t receiver, uint64_t code, uint64_t action);
+  static void test_action_ordinal4(uint64_t receiver, uint64_t code, uint64_t action);
+  static void test_action_ordinal_foo(uint64_t receiver, uint64_t code, uint64_t action);
+  static void test_action_ordinal_bar(uint64_t receiver, uint64_t code, uint64_t action);
 };

 struct test_db {
diff --git a/unittests/test-contracts/test_api/test_api.wasm b/unittests/test-contracts/test_api/test_api.wasm
index c7a7601ace1eba1bdb1353776620bcfc72ebbbaa..dc9a7125d82e9770c5724b76b05ef779bc39d1ba 100755
Binary files a/unittests/test-contracts/test_api/test_api.wasm and b/unittests/test-contracts/test_api/test_api.wasm differ
[GIT binary patch (delta 20458) elided: base85 payload of the rebuilt test_api.wasm artifact]

From da3d8d18b4099f84494b46282ef6d2d68e255a52 Mon Sep 17 00:00:00 2001
From: Kayan
Date: Thu, 4 Apr 2019 16:00:07 +0800
Subject: [PATCH 286/680] remove debug print

---
 unittests/api_tests.cpp | 5 -----
 1 file changed, 5 deletions(-)

diff --git a/unittests/api_tests.cpp b/unittests/api_tests.cpp
index 367b2c6bcf5..a83b1a94b9a 100644
--- a/unittests/api_tests.cpp
+++ b/unittests/api_tests.cpp
@@ -2377,11 +2377,6 @@ BOOST_FIXTURE_TEST_CASE(action_ordinal_failtest3, TESTER) { try {
    CALL_TEST_FUNCTION_SCOPE_EXPECT_FAIL( *this, "test_action", "test_action_ordinal1", {},
       vector{ N(testapi)});

-   for (auto &at : txn_trace->action_traces) {
-      std::cout << "\n\n";
-      std::cout << FC_LOG_MESSAGE(info, "${t}", ("t", at)).get_message();
-   }
-
    BOOST_REQUIRE_EQUAL( validate(), true );

    BOOST_REQUIRE_EQUAL( txn_trace != nullptr, true);

From bff645372d7a3250668d7fbfd857a08c02beaeda Mon Sep 17 00:00:00 2001
From: Kayan
Date: Thu, 4 Apr 2019 17:58:08 +0800
Subject: [PATCH 287/680] fix test case

---
 unittests/api_tests.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/unittests/api_tests.cpp b/unittests/api_tests.cpp
index a83b1a94b9a..1290b02c33a 100644
--- a/unittests/api_tests.cpp
+++ b/unittests/api_tests.cpp
@@ -735,7 +735,7 @@ BOOST_FIXTURE_TEST_CASE(cf_action_tests, TESTER) { try {
       auto ttrace = CALL_TEST_FUNCTION( *this, "test_transaction", "send_cf_action", {} );

       BOOST_REQUIRE_EQUAL(ttrace->action_traces.size(), 2);
-      BOOST_CHECK_EQUAL((int)(ttrace->action_traces[1].creator_action_ordinal), 0);
+      BOOST_CHECK_EQUAL((int)(ttrace->action_traces[1].creator_action_ordinal), 1);
       BOOST_CHECK_EQUAL(ttrace->action_traces[1].receiver, account_name("dummy"));
       BOOST_CHECK_EQUAL(ttrace->action_traces[1].act.account, account_name("dummy"));
       BOOST_CHECK_EQUAL(ttrace->action_traces[1].act.name, account_name("event1"));

From 3be5baaa031925407806c5903272acc25e7b582e Mon Sep 17 00:00:00 2001
From: Andrianto Lie
Date: Thu, 4 Apr 2019 18:19:28 +0800
Subject: [PATCH 288/680] Add additional API for push transaction backward compatibility

---
 plugins/chain_api_plugin/chain_api_plugin.cpp |   4 +-
 plugins/chain_plugin/chain_plugin.cpp         | 101 +++++++++++++++---
 .../eosio/chain_plugin/chain_plugin.hpp       |   8 ++
 3 files changed, 98 insertions(+), 15 deletions(-)

diff --git a/plugins/chain_api_plugin/chain_api_plugin.cpp b/plugins/chain_api_plugin/chain_api_plugin.cpp
index 6433ce02ba0..bab3f5ce266 100644
---
a/plugins/chain_api_plugin/chain_api_plugin.cpp +++ b/plugins/chain_api_plugin/chain_api_plugin.cpp @@ -106,7 +106,9 @@ void chain_api_plugin::plugin_startup() { CHAIN_RO_CALL(get_transaction_id, 200), CHAIN_RW_CALL_ASYNC(push_block, chain_apis::read_write::push_block_results, 202), CHAIN_RW_CALL_ASYNC(push_transaction, chain_apis::read_write::push_transaction_results, 202), - CHAIN_RW_CALL_ASYNC(push_transactions, chain_apis::read_write::push_transactions_results, 202) + CHAIN_RW_CALL_ASYNC(push_transactions, chain_apis::read_write::push_transactions_results, 202), + CHAIN_RW_CALL_ASYNC(send_transaction, chain_apis::read_write::send_transaction_results, 202), + CHAIN_RW_CALL_ASYNC(send_transactions, chain_apis::read_write::send_transactions_results, 202) }); } diff --git a/plugins/chain_plugin/chain_plugin.cpp b/plugins/chain_plugin/chain_plugin.cpp index aca508db776..b586e6c2b6e 100644 --- a/plugins/chain_plugin/chain_plugin.cpp +++ b/plugins/chain_plugin/chain_plugin.cpp @@ -1880,7 +1880,80 @@ void read_write::push_block(read_write::push_block_params&& params, next_functio } CATCH_AND_CALL(next); } +static fc::variant convert_tx_trace_to_tree_struct(const fc::variant& tx_result) { + fc::mutable_variant_object tx_trace_mvo(tx_result); + + std::multimap act_trace_multimap; + for (auto& act_trace: tx_trace_mvo["action_traces"].get_array()) { + act_trace_multimap.emplace(act_trace["parent_action_ordinal"].as(), act_trace); + } + std::function(fc::unsigned_int)> convert_act_trace_to_tree_struct = [&](fc::unsigned_int parent_action_ordinal) { + vector reordered_act_traces; + auto range = act_trace_multimap.equal_range(parent_action_ordinal); + for (auto it = range.first; it != range.second; it++) { + fc::mutable_variant_object act_trace_mvo(it->second); + act_trace_mvo["inline_traces"] = convert_act_trace_to_tree_struct(it->second["action_ordinal"].as()); + act_trace_mvo.erase("action_ordinal"); + act_trace_mvo.erase("creator_action_ordinal"); + act_trace_mvo.erase("parent_action_ordinal"); + act_trace_mvo.erase("receiver"); + reordered_act_traces.push_back(act_trace_mvo); + } + std::sort(reordered_act_traces.begin(), reordered_act_traces.end(), [](auto& a, auto&b) { + return a["receipt"]["global_sequence"] < b["receipt"]["global_sequence"]; + }); + return reordered_act_traces; + }; + tx_trace_mvo["action_traces"] = convert_act_trace_to_tree_struct(0); + + tx_trace_mvo.erase("account_ram_delta"); + + return tx_trace_mvo; +} + void read_write::push_transaction(const read_write::push_transaction_params& params, next_function next) { + try { + auto wrapped_next = [=](const fc::static_variant& result) { + try { + if (result.contains()) { + next(result); + } else { + read_write::send_transaction_results modified_result = std::move(result.get()); + modified_result.processed = convert_tx_trace_to_tree_struct(modified_result.processed); + next(modified_result); + } + } CATCH_AND_CALL(next); + }; + send_transaction(params, wrapped_next); + } catch ( boost::interprocess::bad_alloc& ) { + chain_plugin::handle_db_exhaustion(); + } CATCH_AND_CALL(next); +} + +void read_write::push_transactions(const read_write::push_transactions_params& params, next_function next) { + try { + auto wrapped_next = [=](const fc::static_variant& result) { + try { + if (result.contains()) { + next(result); + } else { + read_write::send_transactions_results modified_results = std::move(result.get()); + for (auto& modified_result: modified_results) { + if (modified_result.transaction_id != transaction_id_type()) { + 
modified_result.processed = convert_tx_trace_to_tree_struct(modified_result.processed); + } + } + next(modified_results); + } + } CATCH_AND_CALL(next); + }; + send_transactions(params, wrapped_next); + } catch ( boost::interprocess::bad_alloc& ) { + chain_plugin::handle_db_exhaustion(); + } CATCH_AND_CALL(next); +} + +void read_write::send_transaction(const read_write::send_transaction_params& params, next_function next) { try { auto pretty_input = std::make_shared(); @@ -1906,50 +1979,50 @@ void read_write::push_transaction(const read_write::push_transaction_params& par } const chain::transaction_id_type& id = trx_trace_ptr->id; - next(read_write::push_transaction_results{id, output}); + next(read_write::send_transaction_results{id, output}); } CATCH_AND_CALL(next); } }); - - } catch ( boost::interprocess::bad_alloc& ) { chain_plugin::handle_db_exhaustion(); } CATCH_AND_CALL(next); } -static void push_recurse(read_write* rw, int index, const std::shared_ptr& params, const std::shared_ptr& results, const next_function& next) { - auto wrapped_next = [=](const fc::static_variant& result) { +static void send_recurse(read_write* rw, int index, const std::shared_ptr& params, const std::shared_ptr& results, const next_function& next) { + auto wrapped_next = [=](const fc::static_variant& result) { if (result.contains()) { const auto& e = result.get(); - results->emplace_back( read_write::push_transaction_results{ transaction_id_type(), fc::mutable_variant_object( "error", e->to_detail_string() ) } ); + results->emplace_back( read_write::send_transaction_results{ transaction_id_type(), fc::mutable_variant_object( "error", e->to_detail_string() ) } ); } else { - const auto& r = result.get(); + const auto& r = result.get(); results->emplace_back( r ); } size_t next_index = index + 1; if (next_index < params->size()) { - push_recurse(rw, next_index, params, results, next ); + send_recurse(rw, next_index, params, results, next ); } else { next(*results); } }; - rw->push_transaction(params->at(index), wrapped_next); + rw->send_transaction(params->at(index), wrapped_next); } -void read_write::push_transactions(const read_write::push_transactions_params& params, next_function next) { +void read_write::send_transactions(const read_write::send_transactions_params& params, next_function next) { try { EOS_ASSERT( params.size() <= 1000, too_many_tx_at_once, "Attempt to push too many transactions at once" ); - auto params_copy = std::make_shared(params.begin(), params.end()); - auto result = std::make_shared(); + auto params_copy = std::make_shared(params.begin(), params.end()); + auto result = std::make_shared(); result->reserve(params.size()); - push_recurse(this, 0, params_copy, result, next); - + send_recurse(this, 0, params_copy, result, next); + } catch ( boost::interprocess::bad_alloc& ) { + chain_plugin::handle_db_exhaustion(); } CATCH_AND_CALL(next); } + read_only::get_abi_results read_only::get_abi( const get_abi_params& params )const { get_abi_results result; result.account_name = params.account_name; diff --git a/plugins/chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp b/plugins/chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp index 733bc32825f..80353e5be46 100644 --- a/plugins/chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp +++ b/plugins/chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp @@ -600,6 +600,14 @@ class read_write { using push_transactions_results = vector; void push_transactions(const push_transactions_params& params, 
chain::plugin_interface::next_function next); + using send_transaction_params = push_transaction_params; + using send_transaction_results = push_transaction_results; + void send_transaction(const send_transaction_params& params, chain::plugin_interface::next_function next); + + using send_transactions_params = vector; + using send_transactions_results = vector; + void send_transactions(const send_transactions_params& params, chain::plugin_interface::next_function next); + friend resolver_factory; }; From 0cfa5b3bce938b348692023612234c4b771b3961 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Thu, 4 Apr 2019 11:23:09 -0400 Subject: [PATCH 289/680] Update to fc master with set_os_thread_name --- libraries/fc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/fc b/libraries/fc index 809c8b7434e..ae6ec564f0d 160000 --- a/libraries/fc +++ b/libraries/fc @@ -1 +1 @@ -Subproject commit 809c8b7434e6797efa8dd1bfba546b551e4d830e +Subproject commit ae6ec564f0db6d3378348ef6b475042e332e612a From 2b20aa3902ca28a0f75db284baff6719f14533e4 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Thu, 4 Apr 2019 11:30:01 -0400 Subject: [PATCH 290/680] Fix warning about unneeded capture --- plugins/http_plugin/http_plugin.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/http_plugin/http_plugin.cpp b/plugins/http_plugin/http_plugin.cpp index befa9686287..3345fcdb68c 100644 --- a/plugins/http_plugin/http_plugin.cpp +++ b/plugins/http_plugin/http_plugin.cpp @@ -304,7 +304,7 @@ namespace eosio { try { handler_itr->second( resource, body, [&ioc, &bytes_in_flight, con]( int code, fc::variant response_body ) { - boost::asio::post( ioc, [&ioc, response_body{std::move( response_body )}, &bytes_in_flight, con, code]() mutable { + boost::asio::post( ioc, [response_body{std::move( response_body )}, &bytes_in_flight, con, code]() mutable { std::string json = fc::json::to_string( response_body ); response_body.clear(); const size_t json_size = json.size(); From 3b0261aa86428ef8b612ee2bc76648859757aef5 Mon Sep 17 00:00:00 2001 From: Zach Butler Date: Thu, 4 Apr 2019 13:31:10 -0400 Subject: [PATCH 291/680] Removed deprecated operating systems from Buildkite pipelines --- .buildkite/long_running_tests.yml | 118 +------------- .buildkite/pipeline.yml | 260 +++--------------------------- 2 files changed, 28 insertions(+), 350 deletions(-) diff --git a/.buildkite/long_running_tests.yml b/.buildkite/long_running_tests.yml index dd0d6cbee9d..c242d219b0e 100644 --- a/.buildkite/long_running_tests.yml +++ b/.buildkite/long_running_tests.yml @@ -1,6 +1,5 @@ steps: - - - command: | + - command: | # Ubuntu 16.04 Build echo "+++ :hammer: Building" ./scripts/eosio_build.sh -y echo "--- :compression: Compressing build directory" @@ -21,7 +20,7 @@ steps: workdir: /data/job timeout: 60 - - command: | + - command: | # Ubuntu 18.04 Build echo "+++ :hammer: Building" ./scripts/eosio_build.sh -y echo "--- :compression: Compressing build directory" @@ -42,7 +41,7 @@ steps: workdir: /data/job timeout: 60 - - command: | + - command: | # CentOS 7 Build echo "+++ :hammer: Building" ./scripts/eosio_build.sh -y echo "--- :compression: Compressing build directory" @@ -63,28 +62,7 @@ steps: workdir: /data/job timeout: 60 - - command: | - echo "+++ :hammer: Building" - ./scripts/eosio_build.sh -y - echo "--- :compression: Compressing build directory" - tar -pczf build.tar.gz build/ - label: ":aws: 1 Build" - agents: - queue: "automation-large-builder-fleet" - artifact_paths: "build.tar.gz" - plugins: - 
ecr#v1.1.4: - login: true - account_ids: "436617320021" - no-include-email: true - region: "us-west-2" - docker#v2.1.0: - debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux1_2-2" - workdir: /data/job - timeout: 60 - - - command: | + - command: | # Amazon Linux 2 Build echo "+++ :hammer: Building" ./scripts/eosio_build.sh -y echo "--- :compression: Compressing build directory" @@ -105,28 +83,7 @@ steps: workdir: /data/job timeout: 60 - - command: | - echo "+++ :hammer: Building" - ./scripts/eosio_build.sh -y - echo "--- :compression: Compressing build directory" - tar -pczf build.tar.gz build/ - label: ":fedora: 27 Build" - agents: - queue: "automation-large-builder-fleet" - artifact_paths: "build.tar.gz" - plugins: - ecr#v1.1.4: - login: true - account_ids: "436617320021" - no-include-email: true - region: "us-west-2" - docker#v2.1.0: - debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:fedora27_2-2" - workdir: /data/job - timeout: 60 - - - command: | + - command: | # macOS Mojave Build echo "--- Creating symbolic link to job directory :file_folder:" sleep 5 && ln -s "$(pwd)" /data/job && cd /data/job echo "+++ Building :hammer:" @@ -140,20 +97,6 @@ steps: artifact_paths: "build.tar.gz" timeout: 60 - - command: | - echo "--- Creating symbolic link to job directory :file_folder:" - sleep 5 && ln -s "$(pwd)" /data/job && cd /data/job - echo "+++ Building :hammer:" - ./scripts/eosio_build.sh -y - echo "--- Compressing build directory :compression:" - tar -pczf build.tar.gz build/ - label: ":darwin: High Sierra Build" - agents: - - "role=builder-v2-1" - - "os=high-sierra" - artifact_paths: "build.tar.gz" - timeout: 60 - - wait - command: | # Ubuntu 16.04 Tests @@ -216,26 +159,6 @@ steps: workdir: /data/job timeout: 90 - - command: | # Amazon AWS-1 Linux Tests - echo "--- :arrow_down: Downloading Build Directory" - buildkite-agent artifact download "build.tar.gz" . --step ":aws: 1 Build" - echo "+++ :microscope: Running LR Tests" - ./scripts/long-running-test.sh - label: ":aws: 1 LR Tests" - agents: - queue: "automation-large-builder-fleet" - plugins: - ecr#v1.1.4: - login: true - account_ids: "436617320021" - no-include-email: true - region: "us-west-2" - docker#v2.1.0: - debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux1_2-2" - workdir: /data/job - timeout: 90 - - command: | # Amazon AWS-2 Linux Tests echo "--- :arrow_down: Downloading Build Directory" buildkite-agent artifact download "build.tar.gz" . --step ":aws: 2 Build" @@ -256,37 +179,6 @@ steps: workdir: /data/job timeout: 90 - - command: | # Fedora Tests - echo "--- :arrow_down: Downloading Build Directory" - buildkite-agent artifact download "build.tar.gz" . --step ":fedora: 27 Build" - echo "+++ :microscope: Running LR Tests" - ./scripts/long-running-test.sh - label: ":fedora: 27 LR Tests" - agents: - queue: "automation-large-builder-fleet" - plugins: - ecr#v1.1.4: - login: true - account_ids: "436617320021" - no-include-email: true - region: "us-west-2" - docker#v2.1.0: - debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:fedora27_2-2" - workdir: /data/job - timeout: 90 - - - command: | # High Sierra Tests - echo "--- :arrow_down: Downloading Build Directory" - buildkite-agent artifact download "build.tar.gz" . 
--step ":darwin: High Sierra Build" - echo "+++ :microscope: Running LR Tests" - ln -s "$(pwd)" /data/job && ./scripts/long-running-test.sh - label: ":darwin: High Sierra LR Tests" - agents: - - "role=tester-v2-1" - - "os=high-sierra" - timeout: 90 - - command: | # Mojave Tests echo "--- :arrow_down: Downloading Build Directory" buildkite-agent artifact download "build.tar.gz" . --step ":darwin: Mojave Build" diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml index c1500cd77ee..e98cba4902f 100644 --- a/.buildkite/pipeline.yml +++ b/.buildkite/pipeline.yml @@ -1,6 +1,5 @@ steps: - - - command: | + - command: | # Ubuntu 16.04 Build echo "+++ :hammer: Building" ./scripts/eosio_build.sh -y echo "--- :compression: Compressing build directory" @@ -21,7 +20,7 @@ steps: workdir: /data/job timeout: 60 - - command: | + - command: | # Ubuntu 18.04 Build echo "+++ :hammer: Building" ./scripts/eosio_build.sh -y echo "--- :compression: Compressing build directory" @@ -42,7 +41,7 @@ steps: workdir: /data/job timeout: 60 - - command: | + - command: | # CentOS 7 Build echo "+++ :hammer: Building" ./scripts/eosio_build.sh -y echo "--- :compression: Compressing build directory" @@ -63,28 +62,7 @@ steps: workdir: /data/job timeout: 60 - - command: | - echo "+++ :hammer: Building" - ./scripts/eosio_build.sh -y - echo "--- :compression: Compressing build directory" - tar -pczf build.tar.gz build/ - label: ":aws: 1 Build" - agents: - queue: "automation-large-builder-fleet" - artifact_paths: "build.tar.gz" - plugins: - ecr#v1.1.4: - login: true - account_ids: "436617320021" - no-include-email: true - region: "us-west-2" - docker#v2.1.0: - debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux1_2-2" - workdir: /data/job - timeout: 60 - - - command: | + - command: | # Amazon Linux 2 Build echo "+++ :hammer: Building" ./scripts/eosio_build.sh -y echo "--- :compression: Compressing build directory" @@ -105,28 +83,7 @@ steps: workdir: /data/job timeout: 60 - - command: | - echo "+++ :hammer: Building" - ./scripts/eosio_build.sh -y - echo "--- :compression: Compressing build directory" - tar -pczf build.tar.gz build/ - label: ":fedora: 27 Build" - agents: - queue: "automation-large-builder-fleet" - artifact_paths: "build.tar.gz" - plugins: - ecr#v1.1.4: - login: true - account_ids: "436617320021" - no-include-email: true - region: "us-west-2" - docker#v2.1.0: - debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:fedora27_2-2" - workdir: /data/job - timeout: 60 - - - command: | + - command: | # macOS Mojave Build echo "--- Creating symbolic link to job directory :file_folder:" sleep 5 && ln -s "$(pwd)" /data/job && cd /data/job echo "+++ Building :hammer:" @@ -140,20 +97,6 @@ steps: artifact_paths: "build.tar.gz" timeout: 60 - - command: | - echo "--- Creating symbolic link to job directory :file_folder:" - sleep 5 && ln -s "$(pwd)" /data/job && cd /data/job - echo "+++ Building :hammer:" - ./scripts/eosio_build.sh -y - echo "--- Compressing build directory :compression:" - tar -pczf build.tar.gz build/ - label: ":darwin: High Sierra Build" - agents: - - "role=builder-v2-1" - - "os=high-sierra" - artifact_paths: "build.tar.gz" - timeout: 60 - - wait # Ubuntu 16.04 Tests @@ -279,47 +222,6 @@ steps: workdir: /data/job timeout: 60 - # Amazon AWS-1 Linux Tests - - command: | - echo "--- :arrow_down: Downloading Build Directory" - buildkite-agent artifact download "build.tar.gz" . 
--step ":aws: 1 Build" - echo "+++ :microscope: Running Tests" - ./scripts/parallel-test.sh - label: ":aws: 1 Tests" - agents: - queue: "automation-large-builder-fleet" - plugins: - ecr#v1.1.4: - login: true - account_ids: "436617320021" - no-include-email: true - region: "us-west-2" - docker#v2.1.0: - debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux1_2-2" - workdir: /data/job - timeout: 60 - - - command: | - echo "--- :arrow_down: Downloading Build Directory" - buildkite-agent artifact download "build.tar.gz" . --step ":aws: 1 Build" - echo "+++ :microscope: Running Tests" - ./scripts/serial-test.sh - label: ":aws: 1 NP Tests" - agents: - queue: "automation-large-builder-fleet" - plugins: - ecr#v1.1.4: - login: true - account_ids: "436617320021" - no-include-email: true - region: "us-west-2" - docker#v2.1.0: - debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:amazonlinux1_2-2" - workdir: /data/job - timeout: 60 - # Amazon AWS-2 Linux Tests - command: | echo "--- :arrow_down: Downloading Build Directory" @@ -361,71 +263,6 @@ steps: workdir: /data/job timeout: 60 - # Fedora Tests - - command: | - echo "--- :arrow_down: Downloading Build Directory" - buildkite-agent artifact download "build.tar.gz" . --step ":fedora: 27 Build" - echo "+++ :microscope: Running Tests" - ./scripts/parallel-test.sh - label: ":fedora: 27 Tests" - agents: - queue: "automation-large-builder-fleet" - plugins: - ecr#v1.1.4: - login: true - account_ids: "436617320021" - no-include-email: true - region: "us-west-2" - docker#v2.1.0: - debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:fedora27_2-2" - workdir: /data/job - timeout: 60 - - - command: | - echo "--- :arrow_down: Downloading Build Directory" - buildkite-agent artifact download "build.tar.gz" . --step ":fedora: 27 Build" - echo "+++ :microscope: Running Tests" - ./scripts/serial-test.sh - label: ":fedora: 27 NP Tests" - agents: - queue: "automation-large-builder-fleet" - plugins: - ecr#v1.1.4: - login: true - account_ids: "436617320021" - no-include-email: true - region: "us-west-2" - docker#v2.1.0: - debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:fedora27_2-2" - workdir: /data/job - timeout: 60 - - # High Sierra Tests - - command: | - echo "--- :arrow_down: Downloading Build Directory" - buildkite-agent artifact download "build.tar.gz" . --step ":darwin: High Sierra Build" - echo "+++ :microscope: Running Tests" - ln -s "$(pwd)" /data/job - ./scripts/parallel-test.sh - label: ":darwin: High Sierra Tests" - agents: - - "role=tester-v2-1" - - "os=high-sierra" - timeout: 60 - - - command: | - echo "--- :arrow_down: Downloading Build Directory" - buildkite-agent artifact download "build.tar.gz" . --step ":darwin: High Sierra Build" - echo "+++ :microscope: Running Tests" - ln -s "$(pwd)" /data/job && ./scripts/serial-test.sh - label: ":darwin: High Sierra NP Tests" - agents: - - "role=tester-v2-1" - - "os=high-sierra" - timeout: 60 - # Mojave Tests - command: | echo "--- :arrow_down: Downloading Build Directory" @@ -452,37 +289,7 @@ steps: - wait - - command: | - echo "--- :arrow_down: Downloading build directory" - buildkite-agent artifact download "build.tar.gz" . 
--step ":darwin: High Sierra Build" - tar -zxf build.tar.gz - echo "+++ :microscope: Starting package build" - ln -s "$(pwd)" /data/job && cd /data/job/build/packages && bash generate_package.sh brew - label: ":darwin: High Sierra Package Builder" - agents: - - "role=builder-v2-1" - - "os=high-sierra" - artifact_paths: - - "build/packages/*.tar.gz" - - "build/packages/*.rb" - timeout: 60 - - - command: | - echo "--- :arrow_down: Downloading build directory" - buildkite-agent artifact download "build.tar.gz" . --step ":darwin: Mojave Build" - tar -zxf build.tar.gz - echo "+++ :microscope: Starting package build" - ln -s "$(pwd)" /data/job && cd /data/job/build/packages && bash generate_package.sh brew - label: ":darwin: Mojave Package Builder" - agents: - - "role=builder-v2-1" - - "os=mojave" - artifact_paths: - - "build/packages/*.tar.gz" - - "build/packages/*.rb" - timeout: 60 - - - command: | + - command: | # Ubuntu 16.04 Package Builder echo "--- :arrow_down: Downloading build directory" buildkite-agent artifact download "build.tar.gz" . --step ":ubuntu: 16.04 Build" tar -zxf build.tar.gz @@ -508,7 +315,7 @@ steps: PKGTYPE: "deb" timeout: 60 - - command: | + - command: | # Ubuntu 18.04 Package Builder echo "--- :arrow_down: Downloading build directory" buildkite-agent artifact download "build.tar.gz" . --step ":ubuntu: 18.04 Build" tar -zxf build.tar.gz @@ -534,40 +341,7 @@ steps: PKGTYPE: "deb" timeout: 60 - - command: | - echo "--- :arrow_down: Downloading build directory" - buildkite-agent artifact download "build.tar.gz" . --step ":fedora: 27 Build" - tar -zxf build.tar.gz - echo "+++ :microscope: Starting package build" - yum install -y rpm-build - mkdir -p /root/rpmbuild/BUILD - mkdir -p /root/rpmbuild/BUILDROOT - mkdir -p /root/rpmbuild/RPMS - mkdir -p /root/rpmbuild/SOURCES - mkdir -p /root/rpmbuild/SPECS - mkdir -p /root/rpmbuild/SRPMS - cd /data/job/build/packages && bash generate_package.sh rpm - label: ":fedora: 27 Package builder" - agents: - queue: "automation-large-builder-fleet" - artifact_paths: - - "build/packages/*.rpm" - plugins: - ecr#v1.1.4: - login: true - account_ids: "436617320021" - no-include-email: true - region: "us-west-2" - docker#v2.1.0: - debug: true - image: "436617320021.dkr.ecr.us-west-2.amazonaws.com/ci:fedora27_2-2" - workdir: /data/job - env: - OS: "fc27" - PKGTYPE: "rpm" - timeout: 60 - - - command: | + - command: | # CentOS 7 Package Builder echo "--- :arrow_down: Downloading build directory" buildkite-agent artifact download "build.tar.gz" . --step ":centos: 7 Build" tar -zxf build.tar.gz @@ -600,18 +374,30 @@ steps: PKGTYPE: "rpm" timeout: 60 + - command: | # macOS Mojave Package Builder + echo "--- :arrow_down: Downloading build directory" + buildkite-agent artifact download "build.tar.gz" . --step ":darwin: Mojave Build" + tar -zxf build.tar.gz + echo "+++ :microscope: Starting package build" + ln -s "$(pwd)" /data/job && cd /data/job/build/packages && bash generate_package.sh brew + label: ":darwin: Mojave Package Builder" + agents: + - "role=builder-v2-1" + - "os=mojave" + artifact_paths: + - "build/packages/*.tar.gz" + - "build/packages/*.rb" + timeout: 60 + - wait - command: | echo "--- :arrow_down: Downloading brew files" - buildkite-agent artifact download "build/packages/eosio.rb" . --step ":darwin: High Sierra Package Builder" - mv build/packages/eosio.rb build/packages/eosio_highsierra.rb buildkite-agent artifact download "build/packages/eosio.rb" . 
--step ":darwin: Mojave Package Builder" label: ":darwin: Brew Updater" agents: queue: "automation-large-builder-fleet" artifact_paths: - - "build/packages/eosio_highsierra.rb" - "build/packages/eosio.rb" timeout: 60 From cddd437300e4269b6e1be88f187fc75402625f39 Mon Sep 17 00:00:00 2001 From: Zach Butler Date: Thu, 4 Apr 2019 13:42:37 -0400 Subject: [PATCH 292/680] YAML is space-sensitive --- .buildkite/pipeline.yml | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml index e98cba4902f..19bbdf114ff 100644 --- a/.buildkite/pipeline.yml +++ b/.buildkite/pipeline.yml @@ -375,19 +375,19 @@ steps: timeout: 60 - command: | # macOS Mojave Package Builder - echo "--- :arrow_down: Downloading build directory" - buildkite-agent artifact download "build.tar.gz" . --step ":darwin: Mojave Build" - tar -zxf build.tar.gz - echo "+++ :microscope: Starting package build" - ln -s "$(pwd)" /data/job && cd /data/job/build/packages && bash generate_package.sh brew - label: ":darwin: Mojave Package Builder" - agents: - - "role=builder-v2-1" - - "os=mojave" - artifact_paths: - - "build/packages/*.tar.gz" - - "build/packages/*.rb" - timeout: 60 + echo "--- :arrow_down: Downloading build directory" + buildkite-agent artifact download "build.tar.gz" . --step ":darwin: Mojave Build" + tar -zxf build.tar.gz + echo "+++ :microscope: Starting package build" + ln -s "$(pwd)" /data/job && cd /data/job/build/packages && bash generate_package.sh brew + label: ":darwin: Mojave Package Builder" + agents: + - "role=builder-v2-1" + - "os=mojave" + artifact_paths: + - "build/packages/*.tar.gz" + - "build/packages/*.rb" + timeout: 60 - wait From 302270b10d215a6c2451600193d9ddb3419f947d Mon Sep 17 00:00:00 2001 From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com> Date: Thu, 4 Apr 2019 15:01:05 -0400 Subject: [PATCH 293/680] fix rpm command for uninstalling eosio --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 7bce246fbc3..e4ec6e0b69d 100644 --- a/README.md +++ b/README.md @@ -63,7 +63,7 @@ $ sudo yum install ./eosio-1.7.0-rc1.el7.x86_64.rpm ``` #### Centos RPM Package Uninstall ```sh -$ sudo yum remove eosio.cdt +$ sudo yum remove eosio ``` #### Fedora RPM Package Install ```sh From c61b8d35c0126e49a2c45c1b8e23a98c9edf3859 Mon Sep 17 00:00:00 2001 From: Kyle Morgan Date: Thu, 4 Apr 2019 16:35:21 -0400 Subject: [PATCH 294/680] Make the submodule regression check more robust Previously, the git submodule regression check was based on the commit timestamp alone. There are cases where a pull request was branched off an old version of the base branch that would trigger this check, even though the submodule itself was not changed. Now, if the timestamp-based check detects the submodule is out-of-date, the branch's diff is checked as well to see if the submodule was modified. 
---
 scripts/submodule_check.sh | 10 ++++++++--
 1 file changed, 8 insertions(+), 2 deletions(-)

diff --git a/scripts/submodule_check.sh b/scripts/submodule_check.sh
index 16aace418bf..b9ec13204fa 100755
--- a/scripts/submodule_check.sh
+++ b/scripts/submodule_check.sh
@@ -29,7 +29,13 @@ for k in "${!BASE_MAP[@]}"; do
     echo "  timestamp on $BUILDKITE_BRANCH: $pr_ts"
     echo "  timestamp on $BUILDKITE_PULL_REQUEST_BASE_BRANCH: $base_ts"
     if (( $pr_ts < $base_ts)); then
-      echo "ERROR: $k is older on $BUILDKITE_BRANCH than $BUILDKITE_PULL_REQUEST_BASE_BRANCH"
-      exit 1
+      echo "$k is older on $BUILDKITE_BRANCH than $BUILDKITE_PULL_REQUEST_BASE_BRANCH; investigating..."
+
+      if for c in `git log $BUILDKITE_BRANCH ^$BUILDKITE_PULL_REQUEST_BASE_BRANCH --pretty=format:"%H"`; do git show --pretty="" --name-only $c; done | grep -q "^$k$"; then
+        echo "ERROR: $k has regressed"
+        exit 1
+      else
+        echo "$k was not in the diff; no regression detected"
+      fi
     fi
 done

From 324260dfc10d6d3f9ea3f18dd41e23d9c87582c7 Mon Sep 17 00:00:00 2001
From: arhag
Date: Thu, 4 Apr 2019 18:44:34 -0400
Subject: [PATCH 295/680] implement NO_DUPLICATE_DEFERRED_ID protocol feature #6115

---
 libraries/chain/apply_context.cpp             | 36 ++++++++-
 libraries/chain/controller.cpp                | 20 ++++-
 .../include/eosio/chain/block_header.hpp      | 35 ---------
 .../chain/include/eosio/chain/exceptions.hpp  |  4 +
 .../eosio/chain/protocol_feature_manager.hpp  |  1 +
 .../chain/include/eosio/chain/transaction.hpp | 74 +++++++++----------
 libraries/chain/include/eosio/chain/types.hpp | 37 ++++++++++
 libraries/chain/protocol_feature_manager.cpp  | 13 ++++
 libraries/chain/transaction.cpp               | 49 ++++++++++++
 libraries/chain/transaction_context.cpp       | 14 +++-
 10 files changed, 202 insertions(+), 81 deletions(-)

diff --git a/libraries/chain/apply_context.cpp b/libraries/chain/apply_context.cpp
index 37c78ff7c7b..edffc87029c 100644
--- a/libraries/chain/apply_context.cpp
+++ b/libraries/chain/apply_context.cpp
@@ -288,13 +288,45 @@ void apply_context::execute_context_free_inline( action&& a ) {
 void apply_context::schedule_deferred_transaction( const uint128_t& sender_id, account_name payer,
                                                    transaction&& trx, bool replace_existing ) {
    EOS_ASSERT( trx.context_free_actions.size() == 0, cfa_inside_generated_tx,
               "context free actions are not currently allowed in generated transactions" );
-   trx.expiration = control.pending_block_time() + fc::microseconds(999'999); // Rounds up to nearest second (makes expiration check unnecessary)
-   trx.set_reference_block(control.head_block_id()); // No TaPoS check necessary

    bool enforce_actor_whitelist_blacklist = trx_context.enforce_whiteblacklist && control.is_producing_block()
                                               && !control.sender_avoids_whitelist_blacklist_enforcement( receiver );
    trx_context.validate_referenced_accounts( trx, enforce_actor_whitelist_blacklist );
+
+   if( control.is_builtin_activated( builtin_protocol_feature_t::no_duplicate_deferred_id ) ) {
+      auto exts = trx.validate_and_extract_extensions();
+      if( exts.size() > 0 ) {
+         EOS_ASSERT( exts.size() == 1, invalid_transaction_extension,
+                     "only one extension is currently supported for deferred transactions"
+         );
+         const auto& context = exts.front().get();
+         EOS_ASSERT( context.sender == receiver, ill_formed_deferred_transaction_generation_context,
+                     "deferred transaction generation context contains mismatching sender",
+                     ("expected", receiver)("actual", context.sender)
+         );
+         EOS_ASSERT( context.sender_id == sender_id, ill_formed_deferred_transaction_generation_context,
+                     "deferred transaction generation context contains mismatching sender_id",
+                     ("expected", sender_id)("actual", context.sender_id)
+         );
+         EOS_ASSERT( context.sender_trx_id == trx_context.id, ill_formed_deferred_transaction_generation_context,
+                     "deferred transaction generation context contains mismatching sender_trx_id",
+                     ("expected", trx_context.id)("actual", context.sender_trx_id)
+         );
+      } else {
+         FC_ASSERT( trx.transaction_extensions.size() == 0, "invariant failure" );
+         trx.transaction_extensions.emplace_back(
+            deferred_transaction_generation_context::extension_id(),
+            fc::raw::pack( deferred_transaction_generation_context( trx_context.id, sender_id, receiver ) )
+         );
+      }
+      trx.expiration = {};
+      trx.ref_block_num = 0;
+      trx.ref_block_prefix = 0;
+   } else {
+      trx.expiration = control.pending_block_time() + fc::microseconds(999'999); // Rounds up to nearest second (makes expiration check unnecessary)
+      trx.set_reference_block(control.head_block_id()); // No TaPoS check necessary
+   }
+
    // Charge ahead of time for the additional net usage needed to retire the deferred transaction
    // whether that be by successfully executing, soft failure, hard failure, or expiration.
    const auto& cfg = control.get_global_properties().configuration;
diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp
index 2e732c869dc..744ca4bf04b 100644
--- a/libraries/chain/controller.cpp
+++ b/libraries/chain/controller.cpp
@@ -954,8 +954,14 @@ struct controller_impl {
       // Deliver onerror action containing the failed deferred transaction directly back to the sender.
       etrx.actions.emplace_back( vector{{gtrx.sender, config::active_name}},
                                  onerror( gtrx.sender_id, gtrx.packed_trx.data(), gtrx.packed_trx.size() ) );
-      etrx.expiration = self.pending_block_time() + fc::microseconds(999'999); // Round up to avoid appearing expired
-      etrx.set_reference_block( self.head_block_id() );
+      if( self.is_builtin_activated( builtin_protocol_feature_t::no_duplicate_deferred_id ) ) {
+         etrx.expiration = {};
+         etrx.ref_block_num = 0;
+         etrx.ref_block_prefix = 0;
+      } else {
+         etrx.expiration = self.pending_block_time() + fc::microseconds(999'999); // Round up to nearest second to avoid appearing expired
+         etrx.set_reference_block( self.head_block_id() );
+      }

       transaction_context trx_context( self, etrx, etrx.id(), start );
       trx_context.deadline = deadline;
@@ -2138,8 +2144,14 @@
       signed_transaction trx;
       trx.actions.emplace_back(std::move(on_block_act));
-      trx.set_reference_block(self.head_block_id());
-      trx.expiration = self.pending_block_time() + fc::microseconds(999'999); // Round up to nearest second to avoid appearing expired
+      if( self.is_builtin_activated( builtin_protocol_feature_t::no_duplicate_deferred_id ) ) {
+         trx.expiration = {};
+         trx.ref_block_num = 0;
+         trx.ref_block_prefix = 0;
+      } else {
+         trx.expiration = self.pending_block_time() + fc::microseconds(999'999); // Round up to nearest second to avoid appearing expired
+         trx.set_reference_block( self.head_block_id() );
+      }
       return trx;
    }
diff --git a/libraries/chain/include/eosio/chain/block_header.hpp b/libraries/chain/include/eosio/chain/block_header.hpp
index adbdb7d3def..fc751826d95 100644
--- a/libraries/chain/include/eosio/chain/block_header.hpp
+++ b/libraries/chain/include/eosio/chain/block_header.hpp
@@ -8,41 +8,6 @@
 namespace eosio { namespace chain {

    namespace detail {
-      struct extract_match {
-         bool enforce_unique = false;
-      };
-
-      template
-      struct decompose;
-
-      template<>
-      struct decompose<> {
-         template
-         static auto extract( uint16_t id, const vector& data,
ResultVariant& result ) - -> fc::optional - { - return {}; - } - }; - - template - struct decompose { - using head_t = T; - using tail_t = decompose< Rest... >; - - template - static auto extract( uint16_t id, const vector& data, ResultVariant& result ) - -> fc::optional - { - if( id == head_t::extension_id() ) { - result = fc::raw::unpack( data ); - return { extract_match{ head_t::enforce_unique() } }; - } - - return tail_t::template extract( id, data, result ); - } - }; - template struct block_header_extension_types { using block_header_extensions_t = fc::static_variant< Ts... >; diff --git a/libraries/chain/include/eosio/chain/exceptions.hpp b/libraries/chain/include/eosio/chain/exceptions.hpp index a80213e0425..3246fd2602e 100644 --- a/libraries/chain/include/eosio/chain/exceptions.hpp +++ b/libraries/chain/include/eosio/chain/exceptions.hpp @@ -196,6 +196,10 @@ namespace eosio { namespace chain { 3040013, "Transaction is too big" ) FC_DECLARE_DERIVED_EXCEPTION( unknown_transaction_compression, transaction_exception, 3040014, "Unknown transaction compression" ) + FC_DECLARE_DERIVED_EXCEPTION( invalid_transaction_extension, transaction_exception, + 3040015, "Invalid transaction extension" ) + FC_DECLARE_DERIVED_EXCEPTION( ill_formed_deferred_transaction_generation_context, transaction_exception, + 3040016, "Transaction includes an ill-formed deferred transaction generation context extension" ) FC_DECLARE_DERIVED_EXCEPTION( action_validate_exception, chain_exception, diff --git a/libraries/chain/include/eosio/chain/protocol_feature_manager.hpp b/libraries/chain/include/eosio/chain/protocol_feature_manager.hpp index f9f55dffb7a..987855c6f8b 100644 --- a/libraries/chain/include/eosio/chain/protocol_feature_manager.hpp +++ b/libraries/chain/include/eosio/chain/protocol_feature_manager.hpp @@ -17,6 +17,7 @@ enum class builtin_protocol_feature_t : uint32_t { preactivate_feature, only_link_to_existing_permission, replace_deferred, + no_duplicate_deferred_id, fix_linkauth_restriction, disallow_empty_producer_schedule }; diff --git a/libraries/chain/include/eosio/chain/transaction.hpp b/libraries/chain/include/eosio/chain/transaction.hpp index db61e5b17cb..d115c4a507a 100644 --- a/libraries/chain/include/eosio/chain/transaction.hpp +++ b/libraries/chain/include/eosio/chain/transaction.hpp @@ -9,6 +9,39 @@ namespace eosio { namespace chain { + struct deferred_transaction_generation_context : fc::reflect_init { + static constexpr uint16_t extension_id() { return 0; } + static constexpr bool enforce_unique() { return true; } + + deferred_transaction_generation_context() = default; + + deferred_transaction_generation_context( const transaction_id_type& sender_trx_id, uint128_t sender_id, account_name sender ) + :sender_trx_id( sender_trx_id ) + ,sender_id( sender_id ) + ,sender( sender ) + {} + + void reflector_init(); + + transaction_id_type sender_trx_id; + uint128_t sender_id; + account_name sender; + }; + + namespace detail { + template + struct transaction_extension_types { + using transaction_extensions_t = fc::static_variant< Ts... >; + using decompose_t = decompose< Ts... >; + }; + } + + using transaction_extension_types = detail::transaction_extension_types< + deferred_transaction_generation_context + >; + + using transaction_extensions = transaction_extension_types::transaction_extensions_t; + /** * The transaction header contains the fixed-sized data * associated with each transaction. 
It is separated from @@ -74,6 +107,7 @@ namespace eosio { namespace chain { return account_name(); } + vector validate_and_extract_extensions()const; }; struct signed_transaction : public transaction @@ -173,47 +207,11 @@ namespace eosio { namespace chain { using packed_transaction_ptr = std::shared_ptr; - /** - * When a transaction is generated it can be scheduled to occur - * in the future. It may also fail to execute for some reason in - * which case the sender needs to be notified. When the sender - * sends a transaction they will assign it an ID which will be - * passed back to the sender if the transaction fails for some - * reason. - */ - struct deferred_transaction : public signed_transaction - { - uint128_t sender_id; /// ID assigned by sender of generated, accessible via WASM api when executing normal or error - account_name sender; /// receives error handler callback - account_name payer; - time_point_sec execute_after; /// delayed execution - - deferred_transaction() = default; - - deferred_transaction(uint128_t sender_id, account_name sender, account_name payer,time_point_sec execute_after, - const signed_transaction& txn) - : signed_transaction(txn), - sender_id(sender_id), - sender(sender), - payer(payer), - execute_after(execute_after) - {} - }; - - struct deferred_reference { - deferred_reference(){} - deferred_reference( const account_name& sender, const uint128_t& sender_id) - :sender(sender),sender_id(sender_id) - {} - - account_name sender; - uint128_t sender_id; - }; - uint128_t transaction_id_to_sender_id( const transaction_id_type& tid ); } } /// namespace eosio::chain +FC_REFLECT(eosio::chain::deferred_transaction_generation_context, (sender_trx_id)(sender_id)(sender) ) FC_REFLECT( eosio::chain::transaction_header, (expiration)(ref_block_num)(ref_block_prefix) (max_net_usage_words)(max_cpu_usage_ms)(delay_sec) ) FC_REFLECT_DERIVED( eosio::chain::transaction, (eosio::chain::transaction_header), (context_free_actions)(actions)(transaction_extensions) ) @@ -221,5 +219,3 @@ FC_REFLECT_DERIVED( eosio::chain::signed_transaction, (eosio::chain::transaction FC_REFLECT_ENUM( eosio::chain::packed_transaction::compression_type, (none)(zlib)) // @ignore unpacked_trx FC_REFLECT( eosio::chain::packed_transaction, (signatures)(compression)(packed_context_free_data)(packed_trx) ) -FC_REFLECT_DERIVED( eosio::chain::deferred_transaction, (eosio::chain::signed_transaction), (sender_id)(sender)(payer)(execute_after) ) -FC_REFLECT( eosio::chain::deferred_reference, (sender)(sender_id) ) diff --git a/libraries/chain/include/eosio/chain/types.hpp b/libraries/chain/include/eosio/chain/types.hpp index 1cea911d9d9..d681c349844 100644 --- a/libraries/chain/include/eosio/chain/types.hpp +++ b/libraries/chain/include/eosio/chain/types.hpp @@ -269,6 +269,43 @@ namespace eosio { namespace chain { }; // enum_hash needed to support old gcc compiler of Ubuntu 16.04 + namespace detail { + struct extract_match { + bool enforce_unique = false; + }; + + template + struct decompose; + + template<> + struct decompose<> { + template + static auto extract( uint16_t id, const vector& data, ResultVariant& result ) + -> fc::optional + { + return {}; + } + }; + + template + struct decompose { + using head_t = T; + using tail_t = decompose< Rest... 
>; + + template + static auto extract( uint16_t id, const vector& data, ResultVariant& result ) + -> fc::optional + { + if( id == head_t::extension_id() ) { + result = fc::raw::unpack( data ); + return { extract_match{ head_t::enforce_unique() } }; + } + + return tail_t::template extract( id, data, result ); + } + }; + } + } } // eosio::chain FC_REFLECT( eosio::chain::void_t, ) diff --git a/libraries/chain/protocol_feature_manager.cpp b/libraries/chain/protocol_feature_manager.cpp index 78c84ff9410..66f2eaf183d 100644 --- a/libraries/chain/protocol_feature_manager.cpp +++ b/libraries/chain/protocol_feature_manager.cpp @@ -53,6 +53,19 @@ Also corrects the RAM usage of accounts affected by the replace deferred transac */ {} } ) + ( builtin_protocol_feature_t::no_duplicate_deferred_id, builtin_protocol_feature_spec{ + "NO_DUPLICATE_DEFERRED_ID", + fc::variant("45967387ee92da70171efd9fefd1ca8061b5efe6f124d269cd2468b47f1575a0").as(), + // SHA256 hash of the raw message below within the comment delimiters (do not modify message below). +/* +Builtin protocol feature: NO_DUPLICATE_DEFERRED_ID +Depends on: REPLACE_DEFERRED + +Ensures transactions generated by contracts for deferred execution are adjusted to avoid transaction ID conflicts. +Also allows a contract to send a deferred transaction in a manner that enables the contract to know the transaction ID ahead of time. +*/ + {builtin_protocol_feature_t::replace_deferred} + } ) ( builtin_protocol_feature_t::fix_linkauth_restriction, builtin_protocol_feature_spec{ "FIX_LINKAUTH_RESTRICTION", fc::variant("a98241c83511dc86c857221b9372b4aa7cea3aaebc567a48604e1d3db3557050").as(), diff --git a/libraries/chain/transaction.cpp b/libraries/chain/transaction.cpp index e1910ce02eb..1ebdfeccc01 100644 --- a/libraries/chain/transaction.cpp +++ b/libraries/chain/transaction.cpp @@ -50,6 +50,16 @@ typedef multi_index_container< > > recovery_cache_type; +void deferred_transaction_generation_context::reflector_init() { + static_assert( fc::raw::has_feature_reflector_init_on_unpacked_reflected_types, + "deferred_transaction_generation_context expects FC to support reflector_init" ); + + + EOS_ASSERT( sender != account_name(), ill_formed_deferred_transaction_generation_context, + "Deferred transaction generation context extension must have a non-empty sender account", + ); +} + void transaction_header::set_reference_block( const block_id_type& reference_block ) { ref_block_num = fc::endian_reverse_u32(reference_block._hash[0]); ref_block_prefix = reference_block._hash[1]; @@ -134,6 +144,45 @@ fc::microseconds transaction::get_signature_keys( const vector& return sig_cpu_usage; } FC_CAPTURE_AND_RETHROW() } +vector transaction::validate_and_extract_extensions()const { + using transaction_extensions_t = transaction_extension_types::transaction_extensions_t; + using decompose_t = transaction_extension_types::decompose_t; + + static_assert( std::is_same::value, + "transaction_extensions is not setup as expected" ); + + vector results; + + uint16_t id_type_lower_bound = 0; + + for( size_t i = 0; i < transaction_extensions.size(); ++i ) { + const auto& e = transaction_extensions[i]; + auto id = e.first; + + EOS_ASSERT( id >= id_type_lower_bound, invalid_transaction_extension, + "Transaction extensions are not in the correct order (ascending id types required)" + ); + + results.emplace_back(); + + auto match = decompose_t::extract( id, e.second, results.back() ); + EOS_ASSERT( match, invalid_transaction_extension, + "Transaction extension with id type ${id} is not 
supported", + ("id", id) + ); + + if( match->enforce_unique ) { + EOS_ASSERT( i == 0 || id > id_type_lower_bound, invalid_transaction_extension, + "Transaction extension with id type ${id} is not allowed to repeat", + ("id", id) + ); + } + + id_type_lower_bound = id; + } + + return results; +} const signature_type& signed_transaction::sign(const private_key_type& key, const chain_id_type& chain_id) { signatures.push_back(key.sign(sig_digest(chain_id, context_free_data))); diff --git a/libraries/chain/transaction_context.cpp b/libraries/chain/transaction_context.cpp index 226e6863a16..cc67b49eaec 100644 --- a/libraries/chain/transaction_context.cpp +++ b/libraries/chain/transaction_context.cpp @@ -166,7 +166,10 @@ namespace bacc = boost::accumulators; trace->block_time = c.pending_block_time(); trace->producer_block_id = c.pending_producer_block_id(); executed.reserve( trx.total_actions() ); - EOS_ASSERT( trx.transaction_extensions.size() == 0, unsupported_feature, "we don't support any extensions yet" ); + EOS_ASSERT( trx.transaction_extensions.size() == 0 + || control.is_builtin_activated( builtin_protocol_feature_t::no_duplicate_deferred_id ), + unsupported_feature, "we don't support any extensions yet" + ); // This assert may not be necessary. Consider removing. } void transaction_context::init(uint64_t initial_net_usage) @@ -278,6 +281,8 @@ namespace bacc = boost::accumulators; void transaction_context::init_for_implicit_trx( uint64_t initial_net_usage ) { + EOS_ASSERT( trx.transaction_extensions.size() == 0, unsupported_feature, + "no transaction extensions supported yet for implicit transactions" ); published = control.pending_block_time(); init( initial_net_usage); } @@ -286,6 +291,9 @@ namespace bacc = boost::accumulators; uint64_t packed_trx_prunable_size, bool skip_recording ) { + EOS_ASSERT( trx.transaction_extensions.size() == 0, unsupported_feature, + "no transaction extensions supported yet for input transactions" ); + const auto& cfg = control.get_global_properties().configuration; uint64_t discounted_size_for_pruned_data = packed_trx_prunable_size; @@ -322,6 +330,10 @@ namespace bacc = boost::accumulators; void transaction_context::init_for_deferred_trx( fc::time_point p ) { + EOS_ASSERT( (trx.expiration.sec_since_epoch() == 0) || (trx.transaction_extensions.size() == 0), unsupported_feature, + "no transaction extensions supported yet for deferred transactions" + ); + published = p; trace->scheduled = true; apply_context_free = false; From 363311937ab3396cb7f989d3cebb44619af7b0e9 Mon Sep 17 00:00:00 2001 From: arhag Date: Thu, 4 Apr 2019 19:24:14 -0400 Subject: [PATCH 296/680] fix proxy test contract to work even after NO_DUPLICATE_DEFERRED_ID activation #6115 --- unittests/test-contracts/README.md | 2 +- unittests/test-contracts/proxy/proxy.cpp | 1 + unittests/test-contracts/proxy/proxy.wasm | Bin 18398 -> 18481 bytes 3 files changed, 2 insertions(+), 1 deletion(-) diff --git a/unittests/test-contracts/README.md b/unittests/test-contracts/README.md index 157455c7202..aa9c0f8dee9 100644 --- a/unittests/test-contracts/README.md +++ b/unittests/test-contracts/README.md @@ -2,6 +2,6 @@ test_ram_limit contract was compiled with eosio.cdt v1.4.1 That contract was ported to compile with eosio.cdt v1.5.0, but the test that uses it is very sensitive to stdlib/eosiolib changes, compilation flags and linker flags. 
-deferred_test contract was compiled with eosio.cdt v1.6.1
+deferred_test and proxy contracts were compiled with eosio.cdt v1.6.1

 The remaining contracts have been ported to compile with eosio.cdt v1.6.x. They were compiled with a patched version of eosio.cdt v1.6.0-rc1 (commit 1c9180ff5a1e431385180ce459e11e6a1255c1a4).
diff --git a/unittests/test-contracts/proxy/proxy.cpp b/unittests/test-contracts/proxy/proxy.cpp
index 1a199c4a5ba..c9fc324cad5 100644
--- a/unittests/test-contracts/proxy/proxy.cpp
+++ b/unittests/test-contracts/proxy/proxy.cpp
@@ -59,6 +59,7 @@ void proxy::on_error( uint128_t sender_id, eosio::ignore> ) {
    get_datastream() >> packed_trx_size;
    transaction trx;
    get_datastream() >> trx;
+   trx.transaction_extensions.clear();
    trx.delay_sec = cfg.delay;

    trx.send( id, get_self() );
diff --git a/unittests/test-contracts/proxy/proxy.wasm b/unittests/test-contracts/proxy/proxy.wasm
index c09311385bec5e82bc38b6306b33942f0dc9bee1..b40249782bac38ca92d735ae5363f1218e7f2496 100755
GIT binary patch
delta 4312
[base85-encoded binary delta data for the rebuilt proxy.wasm omitted]

delta 4173
[base85-encoded binary delta data omitted]
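Why the one-line `trx.transaction_extensions.clear()` fix above is needed: once NO_DUPLICATE_DEFERRED_ID is active, every contract-generated deferred transaction carries a deferred_transaction_generation_context extension recording the id of the transaction that scheduled it. A transaction handed back through onerror and re-sent verbatim would still carry the stale context, so the mismatching sender_trx_id assertion in apply_context::schedule_deferred_transaction (PATCH 295 above) would reject it. The sketch below condenses that scheduling logic to show the effect of clearing; make_resendable() is a hypothetical helper for illustration only, not code from this series.

// Hypothetical helper, condensed from apply_context::schedule_deferred_transaction
// in PATCH 295; assumes the eosio::chain types introduced in that patch.
#include <eosio/chain/transaction.hpp>
#include <fc/io/raw.hpp>

using namespace eosio::chain;

transaction make_resendable( transaction trx, // deferred trx recovered via onerror
                             const transaction_id_type& new_sender_trx_id,
                             uint128_t sender_id, account_name sender ) {
   // Drop the stale generation context; its sender_trx_id recorded the
   // transaction that *originally* scheduled this trx and can no longer
   // match the currently executing one.
   trx.transaction_extensions.clear();

   // The chain attaches a fresh context when the trx is scheduled again:
   trx.transaction_extensions.emplace_back(
      deferred_transaction_generation_context::extension_id(),
      fc::raw::pack( deferred_transaction_generation_context( new_sender_trx_id, sender_id, sender ) ) );

   // With expiration and TaPoS zeroed, trx.id() becomes a pure function of the
   // payload plus (new_sender_trx_id, sender_id, sender) -- which is what lets
   // a contract compute the deferred transaction id ahead of time.
   trx.expiration       = time_point_sec();
   trx.ref_block_num    = 0;
   trx.ref_block_prefix = 0;
   return trx;
}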
z8v|kK;H&J5fePt~S6N`tqNiBzU<*CZwhh(}F1JvE+9;Bb{48(czP zViyLh>1*uc!3e#~!b3}>=gzThLyPGJ_RLUi&4>5~RqjhiDE0vEw+ZmtWbD7^X#JqmW9Z|Z1P@WVV!c5%PL3WNAy%A z6rKvrW}=DsR5+9g5gS{SowR^GBsc9Xza zW)i6kA-v58N=Gup!auC|SV$a+VAy|24U^WXsmNkxu9GNSX`7l_SennwbrF01GZYT5 z|4c8G?>)(Ie}Jp1*GBOlm1Uo0$v4M=7^QY=f%6!mu4 zxWWM-rjn^dGLepic_X|u8%d=}Iae0VE~QeDcxEaSU5JoE_91m=ZSwX3Qc-DhIGrxJ zN_M(s*=DP@*=#j6_{ST6wKf}<%5Jx+@}_E&RjX>T6}nZTs5D6_Rb@)3#UW>@x$-j6 zvcz0@W9?-vplT?YoWGa&3(r142Na@5w8tpXTMTwUanqe_QQ1T_c1H2hUF-#=v0zZs zOrlR|8VeMDmtJNk3I|F8Uezb*hB9g=CNVFwONISDjHN3E)m8IJ(UP9lE#~v0sTi`U z#|+Xe6TOXfYxmJ2_6yBdb>cOmOPZnTvmV8y9*bsBL&0YeyQ0<5{mfoeOZPB;(W9Gh zGAtu@su_wN)(qJvxjYm;NlFw;Cz1i`E2uK>#?IPHHVtQ}0Kil_|jJpl$lPX}Zn7UrV`iqAg zc7tL{Ijd`;t7+)_+1cXT={Wne*hh<*qvRQS6MLhiiB7OjOBy!4AONudrFTV(NAq&L#X*cW;={Uxg` zZI;{g8D^Hw0KT7=?$f$p`A-r9sNHAFst|;`%#CMHS$jhdphFt_yoBSYnghsCP$CA= zC%FGNNlXv2hst&!o;S<->jGYhM?^P9B=vp?nX$gE=eC(O&jq z#RTnRdZoiZpZ1MZHdh}8xP9xpQXiWuswYq780YU6*2ElJv?yc#~D+%{2bEZn5r*Qp5O%=WQ!t*t+NOYLp zx9P6DB@iVZ1xo{##W9m2h%1|V=m={x5;rg@Sv7I9-x}37kGKsB=HZKZl`%Wfc$}1{ zVAR9?=}3y9Xg^nE>?F%dww@qS-}B0l^2Ll*`@t)G)hpocl~-7s$0^%D;@uuj;vabO zBvv<4#(bE=hsl|L6x8fw|M0lwHuF3ScwD;8{2kQD-_u^oO91Cu zYix-0f~YbmyV`o4#Mn9Sj?DvZkqTc--wu6u?)5ULyu%GFQ8^}qQUg0!Uu)}uUgs1) zR^LJ|veLc^Spjyxsqd=1_&U!n=q^>Cs|grJ)&i?F|kvVXUDCAsugsxpKB7&jjRw z(}E}AT0Vq(D}Q%X!9Vza)s>H@DK=x?qld+aEusIB>B zB(dY|uE|0-a0hDgBwsG3bzIaqF^q^k=HXO+A9(@s_)wgG9*ZzOwrJYG60G;Uy=-2F zp~d9D)ift*D{JpC5&6aDMm+!B-17gzp@nRsr3de) zIEMA?YFnXqvd7y-@cgK)w)l3eO^_;x(k{caaLbU$!~!o}TlQ{9uZ`QTaIH-M#@oq0iyBYv3GK(ccE{P!ADJ*Ds81pW*xQiph84*LEC* za$skI9=h<<&S$vd4wIKTsHF*wJ>BJ8EjVlVbpT5`RPN!W<_3hhHAk2s?MwE+=3u@` zo)|hv_py(LX4d15`iKxk`VQ>!=2xyDCD(w4dFAO{KU4YHLSchDJ?zfm*4#|kBg1Ks zkdF8#Cp4{(N?GKAnA9NE*oS7l#7^Wvqz&cIC%r%HMT)2)l@fmRgcb?oyQgO12dMp8 zG2y*=77325Iq3uoSpD_W5K4pGm2d+a48h$L(*u~wgE_gfq54s7&cgD;FVsi{QQ z*;VT)K>dB2w+z4`keEsO;RL5!D1T*p7RtK|qAfZvZ{~mLqG3B?(wK-kcg*Jdh&Yk) zAQe#U%YybH?wZfR{NQz>Nkr?SG;2CMvN&9!2JL|@2Gc|Q42QV+84iGmlL|(@%O|lX zRi81-+onV4N-&7%PRuM$83v8%v+SyRjD}g#(wz-Zx-0>V0QqHW9HFCFAle#5A(X4 zF~}_lo(>`+woNTBM{n#JG5$T4E7NCLNbSsiE`|Tq>N0$AakMUd?d`9q51j5EN;n0GIThSCWPOKG`n$6G;_BRj?9PdH6X*~ zNRtZDz@dWl%NDD3n*GB4bDX1R})Z&4 z=Pyk)LTT Date: Thu, 4 Apr 2019 19:34:11 -0400 Subject: [PATCH 297/680] better way of rejecting disallowed transaction extensions #6115 Allows the `num_failed` tracker and blacklist of producer_plugin to work as intended. Preserves the current pattern of not retiring (except for case with expired status) deferred transaction with invalid extensions even after NO_DUPLICATE_DEFERRED_ID activation. 
--- libraries/chain/controller.cpp | 4 +++ .../chain/include/eosio/chain/exceptions.hpp | 2 ++ .../eosio/chain/transaction_context.hpp | 2 ++ libraries/chain/transaction_context.cpp | 33 ++++++++++++------- 4 files changed, 30 insertions(+), 11 deletions(-) diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp index 744ca4bf04b..8d4adab795f 100644 --- a/libraries/chain/controller.cpp +++ b/libraries/chain/controller.cpp @@ -984,6 +984,8 @@ struct controller_impl { trx_context.squash(); restore.cancel(); return trace; + } catch( const disallowed_transaction_extensions_bad_block_exception& ) { + throw; } catch( const protocol_feature_bad_block_exception& ) { throw; } catch( const fc::exception& e ) { @@ -1118,6 +1120,8 @@ struct controller_impl { restore.cancel(); return trace; + } catch( const disallowed_transaction_extensions_bad_block_exception& ) { + throw; } catch( const protocol_feature_bad_block_exception& ) { throw; } catch( const fc::exception& e ) { diff --git a/libraries/chain/include/eosio/chain/exceptions.hpp b/libraries/chain/include/eosio/chain/exceptions.hpp index 3246fd2602e..2976dcbb7f1 100644 --- a/libraries/chain/include/eosio/chain/exceptions.hpp +++ b/libraries/chain/include/eosio/chain/exceptions.hpp @@ -200,6 +200,8 @@ namespace eosio { namespace chain { 3040015, "Invalid transaction extension" ) FC_DECLARE_DERIVED_EXCEPTION( ill_formed_deferred_transaction_generation_context, transaction_exception, 3040016, "Transaction includes an ill-formed deferred transaction generation context extension" ) + FC_DECLARE_DERIVED_EXCEPTION( disallowed_transaction_extensions_bad_block_exception, transaction_exception, + 3250002, "Transaction includes disallowed extensions (invalid block)" ) FC_DECLARE_DERIVED_EXCEPTION( action_validate_exception, chain_exception, diff --git a/libraries/chain/include/eosio/chain/transaction_context.hpp b/libraries/chain/include/eosio/chain/transaction_context.hpp index b0327dafb18..6238a6cedf6 100644 --- a/libraries/chain/include/eosio/chain/transaction_context.hpp +++ b/libraries/chain/include/eosio/chain/transaction_context.hpp @@ -73,6 +73,8 @@ namespace eosio { namespace chain { void validate_cpu_usage_to_bill( int64_t u, bool check_minimum = true )const; + void disallow_transaction_extensions( const char* error_msg )const; + /// Fields: public: diff --git a/libraries/chain/transaction_context.cpp b/libraries/chain/transaction_context.cpp index cc67b49eaec..beff789aeba 100644 --- a/libraries/chain/transaction_context.cpp +++ b/libraries/chain/transaction_context.cpp @@ -166,10 +166,14 @@ namespace bacc = boost::accumulators; trace->block_time = c.pending_block_time(); trace->producer_block_id = c.pending_producer_block_id(); executed.reserve( trx.total_actions() ); - EOS_ASSERT( trx.transaction_extensions.size() == 0 - || control.is_builtin_activated( builtin_protocol_feature_t::no_duplicate_deferred_id ), - unsupported_feature, "we don't support any extensions yet" - ); // This assert may not be necessary. Consider removing. 
+ } + + void transaction_context::disallow_transaction_extensions( const char* error_msg )const { + if( control.is_producing_block() ) { + EOS_THROW( subjective_block_production_exception, error_msg ); + } else { + EOS_THROW( disallowed_transaction_extensions_bad_block_exception, error_msg ); + } } void transaction_context::init(uint64_t initial_net_usage) @@ -281,8 +285,10 @@ namespace bacc = boost::accumulators; void transaction_context::init_for_implicit_trx( uint64_t initial_net_usage ) { - EOS_ASSERT( trx.transaction_extensions.size() == 0, unsupported_feature, - "no transaction extensions supported yet for implicit transactions" ); + if( trx.transaction_extensions.size() > 0 ) { + disallow_transaction_extensions( "no transaction extensions supported yet for implicit transactions" ); + } + published = control.pending_block_time(); init( initial_net_usage); } @@ -291,8 +297,9 @@ namespace bacc = boost::accumulators; uint64_t packed_trx_prunable_size, bool skip_recording ) { - EOS_ASSERT( trx.transaction_extensions.size() == 0, unsupported_feature, - "no transaction extensions supported yet for input transactions" ); + if( trx.transaction_extensions.size() > 0 ) { + disallow_transaction_extensions( "no transaction extensions supported yet for input transactions" ); + } const auto& cfg = control.get_global_properties().configuration; @@ -330,9 +337,13 @@ namespace bacc = boost::accumulators; void transaction_context::init_for_deferred_trx( fc::time_point p ) { - EOS_ASSERT( (trx.expiration.sec_since_epoch() == 0) || (trx.transaction_extensions.size() == 0), unsupported_feature, - "no transaction extensions supported yet for deferred transactions" - ); + if( (trx.expiration.sec_since_epoch() != 0) && (trx.transaction_extensions.size() > 0) ) { + disallow_transaction_extensions( "no transaction extensions supported yet for deferred transactions" ); + } + // If (trx.expiration.sec_since_epoch() == 0) then it was created after NO_DUPLICATE_DEFERRED_ID activation, + // and so validation of its extensions was done either in: + // * apply_context::schedule_deferred_transaction for contract-generated transactions; + // * or transaction_context::init_for_input_trx for delayed input transactions. 
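+      // (Whenever a deferred_transaction_generation_context is unpacked, its
+      // reflector_init additionally rejects an empty sender account.)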
published = p; trace->scheduled = true; From 04e7b9010fcc7306b6a1862575daa7e086e7bec7 Mon Sep 17 00:00:00 2001 From: arhag Date: Thu, 4 Apr 2019 20:00:55 -0400 Subject: [PATCH 298/680] fix compilation errors on certain linux platforms #6115 --- libraries/chain/apply_context.cpp | 2 +- libraries/chain/controller.cpp | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/libraries/chain/apply_context.cpp b/libraries/chain/apply_context.cpp index edffc87029c..3dea34973be 100644 --- a/libraries/chain/apply_context.cpp +++ b/libraries/chain/apply_context.cpp @@ -319,7 +319,7 @@ void apply_context::schedule_deferred_transaction( const uint128_t& sender_id, a fc::raw::pack( deferred_transaction_generation_context( trx_context.id, sender_id, receiver ) ) ); } - trx.expiration = {}; + trx.expiration = time_point_sec(); trx.ref_block_num = 0; trx.ref_block_prefix = 0; } else { diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp index 8d4adab795f..47bc6ad7e7f 100644 --- a/libraries/chain/controller.cpp +++ b/libraries/chain/controller.cpp @@ -955,7 +955,7 @@ struct controller_impl { etrx.actions.emplace_back( vector{{gtrx.sender, config::active_name}}, onerror( gtrx.sender_id, gtrx.packed_trx.data(), gtrx.packed_trx.size() ) ); if( self.is_builtin_activated( builtin_protocol_feature_t::no_duplicate_deferred_id ) ) { - etrx.expiration = {}; + etrx.expiration = time_point_sec(); etrx.ref_block_num = 0; etrx.ref_block_prefix = 0; } else { @@ -2149,7 +2149,7 @@ struct controller_impl { signed_transaction trx; trx.actions.emplace_back(std::move(on_block_act)); if( self.is_builtin_activated( builtin_protocol_feature_t::no_duplicate_deferred_id ) ) { - trx.expiration = {}; + trx.expiration = time_point_sec(); trx.ref_block_num = 0; trx.ref_block_prefix = 0; } else { From dd782de9941c4ec174438cd2856665c3b1b0699c Mon Sep 17 00:00:00 2001 From: arhag Date: Thu, 4 Apr 2019 21:21:30 -0400 Subject: [PATCH 299/680] log provided feature digests in bios_boot.sh and try a different way of iterating through the provided list #6115 --- testnet.template | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/testnet.template b/testnet.template index ab9051f0601..e36e8ba4f80 100644 --- a/testnet.template +++ b/testnet.template @@ -17,8 +17,6 @@ if [ -z "$bioscontractpath" ]; then bioscontractpath="unittests/contracts/eosio.bios" fi -featuredigests=($FEATURE_DIGESTS) - wddir=eosio-ignition-wd wdaddr=localhost:8899 wdurl=http://$wdaddr @@ -42,6 +40,8 @@ mkdir $wddir step=1 echo Initializing ignition sequence at $(date) | tee $logfile +echo "FEATURE_DIGESTS: $FEATURE_DIGESTS" >> $logfile + echo "http-server-address = $wdaddr" > $wddir/config.ini programs/keosd/keosd --config-dir $wddir --data-dir $wddir 2> $wddir/wdlog.txt & @@ -85,7 +85,7 @@ wcmd create --to-console -n ignition ecmd set contract eosio $bioscontractpath eosio.bios.wasm eosio.bios.abi # Preactivate all digests -for digest in "${featuredigests[@]}"; +for digest in $FEATURE_DIGESTS; do ecmd push action eosio preactivate "{\"feature_digest\":\"$digest\"}" -p eosio done From 9c47d56aedcbcea76b04d5b4c8235a3f1c853337 Mon Sep 17 00:00:00 2001 From: arhag Date: Thu, 4 Apr 2019 21:56:32 -0400 Subject: [PATCH 300/680] log what the FEATURE_DIGESTS environment variable is set to as well before calling bios_boot.sh #6115 --- tests/Cluster.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/Cluster.py b/tests/Cluster.py index 3476bf9de03..cb24bfac2d0 100644 --- a/tests/Cluster.py +++ b/tests/Cluster.py @@ 
-929,6 +929,7 @@ def bios_bootstrap(self, biosNode, totalNodes, pfSetupPolicy, silent=False): if pfSetupPolicy == PFSetupPolicy.FULL: allBuiltinProtocolFeatureDigests = biosNode.getAllBuiltinFeatureDigestsToPreactivate() env["FEATURE_DIGESTS"] = " ".join(allBuiltinProtocolFeatureDigests) + Utils.Print("Set FEATURE_DIGESTS to: %s" % env["FEATURE_DIGESTS"]) if 0 != subprocess.call(cmd.split(), stdout=Utils.FNull, env=env): if not silent: Utils.Print("Launcher failed to shut down eos cluster.") From 8ec809873002b9ee9872f06ad54662e29539c98e Mon Sep 17 00:00:00 2001 From: arhag Date: Thu, 4 Apr 2019 22:12:08 -0400 Subject: [PATCH 301/680] another attempt to prevent the shuffling of FEATURE_DIGESTS on certain platforms #6115 --- testnet.template | 3 +++ tests/Cluster.py | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/testnet.template b/testnet.template index e36e8ba4f80..432e04a11b5 100644 --- a/testnet.template +++ b/testnet.template @@ -85,10 +85,13 @@ wcmd create --to-console -n ignition ecmd set contract eosio $bioscontractpath eosio.bios.wasm eosio.bios.abi # Preactivate all digests +old_IFS=${IFS} +IFS=";" for digest in $FEATURE_DIGESTS; do ecmd push action eosio preactivate "{\"feature_digest\":\"$digest\"}" -p eosio done +IFS=${old_IFS} # Create required system accounts ecmd create key --to-console diff --git a/tests/Cluster.py b/tests/Cluster.py index cb24bfac2d0..d8356b6d5c2 100644 --- a/tests/Cluster.py +++ b/tests/Cluster.py @@ -928,7 +928,7 @@ def bios_bootstrap(self, biosNode, totalNodes, pfSetupPolicy, silent=False): if pfSetupPolicy == PFSetupPolicy.FULL: allBuiltinProtocolFeatureDigests = biosNode.getAllBuiltinFeatureDigestsToPreactivate() - env["FEATURE_DIGESTS"] = " ".join(allBuiltinProtocolFeatureDigests) + env["FEATURE_DIGESTS"] = ";".join(allBuiltinProtocolFeatureDigests) Utils.Print("Set FEATURE_DIGESTS to: %s" % env["FEATURE_DIGESTS"]) if 0 != subprocess.call(cmd.split(), stdout=Utils.FNull, env=env): From 063bb595dd37afae0d4766a8fed4622abbafe373 Mon Sep 17 00:00:00 2001 From: arhag Date: Thu, 4 Apr 2019 22:51:21 -0400 Subject: [PATCH 302/680] revert previous commit (the issue was not in the bash script) and fix the getAllBuiltinFeatureDigestsToPreactivate function in Node.py #6115 --- testnet.template | 3 --- tests/Cluster.py | 2 +- tests/Node.py | 16 ++++++++++------ 3 files changed, 11 insertions(+), 10 deletions(-) diff --git a/testnet.template b/testnet.template index 432e04a11b5..e36e8ba4f80 100644 --- a/testnet.template +++ b/testnet.template @@ -85,13 +85,10 @@ wcmd create --to-console -n ignition ecmd set contract eosio $bioscontractpath eosio.bios.wasm eosio.bios.abi # Preactivate all digests -old_IFS=${IFS} -IFS=";" for digest in $FEATURE_DIGESTS; do ecmd push action eosio preactivate "{\"feature_digest\":\"$digest\"}" -p eosio done -IFS=${old_IFS} # Create required system accounts ecmd create key --to-console diff --git a/tests/Cluster.py b/tests/Cluster.py index d8356b6d5c2..cb24bfac2d0 100644 --- a/tests/Cluster.py +++ b/tests/Cluster.py @@ -928,7 +928,7 @@ def bios_bootstrap(self, biosNode, totalNodes, pfSetupPolicy, silent=False): if pfSetupPolicy == PFSetupPolicy.FULL: allBuiltinProtocolFeatureDigests = biosNode.getAllBuiltinFeatureDigestsToPreactivate() - env["FEATURE_DIGESTS"] = ";".join(allBuiltinProtocolFeatureDigests) + env["FEATURE_DIGESTS"] = " ".join(allBuiltinProtocolFeatureDigests) Utils.Print("Set FEATURE_DIGESTS to: %s" % env["FEATURE_DIGESTS"]) if 0 != subprocess.call(cmd.split(), stdout=Utils.FNull, env=env): diff --git 
a/tests/Node.py b/tests/Node.py index 9621186c9f9..67407531d38 100644 --- a/tests/Node.py +++ b/tests/Node.py @@ -1486,15 +1486,19 @@ def activatePreactivateFeature(self): # Wait for the next block to be produced so the scheduled protocol feature is activated self.waitForHeadToAdvance() - # Return an array of feature digests to be preactivated + # Return an array of feature digests to be preactivated in a correct order respecting dependencies # Require producer_api_plugin def getAllBuiltinFeatureDigestsToPreactivate(self): protocolFeatures = [] - protocolFeatureDict = self.getSupportedProtocolFeatureDict() - for k, v in protocolFeatureDict.items(): - # Filter out "PREACTIVATE_FEATURE" - if k != "PREACTIVATE_FEATURE": - protocolFeatures.append(v["feature_digest"]) + supportedProtocolFeatures = self.getSupportedProtocolFeatures() + for protocolFeature in supportedProtocolFeatures: + for spec in protocolFeature["specification"]: + if (spec["name"] == "builtin_feature_codename"): + codename = spec["value"] + # Filter out "PREACTIVATE_FEATURE" + if codename != "PREACTIVATE_FEATURE": + protocolFeatures.append(protocolFeature["feature_digest"]) + break return protocolFeatures # Require PREACTIVATE_FEATURE to be activated and require eosio.bios with preactivate_feature From 88093a2902f3772c66e1d0ca48c01e80dc13e2d4 Mon Sep 17 00:00:00 2001 From: Jeeyong Um Date: Sat, 6 Apr 2019 02:59:07 +0900 Subject: [PATCH 303/680] Remove unused skip_flag in base_tester::produce_block --- .../testing/include/eosio/testing/tester.hpp | 26 +++++++++---------- libraries/testing/tester.cpp | 2 +- unittests/snapshot_tests.cpp | 8 +++--- 3 files changed, 18 insertions(+), 18 deletions(-) diff --git a/libraries/testing/include/eosio/testing/tester.hpp b/libraries/testing/include/eosio/testing/tester.hpp index 4328bda7ee8..265bf770f83 100644 --- a/libraries/testing/include/eosio/testing/tester.hpp +++ b/libraries/testing/include/eosio/testing/tester.hpp @@ -89,8 +89,8 @@ namespace eosio { namespace testing { void open( const snapshot_reader_ptr& snapshot ); bool is_same_chain( base_tester& other ); - virtual signed_block_ptr produce_block( fc::microseconds skip_time = fc::milliseconds(config::block_interval_ms), uint32_t skip_flag = 0/*skip_missed_block_penalty*/ ) = 0; - virtual signed_block_ptr produce_empty_block( fc::microseconds skip_time = fc::milliseconds(config::block_interval_ms), uint32_t skip_flag = 0/*skip_missed_block_penalty*/ ) = 0; + virtual signed_block_ptr produce_block( fc::microseconds skip_time = fc::milliseconds(config::block_interval_ms) ) = 0; + virtual signed_block_ptr produce_empty_block( fc::microseconds skip_time = fc::milliseconds(config::block_interval_ms) ) = 0; virtual signed_block_ptr finish_block() = 0; void produce_blocks( uint32_t n = 1, bool empty = false ); void produce_blocks_until_end_of_round(); @@ -281,7 +281,7 @@ namespace eosio { namespace testing { } protected: - signed_block_ptr _produce_block( fc::microseconds skip_time, bool skip_pending_trxs = false, uint32_t skip_flag = 0 ); + signed_block_ptr _produce_block( fc::microseconds skip_time, bool skip_pending_trxs = false ); void _start_block(fc::time_point block_time); signed_block_ptr _finish_block(); @@ -308,13 +308,13 @@ namespace eosio { namespace testing { init(config); } - signed_block_ptr produce_block( fc::microseconds skip_time = fc::milliseconds(config::block_interval_ms), uint32_t skip_flag = 0/*skip_missed_block_penalty*/ )override { - return _produce_block(skip_time, false, skip_flag); + signed_block_ptr 
produce_block( fc::microseconds skip_time = fc::milliseconds(config::block_interval_ms) )override { + return _produce_block(skip_time, false); } - signed_block_ptr produce_empty_block( fc::microseconds skip_time = fc::milliseconds(config::block_interval_ms), uint32_t skip_flag = 0/*skip_missed_block_penalty*/ )override { + signed_block_ptr produce_empty_block( fc::microseconds skip_time = fc::milliseconds(config::block_interval_ms) )override { control->abort_block(); - return _produce_block(skip_time, true, skip_flag); + return _produce_block(skip_time, true); } signed_block_ptr finish_block()override { @@ -388,16 +388,16 @@ namespace eosio { namespace testing { init(config); } - signed_block_ptr produce_block( fc::microseconds skip_time = fc::milliseconds(config::block_interval_ms), uint32_t skip_flag = 0 /*skip_missed_block_penalty*/ )override { - auto sb = _produce_block(skip_time, false, skip_flag | 2); + signed_block_ptr produce_block( fc::microseconds skip_time = fc::milliseconds(config::block_interval_ms) )override { + auto sb = _produce_block(skip_time, false); auto bs = validating_node->create_block_state_future( sb ); validating_node->push_block( bs ); return sb; } - signed_block_ptr produce_block_no_validation( fc::microseconds skip_time = fc::milliseconds(config::block_interval_ms), uint32_t skip_flag = 0 /*skip_missed_block_penalty*/ ) { - return _produce_block(skip_time, false, skip_flag | 2); + signed_block_ptr produce_block_no_validation( fc::microseconds skip_time = fc::milliseconds(config::block_interval_ms) ) { + return _produce_block(skip_time, false); } void validate_push_block(const signed_block_ptr& sb) { @@ -405,9 +405,9 @@ namespace eosio { namespace testing { validating_node->push_block( bs ); } - signed_block_ptr produce_empty_block( fc::microseconds skip_time = fc::milliseconds(config::block_interval_ms), uint32_t skip_flag = 0 /*skip_missed_block_penalty*/ )override { + signed_block_ptr produce_empty_block( fc::microseconds skip_time = fc::milliseconds(config::block_interval_ms) )override { control->abort_block(); - auto sb = _produce_block(skip_time, true, skip_flag | 2); + auto sb = _produce_block(skip_time, true); auto bs = validating_node->create_block_state_future( sb ); validating_node->push_block( bs ); diff --git a/libraries/testing/tester.cpp b/libraries/testing/tester.cpp index 63a0788931f..79e0c95d11c 100644 --- a/libraries/testing/tester.cpp +++ b/libraries/testing/tester.cpp @@ -154,7 +154,7 @@ namespace eosio { namespace testing { return b; } - signed_block_ptr base_tester::_produce_block( fc::microseconds skip_time, bool skip_pending_trxs, uint32_t skip_flag) { + signed_block_ptr base_tester::_produce_block( fc::microseconds skip_time, bool skip_pending_trxs) { auto head = control->head_block_state(); auto head_time = control->head_block_time(); auto next_time = head_time + skip_time; diff --git a/unittests/snapshot_tests.cpp b/unittests/snapshot_tests.cpp index c3578e15750..a3749f9656a 100644 --- a/unittests/snapshot_tests.cpp +++ b/unittests/snapshot_tests.cpp @@ -49,13 +49,13 @@ class snapshotted_tester : public base_tester { init(copied_config, snapshot); } - signed_block_ptr produce_block( fc::microseconds skip_time = fc::milliseconds(config::block_interval_ms), uint32_t skip_flag = 0/*skip_missed_block_penalty*/ )override { - return _produce_block(skip_time, false, skip_flag); + signed_block_ptr produce_block( fc::microseconds skip_time = fc::milliseconds(config::block_interval_ms) )override { + return _produce_block(skip_time, false); } - 
signed_block_ptr produce_empty_block( fc::microseconds skip_time = fc::milliseconds(config::block_interval_ms), uint32_t skip_flag = 0/*skip_missed_block_penalty*/ )override { + signed_block_ptr produce_empty_block( fc::microseconds skip_time = fc::milliseconds(config::block_interval_ms) )override { control->abort_block(); - return _produce_block(skip_time, true, skip_flag); + return _produce_block(skip_time, true); } signed_block_ptr finish_block()override { From 572dd5d5c42f36efb120fabf44cbd73275fe7498 Mon Sep 17 00:00:00 2001 From: Brian Johnson Date: Fri, 5 Apr 2019 14:08:37 -0500 Subject: [PATCH 304/680] Fix for bad alloc for catchup test. --- tests/nodeos_startup_catchup.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tests/nodeos_startup_catchup.py b/tests/nodeos_startup_catchup.py index e75fe165230..03a55936385 100755 --- a/tests/nodeos_startup_catchup.py +++ b/tests/nodeos_startup_catchup.py @@ -7,6 +7,7 @@ from WalletMgr import WalletMgr from Node import BlockType from Node import Node +import signal from TestHelper import AppArgs from TestHelper import TestHelper @@ -167,6 +168,8 @@ def head(node): Print("Verify catchup node is advancing to producer") # verify catchup node is advancing to producer catchupNode.waitForBlock(lastLibNum, timeout=(numBlocksToCatchup)/2, blockType=BlockType.lib) + catchupNode.kill(signal.SIGTERM) + catchupNode.popenProc=None testSuccessful=True From 0fe727a587b681b0dd637b6ad8f45ad51fb4ce50 Mon Sep 17 00:00:00 2001 From: Zach Butler Date: Fri, 5 Apr 2019 16:40:34 -0400 Subject: [PATCH 305/680] Removed remaining bnet tests --- tests/CMakeLists.txt | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index f2e6958eb15..747c2f85b0f 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -51,12 +51,8 @@ add_test(NAME plugin_test COMMAND plugin_test --report_level=detailed --color_ou add_test(NAME nodeos_sanity_test COMMAND tests/nodeos_run_test.py -v --sanity-test --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST nodeos_sanity_test PROPERTY LABELS nonparallelizable_tests) -add_test(NAME nodeos_sanity_bnet_test COMMAND tests/nodeos_run_test.py -v --sanity-test --clean-run --p2p-plugin bnet --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) -set_property(TEST nodeos_sanity_bnet_test PROPERTY LABELS nonparallelizable_tests) add_test(NAME nodeos_run_test COMMAND tests/nodeos_run_test.py -v --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST nodeos_run_test PROPERTY LABELS nonparallelizable_tests) -add_test(NAME nodeos_run_bnet_test COMMAND tests/nodeos_run_test.py -v --clean-run --p2p-plugin bnet --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) -set_property(TEST nodeos_run_bnet_test PROPERTY LABELS nonparallelizable_tests) add_test(NAME p2p_dawn515_test COMMAND tests/p2p_tests/dawn_515/test.sh WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST p2p_dawn515_test PROPERTY LABELS nonparallelizable_tests) @@ -67,8 +63,6 @@ endif() add_test(NAME distributed-transactions-test COMMAND tests/distributed-transactions-test.py -d 2 -p 4 -n 6 -v --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST distributed-transactions-test PROPERTY LABELS nonparallelizable_tests) -add_test(NAME distributed-transactions-bnet-test COMMAND tests/distributed-transactions-test.py -d 2 -p 1 -n 4 --p2p-plugin bnet -v --clean-run --dump-error-detail WORKING_DIRECTORY 
${CMAKE_BINARY_DIR}) -set_property(TEST distributed-transactions-bnet-test PROPERTY LABELS nonparallelizable_tests) add_test(NAME restart-scenarios-test-resync COMMAND tests/restart-scenarios-test.py -c resync -p4 -v --clean-run --dump-error-details WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) set_property(TEST restart-scenarios-test-resync PROPERTY LABELS nonparallelizable_tests) add_test(NAME restart-scenarios-test-hard_replay COMMAND tests/restart-scenarios-test.py -c hardReplay -p4 -v --clean-run --dump-error-details WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) @@ -122,7 +116,7 @@ if(ENABLE_COVERAGE_TESTING) endif() # NOT GENHTML_PATH # no spaces allowed within tests list - set(ctest_tests 'plugin_test|p2p_dawn515_test|nodeos_run_test|bnet_nodeos_run_test|distributed-transactions-test|restart-scenarios-test_resync') + set(ctest_tests 'plugin_test|p2p_dawn515_test|nodeos_run_test|distributed-transactions-test|restart-scenarios-test_resync') set(ctest_exclude_tests 'nodeos_run_remote_test|nodeos_run_test-mongodb|distributed-transactions-remote-test|restart-scenarios-test_replay') # Setup target From 2ae9463ebe4235870747cf8634a5d6a152a73395 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Fri, 5 Apr 2019 19:53:30 -0400 Subject: [PATCH 306/680] Increase the timeout for nodeos_startup_catchup_lr_test from default of 1500 to 3000 seconds --- tests/CMakeLists.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index f2e6958eb15..d57f4d9d81b 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -103,6 +103,7 @@ add_test(NAME nodeos_under_min_avail_ram_lr_test COMMAND tests/nodeos_under_min_ set_property(TEST nodeos_under_min_avail_ram_lr_test PROPERTY LABELS long_running_tests) add_test(NAME nodeos_startup_catchup_lr_test COMMAND tests/nodeos_startup_catchup.py -v --clean-run --dump-error-detail WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) +set_tests_properties(nodeos_startup_catchup_lr_test PROPERTIES TIMEOUT 3000) set_property(TEST nodeos_startup_catchup_lr_test PROPERTY LABELS long_running_tests) if(ENABLE_COVERAGE_TESTING) From c622c8d0188f3b583cdf542a0fb21b257e55ba69 Mon Sep 17 00:00:00 2001 From: Kayan Date: Mon, 8 Apr 2019 16:14:02 +0800 Subject: [PATCH 307/680] improve test cases --- .../testing/include/eosio/testing/tester.hpp | 2 +- libraries/testing/tester.cpp | 4 +- unittests/api_tests.cpp | 150 +++++++++++++----- .../test-contracts/test_api/test_action.cpp | 12 +- 4 files changed, 118 insertions(+), 50 deletions(-) diff --git a/libraries/testing/include/eosio/testing/tester.hpp b/libraries/testing/include/eosio/testing/tester.hpp index 154ebef7410..0bccbfe8034 100644 --- a/libraries/testing/include/eosio/testing/tester.hpp +++ b/libraries/testing/include/eosio/testing/tester.hpp @@ -126,7 +126,7 @@ namespace eosio { namespace testing { vector get_scheduled_transactions() const; transaction_trace_ptr push_transaction( packed_transaction& trx, fc::time_point deadline = fc::time_point::maximum(), uint32_t billed_cpu_time_us = DEFAULT_BILLED_CPU_TIME_US ); - transaction_trace_ptr push_transaction( signed_transaction& trx, fc::time_point deadline = fc::time_point::maximum(), uint32_t billed_cpu_time_us = DEFAULT_BILLED_CPU_TIME_US ); + transaction_trace_ptr push_transaction( signed_transaction& trx, fc::time_point deadline = fc::time_point::maximum(), uint32_t billed_cpu_time_us = DEFAULT_BILLED_CPU_TIME_US, bool no_throw = false ); action_result push_action(action&& cert_act, uint64_t authorizer); // TODO/QUESTION: Is this needed? 
transaction_trace_ptr push_action( const account_name& code, diff --git a/libraries/testing/tester.cpp b/libraries/testing/tester.cpp index 93dc4b5f61f..82f91b90576 100644 --- a/libraries/testing/tester.cpp +++ b/libraries/testing/tester.cpp @@ -454,7 +454,8 @@ namespace eosio { namespace testing { transaction_trace_ptr base_tester::push_transaction( signed_transaction& trx, fc::time_point deadline, - uint32_t billed_cpu_time_us + uint32_t billed_cpu_time_us, + bool no_throw ) { try { if( !control->is_building_block() ) @@ -466,6 +467,7 @@ namespace eosio { namespace testing { } auto r = control->push_transaction( std::make_shared(trx,c), deadline, billed_cpu_time_us ); + if (no_throw) return r; if( r->except_ptr ) std::rethrow_exception( r->except_ptr ); if( r->except) throw *r->except; return r; diff --git a/unittests/api_tests.cpp b/unittests/api_tests.cpp index 1290b02c33a..c94c9fdb2b3 100644 --- a/unittests/api_tests.cpp +++ b/unittests/api_tests.cpp @@ -218,7 +218,7 @@ transaction_trace_ptr CallAction(TESTER& test, T ac, const vector& } template -transaction_trace_ptr CallFunction(TESTER& test, T ac, const vector& data, const vector& scope = {N(testapi)}) { +transaction_trace_ptr CallFunction(TESTER& test, T ac, const vector& data, const vector& scope = {N(testapi)}, bool no_throw = false) { { signed_transaction trx; @@ -238,41 +238,11 @@ transaction_trace_ptr CallFunction(TESTER& test, T ac, const vector& data, flat_set keys; trx.get_signature_keys(test.control->get_chain_id(), fc::time_point::maximum(), keys); - auto res = test.push_transaction(trx); - BOOST_CHECK_EQUAL(res->receipt->status, transaction_receipt::executed); - test.produce_block(); - return res; - } -} - -template -transaction_trace_ptr CallFunctionExpectFail(TESTER& test, T ac, const vector& data, const vector& scope = {N(testapi)}) { - { - signed_transaction trx; - - auto pl = vector{{scope[0], config::active_name}}; - if (scope.size() > 1) - for (unsigned int i=1; i < scope.size(); i++) - pl.push_back({scope[i], config::active_name}); - - action act(pl, ac); - act.data = data; - act.authorization = {{N(testapi), config::active_name}}; - trx.actions.push_back(act); - - test.set_transaction_headers(trx, test.DEFAULT_EXPIRATION_DELTA); - auto sigs = trx.sign(test.get_private_key(scope[0], "active"), test.control->get_chain_id()); - - flat_set keys; - trx.get_signature_keys(test.control->get_chain_id(), fc::time_point::maximum(), keys); - - auto c = packed_transaction::none; - - if( fc::raw::pack_size(trx) > 1000 ) { - c = packed_transaction::zlib; + auto res = test.push_transaction(trx, fc::time_point::maximum(), TESTER::DEFAULT_BILLED_CPU_TIME_US, no_throw); + if (!no_throw) { + BOOST_CHECK_EQUAL(res->receipt->status, transaction_receipt::executed); } - - auto res = test.control->push_transaction(std::make_shared(trx,c), fc::time_point::maximum(), 100); + test.produce_block(); return res; } } @@ -280,7 +250,7 @@ transaction_trace_ptr CallFunctionExpectFail(TESTER& test, T ac, const vector{}, DATA) #define CALL_TEST_FUNCTION_SYSTEM(_TESTER, CLS, MTH, DATA) CallFunction(_TESTER, test_chain_action{}, DATA, {config::system_account_name} ) #define CALL_TEST_FUNCTION_SCOPE(_TESTER, CLS, MTH, DATA, ACCOUNT) CallFunction(_TESTER, test_api_action{}, DATA, ACCOUNT) -#define CALL_TEST_FUNCTION_SCOPE_EXPECT_FAIL(_TESTER, CLS, MTH, DATA, ACCOUNT) CallFunctionExpectFail(_TESTER, test_api_action{}, DATA, ACCOUNT) +#define CALL_TEST_FUNCTION_NO_THROW(_TESTER, CLS, MTH, DATA) CallFunction(_TESTER, test_api_action{}, DATA, 
{N(testapi)}, true) #define CALL_TEST_FUNCTION_AND_CHECK_EXCEPTION(_TESTER, CLS, MTH, DATA, EXC, EXC_MESSAGE) \ BOOST_CHECK_EXCEPTION( \ CALL_TEST_FUNCTION( _TESTER, CLS, MTH, DATA), \ @@ -2127,66 +2097,99 @@ BOOST_FIXTURE_TEST_CASE(action_ordinal_test, TESTER) { try { BOOST_REQUIRE_EQUAL((int)atrace[0].action_ordinal, 1); BOOST_REQUIRE_EQUAL((int)atrace[0].creator_action_ordinal, 0); BOOST_REQUIRE_EQUAL((int)atrace[0].parent_action_ordinal, 0); + BOOST_REQUIRE_EQUAL(atrace[0].receiver.value, N(testapi)); + BOOST_REQUIRE_EQUAL(atrace[0].act.account.value, N(testapi)); + BOOST_REQUIRE_EQUAL(atrace[0].act.name.value, TEST_METHOD("test_action", "test_action_ordinal1")); BOOST_REQUIRE_EQUAL(atrace[0].receipt.valid(), true); int start_gseq = atrace[0].receipt->global_sequence; BOOST_REQUIRE_EQUAL((int)atrace[1].action_ordinal,2); BOOST_REQUIRE_EQUAL((int)atrace[1].creator_action_ordinal, 1); BOOST_REQUIRE_EQUAL((int)atrace[1].parent_action_ordinal, 1); + BOOST_REQUIRE_EQUAL(atrace[1].receiver.value, N(bob)); + BOOST_REQUIRE_EQUAL(atrace[1].act.account.value, N(testapi)); + BOOST_REQUIRE_EQUAL(atrace[1].act.name.value, TEST_METHOD("test_action", "test_action_ordinal1")); BOOST_REQUIRE_EQUAL(atrace[1].receipt.valid(), true); BOOST_REQUIRE_EQUAL(atrace[1].receipt->global_sequence, start_gseq + 1); BOOST_REQUIRE_EQUAL((int)atrace[2].action_ordinal, 3); BOOST_REQUIRE_EQUAL((int)atrace[2].creator_action_ordinal, 1); BOOST_REQUIRE_EQUAL((int)atrace[2].parent_action_ordinal, 1); + BOOST_REQUIRE_EQUAL(atrace[2].receiver.value, N(testapi)); + BOOST_REQUIRE_EQUAL(atrace[2].act.account.value, N(testapi)); + BOOST_REQUIRE_EQUAL(atrace[2].act.name.value, TEST_METHOD("test_action", "test_action_ordinal2")); BOOST_REQUIRE_EQUAL(atrace[2].receipt.valid(), true); BOOST_REQUIRE_EQUAL(atrace[2].receipt->global_sequence, start_gseq + 4); BOOST_REQUIRE_EQUAL((int)atrace[3].action_ordinal, 4); BOOST_REQUIRE_EQUAL((int)atrace[3].creator_action_ordinal, 1); BOOST_REQUIRE_EQUAL((int)atrace[3].parent_action_ordinal, 1); + BOOST_REQUIRE_EQUAL(atrace[3].receiver.value, N(testapi)); + BOOST_REQUIRE_EQUAL(atrace[3].act.account.value, N(testapi)); + BOOST_REQUIRE_EQUAL(atrace[3].act.name.value, TEST_METHOD("test_action", "test_action_ordinal3")); BOOST_REQUIRE_EQUAL(atrace[3].receipt.valid(), true); BOOST_REQUIRE_EQUAL(atrace[3].receipt->global_sequence, start_gseq + 8); BOOST_REQUIRE_EQUAL((int)atrace[4].action_ordinal, 5); BOOST_REQUIRE_EQUAL((int)atrace[4].creator_action_ordinal, 1); BOOST_REQUIRE_EQUAL((int)atrace[4].parent_action_ordinal, 1); + BOOST_REQUIRE_EQUAL(atrace[4].receiver.value, N(charlie)); + BOOST_REQUIRE_EQUAL(atrace[4].act.account.value, N(testapi)); + BOOST_REQUIRE_EQUAL(atrace[4].act.name.value, TEST_METHOD("test_action", "test_action_ordinal1")); BOOST_REQUIRE_EQUAL(atrace[4].receipt.valid(), true); BOOST_REQUIRE_EQUAL(atrace[4].receipt->global_sequence, start_gseq + 2); BOOST_REQUIRE_EQUAL((int)atrace[5].action_ordinal, 6); BOOST_REQUIRE_EQUAL((int)atrace[5].creator_action_ordinal, 2); BOOST_REQUIRE_EQUAL((int)atrace[5].parent_action_ordinal, 1); + BOOST_REQUIRE_EQUAL(atrace[5].receiver.value, N(bob)); + BOOST_REQUIRE_EQUAL(atrace[5].act.account.value, N(bob)); + BOOST_REQUIRE_EQUAL(atrace[5].act.name.value, TEST_METHOD("test_action", "test_action_ordinal_foo")); BOOST_REQUIRE_EQUAL(atrace[5].receipt.valid(), true); BOOST_REQUIRE_EQUAL(atrace[5].receipt->global_sequence, start_gseq + 9); BOOST_REQUIRE_EQUAL((int)atrace[6].action_ordinal, 7); BOOST_REQUIRE_EQUAL((int)atrace[6].creator_action_ordinal,2); 
BOOST_REQUIRE_EQUAL((int)atrace[6].parent_action_ordinal, 1); + BOOST_REQUIRE_EQUAL(atrace[6].receiver.value, N(david)); + BOOST_REQUIRE_EQUAL(atrace[6].act.account.value, N(testapi)); + BOOST_REQUIRE_EQUAL(atrace[6].act.name.value, TEST_METHOD("test_action", "test_action_ordinal1")); BOOST_REQUIRE_EQUAL(atrace[6].receipt.valid(), true); BOOST_REQUIRE_EQUAL(atrace[6].receipt->global_sequence, start_gseq + 3); BOOST_REQUIRE_EQUAL((int)atrace[7].action_ordinal, 8); BOOST_REQUIRE_EQUAL((int)atrace[7].creator_action_ordinal, 5); BOOST_REQUIRE_EQUAL((int)atrace[7].parent_action_ordinal, 1); + BOOST_REQUIRE_EQUAL(atrace[7].receiver.value, N(charlie)); + BOOST_REQUIRE_EQUAL(atrace[7].act.account.value, N(charlie)); + BOOST_REQUIRE_EQUAL(atrace[7].act.name.value, TEST_METHOD("test_action", "test_action_ordinal_bar")); BOOST_REQUIRE_EQUAL(atrace[7].receipt.valid(), true); BOOST_REQUIRE_EQUAL(atrace[7].receipt->global_sequence, start_gseq + 10); BOOST_REQUIRE_EQUAL((int)atrace[8].action_ordinal, 9); BOOST_REQUIRE_EQUAL((int)atrace[8].creator_action_ordinal, 3); BOOST_REQUIRE_EQUAL((int)atrace[8].parent_action_ordinal, 3); + BOOST_REQUIRE_EQUAL(atrace[8].receiver.value, N(david)); + BOOST_REQUIRE_EQUAL(atrace[8].act.account.value, N(testapi)); + BOOST_REQUIRE_EQUAL(atrace[8].act.name.value, TEST_METHOD("test_action", "test_action_ordinal2")); BOOST_REQUIRE_EQUAL(atrace[8].receipt.valid(), true); BOOST_REQUIRE_EQUAL(atrace[8].receipt->global_sequence, start_gseq + 5); BOOST_REQUIRE_EQUAL((int)atrace[9].action_ordinal, 10); BOOST_REQUIRE_EQUAL((int)atrace[9].creator_action_ordinal, 3); BOOST_REQUIRE_EQUAL((int)atrace[9].parent_action_ordinal, 3); + BOOST_REQUIRE_EQUAL(atrace[9].receiver.value, N(erin)); + BOOST_REQUIRE_EQUAL(atrace[9].act.account.value, N(testapi)); + BOOST_REQUIRE_EQUAL(atrace[9].act.name.value, TEST_METHOD("test_action", "test_action_ordinal2")); BOOST_REQUIRE_EQUAL(atrace[9].receipt.valid(), true); BOOST_REQUIRE_EQUAL(atrace[9].receipt->global_sequence, start_gseq + 6); BOOST_REQUIRE_EQUAL((int)atrace[10].action_ordinal, 11); BOOST_REQUIRE_EQUAL((int)atrace[10].creator_action_ordinal, 3); BOOST_REQUIRE_EQUAL((int)atrace[10].parent_action_ordinal, 3); + BOOST_REQUIRE_EQUAL(atrace[10].receiver.value, N(testapi)); + BOOST_REQUIRE_EQUAL(atrace[10].act.account.value, N(testapi)); + BOOST_REQUIRE_EQUAL(atrace[10].act.name.value, TEST_METHOD("test_action", "test_action_ordinal4")); BOOST_REQUIRE_EQUAL(atrace[10].receipt.valid(), true); BOOST_REQUIRE_EQUAL(atrace[10].receipt->global_sequence, start_gseq + 7); } FC_LOG_AND_RETHROW() } @@ -2218,8 +2221,7 @@ BOOST_FIXTURE_TEST_CASE(action_ordinal_failtest1, TESTER) { try { produce_blocks(1); transaction_trace_ptr txn_trace = - CALL_TEST_FUNCTION_SCOPE_EXPECT_FAIL( *this, "test_action", "test_action_ordinal1", - {}, vector{ N(testapi)}); + CALL_TEST_FUNCTION_NO_THROW( *this, "test_action", "test_action_ordinal1", {}); BOOST_REQUIRE_EQUAL( validate(), true ); @@ -2232,6 +2234,9 @@ BOOST_FIXTURE_TEST_CASE(action_ordinal_failtest1, TESTER) { try { BOOST_REQUIRE_EQUAL((int)atrace[0].action_ordinal, 1); BOOST_REQUIRE_EQUAL((int)atrace[0].creator_action_ordinal, 0); BOOST_REQUIRE_EQUAL((int)atrace[0].parent_action_ordinal, 0); + BOOST_REQUIRE_EQUAL(atrace[0].receiver.value, N(testapi)); + BOOST_REQUIRE_EQUAL(atrace[0].act.account.value, N(testapi)); + BOOST_REQUIRE_EQUAL(atrace[0].act.name.value, TEST_METHOD("test_action", "test_action_ordinal1")); BOOST_REQUIRE_EQUAL(atrace[0].receipt.valid(), false); BOOST_REQUIRE_EQUAL(atrace[0].except.valid(), 
true); BOOST_REQUIRE_EQUAL(atrace[0].except->code(), 3050003); @@ -2240,6 +2245,9 @@ BOOST_FIXTURE_TEST_CASE(action_ordinal_failtest1, TESTER) { try { BOOST_REQUIRE_EQUAL((int)atrace[1].action_ordinal, 2); BOOST_REQUIRE_EQUAL((int)atrace[1].creator_action_ordinal, 1); BOOST_REQUIRE_EQUAL((int)atrace[1].parent_action_ordinal, 1); + BOOST_REQUIRE_EQUAL(atrace[1].receiver.value, N(bob)); + BOOST_REQUIRE_EQUAL(atrace[1].act.account.value, N(testapi)); + BOOST_REQUIRE_EQUAL(atrace[1].act.name.value, TEST_METHOD("test_action", "test_action_ordinal1")); BOOST_REQUIRE_EQUAL(atrace[1].receipt.valid(), false); BOOST_REQUIRE_EQUAL(atrace[1].except.valid(), false); @@ -2247,6 +2255,9 @@ BOOST_FIXTURE_TEST_CASE(action_ordinal_failtest1, TESTER) { try { BOOST_REQUIRE_EQUAL((int)atrace[2].action_ordinal, 3); BOOST_REQUIRE_EQUAL((int)atrace[2].creator_action_ordinal, 1); BOOST_REQUIRE_EQUAL((int)atrace[2].parent_action_ordinal, 1); + BOOST_REQUIRE_EQUAL(atrace[2].receiver.value, N(testapi)); + BOOST_REQUIRE_EQUAL(atrace[2].act.account.value, N(testapi)); + BOOST_REQUIRE_EQUAL(atrace[2].act.name.value, TEST_METHOD("test_action", "test_action_ordinal2")); BOOST_REQUIRE_EQUAL(atrace[2].receipt.valid(), false); BOOST_REQUIRE_EQUAL(atrace[2].except.valid(), false); @@ -2278,8 +2289,7 @@ BOOST_FIXTURE_TEST_CASE(action_ordinal_failtest2, TESTER) { try { produce_blocks(1); transaction_trace_ptr txn_trace = - CALL_TEST_FUNCTION_SCOPE_EXPECT_FAIL( *this, "test_action", "test_action_ordinal1", - {}, vector{ N(testapi)}); + CALL_TEST_FUNCTION_NO_THROW( *this, "test_action", "test_action_ordinal1", {}); BOOST_REQUIRE_EQUAL( validate(), true ); @@ -2292,6 +2302,9 @@ BOOST_FIXTURE_TEST_CASE(action_ordinal_failtest2, TESTER) { try { BOOST_REQUIRE_EQUAL((int)atrace[0].action_ordinal, 1); BOOST_REQUIRE_EQUAL((int)atrace[0].creator_action_ordinal, 0); BOOST_REQUIRE_EQUAL((int)atrace[0].parent_action_ordinal, 0); + BOOST_REQUIRE_EQUAL(atrace[0].receiver.value, N(testapi)); + BOOST_REQUIRE_EQUAL(atrace[0].act.account.value, N(testapi)); + BOOST_REQUIRE_EQUAL(atrace[0].act.name.value, TEST_METHOD("test_action", "test_action_ordinal1")); BOOST_REQUIRE_EQUAL(atrace[0].receipt.valid(), true); BOOST_REQUIRE_EQUAL(atrace[0].except.valid(), false); int start_gseq = atrace[0].receipt->global_sequence; @@ -2300,6 +2313,9 @@ BOOST_FIXTURE_TEST_CASE(action_ordinal_failtest2, TESTER) { try { BOOST_REQUIRE_EQUAL((int)atrace[1].action_ordinal,2); BOOST_REQUIRE_EQUAL((int)atrace[1].creator_action_ordinal, 1); BOOST_REQUIRE_EQUAL((int)atrace[1].parent_action_ordinal, 1); + BOOST_REQUIRE_EQUAL(atrace[1].receiver.value, N(bob)); + BOOST_REQUIRE_EQUAL(atrace[1].act.account.value, N(testapi)); + BOOST_REQUIRE_EQUAL(atrace[1].act.name.value, TEST_METHOD("test_action", "test_action_ordinal1")); BOOST_REQUIRE_EQUAL(atrace[1].receipt.valid(), true); BOOST_REQUIRE_EQUAL(atrace[1].receipt->global_sequence, start_gseq + 1); @@ -2307,6 +2323,9 @@ BOOST_FIXTURE_TEST_CASE(action_ordinal_failtest2, TESTER) { try { BOOST_REQUIRE_EQUAL((int)atrace[2].action_ordinal, 3); BOOST_REQUIRE_EQUAL((int)atrace[2].creator_action_ordinal, 1); BOOST_REQUIRE_EQUAL((int)atrace[2].parent_action_ordinal, 1); + BOOST_REQUIRE_EQUAL(atrace[2].receiver.value, N(testapi)); + BOOST_REQUIRE_EQUAL(atrace[2].act.account.value, N(testapi)); + BOOST_REQUIRE_EQUAL(atrace[2].act.name.value, TEST_METHOD("test_action", "test_action_ordinal2")); BOOST_REQUIRE_EQUAL(atrace[2].receipt.valid(), false); BOOST_REQUIRE_EQUAL(atrace[2].except.valid(), false); @@ -2314,6 +2333,9 @@ 
BOOST_FIXTURE_TEST_CASE(action_ordinal_failtest2, TESTER) { try { BOOST_REQUIRE_EQUAL((int)atrace[3].action_ordinal, 4); BOOST_REQUIRE_EQUAL((int)atrace[3].creator_action_ordinal, 1); BOOST_REQUIRE_EQUAL((int)atrace[3].parent_action_ordinal, 1); + BOOST_REQUIRE_EQUAL(atrace[3].receiver.value, N(testapi)); + BOOST_REQUIRE_EQUAL(atrace[3].act.account.value, N(testapi)); + BOOST_REQUIRE_EQUAL(atrace[3].act.name.value, TEST_METHOD("test_action", "test_action_ordinal3")); BOOST_REQUIRE_EQUAL(atrace[3].receipt.valid(), false); BOOST_REQUIRE_EQUAL(atrace[3].except.valid(), false); @@ -2321,6 +2343,9 @@ BOOST_FIXTURE_TEST_CASE(action_ordinal_failtest2, TESTER) { try { BOOST_REQUIRE_EQUAL((int)atrace[4].action_ordinal, 5); BOOST_REQUIRE_EQUAL((int)atrace[4].creator_action_ordinal, 1); BOOST_REQUIRE_EQUAL((int)atrace[4].parent_action_ordinal, 1); + BOOST_REQUIRE_EQUAL(atrace[4].receiver.value, N(charlie)); + BOOST_REQUIRE_EQUAL(atrace[4].act.account.value, N(testapi)); + BOOST_REQUIRE_EQUAL(atrace[4].act.name.value, TEST_METHOD("test_action", "test_action_ordinal1")); BOOST_REQUIRE_EQUAL(atrace[4].receipt.valid(), false); BOOST_REQUIRE_EQUAL(atrace[4].except.valid(), true); BOOST_REQUIRE_EQUAL(atrace[4].except->code(), 3050003); @@ -2329,6 +2354,9 @@ BOOST_FIXTURE_TEST_CASE(action_ordinal_failtest2, TESTER) { try { BOOST_REQUIRE_EQUAL((int)atrace[5].action_ordinal, 6); BOOST_REQUIRE_EQUAL((int)atrace[5].creator_action_ordinal, 2); BOOST_REQUIRE_EQUAL((int)atrace[5].parent_action_ordinal, 1); + BOOST_REQUIRE_EQUAL(atrace[5].receiver.value, N(bob)); + BOOST_REQUIRE_EQUAL(atrace[5].act.account.value, N(bob)); + BOOST_REQUIRE_EQUAL(atrace[5].act.name.value, TEST_METHOD("test_action", "test_action_ordinal_foo")); BOOST_REQUIRE_EQUAL(atrace[5].receipt.valid(), false); BOOST_REQUIRE_EQUAL(atrace[5].except.valid(), false); @@ -2336,6 +2364,9 @@ BOOST_FIXTURE_TEST_CASE(action_ordinal_failtest2, TESTER) { try { BOOST_REQUIRE_EQUAL((int)atrace[6].action_ordinal, 7); BOOST_REQUIRE_EQUAL((int)atrace[6].creator_action_ordinal,2); BOOST_REQUIRE_EQUAL((int)atrace[6].parent_action_ordinal, 1); + BOOST_REQUIRE_EQUAL(atrace[6].receiver.value, N(david)); + BOOST_REQUIRE_EQUAL(atrace[6].act.account.value, N(testapi)); + BOOST_REQUIRE_EQUAL(atrace[6].act.name.value, TEST_METHOD("test_action", "test_action_ordinal1")); BOOST_REQUIRE_EQUAL(atrace[6].receipt.valid(), false); BOOST_REQUIRE_EQUAL(atrace[6].except.valid(), false); @@ -2343,6 +2374,9 @@ BOOST_FIXTURE_TEST_CASE(action_ordinal_failtest2, TESTER) { try { BOOST_REQUIRE_EQUAL((int)atrace[7].action_ordinal, 8); BOOST_REQUIRE_EQUAL((int)atrace[7].creator_action_ordinal, 5); BOOST_REQUIRE_EQUAL((int)atrace[7].parent_action_ordinal, 1); + BOOST_REQUIRE_EQUAL(atrace[7].receiver.value, N(charlie)); + BOOST_REQUIRE_EQUAL(atrace[7].act.account.value, N(charlie)); + BOOST_REQUIRE_EQUAL(atrace[7].act.name.value, TEST_METHOD("test_action", "test_action_ordinal_bar")); BOOST_REQUIRE_EQUAL(atrace[7].receipt.valid(), false); BOOST_REQUIRE_EQUAL(atrace[7].except.valid(), false); @@ -2374,8 +2408,7 @@ BOOST_FIXTURE_TEST_CASE(action_ordinal_failtest3, TESTER) { try { produce_blocks(1); transaction_trace_ptr txn_trace = - CALL_TEST_FUNCTION_SCOPE_EXPECT_FAIL( *this, "test_action", "test_action_ordinal1", - {}, vector{ N(testapi)}); + CALL_TEST_FUNCTION_NO_THROW( *this, "test_action", "test_action_ordinal1", {}); BOOST_REQUIRE_EQUAL( validate(), true ); @@ -2388,6 +2421,9 @@ BOOST_FIXTURE_TEST_CASE(action_ordinal_failtest3, TESTER) { try { 
BOOST_REQUIRE_EQUAL((int)atrace[0].action_ordinal, 1); BOOST_REQUIRE_EQUAL((int)atrace[0].creator_action_ordinal, 0); BOOST_REQUIRE_EQUAL((int)atrace[0].parent_action_ordinal, 0); + BOOST_REQUIRE_EQUAL(atrace[0].receiver.value, N(testapi)); + BOOST_REQUIRE_EQUAL(atrace[0].act.account.value, N(testapi)); + BOOST_REQUIRE_EQUAL(atrace[0].act.name.value, TEST_METHOD("test_action", "test_action_ordinal1")); BOOST_REQUIRE_EQUAL(atrace[0].receipt.valid(), true); BOOST_REQUIRE_EQUAL(atrace[0].except.valid(), false); int start_gseq = atrace[0].receipt->global_sequence; @@ -2396,6 +2432,9 @@ BOOST_FIXTURE_TEST_CASE(action_ordinal_failtest3, TESTER) { try { BOOST_REQUIRE_EQUAL((int)atrace[1].action_ordinal,2); BOOST_REQUIRE_EQUAL((int)atrace[1].creator_action_ordinal, 1); BOOST_REQUIRE_EQUAL((int)atrace[1].parent_action_ordinal, 1); + BOOST_REQUIRE_EQUAL(atrace[1].receiver.value, N(bob)); + BOOST_REQUIRE_EQUAL(atrace[1].act.account.value, N(testapi)); + BOOST_REQUIRE_EQUAL(atrace[1].act.name.value, TEST_METHOD("test_action", "test_action_ordinal1")); BOOST_REQUIRE_EQUAL(atrace[1].receipt.valid(), true); BOOST_REQUIRE_EQUAL(atrace[1].receipt->global_sequence, start_gseq + 1); @@ -2403,6 +2442,9 @@ BOOST_FIXTURE_TEST_CASE(action_ordinal_failtest3, TESTER) { try { BOOST_REQUIRE_EQUAL((int)atrace[2].action_ordinal, 3); BOOST_REQUIRE_EQUAL((int)atrace[2].creator_action_ordinal, 1); BOOST_REQUIRE_EQUAL((int)atrace[2].parent_action_ordinal, 1); + BOOST_REQUIRE_EQUAL(atrace[2].receiver.value, N(testapi)); + BOOST_REQUIRE_EQUAL(atrace[2].act.account.value, N(testapi)); + BOOST_REQUIRE_EQUAL(atrace[2].act.name.value, TEST_METHOD("test_action", "test_action_ordinal2")); BOOST_REQUIRE_EQUAL(atrace[2].receipt.valid(), true); BOOST_REQUIRE_EQUAL(atrace[2].receipt->global_sequence, start_gseq + 4); @@ -2410,6 +2452,9 @@ BOOST_FIXTURE_TEST_CASE(action_ordinal_failtest3, TESTER) { try { BOOST_REQUIRE_EQUAL((int)atrace[3].action_ordinal, 4); BOOST_REQUIRE_EQUAL((int)atrace[3].creator_action_ordinal, 1); BOOST_REQUIRE_EQUAL((int)atrace[3].parent_action_ordinal, 1); + BOOST_REQUIRE_EQUAL(atrace[3].receiver.value, N(testapi)); + BOOST_REQUIRE_EQUAL(atrace[3].act.account.value, N(testapi)); + BOOST_REQUIRE_EQUAL(atrace[3].act.name.value, TEST_METHOD("test_action", "test_action_ordinal3")); BOOST_REQUIRE_EQUAL(atrace[3].receipt.valid(), false); BOOST_REQUIRE_EQUAL(atrace[3].except.valid(), true); BOOST_REQUIRE_EQUAL(atrace[3].except->code(), 3050003); @@ -2418,6 +2463,9 @@ BOOST_FIXTURE_TEST_CASE(action_ordinal_failtest3, TESTER) { try { BOOST_REQUIRE_EQUAL((int)atrace[4].action_ordinal, 5); BOOST_REQUIRE_EQUAL((int)atrace[4].creator_action_ordinal, 1); BOOST_REQUIRE_EQUAL((int)atrace[4].parent_action_ordinal, 1); + BOOST_REQUIRE_EQUAL(atrace[4].receiver.value, N(charlie)); + BOOST_REQUIRE_EQUAL(atrace[4].act.account.value, N(testapi)); + BOOST_REQUIRE_EQUAL(atrace[4].act.name.value, TEST_METHOD("test_action", "test_action_ordinal1")); BOOST_REQUIRE_EQUAL(atrace[4].receipt.valid(), true); BOOST_REQUIRE_EQUAL(atrace[4].receipt->global_sequence, start_gseq + 2); @@ -2425,6 +2473,9 @@ BOOST_FIXTURE_TEST_CASE(action_ordinal_failtest3, TESTER) { try { BOOST_REQUIRE_EQUAL((int)atrace[5].action_ordinal, 6); BOOST_REQUIRE_EQUAL((int)atrace[5].creator_action_ordinal, 2); BOOST_REQUIRE_EQUAL((int)atrace[5].parent_action_ordinal, 1); + BOOST_REQUIRE_EQUAL(atrace[5].receiver.value, N(bob)); + BOOST_REQUIRE_EQUAL(atrace[5].act.account.value, N(bob)); + BOOST_REQUIRE_EQUAL(atrace[5].act.name.value, TEST_METHOD("test_action", 
"test_action_ordinal_foo")); BOOST_REQUIRE_EQUAL(atrace[5].receipt.valid(), false); BOOST_REQUIRE_EQUAL(atrace[5].except.valid(), false); @@ -2432,6 +2483,9 @@ BOOST_FIXTURE_TEST_CASE(action_ordinal_failtest3, TESTER) { try { BOOST_REQUIRE_EQUAL((int)atrace[6].action_ordinal, 7); BOOST_REQUIRE_EQUAL((int)atrace[6].creator_action_ordinal,2); BOOST_REQUIRE_EQUAL((int)atrace[6].parent_action_ordinal, 1); + BOOST_REQUIRE_EQUAL(atrace[6].receiver.value, N(david)); + BOOST_REQUIRE_EQUAL(atrace[6].act.account.value, N(testapi)); + BOOST_REQUIRE_EQUAL(atrace[6].act.name.value, TEST_METHOD("test_action", "test_action_ordinal1")); BOOST_REQUIRE_EQUAL(atrace[6].receipt.valid(), true); BOOST_REQUIRE_EQUAL(atrace[6].receipt->global_sequence, start_gseq + 3); @@ -2439,6 +2493,9 @@ BOOST_FIXTURE_TEST_CASE(action_ordinal_failtest3, TESTER) { try { BOOST_REQUIRE_EQUAL((int)atrace[7].action_ordinal, 8); BOOST_REQUIRE_EQUAL((int)atrace[7].creator_action_ordinal, 5); BOOST_REQUIRE_EQUAL((int)atrace[7].parent_action_ordinal, 1); + BOOST_REQUIRE_EQUAL(atrace[7].receiver.value, N(charlie)); + BOOST_REQUIRE_EQUAL(atrace[7].act.account.value, N(charlie)); + BOOST_REQUIRE_EQUAL(atrace[7].act.name.value, TEST_METHOD("test_action", "test_action_ordinal_bar")); BOOST_REQUIRE_EQUAL(atrace[7].receipt.valid(), false); BOOST_REQUIRE_EQUAL(atrace[7].except.valid(), false); @@ -2446,6 +2503,9 @@ BOOST_FIXTURE_TEST_CASE(action_ordinal_failtest3, TESTER) { try { BOOST_REQUIRE_EQUAL((int)atrace[8].action_ordinal, 9); BOOST_REQUIRE_EQUAL((int)atrace[8].creator_action_ordinal, 3); BOOST_REQUIRE_EQUAL((int)atrace[8].parent_action_ordinal, 3); + BOOST_REQUIRE_EQUAL(atrace[8].receiver.value, N(david)); + BOOST_REQUIRE_EQUAL(atrace[8].act.account.value, N(testapi)); + BOOST_REQUIRE_EQUAL(atrace[8].act.name.value, TEST_METHOD("test_action", "test_action_ordinal2")); BOOST_REQUIRE_EQUAL(atrace[8].receipt.valid(), true); BOOST_REQUIRE_EQUAL(atrace[8].receipt->global_sequence, start_gseq + 5); @@ -2453,6 +2513,9 @@ BOOST_FIXTURE_TEST_CASE(action_ordinal_failtest3, TESTER) { try { BOOST_REQUIRE_EQUAL((int)atrace[9].action_ordinal, 10); BOOST_REQUIRE_EQUAL((int)atrace[9].creator_action_ordinal, 3); BOOST_REQUIRE_EQUAL((int)atrace[9].parent_action_ordinal, 3); + BOOST_REQUIRE_EQUAL(atrace[9].receiver.value, N(erin)); + BOOST_REQUIRE_EQUAL(atrace[9].act.account.value, N(testapi)); + BOOST_REQUIRE_EQUAL(atrace[9].act.name.value, TEST_METHOD("test_action", "test_action_ordinal2")); BOOST_REQUIRE_EQUAL(atrace[9].receipt.valid(), true); BOOST_REQUIRE_EQUAL(atrace[9].receipt->global_sequence, start_gseq + 6); @@ -2460,6 +2523,9 @@ BOOST_FIXTURE_TEST_CASE(action_ordinal_failtest3, TESTER) { try { BOOST_REQUIRE_EQUAL((int)atrace[10].action_ordinal, 11); BOOST_REQUIRE_EQUAL((int)atrace[10].creator_action_ordinal, 3); BOOST_REQUIRE_EQUAL((int)atrace[10].parent_action_ordinal, 3); + BOOST_REQUIRE_EQUAL(atrace[10].receiver.value, N(testapi)); + BOOST_REQUIRE_EQUAL(atrace[10].act.account.value, N(testapi)); + BOOST_REQUIRE_EQUAL(atrace[10].act.name.value, TEST_METHOD("test_action", "test_action_ordinal4")); BOOST_REQUIRE_EQUAL(atrace[10].receipt.valid(), true); BOOST_REQUIRE_EQUAL(atrace[10].receipt->global_sequence, start_gseq + 7); diff --git a/unittests/test-contracts/test_api/test_action.cpp b/unittests/test-contracts/test_api/test_action.cpp index 6424d05f513..e371f336cc0 100644 --- a/unittests/test-contracts/test_api/test_action.cpp +++ b/unittests/test-contracts/test_api/test_action.cpp @@ -264,12 +264,12 @@ void 
test_action::test_action_ordinal1(uint64_t receiver, uint64_t code, uint64_ uint64_t _self = receiver; if (receiver == "testapi"_n.value) { print("exec 1"); - eosio::require_recipient( "bob"_n ); //-> exec2 + eosio::require_recipient( "bob"_n ); //-> exec 2 which would then cause execution of 4, 10 eosio::action act1({name(_self), "active"_n}, name(_self), name(WASM_TEST_ACTION("test_action", "test_action_ordinal2")), std::tuple<>()); - act1.send(); // -> exec 5, 6, 7 + act1.send(); // -> exec 5 which would then cause execution of 6, 7, 8 if (is_account("fail1"_n)) { eosio_assert(false, "fail at point 1"); @@ -280,14 +280,14 @@ void test_action::test_action_ordinal1(uint64_t receiver, uint64_t code, uint64_ std::tuple<>()); act2.send(); // -> exec 9 - eosio::require_recipient( "charlie"_n ); // -> exec 3 + eosio::require_recipient( "charlie"_n ); // -> exec 3 which would then cause execution of 11 } else if (receiver == "bob"_n.value) { print("exec 2"); eosio::action act1({name(_self), "active"_n}, name(_self), name(WASM_TEST_ACTION("test_action", "test_action_ordinal_foo")), std::tuple<>()); - act1.send(); + act1.send(); // -> exec 10 eosio::require_recipient( "david"_n ); // -> exec 4 } else if (receiver == "charlie"_n.value) { @@ -311,8 +311,8 @@ void test_action::test_action_ordinal2(uint64_t receiver, uint64_t code, uint64_ uint64_t _self = receiver; if (receiver == "testapi"_n.value) { print("exec 5"); - eosio::require_recipient( "david"_n ); - eosio::require_recipient( "erin"_n ); + eosio::require_recipient( "david"_n ); // -> exec 6 + eosio::require_recipient( "erin"_n ); // -> exec 7 eosio::action act1({name(_self), "active"_n}, name(_self), name(WASM_TEST_ACTION("test_action", "test_action_ordinal4")), From dcde1978c3f431c112fb2713a9e0a9af0e0c449b Mon Sep 17 00:00:00 2001 From: Andrianto Lie Date: Mon, 8 Apr 2019 17:57:45 +0800 Subject: [PATCH 308/680] Refactor push_transaction API to use direct approach instead of wrapping next --- plugins/chain_api_plugin/chain_api_plugin.cpp | 3 +- plugins/chain_plugin/chain_plugin.cpp | 169 +++++++++--------- .../eosio/chain_plugin/chain_plugin.hpp | 4 - 3 files changed, 85 insertions(+), 91 deletions(-) diff --git a/plugins/chain_api_plugin/chain_api_plugin.cpp b/plugins/chain_api_plugin/chain_api_plugin.cpp index bab3f5ce266..3a591eedbb5 100644 --- a/plugins/chain_api_plugin/chain_api_plugin.cpp +++ b/plugins/chain_api_plugin/chain_api_plugin.cpp @@ -107,8 +107,7 @@ void chain_api_plugin::plugin_startup() { CHAIN_RW_CALL_ASYNC(push_block, chain_apis::read_write::push_block_results, 202), CHAIN_RW_CALL_ASYNC(push_transaction, chain_apis::read_write::push_transaction_results, 202), CHAIN_RW_CALL_ASYNC(push_transactions, chain_apis::read_write::push_transactions_results, 202), - CHAIN_RW_CALL_ASYNC(send_transaction, chain_apis::read_write::send_transaction_results, 202), - CHAIN_RW_CALL_ASYNC(send_transactions, chain_apis::read_write::send_transactions_results, 202) + CHAIN_RW_CALL_ASYNC(send_transaction, chain_apis::read_write::send_transaction_results, 202) }); } diff --git a/plugins/chain_plugin/chain_plugin.cpp b/plugins/chain_plugin/chain_plugin.cpp index b586e6c2b6e..de6a9f77e8f 100644 --- a/plugins/chain_plugin/chain_plugin.cpp +++ b/plugins/chain_plugin/chain_plugin.cpp @@ -1880,81 +1880,7 @@ void read_write::push_block(read_write::push_block_params&& params, next_functio } CATCH_AND_CALL(next); } -static fc::variant convert_tx_trace_to_tree_struct(const fc::variant& tx_result) { - fc::mutable_variant_object tx_trace_mvo(tx_result); - 
-   std::multimap<fc::unsigned_int, fc::variant> act_trace_multimap;
-   for (auto& act_trace: tx_trace_mvo["action_traces"].get_array()) {
-      act_trace_multimap.emplace(act_trace["parent_action_ordinal"].as<fc::unsigned_int>(), act_trace);
-   }
-   std::function<vector<fc::variant>(fc::unsigned_int)> convert_act_trace_to_tree_struct = [&](fc::unsigned_int parent_action_ordinal) {
-      vector<fc::variant> reordered_act_traces;
-      auto range = act_trace_multimap.equal_range(parent_action_ordinal);
-      for (auto it = range.first; it != range.second; it++) {
-         fc::mutable_variant_object act_trace_mvo(it->second);
-         act_trace_mvo["inline_traces"] = convert_act_trace_to_tree_struct(it->second["action_ordinal"].as<fc::unsigned_int>());
-         act_trace_mvo.erase("action_ordinal");
-         act_trace_mvo.erase("creator_action_ordinal");
-         act_trace_mvo.erase("parent_action_ordinal");
-         act_trace_mvo.erase("receiver");
-         reordered_act_traces.push_back(act_trace_mvo);
-      }
-      std::sort(reordered_act_traces.begin(), reordered_act_traces.end(), [](auto& a, auto&b) {
-         return a["receipt"]["global_sequence"] < b["receipt"]["global_sequence"];
-      });
-      return reordered_act_traces;
-   };
-   tx_trace_mvo["action_traces"] = convert_act_trace_to_tree_struct(0);
-
-   tx_trace_mvo.erase("account_ram_delta");
-
-   return tx_trace_mvo;
-}
-
 void read_write::push_transaction(const read_write::push_transaction_params& params, next_function<read_write::push_transaction_results> next) {
-   try {
-      auto wrapped_next = [=](const fc::static_variant<fc::exception_ptr, read_write::send_transaction_results>& result) {
-         try {
-            if (result.contains<fc::exception_ptr>()) {
-               next(result);
-            } else {
-               read_write::send_transaction_results modified_result = std::move(result.get<read_write::send_transaction_results>());
-               modified_result.processed = convert_tx_trace_to_tree_struct(modified_result.processed);
-               next(modified_result);
-            }
-         } CATCH_AND_CALL(next);
-      };
-      send_transaction(params, wrapped_next);
-   } catch ( boost::interprocess::bad_alloc& ) {
-      chain_plugin::handle_db_exhaustion();
-   } CATCH_AND_CALL(next);
-}
-
-void read_write::push_transactions(const read_write::push_transactions_params& params, next_function<read_write::push_transactions_results> next) {
-   try {
-      auto wrapped_next = [=](const fc::static_variant<fc::exception_ptr, read_write::send_transactions_results>& result) {
-         try {
-            if (result.contains<fc::exception_ptr>()) {
-               next(result);
-            } else {
-               read_write::send_transactions_results modified_results = std::move(result.get<read_write::send_transactions_results>());
-               for (auto& modified_result: modified_results) {
-                  if (modified_result.transaction_id != transaction_id_type()) {
-                     modified_result.processed = convert_tx_trace_to_tree_struct(modified_result.processed);
-                  }
-               }
-               next(modified_results);
-            }
-         } CATCH_AND_CALL(next);
-      };
-      send_transactions(params, wrapped_next);
-   } catch ( boost::interprocess::bad_alloc& ) {
-      chain_plugin::handle_db_exhaustion();
-   } CATCH_AND_CALL(next);
-}
-
-void read_write::send_transaction(const read_write::send_transaction_params& params, next_function<read_write::send_transaction_results> next) {
-
+   try {
    auto pretty_input = std::make_shared<packed_transaction>();
    auto resolver = make_resolver(this, abi_serializer_max_time);
@@ -1974,12 +1900,51 @@ void read_write::send_transaction(const read_write::send_transaction_par
       fc::variant output;
       try {
          output = db.to_variant_with_abi( *trx_trace_ptr, abi_serializer_max_time );
+
+         // Create map of (parent_action_ordinal, global_sequence) with action trace
+         std::map< std::pair<uint32_t, uint64_t>, fc::mutable_variant_object > act_traces_map;
+         for (auto& act_trace: output["action_traces"].get_array()) {
+            if (act_trace["receipt"].is_null() && act_trace["except"].is_null()) continue;
+            auto parent_action_ordinal = act_trace["parent_action_ordinal"].as<fc::unsigned_int>().value;
+            auto global_sequence = act_trace["receipt"].is_null() ?
+                                      std::numeric_limits<uint64_t>::max() :
+                                      act_trace["receipt"]["global_sequence"].as<uint64_t>();
+            act_traces_map.emplace( std::make_pair( parent_action_ordinal, global_sequence ), act_trace );
+         }
+
+         std::function<vector<fc::variant>(uint32_t)> convert_act_trace_to_tree_struct = [&](uint32_t parent_action_ordinal) {
+            vector<fc::variant> restructured_act_traces;
+            auto it = act_traces_map.lower_bound( std::make_pair(parent_action_ordinal, 0) );
+            for (; it != act_traces_map.end(); it++) {
+               if (it->first.first != parent_action_ordinal) break;
+               auto& act_trace_mvo = it->second;
+
+               auto action_ordinal = act_trace_mvo["action_ordinal"].as<fc::unsigned_int>().value;
+               act_trace_mvo["inline_traces"] = convert_act_trace_to_tree_struct(action_ordinal);
+               if (act_trace_mvo["receipt"].is_null()) {
+                  act_trace_mvo["receipt"] = fc::mutable_variant_object()("abi_sequence", 0)
+                     ("act_digest", digest_type::hash(trx_trace_ptr->action_traces[action_ordinal-1].act))
+                     ("auth_sequence", flat_map<account_name, uint64_t>())
+                     ("code_sequence", 0)
+                     ("global_sequence", 0)
+                     ("receiver", act_trace_mvo["receiver"])
+                     ("recv_sequence", 0);
+               }
+               restructured_act_traces.push_back( std::move(act_trace_mvo) );
+            }
+            return restructured_act_traces;
+         };
+
+         fc::mutable_variant_object output_mvo(output);
+         output_mvo["action_traces"] = convert_act_trace_to_tree_struct(0);
+
+         output = output_mvo;
       } catch( chain::abi_exception& ) {
          output = *trx_trace_ptr;
       }
       const chain::transaction_id_type& id = trx_trace_ptr->id;
-      next(read_write::send_transaction_results{id, output});
+      next(read_write::push_transaction_results{id, output});
    } CATCH_AND_CALL(next);
 }
 });
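The restructuring above leans entirely on the ordering of the map key: entries sharing a parent_action_ordinal are contiguous and already sorted by global_sequence, so one lower_bound plus a prefix scan yields a parent's children in execution order. A minimal, self-contained sketch of the same idea outside of the fc::variant plumbing (TraceNode, FlatTraces, and build_tree are illustrative names, not part of the plugin):

#include <cstdint>
#include <map>
#include <utility>
#include <vector>

struct TraceNode {
   uint32_t ordinal;                      // this trace's action_ordinal
   std::vector<TraceNode> inline_traces;  // children, sorted by global_sequence
};

// (parent_action_ordinal, global_sequence) -> child's action_ordinal,
// mirroring the shape of act_traces_map above
using FlatTraces = std::map<std::pair<uint32_t, uint64_t>, uint32_t>;

std::vector<TraceNode> build_tree( const FlatTraces& flat, uint32_t parent ) {
   std::vector<TraceNode> children;
   auto it = flat.lower_bound( std::make_pair( parent, uint64_t(0) ) );
   for( ; it != flat.end() && it->first.first == parent; ++it ) {
      // recurse using the child's own ordinal as the next parent key
      children.push_back( TraceNode{ it->second, build_tree( flat, it->second ) } );
   }
   return children;
}

Calling build_tree(flat, 0) returns the top-level actions, since roots carry a parent ordinal of 0 — the same convention as convert_act_trace_to_tree_struct(0) above.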
@@ -1988,40 +1953,74 @@ void read_write::send_transaction(const read_write::send_transaction_par
    } CATCH_AND_CALL(next);
 }
 
-static void send_recurse(read_write* rw, int index, const std::shared_ptr<read_write::send_transactions_params>& params, const std::shared_ptr<read_write::send_transactions_results>& results, const next_function<read_write::send_transactions_results>& next) {
-   auto wrapped_next = [=](const fc::static_variant<fc::exception_ptr, read_write::send_transaction_results>& result) {
+static void push_recurse(read_write* rw, int index, const std::shared_ptr<read_write::push_transactions_params>& params, const std::shared_ptr<read_write::push_transactions_results>& results, const next_function<read_write::push_transactions_results>& next) {
+   auto wrapped_next = [=](const fc::static_variant<fc::exception_ptr, read_write::push_transaction_results>& result) {
      if (result.contains<fc::exception_ptr>()) {
        const auto& e = result.get<fc::exception_ptr>();
-       results->emplace_back( read_write::send_transaction_results{ transaction_id_type(), fc::mutable_variant_object( "error", e->to_detail_string() ) } );
+       results->emplace_back( read_write::push_transaction_results{ transaction_id_type(), fc::mutable_variant_object( "error", e->to_detail_string() ) } );
      } else {
-       const auto& r = result.get<read_write::send_transaction_results>();
+       const auto& r = result.get<read_write::push_transaction_results>();
        results->emplace_back( r );
      }
      size_t next_index = index + 1;
      if (next_index < params->size()) {
-       send_recurse(rw, next_index, params, results, next );
+       push_recurse(rw, next_index, params, results, next );
      } else {
        next(*results);
      }
    };
-   rw->send_transaction(params->at(index), wrapped_next);
+   rw->push_transaction(params->at(index), wrapped_next);
 }
 
-void read_write::send_transactions(const read_write::send_transactions_params& params, next_function<read_write::send_transactions_results> next) {
+void read_write::push_transactions(const read_write::push_transactions_params& params, next_function<read_write::push_transactions_results> next) {
    try {
       EOS_ASSERT( params.size() <= 1000, too_many_tx_at_once, "Attempt to push too many transactions at once" );
-      auto params_copy = std::make_shared<read_write::send_transactions_params>(params.begin(), params.end());
-      auto result = std::make_shared<read_write::send_transactions_results>();
+      auto params_copy = std::make_shared<read_write::push_transactions_params>(params.begin(), params.end());
+      auto result = std::make_shared<read_write::push_transactions_results>();
      result->reserve(params.size());
-      send_recurse(this, 0, params_copy, result, next);
+      push_recurse(this, 0, params_copy, result, next);
    } catch ( boost::interprocess::bad_alloc& ) {
      chain_plugin::handle_db_exhaustion();
    } CATCH_AND_CALL(next);
 }
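push_recurse drives the batch sequentially without blocking: each completion handler records its result and re-enters the function with the next index, calling next only after the last element. Stripped of the plugin types, the control flow looks like this (Result, AsyncOp, and run_all are illustrative stand-ins, not plugin code):

#include <functional>
#include <memory>
#include <vector>

using Result  = int;                                 // stand-in for push_transaction_results
using Next    = std::function<void(const Result&)>;  // completion callback for one op
using AsyncOp = std::function<void(Next)>;           // one asynchronous unit of work

static void run_all( const std::shared_ptr<std::vector<AsyncOp>>& ops, size_t index,
                     const std::shared_ptr<std::vector<Result>>& results,
                     const std::function<void(const std::vector<Result>&)>& done ) {
   (*ops)[index]( [=]( const Result& r ) {
      results->push_back( r );                        // accumulate, like results->emplace_back(...)
      size_t next_index = index + 1;
      if( next_index < ops->size() )
         run_all( ops, next_index, results, done );   // kick off the next op from the callback
      else
         done( *results );                            // final continuation, like next(*results)
   } );
}

The shared_ptrs keep the parameter and result vectors alive across the asynchronous hops — the same reason push_transactions copies params into params_copy before recursing.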
+void read_write::send_transaction(const read_write::send_transaction_params& params, next_function<read_write::send_transaction_results> next) {
+
+   try {
+      auto pretty_input = std::make_shared<packed_transaction>();
+      auto resolver = make_resolver(this, abi_serializer_max_time);
+      transaction_metadata_ptr ptrx;
+      try {
+         abi_serializer::from_variant(params, *pretty_input, resolver, abi_serializer_max_time);
+         ptrx = std::make_shared<transaction_metadata>( pretty_input );
+      } EOS_RETHROW_EXCEPTIONS(chain::packed_transaction_type_exception, "Invalid packed transaction")
+
+      app().get_method<incoming::methods::transaction_async>()(ptrx, true, [this, next](const fc::static_variant<fc::exception_ptr, transaction_trace_ptr>& result) -> void{
+         if (result.contains<fc::exception_ptr>()) {
+            next(result.get<fc::exception_ptr>());
+         } else {
+            auto trx_trace_ptr = result.get<transaction_trace_ptr>();
+
+            try {
+               fc::variant output;
+               try {
+                  output = db.to_variant_with_abi( *trx_trace_ptr, abi_serializer_max_time );
+               } catch( chain::abi_exception& ) {
+                  output = *trx_trace_ptr;
+               }
+
+               const chain::transaction_id_type& id = trx_trace_ptr->id;
+               next(read_write::send_transaction_results{id, output});
+            } CATCH_AND_CALL(next);
+         }
+      });
+   } catch ( boost::interprocess::bad_alloc& ) {
+      chain_plugin::handle_db_exhaustion();
+   } CATCH_AND_CALL(next);
+}
 read_only::get_abi_results read_only::get_abi( const get_abi_params& params )const {
    get_abi_results result;
diff --git a/plugins/chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp b/plugins/chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp
index 80353e5be46..6c85c354481 100644
--- a/plugins/chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp
+++ b/plugins/chain_plugin/include/eosio/chain_plugin/chain_plugin.hpp
@@ -604,10 +604,6 @@ class read_write {
    using send_transaction_results = push_transaction_results;
    void send_transaction(const send_transaction_params& params, chain::plugin_interface::next_function<send_transaction_results> next);
 
-   using send_transactions_params = vector<send_transaction_params>;
-   using send_transactions_results = vector<send_transaction_results>;
-   void send_transactions(const send_transactions_params& params, chain::plugin_interface::next_function<send_transactions_results> next);
-
    friend resolver_factory<read_write>;
 };
From b70da87923db1612dbce7250568c43a23e617e92 Mon Sep 17 00:00:00 2001
From: Kevin Heifner
Date: Mon, 8 Apr 2019 08:15:14 -0500
Subject: [PATCH 309/680] Prevent core dump by catching exception

---
 programs/eosio-launcher/main.cpp | 43 +++++++++++++++++++++-----------
 1 file changed, 29 insertions(+), 14 deletions(-)

diff --git a/programs/eosio-launcher/main.cpp b/programs/eosio-launcher/main.cpp
index 35f12b94e75..7e6bfbaf7b3 100644
--- a/programs/eosio-launcher/main.cpp
+++ b/programs/eosio-launcher/main.cpp
@@ -1673,20 +1673,35 @@ launcher_def::kill (launch_modes mode, string sig_opt) {
   case LM_LOCAL:
   case LM_REMOTE : {
     bfs::path source = "last_run.json";
-    fc::json::from_file(source).as(last_run);
-    for (auto &info : last_run.running_nodes) {
-      if (mode == LM_ALL || (info.remote && mode == LM_REMOTE) ||
-          (!info.remote && mode == LM_LOCAL)) {
-        if (info.pid_file.length()) {
-          string pid;
-          fc::json::from_file(info.pid_file).as(pid);
-          string kill_cmd = "kill " + sig_opt + " " + pid;
-          boost::process::system (kill_cmd);
-        }
-        else {
-          boost::process::system (info.kill_cmd);
-        }
-      }
+    try {
+      fc::json::from_file( source ).as( last_run );
+      for( auto& info : last_run.running_nodes ) {
+        if( mode == LM_ALL || (info.remote && mode == LM_REMOTE) ||
+            (!info.remote && mode == LM_LOCAL) ) {
+          try {
+            if( info.pid_file.length() ) {
+              string pid;
+              fc::json::from_file( info.pid_file ).as( pid );
+              string kill_cmd = "kill " + sig_opt + " " + pid;
+              boost::process::system( kill_cmd );
+            } else {
+              boost::process::system( info.kill_cmd );
+            }
+          } catch( fc::exception& fce ) {
+            cerr << "unable to kill fc::exception=" << fce.to_detail_string() << endl;
+          } catch( std::exception& stde ) {
+            cerr << "unable to kill std::exception=" << stde.what() << endl;
+          } catch( ... ) {
+            cerr << "Unable to kill" << endl;
+          }
+        }
+      }
+    } catch( fc::exception& fce ) {
+      cerr << "unable to open " << source << " fc::exception=" << fce.to_detail_string() << endl;
+    } catch( std::exception& stde ) {
+      cerr << "unable to open " << source << " std::exception=" << stde.what() << endl;
+    } catch( ... ) {
+      cerr << "Unable to open " << source << endl;
    }
  }
 }

From 2122f8452f86b24a2eb730f36cdda8882e429770 Mon Sep 17 00:00:00 2001
From: Kevin Heifner
Date: Mon, 8 Apr 2019 11:20:12 -0500
Subject: [PATCH 310/680] read_delay_timer runs on net_plugin thread_pool so
 app().post for execution of start_read_message

---
 plugins/net_plugin/net_plugin.cpp | 13 +++++++------
 1 file changed, 7 insertions(+), 6 deletions(-)

diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp
index 9341191beb6..7b2f2cd10bf 100644
--- a/plugins/net_plugin/net_plugin.cpp
+++ b/plugins/net_plugin/net_plugin.cpp
@@ -2044,12 +2044,13 @@ namespace eosio {
          }
          if( !conn->read_delay_timer ) return;
          conn->read_delay_timer->expires_from_now( def_read_delay_for_full_write_queue );
-         conn->read_delay_timer->async_wait(
-               app().get_priority_queue().wrap( priority::low, [this, weak_conn]( boost::system::error_code ) {
-            auto conn = weak_conn.lock();
-            if( !conn ) return;
-            start_read_message( conn );
-         } ) );
+         conn->read_delay_timer->async_wait( [this, weak_conn]( boost::system::error_code ec ) {
+            app().post( priority::low, [this, weak_conn]() {
+               auto conn = weak_conn.lock();
+               if( !conn ) return;
+               start_read_message( conn );
+            } );
+         } );
          return;
      }
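The fix above matters because the timer's completion handler now runs on whichever thread_pool thread services the timer, while start_read_message touches connection state owned by the main application thread; app().post hands the continuation back. A reduced boost::asio sketch of the same hand-off — the two io_contexts stand in for the net_plugin thread pool and the appbase main loop; this is an assumption-level illustration, not plugin code:

#include <boost/asio.hpp>
#include <chrono>
#include <iostream>
#include <thread>

int main() {
   boost::asio::io_context pool_ctx;   // plays the role of the net_plugin thread_pool
   boost::asio::io_context main_ctx;   // plays the role of app()'s io_service

   boost::asio::steady_timer read_delay_timer( pool_ctx );
   read_delay_timer.expires_from_now( std::chrono::milliseconds(10) );
   read_delay_timer.async_wait( [&]( boost::system::error_code ) {
      // We are on a pool thread here; hop to the main context before touching
      // state it owns -- the analogue of app().post(priority::low, ...).
      boost::asio::post( main_ctx, []{ std::cout << "start_read_message on main thread\n"; } );
   } );

   std::thread pool_thread( [&]{ pool_ctx.run(); } );
   pool_thread.join();  // timer fires on the pool thread and posts the continuation
   main_ctx.run();      // the posted continuation executes here, on the "main" thread
}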
From 9a12ae17b6522ded72b373daa600485f8dd053f9 Mon Sep 17 00:00:00 2001
From: arhag
Date: Mon, 8 Apr 2019 15:04:12 -0400
Subject: [PATCH 311/680] small changes to implementation of
 read_write::push_transaction

---
 plugins/chain_plugin/chain_plugin.cpp | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/plugins/chain_plugin/chain_plugin.cpp b/plugins/chain_plugin/chain_plugin.cpp
index de6a9f77e8f..e367d0b85d2 100644
--- a/plugins/chain_plugin/chain_plugin.cpp
+++ b/plugins/chain_plugin/chain_plugin.cpp
@@ -1903,20 +1903,20 @@ void read_write::push_transaction(const read_write::push_transaction_params& par
 
          // Create map of (parent_action_ordinal, global_sequence) with action trace
          std::map< std::pair<uint32_t, uint64_t>, fc::mutable_variant_object > act_traces_map;
-         for (auto& act_trace: output["action_traces"].get_array()) {
+         for( const auto& act_trace : output["action_traces"].get_array() ) {
            if (act_trace["receipt"].is_null() && act_trace["except"].is_null()) continue;
            auto parent_action_ordinal = act_trace["parent_action_ordinal"].as<fc::unsigned_int>().value;
            auto global_sequence = act_trace["receipt"].is_null() ?
                                      std::numeric_limits<uint64_t>::max() :
                                      act_trace["receipt"]["global_sequence"].as<uint64_t>();
-           act_traces_map.emplace( std::make_pair( parent_action_ordinal, global_sequence ), act_trace );
+           act_traces_map.emplace( std::make_pair( parent_action_ordinal, global_sequence ),
+                                   act_trace.get_object() );
         }
 
         std::function<vector<fc::variant>(uint32_t)> convert_act_trace_to_tree_struct = [&](uint32_t parent_action_ordinal) {
            vector<fc::variant> restructured_act_traces;
            auto it = act_traces_map.lower_bound( std::make_pair(parent_action_ordinal, 0) );
-           for (; it != act_traces_map.end(); it++) {
-              if (it->first.first != parent_action_ordinal) break;
+           for( ; it != act_traces_map.end() && it->first.first == parent_action_ordinal; ++it ) {
              auto& act_trace_mvo = it->second;
 
              auto action_ordinal = act_trace_mvo["action_ordinal"].as<fc::unsigned_int>().value;
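One detail worth calling out in the hunk above: receipt-less traces (actions that failed before a receipt was issued) get std::numeric_limits<uint64_t>::max() as their global_sequence key, which pushes them after every sibling that did execute. The sentinel trick in isolation (trace_sort_key is an illustrative name):

#include <cstdint>
#include <limits>
#include <optional>

// Sort key for a trace: the real global_sequence when a receipt exists,
// otherwise the max sentinel so failed traces order after executed ones.
inline uint64_t trace_sort_key( const std::optional<uint64_t>& global_sequence ) {
   return global_sequence ? *global_sequence
                          : std::numeric_limits<uint64_t>::max();
}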
From 6d6f20cc6c8909ae93b8b0d1e2617ba36e2bb672 Mon Sep 17 00:00:00 2001
From: Adam Mitz
Date: Mon, 8 Apr 2019 16:46:12 -0500
Subject: [PATCH 312/680] Enhanced the build script with the ability to
 specify a prefix directory (instead of assuming HOME) and a pre-built Boost.
 Also cleaned up the usage message and wait to make changes on the filesystem
 until preliminary checks are done.

---
 scripts/eosio_build.sh        | 127 +++++++++++++++++++---------
 scripts/eosio_build_amazon.sh |  14 ++--
 scripts/eosio_build_centos.sh |  14 ++--
 scripts/eosio_build_darwin.sh |   8 +--
 scripts/eosio_build_fedora.sh |  12 ++--
 scripts/eosio_build_ubuntu.sh |  12 ++--
 6 files changed, 101 insertions(+), 86 deletions(-)

diff --git a/scripts/eosio_build.sh b/scripts/eosio_build.sh
index a97ceaa5058..047bc9ce590 100755
--- a/scripts/eosio_build.sh
+++ b/scripts/eosio_build.sh
@@ -30,62 +30,21 @@
 # https://github.com/EOSIO/eos/blob/master/LICENSE
 ##########################################################################
 
-VERSION=2.2 # Build script version
+VERSION=2.3 # Build script version
+
+# defaults for command-line arguments
 CMAKE_BUILD_TYPE=Release
 DOXYGEN=false
 ENABLE_COVERAGE_TESTING=false
 CORE_SYMBOL_NAME="SYS"
-START_MAKE=true
+NONINTERACTIVE=0
+PREFIX=$HOME
 
 TIME_BEGIN=$( date -u +%s )
 txtbld=$(tput bold)
 bldred=${txtbld}$(tput setaf 1)
 txtrst=$(tput sgr0)
 
-export SRC_LOCATION=${HOME}/src
-export OPT_LOCATION=${HOME}/opt
-export VAR_LOCATION=${HOME}/var
-export ETC_LOCATION=${HOME}/etc
-export BIN_LOCATION=${HOME}/bin
-export DATA_LOCATION=${HOME}/data
-export CMAKE_VERSION_MAJOR=3
-export CMAKE_VERSION_MINOR=13
-export CMAKE_VERSION_PATCH=2
-export CMAKE_VERSION=${CMAKE_VERSION_MAJOR}.${CMAKE_VERSION_MINOR}.${CMAKE_VERSION_PATCH}
-export MONGODB_VERSION=3.6.3
-export MONGODB_ROOT=${OPT_LOCATION}/mongodb-${MONGODB_VERSION}
-export MONGODB_CONF=${ETC_LOCATION}/mongod.conf
-export MONGODB_LOG_LOCATION=${VAR_LOCATION}/log/mongodb
-export MONGODB_LINK_LOCATION=${OPT_LOCATION}/mongodb
-export MONGODB_DATA_LOCATION=${DATA_LOCATION}/mongodb
-export MONGO_C_DRIVER_VERSION=1.13.0
-export MONGO_C_DRIVER_ROOT=${SRC_LOCATION}/mongo-c-driver-${MONGO_C_DRIVER_VERSION}
-export MONGO_CXX_DRIVER_VERSION=3.4.0
-export MONGO_CXX_DRIVER_ROOT=${SRC_LOCATION}/mongo-cxx-driver-r${MONGO_CXX_DRIVER_VERSION}
-export BOOST_VERSION_MAJOR=1
-export BOOST_VERSION_MINOR=67
-export BOOST_VERSION_PATCH=0
-export BOOST_VERSION=${BOOST_VERSION_MAJOR}_${BOOST_VERSION_MINOR}_${BOOST_VERSION_PATCH}
-export BOOST_ROOT=${SRC_LOCATION}/boost_${BOOST_VERSION}
-export BOOST_LINK_LOCATION=${OPT_LOCATION}/boost
-export LLVM_VERSION=release_40
-export LLVM_ROOT=${OPT_LOCATION}/llvm
-export LLVM_DIR=${LLVM_ROOT}/lib/cmake/llvm
-export DOXYGEN_VERSION=1_8_14
-export DOXYGEN_ROOT=${SRC_LOCATION}/doxygen-${DOXYGEN_VERSION}
-export TINI_VERSION=0.18.0
-export DISK_MIN=5
-
-# Setup directories
-mkdir -p $SRC_LOCATION
-mkdir -p $OPT_LOCATION
-mkdir -p $VAR_LOCATION
-mkdir -p $BIN_LOCATION
-mkdir -p $VAR_LOCATION/log
-mkdir -p $ETC_LOCATION
-mkdir -p $MONGODB_LOG_LOCATION
-mkdir -p $MONGODB_DATA_LOCATION
-
 SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
 REPO_ROOT="${SCRIPT_DIR}/.."
 BUILD_DIR="${REPO_ROOT}/build"
@@ -101,12 +60,19 @@ fi
 
 function usage() {
-   printf "Usage: %s \\n[Build Option -o <Debug|Release|RelWithDebInfo|MinSizeRel>] \\n[CodeCoverage -c] \\n[Doxygen -d] \\n[CoreSymbolName -s <1-7 characters>] \\n[Avoid Compiling -a]\\n[Noninteractive -y]\\n\\n" "$0" 1>&2
+   cat >&2 <<EOT
+Usage: $0 OPTION...
+  -o TYPE     Build type <Debug|Release|RelWithDebInfo|MinSizeRel> (default: Release)
+  -p DIR      Prefix directory for dependencies & EOS install (default: $HOME)
+  -b DIR      Use pre-built boost in DIR
+  -c          Enable Code Coverage
+  -d          Generate Doxygen
+  -s NAME     Core Symbol Name <1-7 characters> (default: SYS)
+  -y          Noninteractive mode (this script)
+EOT
    exit 1
 }
 
-NONINTERACTIVE=0
-
 if [ $# -ne 0 ]; then
    while getopts ":cdo:s:ahy" opt; do
       case "${opt}" in
         o )
            options=( "Debug" "Release" "RelWithDebInfo" "MinSizeRel" )
@@ -134,10 +100,13 @@ if [ $# -ne 0 ]; then
            else
               CORE_SYMBOL_NAME="${OPTARG}"
            fi
-        ;;
-        a)
-           START_MAKE=false
-        ;;
+        ;;
+        b)
+           BOOST_ARG=$OPTARG
+        ;;
+        p)
+           PREFIX=$OPTARG
+        ;;
         h)
            usage
            exit 1
@@ -170,6 +139,42 @@ if [ ! -d "${REPO_ROOT}/.git" ]; then
    exit 1
 fi
 
+export CMAKE_VERSION_MAJOR=3
+export CMAKE_VERSION_MINOR=13
+export CMAKE_VERSION_PATCH=2
+export CMAKE_VERSION=${CMAKE_VERSION_MAJOR}.${CMAKE_VERSION_MINOR}.${CMAKE_VERSION_PATCH}
+
+export SRC_LOCATION=$PREFIX/src
+export OPT_LOCATION=$PREFIX/opt
+export VAR_LOCATION=$PREFIX/var
+export ETC_LOCATION=$PREFIX/etc
+export BIN_LOCATION=$PREFIX/bin
+export DATA_LOCATION=$PREFIX/data
+
+export MONGODB_VERSION=3.6.3
+export MONGODB_ROOT=${OPT_LOCATION}/mongodb-${MONGODB_VERSION}
+export MONGODB_CONF=${ETC_LOCATION}/mongod.conf
+export MONGODB_LOG_LOCATION=${VAR_LOCATION}/log/mongodb
+export MONGODB_LINK_LOCATION=${OPT_LOCATION}/mongodb
+export MONGODB_DATA_LOCATION=${DATA_LOCATION}/mongodb
+export MONGO_C_DRIVER_VERSION=1.13.0
+export MONGO_C_DRIVER_ROOT=${SRC_LOCATION}/mongo-c-driver-${MONGO_C_DRIVER_VERSION}
+export MONGO_CXX_DRIVER_VERSION=3.4.0
+export MONGO_CXX_DRIVER_ROOT=${SRC_LOCATION}/mongo-cxx-driver-r${MONGO_CXX_DRIVER_VERSION}
+export BOOST_VERSION_MAJOR=1
+export BOOST_VERSION_MINOR=67
+export BOOST_VERSION_PATCH=0
+export BOOST_VERSION=${BOOST_VERSION_MAJOR}_${BOOST_VERSION_MINOR}_${BOOST_VERSION_PATCH}
+export BOOST_ROOT=${BOOST_ARG:-${SRC_LOCATION}/boost_${BOOST_VERSION}}
+export BOOST_LINK_LOCATION=${OPT_LOCATION}/boost
+export LLVM_VERSION=release_40
+export LLVM_ROOT=${OPT_LOCATION}/llvm
+export LLVM_DIR=${LLVM_ROOT}/lib/cmake/llvm
+export DOXYGEN_VERSION=1_8_14
+export DOXYGEN_ROOT=${SRC_LOCATION}/doxygen-${DOXYGEN_VERSION}
+export TINI_VERSION=0.18.0
+export DISK_MIN=5
+
 cd $REPO_ROOT
 
 STALE_SUBMODS=$(( $(git submodule status --recursive | grep -c "^[+\-]") ))
@@ -179,6 +184,16 @@ if [ $STALE_SUBMODS -gt 0 ]; then
    exit 1
 fi
 
+# Setup directories
+mkdir -p $SRC_LOCATION
+mkdir -p $OPT_LOCATION
+mkdir -p $VAR_LOCATION
+mkdir -p $BIN_LOCATION
+mkdir -p $VAR_LOCATION/log
+mkdir -p $ETC_LOCATION
+mkdir -p $MONGODB_LOG_LOCATION
+mkdir -p $MONGODB_DATA_LOCATION
+
 printf "\\nBeginning build version: %s\\n" "${VERSION}"
 printf "%s\\n" "$( date -u )"
 printf "User: %s\\n" "$( whoami )"
@@ -256,7 +271,7 @@ if [ "$ARCH" == "Darwin" ]; then
    export OS_NAME=MacOSX
    # opt/gettext: cleos requires Intl, which requires gettext; it's keg only though and we don't want to force
linking: https://github.com/EOSIO/eos/issues/2240#issuecomment-396309884 # HOME/lib/cmake: mongo_db_plugin.cpp:25:10: fatal error: 'bsoncxx/builder/basic/kvp.hpp' file not found - LOCAL_CMAKE_FLAGS="-DCMAKE_PREFIX_PATH=/usr/local/opt/gettext;$HOME/lib/cmake ${LOCAL_CMAKE_FLAGS}" + LOCAL_CMAKE_FLAGS="-DCMAKE_PREFIX_PATH=/usr/local/opt/gettext;$HOME/lib/cmake ${LOCAL_CMAKE_FLAGS}" FILE="${REPO_ROOT}/scripts/eosio_build_darwin.sh" CXX_COMPILER=clang++ C_COMPILER=clang @@ -283,6 +298,7 @@ $CMAKE -DCMAKE_BUILD_TYPE="${CMAKE_BUILD_TYPE}" -DCMAKE_CXX_COMPILER="${CXX_COMP -DCMAKE_C_COMPILER="${C_COMPILER}" -DCORE_SYMBOL_NAME="${CORE_SYMBOL_NAME}" \ -DOPENSSL_ROOT_DIR="${OPENSSL_ROOT_DIR}" -DBUILD_MONGO_DB_PLUGIN=true \ -DENABLE_COVERAGE_TESTING="${ENABLE_COVERAGE_TESTING}" -DBUILD_DOXYGEN="${DOXYGEN}" \ + -DCMAKE_PREFIX_PATH=$PREFIX \ -DCMAKE_INSTALL_PREFIX=$OPT_LOCATION/eosio $LOCAL_CMAKE_FLAGS "${REPO_ROOT}" if [ $? -ne 0 ]; then exit -1; fi make -j"${JOBS}" @@ -306,7 +322,7 @@ printf "======================================================================== printf "(Optional) Testing Instructions:\\n" print_instructions printf "${BIN_LOCATION}/mongod --dbpath ${MONGODB_DATA_LOCATION} -f ${MONGODB_CONF} --logpath ${MONGODB_LOG_LOCATION}/mongod.log &\\n" -printf "cd ./build && PATH=\$PATH:$HOME/opt/mongodb/bin make test\\n" # PATH is set as currently 'mongo' binary is required for the mongodb test +printf "cd ./build && PATH=\$PATH:$MONGODB_LINK_LOCATION/bin make test\\n" # PATH is set as currently 'mongo' binary is required for the mongodb test printf "${txtrst}==============================================================================================\\n" printf "For more information:\\n" printf "EOSIO website: https://eos.io\\n" @@ -314,4 +330,3 @@ printf "EOSIO Telegram channel @ https://t.me/EOSProject\\n" printf "EOSIO resources: https://eos.io/resources/\\n" printf "EOSIO Stack Exchange: https://eosio.stackexchange.com\\n" printf "EOSIO wiki: https://github.com/EOSIO/eos/wiki\\n\\n\\n" - diff --git a/scripts/eosio_build_amazon.sh b/scripts/eosio_build_amazon.sh index ff655496a7b..1fb2af1ebc9 100755 --- a/scripts/eosio_build_amazon.sh +++ b/scripts/eosio_build_amazon.sh @@ -9,16 +9,16 @@ DISK_TOTAL=$(( DISK_TOTAL_KB / 1048576 )) DISK_AVAIL=$(( DISK_AVAIL_KB / 1048576 )) if [[ "${OS_NAME}" == "Amazon Linux AMI" ]]; then # Amazonlinux1 - DEP_ARRAY=( + DEP_ARRAY=( sudo procps util-linux which gcc72 gcc72-c++ autoconf automake libtool make doxygen graphviz \ bzip2 bzip2-devel openssl-devel gmp gmp-devel libstdc++72 python27 python27-devel python36 python36-devel \ libedit-devel ncurses-devel swig wget file libcurl-devel libusb1-devel ) else # Amazonlinux2 - DEP_ARRAY=( + DEP_ARRAY=( git procps-ng util-linux gcc gcc-c++ autoconf automake libtool make bzip2 \ bzip2-devel openssl-devel gmp-devel libstdc++ libcurl-devel libusbx-devel \ - python3 python3-devel python-devel libedit-devel doxygen graphviz + python3 python3-devel python-devel libedit-devel doxygen graphviz ) fi @@ -125,7 +125,7 @@ if [ ! -e $CMAKE ]; then curl -LO https://cmake.org/files/v$CMAKE_VERSION_MAJOR.$CMAKE_VERSION_MINOR/cmake-$CMAKE_VERSION.tar.gz \ && tar -xzf cmake-$CMAKE_VERSION.tar.gz \ && cd cmake-$CMAKE_VERSION \ - && ./bootstrap --prefix=$HOME \ + && ./bootstrap --prefix=$PREFIX \ && make -j"${JOBS}" \ && make install \ && cd .. 
\ @@ -142,7 +142,7 @@ printf "\\n" printf "Checking Boost library (${BOOST_VERSION}) installation...\\n" -BOOSTVERSION=$( grep "#define BOOST_VERSION" "$HOME/opt/boost/include/boost/version.hpp" 2>/dev/null | tail -1 | tr -s ' ' | cut -d\ -f3 ) +BOOSTVERSION=$( grep "#define BOOST_VERSION" "$BOOST_ROOT/include/boost/version.hpp" 2>/dev/null | tail -1 | tr -s ' ' | cut -d\ -f3 ) if [ "${BOOSTVERSION}" != "${BOOST_VERSION_MAJOR}0${BOOST_VERSION_MINOR}0${BOOST_VERSION_PATCH}" ]; then printf "Installing Boost library...\\n" curl -LO https://dl.bintray.com/boostorg/release/${BOOST_VERSION_MAJOR}.${BOOST_VERSION_MINOR}.${BOOST_VERSION_PATCH}/source/boost_$BOOST_VERSION.tar.bz2 \ @@ -193,7 +193,7 @@ if [ ! -d $MONGO_C_DRIVER_ROOT ]; then && cd mongo-c-driver-$MONGO_C_DRIVER_VERSION \ && mkdir -p cmake-build \ && cd cmake-build \ - && $CMAKE -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$HOME -DENABLE_BSON=ON -DENABLE_SSL=OPENSSL -DENABLE_AUTOMATIC_INIT_AND_CLEANUP=OFF -DENABLE_STATIC=ON .. \ + && $CMAKE -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$PREFIX -DENABLE_BSON=ON -DENABLE_SSL=OPENSSL -DENABLE_AUTOMATIC_INIT_AND_CLEANUP=OFF -DENABLE_STATIC=ON .. \ && make -j"${JOBS}" \ && make install \ && cd ../.. \ @@ -210,7 +210,7 @@ if [ ! -d $MONGO_CXX_DRIVER_ROOT ]; then curl -L https://github.com/mongodb/mongo-cxx-driver/archive/r$MONGO_CXX_DRIVER_VERSION.tar.gz -o mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION.tar.gz \ && tar -xzf mongo-cxx-driver-r${MONGO_CXX_DRIVER_VERSION}.tar.gz \ && cd mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION/build \ - && $CMAKE -DBUILD_SHARED_LIBS=OFF -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$HOME .. \ + && $CMAKE -DBUILD_SHARED_LIBS=OFF -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$PREFIX .. \ && make -j"${JOBS}" VERBOSE=1 \ && make install \ && cd ../.. \ diff --git a/scripts/eosio_build_centos.sh b/scripts/eosio_build_centos.sh index 621001d0a97..3988aef50a4 100755 --- a/scripts/eosio_build_centos.sh +++ b/scripts/eosio_build_centos.sh @@ -21,7 +21,7 @@ printf "CPU speed: ${CPU_SPEED}Mhz\\n" printf "CPU cores: ${CPU_CORE}\\n" printf "Physical Memory: ${MEM_MEG}Mgb\\n" printf "Disk install: ${DISK_INSTALL}\\n" -printf "Disk space total: ${DISK_TOTAL%.*}G\\n" +printf "Disk space total: ${DISK_TOTAL%.*}G\\n" printf "Disk space available: ${DISK_AVAIL%.*}G\\n" printf "Concurrent Jobs (make -j): ${JOBS}\\n" @@ -118,7 +118,7 @@ fi printf "\\n" -DEP_ARRAY=( +DEP_ARRAY=( git autoconf automake libtool make bzip2 doxygen graphviz \ bzip2-devel openssl-devel gmp-devel \ ocaml libicu-devel python python-devel rh-python36 \ @@ -175,7 +175,7 @@ if [ ! -e $CMAKE ]; then curl -LO https://cmake.org/files/v$CMAKE_VERSION_MAJOR.$CMAKE_VERSION_MINOR/cmake-$CMAKE_VERSION.tar.gz \ && tar -xzf cmake-$CMAKE_VERSION.tar.gz \ && cd cmake-$CMAKE_VERSION \ - && ./bootstrap --prefix=$HOME \ + && ./bootstrap --prefix=$PREFIX \ && make -j"${JOBS}" \ && make install \ && cd .. 
\ @@ -193,7 +193,7 @@ printf "\\n" export CPATH="${CPATH}:${PYTHON3PATH}/root/usr/include/python3.6m" # m on the end causes problems with boost finding python3 printf "Checking Boost library (${BOOST_VERSION}) installation...\\n" -BOOSTVERSION=$( grep "#define BOOST_VERSION" "$HOME/opt/boost/include/boost/version.hpp" 2>/dev/null | tail -1 | tr -s ' ' | cut -d\ -f3 ) +BOOSTVERSION=$( grep "#define BOOST_VERSION" "$BOOST_ROOT/include/boost/version.hpp" 2>/dev/null | tail -1 | tr -s ' ' | cut -d\ -f3 ) if [ "${BOOSTVERSION}" != "${BOOST_VERSION_MAJOR}0${BOOST_VERSION_MINOR}0${BOOST_VERSION_PATCH}" ]; then printf "Installing Boost library...\\n" curl -LO https://dl.bintray.com/boostorg/release/${BOOST_VERSION_MAJOR}.${BOOST_VERSION_MINOR}.${BOOST_VERSION_PATCH}/source/boost_$BOOST_VERSION.tar.bz2 \ @@ -234,7 +234,7 @@ if [ ! -d $MONGODB_ROOT ]; then printf " - MongoDB successfully installed @ ${MONGODB_ROOT} (Symlinked to ${MONGODB_LINK_LOCATION}).\\n" else printf " - MongoDB found with correct version @ ${MONGODB_ROOT} (Symlinked to ${MONGODB_LINK_LOCATION}).\\n" -fi +fi if [ $? -ne 0 ]; then exit -1; fi printf "Checking MongoDB C driver installation...\\n" if [ ! -d $MONGO_C_DRIVER_ROOT ]; then @@ -244,7 +244,7 @@ if [ ! -d $MONGO_C_DRIVER_ROOT ]; then && cd mongo-c-driver-$MONGO_C_DRIVER_VERSION \ && mkdir -p cmake-build \ && cd cmake-build \ - && $CMAKE -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$HOME -DENABLE_BSON=ON -DENABLE_SSL=OPENSSL -DENABLE_AUTOMATIC_INIT_AND_CLEANUP=OFF -DENABLE_STATIC=ON .. \ + && $CMAKE -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$PREFIX -DENABLE_BSON=ON -DENABLE_SSL=OPENSSL -DENABLE_AUTOMATIC_INIT_AND_CLEANUP=OFF -DENABLE_STATIC=ON .. \ && make -j"${JOBS}" \ && make install \ && cd ../.. \ @@ -261,7 +261,7 @@ if [ ! -d $MONGO_CXX_DRIVER_ROOT ]; then curl -L https://github.com/mongodb/mongo-cxx-driver/archive/r$MONGO_CXX_DRIVER_VERSION.tar.gz -o mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION.tar.gz \ && tar -xzf mongo-cxx-driver-r${MONGO_CXX_DRIVER_VERSION}.tar.gz \ && cd mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION/build \ - && $CMAKE -DBUILD_SHARED_LIBS=OFF -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$HOME .. \ + && $CMAKE -DBUILD_SHARED_LIBS=OFF -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$PREFIX .. \ && make -j"${JOBS}" VERBOSE=1 \ && make install \ && cd ../.. \ diff --git a/scripts/eosio_build_darwin.sh b/scripts/eosio_build_darwin.sh index 224b0839f1d..947dbb848c6 100755 --- a/scripts/eosio_build_darwin.sh +++ b/scripts/eosio_build_darwin.sh @@ -164,7 +164,7 @@ printf "\\n" export CPATH="$(python-config --includes | awk '{print $1}' | cut -dI -f2):$CPATH" # Boost has trouble finding pyconfig.h printf "Checking Boost library (${BOOST_VERSION}) installation...\\n" -BOOSTVERSION=$( grep "#define BOOST_VERSION" "$HOME/opt/boost/include/boost/version.hpp" 2>/dev/null | tail -1 | tr -s ' ' | cut -d\ -f3 ) +BOOSTVERSION=$( grep "#define BOOST_VERSION" "$BOOST_ROOT/include/boost/version.hpp" 2>/dev/null | tail -1 | tr -s ' ' | cut -d\ -f3 ) if [ "${BOOSTVERSION}" != "${BOOST_VERSION_MAJOR}0${BOOST_VERSION_MINOR}0${BOOST_VERSION_PATCH}" ]; then printf "Installing Boost library...\\n" curl -LO https://dl.bintray.com/boostorg/release/$BOOST_VERSION_MAJOR.$BOOST_VERSION_MINOR.$BOOST_VERSION_PATCH/source/boost_$BOOST_VERSION.tar.bz2 \ @@ -216,7 +216,7 @@ if [ ! 
-d $MONGO_C_DRIVER_ROOT ]; then && cd mongo-c-driver-$MONGO_C_DRIVER_VERSION \ && mkdir -p cmake-build \ && cd cmake-build \ - && $CMAKE -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$HOME -DENABLE_BSON=ON -DENABLE_SSL=DARWIN -DENABLE_AUTOMATIC_INIT_AND_CLEANUP=OFF -DENABLE_STATIC=ON .. \ + && $CMAKE -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$PREFIX -DENABLE_BSON=ON -DENABLE_SSL=DARWIN -DENABLE_AUTOMATIC_INIT_AND_CLEANUP=OFF -DENABLE_STATIC=ON .. \ && make -j"${JOBS}" \ && make install \ && cd ../.. \ @@ -228,12 +228,12 @@ else fi if [ $? -ne 0 ]; then exit -1; fi printf "Checking MongoDB C++ driver installation...\\n" -if [ "$(grep "Version:" $HOME/lib/pkgconfig/libmongocxx-static.pc 2>/dev/null | tr -s ' ' | awk '{print $2}')" != $MONGO_CXX_DRIVER_VERSION ]; then +if [ "$(grep "Version:" $PREFIX/lib/pkgconfig/libmongocxx-static.pc 2>/dev/null | tr -s ' ' | awk '{print $2}')" != $MONGO_CXX_DRIVER_VERSION ]; then printf "Installing MongoDB C++ driver...\\n" curl -L https://github.com/mongodb/mongo-cxx-driver/archive/r$MONGO_CXX_DRIVER_VERSION.tar.gz -o mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION.tar.gz \ && tar -xzf mongo-cxx-driver-r${MONGO_CXX_DRIVER_VERSION}.tar.gz \ && cd mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION/build \ - && $CMAKE -DBUILD_SHARED_LIBS=OFF -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$HOME .. \ + && $CMAKE -DBUILD_SHARED_LIBS=OFF -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$PREFIX .. \ && make -j"${JOBS}" VERBOSE=1 \ && make install \ && cd ../.. \ diff --git a/scripts/eosio_build_fedora.sh b/scripts/eosio_build_fedora.sh index c27f47658d3..b1429b01931 100755 --- a/scripts/eosio_build_fedora.sh +++ b/scripts/eosio_build_fedora.sh @@ -41,7 +41,7 @@ printf "Disk space total: ${DISK_TOTAL%.*}G\\n" printf "Disk space available: ${DISK_AVAIL%.*}G\\n" # llvm is symlinked from /usr/lib64/llvm4.0 into user's home -DEP_ARRAY=( +DEP_ARRAY=( git sudo procps-ng which gcc gcc-c++ autoconf automake libtool make \ bzip2-devel wget bzip2 compat-openssl10 graphviz doxygen \ openssl-devel gmp-devel libstdc++-devel python2 python2-devel python3 python3-devel \ @@ -116,7 +116,7 @@ if [ ! -e $CMAKE ]; then curl -LO https://cmake.org/files/v$CMAKE_VERSION_MAJOR.$CMAKE_VERSION_MINOR/cmake-$CMAKE_VERSION.tar.gz \ && tar -xzf cmake-$CMAKE_VERSION.tar.gz \ && cd cmake-$CMAKE_VERSION \ - && ./bootstrap --prefix=$HOME \ + && ./bootstrap --prefix=$PREFIX \ && make -j"${JOBS}" \ && make install \ && cd .. \ @@ -133,7 +133,7 @@ printf "\\n" printf "Checking Boost library (${BOOST_VERSION}) installation...\\n" -BOOSTVERSION=$( grep "#define BOOST_VERSION" "$HOME/opt/boost/include/boost/version.hpp" 2>/dev/null | tail -1 | tr -s ' ' | cut -d\ -f3 ) +BOOSTVERSION=$( grep "#define BOOST_VERSION" "$BOOST_ROOT/include/boost/version.hpp" 2>/dev/null | tail -1 | tr -s ' ' | cut -d\ -f3 ) if [ "${BOOSTVERSION}" != "${BOOST_VERSION_MAJOR}0${BOOST_VERSION_MINOR}0${BOOST_VERSION_PATCH}" ]; then printf "Installing Boost library...\\n" curl -LO https://dl.bintray.com/boostorg/release/${BOOST_VERSION_MAJOR}.${BOOST_VERSION_MINOR}.${BOOST_VERSION_PATCH}/source/boost_$BOOST_VERSION.tar.bz2 \ @@ -184,7 +184,7 @@ if [ ! -d $MONGO_C_DRIVER_ROOT ]; then && cd mongo-c-driver-$MONGO_C_DRIVER_VERSION \ && mkdir -p cmake-build \ && cd cmake-build \ - && $CMAKE -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$HOME -DENABLE_BSON=ON -DENABLE_SSL=OPENSSL -DENABLE_AUTOMATIC_INIT_AND_CLEANUP=OFF -DENABLE_STATIC=ON .. 
\ + && $CMAKE -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$PREFIX -DENABLE_BSON=ON -DENABLE_SSL=OPENSSL -DENABLE_AUTOMATIC_INIT_AND_CLEANUP=OFF -DENABLE_STATIC=ON .. \ && make -j"${JOBS}" \ && make install \ && cd ../.. \ @@ -201,7 +201,7 @@ if [ ! -d $MONGO_CXX_DRIVER_ROOT ]; then curl -L https://github.com/mongodb/mongo-cxx-driver/archive/r$MONGO_CXX_DRIVER_VERSION.tar.gz -o mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION.tar.gz \ && tar -xzf mongo-cxx-driver-r${MONGO_CXX_DRIVER_VERSION}.tar.gz \ && cd mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION/build \ - && $CMAKE -DBUILD_SHARED_LIBS=OFF -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$HOME .. \ + && $CMAKE -DBUILD_SHARED_LIBS=OFF -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$PREFIX .. \ && make -j"${JOBS}" VERBOSE=1 \ && make install \ && cd ../.. \ @@ -233,4 +233,4 @@ printf "\\n" function print_instructions() { return 0 -} \ No newline at end of file +} diff --git a/scripts/eosio_build_ubuntu.sh b/scripts/eosio_build_ubuntu.sh index 5561b14a450..a62d264394b 100755 --- a/scripts/eosio_build_ubuntu.sh +++ b/scripts/eosio_build_ubuntu.sh @@ -111,7 +111,7 @@ for (( i=0; i<${#DEP_ARRAY[@]}; i++ )); do done if [ "${COUNT}" -gt 1 ]; then printf "\\nThe following dependencies are required to install EOSIO:\\n" - printf "${DISPLAY}\\n\\n" + printf "${DISPLAY}\\n\\n" if [ $ANSWER != 1 ]; then read -p "Do you wish to install these packages? (y/n) " ANSWER; fi case $ANSWER in 1 | [Yy]* ) @@ -125,7 +125,7 @@ if [ "${COUNT}" -gt 1 ]; then [Nn]* ) echo "User aborting installation of required dependencies, Exiting now."; exit;; * ) echo "Please type 'y' for yes or 'n' for no."; exit;; esac -else +else printf " - No required APT dependencies to install.\\n" fi @@ -139,7 +139,7 @@ if [ ! -e $CMAKE ]; then curl -LO https://cmake.org/files/v$CMAKE_VERSION_MAJOR.$CMAKE_VERSION_MINOR/cmake-$CMAKE_VERSION.tar.gz \ && tar -xzf cmake-$CMAKE_VERSION.tar.gz \ && cd cmake-$CMAKE_VERSION \ - && ./bootstrap --prefix=$HOME \ + && ./bootstrap --prefix=$PREFIX \ && make -j"${JOBS}" \ && make install \ && cd .. \ @@ -156,7 +156,7 @@ printf "\\n" printf "Checking Boost library (${BOOST_VERSION}) installation...\\n" -BOOSTVERSION=$( grep "#define BOOST_VERSION" "$HOME/opt/boost/include/boost/version.hpp" 2>/dev/null | tail -1 | tr -s ' ' | cut -d\ -f3 ) +BOOSTVERSION=$( grep "#define BOOST_VERSION" "$BOOST_ROOT/include/boost/version.hpp" 2>/dev/null | tail -1 | tr -s ' ' | cut -d\ -f3 ) if [ "${BOOSTVERSION}" != "${BOOST_VERSION_MAJOR}0${BOOST_VERSION_MINOR}0${BOOST_VERSION_PATCH}" ]; then printf "Installing Boost library...\\n" curl -LO https://dl.bintray.com/boostorg/release/${BOOST_VERSION_MAJOR}.${BOOST_VERSION_MINOR}.${BOOST_VERSION_PATCH}/source/boost_$BOOST_VERSION.tar.bz2 \ @@ -207,7 +207,7 @@ if [ ! -d $MONGO_C_DRIVER_ROOT ]; then && cd mongo-c-driver-$MONGO_C_DRIVER_VERSION \ && mkdir -p cmake-build \ && cd cmake-build \ - && $CMAKE -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$HOME -DENABLE_BSON=ON -DENABLE_SSL=OPENSSL -DENABLE_AUTOMATIC_INIT_AND_CLEANUP=OFF -DENABLE_STATIC=ON .. \ + && $CMAKE -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$PREFIX -DENABLE_BSON=ON -DENABLE_SSL=OPENSSL -DENABLE_AUTOMATIC_INIT_AND_CLEANUP=OFF -DENABLE_STATIC=ON .. \ && make -j"${JOBS}" \ && make install \ && cd ../.. \ @@ -224,7 +224,7 @@ if [ ! 
-d $MONGO_CXX_DRIVER_ROOT ]; then curl -L https://github.com/mongodb/mongo-cxx-driver/archive/r$MONGO_CXX_DRIVER_VERSION.tar.gz -o mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION.tar.gz \ && tar -xzf mongo-cxx-driver-r${MONGO_CXX_DRIVER_VERSION}.tar.gz \ && cd mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION/build \ - && $CMAKE -DBUILD_SHARED_LIBS=OFF -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$HOME .. \ + && $CMAKE -DBUILD_SHARED_LIBS=OFF -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$PREFIX .. \ && make -j"${JOBS}" VERBOSE=1 \ && make install \ && cd ../.. \ From 105f3e6f6b2ddf3939128f576941f53a499514ee Mon Sep 17 00:00:00 2001 From: Adam Mitz Date: Mon, 8 Apr 2019 16:56:09 -0500 Subject: [PATCH 313/680] updated getopts --- scripts/eosio_build.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/eosio_build.sh b/scripts/eosio_build.sh index 047bc9ce590..5c05d1c3c76 100755 --- a/scripts/eosio_build.sh +++ b/scripts/eosio_build.sh @@ -74,7 +74,7 @@ EOT } if [ $# -ne 0 ]; then - while getopts ":cdo:s:ahy" opt; do + while getopts ":cdo:s:p:b:hy" opt; do case "${opt}" in o ) options=( "Debug" "Release" "RelWithDebInfo" "MinSizeRel" ) From 4e8ba132cfbec03621c21f0eec845cd331f53054 Mon Sep 17 00:00:00 2001 From: arhag Date: Mon, 8 Apr 2019 18:55:14 -0400 Subject: [PATCH 314/680] Added protocol_feature_tests/no_duplicate_deferred_id_test unit test to test the NO_DUPLICATE_DEFERRED_ID protocol feature. #6115 Updated the deferred_test test contract to support testing requirements of new test. --- unittests/protocol_feature_tests.cpp | 132 ++++++++++++++++++ .../deferred_test/deferred_test.cpp | 21 +++ .../deferred_test/deferred_test.wasm | Bin 8216 -> 10152 bytes 3 files changed, 153 insertions(+) diff --git a/unittests/protocol_feature_tests.cpp b/unittests/protocol_feature_tests.cpp index c510904864a..ec994592fcb 100644 --- a/unittests/protocol_feature_tests.cpp +++ b/unittests/protocol_feature_tests.cpp @@ -4,6 +4,7 @@ */ #include #include +#include #include #include @@ -510,6 +511,137 @@ BOOST_AUTO_TEST_CASE( replace_deferred_test ) try { } FC_LOG_AND_RETHROW() +BOOST_AUTO_TEST_CASE( no_duplicate_deferred_id_test ) try { + tester c( setup_policy::preactivate_feature_and_new_bios ); + tester c2( setup_policy::none ); + + c.create_accounts( {N(alice), N(test)} ); + c.set_code( N(test), contracts::deferred_test_wasm() ); + c.set_abi( N(test), contracts::deferred_test_abi().data() ); + c.produce_block(); + + push_blocks( c, c2 ); + + c2.push_action( N(test), N(defercall), N(alice), fc::mutable_variant_object() + ("payer", "alice") + ("sender_id", 1) + ("contract", "test") + ("payload", 50) + ); + + c2.finish_block(); + + BOOST_CHECK_EXCEPTION( + c2.produce_block(), + fc::exception, + fc_exception_message_is( "no transaction extensions supported yet for deferred transactions" ) + ); + + c2.produce_empty_block( fc::minutes(10) ); + + transaction_trace_ptr trace0; + auto h = c2.control->applied_transaction.connect( [&]( const transaction_trace_ptr& t) { + if( t && t->receipt && t->receipt->status == transaction_receipt::expired) { + trace0 = t; + } + } ); + + c2.produce_block(); + + h.disconnect(); + + BOOST_REQUIRE( trace0 ); + + c.produce_block(); + + const auto& pfm = c.control->get_protocol_feature_manager(); + + auto d1 = pfm.get_builtin_digest( builtin_protocol_feature_t::replace_deferred ); + BOOST_REQUIRE( d1 ); + auto d2 = pfm.get_builtin_digest( builtin_protocol_feature_t::no_duplicate_deferred_id ); + BOOST_REQUIRE( d2 ); + + c.preactivate_protocol_features( {*d1, 
*d2} );
   c.produce_block();

   auto& index = c.control->db().get_index<generated_transaction_multi_index, by_trx_id>();

   auto check_generation_context = []( auto&& data,
                                       const transaction_id_type& sender_trx_id,
                                       unsigned __int128 sender_id,
                                       account_name sender )
   {
      transaction trx;
      fc::datastream<const char*> ds1( data.data(), data.size() );
      fc::raw::unpack( ds1, trx );
      BOOST_REQUIRE_EQUAL( trx.transaction_extensions.size(), 1 );
      BOOST_REQUIRE_EQUAL( trx.transaction_extensions.back().first, 0 );

      fc::datastream<const char*> ds2( trx.transaction_extensions.back().second.data(),
                                       trx.transaction_extensions.back().second.size() );

      transaction_id_type actual_sender_trx_id;
      fc::raw::unpack( ds2, actual_sender_trx_id );
      BOOST_CHECK_EQUAL( actual_sender_trx_id, sender_trx_id );

      unsigned __int128 actual_sender_id;
      fc::raw::unpack( ds2, actual_sender_id );
      BOOST_CHECK( actual_sender_id == sender_id );

      uint64_t actual_sender;
      fc::raw::unpack( ds2, actual_sender );
      BOOST_CHECK_EQUAL( account_name(actual_sender), sender );
   };

   BOOST_CHECK_EXCEPTION(
      c.push_action( N(test), N(defercall), N(alice), fc::mutable_variant_object()
                        ("payer", "alice")
                        ("sender_id", 1)
                        ("contract", "test")
                        ("payload", 77 )
      ),
      ill_formed_deferred_transaction_generation_context,
      fc_exception_message_is( "deferred transaction generaction context contains mismatching sender" )
   );

   BOOST_REQUIRE_EQUAL(0, index.size());

   auto trace1 = c.push_action( N(test), N(defercall), N(alice), fc::mutable_variant_object()
                                  ("payer", "alice")
                                  ("sender_id", 1)
                                  ("contract", "test")
                                  ("payload", 40)
   );

   BOOST_REQUIRE_EQUAL(1, index.size());

   check_generation_context( index.begin()->packed_trx,
                             trace1->id,
                             ((static_cast<unsigned __int128>(N(alice)) << 64) | 1),
                             N(test) );

   c.produce_block();

   BOOST_REQUIRE_EQUAL(0, index.size());

   auto trace2 = c.push_action( N(test), N(defercall), N(alice), fc::mutable_variant_object()
                                  ("payer", "alice")
                                  ("sender_id", 1)
                                  ("contract", "test")
                                  ("payload", 50)
   );

   BOOST_REQUIRE_EQUAL(1, index.size());

   check_generation_context( index.begin()->packed_trx,
                             trace2->id,
                             ((static_cast<unsigned __int128>(N(alice)) << 64) | 1),
                             N(test) );

   c.produce_block();

+} FC_LOG_AND_RETHROW()

 BOOST_AUTO_TEST_CASE( fix_linkauth_restriction ) { try {
    tester chain( setup_policy::preactivate_feature_and_new_bios );
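Both the unit test above and the contract below build the deferred transaction's identity the same way: the 128-bit sender id carries the payer's 64-bit account value in the high half and the caller-chosen sender_id in the low half. In isolation:

#include <cstdint>

// 128-bit deferred-transaction sender id: payer in the high 64 bits,
// the caller-supplied 64-bit id in the low bits.
inline unsigned __int128 make_sender_id( uint64_t payer_value, uint64_t sender_id ) {
   return ( static_cast<unsigned __int128>(payer_value) << 64 ) | sender_id;
}

(unsigned __int128 is a GCC/Clang extension; the contract side spells the same type uint128_t via the CDT.)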
payload != 77 ) + ds << get_self(); + else + ds << payer; + } + trx.send( (static_cast(payer.value) << 64) | sender_id, payer, replace_existing ); } diff --git a/unittests/test-contracts/deferred_test/deferred_test.wasm b/unittests/test-contracts/deferred_test/deferred_test.wasm index fbfdaf14f0841a352371938b55345e21362908ff..588e38fadf2ed8f3d8a7952114113f3bc43ebd61 100755 GIT binary patch delta 3707 zcmZu!YiwLc6`q-U_wKImuJ`)Aer!DV#))?m60aSXbhRD}KRw#%cM7M>eifE7`HL8#rR8=zKAvYj?6d@r{5eOksfr2PPDoFj?@SRyZ zF(tNq&zW=1oY$OlX8g+Jou_P8T)M~^V|=;wx;>mRf*@e$&0uvEAc92@@L&sJ*N=6k z%pf3^SY2h}A7<|l=lAfysa1=WrQ&q0yil21Du1QK;zp^m>{Ls|nW-H-W)V4ATv{qs zYg0=XibEr#tbu6uBidk{vO{WQaHvjmwxp`%#nQ!@!O?u399TP5#iyo<2+JaT%2EFvf-Pj z`c3BSw!hDr(;I$9^rVkP3Tta?uYUB=8^3y&N%6EfDVgNX@o-hN^U3gK(aU#*Z;O3V zW!s85L*ae#PwA#?r%$zy3Z_`f7L)WVnF@T*l-zgR@ZZK&?u8Ze+<`qFKbhpDQ%k#s zEe+b870e#zBG4Pb@Af$M{oTy;yB<54WVS7tZ86E5e!G#86o^=PcAc6F_eb{gz2UPF z7vGi0-p>7>l#4+gp&SS|A|t^yVemk#@?bYp`8m(%14{~ND93kN3(Pf@<8sAZ0X!lm zjY-Xt!gnng=SrM``A8vlO`6ggWNh6t3*#@YyAkjL-{sN-&o!k`-1mf%r;|J(kQ%W0 zsU!;&Q}1jt$3j>#GG@d8wiJySjBF!+j_)#Zgj!&`yJZNrp`527tXFhP3Cw*ha23*k^)W6^` z0&|4=+B<}C$587UGeCP|izd{Y8WWD`JQ9E z5~YQY7e&2+^f%^9paBJGjf(Ri<^8%SzIRcr!+@e_ly7K5jTe{dPd=8{tR{-g(t5ThMr4u+i46u%W=IZ)TFw!B}w=oP$M&T#or(3=HidsbVrlsu%jldl zCW-FG$wX9*qf%u|#??5Phfx_IHF=L0Q9W;K&j2vkNR(H5p(AMX&*)B#j%_~eVX!PAE&e4`}pn(^g zQ|z2RmZ;)RvLl;*dUqCD98m@y9ahiEO`s2pANSN4oEPU^Gb z{C4kATR-f}`Ud9->a5N1tI;XmAO0!2$;ZN*u`Y4rUbqq4+nl2TC@}z2gkb6qgdfJr zZT+smoJQKvf_1fH%ucE@YN`;tonVIMY}NS24hZ{ zmpqg58BsKmI2u(wz>hu+xDK(HG;r-eP<{O1!EtIG%r#|PLVF`uXSyjejzihnk9*Qm zX3gn$qY1;t_2F9hi^5lOkl9WH`ba>nYe%}ClE}9HtdjOc&?|X5{2e_ULaTz0i zoF<1+pAlrDUOc#aFyF{kW%^!%!b;Yo6Lft^6Stg9szqN4M7uoaHsbb7&bcW{8|p2N zs}j`&gE&Q+{g%4wBVIj>xJ(k;Bz6SO5+C;$NpM&qUo28>;x&3`;*81BKG9sWEyk21?Q9_It$V&k_(E)`z32ip&K=`5OA z;nZuDM(SAjGy4!f68^_Nhw~W+} z1TTa?ZF-){In#WW9}UaR3$&1#gJG;C(|HsfjRa_A!0Ifz+oTV8}$)m--M`{q6%Rz0K-*#0tOHx|baJ zW$S4IskU6~Buw}C&WUicEz{gnPpqld%rmHP6VJSbHkW6@>uqh~wR_>6wk(0)w(TSE zaUJYy|2#hwuC`|%w|%2Mi|y~Xw;X+qMwXre+bF7oFm9iJlbLC00nxyI)!r|y|dT@pNJBKNs#m+MXe$Y8V z;DgSK1isXziMP9EX_e@HRfAn4;h(#&5c+J-06!6az302MkeSaCsAgWNOU)5j*)>Ao z?yloc{!S#Ay0gjb=VpuL`O=K6Ey&{h{K9mxR+5XwFPE0srAq0_VrjYtW~o|TsLFCh z&X%u~W~N?Psg;)G^g?B+R=qS`Td0Ojy=fcoMO!p5hUa29+`FINigrZ4up&Wv6L^!n zT&`V^^iNI+%4Ud;L$ljvO0%Wv?4`=IoQ4Z9w@@M8d*Q9#4t_t}=-n%>-3$NHo0hn( ZZ0mBhTq}W(MA{MhUl2I6&8*AVe*wVT$p`=d delta 1860 zcmZuyTWl0n82-;?rb}nro|fIRJDs*?rh6^46zEOl($k1gF&Kmx9|S3M!EOsQEe1&p zTNM*b6pP18HO43pa(OX`JP6UW2}FbXBq4@qOw>0dYT$toWBkvoT!PFd=fBSX{g?C4 z9(!!M&w;^-odf{L=Gwzf=iyC>I4hM(u!WaO`;pN7`+?r1btey`iJqn#N|h{=1}92k zj%`Jwl-f4uG(xMYc}^UHaTJJ@{)1hyVl|GKr+?-yiRqjCc@o!qtw-Au0a;Z=3|cn@ z1wL~mGmbe5PA{P)ijWHJ4FM4`{ioHKjRml(3LHm*V}k_YI@MsjP>E99DQi95Y0oD~ zy~XzNe9_J%o9^|Cpg-m-^{m}hLbyW=@mfGlk&(bkfwUy=VFwXA&%ozub_N8ITu^l> z@C}QhGF|e_- zs99E&WlkbsFNQKscpM}}kumtc1BxPxy|i-F3%iV3iBU`d5S~*uiCd=FT_$Peu~$CB zxFpO#Qj?dZzBW-Mk*xn$ctYgLA$o6xz@)r%j*7rG1}<&Us<+0LkOlhfSUW~J zAKQqh8_(nUP`nS%m*XisPlTF_cWIW^=Q-km{;_Tp3mcfbk_7sdxl_!maZc^1|D2K~ z+D~q#Skep0wWM8tm)vcH74F((yDM$URjwHDFj0vY7pEsW^$|BqGWvr1K55lE8Xjy# zPnx400WM^Yc1jo#)mTAUWh$Vi->An5zin{6aHeQYlm|kUcW0)WdKr^ItGC*a2`}H% zjj&!(bUEiJvCOW_z(_!G>NJHrSpJEcfQP+OlY9VIo8#Vo@A(N_RJ;Vx!+K3iRmIwH zfY)Yqv@u7KbmN?nY%P;RjT=Z>|I(PNyH7Z;DYss@IO#~y#}zTL2##4@!_~8Kq>`$L zs6dB2i}<#BGu%kPEj=D`w&3L*#zgG?s>j1b&gi$A+U^H-v1wtW0W+y55HoQvrk2eM zOy=vFTMU_KUX0`YxcT8k8^&ijnhU7u2WDmVh+^lt%-#=axlFeiq_C!%|JNk2CUyCC zF_&ia>$1~~=DZv*MEJ`LS?BLGKOiowChcoq&||F=irynp_KsmA=KwG^UYV# zvE@yebZX_^;XTjDiNda-!nhnBGJ2eb6CWyUFN|-0cGot!ZE$q7FeG>HGPdKo qBRh|r(QC39dKr_>CgfFgxA%?@? 
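For readers following the deferred_test changes above: the 56-byte generation context that defercall packs into transaction extension 0 is three fixed-width fields, serialized in order. A minimal sketch of that layout (the struct name is illustrative; only the field order and sizes come from the contract code above):

   #include <array>
   #include <cstdint>

   struct generation_context_layout {           // 56 bytes total
      std::array<uint8_t, 32> sender_trx_id;    // sha256 of the generating transaction's packed bytes
      unsigned __int128       sender_id;        // (payer.value << 64) | sender_id, 16 bytes
      uint64_t                sender;           // account expected to match get_self(), 8 bytes
   };

The check_generation_context helper in the test unpacks exactly these three fields, in this order, and compares them against the expected sender transaction id, composite sender id, and sender account.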
Date: Mon, 8 Apr 2019 19:18:36 -0400 Subject: [PATCH 315/680] remove trailing comma from action_trace_v0 ABI of state_history_plugin --- plugins/state_history_plugin/state_history_plugin_abi.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/state_history_plugin/state_history_plugin_abi.cpp b/plugins/state_history_plugin/state_history_plugin_abi.cpp index 95affaf57c2..b35882a7b83 100644 --- a/plugins/state_history_plugin/state_history_plugin_abi.cpp +++ b/plugins/state_history_plugin/state_history_plugin_abi.cpp @@ -103,7 +103,7 @@ extern const char* const state_history_plugin_abi = R"({ { "name": "elapsed", "type": "int64" }, { "name": "console", "type": "string" }, { "name": "account_ram_deltas", "type": "account_delta[]" }, - { "name": "except", "type": "string?" }, + { "name": "except", "type": "string?" } ] }, { From c64044054300a78cc570aa2d58b75b404819816c Mon Sep 17 00:00:00 2001 From: arhag Date: Mon, 8 Apr 2019 20:14:35 -0400 Subject: [PATCH 316/680] implement ONLY_BILL_FIRST_AUTHORIZER protocol feature #6332 --- libraries/chain/controller.cpp | 8 +++++++- .../include/eosio/chain/protocol_feature_manager.hpp | 3 ++- libraries/chain/include/eosio/chain/transaction.hpp | 3 ++- libraries/chain/protocol_feature_manager.cpp | 11 +++++++++++ libraries/chain/transaction_context.cpp | 12 ++++++++---- unittests/auth_tests.cpp | 5 +++-- 6 files changed, 33 insertions(+), 9 deletions(-) diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp index 2e732c869dc..87a2e3ee0f0 100644 --- a/libraries/chain/controller.cpp +++ b/libraries/chain/controller.cpp @@ -1088,7 +1088,13 @@ struct controller_impl { trx_context.init_for_deferred_trx( gtrx.published ); if( trx_context.enforce_whiteblacklist && pending->_block_status == controller::block_status::incomplete ) { - check_actor_list( trx_context.bill_to_accounts ); // Assumes bill_to_accounts is the set of actors authorizing the transaction + flat_set actors; + for( const auto& act : trx_context.trx.actions ) { + for( const auto& auth : act.authorization ) { + actors.insert( auth.actor ); + } + } + check_actor_list( actors ); } trx_context.exec(); diff --git a/libraries/chain/include/eosio/chain/protocol_feature_manager.hpp b/libraries/chain/include/eosio/chain/protocol_feature_manager.hpp index 773546eead0..097b8772d1a 100644 --- a/libraries/chain/include/eosio/chain/protocol_feature_manager.hpp +++ b/libraries/chain/include/eosio/chain/protocol_feature_manager.hpp @@ -19,7 +19,8 @@ enum class builtin_protocol_feature_t : uint32_t { replace_deferred, fix_linkauth_restriction, disallow_empty_producer_schedule, - restrict_action_to_self + restrict_action_to_self, + only_bill_first_authorizer }; struct protocol_feature_subjective_restrictions { diff --git a/libraries/chain/include/eosio/chain/transaction.hpp b/libraries/chain/include/eosio/chain/transaction.hpp index db61e5b17cb..54ea0869730 100644 --- a/libraries/chain/include/eosio/chain/transaction.hpp +++ b/libraries/chain/include/eosio/chain/transaction.hpp @@ -66,7 +66,8 @@ namespace eosio { namespace chain { bool allow_duplicate_keys = false) const; uint32_t total_actions()const { return context_free_actions.size() + actions.size(); } - account_name first_authorizor()const { + + account_name first_authorizer()const { for( const auto& a : actions ) { for( const auto& u : a.authorization ) return u.actor; diff --git a/libraries/chain/protocol_feature_manager.cpp b/libraries/chain/protocol_feature_manager.cpp index 67dc76e36ef..a4d60baa273 100644 
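Each builtin_protocol_feature_spec registered in the hunk below pairs the feature name with the SHA-256 digest of its description message. A sketch of recomputing such a digest with fc (hypothetical helper; the assumption, taken from the source comment, is that the raw message bytes between the comment delimiters are hashed exactly as-is):

   #include <fc/crypto/sha256.hpp>
   #include <string>

   // Recompute a builtin feature's description digest so it can be compared
   // against the pinned hex constant in protocol_feature_manager.cpp.
   fc::sha256 description_digest( const std::string& msg ) {
      return fc::sha256::hash( msg.data(), msg.size() );
   }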
--- a/libraries/chain/protocol_feature_manager.cpp
+++ b/libraries/chain/protocol_feature_manager.cpp
@@ -86,6 +86,17 @@ Builtin protocol feature: RESTRICT_ACTION_TO_SELF
 Disallows bypass of authorization checks by unprivileged contracts when sending inline
 actions or deferred transactions. The original protocol rules allow a bypass of authorization
 checks for actions sent by a contract to itself. This protocol feature removes that bypass.
+*/
+            {}
+         } )
+         (  builtin_protocol_feature_t::only_bill_first_authorizer, builtin_protocol_feature_spec{
+            "ONLY_BILL_FIRST_AUTHORIZER",
+            fc::variant("2f1f13e291c79da5a2bbad259ed7c1f2d34f697ea460b14b565ac33b063b73e2").as<digest_type>(),
+            // SHA256 hash of the raw message below within the comment delimiters (do not modify message below).
+/*
+Builtin protocol feature: ONLY_BILL_FIRST_AUTHORIZER
+
+Adds CPU and network bandwidth usage to only the first authorizer of a transaction.
 */
             {}
          } )
diff --git a/libraries/chain/transaction_context.cpp b/libraries/chain/transaction_context.cpp
index 226e6863a16..196b363e80f 100644
--- a/libraries/chain/transaction_context.cpp
+++ b/libraries/chain/transaction_context.cpp
@@ -218,9 +218,13 @@ namespace bacc = boost::accumulators;
       validate_cpu_usage_to_bill( billed_cpu_time_us, false ); // Fail early if the amount to be billed is too high
 
       // Record accounts to be billed for network and CPU usage
-      for( const auto& act : trx.actions ) {
-         for( const auto& auth : act.authorization ) {
-            bill_to_accounts.insert( auth.actor );
+      if( control.is_builtin_activated(builtin_protocol_feature_t::only_bill_first_authorizer) ) {
+         bill_to_accounts.insert( trx.first_authorizer() );
+      } else {
+         for( const auto& act : trx.actions ) {
+            for( const auto& auth : act.authorization ) {
+               bill_to_accounts.insert( auth.actor );
+            }
          }
       }
       validate_ram_usage.reserve( bill_to_accounts.size() );
@@ -583,7 +587,7 @@ namespace bacc = boost::accumulators;
                       + static_cast<uint64_t>(config::transaction_id_net_usage) ); // Will exit early if net usage cannot be paid.
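In effect, the first transaction_context.cpp hunk above collapses the bill-to set to a single account once the feature is active. A standalone sketch of the resulting rule, using the chain types from this file (the helper name is hypothetical, not part of the patch):

   // Select the accounts charged for a transaction's CPU and net usage.
   flat_set<account_name> billed_accounts( const transaction& trx, bool only_first_activated ) {
      flat_set<account_name> out;
      if( only_first_activated ) {
         out.insert( trx.first_authorizer() );  // single bill-to account
      } else {
         for( const auto& act : trx.actions )
            for( const auto& auth : act.authorization )
               out.insert( auth.actor );        // every declared actor is billed
      }
      return out;
   }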
 }
 
-      auto first_auth = trx.first_authorizor();
+      auto first_auth = trx.first_authorizer();
 
       uint32_t trx_size = 0;
       const auto& cgto = control.mutable_db().create<generated_transaction_object>( [&]( auto& gto ) {
diff --git a/unittests/auth_tests.cpp b/unittests/auth_tests.cpp
index a238c7246a3..0422487aae8 100644
--- a/unittests/auth_tests.cpp
+++ b/unittests/auth_tests.cpp
@@ -371,7 +371,8 @@ BOOST_AUTO_TEST_CASE( any_auth ) { try {
 
 BOOST_AUTO_TEST_CASE(no_double_billing) { try {
-   TESTER chain;
+   validating_tester chain( validating_tester::default_config() );
+   chain.execute_setup_policy( setup_policy::preactivate_feature_and_new_bios );
 
    chain.produce_block();
 
@@ -490,7 +491,7 @@ BOOST_AUTO_TEST_CASE( linkauth_special ) { try {
    chain.create_account(N(tester));
    chain.create_account(N(tester2));
    chain.produce_blocks();
-   
+
    chain.push_action(config::system_account_name, updateauth::get_name(), tester_account, fc::mutable_variant_object()
            ("account", "tester")
            ("permission", "first")

From a5e9c109691c40ee8b04d5378b96a4029b4a72e8 Mon Sep 17 00:00:00 2001
From: Kayan
Date: Tue, 9 Apr 2019 17:58:35 +0800
Subject: [PATCH 317/680] add test case to 6332 (only bill resource to first
 authorizer)

---
 unittests/protocol_feature_tests.cpp | 97 ++++++++++++++++++++++++++++
 1 file changed, 97 insertions(+)

diff --git a/unittests/protocol_feature_tests.cpp b/unittests/protocol_feature_tests.cpp
index c510904864a..200310e6b9a 100644
--- a/unittests/protocol_feature_tests.cpp
+++ b/unittests/protocol_feature_tests.cpp
@@ -607,4 +607,101 @@ BOOST_AUTO_TEST_CASE( disallow_empty_producer_schedule_test ) { try {
 
 } FC_LOG_AND_RETHROW() }
 
+BOOST_AUTO_TEST_CASE( only_bill_to_first_authorizer ) { try {
+   tester chain( setup_policy::preactivate_feature_and_new_bios );
+
+   const auto& tester_account = N(tester);
+   const auto& tester_account2 = N(tester2);
+
+   chain.produce_blocks();
+   chain.create_account(tester_account);
+   chain.create_account(tester_account2);
+
+   resource_limits_manager& mgr = chain.control->get_mutable_resource_limits_manager();
+   mgr.set_account_limits( tester_account, 10000, 1000, 1000 );
+   mgr.set_account_limits( tester_account2, 10000, 1000, 1000 );
+   mgr.process_account_limit_updates();
+
+   chain.produce_blocks();
+
+   {
+      action act;
+      act.account = tester_account;
+      act.name = N(null);
+      act.authorization = vector<permission_level>{
+         {tester_account, config::active_name},
+         {tester_account2, config::active_name}};
+
+      signed_transaction trx;
+      trx.actions.emplace_back(std::move(act));
+      chain.set_transaction_headers(trx);
+
+      trx.sign(get_private_key(tester_account, "active"), chain.control->get_chain_id());
+      trx.sign(get_private_key(tester_account2, "active"), chain.control->get_chain_id());
+
+
+      auto tester_cpu_limit0 = mgr.get_account_cpu_limit_ex(tester_account);
+      auto tester2_cpu_limit0 = mgr.get_account_cpu_limit_ex(tester_account2);
+      auto tester_net_limit0 = mgr.get_account_net_limit_ex(tester_account);
+      auto tester2_net_limit0 = mgr.get_account_net_limit_ex(tester_account2);
+
+      chain.push_transaction(trx);
+
+      auto tester_cpu_limit1 = mgr.get_account_cpu_limit_ex(tester_account);
+      auto tester2_cpu_limit1 = mgr.get_account_cpu_limit_ex(tester_account2);
+      auto tester_net_limit1 = mgr.get_account_net_limit_ex(tester_account);
+      auto tester2_net_limit1 = mgr.get_account_net_limit_ex(tester_account2);
+
+      BOOST_CHECK(tester_cpu_limit1.used > tester_cpu_limit0.used);
+      BOOST_CHECK(tester2_cpu_limit1.used > tester2_cpu_limit0.used);
+      BOOST_CHECK(tester_net_limit1.used > tester_net_limit0.used);
+
BOOST_CHECK(tester2_net_limit1.used > tester2_net_limit0.used); + + BOOST_CHECK_EQUAL(tester_cpu_limit1.used - tester_cpu_limit0.used, tester2_cpu_limit1.used - tester2_cpu_limit0.used); + BOOST_CHECK_EQUAL(tester_net_limit1.used - tester_net_limit0.used, tester2_net_limit1.used - tester2_net_limit0.used); + } + + const auto& pfm = chain.control->get_protocol_feature_manager(); + const auto& d = pfm.get_builtin_digest( builtin_protocol_feature_t::only_bill_first_authorizer ); + BOOST_REQUIRE( d ); + + chain.preactivate_protocol_features( {*d} ); + chain.produce_blocks(); + + { + action act; + act.account = tester_account; + act.name = N(null2); + act.authorization = vector{ + {tester_account, config::active_name}, + {tester_account2, config::active_name}}; + + signed_transaction trx; + trx.actions.emplace_back(std::move(act)); + chain.set_transaction_headers(trx); + + trx.sign(get_private_key(tester_account, "active"), chain.control->get_chain_id()); + trx.sign(get_private_key(tester_account2, "active"), chain.control->get_chain_id()); + + auto tester_cpu_limit0 = mgr.get_account_cpu_limit_ex(tester_account); + auto tester2_cpu_limit0 = mgr.get_account_cpu_limit_ex(tester_account2); + auto tester_net_limit0 = mgr.get_account_net_limit_ex(tester_account); + auto tester2_net_limit0 = mgr.get_account_net_limit_ex(tester_account2); + + chain.push_transaction(trx); + + auto tester_cpu_limit1 = mgr.get_account_cpu_limit_ex(tester_account); + auto tester2_cpu_limit1 = mgr.get_account_cpu_limit_ex(tester_account2); + auto tester_net_limit1 = mgr.get_account_net_limit_ex(tester_account); + auto tester2_net_limit1 = mgr.get_account_net_limit_ex(tester_account2); + + BOOST_CHECK(tester_cpu_limit1.used > tester_cpu_limit0.used); + BOOST_CHECK(tester2_cpu_limit1.used == tester2_cpu_limit0.used); + BOOST_CHECK(tester_net_limit1.used > tester_net_limit0.used); + BOOST_CHECK(tester2_net_limit1.used == tester2_net_limit0.used); + } + +} FC_LOG_AND_RETHROW() } + + BOOST_AUTO_TEST_SUITE_END() From 20b698c02c736c5f60a93e9ce733dee9a52d8709 Mon Sep 17 00:00:00 2001 From: Andrianto Lie Date: Tue, 9 Apr 2019 18:24:40 +0800 Subject: [PATCH 318/680] Add missing change in execute_inline --- libraries/chain/apply_context.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/chain/apply_context.cpp b/libraries/chain/apply_context.cpp index 153582429cf..ad711c0be84 100644 --- a/libraries/chain/apply_context.cpp +++ b/libraries/chain/apply_context.cpp @@ -208,7 +208,7 @@ void apply_context::execute_inline( action&& a ) { bool enforce_actor_whitelist_blacklist = trx_context.enforce_whiteblacklist && control.is_producing_block(); flat_set actors; - bool disallow_send_to_self_bypass = false; // eventually set to whether the appropriate protocol feature has been activated + bool disallow_send_to_self_bypass = control.is_builtin_activated( builtin_protocol_feature_t::restrict_action_to_self ); bool send_to_self = (a.account == receiver); bool inherit_parent_authorizations = (!disallow_send_to_self_bypass && send_to_self && (receiver == act.account) && control.is_producing_block()); From 389ac8ec268ec09e5244568b4adeb8f8a9d3d221 Mon Sep 17 00:00:00 2001 From: Andrianto Lie Date: Tue, 9 Apr 2019 18:27:28 +0800 Subject: [PATCH 319/680] Add unit test for RESTRICT_ACTION_TO_SELF --- unittests/contracts.hpp.in | 1 + unittests/protocol_feature_tests.cpp | 60 +++++++++++++++++++ unittests/test-contracts/CMakeLists.txt | 1 + .../restrict_action_test/CMakeLists.txt | 6 ++ .../restrict_action_test.cpp | 48 
+++++++++++++++ .../restrict_action_test.hpp | 34 +++++++++++ 6 files changed, 150 insertions(+) create mode 100644 unittests/test-contracts/restrict_action_test/CMakeLists.txt create mode 100644 unittests/test-contracts/restrict_action_test/restrict_action_test.cpp create mode 100644 unittests/test-contracts/restrict_action_test/restrict_action_test.hpp diff --git a/unittests/contracts.hpp.in b/unittests/contracts.hpp.in index bc61854d403..35c01501436 100644 --- a/unittests/contracts.hpp.in +++ b/unittests/contracts.hpp.in @@ -44,6 +44,7 @@ namespace eosio { MAKE_READ_WASM_ABI(noop, noop, test-contracts) MAKE_READ_WASM_ABI(payloadless, payloadless, test-contracts) MAKE_READ_WASM_ABI(proxy, proxy, test-contracts) + MAKE_READ_WASM_ABI(restrict_action_test, restrict_action_test, test-contracts) MAKE_READ_WASM_ABI(snapshot_test, snapshot_test, test-contracts) MAKE_READ_WASM_ABI(test_api, test_api, test-contracts) MAKE_READ_WASM_ABI(test_api_db, test_api_db, test-contracts) diff --git a/unittests/protocol_feature_tests.cpp b/unittests/protocol_feature_tests.cpp index c510904864a..121c6e17354 100644 --- a/unittests/protocol_feature_tests.cpp +++ b/unittests/protocol_feature_tests.cpp @@ -607,4 +607,64 @@ BOOST_AUTO_TEST_CASE( disallow_empty_producer_schedule_test ) { try { } FC_LOG_AND_RETHROW() } +BOOST_AUTO_TEST_CASE( restrict_action_to_self_test ) { try { + tester c( setup_policy::preactivate_feature_and_new_bios ); + + const auto& pfm = c.control->get_protocol_feature_manager(); + const auto& d = pfm.get_builtin_digest( builtin_protocol_feature_t::restrict_action_to_self ); + BOOST_REQUIRE( d ); + + c.create_accounts( {N(testacc), N(acctonotify), N(alice)} ); + c.set_code( N(testacc), contracts::restrict_action_test_wasm() ); + c.set_abi( N(testacc), contracts::restrict_action_test_abi().data() ); + + c.set_code( N(acctonotify), contracts::restrict_action_test_wasm() ); + c.set_abi( N(acctonotify), contracts::restrict_action_test_abi().data() ); + + // Before the protocol feature is preactivated + // - Sending inline action to self = no problem + // - Sending deferred trx to self = throw subjective exception + // - Sending inline action to self from notification = throw subjective exception + // - Sending deferred trx to self from notification = throw subjective exception + BOOST_CHECK_NO_THROW( c.push_action( N(testacc), N(sendinline), N(alice), mutable_variant_object()("authorizer", "alice")) ); + BOOST_REQUIRE_EXCEPTION( c.push_action( N(testacc), N(senddefer), N(alice), + mutable_variant_object()("authorizer", "alice")("senderid", 0)), + subjective_block_production_exception, + fc_exception_message_starts_with( "Authorization failure with sent deferred transaction" ) ); + + BOOST_REQUIRE_EXCEPTION( c.push_action( N(testacc), N(notifyinline), N(alice), + mutable_variant_object()("acctonotify", "acctonotify")("authorizer", "alice")), + subjective_block_production_exception, + fc_exception_message_starts_with( "Authorization failure with inline action sent to self" ) ); + + BOOST_REQUIRE_EXCEPTION( c.push_action( N(testacc), N(notifydefer), N(alice), + mutable_variant_object()("acctonotify", "acctonotify")("authorizer", "alice")("senderid", 1)), + subjective_block_production_exception, + fc_exception_message_starts_with( "Authorization failure with sent deferred transaction" ) ); + + c.preactivate_protocol_features( {*d} ); + c.produce_block(); + + // After the protocol feature is preactivated, all the 4 cases will throw an objective unsatisfied_authorization exception + 
BOOST_REQUIRE_EXCEPTION( c.push_action( N(testacc), N(sendinline), N(alice), mutable_variant_object()("authorizer", "alice") ), + unsatisfied_authorization, + fc_exception_message_starts_with( "transaction declares authority" ) ); + + BOOST_REQUIRE_EXCEPTION( c.push_action( N(testacc), N(senddefer), N(alice), + mutable_variant_object()("authorizer", "alice")("senderid", 3)), + unsatisfied_authorization, + fc_exception_message_starts_with( "transaction declares authority" ) ); + + BOOST_REQUIRE_EXCEPTION( c.push_action( N(testacc), N(notifyinline), N(alice), + mutable_variant_object()("acctonotify", "acctonotify")("authorizer", "alice") ), + unsatisfied_authorization, + fc_exception_message_starts_with( "transaction declares authority" ) ); + + BOOST_REQUIRE_EXCEPTION( c.push_action( N(testacc), N(notifydefer), N(alice), + mutable_variant_object()("acctonotify", "acctonotify")("authorizer", "alice")("senderid", 4)), + unsatisfied_authorization, + fc_exception_message_starts_with( "transaction declares authority" ) ); + +} FC_LOG_AND_RETHROW() } + BOOST_AUTO_TEST_SUITE_END() diff --git a/unittests/test-contracts/CMakeLists.txt b/unittests/test-contracts/CMakeLists.txt index 59f4ec0c28d..749b0471349 100644 --- a/unittests/test-contracts/CMakeLists.txt +++ b/unittests/test-contracts/CMakeLists.txt @@ -13,6 +13,7 @@ add_subdirectory( integration_test ) add_subdirectory( noop ) add_subdirectory( payloadless ) add_subdirectory( proxy ) +add_subdirectory( restrict_action_test ) add_subdirectory( snapshot_test ) add_subdirectory( test_api ) add_subdirectory( test_api_db ) diff --git a/unittests/test-contracts/restrict_action_test/CMakeLists.txt b/unittests/test-contracts/restrict_action_test/CMakeLists.txt new file mode 100644 index 00000000000..5ffe32ae7da --- /dev/null +++ b/unittests/test-contracts/restrict_action_test/CMakeLists.txt @@ -0,0 +1,6 @@ +if( EOSIO_COMPILE_TEST_CONTRACTS ) + add_contract( restrict_action_test restrict_action_test restrict_action_test.cpp ) +else() + configure_file( ${CMAKE_CURRENT_SOURCE_DIR}/restrict_action_test.wasm ${CMAKE_CURRENT_BINARY_DIR}/restrict_action_test.wasm COPYONLY ) + configure_file( ${CMAKE_CURRENT_SOURCE_DIR}/restrict_action_test.abi ${CMAKE_CURRENT_BINARY_DIR}/restrict_action_test.abi COPYONLY ) +endif() diff --git a/unittests/test-contracts/restrict_action_test/restrict_action_test.cpp b/unittests/test-contracts/restrict_action_test/restrict_action_test.cpp new file mode 100644 index 00000000000..5c8f2b596b5 --- /dev/null +++ b/unittests/test-contracts/restrict_action_test/restrict_action_test.cpp @@ -0,0 +1,48 @@ +/** + * @file + * @copyright defined in eos/LICENSE + */ +#include "restrict_action_test.hpp" +#include + +using namespace eosio; + +void restrict_action_test::noop( ) { + +} + +void restrict_action_test::sendinline( name authorizer ) { + action( + permission_level{authorizer,"active"_n}, + get_self(), + "noop"_n, + std::make_tuple() + ).send(); +} + +void restrict_action_test::senddefer( name authorizer, uint32_t senderid ) { + transaction trx; + trx.actions.emplace_back( + permission_level{authorizer,"active"_n}, + get_self(), + "noop"_n, + std::make_tuple() + ); + trx.send(senderid, get_self()); +} + +void restrict_action_test::notifyinline( name acctonotify, name authorizer ) { + require_recipient(acctonotify); +} + +void restrict_action_test::notifydefer( name acctonotify, name authorizer, uint32_t senderid ) { + require_recipient(acctonotify); +} + +void restrict_action_test::on_notify_inline( name acctonotify, name authorizer ) { 
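      // Annotation (comment only, not part of the committed contract): this
      // handler executes in acctonotify's copy of the contract when testacc's
      // notifyinline action calls require_recipient(), so the inline action
      // re-sent below originates from a notification context -- the
      // authorization bypass case that RESTRICT_ACTION_TO_SELF closes.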
+ sendinline(authorizer); +} + +void restrict_action_test::on_notify_defer( name acctonotify, name authorizer, uint32_t senderid ) { + senddefer(authorizer, senderid); +} diff --git a/unittests/test-contracts/restrict_action_test/restrict_action_test.hpp b/unittests/test-contracts/restrict_action_test/restrict_action_test.hpp new file mode 100644 index 00000000000..f5ab48e385b --- /dev/null +++ b/unittests/test-contracts/restrict_action_test/restrict_action_test.hpp @@ -0,0 +1,34 @@ +/** + * @file + * @copyright defined in eos/LICENSE + */ +#pragma once + +#include + +class [[eosio::contract]] restrict_action_test : public eosio::contract { +public: + using eosio::contract::contract; + + [[eosio::action]] + void noop( ); + + [[eosio::action]] + void sendinline( eosio::name authorizer ); + + [[eosio::action]] + void senddefer( eosio::name authorizer, uint32_t senderid ); + + + [[eosio::action]] + void notifyinline( eosio::name acctonotify, eosio::name authorizer ); + + [[eosio::action]] + void notifydefer( eosio::name acctonotify, eosio::name authorizer, uint32_t senderid ); + + [[eosio::on_notify("testacc::notifyinline")]] + void on_notify_inline( eosio::name acctonotify, eosio::name authorizer ); + + [[eosio::on_notify("testacc::notifydefer")]] + void on_notify_defer( eosio::name acctonotify, eosio::name authorizer, uint32_t senderid ); +}; From 62ffe2594d0f41cf64f0ad70ed012571d2ab0c0c Mon Sep 17 00:00:00 2001 From: Andrianto Lie Date: Tue, 9 Apr 2019 18:38:17 +0800 Subject: [PATCH 320/680] Add the compiled contract of restrict_action_test --- .../restrict_action_test.abi | 98 ++++++++++++++++++ .../restrict_action_test.wasm | Bin 0 -> 7994 bytes 2 files changed, 98 insertions(+) create mode 100644 unittests/test-contracts/restrict_action_test/restrict_action_test.abi create mode 100755 unittests/test-contracts/restrict_action_test/restrict_action_test.wasm diff --git a/unittests/test-contracts/restrict_action_test/restrict_action_test.abi b/unittests/test-contracts/restrict_action_test/restrict_action_test.abi new file mode 100644 index 00000000000..37db4926071 --- /dev/null +++ b/unittests/test-contracts/restrict_action_test/restrict_action_test.abi @@ -0,0 +1,98 @@ +{ + "____comment": "This file was generated with eosio-abigen. 
DO NOT EDIT ", + "version": "eosio::abi/1.1", + "types": [], + "structs": [ + { + "name": "noop", + "base": "", + "fields": [] + }, + { + "name": "notifydefer", + "base": "", + "fields": [ + { + "name": "acctonotify", + "type": "name" + }, + { + "name": "authorizer", + "type": "name" + }, + { + "name": "senderid", + "type": "uint32" + } + ] + }, + { + "name": "notifyinline", + "base": "", + "fields": [ + { + "name": "acctonotify", + "type": "name" + }, + { + "name": "authorizer", + "type": "name" + } + ] + }, + { + "name": "senddefer", + "base": "", + "fields": [ + { + "name": "authorizer", + "type": "name" + }, + { + "name": "senderid", + "type": "uint32" + } + ] + }, + { + "name": "sendinline", + "base": "", + "fields": [ + { + "name": "authorizer", + "type": "name" + } + ] + } + ], + "actions": [ + { + "name": "noop", + "type": "noop", + "ricardian_contract": "" + }, + { + "name": "notifydefer", + "type": "notifydefer", + "ricardian_contract": "" + }, + { + "name": "notifyinline", + "type": "notifyinline", + "ricardian_contract": "" + }, + { + "name": "senddefer", + "type": "senddefer", + "ricardian_contract": "" + }, + { + "name": "sendinline", + "type": "sendinline", + "ricardian_contract": "" + } + ], + "tables": [], + "ricardian_clauses": [], + "variants": [] +} \ No newline at end of file diff --git a/unittests/test-contracts/restrict_action_test/restrict_action_test.wasm b/unittests/test-contracts/restrict_action_test/restrict_action_test.wasm new file mode 100755 index 0000000000000000000000000000000000000000..31acb952b197323236e7b6eb9d588088c38511dc GIT binary patch literal 7994 zcmds6TZ~;*8D5utYxkMXuGTUwHLkrG;Z#8@lqnL8XIJi^BJrVb)5FXGW}j*2S}cal z84V9!crZo<5{M*d@I_2CJ|Kw>PrP74M4~2y7(*Zu)WqP6590UzYoAM}Gi@;;AxzVA z_FilK|N1ZAe_89O_R0yZl+wqt<4PS5duzR3PaQY>uz1uxKG&aX$CX|?uGbX4IgSUr zwuVp56&fTVNJtv=zzA$GY5r)&ZRg~@V=JALv(vMkyE@Cuomr(h7PjwPSYB0%&yCK) z%G|p6r#*Lada=FSKG9k2 zEU(~kri)DmTxJ&U^IY8B=c5U@*OZf%@nbJ0sJ_0w zK0+k~-+lGfKi%TjZza|Yb(^a;+pzeS|0R5a@D$ETa zQ|0|tVIfBml<`ubuOBm90_8zgp00`zb9v;f(+)G;B3{M@Q2YK0j0^?Q^~mYUMZ&}R zH6ci>%&F`HsWY$z|qak*Paq(`&SWv%w$p7Z)%}p~jSL!FY;Jf4Ef`(wf~0cb}Q$0#i0BO2q%SVw?VcHroqQ)8dA*88~5*n|Tt46=SMlu%!_o;2Su| zIiFRNkReU*0nskrEJ{88;WSj4R_PU5uNiD}g?h9lm_BG9Z@~&XsA#FL#W0a_1fyUI z%RFJc5--4s(83uXj_b@4M{Gt{9JE-Ulv#FB0TGT^(ECNddghTUF#{nluP8Rfb@XBe z@c7pQz?3`@!V}=Gj+jZW@lRTkML)cG)V=EEU!xDL0Hmf& zs%&zqsbm5(O!+qc;ye}=FlEaBV!+;t^%1!1 zRd6vby7}TFMu?>@&MwNCMqtCB%9U+7TdHdnBo*#RCf=}`@v8xufcY$snFSgeWZ6d) zu;bz?j~SeZ#%_*)bRj}Ew?lpwta2=<`H%)11`atJI~S1z9QDzNKD)t`$X&5r4jW*D zGLa1^iGV?YX?}}t>g0b=O4KW-Ys9lcb*UcUJ1V_EPwIZ^ZV${*oE|F;u06bn+XCubxGNr zheJaIHXM}Te>ikfh_u+8#~#i|fRTswq2^+SN#^2m0&4rniY5MFX$*7r0A?rasI!0) zLN20)uwKp1&=W9a)n{MsK`Q2&P$aAJ9pKTd@||8S?|}jirQcBo$Sm#Vs7kp8a$>^T zl*cQV@+ra7V5eghSRNa`-vhW&O0Xyw2Rd+H%tQx`Oj*2QlBGnX>Zg#^5n_}f11jPVoqNaC<$wR_eP|JC@lT3&(w*yb` zIRXgwuyntH6f_WAOzlv$60S~=Xz+ThB1-UZk1UcHFy%ZP!9|D$pdtr1mry9CrjUF> zOr$?&l%SrtZV^l>q6HX+K8XvU57Mr@W%$aFq_XCI@mtU|SrL&*;36bZya%L&jClmO zflrijZp!TOU*OAN1TI3TCK6fHaS^_^{QXlvQS3 zs#*i-Fq}qa$S-0wq>e=&wYD~%C~avx2;$Xu0rQay#90a@feKj0i)K6n*Ia3_J;VY; zCr5W|ZrTw``dbAQUbd3_6i|LkKQW8g+E2AS@Y8nkNc}jMJQJhx9C+8|nb_VwDC|GN zL7}JD!kz<;=?YWstlO!y0{-;X=q^|e4C?@j?9up}1)}B-MhajpV7=PxIqw>zQL;zN z!x}1C@n`2Dg4qF24-Dolh67R)*+dS>AS>c7vP*DkTSnjzyNC%231rC`upmG}pi;QN zi%oL?Y`{8lad%rrAOUh9jR&3R4ZLA+pw#61MybQ1R6B)RHMBHo0~jfPBi7Ns?_W?BF9nIe900C;sE3t z1(-;P`cf6Kw+7lBcA!8Z0Tk6a%eOG#TqB4&Ob&E-N5YR^i16d*5#IN_JdSukGh|>k zScJl(VWaSZeFnWo&dm<9TSQReBoL(6Nj)kI&stfdU?%k?r(Bm)SCH-YZ6}IH31JqO zVUyE91MGw& 
zDq1icZ=TygE6MDR{@(cR&jL++mgHx7I*|mAO2fkqkj3*0q+&U=SjHBtnH6|G#0vLq zYl5C#gZS%cgg42*y#~^N*bJo?=wymx3%HFw_1b!!S4Qp%YHh;4D%3(6@u0kasbv2! zsxnTVrVI#W%iNBm`9YiIaprR9SH7$&-xAsK&Eex)!$;Ki+prF2joW?q^cDX#3*#h5 zHy-$G@5%BrFd~9(^z3&HrsE?x!-*xE{mJ_D=)mz+9N1_~e84N>0gI^0J}Uez8AQTB zj&`yVdS#blDb9M?WzYw!|d??&Wr5%NA4K<|;rr-*T`r;NhP~sjmc7q!z?;97@=coEU z_FI7sal-1~-}?(-Af>+F!nKqg?K%6-=cP2|7;r}}q34MKIdrkNW!zG1e+`HBRgbdb zdf&c=rICW~S(pI}gg!bp>1f&fZYhYUX-5R9AqYX_!d6a*ye$qXBJp{Y#6!FZ-Yq`O_o?Pdg#p(O8i6{MhAit)i`|fJb&39(q>Vj*} oz-YR4_M@9wN95h9Jgx;(eq!OL?SDh>I|ES^;w-0qb6A1}?;=>Px# literal 0 HcmV?d00001 From 36dbc01fe8ed49ad1149946b47c1990de3f86407 Mon Sep 17 00:00:00 2001 From: arhag Date: Tue, 9 Apr 2019 18:25:52 -0400 Subject: [PATCH 321/680] address review comments on #7044 --- libraries/chain/controller.cpp | 2 +- .../chain/include/eosio/chain/apply_context.hpp | 4 ++-- libraries/chain/include/eosio/chain/trace.hpp | 1 - .../include/eosio/chain/transaction_context.hpp | 14 +++++++------- libraries/chain/transaction_context.cpp | 14 ++++++++++---- 5 files changed, 20 insertions(+), 15 deletions(-) diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp index a32a4ce1b2c..14fffa061b5 100644 --- a/libraries/chain/controller.cpp +++ b/libraries/chain/controller.cpp @@ -966,7 +966,7 @@ struct controller_impl { try { trx_context.init_for_implicit_trx(); trx_context.published = gtrx.published; - trx_context.execute_action( trx_context.schedule_action( etrx.actions.back(), gtrx.sender ) ); + trx_context.execute_action( trx_context.schedule_action( etrx.actions.back(), gtrx.sender, false, 0, 0 ), 0 ); trx_context.finalize(); // Automatically rounds up network and CPU usage in trace and bills payers if successful auto restore = make_block_restore_point(); diff --git a/libraries/chain/include/eosio/chain/apply_context.hpp b/libraries/chain/include/eosio/chain/apply_context.hpp index 805bb74e485..d1eac6495dd 100644 --- a/libraries/chain/include/eosio/chain/apply_context.hpp +++ b/libraries/chain/include/eosio/chain/apply_context.hpp @@ -466,8 +466,8 @@ class apply_context { bool cancel_deferred_transaction( const uint128_t& sender_id ) { return cancel_deferred_transaction(sender_id, receiver); } protected: - uint32_t schedule_action( uint32_t ordinal_of_action_to_schedule, account_name receiver, bool context_free = false ); - uint32_t schedule_action( action&& act_to_schedule, account_name receiver, bool context_free = false ); + uint32_t schedule_action( uint32_t ordinal_of_action_to_schedule, account_name receiver, bool context_free ); + uint32_t schedule_action( action&& act_to_schedule, account_name receiver, bool context_free ); /// Authorization methods: diff --git a/libraries/chain/include/eosio/chain/trace.hpp b/libraries/chain/include/eosio/chain/trace.hpp index 67e7f62a7ee..5331bef1563 100644 --- a/libraries/chain/include/eosio/chain/trace.hpp +++ b/libraries/chain/include/eosio/chain/trace.hpp @@ -28,7 +28,6 @@ namespace eosio { namespace chain { uint32_t action_ordinal, uint32_t creator_action_ordinal, uint32_t parent_action_ordinal ); action_trace( const transaction_trace& trace, action&& act, account_name receiver, bool context_free, uint32_t action_ordinal, uint32_t creator_action_ordinal, uint32_t parent_action_ordinal ); - //action_trace( const action_receipt& r ):receipt(r){} action_trace(){} fc::unsigned_int action_ordinal; diff --git a/libraries/chain/include/eosio/chain/transaction_context.hpp b/libraries/chain/include/eosio/chain/transaction_context.hpp index 
636b3a87c82..bbfe53810f6 100644 --- a/libraries/chain/include/eosio/chain/transaction_context.hpp +++ b/libraries/chain/include/eosio/chain/transaction_context.hpp @@ -68,18 +68,18 @@ namespace eosio { namespace chain { const action_trace& get_action_trace( uint32_t action_ordinal )const; /** invalidates any action_trace references returned by get_action_trace */ - uint32_t schedule_action( const action& act, account_name receiver, bool context_free = false, - uint32_t creator_action_ordinal = 0, uint32_t parent_action_ordinal = 0 ); + uint32_t schedule_action( const action& act, account_name receiver, bool context_free, + uint32_t creator_action_ordinal, uint32_t parent_action_ordinal ); /** invalidates any action_trace references returned by get_action_trace */ - uint32_t schedule_action( action&& act, account_name receiver, bool context_free = false, - uint32_t creator_action_ordinal = 0, uint32_t parent_action_ordinal = 0 ); + uint32_t schedule_action( action&& act, account_name receiver, bool context_free, + uint32_t creator_action_ordinal, uint32_t parent_action_ordinal ); /** invalidates any action_trace references returned by get_action_trace */ - uint32_t schedule_action( uint32_t action_ordinal, account_name receiver, bool context_free = false, - uint32_t creator_action_ordinal = 0, uint32_t parent_action_ordinal = 0 ); + uint32_t schedule_action( uint32_t action_ordinal, account_name receiver, bool context_free, + uint32_t creator_action_ordinal, uint32_t parent_action_ordinal ); - void execute_action( uint32_t action_ordinal, uint32_t recurse_depth = 0 ); + void execute_action( uint32_t action_ordinal, uint32_t recurse_depth ); void schedule_transaction(); void record_transaction( const transaction_id_type& id, fc::time_point_sec expire ); diff --git a/libraries/chain/transaction_context.cpp b/libraries/chain/transaction_context.cpp index 730658538e7..0cfd87fef8d 100644 --- a/libraries/chain/transaction_context.cpp +++ b/libraries/chain/transaction_context.cpp @@ -333,13 +333,13 @@ namespace bacc = boost::accumulators; if( apply_context_free ) { for( const auto& act : trx.context_free_actions ) { - schedule_action( act, act.account, true ); + schedule_action( act, act.account, true, 0, 0 ); } } if( delay == fc::microseconds() ) { for( const auto& act : trx.actions ) { - schedule_action( act, act.account ); + schedule_action( act, act.account, false, 0, 0 ); } } @@ -574,13 +574,19 @@ namespace bacc = boost::accumulators; action_trace& transaction_context::get_action_trace( uint32_t action_ordinal ) { EOS_ASSERT( 0 < action_ordinal && action_ordinal <= trace->action_traces.size() , - transaction_exception, "invalid action_ordinal" ); + transaction_exception, + "action_ordinal ${ordinal} is outside allowed range [1,${max}]", + ("ordinal", action_ordinal)("max", trace->action_traces.size()) + ); return trace->action_traces[action_ordinal-1]; } const action_trace& transaction_context::get_action_trace( uint32_t action_ordinal )const { EOS_ASSERT( 0 < action_ordinal && action_ordinal <= trace->action_traces.size() , - transaction_exception, "invalid action_ordinal" ); + transaction_exception, + "action_ordinal ${ordinal} is outside allowed range [1,${max}]", + ("ordinal", action_ordinal)("max", trace->action_traces.size()) + ); return trace->action_traces[action_ordinal-1]; } From 6701e63e7e2903c623c894a71fe85b84372f01d0 Mon Sep 17 00:00:00 2001 From: Todd Fleming Date: Tue, 9 Apr 2019 18:41:16 -0400 Subject: [PATCH 322/680] ship: remove state_history_summary --- 
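Since the removed summary struct wrapped nothing but a single file offset, the index file is equivalently a packed array of 64-bit positions: the entry for block_num lives at byte (block_num - _begin_block) * sizeof(uint64_t). A sketch of the resulting lookup, simplified from the get_pos change below (error handling omitted):

   #include <cstdint>
   #include <fstream>

   // Read the .log byte offset for block_num out of the .index file, which
   // after this patch is just a flat array of uint64_t positions.
   uint64_t entry_position( std::fstream& index, uint32_t block_num, uint32_t begin_block ) {
      uint64_t pos = 0;
      index.seekg( ( static_cast<uint64_t>( block_num ) - begin_block ) * sizeof( pos ) );
      index.read( reinterpret_cast<char*>( &pos ), sizeof( pos ) );
      return pos;
   }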
.../state_history_log.hpp | 35 ++++++------------- .../state_history_plugin_abi.cpp | 2 +- 2 files changed, 12 insertions(+), 25 deletions(-) diff --git a/plugins/state_history_plugin/include/eosio/state_history_plugin/state_history_log.hpp b/plugins/state_history_plugin/include/eosio/state_history_plugin/state_history_log.hpp index 5f97b0280a4..96bfd1ec552 100644 --- a/plugins/state_history_plugin/include/eosio/state_history_plugin/state_history_log.hpp +++ b/plugins/state_history_plugin/include/eosio/state_history_plugin/state_history_log.hpp @@ -21,9 +21,9 @@ namespace eosio { * +---------+----------------+-----------+------------------+-----+---------+----------------+ * * *.index: - * +-----------+-------------+-----+-----------+ - * | Summary i | Summary i+1 | ... | Summary z | - * +-----------+-------------+-----+-----------+ + * +----------------+------------------+-----+----------------+ + * | Pos of Entry i | Pos of Entry i+1 | ... | Pos of Entry z | + * +----------------+------------------+-----+----------------+ * * each entry: * uint32_t block_num @@ -31,13 +31,6 @@ namespace eosio { * uint64_t size of payload * uint8_t version * payload - * - * each summary: - * uint64_t position of entry in *.log - * - * state payload: - * uint32_t size of deltas - * char[] deltas */ // todo: look into switching this to serialization instead of memcpy @@ -51,10 +44,6 @@ struct state_history_log_header { uint8_t version = 0; }; -struct state_history_summary { - uint64_t pos = 0; -}; - class state_history_log { private: const char* const name = ""; @@ -107,8 +96,7 @@ class state_history_log { log.write((char*)&pos, sizeof(pos)); index.seekg(0, std::ios_base::end); - state_history_summary summary{.pos = pos}; - index.write((char*)&summary, sizeof(summary)); + index.write((char*)&pos, sizeof(pos)); if (_begin_block == _end_block) _begin_block = header.block_num; _end_block = header.block_num + 1; @@ -208,7 +196,7 @@ class state_history_log { void open_index() { index.open(index_filename, std::ios_base::binary | std::ios_base::in | std::ios_base::out | std::ios_base::app); index.seekg(0, std::ios_base::end); - if (index.tellg() == (static_cast(_end_block) - _begin_block) * sizeof(state_history_summary)) + if (index.tellg() == (static_cast(_end_block) - _begin_block) * sizeof(uint64_t)) return; ilog("Regenerate ${name}.index", ("name", name)); index.close(); @@ -233,8 +221,7 @@ class state_history_log { // ("b", header.block_num)("pos", pos)("end", suffix_pos + sizeof(suffix))("suffix", suffix)("fs", size)); EOS_ASSERT(suffix == pos, chain::plugin_exception, "corrupt ${name}.log (8)", ("name", name)); - state_history_summary summary{.pos = pos}; - index.write((char*)&summary, sizeof(summary)); + index.write((char*)&pos, sizeof(pos)); pos = suffix_pos + sizeof(suffix); if (!(++num_found % 10000)) { printf("%10u blocks found, log pos=%12llu\r", (unsigned)num_found, (unsigned long long)pos); @@ -244,10 +231,10 @@ class state_history_log { } uint64_t get_pos(uint32_t block_num) { - state_history_summary summary; - index.seekg((block_num - _begin_block) * sizeof(summary)); - index.read((char*)&summary, sizeof(summary)); - return summary.pos; + uint64_t pos; + index.seekg((block_num - _begin_block) * sizeof(pos)); + index.read((char*)&pos, sizeof(pos)); + return pos; } void truncate(uint32_t block_num) { @@ -267,7 +254,7 @@ class state_history_log { log.seekg(0); index.seekg(0); boost::filesystem::resize_file(log_filename, pos); - boost::filesystem::resize_file(index_filename, (block_num - _begin_block) 
* sizeof(state_history_summary)); + boost::filesystem::resize_file(index_filename, (block_num - _begin_block) * sizeof(uint64_t)); _end_block = block_num; } log.sync(); diff --git a/plugins/state_history_plugin/state_history_plugin_abi.cpp b/plugins/state_history_plugin/state_history_plugin_abi.cpp index 95affaf57c2..b35882a7b83 100644 --- a/plugins/state_history_plugin/state_history_plugin_abi.cpp +++ b/plugins/state_history_plugin/state_history_plugin_abi.cpp @@ -103,7 +103,7 @@ extern const char* const state_history_plugin_abi = R"({ { "name": "elapsed", "type": "int64" }, { "name": "console", "type": "string" }, { "name": "account_ram_deltas", "type": "account_delta[]" }, - { "name": "except", "type": "string?" }, + { "name": "except", "type": "string?" } ] }, { From ea18fd9212083827749179229a526f54a49a0853 Mon Sep 17 00:00:00 2001 From: Andrianto Lie Date: Tue, 9 Apr 2019 18:24:40 +0800 Subject: [PATCH 323/680] Add missing change in execute_inline --- libraries/chain/apply_context.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/chain/apply_context.cpp b/libraries/chain/apply_context.cpp index 153582429cf..ad711c0be84 100644 --- a/libraries/chain/apply_context.cpp +++ b/libraries/chain/apply_context.cpp @@ -208,7 +208,7 @@ void apply_context::execute_inline( action&& a ) { bool enforce_actor_whitelist_blacklist = trx_context.enforce_whiteblacklist && control.is_producing_block(); flat_set actors; - bool disallow_send_to_self_bypass = false; // eventually set to whether the appropriate protocol feature has been activated + bool disallow_send_to_self_bypass = control.is_builtin_activated( builtin_protocol_feature_t::restrict_action_to_self ); bool send_to_self = (a.account == receiver); bool inherit_parent_authorizations = (!disallow_send_to_self_bypass && send_to_self && (receiver == act.account) && control.is_producing_block()); From 0069bd5ce15c13d39c097c38c383f385bb499b36 Mon Sep 17 00:00:00 2001 From: arhag Date: Tue, 9 Apr 2019 19:59:02 -0400 Subject: [PATCH 324/680] fix tests that broke because of RESTRICT_ACTION_TO_SELF protocol feature --- tests/get_table_tests.cpp | 68 ++++++++++++++++++++------------------ unittests/forked_tests.cpp | 11 +++--- 2 files changed, 43 insertions(+), 36 deletions(-) diff --git a/tests/get_table_tests.cpp b/tests/get_table_tests.cpp index 6abeb325913..91fca59ef3b 100644 --- a/tests/get_table_tests.cpp +++ b/tests/get_table_tests.cpp @@ -38,6 +38,34 @@ using namespace fc; BOOST_AUTO_TEST_SUITE(get_table_tests) +transaction_trace_ptr +issue_tokens( TESTER& t, account_name issuer, account_name to, const asset& amount, + std::string memo = "", account_name token_contract = N(eosio.token) ) +{ + signed_transaction trx; + + trx.actions.emplace_back( t.get_action( token_contract, N(issue), + vector{{issuer, config::active_name}}, + mutable_variant_object() + ("to", issuer.to_string()) + ("quantity", amount) + ("memo", memo) + ) ); + + trx.actions.emplace_back( t.get_action( token_contract, N(transfer), + vector{{issuer, config::active_name}}, + mutable_variant_object() + ("from", issuer.to_string()) + ("to", to.to_string()) + ("quantity", amount) + ("memo", memo) + ) ); + + t.set_transaction_headers(trx); + trx.sign( t.get_private_key( issuer, "active" ), t.control->get_chain_id() ); + return t.push_transaction( trx ); +} + BOOST_FIXTURE_TEST_CASE( get_scope_test, TESTER ) try { produce_blocks(2); @@ -60,11 +88,7 @@ BOOST_FIXTURE_TEST_CASE( get_scope_test, TESTER ) try { // issue for (account_name a: accs) { - push_action( 
N(eosio.token), N(issue), "eosio", mutable_variant_object() - ("to", name(a) ) - ("quantity", eosio::chain::asset::from_string("999.0000 SYS") ) - ("memo", "") - ); + issue_tokens( *this, config::system_account_name, a, eosio::chain::asset::from_string("999.0000 SYS") ); } produce_blocks(1); @@ -136,11 +160,7 @@ BOOST_FIXTURE_TEST_CASE( get_table_test, TESTER ) try { // issue for (account_name a: accs) { - push_action( N(eosio.token), N(issue), "eosio", mutable_variant_object() - ("to", name(a) ) - ("quantity", eosio::chain::asset::from_string("10000.0000 SYS") ) - ("memo", "") - ); + issue_tokens( *this, config::system_account_name, a, eosio::chain::asset::from_string("10000.0000 SYS") ); } produce_blocks(1); @@ -151,11 +171,7 @@ BOOST_FIXTURE_TEST_CASE( get_table_test, TESTER ) try { push_action(N(eosio.token), N(create), N(eosio.token), act ); // issue for (account_name a: accs) { - push_action( N(eosio.token), N(issue), "eosio", mutable_variant_object() - ("to", name(a) ) - ("quantity", eosio::chain::asset::from_string("9999.0000 AAA") ) - ("memo", "") - ); + issue_tokens( *this, config::system_account_name, a, eosio::chain::asset::from_string("9999.0000 AAA") ); } produce_blocks(1); @@ -166,11 +182,7 @@ BOOST_FIXTURE_TEST_CASE( get_table_test, TESTER ) try { push_action(N(eosio.token), N(create), N(eosio.token), act ); // issue for (account_name a: accs) { - push_action( N(eosio.token), N(issue), "eosio", mutable_variant_object() - ("to", name(a) ) - ("quantity", eosio::chain::asset::from_string("7777.0000 CCC") ) - ("memo", "") - ); + issue_tokens( *this, config::system_account_name, a, eosio::chain::asset::from_string("7777.0000 CCC") ); } produce_blocks(1); @@ -181,11 +193,7 @@ BOOST_FIXTURE_TEST_CASE( get_table_test, TESTER ) try { push_action(N(eosio.token), N(create), N(eosio.token), act ); // issue for (account_name a: accs) { - push_action( N(eosio.token), N(issue), "eosio", mutable_variant_object() - ("to", name(a) ) - ("quantity", eosio::chain::asset::from_string("8888.0000 BBB") ) - ("memo", "") - ); + issue_tokens( *this, config::system_account_name, a, eosio::chain::asset::from_string("8888.0000 BBB") ); } produce_blocks(1); @@ -331,17 +339,13 @@ BOOST_FIXTURE_TEST_CASE( get_table_by_seckey_test, TESTER ) try { // issue for (account_name a: accs) { - push_action( N(eosio.token), N(issue), "eosio", mutable_variant_object() - ("to", name(a) ) - ("quantity", eosio::chain::asset::from_string("10000.0000 SYS") ) - ("memo", "") - ); + issue_tokens( *this, config::system_account_name, a, eosio::chain::asset::from_string("10000.0000 SYS") ); } produce_blocks(1); - + set_code( config::system_account_name, contracts::eosio_system_wasm() ); set_abi( config::system_account_name, contracts::eosio_system_abi().data() ); - + base_tester::push_action(config::system_account_name, N(init), config::system_account_name, mutable_variant_object() ("version", 0) diff --git a/unittests/forked_tests.cpp b/unittests/forked_tests.cpp index feeebf7664f..6f8f1f29c84 100644 --- a/unittests/forked_tests.cpp +++ b/unittests/forked_tests.cpp @@ -162,15 +162,18 @@ BOOST_AUTO_TEST_CASE( forking ) try { ("maximum_supply", core_from_string("10000000.0000")) ); - wdump((fc::json::to_pretty_string(cr))); - cr = c.push_action( N(eosio.token), N(issue), config::system_account_name, mutable_variant_object() - ("to", "dan" ) + ("to", "eosio" ) ("quantity", core_from_string("100.0000")) ("memo", "") ); - wdump((fc::json::to_pretty_string(cr))); + cr = c.push_action( N(eosio.token), N(transfer), 
config::system_account_name, mutable_variant_object() + ("from", "eosio") + ("to", "dan" ) + ("quantity", core_from_string("100.0000")) + ("memo", "") + ); tester c2; From 0214edaf1362910246fc92ce21290b14ef404a67 Mon Sep 17 00:00:00 2001 From: Kayan Date: Wed, 10 Apr 2019 11:29:25 +0800 Subject: [PATCH 325/680] fix unit test case --- unittests/protocol_feature_tests.cpp | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/unittests/protocol_feature_tests.cpp b/unittests/protocol_feature_tests.cpp index 200310e6b9a..29f5830b58a 100644 --- a/unittests/protocol_feature_tests.cpp +++ b/unittests/protocol_feature_tests.cpp @@ -617,10 +617,19 @@ BOOST_AUTO_TEST_CASE( only_bill_to_first_authorizer ) { try { chain.create_account(tester_account); chain.create_account(tester_account2); - resource_limits_manager& mgr = chain.control->get_mutable_resource_limits_manager(); - mgr.set_account_limits( tester_account, 10000, 1000, 1000 ); - mgr.set_account_limits( tester_account2, 10000, 1000, 1000 ); - mgr.process_account_limit_updates(); + chain.push_action(config::system_account_name, N(setalimits), config::system_account_name, fc::mutable_variant_object() + ("account", name(tester_account).to_string()) + ("ram_bytes", 10000) + ("net_weight", 1000) + ("cpu_weight", 1000)); + + chain.push_action(config::system_account_name, N(setalimits), config::system_account_name, fc::mutable_variant_object() + ("account", name(tester_account2).to_string()) + ("ram_bytes", 10000) + ("net_weight", 1000) + ("cpu_weight", 1000)); + + const resource_limits_manager& mgr = chain.control->get_resource_limits_manager(); chain.produce_blocks(); From 2b8848c0c6c79569ab9daaff0c5c4e54a753134a Mon Sep 17 00:00:00 2001 From: Andrianto Lie Date: Wed, 10 Apr 2019 11:57:42 +0800 Subject: [PATCH 326/680] Add missing loadSystemContract --- tests/Cluster.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/Cluster.py b/tests/Cluster.py index f9cab40bec1..3ccbabe43aa 100644 --- a/tests/Cluster.py +++ b/tests/Cluster.py @@ -430,7 +430,7 @@ def connectGroup(group, producerNodes, bridgeNodes) : if not loadSystemContract: useBiosBootFile=False #ensure we use Cluster.bootstrap if onlyBios or not useBiosBootFile: - self.biosNode=self.bootstrap(biosNode, startedNodes, prodCount, totalProducers, pfSetupPolicy, onlyBios, onlySetProds) + self.biosNode=self.bootstrap(biosNode, startedNodes, prodCount, totalProducers, pfSetupPolicy, onlyBios, onlySetProds, loadSystemContract) if self.biosNode is None: Utils.Print("ERROR: Bootstrap failed.") return False From acd22cdfc60f1ff0575f6461eb3dd3febd0c1505 Mon Sep 17 00:00:00 2001 From: Bucky Kittinger Date: Wed, 10 Apr 2019 02:02:38 -0400 Subject: [PATCH 327/680] update build scripts --- scripts/eosio_build.sh | 45 ++++++++++++++++++++--------------- scripts/eosio_build_amazon.sh | 32 +++++++++++++++++++++++++ scripts/eosio_build_centos.sh | 32 +++++++++++++++++++++++++ scripts/eosio_build_darwin.sh | 32 +++++++++++++++++++++++++ scripts/eosio_build_fedora.sh | 34 +++++++++++++++++++++++++- scripts/eosio_build_ubuntu.sh | 32 +++++++++++++++++++++++++ 6 files changed, 187 insertions(+), 20 deletions(-) diff --git a/scripts/eosio_build.sh b/scripts/eosio_build.sh index a97ceaa5058..43b06c13937 100755 --- a/scripts/eosio_build.sh +++ b/scripts/eosio_build.sh @@ -179,6 +179,30 @@ if [ $STALE_SUBMODS -gt 0 ]; then exit 1 fi +BUILD_CLANG8=false +if [ $NONINTERACTIVE -eq 0 ]; then + printf "#include \nint main(){ std::cout << \"Hello, World!\" << std::endl; }" 
&> $TEMP_DIR/test.cpp + `c++ -c -std=c++17 $TEMP_DIR/test.cpp -o $TEMP_DIR/test.o &> /dev/null` + if [ $? -ne 0 ]; then + `CXX -c -std=c++17 $TEMP_DIR/test.cpp -o $TEMP_DIR/test.o &> /dev/null` + if [ $? -ne 0 ]; then + printf "Error no C++17 support.\\nEnter Y/y or N/n to continue with downloading and building a viable compiler or exit now.\\nIf you already have a C++17 compiler installed or would like to install your own, export CXX to point to the compiler of your choosing." + read -p "Enter Y/y or N/n to continue with downloading and building a viable compiler or exit now." yn + case $yn in + [Yy]* ) BUILD_CLANG8=true; break;; + [Nn]* ) exit 1;; + * ) echo "Improper input"; exit 1;; + esac + fi + else + CXX=c++ + fi +else + BUILD_CLANG8=true +fi + +export BUILD_CLANG8=$BUILD_CLANG8 + printf "\\nBeginning build version: %s\\n" "${VERSION}" printf "%s\\n" "$( date -u )" printf "User: %s\\n" "$( whoami )" @@ -210,39 +234,25 @@ if [ "$ARCH" == "Linux" ]; then case "$OS_NAME" in "Amazon Linux AMI"|"Amazon Linux") FILE="${REPO_ROOT}/scripts/eosio_build_amazon.sh" - CXX_COMPILER=g++ - C_COMPILER=gcc ;; "CentOS Linux") FILE="${REPO_ROOT}/scripts/eosio_build_centos.sh" - CXX_COMPILER=g++ - C_COMPILER=gcc ;; "elementary OS") FILE="${REPO_ROOT}/scripts/eosio_build_ubuntu.sh" - CXX_COMPILER=clang++-4.0 - C_COMPILER=clang-4.0 ;; "Fedora") export CPATH=/usr/include/llvm4.0:$CPATH # llvm4.0 for fedora package path inclusion FILE="${REPO_ROOT}/scripts/eosio_build_fedora.sh" - CXX_COMPILER=g++ - C_COMPILER=gcc ;; "Linux Mint") FILE="${REPO_ROOT}/scripts/eosio_build_ubuntu.sh" - CXX_COMPILER=clang++-4.0 - C_COMPILER=clang-4.0 ;; "Ubuntu") FILE="${REPO_ROOT}/scripts/eosio_build_ubuntu.sh" - CXX_COMPILER=clang++-4.0 - C_COMPILER=clang-4.0 ;; "Debian GNU/Linux") FILE="${REPO_ROOT}/scripts/eosio_build_ubuntu.sh" - CXX_COMPILER=clang++-4.0 - C_COMPILER=clang-4.0 ;; *) printf "\\nUnsupported Linux Distribution. Exiting now.\\n\\n" @@ -258,8 +268,6 @@ if [ "$ARCH" == "Darwin" ]; then # HOME/lib/cmake: mongo_db_plugin.cpp:25:10: fatal error: 'bsoncxx/builder/basic/kvp.hpp' file not found LOCAL_CMAKE_FLAGS="-DCMAKE_PREFIX_PATH=/usr/local/opt/gettext;$HOME/lib/cmake ${LOCAL_CMAKE_FLAGS}" FILE="${REPO_ROOT}/scripts/eosio_build_darwin.sh" - CXX_COMPILER=clang++ - C_COMPILER=clang OPENSSL_ROOT_DIR=/usr/local/opt/openssl fi @@ -279,8 +287,8 @@ printf "## ENABLE_COVERAGE_TESTING=%s\\n" "${ENABLE_COVERAGE_TESTING}" mkdir -p $BUILD_DIR cd $BUILD_DIR -$CMAKE -DCMAKE_BUILD_TYPE="${CMAKE_BUILD_TYPE}" -DCMAKE_CXX_COMPILER="${CXX_COMPILER}" \ - -DCMAKE_C_COMPILER="${C_COMPILER}" -DCORE_SYMBOL_NAME="${CORE_SYMBOL_NAME}" \ +$CMAKE -DCMAKE_BUILD_TYPE="${CMAKE_BUILD_TYPE}" -DCMAKE_CXX_COMPILER="${CXX}" \ + -DCORE_SYMBOL_NAME="${CORE_SYMBOL_NAME}" \ -DOPENSSL_ROOT_DIR="${OPENSSL_ROOT_DIR}" -DBUILD_MONGO_DB_PLUGIN=true \ -DENABLE_COVERAGE_TESTING="${ENABLE_COVERAGE_TESTING}" -DBUILD_DOXYGEN="${DOXYGEN}" \ -DCMAKE_INSTALL_PREFIX=$OPT_LOCATION/eosio $LOCAL_CMAKE_FLAGS "${REPO_ROOT}" @@ -314,4 +322,3 @@ printf "EOSIO Telegram channel @ https://t.me/EOSProject\\n" printf "EOSIO resources: https://eos.io/resources/\\n" printf "EOSIO Stack Exchange: https://eosio.stackexchange.com\\n" printf "EOSIO wiki: https://github.com/EOSIO/eos/wiki\\n\\n\\n" - diff --git a/scripts/eosio_build_amazon.sh b/scripts/eosio_build_amazon.sh index ff655496a7b..3ab12ca9d54 100755 --- a/scripts/eosio_build_amazon.sh +++ b/scripts/eosio_build_amazon.sh @@ -248,6 +248,38 @@ if [ $? -ne 0 ]; then exit -1; fi cd .. 
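As an aside on the C++17 probe that eosio_build.sh writes above: the hello-world program only verifies that the compiler accepts -std=c++17, not that C++17 language features actually work. A stricter probe would have to exercise a C++17-only construct; a sketch of such a test program (illustrative only, not part of these scripts):

   #include <map>
   #include <string>

   int main() {
      std::map<std::string, int> m{ { "clang", 8 } };
      // structured bindings and if-with-initializer are both C++17-only
      if ( auto [it, inserted] = m.emplace( "eosio", 1 ); inserted )
         return it->second - 1;   // exits 0 when C++17 semantics work
      return 1;
   }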
printf "\\n" +if [ $BUILD_CLANG8 ]; then + printf "Checking Clang 8 support...\\n" + if [ ! -d $CLANG8_ROOT ]; then + printf "Installing Clang 8...\\n" + cd ${OPT_LOCATION} \ + && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_VERSION https://git.llvm.org/git/llvm.git clang8 && cd clang8 \ + && cd tools \ + && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_VERSION https://git.llvm.org/git/lld.git \ + && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_VERSION https://git.llvm.org/git/polly.git \ + && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_VERSION https://git.llvm.org/git/clang.git clang && cd clang/tools \ + && mkdir extra && cd extra \ + && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_VERSION https://git.llvm.org/git/clang-tools-extra.git \ + && cd ../../../../projects \ + && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_VERSION https://git.llvm.org/git/libcxx.git \ + && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_VERSION https://git.llvm.org/git/libunwind.git \ + && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_VERSION https://git.llvm.org/git/compiler-rt.git \ + && cd ${OPT_LOCATION}/clang8 \ + && mkdir build && cd build \ + && $CMAKE -G "Unix Makefiles" -DCMAKE_INSTALL_PREFIX="${CLANG8_ROOT}" -DLLVM_BUILD_EXTERNAL_COMPILER_RT=ON -DLLVM_BUILD_LLVM_DYLIB=ON -DLLVM_ENABLE_LIBCXX=ON -DLLVM_ENABLE_RTTI=ON -DLLVM_INCLUDE_DOCS=OFF -DLLVM_OPTIMIZED_TABLEGEN=ON -DLLVM_TARGETS_TO_BUILD=all .. \ + && make -j"${JOBS}" \ + && make install \ + && cd ../.. \ + || exit 1 + printf " - Clang 8 successfully installed @ ${CLANG8_ROOT}\\n" + else + printf " - Clang 8 found @ ${CLANG8_ROOT}.\\n" + fi + if [ $? -ne 0 ]; then exit -1; fi + + printf "\\n" +fi + function print_instructions() { return 0 } diff --git a/scripts/eosio_build_centos.sh b/scripts/eosio_build_centos.sh index 621001d0a97..292f5699464 100755 --- a/scripts/eosio_build_centos.sh +++ b/scripts/eosio_build_centos.sh @@ -299,6 +299,38 @@ if [ $? -ne 0 ]; then exit -1; fi cd .. printf "\\n" +if [ $BUILD_CLANG8 ]; then + printf "Checking Clang 8 support...\\n" + if [ ! 
-d $CLANG8_ROOT ]; then + printf "Installing Clang 8...\\n" + cd ${OPT_LOCATION} \ + && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_VERSION https://git.llvm.org/git/llvm.git clang8 && cd clang8 \ + && cd tools \ + && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_VERSION https://git.llvm.org/git/lld.git \ + && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_VERSION https://git.llvm.org/git/polly.git \ + && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_VERSION https://git.llvm.org/git/clang.git clang && cd clang/tools \ + && mkdir extra && cd extra \ + && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_VERSION https://git.llvm.org/git/clang-tools-extra.git \ + && cd ../../../../projects \ + && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_VERSION https://git.llvm.org/git/libcxx.git \ + && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_VERSION https://git.llvm.org/git/libunwind.git \ + && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_VERSION https://git.llvm.org/git/compiler-rt.git \ + && cd ${OPT_LOCATION}/clang8 \ + && mkdir build && cd build \ + && $CMAKE -G "Unix Makefiles" -DCMAKE_INSTALL_PREFIX="${CLANG8_ROOT}" -DLLVM_BUILD_EXTERNAL_COMPILER_RT=ON -DLLVM_BUILD_LLVM_DYLIB=ON -DLLVM_ENABLE_LIBCXX=ON -DLLVM_ENABLE_RTTI=ON -DLLVM_INCLUDE_DOCS=OFF -DLLVM_OPTIMIZED_TABLEGEN=ON -DLLVM_TARGETS_TO_BUILD=all .. \ + && make -j"${JOBS}" \ + && make install \ + && cd ../.. \ + || exit 1 + printf " - Clang 8 successfully installed @ ${CLANG8_ROOT}\\n" + else + printf " - Clang 8 found @ ${CLANG8_ROOT}.\\n" + fi + if [ $? -ne 0 ]; then exit -1; fi + + printf "\\n" +fi + function print_instructions() { printf "source ${PYTHON3PATH}/enable\\n" printf "source /opt/rh/devtoolset-7/enable\\n" diff --git a/scripts/eosio_build_darwin.sh b/scripts/eosio_build_darwin.sh index 224b0839f1d..4c1f53bf88f 100755 --- a/scripts/eosio_build_darwin.sh +++ b/scripts/eosio_build_darwin.sh @@ -262,6 +262,38 @@ fi cd .. printf "\\n" +if [ $BUILD_CLANG8 ]; then + printf "Checking Clang 8 support...\\n" + if [ ! 
-d $CLANG8_ROOT ]; then + printf "Installing Clang 8...\\n" + cd ${OPT_LOCATION} \ + && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_VERSION https://git.llvm.org/git/llvm.git clang8 && cd clang8 \ + && cd tools \ + && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_VERSION https://git.llvm.org/git/lld.git \ + && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_VERSION https://git.llvm.org/git/polly.git \ + && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_VERSION https://git.llvm.org/git/clang.git clang && cd clang/tools \ + && mkdir extra && cd extra \ + && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_VERSION https://git.llvm.org/git/clang-tools-extra.git \ + && cd ../../../../projects \ + && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_VERSION https://git.llvm.org/git/libcxx.git \ + && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_VERSION https://git.llvm.org/git/libunwind.git \ + && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_VERSION https://git.llvm.org/git/compiler-rt.git \ + && cd ${OPT_LOCATION}/clang8 \ + && mkdir build && cd build \ + && $CMAKE -G "Unix Makefiles" -DCMAKE_INSTALL_PREFIX="${CLANG8_ROOT}" -DLLVM_BUILD_EXTERNAL_COMPILER_RT=ON -DLLVM_BUILD_LLVM_DYLIB=ON -DLLVM_ENABLE_LIBCXX=ON -DLLVM_ENABLE_RTTI=ON -DLLVM_INCLUDE_DOCS=OFF -DLLVM_OPTIMIZED_TABLEGEN=ON -DLLVM_TARGETS_TO_BUILD=all -DLLVM_CREATE_XCODE_TOOLCHAIN=ON .. \ + && make -j"${JOBS}" \ + && make install \ + && cd ../.. \ + || exit 1 + printf " - Clang 8 successfully installed @ ${CLANG8_ROOT}\\n" + else + printf " - Clang 8 found @ ${CLANG8_ROOT}.\\n" + fi + if [ $? -ne 0 ]; then exit -1; fi + + printf "\\n" +fi + function print_instructions() { return 0 } diff --git a/scripts/eosio_build_fedora.sh b/scripts/eosio_build_fedora.sh index c27f47658d3..d3c2880a0a9 100755 --- a/scripts/eosio_build_fedora.sh +++ b/scripts/eosio_build_fedora.sh @@ -231,6 +231,38 @@ if [ $? -ne 0 ]; then exit -1; fi cd .. printf "\\n" +if [ $BUILD_CLANG8 ]; then + printf "Checking Clang 8 support...\\n" + if [ ! 
-d $CLANG8_ROOT ]; then + printf "Installing Clang 8...\\n" + cd ${OPT_LOCATION} \ + && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_VERSION https://git.llvm.org/git/llvm.git clang8 && cd clang8 \ + && cd tools \ + && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_VERSION https://git.llvm.org/git/lld.git \ + && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_VERSION https://git.llvm.org/git/polly.git \ + && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_VERSION https://git.llvm.org/git/clang.git clang && cd clang/tools \ + && mkdir extra && cd extra \ + && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_VERSION https://git.llvm.org/git/clang-tools-extra.git \ + && cd ../../../../projects \ + && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_VERSION https://git.llvm.org/git/libcxx.git \ + && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_VERSION https://git.llvm.org/git/libunwind.git \ + && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_VERSION https://git.llvm.org/git/compiler-rt.git \ + && cd ${OPT_LOCATION}/clang8 \ + && mkdir build && cd build \ + && $CMAKE -G "Unix Makefiles" -DCMAKE_INSTALL_PREFIX="${CLANG8_ROOT}" -DLLVM_BUILD_EXTERNAL_COMPILER_RT=ON -DLLVM_BUILD_LLVM_DYLIB=ON -DLLVM_ENABLE_LIBCXX=ON -DLLVM_ENABLE_RTTI=ON -DLLVM_INCLUDE_DOCS=OFF -DLLVM_OPTIMIZED_TABLEGEN=ON -DLLVM_TARGETS_TO_BUILD=all .. \ + && make -j"${JOBS}" \ + && make install \ + && cd ../.. \ + || exit 1 + printf " - Clang 8 successfully installed @ ${CLANG8_ROOT}\\n" + else + printf " - Clang 8 found @ ${CLANG8_ROOT}.\\n" + fi + if [ $? -ne 0 ]; then exit -1; fi + + printf "\\n" +fi + function print_instructions() { return 0 -} \ No newline at end of file +} diff --git a/scripts/eosio_build_ubuntu.sh b/scripts/eosio_build_ubuntu.sh index 5561b14a450..d174d7b0127 100755 --- a/scripts/eosio_build_ubuntu.sh +++ b/scripts/eosio_build_ubuntu.sh @@ -254,6 +254,38 @@ if [ $? -ne 0 ]; then exit -1; fi cd .. printf "\\n" +if [ $BUILD_CLANG8 ]; then + printf "Checking Clang 8 support...\\n" + if [ ! 
-d $CLANG8_ROOT ]; then + printf "Installing Clang 8...\\n" + cd ${OPT_LOCATION} \ + && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_VERSION https://git.llvm.org/git/llvm.git clang8 && cd clang8 \ + && cd tools \ + && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_VERSION https://git.llvm.org/git/lld.git \ + && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_VERSION https://git.llvm.org/git/polly.git \ + && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_VERSION https://git.llvm.org/git/clang.git clang && cd clang/tools \ + && mkdir extra && cd extra \ + && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_VERSION https://git.llvm.org/git/clang-tools-extra.git \ + && cd ../../../../projects \ + && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_VERSION https://git.llvm.org/git/libcxx.git \ + && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_VERSION https://git.llvm.org/git/libunwind.git \ + && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_VERSION https://git.llvm.org/git/compiler-rt.git \ + && cd ${OPT_LOCATION}/clang8 \ + && mkdir build && cd build \ + && $CMAKE -G "Unix Makefiles" -DCMAKE_INSTALL_PREFIX="${CLANG8_ROOT}" -DLLVM_BUILD_EXTERNAL_COMPILER_RT=ON -DLLVM_BUILD_LLVM_DYLIB=ON -DLLVM_ENABLE_LIBCXX=ON -DLLVM_ENABLE_RTTI=ON -DLLVM_INCLUDE_DOCS=OFF -DLLVM_OPTIMIZED_TABLEGEN=ON -DLLVM_TARGETS_TO_BUILD=all .. \ + && make -j"${JOBS}" \ + && make install \ + && cd ../.. \ + || exit 1 + printf " - Clang 8 successfully installed @ ${CLANG8_ROOT}\\n" + else + printf " - Clang 8 found @ ${CLANG8_ROOT}.\\n" + fi + if [ $? -ne 0 ]; then exit -1; fi + + printf "\\n" +fi + function print_instructions() { return 0 } From b4e67e5be66599c19f48d3e3201a26d309ee5652 Mon Sep 17 00:00:00 2001 From: Andrianto Lie Date: Wed, 10 Apr 2019 14:34:06 +0800 Subject: [PATCH 328/680] Remove unneeded import --- tests/Node.py | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/Node.py b/tests/Node.py index 16f51464353..408929fac9a 100644 --- a/tests/Node.py +++ b/tests/Node.py @@ -1564,7 +1564,6 @@ def getActivatedProtocolFeatures(self): return latestBlockHeaderState["activated_protocol_features"]["protocol_features"] def modifyBuiltinPFSubjRestrictions(self, nodeId, featureCodename, subjectiveRestriction={}): - from Cluster import Cluster jsonPath = os.path.join(Utils.getNodeConfigDir(nodeId), "protocol_features", "BUILTIN-{}.json".format(featureCodename)) From 69e4b7e3b79e2f63e582308eb295e882fa1aa056 Mon Sep 17 00:00:00 2001 From: Andrianto Lie Date: Wed, 10 Apr 2019 17:12:11 +0800 Subject: [PATCH 329/680] Use processCurlCommand instead of sendRpcApi and remove sendRpcApi --- tests/Node.py | 26 ++--------------- ..._multiple_version_protocol_feature_test.py | 29 ++++++++++--------- 2 files changed, 18 insertions(+), 37 deletions(-) diff --git a/tests/Node.py b/tests/Node.py index 408929fac9a..334d9d2e7d5 100644 --- a/tests/Node.py +++ b/tests/Node.py @@ -7,10 +7,6 @@ import datetime import json import signal -import urllib.request -import urllib.parse -from urllib.error import HTTPError -import tempfile from core_symbol import CORE_SYMBOL from testUtils import Utils @@ -1455,26 +1451,10 @@ def reportStatus(self): Utils.Print(" hbn : %s (%s)" % (self.lastRetrievedHeadBlockNum, status)) Utils.Print(" lib : %s (%s)" % (self.lastRetrievedLIB, status)) - def sendRpcApi(self, relativeUrl, data={}): - url = urllib.parse.urljoin(self.endpointHttp, relativeUrl) - req = urllib.request.Request(url) - 
req.add_header('Content-Type', 'application/json; charset=utf-8') - reqData = json.dumps(data).encode("utf-8") - rpcApiResult = None - try: - response = urllib.request.urlopen(req, reqData) - rpcApiResult = json.loads(response.read().decode("utf-8")) - except HTTPError as e: - Utils.Print("Fail to send RPC API to {} with data {} ({})".format(url, data, e.read())) - raise e - except Exception as e: - Utils.Print("Fail to send RPC API to {} with data {} ({})".format(url, data, e)) - raise e - return rpcApiResult - # Require producer_api_plugin def scheduleProtocolFeatureActivations(self, featureDigests=[]): - self.sendRpcApi("v1/producer/schedule_protocol_feature_activations", {"protocol_features_to_activate": featureDigests}) + param = { "protocol_features_to_activate": featureDigests } + self.processCurlCmd("producer", "schedule_protocol_feature_activations", json.dumps(param)) # Require producer_api_plugin def getSupportedProtocolFeatures(self, excludeDisabled=False, excludeUnactivatable=False): @@ -1482,7 +1462,7 @@ def getSupportedProtocolFeatures(self, excludeDisabled=False, excludeUnactivatab "exclude_disabled": excludeDisabled, "exclude_unactivatable": excludeUnactivatable } - res = self.sendRpcApi("v1/producer/get_supported_protocol_features", param) + res = self.processCurlCmd("producer", "get_supported_protocol_features", json.dumps(param)) return res # This will return supported protocol features in a dict (feature codename as the key), i.e. diff --git a/tests/nodeos_multiple_version_protocol_feature_test.py b/tests/nodeos_multiple_version_protocol_feature_test.py index f2bd4ee8516..be3324f969e 100755 --- a/tests/nodeos_multiple_version_protocol_feature_test.py +++ b/tests/nodeos_multiple_version_protocol_feature_test.py @@ -82,6 +82,7 @@ def hasBlockBecomeIrr(): associatedNodeLabels = { "3": "170" } + Utils.Print("Alternate Version Labels File is {}".format(alternateVersionLabelsFile)) assert exists(alternateVersionLabelsFile), "Alternate version labels file does not exist" assert cluster.launch(pnodes=4, totalNodes=4, prodCount=1, totalProducers=4, extraNodeosArgs=" --plugin eosio::producer_api_plugin ", @@ -91,31 +92,31 @@ def hasBlockBecomeIrr(): alternateVersionLabelsFile=alternateVersionLabelsFile, associatedNodeLabels=associatedNodeLabels), "Unable to launch cluster" - def pauseBlockProduction(nodes:[Node]): - for node in nodes: - node.sendRpcApi("v1/producer/pause") + newNodeIds = [0, 1, 2] + oldNodeId = 3 + newNodes = list(map(lambda id: cluster.getNode(id), newNodeIds)) + oldNode = cluster.getNode(oldNodeId) + allNodes = [*newNodes, oldNode] - def resumeBlockProduction(nodes:[Node]): - for node in nodes: - node.sendRpcApi("v1/producer/resume") + def pauseBlockProductions(): + for node in allNodes: + if not node.killed: node.processCurlCmd("producer", "pause", "") + + def resumeBlockProductions(): + for node in allNodes: + if not node.killed: node.processCurlCmd("producer", "resume", "") def shouldNodesBeInSync(nodes:[Node]): # Pause all block production to ensure the head is not moving - pauseBlockProduction(nodes) + pauseBlockProductions() time.sleep(1) # Wait for some time to ensure all blocks are propagated headBlockIds = [] for node in nodes: headBlockId = node.getInfo()["head_block_id"] headBlockIds.append(headBlockId) - resumeBlockProduction(nodes) + resumeBlockProductions() return len(set(headBlockIds)) == 1 - newNodeIds = [0, 1, 2] - oldNodeId = 3 - newNodes = list(map(lambda id: cluster.getNode(id), newNodeIds)) - oldNode = cluster.getNode(oldNodeId) - allNodes 
= [*newNodes, oldNode] - # Before everything starts, all nodes (new version and old version) should be in sync assert shouldNodesBeInSync(allNodes), "Nodes are not in sync before preactivation" From 29520adc33a8dc31ac867d5287d4ed4d32d24bcc Mon Sep 17 00:00:00 2001 From: Bucky Kittinger Date: Wed, 10 Apr 2019 14:28:50 -0400 Subject: [PATCH 330/680] Pull over from feature/cpp17 branch and new stuff for pinning the compiler --- CMakeLists.txt | 4 +- libraries/chainbase | 2 +- libraries/fc | 2 +- libraries/wasm-jit/CMakeLists.txt | 1 + .../include/eosio/wallet_plugin/se_wallet.hpp | 4 +- .../include/eosio/wallet_plugin/wallet.hpp | 2 +- .../eosio/wallet_plugin/wallet_api.hpp | 2 +- .../eosio/wallet_plugin/yubihsm_wallet.hpp | 4 +- plugins/wallet_plugin/se_wallet.cpp | 8 +- plugins/wallet_plugin/wallet.cpp | 10 +- plugins/wallet_plugin/wallet_manager.cpp | 4 +- plugins/wallet_plugin/yubihsm_wallet.cpp | 6 +- programs/cleos/main.cpp | 8 +- scripts/eosio_build.sh | 10 +- scripts/eosio_build_amazon.sh | 2 +- scripts/eosio_build_centos.sh | 2 +- scripts/eosio_build_darwin.sh | 118 +++++++++--------- scripts/eosio_build_fedora.sh | 2 +- scripts/eosio_build_ubuntu.sh | 2 +- 19 files changed, 98 insertions(+), 95 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 17c3df72451..0aea9883ac5 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1,4 +1,4 @@ -cmake_minimum_required( VERSION 3.5 ) +cmake_minimum_required( VERSION 3.8 ) project( EOSIO ) include(CTest) # suppresses DartConfiguration.tcl error @@ -25,7 +25,7 @@ include( InstallDirectoryPermissions ) include( MASSigning ) set( BLOCKCHAIN_NAME "EOSIO" ) -set( CMAKE_CXX_STANDARD 14 ) +set( CMAKE_CXX_STANDARD 17 ) set( CMAKE_CXX_EXTENSIONS ON ) set( CXX_STANDARD_REQUIRED ON) diff --git a/libraries/chainbase b/libraries/chainbase index eb2d0c28bc1..d4a6fd350a9 160000 --- a/libraries/chainbase +++ b/libraries/chainbase @@ -1 +1 @@ -Subproject commit eb2d0c28bc1f1328e8a5fc899291336ad487b084 +Subproject commit d4a6fd350a9f4a812a3e33019c864d198bdb2e10 diff --git a/libraries/fc b/libraries/fc index ae6ec564f0d..89a102d0ca7 160000 --- a/libraries/fc +++ b/libraries/fc @@ -1 +1 @@ -Subproject commit ae6ec564f0db6d3378348ef6b475042e332e612a +Subproject commit 89a102d0ca71bb32b009cf94db32a082da9aa403 diff --git a/libraries/wasm-jit/CMakeLists.txt b/libraries/wasm-jit/CMakeLists.txt index c06e45b5252..fcd74c7d4cd 100644 --- a/libraries/wasm-jit/CMakeLists.txt +++ b/libraries/wasm-jit/CMakeLists.txt @@ -36,6 +36,7 @@ include_directories(${WAVM_INCLUDE_DIR}) # set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fsanitize=address") # endif() +set(CMAKE_CXX_STANDARD 11) option(WAVM_METRICS_OUTPUT "controls printing the timings of some operations to stdout" OFF) if(WAVM_METRICS_OUTPUT) add_definitions("-DWAVM_METRICS_OUTPUT=1") diff --git a/plugins/wallet_plugin/include/eosio/wallet_plugin/se_wallet.hpp b/plugins/wallet_plugin/include/eosio/wallet_plugin/se_wallet.hpp index e5c70f1a307..9e33b194a37 100644 --- a/plugins/wallet_plugin/include/eosio/wallet_plugin/se_wallet.hpp +++ b/plugins/wallet_plugin/include/eosio/wallet_plugin/se_wallet.hpp @@ -32,10 +32,10 @@ class se_wallet final : public wallet_api { string create_key(string key_type) override; bool remove_key(string key) override; - optional try_sign_digest(const digest_type digest, const public_key_type public_key) override; + fc::optional try_sign_digest(const digest_type digest, const public_key_type public_key) override; private: std::unique_ptr my; }; -}} \ No newline at end of file +}} diff --git 
a/plugins/wallet_plugin/include/eosio/wallet_plugin/wallet.hpp b/plugins/wallet_plugin/include/eosio/wallet_plugin/wallet.hpp index 480e7a32a44..900577d082c 100644 --- a/plugins/wallet_plugin/include/eosio/wallet_plugin/wallet.hpp +++ b/plugins/wallet_plugin/include/eosio/wallet_plugin/wallet.hpp @@ -181,7 +181,7 @@ class soft_wallet final : public wallet_api /* Attempts to sign a digest via the given public_key */ - optional try_sign_digest( const digest_type digest, const public_key_type public_key ) override; + fc::optional try_sign_digest( const digest_type digest, const public_key_type public_key ) override; std::shared_ptr my; void encrypt_keys(); diff --git a/plugins/wallet_plugin/include/eosio/wallet_plugin/wallet_api.hpp b/plugins/wallet_plugin/include/eosio/wallet_plugin/wallet_api.hpp index 0627eceff33..61929b04733 100644 --- a/plugins/wallet_plugin/include/eosio/wallet_plugin/wallet_api.hpp +++ b/plugins/wallet_plugin/include/eosio/wallet_plugin/wallet_api.hpp @@ -101,7 +101,7 @@ class wallet_api /** Returns a signature given the digest and public_key, if this wallet can sign via that public key */ - virtual optional try_sign_digest( const digest_type digest, const public_key_type public_key ) = 0; + virtual fc::optional try_sign_digest( const digest_type digest, const public_key_type public_key ) = 0; }; }} diff --git a/plugins/wallet_plugin/include/eosio/wallet_plugin/yubihsm_wallet.hpp b/plugins/wallet_plugin/include/eosio/wallet_plugin/yubihsm_wallet.hpp index e1c0da99118..49caa9c184b 100644 --- a/plugins/wallet_plugin/include/eosio/wallet_plugin/yubihsm_wallet.hpp +++ b/plugins/wallet_plugin/include/eosio/wallet_plugin/yubihsm_wallet.hpp @@ -32,10 +32,10 @@ class yubihsm_wallet final : public wallet_api { string create_key(string key_type) override; bool remove_key(string key) override; - optional try_sign_digest(const digest_type digest, const public_key_type public_key) override; + fc::optional try_sign_digest(const digest_type digest, const public_key_type public_key) override; private: std::unique_ptr my; }; -}} \ No newline at end of file +}} diff --git a/plugins/wallet_plugin/se_wallet.cpp b/plugins/wallet_plugin/se_wallet.cpp index 8b43d569881..2f9ccfffc06 100644 --- a/plugins/wallet_plugin/se_wallet.cpp +++ b/plugins/wallet_plugin/se_wallet.cpp @@ -186,10 +186,10 @@ struct se_wallet_impl { return pub; } - optional try_sign_digest(const digest_type d, const public_key_type public_key) { + fc::optional try_sign_digest(const digest_type d, const public_key_type public_key) { auto it = _keys.find(public_key); if(it == _keys.end()) - return optional{}; + return fc::optional{}; fc::ecdsa_sig sig = ECDSA_SIG_new(); CFErrorRef error = nullptr; @@ -370,8 +370,8 @@ bool se_wallet::remove_key(string key) { return my->remove_key(key); } -optional se_wallet::try_sign_digest(const digest_type digest, const public_key_type public_key) { +fc::optional se_wallet::try_sign_digest(const digest_type digest, const public_key_type public_key) { return my->try_sign_digest(digest, public_key); } -}} \ No newline at end of file +}} diff --git a/plugins/wallet_plugin/wallet.cpp b/plugins/wallet_plugin/wallet.cpp index 53d57697ccd..a40027cb0a9 100644 --- a/plugins/wallet_plugin/wallet.cpp +++ b/plugins/wallet_plugin/wallet.cpp @@ -120,18 +120,18 @@ class soft_wallet_impl string get_wallet_filename() const { return _wallet_filename; } - optional try_get_private_key(const public_key_type& id)const + fc::optional try_get_private_key(const public_key_type& id)const { auto it = _keys.find(id); 
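       // a miss in the key map falls through to return an empty fc::optional below, rather than throwing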
if( it != _keys.end() ) return it->second; - return optional(); + return fc::optional(); } - optional try_sign_digest( const digest_type digest, const public_key_type public_key ) { + fc::optional try_sign_digest( const digest_type digest, const public_key_type public_key ) { auto it = _keys.find(public_key); if( it == _keys.end() ) - return optional{}; + return fc::optional{}; return it->second.sign(digest); } @@ -401,7 +401,7 @@ private_key_type soft_wallet::get_private_key( public_key_type pubkey )const return my->get_private_key( pubkey ); } -optional soft_wallet::try_sign_digest( const digest_type digest, const public_key_type public_key ) { +fc::optional soft_wallet::try_sign_digest( const digest_type digest, const public_key_type public_key ) { return my->try_sign_digest(digest, public_key); } diff --git a/plugins/wallet_plugin/wallet_manager.cpp b/plugins/wallet_plugin/wallet_manager.cpp index b5287173670..43fa37bc61a 100644 --- a/plugins/wallet_plugin/wallet_manager.cpp +++ b/plugins/wallet_plugin/wallet_manager.cpp @@ -237,7 +237,7 @@ wallet_manager::sign_transaction(const chain::signed_transaction& txn, const fla bool found = false; for (const auto& i : wallets) { if (!i.second->is_locked()) { - optional sig = i.second->try_sign_digest(stxn.sig_digest(id, stxn.context_free_data), pk); + fc::optional sig = i.second->try_sign_digest(stxn.sig_digest(id, stxn.context_free_data), pk); if (sig) { stxn.signatures.push_back(*sig); found = true; @@ -260,7 +260,7 @@ wallet_manager::sign_digest(const chain::digest_type& digest, const public_key_t try { for (const auto& i : wallets) { if (!i.second->is_locked()) { - optional sig = i.second->try_sign_digest(digest, key); + fc::optional sig = i.second->try_sign_digest(digest, key); if (sig) return *sig; } diff --git a/plugins/wallet_plugin/yubihsm_wallet.cpp b/plugins/wallet_plugin/yubihsm_wallet.cpp index cda0d208333..0f367457fa0 100644 --- a/plugins/wallet_plugin/yubihsm_wallet.cpp +++ b/plugins/wallet_plugin/yubihsm_wallet.cpp @@ -139,10 +139,10 @@ struct yubihsm_wallet_impl { }); } - optional try_sign_digest(const digest_type d, const public_key_type public_key) { + fc::optional try_sign_digest(const digest_type d, const public_key_type public_key) { auto it = _keys.find(public_key); if(it == _keys.end()) - return optional{}; + return fc::optional{}; size_t der_sig_sz = 128; uint8_t der_sig[der_sig_sz]; @@ -265,7 +265,7 @@ bool yubihsm_wallet::remove_key(string key) { return true; } -optional yubihsm_wallet::try_sign_digest(const digest_type digest, const public_key_type public_key) { +fc::optional yubihsm_wallet::try_sign_digest(const digest_type digest, const public_key_type public_key) { return my->try_sign_digest(digest, public_key); } diff --git a/programs/cleos/main.cpp b/programs/cleos/main.cpp index caa24ae5ccf..743446fa17a 100644 --- a/programs/cleos/main.cpp +++ b/programs/cleos/main.cpp @@ -382,14 +382,14 @@ void print_action( const fc::variant& at ) { } //resolver for ABI serializer to decode actions in proposed transaction in multisig contract -auto abi_serializer_resolver = [](const name& account) -> optional { - static unordered_map > abi_cache; +auto abi_serializer_resolver = [](const name& account) -> fc::optional { + static unordered_map > abi_cache; auto it = abi_cache.find( account ); if ( it == abi_cache.end() ) { auto result = call(get_abi_func, fc::mutable_variant_object("account_name", account)); auto abi_results = result.as(); - optional abis; + fc::optional abis; if( abi_results.abi.valid() ) { abis.emplace( 
*abi_results.abi, abi_serializer_max_time ); } else { @@ -480,7 +480,7 @@ void print_result( const fc::variant& result ) { try { cerr << " us\n"; if( status == "failed" ) { - auto soft_except = processed["except"].as>(); + auto soft_except = processed["except"].as>(); if( soft_except ) { edump((soft_except->to_detail_string())); } diff --git a/scripts/eosio_build.sh b/scripts/eosio_build.sh index 43b06c13937..f813b4f99eb 100755 --- a/scripts/eosio_build.sh +++ b/scripts/eosio_build.sh @@ -71,6 +71,8 @@ export BOOST_LINK_LOCATION=${OPT_LOCATION}/boost export LLVM_VERSION=release_40 export LLVM_ROOT=${OPT_LOCATION}/llvm export LLVM_DIR=${LLVM_ROOT}/lib/cmake/llvm +export CLANG8_ROOT=${OPT_LOCATION}/clang8 +export PINNED_COMPILER_VERSION=release_80 export DOXYGEN_VERSION=1_8_14 export DOXYGEN_ROOT=${SRC_LOCATION}/doxygen-${DOXYGEN_VERSION} export TINI_VERSION=0.18.0 @@ -199,6 +201,7 @@ if [ $NONINTERACTIVE -eq 0 ]; then fi else BUILD_CLANG8=true + CXX=${OPT_LOCATION}/clang8/bin/clang++ fi export BUILD_CLANG8=$BUILD_CLANG8 @@ -287,11 +290,10 @@ printf "## ENABLE_COVERAGE_TESTING=%s\\n" "${ENABLE_COVERAGE_TESTING}" mkdir -p $BUILD_DIR cd $BUILD_DIR -$CMAKE -DCMAKE_BUILD_TYPE="${CMAKE_BUILD_TYPE}" -DCMAKE_CXX_COMPILER="${CXX}" \ - -DCORE_SYMBOL_NAME="${CORE_SYMBOL_NAME}" \ - -DOPENSSL_ROOT_DIR="${OPENSSL_ROOT_DIR}" -DBUILD_MONGO_DB_PLUGIN=true \ - -DENABLE_COVERAGE_TESTING="${ENABLE_COVERAGE_TESTING}" -DBUILD_DOXYGEN="${DOXYGEN}" \ +$CMAKE -DCMAKE_BUILD_TYPE="${CMAKE_BUILD_TYPE}" -DCMAKE_CXX_COMPILER="${CXX}" -DOPENSSL_ROOT_DIR="${OPENSSL_ROOT_DIR}" -DBUILD_MONGO_DB_PLUGIN=false \ + -DCORE_SYMBOL_NAME="${CORE_SYMBOL_NAME}" -DENABLE_COVERAGE_TESTING="${ENABLE_COVERAGE_TESTING}" -DBUILD_DOXYGEN="${DOXYGEN}" \ -DCMAKE_INSTALL_PREFIX=$OPT_LOCATION/eosio $LOCAL_CMAKE_FLAGS "${REPO_ROOT}" + if [ $? -ne 0 ]; then exit -1; fi make -j"${JOBS}" if [ $? -ne 0 ]; then exit -1; fi diff --git a/scripts/eosio_build_amazon.sh b/scripts/eosio_build_amazon.sh index 3ab12ca9d54..4dc2eb6fbaf 100755 --- a/scripts/eosio_build_amazon.sh +++ b/scripts/eosio_build_amazon.sh @@ -266,7 +266,7 @@ if [ $BUILD_CLANG8 ]; then && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_VERSION https://git.llvm.org/git/compiler-rt.git \ && cd ${OPT_LOCATION}/clang8 \ && mkdir build && cd build \ - && $CMAKE -G "Unix Makefiles" -DCMAKE_INSTALL_PREFIX="${CLANG8_ROOT}" -DLLVM_BUILD_EXTERNAL_COMPILER_RT=ON -DLLVM_BUILD_LLVM_DYLIB=ON -DLLVM_ENABLE_LIBCXX=ON -DLLVM_ENABLE_RTTI=ON -DLLVM_INCLUDE_DOCS=OFF -DLLVM_OPTIMIZED_TABLEGEN=ON -DLLVM_TARGETS_TO_BUILD=all .. \ + && $CMAKE -G "Unix Makefiles" -DCMAKE_INSTALL_PREFIX="${CLANG8_ROOT}" -DLLVM_BUILD_EXTERNAL_COMPILER_RT=ON -DLLVM_BUILD_LLVM_DYLIB=ON -DLLVM_ENABLE_LIBCXX=ON -DLLVM_ENABLE_RTTI=ON -DLLVM_INCLUDE_DOCS=OFF -DLLVM_OPTIMIZED_TABLEGEN=ON -DLLVM_TARGETS_TO_BUILD=all -DCMAKE_BUILD_TYPE=Release .. \ && make -j"${JOBS}" \ && make install \ && cd ../.. 
\ diff --git a/scripts/eosio_build_centos.sh b/scripts/eosio_build_centos.sh index 292f5699464..33cf68da6ff 100755 --- a/scripts/eosio_build_centos.sh +++ b/scripts/eosio_build_centos.sh @@ -317,7 +317,7 @@ if [ $BUILD_CLANG8 ]; then && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_VERSION https://git.llvm.org/git/compiler-rt.git \ && cd ${OPT_LOCATION}/clang8 \ && mkdir build && cd build \ - && $CMAKE -G "Unix Makefiles" -DCMAKE_INSTALL_PREFIX="${CLANG8_ROOT}" -DLLVM_BUILD_EXTERNAL_COMPILER_RT=ON -DLLVM_BUILD_LLVM_DYLIB=ON -DLLVM_ENABLE_LIBCXX=ON -DLLVM_ENABLE_RTTI=ON -DLLVM_INCLUDE_DOCS=OFF -DLLVM_OPTIMIZED_TABLEGEN=ON -DLLVM_TARGETS_TO_BUILD=all .. \ + && $CMAKE -G "Unix Makefiles" -DCMAKE_INSTALL_PREFIX="${CLANG8_ROOT}" -DLLVM_BUILD_EXTERNAL_COMPILER_RT=ON -DLLVM_BUILD_LLVM_DYLIB=ON -DLLVM_ENABLE_LIBCXX=ON -DLLVM_ENABLE_RTTI=ON -DLLVM_INCLUDE_DOCS=OFF -DLLVM_OPTIMIZED_TABLEGEN=ON -DLLVM_TARGETS_TO_BUILD=all -DCMAKE_BUILD_TYPE=Release .. \ && make -j"${JOBS}" \ && make install \ && cd ../.. \ diff --git a/scripts/eosio_build_darwin.sh b/scripts/eosio_build_darwin.sh index 4c1f53bf88f..54d12582972 100755 --- a/scripts/eosio_build_darwin.sh +++ b/scripts/eosio_build_darwin.sh @@ -188,64 +188,64 @@ if [ $? -ne 0 ]; then exit -1; fi printf "\\n" -printf "Checking MongoDB installation...\\n" -if [ ! -d $MONGODB_ROOT ]; then - printf "Installing MongoDB into ${MONGODB_ROOT}...\\n" - curl -OL https://fastdl.mongodb.org/osx/mongodb-osx-ssl-x86_64-$MONGODB_VERSION.tgz \ - && tar -xzf mongodb-osx-ssl-x86_64-$MONGODB_VERSION.tgz \ - && mv $SRC_LOCATION/mongodb-osx-x86_64-$MONGODB_VERSION $MONGODB_ROOT \ - && touch $MONGODB_LOG_LOCATION/mongod.log \ - && rm -f mongodb-osx-ssl-x86_64-$MONGODB_VERSION.tgz \ - && cp -f $REPO_ROOT/scripts/mongod.conf $MONGODB_CONF \ - && mkdir -p $MONGODB_DATA_LOCATION \ - && rm -rf $MONGODB_LINK_LOCATION \ - && rm -rf $BIN_LOCATION/mongod \ - && ln -s $MONGODB_ROOT $MONGODB_LINK_LOCATION \ - && ln -s $MONGODB_LINK_LOCATION/bin/mongod $BIN_LOCATION/mongod \ - || exit 1 - printf " - MongoDB successfully installed @ ${MONGODB_ROOT}\\n" -else - printf " - MongoDB found with correct version @ ${MONGODB_ROOT}.\\n" -fi -if [ $? -ne 0 ]; then exit -1; fi -printf "Checking MongoDB C driver installation...\\n" -if [ ! -d $MONGO_C_DRIVER_ROOT ]; then - printf "Installing MongoDB C driver...\\n" - curl -LO https://github.com/mongodb/mongo-c-driver/releases/download/$MONGO_C_DRIVER_VERSION/mongo-c-driver-$MONGO_C_DRIVER_VERSION.tar.gz \ - && tar -xzf mongo-c-driver-$MONGO_C_DRIVER_VERSION.tar.gz \ - && cd mongo-c-driver-$MONGO_C_DRIVER_VERSION \ - && mkdir -p cmake-build \ - && cd cmake-build \ - && $CMAKE -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$HOME -DENABLE_BSON=ON -DENABLE_SSL=DARWIN -DENABLE_AUTOMATIC_INIT_AND_CLEANUP=OFF -DENABLE_STATIC=ON .. \ - && make -j"${JOBS}" \ - && make install \ - && cd ../.. \ - && rm mongo-c-driver-$MONGO_C_DRIVER_VERSION.tar.gz \ - || exit 1 - printf " - MongoDB C driver successfully installed @ ${MONGO_C_DRIVER_ROOT}.\\n" -else - printf " - MongoDB C driver found with correct version @ ${MONGO_C_DRIVER_ROOT}.\\n" -fi -if [ $? 
-ne 0 ]; then exit -1; fi -printf "Checking MongoDB C++ driver installation...\\n" -if [ "$(grep "Version:" $HOME/lib/pkgconfig/libmongocxx-static.pc 2>/dev/null | tr -s ' ' | awk '{print $2}')" != $MONGO_CXX_DRIVER_VERSION ]; then - printf "Installing MongoDB C++ driver...\\n" - curl -L https://github.com/mongodb/mongo-cxx-driver/archive/r$MONGO_CXX_DRIVER_VERSION.tar.gz -o mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION.tar.gz \ - && tar -xzf mongo-cxx-driver-r${MONGO_CXX_DRIVER_VERSION}.tar.gz \ - && cd mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION/build \ - && $CMAKE -DBUILD_SHARED_LIBS=OFF -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$HOME .. \ - && make -j"${JOBS}" VERBOSE=1 \ - && make install \ - && cd ../.. \ - && rm -f mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION.tar.gz \ - || exit 1 - printf " - MongoDB C++ driver successfully installed @ ${MONGO_CXX_DRIVER_ROOT}.\\n" -else - printf " - MongoDB C++ driver found with correct version @ ${MONGO_CXX_DRIVER_ROOT}.\\n" -fi -if [ $? -ne 0 ]; then exit -1; fi - -printf "\\n" +#printf "Checking MongoDB installation...\\n" +#if [ ! -d $MONGODB_ROOT ]; then +# printf "Installing MongoDB into ${MONGODB_ROOT}...\\n" +# curl -OL https://fastdl.mongodb.org/osx/mongodb-osx-ssl-x86_64-$MONGODB_VERSION.tgz \ +# && tar -xzf mongodb-osx-ssl-x86_64-$MONGODB_VERSION.tgz \ +# && mv $SRC_LOCATION/mongodb-osx-x86_64-$MONGODB_VERSION $MONGODB_ROOT \ +# && touch $MONGODB_LOG_LOCATION/mongod.log \ +# && rm -f mongodb-osx-ssl-x86_64-$MONGODB_VERSION.tgz \ +# && cp -f $REPO_ROOT/scripts/mongod.conf $MONGODB_CONF \ +# && mkdir -p $MONGODB_DATA_LOCATION \ +# && rm -rf $MONGODB_LINK_LOCATION \ +# && rm -rf $BIN_LOCATION/mongod \ +# && ln -s $MONGODB_ROOT $MONGODB_LINK_LOCATION \ +# && ln -s $MONGODB_LINK_LOCATION/bin/mongod $BIN_LOCATION/mongod \ +# || exit 1 +# printf " - MongoDB successfully installed @ ${MONGODB_ROOT}\\n" +#else +# printf " - MongoDB found with correct version @ ${MONGODB_ROOT}.\\n" +#fi +#if [ $? -ne 0 ]; then exit -1; fi +#printf "Checking MongoDB C driver installation...\\n" +#if [ ! -d $MONGO_C_DRIVER_ROOT ]; then +# printf "Installing MongoDB C driver...\\n" +# curl -LO https://github.com/mongodb/mongo-c-driver/releases/download/$MONGO_C_DRIVER_VERSION/mongo-c-driver-$MONGO_C_DRIVER_VERSION.tar.gz \ +# && tar -xzf mongo-c-driver-$MONGO_C_DRIVER_VERSION.tar.gz \ +# && cd mongo-c-driver-$MONGO_C_DRIVER_VERSION \ +# && mkdir -p cmake-build \ +# && cd cmake-build \ +# && $CMAKE -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$HOME -DENABLE_BSON=ON -DENABLE_SSL=DARWIN -DENABLE_AUTOMATIC_INIT_AND_CLEANUP=OFF -DENABLE_STATIC=ON .. \ +# && make -j"${JOBS}" \ +# && make install \ +# && cd ../.. \ +# && rm mongo-c-driver-$MONGO_C_DRIVER_VERSION.tar.gz \ +# || exit 1 +# printf " - MongoDB C driver successfully installed @ ${MONGO_C_DRIVER_ROOT}.\\n" +#else +# printf " - MongoDB C driver found with correct version @ ${MONGO_C_DRIVER_ROOT}.\\n" +#fi +#if [ $? 
-ne 0 ]; then exit -1; fi +#printf "Checking MongoDB C++ driver installation...\\n" +#if [ "$(grep "Version:" $HOME/lib/pkgconfig/libmongocxx-static.pc 2>/dev/null | tr -s ' ' | awk '{print $2}')" != $MONGO_CXX_DRIVER_VERSION ]; then +# printf "Installing MongoDB C++ driver...\\n" +# curl -L https://github.com/mongodb/mongo-cxx-driver/archive/r$MONGO_CXX_DRIVER_VERSION.tar.gz -o mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION.tar.gz \ +# && tar -xzf mongo-cxx-driver-r${MONGO_CXX_DRIVER_VERSION}.tar.gz \ +# && cd mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION/build \ +# && $CMAKE -DBUILD_SHARED_LIBS=OFF -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$HOME .. \ +# && make -j"${JOBS}" VERBOSE=1 \ +# && make install \ +# && cd ../.. \ +# && rm -f mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION.tar.gz \ +# || exit 1 +# printf " - MongoDB C++ driver successfully installed @ ${MONGO_CXX_DRIVER_ROOT}.\\n" +#else +# printf " - MongoDB C++ driver found with correct version @ ${MONGO_CXX_DRIVER_ROOT}.\\n" +#fi +#if [ $? -ne 0 ]; then exit -1; fi +# +#printf "\\n" # We install llvm into /usr/local/opt using brew install llvm@4 @@ -280,7 +280,7 @@ if [ $BUILD_CLANG8 ]; then && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_VERSION https://git.llvm.org/git/compiler-rt.git \ && cd ${OPT_LOCATION}/clang8 \ && mkdir build && cd build \ - && $CMAKE -G "Unix Makefiles" -DCMAKE_INSTALL_PREFIX="${CLANG8_ROOT}" -DLLVM_BUILD_EXTERNAL_COMPILER_RT=ON -DLLVM_BUILD_LLVM_DYLIB=ON -DLLVM_ENABLE_LIBCXX=ON -DLLVM_ENABLE_RTTI=ON -DLLVM_INCLUDE_DOCS=OFF -DLLVM_OPTIMIZED_TABLEGEN=ON -DLLVM_TARGETS_TO_BUILD=all -DLLVM_CREATE_XCODE_TOOLCHAIN=ON .. \ + && $CMAKE -G "Unix Makefiles" -DCMAKE_INSTALL_PREFIX="${CLANG8_ROOT}" -DLLVM_BUILD_EXTERNAL_COMPILER_RT=ON -DLLVM_BUILD_LLVM_DYLIB=ON -DLLVM_ENABLE_LIBCXX=ON -DLLVM_ENABLE_RTTI=ON -DLLVM_INCLUDE_DOCS=OFF -DLLVM_OPTIMIZED_TABLEGEN=ON -DLLVM_TARGETS_TO_BUILD=all -DLLVM_CREATE_XCODE_TOOLCHAIN=ON -DCMAKE_BUILD_TYPE=Release .. \ && make -j"${JOBS}" \ && make install \ && cd ../.. \ diff --git a/scripts/eosio_build_fedora.sh b/scripts/eosio_build_fedora.sh index d3c2880a0a9..ca9af6e67fe 100755 --- a/scripts/eosio_build_fedora.sh +++ b/scripts/eosio_build_fedora.sh @@ -249,7 +249,7 @@ if [ $BUILD_CLANG8 ]; then && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_VERSION https://git.llvm.org/git/compiler-rt.git \ && cd ${OPT_LOCATION}/clang8 \ && mkdir build && cd build \ - && $CMAKE -G "Unix Makefiles" -DCMAKE_INSTALL_PREFIX="${CLANG8_ROOT}" -DLLVM_BUILD_EXTERNAL_COMPILER_RT=ON -DLLVM_BUILD_LLVM_DYLIB=ON -DLLVM_ENABLE_LIBCXX=ON -DLLVM_ENABLE_RTTI=ON -DLLVM_INCLUDE_DOCS=OFF -DLLVM_OPTIMIZED_TABLEGEN=ON -DLLVM_TARGETS_TO_BUILD=all .. \ + && $CMAKE -G "Unix Makefiles" -DCMAKE_INSTALL_PREFIX="${CLANG8_ROOT}" -DLLVM_BUILD_EXTERNAL_COMPILER_RT=ON -DLLVM_BUILD_LLVM_DYLIB=ON -DLLVM_ENABLE_LIBCXX=ON -DLLVM_ENABLE_RTTI=ON -DLLVM_INCLUDE_DOCS=OFF -DLLVM_OPTIMIZED_TABLEGEN=ON -DLLVM_TARGETS_TO_BUILD=all -DCMAKE_BUILD_TYPE=Release .. \ && make -j"${JOBS}" \ && make install \ && cd ../.. 
\ diff --git a/scripts/eosio_build_ubuntu.sh b/scripts/eosio_build_ubuntu.sh index d174d7b0127..15c96234173 100755 --- a/scripts/eosio_build_ubuntu.sh +++ b/scripts/eosio_build_ubuntu.sh @@ -272,7 +272,7 @@ if [ $BUILD_CLANG8 ]; then && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_VERSION https://git.llvm.org/git/compiler-rt.git \ && cd ${OPT_LOCATION}/clang8 \ && mkdir build && cd build \ - && $CMAKE -G "Unix Makefiles" -DCMAKE_INSTALL_PREFIX="${CLANG8_ROOT}" -DLLVM_BUILD_EXTERNAL_COMPILER_RT=ON -DLLVM_BUILD_LLVM_DYLIB=ON -DLLVM_ENABLE_LIBCXX=ON -DLLVM_ENABLE_RTTI=ON -DLLVM_INCLUDE_DOCS=OFF -DLLVM_OPTIMIZED_TABLEGEN=ON -DLLVM_TARGETS_TO_BUILD=all .. \ + && $CMAKE -G "Unix Makefiles" -DCMAKE_INSTALL_PREFIX="${CLANG8_ROOT}" -DLLVM_BUILD_EXTERNAL_COMPILER_RT=ON -DLLVM_BUILD_LLVM_DYLIB=ON -DLLVM_ENABLE_LIBCXX=ON -DLLVM_ENABLE_RTTI=ON -DLLVM_INCLUDE_DOCS=OFF -DLLVM_OPTIMIZED_TABLEGEN=ON -DLLVM_TARGETS_TO_BUILD=all -DCMAKE_BUILD_TYPE=Release .. \ && make -j"${JOBS}" \ && make install \ && cd ../.. \ From b1739bb89d22377ffea62fd39e8443e3f8922bbb Mon Sep 17 00:00:00 2001 From: Bucky Kittinger Date: Wed, 10 Apr 2019 15:48:31 -0400 Subject: [PATCH 331/680] use libcxxabi --- scripts/eosio_build_amazon.sh | 1 + scripts/eosio_build_centos.sh | 1 + scripts/eosio_build_darwin.sh | 1 + scripts/eosio_build_fedora.sh | 1 + scripts/eosio_build_ubuntu.sh | 1 + 5 files changed, 5 insertions(+) diff --git a/scripts/eosio_build_amazon.sh b/scripts/eosio_build_amazon.sh index 4dc2eb6fbaf..f0214df603e 100755 --- a/scripts/eosio_build_amazon.sh +++ b/scripts/eosio_build_amazon.sh @@ -262,6 +262,7 @@ if [ $BUILD_CLANG8 ]; then && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_VERSION https://git.llvm.org/git/clang-tools-extra.git \ && cd ../../../../projects \ && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_VERSION https://git.llvm.org/git/libcxx.git \ + && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_VERSION https://git.llvm.org/git/libcxxabi.git \ && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_VERSION https://git.llvm.org/git/libunwind.git \ && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_VERSION https://git.llvm.org/git/compiler-rt.git \ && cd ${OPT_LOCATION}/clang8 \ diff --git a/scripts/eosio_build_centos.sh b/scripts/eosio_build_centos.sh index 33cf68da6ff..e2f98fd9214 100755 --- a/scripts/eosio_build_centos.sh +++ b/scripts/eosio_build_centos.sh @@ -313,6 +313,7 @@ if [ $BUILD_CLANG8 ]; then && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_VERSION https://git.llvm.org/git/clang-tools-extra.git \ && cd ../../../../projects \ && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_VERSION https://git.llvm.org/git/libcxx.git \ + && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_VERSION https://git.llvm.org/git/libcxxabi.git \ && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_VERSION https://git.llvm.org/git/libunwind.git \ && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_VERSION https://git.llvm.org/git/compiler-rt.git \ && cd ${OPT_LOCATION}/clang8 \ diff --git a/scripts/eosio_build_darwin.sh b/scripts/eosio_build_darwin.sh index 54d12582972..9d5fa74bc10 100755 --- a/scripts/eosio_build_darwin.sh +++ b/scripts/eosio_build_darwin.sh @@ -276,6 +276,7 @@ if [ $BUILD_CLANG8 ]; then && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_VERSION https://git.llvm.org/git/clang-tools-extra.git \ && cd ../../../../projects \ && git clone 
--depth 1 --single-branch --branch $PINNED_COMPILER_VERSION https://git.llvm.org/git/libcxx.git \ + && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_VERSION https://git.llvm.org/git/libcxxabi.git \ && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_VERSION https://git.llvm.org/git/libunwind.git \ && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_VERSION https://git.llvm.org/git/compiler-rt.git \ && cd ${OPT_LOCATION}/clang8 \ diff --git a/scripts/eosio_build_fedora.sh b/scripts/eosio_build_fedora.sh index ca9af6e67fe..49591fcf105 100755 --- a/scripts/eosio_build_fedora.sh +++ b/scripts/eosio_build_fedora.sh @@ -245,6 +245,7 @@ if [ $BUILD_CLANG8 ]; then && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_VERSION https://git.llvm.org/git/clang-tools-extra.git \ && cd ../../../../projects \ && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_VERSION https://git.llvm.org/git/libcxx.git \ + && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_VERSION https://git.llvm.org/git/libcxxabi.git \ && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_VERSION https://git.llvm.org/git/libunwind.git \ && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_VERSION https://git.llvm.org/git/compiler-rt.git \ && cd ${OPT_LOCATION}/clang8 \ diff --git a/scripts/eosio_build_ubuntu.sh b/scripts/eosio_build_ubuntu.sh index 15c96234173..dd5136b6446 100755 --- a/scripts/eosio_build_ubuntu.sh +++ b/scripts/eosio_build_ubuntu.sh @@ -268,6 +268,7 @@ if [ $BUILD_CLANG8 ]; then && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_VERSION https://git.llvm.org/git/clang-tools-extra.git \ && cd ../../../../projects \ && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_VERSION https://git.llvm.org/git/libcxx.git \ + && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_VERSION https://git.llvm.org/git/libcxxabi.git \ && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_VERSION https://git.llvm.org/git/libunwind.git \ && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_VERSION https://git.llvm.org/git/compiler-rt.git \ && cd ${OPT_LOCATION}/clang8 \ From 637afd9f6011a470280bcd550c93e3706e68f84d Mon Sep 17 00:00:00 2001 From: Bucky Kittinger Date: Wed, 10 Apr 2019 16:36:58 -0400 Subject: [PATCH 332/680] small changes for clang 8 build --- libraries/wabt | 2 +- scripts/eosio_build_darwin.sh | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/libraries/wabt b/libraries/wabt index bf353aa719c..a023d5132bd 160000 --- a/libraries/wabt +++ b/libraries/wabt @@ -1 +1 @@ -Subproject commit bf353aa719c88b7152ee09e7f877a507cb7df27b +Subproject commit a023d5132bd4aaa611c88c6d43486eb02c5e7411 diff --git a/scripts/eosio_build_darwin.sh b/scripts/eosio_build_darwin.sh index 9d5fa74bc10..07fa2f53cac 100755 --- a/scripts/eosio_build_darwin.sh +++ b/scripts/eosio_build_darwin.sh @@ -281,7 +281,7 @@ if [ $BUILD_CLANG8 ]; then && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_VERSION https://git.llvm.org/git/compiler-rt.git \ && cd ${OPT_LOCATION}/clang8 \ && mkdir build && cd build \ - && $CMAKE -G "Unix Makefiles" -DCMAKE_INSTALL_PREFIX="${CLANG8_ROOT}" -DLLVM_BUILD_EXTERNAL_COMPILER_RT=ON -DLLVM_BUILD_LLVM_DYLIB=ON -DLLVM_ENABLE_LIBCXX=ON -DLLVM_ENABLE_RTTI=ON -DLLVM_INCLUDE_DOCS=OFF -DLLVM_OPTIMIZED_TABLEGEN=ON -DLLVM_TARGETS_TO_BUILD=all -DLLVM_CREATE_XCODE_TOOLCHAIN=ON -DCMAKE_BUILD_TYPE=Release .. 
\ + && $CMAKE -G "Unix Makefiles" -DCMAKE_INSTALL_PREFIX="${CLANG8_ROOT}" -DLLVM_BUILD_EXTERNAL_COMPILER_RT=ON -DLLVM_BUILD_LLVM_DYLIB=ON -DLLVM_ENABLE_LIBCXX=ON -DLLVM_ENABLE_RTTI=ON -DLLVM_INCLUDE_DOCS=OFF -DLLVM_OPTIMIZED_TABLEGEN=ON -DLLVM_TARGETS_TO_BUILD=all -DCMAKE_BUILD_TYPE=Release .. \ && make -j"${JOBS}" \ && make install \ && cd ../.. \ From acce9d1a2a59ec5dcd17ebd46a6b4e0320be4508 Mon Sep 17 00:00:00 2001 From: arhag Date: Wed, 10 Apr 2019 17:51:04 -0400 Subject: [PATCH 333/680] rename parent_action_ordinal to closest_unnotified_ancestor_action_ordinal in action_trace --- libraries/chain/include/eosio/chain/trace.hpp | 10 ++- .../eosio/chain/transaction_context.hpp | 6 +- libraries/chain/trace.cpp | 8 +- libraries/chain/transaction_context.cpp | 18 +++-- plugins/chain_plugin/chain_plugin.cpp | 34 +++++---- .../state_history_serialization.hpp | 2 +- .../state_history_plugin_abi.cpp | 2 +- unittests/api_tests.cpp | 76 +++++++++---------- 8 files changed, 86 insertions(+), 70 deletions(-) diff --git a/libraries/chain/include/eosio/chain/trace.hpp b/libraries/chain/include/eosio/chain/trace.hpp index 5331bef1563..e60b8333a31 100644 --- a/libraries/chain/include/eosio/chain/trace.hpp +++ b/libraries/chain/include/eosio/chain/trace.hpp @@ -25,14 +25,16 @@ namespace eosio { namespace chain { struct action_trace { action_trace( const transaction_trace& trace, const action& act, account_name receiver, bool context_free, - uint32_t action_ordinal, uint32_t creator_action_ordinal, uint32_t parent_action_ordinal ); + uint32_t action_ordinal, uint32_t creator_action_ordinal, + uint32_t closest_unnotified_ancestor_action_ordinal ); action_trace( const transaction_trace& trace, action&& act, account_name receiver, bool context_free, - uint32_t action_ordinal, uint32_t creator_action_ordinal, uint32_t parent_action_ordinal ); + uint32_t action_ordinal, uint32_t creator_action_ordinal, + uint32_t closest_unnotified_ancestor_action_ordinal ); action_trace(){} fc::unsigned_int action_ordinal; fc::unsigned_int creator_action_ordinal; - fc::unsigned_int parent_action_ordinal; + fc::unsigned_int closest_unnotified_ancestor_action_ordinal; fc::optional receipt; action_name receiver; action act; @@ -70,7 +72,7 @@ FC_REFLECT( eosio::chain::account_delta, (account)(delta) ) FC_REFLECT( eosio::chain::action_trace, - (action_ordinal)(creator_action_ordinal)(parent_action_ordinal)(receipt) + (action_ordinal)(creator_action_ordinal)(closest_unnotified_ancestor_action_ordinal)(receipt) (receiver)(act)(context_free)(elapsed)(console)(trx_id)(block_num)(block_time) (producer_block_id)(account_ram_deltas)(except) ) diff --git a/libraries/chain/include/eosio/chain/transaction_context.hpp b/libraries/chain/include/eosio/chain/transaction_context.hpp index bbfe53810f6..a3779d439a3 100644 --- a/libraries/chain/include/eosio/chain/transaction_context.hpp +++ b/libraries/chain/include/eosio/chain/transaction_context.hpp @@ -69,15 +69,15 @@ namespace eosio { namespace chain { /** invalidates any action_trace references returned by get_action_trace */ uint32_t schedule_action( const action& act, account_name receiver, bool context_free, - uint32_t creator_action_ordinal, uint32_t parent_action_ordinal ); + uint32_t creator_action_ordinal, uint32_t closest_unnotified_ancestor_action_ordinal ); /** invalidates any action_trace references returned by get_action_trace */ uint32_t schedule_action( action&& act, account_name receiver, bool context_free, - uint32_t creator_action_ordinal, uint32_t parent_action_ordinal ); + 
uint32_t creator_action_ordinal, uint32_t closest_unnotified_ancestor_action_ordinal ); /** invalidates any action_trace references returned by get_action_trace */ uint32_t schedule_action( uint32_t action_ordinal, account_name receiver, bool context_free, - uint32_t creator_action_ordinal, uint32_t parent_action_ordinal ); + uint32_t creator_action_ordinal, uint32_t closest_unnotified_ancestor_action_ordinal ); void execute_action( uint32_t action_ordinal, uint32_t recurse_depth ); diff --git a/libraries/chain/trace.cpp b/libraries/chain/trace.cpp index 2379018518f..44a7ba94cfd 100644 --- a/libraries/chain/trace.cpp +++ b/libraries/chain/trace.cpp @@ -8,11 +8,11 @@ namespace eosio { namespace chain { action_trace::action_trace( const transaction_trace& trace, const action& act, account_name receiver, bool context_free, - uint32_t action_ordinal, uint32_t creator_action_ordinal, uint32_t parent_action_ordinal + uint32_t action_ordinal, uint32_t creator_action_ordinal, uint32_t closest_unnotified_ancestor_action_ordinal ) :action_ordinal( action_ordinal ) ,creator_action_ordinal( creator_action_ordinal ) -,parent_action_ordinal( parent_action_ordinal ) +,closest_unnotified_ancestor_action_ordinal( closest_unnotified_ancestor_action_ordinal ) ,receiver( receiver ) ,act( act ) ,context_free( context_free ) @@ -24,11 +24,11 @@ action_trace::action_trace( action_trace::action_trace( const transaction_trace& trace, action&& act, account_name receiver, bool context_free, - uint32_t action_ordinal, uint32_t creator_action_ordinal, uint32_t parent_action_ordinal + uint32_t action_ordinal, uint32_t creator_action_ordinal, uint32_t closest_unnotified_ancestor_action_ordinal ) :action_ordinal( action_ordinal ) ,creator_action_ordinal( creator_action_ordinal ) -,parent_action_ordinal( parent_action_ordinal ) +,closest_unnotified_ancestor_action_ordinal( closest_unnotified_ancestor_action_ordinal ) ,receiver( receiver ) ,act( std::move(act) ) ,context_free( context_free ) diff --git a/libraries/chain/transaction_context.cpp b/libraries/chain/transaction_context.cpp index 2f196687d1f..4380192a7d8 100644 --- a/libraries/chain/transaction_context.cpp +++ b/libraries/chain/transaction_context.cpp @@ -595,29 +595,34 @@ namespace bacc = boost::accumulators; } uint32_t transaction_context::schedule_action( const action& act, account_name receiver, bool context_free, - uint32_t creator_action_ordinal, uint32_t parent_action_ordinal ) + uint32_t creator_action_ordinal, + uint32_t closest_unnotified_ancestor_action_ordinal ) { uint32_t new_action_ordinal = trace->action_traces.size() + 1; trace->action_traces.emplace_back( *trace, act, receiver, context_free, - new_action_ordinal, creator_action_ordinal, parent_action_ordinal ); + new_action_ordinal, creator_action_ordinal, + closest_unnotified_ancestor_action_ordinal ); return new_action_ordinal; } uint32_t transaction_context::schedule_action( action&& act, account_name receiver, bool context_free, - uint32_t creator_action_ordinal, uint32_t parent_action_ordinal ) + uint32_t creator_action_ordinal, + uint32_t closest_unnotified_ancestor_action_ordinal ) { uint32_t new_action_ordinal = trace->action_traces.size() + 1; trace->action_traces.emplace_back( *trace, std::move(act), receiver, context_free, - new_action_ordinal, creator_action_ordinal, parent_action_ordinal ); + new_action_ordinal, creator_action_ordinal, + closest_unnotified_ancestor_action_ordinal ); return new_action_ordinal; } uint32_t transaction_context::schedule_action( uint32_t action_ordinal, 
account_name receiver, bool context_free, - uint32_t creator_action_ordinal, uint32_t parent_action_ordinal ) + uint32_t creator_action_ordinal, + uint32_t closest_unnotified_ancestor_action_ordinal ) { uint32_t new_action_ordinal = trace->action_traces.size() + 1; @@ -628,7 +633,8 @@ namespace bacc = boost::accumulators; // The reserve above is required so that the emplace_back below does not invalidate the provided_action reference. trace->action_traces.emplace_back( *trace, provided_action, receiver, context_free, - new_action_ordinal, creator_action_ordinal, parent_action_ordinal ); + new_action_ordinal, creator_action_ordinal, + closest_unnotified_ancestor_action_ordinal ); return new_action_ordinal; } diff --git a/plugins/chain_plugin/chain_plugin.cpp b/plugins/chain_plugin/chain_plugin.cpp index e245e3017fd..dec98b41b55 100644 --- a/plugins/chain_plugin/chain_plugin.cpp +++ b/plugins/chain_plugin/chain_plugin.cpp @@ -1899,34 +1899,42 @@ void read_write::push_transaction(const read_write::push_transaction_params& par try { output = db.to_variant_with_abi( *trx_trace_ptr, abi_serializer_max_time ); - // Create map of (parent_action_ordinal, global_sequence) with action trace + // Create map of (closest_unnotified_ancestor_action_ordinal, global_sequence) with action trace std::map< std::pair, fc::mutable_variant_object > act_traces_map; for( const auto& act_trace : output["action_traces"].get_array() ) { if (act_trace["receipt"].is_null() && act_trace["except"].is_null()) continue; - auto parent_action_ordinal = act_trace["parent_action_ordinal"].as().value; + auto closest_unnotified_ancestor_action_ordinal = + act_trace["closest_unnotified_ancestor_action_ordinal"].as().value; auto global_sequence = act_trace["receipt"].is_null() ? std::numeric_limits::max() : act_trace["receipt"]["global_sequence"].as(); - act_traces_map.emplace( std::make_pair( parent_action_ordinal, global_sequence ), + act_traces_map.emplace( std::make_pair( closest_unnotified_ancestor_action_ordinal, + global_sequence ), act_trace.get_object() ); } - std::function(uint32_t)> convert_act_trace_to_tree_struct = [&](uint32_t parent_action_ordinal) { + std::function(uint32_t)> convert_act_trace_to_tree_struct = + [&](uint32_t closest_unnotified_ancestor_action_ordinal) { vector restructured_act_traces; - auto it = act_traces_map.lower_bound( std::make_pair(parent_action_ordinal, 0) ); - for( ; it != act_traces_map.end() && it->first.first == parent_action_ordinal; ++it ) { + auto it = act_traces_map.lower_bound( + std::make_pair( closest_unnotified_ancestor_action_ordinal, 0) + ); + for( ; + it != act_traces_map.end() && it->first.first == closest_unnotified_ancestor_action_ordinal; ++it ) + { auto& act_trace_mvo = it->second; auto action_ordinal = act_trace_mvo["action_ordinal"].as().value; act_trace_mvo["inline_traces"] = convert_act_trace_to_tree_struct(action_ordinal); if (act_trace_mvo["receipt"].is_null()) { - act_trace_mvo["receipt"] = fc::mutable_variant_object()("abi_sequence", 0) - ("act_digest", digest_type::hash(trx_trace_ptr->action_traces[action_ordinal-1].act)) - ("auth_sequence", flat_map()) - ("code_sequence", 0) - ("global_sequence", 0) - ("receiver", act_trace_mvo["receiver"]) - ("recv_sequence", 0); + act_trace_mvo["receipt"] = fc::mutable_variant_object() + ("abi_sequence", 0) + ("act_digest", digest_type::hash(trx_trace_ptr->action_traces[action_ordinal-1].act)) + ("auth_sequence", flat_map()) + ("code_sequence", 0) + ("global_sequence", 0) + ("receiver", act_trace_mvo["receiver"]) + 
("recv_sequence", 0); } restructured_act_traces.push_back( std::move(act_trace_mvo) ); } diff --git a/plugins/state_history_plugin/include/eosio/state_history_plugin/state_history_serialization.hpp b/plugins/state_history_plugin/include/eosio/state_history_plugin/state_history_serialization.hpp index 3251abef7fb..99356baea25 100644 --- a/plugins/state_history_plugin/include/eosio/state_history_plugin/state_history_serialization.hpp +++ b/plugins/state_history_plugin/include/eosio/state_history_plugin/state_history_serialization.hpp @@ -482,7 +482,7 @@ datastream& operator<<(datastream& ds, const history_serial_wrapper(obj.obj.action_ordinal)); fc::raw::pack(ds, as_type(obj.obj.creator_action_ordinal)); - fc::raw::pack(ds, as_type(obj.obj.parent_action_ordinal)); + fc::raw::pack(ds, as_type(obj.obj.closest_unnotified_ancestor_action_ordinal)); fc::raw::pack(ds, bool(obj.obj.receipt)); if (obj.obj.receipt) { fc::raw::pack(ds, make_history_serial_wrapper(obj.db, as_type(*obj.obj.receipt))); diff --git a/plugins/state_history_plugin/state_history_plugin_abi.cpp b/plugins/state_history_plugin/state_history_plugin_abi.cpp index b35882a7b83..b0a2f5b79e1 100644 --- a/plugins/state_history_plugin/state_history_plugin_abi.cpp +++ b/plugins/state_history_plugin/state_history_plugin_abi.cpp @@ -95,7 +95,7 @@ extern const char* const state_history_plugin_abi = R"({ "name": "action_trace_v0", "fields": [ { "name": "action_ordinal", "type": "varuint32" }, { "name": "creator_action_ordinal", "type": "varuint32" }, - { "name": "parent_action_ordinal", "type": "varuint32" }, + { "name": "closest_unnotified_ancestor_action_ordinal", "type": "varuint32" }, { "name": "receipt", "type": "action_receipt?" }, { "name": "receiver", "type": "name" }, { "name": "act", "type": "action" }, diff --git a/unittests/api_tests.cpp b/unittests/api_tests.cpp index 04d2a69fffb..151bf82ebcf 100644 --- a/unittests/api_tests.cpp +++ b/unittests/api_tests.cpp @@ -2086,7 +2086,7 @@ BOOST_FIXTURE_TEST_CASE(action_ordinal_test, TESTER) { try { set_code( N(erin), contracts::test_api_wasm() ); produce_blocks(1); - transaction_trace_ptr txn_trace = CALL_TEST_FUNCTION_SCOPE( *this, "test_action", "test_action_ordinal1", + transaction_trace_ptr txn_trace = CALL_TEST_FUNCTION_SCOPE( *this, "test_action", "test_action_ordinal1", {}, vector{ N(testapi)}); BOOST_REQUIRE_EQUAL( validate(), true ); @@ -2097,7 +2097,7 @@ BOOST_FIXTURE_TEST_CASE(action_ordinal_test, TESTER) { try { auto &atrace = txn_trace->action_traces; BOOST_REQUIRE_EQUAL((int)atrace[0].action_ordinal, 1); BOOST_REQUIRE_EQUAL((int)atrace[0].creator_action_ordinal, 0); - BOOST_REQUIRE_EQUAL((int)atrace[0].parent_action_ordinal, 0); + BOOST_REQUIRE_EQUAL((int)atrace[0].closest_unnotified_ancestor_action_ordinal, 0); BOOST_REQUIRE_EQUAL(atrace[0].receiver.value, N(testapi)); BOOST_REQUIRE_EQUAL(atrace[0].act.account.value, N(testapi)); BOOST_REQUIRE_EQUAL(atrace[0].act.name.value, TEST_METHOD("test_action", "test_action_ordinal1")); @@ -2106,7 +2106,7 @@ BOOST_FIXTURE_TEST_CASE(action_ordinal_test, TESTER) { try { BOOST_REQUIRE_EQUAL((int)atrace[1].action_ordinal,2); BOOST_REQUIRE_EQUAL((int)atrace[1].creator_action_ordinal, 1); - BOOST_REQUIRE_EQUAL((int)atrace[1].parent_action_ordinal, 1); + BOOST_REQUIRE_EQUAL((int)atrace[1].closest_unnotified_ancestor_action_ordinal, 1); BOOST_REQUIRE_EQUAL(atrace[1].receiver.value, N(bob)); BOOST_REQUIRE_EQUAL(atrace[1].act.account.value, N(testapi)); BOOST_REQUIRE_EQUAL(atrace[1].act.name.value, TEST_METHOD("test_action", 
"test_action_ordinal1")); @@ -2115,7 +2115,7 @@ BOOST_FIXTURE_TEST_CASE(action_ordinal_test, TESTER) { try { BOOST_REQUIRE_EQUAL((int)atrace[2].action_ordinal, 3); BOOST_REQUIRE_EQUAL((int)atrace[2].creator_action_ordinal, 1); - BOOST_REQUIRE_EQUAL((int)atrace[2].parent_action_ordinal, 1); + BOOST_REQUIRE_EQUAL((int)atrace[2].closest_unnotified_ancestor_action_ordinal, 1); BOOST_REQUIRE_EQUAL(atrace[2].receiver.value, N(testapi)); BOOST_REQUIRE_EQUAL(atrace[2].act.account.value, N(testapi)); BOOST_REQUIRE_EQUAL(atrace[2].act.name.value, TEST_METHOD("test_action", "test_action_ordinal2")); @@ -2124,7 +2124,7 @@ BOOST_FIXTURE_TEST_CASE(action_ordinal_test, TESTER) { try { BOOST_REQUIRE_EQUAL((int)atrace[3].action_ordinal, 4); BOOST_REQUIRE_EQUAL((int)atrace[3].creator_action_ordinal, 1); - BOOST_REQUIRE_EQUAL((int)atrace[3].parent_action_ordinal, 1); + BOOST_REQUIRE_EQUAL((int)atrace[3].closest_unnotified_ancestor_action_ordinal, 1); BOOST_REQUIRE_EQUAL(atrace[3].receiver.value, N(testapi)); BOOST_REQUIRE_EQUAL(atrace[3].act.account.value, N(testapi)); BOOST_REQUIRE_EQUAL(atrace[3].act.name.value, TEST_METHOD("test_action", "test_action_ordinal3")); @@ -2133,7 +2133,7 @@ BOOST_FIXTURE_TEST_CASE(action_ordinal_test, TESTER) { try { BOOST_REQUIRE_EQUAL((int)atrace[4].action_ordinal, 5); BOOST_REQUIRE_EQUAL((int)atrace[4].creator_action_ordinal, 1); - BOOST_REQUIRE_EQUAL((int)atrace[4].parent_action_ordinal, 1); + BOOST_REQUIRE_EQUAL((int)atrace[4].closest_unnotified_ancestor_action_ordinal, 1); BOOST_REQUIRE_EQUAL(atrace[4].receiver.value, N(charlie)); BOOST_REQUIRE_EQUAL(atrace[4].act.account.value, N(testapi)); BOOST_REQUIRE_EQUAL(atrace[4].act.name.value, TEST_METHOD("test_action", "test_action_ordinal1")); @@ -2142,7 +2142,7 @@ BOOST_FIXTURE_TEST_CASE(action_ordinal_test, TESTER) { try { BOOST_REQUIRE_EQUAL((int)atrace[5].action_ordinal, 6); BOOST_REQUIRE_EQUAL((int)atrace[5].creator_action_ordinal, 2); - BOOST_REQUIRE_EQUAL((int)atrace[5].parent_action_ordinal, 1); + BOOST_REQUIRE_EQUAL((int)atrace[5].closest_unnotified_ancestor_action_ordinal, 1); BOOST_REQUIRE_EQUAL(atrace[5].receiver.value, N(bob)); BOOST_REQUIRE_EQUAL(atrace[5].act.account.value, N(bob)); BOOST_REQUIRE_EQUAL(atrace[5].act.name.value, TEST_METHOD("test_action", "test_action_ordinal_foo")); @@ -2151,7 +2151,7 @@ BOOST_FIXTURE_TEST_CASE(action_ordinal_test, TESTER) { try { BOOST_REQUIRE_EQUAL((int)atrace[6].action_ordinal, 7); BOOST_REQUIRE_EQUAL((int)atrace[6].creator_action_ordinal,2); - BOOST_REQUIRE_EQUAL((int)atrace[6].parent_action_ordinal, 1); + BOOST_REQUIRE_EQUAL((int)atrace[6].closest_unnotified_ancestor_action_ordinal, 1); BOOST_REQUIRE_EQUAL(atrace[6].receiver.value, N(david)); BOOST_REQUIRE_EQUAL(atrace[6].act.account.value, N(testapi)); BOOST_REQUIRE_EQUAL(atrace[6].act.name.value, TEST_METHOD("test_action", "test_action_ordinal1")); @@ -2160,7 +2160,7 @@ BOOST_FIXTURE_TEST_CASE(action_ordinal_test, TESTER) { try { BOOST_REQUIRE_EQUAL((int)atrace[7].action_ordinal, 8); BOOST_REQUIRE_EQUAL((int)atrace[7].creator_action_ordinal, 5); - BOOST_REQUIRE_EQUAL((int)atrace[7].parent_action_ordinal, 1); + BOOST_REQUIRE_EQUAL((int)atrace[7].closest_unnotified_ancestor_action_ordinal, 1); BOOST_REQUIRE_EQUAL(atrace[7].receiver.value, N(charlie)); BOOST_REQUIRE_EQUAL(atrace[7].act.account.value, N(charlie)); BOOST_REQUIRE_EQUAL(atrace[7].act.name.value, TEST_METHOD("test_action", "test_action_ordinal_bar")); @@ -2169,7 +2169,7 @@ BOOST_FIXTURE_TEST_CASE(action_ordinal_test, TESTER) { try { 
BOOST_REQUIRE_EQUAL((int)atrace[8].action_ordinal, 9); BOOST_REQUIRE_EQUAL((int)atrace[8].creator_action_ordinal, 3); - BOOST_REQUIRE_EQUAL((int)atrace[8].parent_action_ordinal, 3); + BOOST_REQUIRE_EQUAL((int)atrace[8].closest_unnotified_ancestor_action_ordinal, 3); BOOST_REQUIRE_EQUAL(atrace[8].receiver.value, N(david)); BOOST_REQUIRE_EQUAL(atrace[8].act.account.value, N(testapi)); BOOST_REQUIRE_EQUAL(atrace[8].act.name.value, TEST_METHOD("test_action", "test_action_ordinal2")); @@ -2178,7 +2178,7 @@ BOOST_FIXTURE_TEST_CASE(action_ordinal_test, TESTER) { try { BOOST_REQUIRE_EQUAL((int)atrace[9].action_ordinal, 10); BOOST_REQUIRE_EQUAL((int)atrace[9].creator_action_ordinal, 3); - BOOST_REQUIRE_EQUAL((int)atrace[9].parent_action_ordinal, 3); + BOOST_REQUIRE_EQUAL((int)atrace[9].closest_unnotified_ancestor_action_ordinal, 3); BOOST_REQUIRE_EQUAL(atrace[9].receiver.value, N(erin)); BOOST_REQUIRE_EQUAL(atrace[9].act.account.value, N(testapi)); BOOST_REQUIRE_EQUAL(atrace[9].act.name.value, TEST_METHOD("test_action", "test_action_ordinal2")); @@ -2187,7 +2187,7 @@ BOOST_FIXTURE_TEST_CASE(action_ordinal_test, TESTER) { try { BOOST_REQUIRE_EQUAL((int)atrace[10].action_ordinal, 11); BOOST_REQUIRE_EQUAL((int)atrace[10].creator_action_ordinal, 3); - BOOST_REQUIRE_EQUAL((int)atrace[10].parent_action_ordinal, 3); + BOOST_REQUIRE_EQUAL((int)atrace[10].closest_unnotified_ancestor_action_ordinal, 3); BOOST_REQUIRE_EQUAL(atrace[10].receiver.value, N(testapi)); BOOST_REQUIRE_EQUAL(atrace[10].act.account.value, N(testapi)); BOOST_REQUIRE_EQUAL(atrace[10].act.name.value, TEST_METHOD("test_action", "test_action_ordinal4")); @@ -2221,7 +2221,7 @@ BOOST_FIXTURE_TEST_CASE(action_ordinal_failtest1, TESTER) { try { create_account(N(fail1) ); // <- make first action fails in the middle produce_blocks(1); - transaction_trace_ptr txn_trace = + transaction_trace_ptr txn_trace = CALL_TEST_FUNCTION_NO_THROW( *this, "test_action", "test_action_ordinal1", {}); BOOST_REQUIRE_EQUAL( validate(), true ); @@ -2231,10 +2231,10 @@ BOOST_FIXTURE_TEST_CASE(action_ordinal_failtest1, TESTER) { try { auto &atrace = txn_trace->action_traces; - // fails here after creating one notify action and one inline action + // fails here after creating one notify action and one inline action BOOST_REQUIRE_EQUAL((int)atrace[0].action_ordinal, 1); BOOST_REQUIRE_EQUAL((int)atrace[0].creator_action_ordinal, 0); - BOOST_REQUIRE_EQUAL((int)atrace[0].parent_action_ordinal, 0); + BOOST_REQUIRE_EQUAL((int)atrace[0].closest_unnotified_ancestor_action_ordinal, 0); BOOST_REQUIRE_EQUAL(atrace[0].receiver.value, N(testapi)); BOOST_REQUIRE_EQUAL(atrace[0].act.account.value, N(testapi)); BOOST_REQUIRE_EQUAL(atrace[0].act.name.value, TEST_METHOD("test_action", "test_action_ordinal1")); @@ -2245,7 +2245,7 @@ BOOST_FIXTURE_TEST_CASE(action_ordinal_failtest1, TESTER) { try { // not executed BOOST_REQUIRE_EQUAL((int)atrace[1].action_ordinal, 2); BOOST_REQUIRE_EQUAL((int)atrace[1].creator_action_ordinal, 1); - BOOST_REQUIRE_EQUAL((int)atrace[1].parent_action_ordinal, 1); + BOOST_REQUIRE_EQUAL((int)atrace[1].closest_unnotified_ancestor_action_ordinal, 1); BOOST_REQUIRE_EQUAL(atrace[1].receiver.value, N(bob)); BOOST_REQUIRE_EQUAL(atrace[1].act.account.value, N(testapi)); BOOST_REQUIRE_EQUAL(atrace[1].act.name.value, TEST_METHOD("test_action", "test_action_ordinal1")); @@ -2255,7 +2255,7 @@ BOOST_FIXTURE_TEST_CASE(action_ordinal_failtest1, TESTER) { try { // not executed BOOST_REQUIRE_EQUAL((int)atrace[2].action_ordinal, 3); 
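The "executed" / "not executed" comments in these tests lean on a detail the refactor makes explicit: a failed transaction still carries trace entries for inline actions and notifications that were scheduled but never ran, and, judging by the serialization earlier in this series packing bool(obj.obj.receipt), it is the optional receipt that tells the two apart. A minimal sketch of that check, with a stand-in trace type rather than the chain's real one:

#include <optional>

struct receipt_sketch { /* digest, sequence numbers, ... */ };

struct trace_entry_sketch {
   std::optional<receipt_sketch> receipt;   // empty for scheduled-but-not-executed actions
};

inline bool executed(const trace_entry_sketch& t) { return t.receipt.has_value(); }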
BOOST_REQUIRE_EQUAL((int)atrace[2].creator_action_ordinal, 1); - BOOST_REQUIRE_EQUAL((int)atrace[2].parent_action_ordinal, 1); + BOOST_REQUIRE_EQUAL((int)atrace[2].closest_unnotified_ancestor_action_ordinal, 1); BOOST_REQUIRE_EQUAL(atrace[2].receiver.value, N(testapi)); BOOST_REQUIRE_EQUAL(atrace[2].act.account.value, N(testapi)); BOOST_REQUIRE_EQUAL(atrace[2].act.name.value, TEST_METHOD("test_action", "test_action_ordinal2")); @@ -2289,7 +2289,7 @@ BOOST_FIXTURE_TEST_CASE(action_ordinal_failtest2, TESTER) { try { create_account(N(fail3) ); // <- make action 3 fails in the middle produce_blocks(1); - transaction_trace_ptr txn_trace = + transaction_trace_ptr txn_trace = CALL_TEST_FUNCTION_NO_THROW( *this, "test_action", "test_action_ordinal1", {}); BOOST_REQUIRE_EQUAL( validate(), true ); @@ -2302,7 +2302,7 @@ BOOST_FIXTURE_TEST_CASE(action_ordinal_failtest2, TESTER) { try { // executed BOOST_REQUIRE_EQUAL((int)atrace[0].action_ordinal, 1); BOOST_REQUIRE_EQUAL((int)atrace[0].creator_action_ordinal, 0); - BOOST_REQUIRE_EQUAL((int)atrace[0].parent_action_ordinal, 0); + BOOST_REQUIRE_EQUAL((int)atrace[0].closest_unnotified_ancestor_action_ordinal, 0); BOOST_REQUIRE_EQUAL(atrace[0].receiver.value, N(testapi)); BOOST_REQUIRE_EQUAL(atrace[0].act.account.value, N(testapi)); BOOST_REQUIRE_EQUAL(atrace[0].act.name.value, TEST_METHOD("test_action", "test_action_ordinal1")); @@ -2313,7 +2313,7 @@ BOOST_FIXTURE_TEST_CASE(action_ordinal_failtest2, TESTER) { try { // executed BOOST_REQUIRE_EQUAL((int)atrace[1].action_ordinal,2); BOOST_REQUIRE_EQUAL((int)atrace[1].creator_action_ordinal, 1); - BOOST_REQUIRE_EQUAL((int)atrace[1].parent_action_ordinal, 1); + BOOST_REQUIRE_EQUAL((int)atrace[1].closest_unnotified_ancestor_action_ordinal, 1); BOOST_REQUIRE_EQUAL(atrace[1].receiver.value, N(bob)); BOOST_REQUIRE_EQUAL(atrace[1].act.account.value, N(testapi)); BOOST_REQUIRE_EQUAL(atrace[1].act.name.value, TEST_METHOD("test_action", "test_action_ordinal1")); @@ -2323,7 +2323,7 @@ BOOST_FIXTURE_TEST_CASE(action_ordinal_failtest2, TESTER) { try { // not executed BOOST_REQUIRE_EQUAL((int)atrace[2].action_ordinal, 3); BOOST_REQUIRE_EQUAL((int)atrace[2].creator_action_ordinal, 1); - BOOST_REQUIRE_EQUAL((int)atrace[2].parent_action_ordinal, 1); + BOOST_REQUIRE_EQUAL((int)atrace[2].closest_unnotified_ancestor_action_ordinal, 1); BOOST_REQUIRE_EQUAL(atrace[2].receiver.value, N(testapi)); BOOST_REQUIRE_EQUAL(atrace[2].act.account.value, N(testapi)); BOOST_REQUIRE_EQUAL(atrace[2].act.name.value, TEST_METHOD("test_action", "test_action_ordinal2")); @@ -2333,7 +2333,7 @@ BOOST_FIXTURE_TEST_CASE(action_ordinal_failtest2, TESTER) { try { // not executed BOOST_REQUIRE_EQUAL((int)atrace[3].action_ordinal, 4); BOOST_REQUIRE_EQUAL((int)atrace[3].creator_action_ordinal, 1); - BOOST_REQUIRE_EQUAL((int)atrace[3].parent_action_ordinal, 1); + BOOST_REQUIRE_EQUAL((int)atrace[3].closest_unnotified_ancestor_action_ordinal, 1); BOOST_REQUIRE_EQUAL(atrace[3].receiver.value, N(testapi)); BOOST_REQUIRE_EQUAL(atrace[3].act.account.value, N(testapi)); BOOST_REQUIRE_EQUAL(atrace[3].act.name.value, TEST_METHOD("test_action", "test_action_ordinal3")); @@ -2343,7 +2343,7 @@ BOOST_FIXTURE_TEST_CASE(action_ordinal_failtest2, TESTER) { try { // hey exception is here BOOST_REQUIRE_EQUAL((int)atrace[4].action_ordinal, 5); BOOST_REQUIRE_EQUAL((int)atrace[4].creator_action_ordinal, 1); - BOOST_REQUIRE_EQUAL((int)atrace[4].parent_action_ordinal, 1); + BOOST_REQUIRE_EQUAL((int)atrace[4].closest_unnotified_ancestor_action_ordinal, 1); 
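As a mental model for the three ordinal fields these assertions keep checking (a hedged sketch with stand-in types, not the chain's actual action_trace): action_ordinal is the 1-based position of the trace itself, creator_action_ordinal points at the action or notification that spawned it (0 for a top-level action), and closest_unnotified_ancestor_action_ordinal walks the creator chain but skips notification handlers, which is why atrace[5] above has creator 2 yet closest unnotified ancestor 1.

#include <cstdint>

struct ordinal_sketch {
   uint32_t action_ordinal = 0;                              // position of this trace, 1-based
   uint32_t creator_action_ordinal = 0;                      // 0 => top-level action of the transaction
   uint32_t closest_unnotified_ancestor_action_ordinal = 0;  // nearest non-notification ancestor
};

// atrace[5] in the test above: an inline action created inside bob's notification
// handler (ordinal 2), whose nearest non-notification ancestor is action 1.
inline ordinal_sketch example_atrace5() { return {6, 2, 1}; }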
BOOST_REQUIRE_EQUAL(atrace[4].receiver.value, N(charlie)); BOOST_REQUIRE_EQUAL(atrace[4].act.account.value, N(testapi)); BOOST_REQUIRE_EQUAL(atrace[4].act.name.value, TEST_METHOD("test_action", "test_action_ordinal1")); @@ -2354,7 +2354,7 @@ BOOST_FIXTURE_TEST_CASE(action_ordinal_failtest2, TESTER) { try { // not executed BOOST_REQUIRE_EQUAL((int)atrace[5].action_ordinal, 6); BOOST_REQUIRE_EQUAL((int)atrace[5].creator_action_ordinal, 2); - BOOST_REQUIRE_EQUAL((int)atrace[5].parent_action_ordinal, 1); + BOOST_REQUIRE_EQUAL((int)atrace[5].closest_unnotified_ancestor_action_ordinal, 1); BOOST_REQUIRE_EQUAL(atrace[5].receiver.value, N(bob)); BOOST_REQUIRE_EQUAL(atrace[5].act.account.value, N(bob)); BOOST_REQUIRE_EQUAL(atrace[5].act.name.value, TEST_METHOD("test_action", "test_action_ordinal_foo")); @@ -2364,7 +2364,7 @@ BOOST_FIXTURE_TEST_CASE(action_ordinal_failtest2, TESTER) { try { // not executed BOOST_REQUIRE_EQUAL((int)atrace[6].action_ordinal, 7); BOOST_REQUIRE_EQUAL((int)atrace[6].creator_action_ordinal,2); - BOOST_REQUIRE_EQUAL((int)atrace[6].parent_action_ordinal, 1); + BOOST_REQUIRE_EQUAL((int)atrace[6].closest_unnotified_ancestor_action_ordinal, 1); BOOST_REQUIRE_EQUAL(atrace[6].receiver.value, N(david)); BOOST_REQUIRE_EQUAL(atrace[6].act.account.value, N(testapi)); BOOST_REQUIRE_EQUAL(atrace[6].act.name.value, TEST_METHOD("test_action", "test_action_ordinal1")); @@ -2374,7 +2374,7 @@ BOOST_FIXTURE_TEST_CASE(action_ordinal_failtest2, TESTER) { try { // not executed BOOST_REQUIRE_EQUAL((int)atrace[7].action_ordinal, 8); BOOST_REQUIRE_EQUAL((int)atrace[7].creator_action_ordinal, 5); - BOOST_REQUIRE_EQUAL((int)atrace[7].parent_action_ordinal, 1); + BOOST_REQUIRE_EQUAL((int)atrace[7].closest_unnotified_ancestor_action_ordinal, 1); BOOST_REQUIRE_EQUAL(atrace[7].receiver.value, N(charlie)); BOOST_REQUIRE_EQUAL(atrace[7].act.account.value, N(charlie)); BOOST_REQUIRE_EQUAL(atrace[7].act.name.value, TEST_METHOD("test_action", "test_action_ordinal_bar")); @@ -2408,7 +2408,7 @@ BOOST_FIXTURE_TEST_CASE(action_ordinal_failtest3, TESTER) { try { create_account(N(failnine) ); // <- make action 9 fails in the middle produce_blocks(1); - transaction_trace_ptr txn_trace = + transaction_trace_ptr txn_trace = CALL_TEST_FUNCTION_NO_THROW( *this, "test_action", "test_action_ordinal1", {}); BOOST_REQUIRE_EQUAL( validate(), true ); @@ -2421,7 +2421,7 @@ BOOST_FIXTURE_TEST_CASE(action_ordinal_failtest3, TESTER) { try { // executed BOOST_REQUIRE_EQUAL((int)atrace[0].action_ordinal, 1); BOOST_REQUIRE_EQUAL((int)atrace[0].creator_action_ordinal, 0); - BOOST_REQUIRE_EQUAL((int)atrace[0].parent_action_ordinal, 0); + BOOST_REQUIRE_EQUAL((int)atrace[0].closest_unnotified_ancestor_action_ordinal, 0); BOOST_REQUIRE_EQUAL(atrace[0].receiver.value, N(testapi)); BOOST_REQUIRE_EQUAL(atrace[0].act.account.value, N(testapi)); BOOST_REQUIRE_EQUAL(atrace[0].act.name.value, TEST_METHOD("test_action", "test_action_ordinal1")); @@ -2432,7 +2432,7 @@ BOOST_FIXTURE_TEST_CASE(action_ordinal_failtest3, TESTER) { try { // executed BOOST_REQUIRE_EQUAL((int)atrace[1].action_ordinal,2); BOOST_REQUIRE_EQUAL((int)atrace[1].creator_action_ordinal, 1); - BOOST_REQUIRE_EQUAL((int)atrace[1].parent_action_ordinal, 1); + BOOST_REQUIRE_EQUAL((int)atrace[1].closest_unnotified_ancestor_action_ordinal, 1); BOOST_REQUIRE_EQUAL(atrace[1].receiver.value, N(bob)); BOOST_REQUIRE_EQUAL(atrace[1].act.account.value, N(testapi)); BOOST_REQUIRE_EQUAL(atrace[1].act.name.value, TEST_METHOD("test_action", "test_action_ordinal1")); @@ -2442,7 +2442,7 @@ 
BOOST_FIXTURE_TEST_CASE(action_ordinal_failtest3, TESTER) { try { // executed BOOST_REQUIRE_EQUAL((int)atrace[2].action_ordinal, 3); BOOST_REQUIRE_EQUAL((int)atrace[2].creator_action_ordinal, 1); - BOOST_REQUIRE_EQUAL((int)atrace[2].parent_action_ordinal, 1); + BOOST_REQUIRE_EQUAL((int)atrace[2].closest_unnotified_ancestor_action_ordinal, 1); BOOST_REQUIRE_EQUAL(atrace[2].receiver.value, N(testapi)); BOOST_REQUIRE_EQUAL(atrace[2].act.account.value, N(testapi)); BOOST_REQUIRE_EQUAL(atrace[2].act.name.value, TEST_METHOD("test_action", "test_action_ordinal2")); @@ -2452,7 +2452,7 @@ BOOST_FIXTURE_TEST_CASE(action_ordinal_failtest3, TESTER) { try { // fails here BOOST_REQUIRE_EQUAL((int)atrace[3].action_ordinal, 4); BOOST_REQUIRE_EQUAL((int)atrace[3].creator_action_ordinal, 1); - BOOST_REQUIRE_EQUAL((int)atrace[3].parent_action_ordinal, 1); + BOOST_REQUIRE_EQUAL((int)atrace[3].closest_unnotified_ancestor_action_ordinal, 1); BOOST_REQUIRE_EQUAL(atrace[3].receiver.value, N(testapi)); BOOST_REQUIRE_EQUAL(atrace[3].act.account.value, N(testapi)); BOOST_REQUIRE_EQUAL(atrace[3].act.name.value, TEST_METHOD("test_action", "test_action_ordinal3")); @@ -2463,7 +2463,7 @@ BOOST_FIXTURE_TEST_CASE(action_ordinal_failtest3, TESTER) { try { // executed BOOST_REQUIRE_EQUAL((int)atrace[4].action_ordinal, 5); BOOST_REQUIRE_EQUAL((int)atrace[4].creator_action_ordinal, 1); - BOOST_REQUIRE_EQUAL((int)atrace[4].parent_action_ordinal, 1); + BOOST_REQUIRE_EQUAL((int)atrace[4].closest_unnotified_ancestor_action_ordinal, 1); BOOST_REQUIRE_EQUAL(atrace[4].receiver.value, N(charlie)); BOOST_REQUIRE_EQUAL(atrace[4].act.account.value, N(testapi)); BOOST_REQUIRE_EQUAL(atrace[4].act.name.value, TEST_METHOD("test_action", "test_action_ordinal1")); @@ -2473,7 +2473,7 @@ BOOST_FIXTURE_TEST_CASE(action_ordinal_failtest3, TESTER) { try { // not executed BOOST_REQUIRE_EQUAL((int)atrace[5].action_ordinal, 6); BOOST_REQUIRE_EQUAL((int)atrace[5].creator_action_ordinal, 2); - BOOST_REQUIRE_EQUAL((int)atrace[5].parent_action_ordinal, 1); + BOOST_REQUIRE_EQUAL((int)atrace[5].closest_unnotified_ancestor_action_ordinal, 1); BOOST_REQUIRE_EQUAL(atrace[5].receiver.value, N(bob)); BOOST_REQUIRE_EQUAL(atrace[5].act.account.value, N(bob)); BOOST_REQUIRE_EQUAL(atrace[5].act.name.value, TEST_METHOD("test_action", "test_action_ordinal_foo")); @@ -2483,7 +2483,7 @@ BOOST_FIXTURE_TEST_CASE(action_ordinal_failtest3, TESTER) { try { // executed BOOST_REQUIRE_EQUAL((int)atrace[6].action_ordinal, 7); BOOST_REQUIRE_EQUAL((int)atrace[6].creator_action_ordinal,2); - BOOST_REQUIRE_EQUAL((int)atrace[6].parent_action_ordinal, 1); + BOOST_REQUIRE_EQUAL((int)atrace[6].closest_unnotified_ancestor_action_ordinal, 1); BOOST_REQUIRE_EQUAL(atrace[6].receiver.value, N(david)); BOOST_REQUIRE_EQUAL(atrace[6].act.account.value, N(testapi)); BOOST_REQUIRE_EQUAL(atrace[6].act.name.value, TEST_METHOD("test_action", "test_action_ordinal1")); @@ -2493,7 +2493,7 @@ BOOST_FIXTURE_TEST_CASE(action_ordinal_failtest3, TESTER) { try { // not executed BOOST_REQUIRE_EQUAL((int)atrace[7].action_ordinal, 8); BOOST_REQUIRE_EQUAL((int)atrace[7].creator_action_ordinal, 5); - BOOST_REQUIRE_EQUAL((int)atrace[7].parent_action_ordinal, 1); + BOOST_REQUIRE_EQUAL((int)atrace[7].closest_unnotified_ancestor_action_ordinal, 1); BOOST_REQUIRE_EQUAL(atrace[7].receiver.value, N(charlie)); BOOST_REQUIRE_EQUAL(atrace[7].act.account.value, N(charlie)); BOOST_REQUIRE_EQUAL(atrace[7].act.name.value, TEST_METHOD("test_action", "test_action_ordinal_bar")); @@ -2503,7 +2503,7 @@ 
BOOST_FIXTURE_TEST_CASE(action_ordinal_failtest3, TESTER) { try { // executed BOOST_REQUIRE_EQUAL((int)atrace[8].action_ordinal, 9); BOOST_REQUIRE_EQUAL((int)atrace[8].creator_action_ordinal, 3); - BOOST_REQUIRE_EQUAL((int)atrace[8].parent_action_ordinal, 3); + BOOST_REQUIRE_EQUAL((int)atrace[8].closest_unnotified_ancestor_action_ordinal, 3); BOOST_REQUIRE_EQUAL(atrace[8].receiver.value, N(david)); BOOST_REQUIRE_EQUAL(atrace[8].act.account.value, N(testapi)); BOOST_REQUIRE_EQUAL(atrace[8].act.name.value, TEST_METHOD("test_action", "test_action_ordinal2")); @@ -2513,7 +2513,7 @@ BOOST_FIXTURE_TEST_CASE(action_ordinal_failtest3, TESTER) { try { // executed BOOST_REQUIRE_EQUAL((int)atrace[9].action_ordinal, 10); BOOST_REQUIRE_EQUAL((int)atrace[9].creator_action_ordinal, 3); - BOOST_REQUIRE_EQUAL((int)atrace[9].parent_action_ordinal, 3); + BOOST_REQUIRE_EQUAL((int)atrace[9].closest_unnotified_ancestor_action_ordinal, 3); BOOST_REQUIRE_EQUAL(atrace[9].receiver.value, N(erin)); BOOST_REQUIRE_EQUAL(atrace[9].act.account.value, N(testapi)); BOOST_REQUIRE_EQUAL(atrace[9].act.name.value, TEST_METHOD("test_action", "test_action_ordinal2")); @@ -2523,7 +2523,7 @@ BOOST_FIXTURE_TEST_CASE(action_ordinal_failtest3, TESTER) { try { // executed BOOST_REQUIRE_EQUAL((int)atrace[10].action_ordinal, 11); BOOST_REQUIRE_EQUAL((int)atrace[10].creator_action_ordinal, 3); - BOOST_REQUIRE_EQUAL((int)atrace[10].parent_action_ordinal, 3); + BOOST_REQUIRE_EQUAL((int)atrace[10].closest_unnotified_ancestor_action_ordinal, 3); BOOST_REQUIRE_EQUAL(atrace[10].receiver.value, N(testapi)); BOOST_REQUIRE_EQUAL(atrace[10].act.account.value, N(testapi)); BOOST_REQUIRE_EQUAL(atrace[10].act.name.value, TEST_METHOD("test_action", "test_action_ordinal4")); From a2d19275c4e531332370d27b6d822ddb8fdd4961 Mon Sep 17 00:00:00 2001 From: arhag Date: Wed, 10 Apr 2019 18:18:12 -0400 Subject: [PATCH 334/680] also propagate bad block exceptions out of push_transaction for consistency --- libraries/chain/controller.cpp | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp index 47bc6ad7e7f..5300e686e74 100644 --- a/libraries/chain/controller.cpp +++ b/libraries/chain/controller.cpp @@ -1312,6 +1312,10 @@ struct controller_impl { unapplied_transactions.erase( trx->signed_id ); } return trace; + } catch( const disallowed_transaction_extensions_bad_block_exception& ) { + throw; + } catch( const protocol_feature_bad_block_exception& ) { + throw; } catch (const fc::exception& e) { trace->except = e; trace->except_ptr = std::current_exception(); From 0d5f3d768c87faafbea5ab89e2ee27bc756756d1 Mon Sep 17 00:00:00 2001 From: arhag Date: Wed, 10 Apr 2019 18:37:14 -0400 Subject: [PATCH 335/680] remove closest_unnotified_ancestor_action_ordinal from state_history_plugin --- .../eosio/state_history_plugin/state_history_serialization.hpp | 1 - plugins/state_history_plugin/state_history_plugin_abi.cpp | 1 - 2 files changed, 2 deletions(-) diff --git a/plugins/state_history_plugin/include/eosio/state_history_plugin/state_history_serialization.hpp b/plugins/state_history_plugin/include/eosio/state_history_plugin/state_history_serialization.hpp index 99356baea25..9b22a0ab6c2 100644 --- a/plugins/state_history_plugin/include/eosio/state_history_plugin/state_history_serialization.hpp +++ b/plugins/state_history_plugin/include/eosio/state_history_plugin/state_history_serialization.hpp @@ -482,7 +482,6 @@ datastream& operator<<(datastream& ds, const history_serial_wrapper(obj.obj.action_ordinal)); 
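A note on [PATCH 334/680] just above: C++ selects the first matching catch clause, so the two bad-block exception types have to be caught and rethrown ahead of the generic fc::exception handler that records failures on the trace; otherwise push_transaction would swallow evidence that the block itself is invalid. A compilable sketch of the ordering, with stand-in exception and trace types:

#include <stdexcept>
#include <string>

struct bad_block_error : std::runtime_error { using std::runtime_error::runtime_error; };
struct trace_sketch { std::string except; };

template <typename F>
void run_and_trace(F&& f, trace_sketch& trace) {
   try {
      f();
   } catch (const bad_block_error&) {
      throw;                               // propagate: the whole block must be rejected
   } catch (const std::exception& e) {
      trace.except = e.what();             // ordinary failures are recorded on the trace
   }
}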
fc::raw::pack(ds, as_type(obj.obj.creator_action_ordinal)); - fc::raw::pack(ds, as_type(obj.obj.closest_unnotified_ancestor_action_ordinal)); fc::raw::pack(ds, bool(obj.obj.receipt)); if (obj.obj.receipt) { fc::raw::pack(ds, make_history_serial_wrapper(obj.db, as_type(*obj.obj.receipt))); diff --git a/plugins/state_history_plugin/state_history_plugin_abi.cpp b/plugins/state_history_plugin/state_history_plugin_abi.cpp index b0a2f5b79e1..f53fe3759e3 100644 --- a/plugins/state_history_plugin/state_history_plugin_abi.cpp +++ b/plugins/state_history_plugin/state_history_plugin_abi.cpp @@ -95,7 +95,6 @@ extern const char* const state_history_plugin_abi = R"({ "name": "action_trace_v0", "fields": [ { "name": "action_ordinal", "type": "varuint32" }, { "name": "creator_action_ordinal", "type": "varuint32" }, - { "name": "closest_unnotified_ancestor_action_ordinal", "type": "varuint32" }, { "name": "receipt", "type": "action_receipt?" }, { "name": "receiver", "type": "name" }, { "name": "act", "type": "action" }, From f4e908e11158e1611a0296c004e93b01988550b6 Mon Sep 17 00:00:00 2001 From: Todd Fleming Date: Wed, 10 Apr 2019 18:47:35 -0400 Subject: [PATCH 336/680] serialize state_history_log_header --- .../state_history_log.hpp | 108 +++++++++++------- .../state_history_plugin.cpp | 6 +- 2 files changed, 69 insertions(+), 45 deletions(-) diff --git a/plugins/state_history_plugin/include/eosio/state_history_plugin/state_history_log.hpp b/plugins/state_history_plugin/include/eosio/state_history_plugin/state_history_log.hpp index 96bfd1ec552..c8634894d2c 100644 --- a/plugins/state_history_plugin/include/eosio/state_history_plugin/state_history_log.hpp +++ b/plugins/state_history_plugin/include/eosio/state_history_plugin/state_history_log.hpp @@ -8,6 +8,7 @@ #include #include +#include #include #include #include @@ -26,23 +27,23 @@ namespace eosio { * +----------------+------------------+-----+----------------+ * * each entry: - * uint32_t block_num - * block_id_type block_id - * uint64_t size of payload - * uint8_t version - * payload + * state_history_log_header + * payload */ -// todo: look into switching this to serialization instead of memcpy -// todo: consider reworking versioning -// todo: consider dropping block_num since it's included in block_id -// todo: currently only checks version on the first record. 
Need in recover_blocks +inline uint64_t ship_magic(uint32_t version) { return N(ship) | version; } +inline bool is_ship(uint64_t magic) { return (magic & 0xffff'ffff'0000'0000) == N(ship); } +inline uint32_t get_ship_version(uint64_t magic) { return magic; } +inline bool is_ship_supported_version(uint64_t magic) { return is_ship(magic) && get_ship_version(magic) == 0; } + struct state_history_log_header { - uint32_t block_num = 0; - chain::block_id_type block_id; + uint64_t magic = ship_magic(0); + chain::block_id_type block_id = {}; uint64_t payload_size = 0; - uint8_t version = 0; }; +static const int state_history_log_header_serial_size = sizeof(state_history_log_header::magic) + + sizeof(state_history_log_header::block_id) + + sizeof(state_history_log_header::payload_size); class state_history_log { private: @@ -67,39 +68,58 @@ class state_history_log { uint32_t begin_block() const { return _begin_block; } uint32_t end_block() const { return _end_block; } + void read_header(state_history_log_header& header, bool assert_version = true) { + char bytes[state_history_log_header_serial_size]; + log.read(bytes, sizeof(bytes)); + fc::datastream ds(bytes, sizeof(bytes)); + fc::raw::unpack(ds, header); + EOS_ASSERT(!ds.remaining(), chain::plugin_exception, "state_history_log_header_serial_size mismatch"); + EOS_ASSERT(!assert_version || is_ship_supported_version(header.magic), chain::plugin_exception, + "corrupt ${name}.log (0)", ("name", name)); + } + + void write_header(const state_history_log_header& header) { + char bytes[state_history_log_header_serial_size]; + fc::datastream ds(bytes, sizeof(bytes)); + fc::raw::pack(ds, header); + EOS_ASSERT(!ds.remaining(), chain::plugin_exception, "state_history_log_header_serial_size mismatch"); + log.write(bytes, sizeof(bytes)); + } + template void write_entry(const state_history_log_header& header, const chain::block_id_type& prev_id, F write_payload) { - EOS_ASSERT(_begin_block == _end_block || header.block_num <= _end_block, chain::plugin_exception, + auto block_num = chain::block_header::num_from_id(header.block_id); + EOS_ASSERT(_begin_block == _end_block || block_num <= _end_block, chain::plugin_exception, "missed a block in ${name}.log", ("name", name)); - if (_begin_block != _end_block && header.block_num > _begin_block) { - if (header.block_num == _end_block) { + if (_begin_block != _end_block && block_num > _begin_block) { + if (block_num == _end_block) { EOS_ASSERT(prev_id == last_block_id, chain::plugin_exception, "missed a fork change in ${name}.log", ("name", name)); } else { state_history_log_header prev; - get_entry(header.block_num - 1, prev); + get_entry(block_num - 1, prev); EOS_ASSERT(prev_id == prev.block_id, chain::plugin_exception, "missed a fork change in ${name}.log", ("name", name)); } } - if (header.block_num < _end_block) - truncate(header.block_num); + if (block_num < _end_block) + truncate(block_num); log.seekg(0, std::ios_base::end); uint64_t pos = log.tellg(); - log.write((char*)&header, sizeof(header)); + write_header(header); write_payload(log); uint64_t end = log.tellg(); - EOS_ASSERT(end == pos + sizeof(header) + header.payload_size, chain::plugin_exception, + EOS_ASSERT(end == pos + state_history_log_header_serial_size + header.payload_size, chain::plugin_exception, "wrote payload with incorrect size to ${name}.log", ("name", name)); log.write((char*)&pos, sizeof(pos)); index.seekg(0, std::ios_base::end); index.write((char*)&pos, sizeof(pos)); if (_begin_block == _end_block) - _begin_block = header.block_num; - 
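The magic scheme introduced above packs a format tag into the high 32 bits of a uint64_t and a version into the low 32 bits; N(ship) is an EOSIO name constant whose low bits are zero, which is what makes the bitwise OR safe. A self-contained sketch of the same idea, using a plain ASCII tag as a stand-in for N(ship):

#include <cstdint>

constexpr uint64_t tag = 0x73686970ull << 32;                // "ship" in ASCII, stand-in for N(ship)
constexpr uint64_t make_magic(uint32_t version) { return tag | version; }
constexpr bool     has_tag(uint64_t magic)      { return (magic & 0xffff'ffff'0000'0000ull) == tag; }
constexpr uint32_t version_of(uint64_t magic)   { return static_cast<uint32_t>(magic); }

static_assert(has_tag(make_magic(0)), "tag survives");
static_assert(version_of(make_magic(7)) == 7, "version round-trips");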
_end_block = header.block_num + 1; + _begin_block = block_num; + _end_block = block_num + 1; last_block_id = header.block_id; } @@ -108,7 +128,7 @@ class state_history_log { EOS_ASSERT(block_num >= _begin_block && block_num < _end_block, chain::plugin_exception, "read non-existing block in ${name}.log", ("name", name)); log.seekg(get_pos(block_num)); - log.read((char*)&header, sizeof(header)); + read_header(header); return log; } @@ -124,17 +144,18 @@ class state_history_log { uint64_t suffix; log.seekg(size - sizeof(suffix)); log.read((char*)&suffix, sizeof(suffix)); - if (suffix > size || suffix + sizeof(header) > size) { + if (suffix > size || suffix + state_history_log_header_serial_size > size) { elog("corrupt ${name}.log (2)", ("name", name)); return false; } log.seekg(suffix); - log.read((char*)&header, sizeof(header)); - if (suffix + sizeof(header) + header.payload_size + sizeof(suffix) != size) { + read_header(header, false); + if (!is_ship_supported_version(header.magic) || + suffix + state_history_log_header_serial_size + header.payload_size + sizeof(suffix) != size) { elog("corrupt ${name}.log (3)", ("name", name)); return false; } - _end_block = header.block_num + 1; + _end_block = chain::block_header::num_from_id(header.block_id) + 1; last_block_id = header.block_id; if (_begin_block >= _end_block) { elog("corrupt ${name}.log (4)", ("name", name)); @@ -149,18 +170,19 @@ class state_history_log { uint32_t num_found = 0; while (true) { state_history_log_header header; - if (pos + sizeof(header) > size) + if (pos + state_history_log_header_serial_size > size) break; log.seekg(pos); - log.read((char*)&header, sizeof(header)); + read_header(header, false); uint64_t suffix; - if (header.payload_size > size || pos + sizeof(header) + header.payload_size + sizeof(suffix) > size) + if (!is_ship_supported_version(header.magic) || header.payload_size > size || + pos + state_history_log_header_serial_size + header.payload_size + sizeof(suffix) > size) break; - log.seekg(pos + sizeof(header) + header.payload_size); + log.seekg(pos + state_history_log_header_serial_size + header.payload_size); log.read((char*)&suffix, sizeof(suffix)); if (suffix != pos) break; - pos = pos + sizeof(header) + header.payload_size + sizeof(suffix); + pos = pos + state_history_log_header_serial_size + header.payload_size + sizeof(suffix); if (!(++num_found % 10000)) { printf("%10u blocks found, log pos=%12llu\r", (unsigned)num_found, (unsigned long long)pos); fflush(stdout); @@ -176,13 +198,14 @@ class state_history_log { log.open(log_filename, std::ios_base::binary | std::ios_base::in | std::ios_base::out | std::ios_base::app); log.seekg(0, std::ios_base::end); uint64_t size = log.tellg(); - if (size >= sizeof(state_history_log_header)) { + if (size >= state_history_log_header_serial_size) { state_history_log_header header; log.seekg(0); - log.read((char*)&header, sizeof(header)); - EOS_ASSERT(header.version == 0 && sizeof(header) + header.payload_size + sizeof(uint64_t) <= size, + read_header(header, false); + EOS_ASSERT(is_ship_supported_version(header.magic) && + state_history_log_header_serial_size + header.payload_size + sizeof(uint64_t) <= size, chain::plugin_exception, "corrupt ${name}.log (1)", ("name", name)); - _begin_block = header.block_num; + _begin_block = chain::block_header::num_from_id(header.block_id); last_block_id = header.block_id; if (!get_last_block(size)) recover_blocks(size); @@ -208,13 +231,14 @@ class state_history_log { uint32_t num_found = 0; while (pos < size) { 
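Both get_last_block and recover_blocks above depend on one framing invariant: every log entry is header, payload, then a uint64_t suffix holding the entry's own start offset, so the file can be validated forwards record by record or entered from the back. A small sketch of the per-entry check, with sizes passed in rather than read from the stream:

#include <cstdint>

// true if an entry starting at pos, with the given header/payload sizes, fits in
// the file and its trailing suffix points back at pos (the invariant the
// recovery scan relies on)
inline bool entry_is_sane(uint64_t pos, uint64_t header_size, uint64_t payload_size,
                          uint64_t suffix, uint64_t file_size) {
   const uint64_t end = pos + header_size + payload_size + sizeof(uint64_t);
   return end <= file_size && suffix == pos;
}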
state_history_log_header header; - EOS_ASSERT(pos + sizeof(header) <= size, chain::plugin_exception, "corrupt ${name}.log (6)", ("name", name)); + EOS_ASSERT(pos + state_history_log_header_serial_size <= size, chain::plugin_exception, + "corrupt ${name}.log (6)", ("name", name)); log.seekg(pos); - log.read((char*)&header, sizeof(header)); - uint64_t suffix_pos = pos + sizeof(header) + header.payload_size; + read_header(header, false); + uint64_t suffix_pos = pos + state_history_log_header_serial_size + header.payload_size; uint64_t suffix; - EOS_ASSERT(suffix_pos + sizeof(suffix) <= size, chain::plugin_exception, "corrupt ${name}.log (7)", - ("name", name)); + EOS_ASSERT(is_ship_supported_version(header.magic) && suffix_pos + sizeof(suffix) <= size, + chain::plugin_exception, "corrupt ${name}.log (7)", ("name", name)); log.seekg(suffix_pos); log.read((char*)&suffix, sizeof(suffix)); // ilog("block ${b} at ${pos}-${end} suffix=${suffix} file_size=${fs}", @@ -264,3 +288,5 @@ class state_history_log { }; // state_history_log } // namespace eosio + +FC_REFLECT(eosio::state_history_log_header, (magic)(block_id)(payload_size)) diff --git a/plugins/state_history_plugin/state_history_plugin.cpp b/plugins/state_history_plugin/state_history_plugin.cpp index 0df317198d7..2680165b5c0 100644 --- a/plugins/state_history_plugin/state_history_plugin.cpp +++ b/plugins/state_history_plugin/state_history_plugin.cpp @@ -392,8 +392,7 @@ struct state_history_plugin_impl : std::enable_shared_from_thisblock->block_num(), - .block_id = block_state->block->id(), + state_history_log_header header{.block_id = block_state->block->id(), .payload_size = sizeof(uint32_t) + traces_bin.size()}; trace_log->write_entry(header, block_state->block->previous, [&](auto& stream) { uint32_t s = (uint32_t)traces_bin.size(); @@ -487,8 +486,7 @@ struct state_history_plugin_impl : std::enable_shared_from_thisblock->block_num(), - .block_id = block_state->block->id(), + state_history_log_header header{.block_id = block_state->block->id(), .payload_size = sizeof(uint32_t) + deltas_bin.size()}; chain_state_log->write_entry(header, block_state->block->previous, [&](auto& stream) { uint32_t s = (uint32_t)deltas_bin.size(); From 5636b2e196b9b04a18f2874952cc385ac237e814 Mon Sep 17 00:00:00 2001 From: Todd Fleming Date: Wed, 10 Apr 2019 18:52:00 -0400 Subject: [PATCH 337/680] ship: remove parent_action_ordinal --- .../eosio/state_history_plugin/state_history_serialization.hpp | 1 - plugins/state_history_plugin/state_history_plugin_abi.cpp | 1 - 2 files changed, 2 deletions(-) diff --git a/plugins/state_history_plugin/include/eosio/state_history_plugin/state_history_serialization.hpp b/plugins/state_history_plugin/include/eosio/state_history_plugin/state_history_serialization.hpp index 3251abef7fb..9b22a0ab6c2 100644 --- a/plugins/state_history_plugin/include/eosio/state_history_plugin/state_history_serialization.hpp +++ b/plugins/state_history_plugin/include/eosio/state_history_plugin/state_history_serialization.hpp @@ -482,7 +482,6 @@ datastream& operator<<(datastream& ds, const history_serial_wrapper(obj.obj.action_ordinal)); fc::raw::pack(ds, as_type(obj.obj.creator_action_ordinal)); - fc::raw::pack(ds, as_type(obj.obj.parent_action_ordinal)); fc::raw::pack(ds, bool(obj.obj.receipt)); if (obj.obj.receipt) { fc::raw::pack(ds, make_history_serial_wrapper(obj.db, as_type(*obj.obj.receipt))); diff --git a/plugins/state_history_plugin/state_history_plugin_abi.cpp b/plugins/state_history_plugin/state_history_plugin_abi.cpp index 
b35882a7b83..f53fe3759e3 100644 --- a/plugins/state_history_plugin/state_history_plugin_abi.cpp +++ b/plugins/state_history_plugin/state_history_plugin_abi.cpp @@ -95,7 +95,6 @@ extern const char* const state_history_plugin_abi = R"({ "name": "action_trace_v0", "fields": [ { "name": "action_ordinal", "type": "varuint32" }, { "name": "creator_action_ordinal", "type": "varuint32" }, - { "name": "parent_action_ordinal", "type": "varuint32" }, { "name": "receipt", "type": "action_receipt?" }, { "name": "receiver", "type": "name" }, { "name": "act", "type": "action" }, From c3817b3f965aaf3d7ac3be5809893ef17aa770f6 Mon Sep 17 00:00:00 2001 From: arhag Date: Wed, 10 Apr 2019 19:06:40 -0400 Subject: [PATCH 338/680] add error_code to transaction and action traces #6898 --- libraries/chain/apply_context.cpp | 8 +++- libraries/chain/controller.cpp | 40 ++++++++++++++++++- .../chain/include/eosio/chain/controller.hpp | 2 + libraries/chain/include/eosio/chain/trace.hpp | 6 ++- .../state_history_serialization.hpp | 2 + .../state_history_plugin_abi.cpp | 4 +- unittests/api_tests.cpp | 11 +++++ 7 files changed, 68 insertions(+), 5 deletions(-) diff --git a/libraries/chain/apply_context.cpp b/libraries/chain/apply_context.cpp index ba1f2b2d521..616c8e31dac 100644 --- a/libraries/chain/apply_context.cpp +++ b/libraries/chain/apply_context.cpp @@ -82,7 +82,13 @@ void apply_context::exec_one() } catch( const wasm_exit& ) {} } } FC_RETHROW_EXCEPTIONS( warn, "pending console output: ${console}", ("console", _pending_console_output) ) - } catch( fc::exception& e ) { + } catch( const eosio_assert_code_exception& e ) { + action_trace& trace = trx_context.get_action_trace( action_ordinal ); + trace.error_code = controller::convert_exception_to_error_code( e ); + trace.except = e; + finalize_trace( trace, start ); + throw; + } catch( const fc::exception& e ) { action_trace& trace = trx_context.get_action_trace( action_ordinal ); trace.except = e; finalize_trace( trace, start ); diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp index c3ee723308b..bccad2c784b 100644 --- a/libraries/chain/controller.cpp +++ b/libraries/chain/controller.cpp @@ -981,6 +981,11 @@ struct controller_impl { return trace; } catch( const protocol_feature_bad_block_exception& ) { throw; + } catch( const eosio_assert_code_exception& e ) { + cpu_time_to_bill_us = trx_context.update_billed_cpu_time( fc::time_point::now() ); + trace->error_code = controller::convert_exception_to_error_code( e ); + trace->except = e; + trace->except_ptr = std::current_exception(); } catch( const fc::exception& e ) { cpu_time_to_bill_us = trx_context.update_billed_cpu_time( fc::time_point::now() ); trace->except = e; @@ -1123,6 +1128,11 @@ struct controller_impl { return trace; } catch( const protocol_feature_bad_block_exception& ) { throw; + } catch( const eosio_assert_code_exception& e ) { + trace->error_code = controller::convert_exception_to_error_code( e ); + trace->except = e; + trace->except_ptr = std::current_exception(); + trace->elapsed = fc::time_point::now() - trx_context.start; } catch( const fc::exception& e ) { cpu_time_to_bill_us = trx_context.update_billed_cpu_time( fc::time_point::now() ); trace->except = e; @@ -1314,7 +1324,11 @@ struct controller_impl { unapplied_transactions.erase( trx->signed_id ); } return trace; - } catch (const fc::exception& e) { + } catch( const eosio_assert_code_exception& e ) { + trace->error_code = controller::convert_exception_to_error_code( e ); + trace->except = e; + trace->except_ptr = 
std::current_exception(); + } catch( const fc::exception& e ) { trace->except = e; trace->except_ptr = std::current_exception(); } @@ -2983,6 +2997,30 @@ bool controller::all_subjective_mitigations_disabled()const { return my->conf.disable_all_subjective_mitigations; } +fc::optional controller::convert_exception_to_error_code( const eosio_assert_code_exception& e ) { + const auto& logs = e.get_log(); + + if( logs.size() == 0 ) return {}; + + const auto msg = logs[0].get_message(); + + auto pos = msg.find( ": " ); + + if( pos == std::string::npos || (pos + 2) >= msg.size() ) return {}; + + pos += 2; + + uint64_t error_code = 0; + + try { + error_code = std::strtoull( msg.c_str() + pos, nullptr, 10 ); + } catch( ... ) { + return {}; + } + + return error_code; +} + /// Protocol feature activation handlers: template<> diff --git a/libraries/chain/include/eosio/chain/controller.hpp b/libraries/chain/include/eosio/chain/controller.hpp index fc6d28132f5..31b6ae4b830 100644 --- a/libraries/chain/include/eosio/chain/controller.hpp +++ b/libraries/chain/include/eosio/chain/controller.hpp @@ -275,6 +275,8 @@ namespace eosio { namespace chain { void add_to_ram_correction( account_name account, uint64_t ram_bytes ); bool all_subjective_mitigations_disabled()const; + static fc::optional convert_exception_to_error_code( const eosio_assert_code_exception& e ); + signal pre_accepted_block; signal accepted_block_header; signal accepted_block; diff --git a/libraries/chain/include/eosio/chain/trace.hpp b/libraries/chain/include/eosio/chain/trace.hpp index e60b8333a31..0db1be762ff 100644 --- a/libraries/chain/include/eosio/chain/trace.hpp +++ b/libraries/chain/include/eosio/chain/trace.hpp @@ -47,6 +47,7 @@ namespace eosio { namespace chain { fc::optional producer_block_id; flat_set account_ram_deltas; fc::optional except; + fc::optional error_code; }; struct transaction_trace { @@ -63,6 +64,7 @@ namespace eosio { namespace chain { transaction_trace_ptr failed_dtrx_trace; fc::optional except; + fc::optional error_code; std::exception_ptr except_ptr; }; @@ -74,8 +76,8 @@ FC_REFLECT( eosio::chain::account_delta, FC_REFLECT( eosio::chain::action_trace, (action_ordinal)(creator_action_ordinal)(closest_unnotified_ancestor_action_ordinal)(receipt) (receiver)(act)(context_free)(elapsed)(console)(trx_id)(block_num)(block_time) - (producer_block_id)(account_ram_deltas)(except) ) + (producer_block_id)(account_ram_deltas)(except)(error_code) ) FC_REFLECT( eosio::chain::transaction_trace, (id)(block_num)(block_time)(producer_block_id) (receipt)(elapsed)(net_usage)(scheduled) - (action_traces)(account_ram_delta)(failed_dtrx_trace)(except) ) + (action_traces)(account_ram_delta)(failed_dtrx_trace)(except)(error_code) ) diff --git a/plugins/state_history_plugin/include/eosio/state_history_plugin/state_history_serialization.hpp b/plugins/state_history_plugin/include/eosio/state_history_plugin/state_history_serialization.hpp index 9b22a0ab6c2..5893c49dde4 100644 --- a/plugins/state_history_plugin/include/eosio/state_history_plugin/state_history_serialization.hpp +++ b/plugins/state_history_plugin/include/eosio/state_history_plugin/state_history_serialization.hpp @@ -497,6 +497,7 @@ datastream& operator<<(datastream& ds, const history_serial_wrapperto_string(); fc::raw::pack(ds, as_type>(e)); + fc::raw::pack(ds, as_type>(obj.obj.error_code)); return ds; } @@ -533,6 +534,7 @@ datastream& operator<<(datastream& if (obj.obj.except) e = obj.obj.except->to_string(); fc::raw::pack(ds, as_type>(e)); + fc::raw::pack(ds, 
as_type>(obj.obj.error_code)); fc::raw::pack(ds, bool(obj.obj.failed_dtrx_trace)); if (obj.obj.failed_dtrx_trace) { diff --git a/plugins/state_history_plugin/state_history_plugin_abi.cpp b/plugins/state_history_plugin/state_history_plugin_abi.cpp index f53fe3759e3..74c7f8d7520 100644 --- a/plugins/state_history_plugin/state_history_plugin_abi.cpp +++ b/plugins/state_history_plugin/state_history_plugin_abi.cpp @@ -102,7 +102,8 @@ extern const char* const state_history_plugin_abi = R"({ { "name": "elapsed", "type": "int64" }, { "name": "console", "type": "string" }, { "name": "account_ram_deltas", "type": "account_delta[]" }, - { "name": "except", "type": "string?" } + { "name": "except", "type": "string?" }, + { "name": "error_code", "type": "uint64?" } ] }, { @@ -117,6 +118,7 @@ extern const char* const state_history_plugin_abi = R"({ { "name": "action_traces", "type": "action_trace[]" }, { "name": "account_ram_delta", "type": "account_delta?" }, { "name": "except", "type": "string?" }, + { "name": "error_code", "type": "uint64?" }, { "name": "failed_dtrx_trace", "type": "transaction_trace?" } ] }, diff --git a/unittests/api_tests.cpp b/unittests/api_tests.cpp index 151bf82ebcf..f2e20a948dc 100644 --- a/unittests/api_tests.cpp +++ b/unittests/api_tests.cpp @@ -2046,6 +2046,17 @@ BOOST_FIXTURE_TEST_CASE(eosio_assert_code_tests, TESTER) { try { BOOST_CHECK_EXCEPTION( CALL_TEST_FUNCTION( *this, "test_action", "test_assert_code", fc::raw::pack((uint64_t)42) ), eosio_assert_code_exception, eosio_assert_code_is(42) ); + + auto trace = CALL_TEST_FUNCTION_NO_THROW( *this, "test_action", "test_assert_code", fc::raw::pack((uint64_t)42) ); + BOOST_REQUIRE( trace ); + BOOST_REQUIRE( trace->except ); + BOOST_REQUIRE( trace->error_code ); + BOOST_REQUIRE_EQUAL( *trace->error_code, 42 ); + BOOST_REQUIRE_EQUAL( trace->action_traces.size(), 1 ); + BOOST_REQUIRE( trace->action_traces[0].except ); + BOOST_REQUIRE( trace->action_traces[0].error_code ); + BOOST_REQUIRE_EQUAL( *trace->action_traces[0].error_code, 42 ); + produce_block(); auto omsg1 = abis.get_error_message(1); From a91d0410a9c5805f048572eb97a9b6d7692afa5d Mon Sep 17 00:00:00 2001 From: arhag Date: Wed, 10 Apr 2019 21:12:41 -0400 Subject: [PATCH 339/680] implement FORWARD_SETCODE protocol feature #6988 --- libraries/chain/apply_context.cpp | 10 +++++++--- .../include/eosio/chain/protocol_feature_manager.hpp | 3 ++- libraries/chain/protocol_feature_manager.cpp | 11 +++++++++++ 3 files changed, 20 insertions(+), 4 deletions(-) diff --git a/libraries/chain/apply_context.cpp b/libraries/chain/apply_context.cpp index 4eefb4824b3..ded5df63936 100644 --- a/libraries/chain/apply_context.cpp +++ b/libraries/chain/apply_context.cpp @@ -70,9 +70,13 @@ void apply_context::exec_one() (*native)( *this ); } - if( a.code.size() > 0 - && !(act->account == config::system_account_name && act->name == N( setcode ) && - receiver == config::system_account_name) ) { + if( (a.code.size() > 0) && + ( control.is_builtin_activated( builtin_protocol_feature_t::forward_setcode ) + || !( act->account == config::system_account_name + && act->name == N( setcode ) + && receiver == config::system_account_name ) + ) + ) { if( trx_context.enforce_whiteblacklist && control.is_producing_block() ) { control.check_contract_list( receiver ); control.check_action_list( act->account, act->name ); diff --git a/libraries/chain/include/eosio/chain/protocol_feature_manager.hpp b/libraries/chain/include/eosio/chain/protocol_feature_manager.hpp index 118709a9b17..18a52f7e6b8 100644 --- 
a/libraries/chain/include/eosio/chain/protocol_feature_manager.hpp +++ b/libraries/chain/include/eosio/chain/protocol_feature_manager.hpp @@ -21,7 +21,8 @@ enum class builtin_protocol_feature_t : uint32_t { fix_linkauth_restriction, disallow_empty_producer_schedule, restrict_action_to_self, - only_bill_first_authorizer + only_bill_first_authorizer, + forward_setcode }; struct protocol_feature_subjective_restrictions { diff --git a/libraries/chain/protocol_feature_manager.cpp b/libraries/chain/protocol_feature_manager.cpp index 56cefb770be..18071049fac 100644 --- a/libraries/chain/protocol_feature_manager.cpp +++ b/libraries/chain/protocol_feature_manager.cpp @@ -110,6 +110,17 @@ This protocol feature removes that bypass. Builtin protocol feature: ONLY_BILL_FIRST_AUTHORIZER Adds CPU and network bandwidth usage to only the first authorizer of a transaction. +*/ + {} + } ) + ( builtin_protocol_feature_t::forward_setcode, builtin_protocol_feature_spec{ + "FORWARD_SETCODE", + fc::variant("898082c59f921d0042e581f00a59d5ceb8be6f1d9c7a45b6f07c0e26eaee0222").as(), + // SHA256 hash of the raw message below within the comment delimiters (do not modify message below). +/* +Builtin protocol feature: FORWARD_SETCODE + +Forward eosio::setcode actions to the WebAssembly code deployed on the eosio account. */ {} } ) From 20288072091dd973067ec558ebd8c3f3d3fc3df0 Mon Sep 17 00:00:00 2001 From: arhag Date: Wed, 10 Apr 2019 22:29:06 -0400 Subject: [PATCH 340/680] Added protocol_feature_tests/forward_setcode_test unit test to test the FORWARD_SETCODE protocol feature. #6988 Added a new test contract, reject_all, to enable the unit test. --- unittests/contracts.hpp.in | 1 + unittests/protocol_feature_tests.cpp | 60 ++++++++++++++++++ unittests/test-contracts/CMakeLists.txt | 1 + unittests/test-contracts/README.md | 2 +- .../test-contracts/reject_all/CMakeLists.txt | 5 ++ .../test-contracts/reject_all/reject_all.cpp | 31 +++++++++ .../test-contracts/reject_all/reject_all.wasm | Bin 0 -> 1013 bytes 7 files changed, 99 insertions(+), 1 deletion(-) create mode 100644 unittests/test-contracts/reject_all/CMakeLists.txt create mode 100644 unittests/test-contracts/reject_all/reject_all.cpp create mode 100755 unittests/test-contracts/reject_all/reject_all.wasm diff --git a/unittests/contracts.hpp.in b/unittests/contracts.hpp.in index 35c01501436..72be3742cb9 100644 --- a/unittests/contracts.hpp.in +++ b/unittests/contracts.hpp.in @@ -44,6 +44,7 @@ namespace eosio { MAKE_READ_WASM_ABI(noop, noop, test-contracts) MAKE_READ_WASM_ABI(payloadless, payloadless, test-contracts) MAKE_READ_WASM_ABI(proxy, proxy, test-contracts) + MAKE_READ_WASM_ABI(reject_all, reject_all, test-contracts) MAKE_READ_WASM_ABI(restrict_action_test, restrict_action_test, test-contracts) MAKE_READ_WASM_ABI(snapshot_test, snapshot_test, test-contracts) MAKE_READ_WASM_ABI(test_api, test_api, test-contracts) diff --git a/unittests/protocol_feature_tests.cpp b/unittests/protocol_feature_tests.cpp index 92a929ead47..13f07ba5e0c 100644 --- a/unittests/protocol_feature_tests.cpp +++ b/unittests/protocol_feature_tests.cpp @@ -906,4 +906,64 @@ BOOST_AUTO_TEST_CASE( only_bill_to_first_authorizer ) { try { } FC_LOG_AND_RETHROW() } +BOOST_AUTO_TEST_CASE( forward_setcode_test ) { try { + tester c( setup_policy::preactivate_feature_only ); + + const auto& tester1_account = N(tester1); + const auto& tester2_account = N(tester2); + c.create_accounts( {tester1_account, tester2_account} ); + + // Deploy contract that rejects all actions dispatched to it with the 
following exceptions: + // * eosio::setcode to set code on the eosio account is allowed (unless the rejectall account exists) + // * eosio::newaccount is allowed only if it creates the rejectall account. + c.set_code( config::system_account_name, contracts::reject_all_wasm() ); + c.produce_block(); + + // Before activation, deploying a contract should work since setcode won't be forwarded to the WASM on eosio. + c.set_code( tester1_account, contracts::noop_wasm() ); + + // Activate the FORWARD_SETCODE protocol feature and then return the contract on eosio back to what it was. + const auto& pfm = c.control->get_protocol_feature_manager(); + const auto& d = pfm.get_builtin_digest( builtin_protocol_feature_t::forward_setcode ); + BOOST_REQUIRE( d ); + c.set_bios_contract(); + c.preactivate_protocol_features( {*d} ); + c.produce_block(); + c.set_code( config::system_account_name, contracts::reject_all_wasm() ); + c.produce_block(); + + // After activation, deploying a contract causes setcode to be dispatched to the WASM on eosio, + // and in this case the contract is configured to reject the setcode action. + BOOST_REQUIRE_EXCEPTION( c.set_code( tester2_account, contracts::noop_wasm() ), + eosio_assert_message_exception, + eosio_assert_message_is( "rejecting all actions" ) ); + + + tester c2(setup_policy::none); + push_blocks( c, c2 ); // make a backup of the chain to enable testing further conditions. + + c.set_bios_contract(); // To allow pushing further actions for setting up the other part of the test. + c.create_account( N(rejectall) ); + c.produce_block(); + // The existence of the rejectall account will make the reject_all contract reject all actions with no exceptions. + + // It will no longer be possible to deploy the reject_all contract to the eosio account, + // because after the native function sets it, the contract is immediately invoked and rejects the transaction. + BOOST_REQUIRE_EXCEPTION( c.set_code( config::system_account_name, contracts::reject_all_wasm() ), + eosio_assert_message_exception, + eosio_assert_message_is( "rejecting all actions" ) ); + + + // Going back to the backup chain, we can create the rejectall account while the reject_all contract is + // already deployed on eosio. + c2.create_account( N(rejectall) ); + c2.produce_block(); + // Now all actions dispatched to the eosio account should be rejected. + + // However, it should still be possible to set the bios contract because the WASM on eosio is called after the + // native setcode function completes.
+ c2.set_bios_contract(); + c2.produce_block(); +} FC_LOG_AND_RETHROW() } + BOOST_AUTO_TEST_SUITE_END() diff --git a/unittests/test-contracts/CMakeLists.txt b/unittests/test-contracts/CMakeLists.txt index 749b0471349..c5942b66fa9 100644 --- a/unittests/test-contracts/CMakeLists.txt +++ b/unittests/test-contracts/CMakeLists.txt @@ -13,6 +13,7 @@ add_subdirectory( integration_test ) add_subdirectory( noop ) add_subdirectory( payloadless ) add_subdirectory( proxy ) +add_subdirectory( reject_all ) add_subdirectory( restrict_action_test ) add_subdirectory( snapshot_test ) add_subdirectory( test_api ) diff --git a/unittests/test-contracts/README.md b/unittests/test-contracts/README.md index aa9c0f8dee9..9b0138ed7a6 100644 --- a/unittests/test-contracts/README.md +++ b/unittests/test-contracts/README.md @@ -2,6 +2,6 @@ test_ram_limit contract was compiled with eosio.cdt v1.4.1 That contract was ported to compile with eosio.cdt v1.5.0, but the test that uses it is very sensitive to stdlib/eosiolib changes, compilation flags and linker flags. -deferred_test and proxy contracts were compiled with eosio.cdt v1.6.1 +deferred_test, proxy, and reject_all contracts were compiled with eosio.cdt v1.6.1 The remaining contracts have been ported to compile with eosio.cdt v1.6.x. They were compiled with a patched version of eosio.cdt v1.6.0-rc1 (commit 1c9180ff5a1e431385180ce459e11e6a1255c1a4). diff --git a/unittests/test-contracts/reject_all/CMakeLists.txt b/unittests/test-contracts/reject_all/CMakeLists.txt new file mode 100644 index 00000000000..027aa487fa1 --- /dev/null +++ b/unittests/test-contracts/reject_all/CMakeLists.txt @@ -0,0 +1,5 @@ +if( EOSIO_COMPILE_TEST_CONTRACTS ) + add_contract( reject_all reject_all reject_all.cpp ) +else() + configure_file( ${CMAKE_CURRENT_SOURCE_DIR}/reject_all.wasm ${CMAKE_CURRENT_BINARY_DIR}/reject_all.wasm COPYONLY ) +endif() diff --git a/unittests/test-contracts/reject_all/reject_all.cpp b/unittests/test-contracts/reject_all/reject_all.cpp new file mode 100644 index 00000000000..40f26fd827b --- /dev/null +++ b/unittests/test-contracts/reject_all/reject_all.cpp @@ -0,0 +1,31 @@ +/** + * @file + * @copyright defined in eos/LICENSE + */ +#include <eosio/eosio.hpp> + +using namespace eosio; + +extern "C" { + void apply( uint64_t receiver, uint64_t first_receiver, uint64_t action ) { + check( receiver == first_receiver, "rejecting all notifications" ); + + // reject all actions with only the following exceptions: + // * do not reject an eosio::setcode that sets code on the eosio account unless the rejectall account exists; + // * do not reject an eosio::newaccount that creates the rejectall account (payload decoding sketched below).
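The two exemptions listed above hinge on decoding the incoming action payloads, which the apply() body just below does; the template arguments sketched here are assumptions based on typical eosio.cdt usage (unpacking a prefix of eosio::setcode to get the target account, and the first two fields of eosio::newaccount as a name pair):

#include <eosio/eosio.hpp>
#include <utility>

// Hedged sketch, not the contract verbatim: decode just the fields apply() needs.
inline eosio::name setcode_target() {
   return eosio::unpack_action_data<eosio::name>();                          // account being set
}

inline std::pair<eosio::name, eosio::name> newaccount_parties() {
   return eosio::unpack_action_data<std::pair<eosio::name, eosio::name>>();  // creator, new name
}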
+ + if( first_receiver == "eosio"_n.value ) { + if( action == "setcode"_n.value ) { + auto accnt = unpack_action_data(); + if( accnt == "eosio"_n && !is_account("rejectall"_n) ) + return; + } else if( action == "newaccount"_n.value ) { + auto accnts = unpack_action_data< std::pair >(); + if( accnts.second == "rejectall"_n ) + return; + } + } + + check( false , "rejecting all actions" ); + } +} diff --git a/unittests/test-contracts/reject_all/reject_all.wasm b/unittests/test-contracts/reject_all/reject_all.wasm new file mode 100755 index 0000000000000000000000000000000000000000..ee794557a9838a2f86cff0881e430244f95223c8 GIT binary patch literal 1013 zcmY*YF>ljA6n^i{j@?|M-qeBV5;~L>i7=9@1QkLI?H@?2)U9HYIIT_9GQ>gZn1K3FN%WtJmaD-i=sdv zya3$_1s53|ayFY0YBMpe$#`7=(C>XLiPGPKk!J0YR)VR(XAds51VB1Y=}kIkkv&+SC!JT_dc{4LxnL;V#+YQByRevb2g9HBKDcFcz-5)nwrI)dguXa#b0rI?Tmw zXgzB~xFZ^%LBFvf=(>zUJ7NwW4-gk!B}je5z)PxJCtVZpY&Wf%Y<=&o4iJ__4XuWUX!x9R2;`>*DO&d7-(<4FT7>VdNbd zUMp=^lYJ?5E$I~_txncS9SNMqY&l_z4Xwk1=xW&(_NDjO$?Kp+4OVb3>}q1C2iOZR zU9P=b7=Z4}(D}B(lXAMBr3!X6OmIx0_R$18EeXhpVp~7CgRNA#B@ADw%B8A=Dmzt% z)1@l9%G{|^sagdXsj@P_KPWe?2xkR;N-J2mxNJH;u;JEq%WzUP;V Date: Thu, 11 Apr 2019 18:20:30 +0800 Subject: [PATCH 341/680] feature get_sender 7028 --- libraries/chain/apply_context.cpp | 18 ++++++++++++++++++ libraries/chain/controller.cpp | 8 ++++++++ .../include/eosio/chain/apply_context.hpp | 3 +++ .../eosio/chain/protocol_feature_manager.hpp | 3 ++- libraries/chain/protocol_feature_manager.cpp | 11 +++++++++++ libraries/chain/wasm_interface.cpp | 4 ++++ 6 files changed, 46 insertions(+), 1 deletion(-) diff --git a/libraries/chain/apply_context.cpp b/libraries/chain/apply_context.cpp index ded5df63936..5636d233163 100644 --- a/libraries/chain/apply_context.cpp +++ b/libraries/chain/apply_context.cpp @@ -131,6 +131,8 @@ void apply_context::exec() { _notified.emplace_back( receiver, action_ordinal ); exec_one(); + + _in_notification = true; for( uint32_t i = 1; i < _notified.size(); ++i ) { std::tie( receiver, action_ordinal ) = _notified[i]; exec_one(); @@ -813,5 +815,21 @@ void apply_context::add_ram_usage( account_name account, int64_t ram_delta ) { } } +action_name apply_context::get_sender() const { + action_trace& trace = trx_context.get_action_trace( action_ordinal ); + if (_in_notification) { + action_trace& parent_trace = trx_context.get_action_trace( trace.parent_action_ordinal ); + if (parent_trace.creator_action_ordinal > 0) { + action_trace& creator_trace = trx_context.get_action_trace( parent_trace.creator_action_ordinal ); + return creator_trace.receiver; + } + } else { + if (trace.creator_action_ordinal > 0) { + action_trace& creator_trace = trx_context.get_action_trace( trace.creator_action_ordinal ); + return creator_trace.receiver; + } + } + return 0; +} } } /// eosio::chain diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp index 6e543c8a356..55e93327e27 100644 --- a/libraries/chain/controller.cpp +++ b/libraries/chain/controller.cpp @@ -309,6 +309,7 @@ struct controller_impl { set_activation_handler(); set_activation_handler(); + set_activation_handler(); #define SET_APP_HANDLER( receiver, contract, action) \ @@ -3013,6 +3014,13 @@ void controller_impl::on_activation +void controller_impl::on_activation() { + db.modify( db.get(), [&]( auto& ps ) { + add_intrinsic_to_whitelist( ps.whitelisted_intrinsics, "get_sender" ); + } ); +} + template<> void controller_impl::on_activation() { const auto& indx = db.get_index(); diff --git 
a/libraries/chain/include/eosio/chain/apply_context.hpp b/libraries/chain/include/eosio/chain/apply_context.hpp index d1eac6495dd..a28d8696ff0 100644 --- a/libraries/chain/include/eosio/chain/apply_context.hpp +++ b/libraries/chain/include/eosio/chain/apply_context.hpp @@ -554,6 +554,8 @@ class apply_context { action_name get_receiver()const { return receiver; } const action& get_action()const { return *act; } + action_name get_sender() const; + /// Fields: public: @@ -570,6 +572,7 @@ class apply_context { uint32_t action_ordinal = 0; bool privileged = false; bool context_free = false; + bool _in_notification = false; // executing notification public: generic_index idx64; diff --git a/libraries/chain/include/eosio/chain/protocol_feature_manager.hpp b/libraries/chain/include/eosio/chain/protocol_feature_manager.hpp index 18a52f7e6b8..43e799fba4a 100644 --- a/libraries/chain/include/eosio/chain/protocol_feature_manager.hpp +++ b/libraries/chain/include/eosio/chain/protocol_feature_manager.hpp @@ -22,7 +22,8 @@ enum class builtin_protocol_feature_t : uint32_t { disallow_empty_producer_schedule, restrict_action_to_self, only_bill_first_authorizer, - forward_setcode + forward_setcode, + get_sender }; struct protocol_feature_subjective_restrictions { diff --git a/libraries/chain/protocol_feature_manager.cpp b/libraries/chain/protocol_feature_manager.cpp index 18071049fac..5281a41ac5b 100644 --- a/libraries/chain/protocol_feature_manager.cpp +++ b/libraries/chain/protocol_feature_manager.cpp @@ -121,6 +121,17 @@ Adds CPU and network bandwidth usage to only the first authorizer of a transacti Builtin protocol feature: FORWARD_SETCODE Forward eosio::setcode actions to the WebAssembly code deployed on the eosio account. +*/ + {} + } ) + ( builtin_protocol_feature_t::get_sender, builtin_protocol_feature_spec{ + "GET_SENDER", + fc::variant("1eab748b95a2e6f4d7cb42065bdee5566af8efddf01a55a0a8d831b823f8828a").as(), + // SHA256 hash of the raw message below within the comment delimiters (do not modify message below). +/* +Builtin protocol feature: GET_SENDER + +Allows contracts to determine which account is the sender of an inline action. 
*/ {} } ) diff --git a/libraries/chain/wasm_interface.cpp b/libraries/chain/wasm_interface.cpp index 004a7326e83..bbc51f1a1d3 100644 --- a/libraries/chain/wasm_interface.cpp +++ b/libraries/chain/wasm_interface.cpp @@ -928,6 +928,9 @@ class system_api : public context_aware_api { return context.control.is_protocol_feature_activated( feature_digest ); } + name get_sender() { + return context.get_sender(); + } }; constexpr size_t max_assert_message = 1024; @@ -1807,6 +1810,7 @@ REGISTER_INTRINSICS(system_api, (current_time, int64_t() ) (publication_time, int64_t() ) (is_feature_activated, int(int) ) + (get_sender, int64_t() ) ); REGISTER_INTRINSICS(context_free_system_api, From ae2684a376cfa8e6680a308a0d6bf80197ca708d Mon Sep 17 00:00:00 2001 From: Matias Romeo Date: Wed, 3 Apr 2019 12:39:12 -0300 Subject: [PATCH 342/680] cleos: handle no Content-length header in http response --- programs/cleos/httpc.cpp | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) diff --git a/programs/cleos/httpc.cpp b/programs/cleos/httpc.cpp index 5503c8fe8ec..9a9cdb8f866 100644 --- a/programs/cleos/httpc.cpp +++ b/programs/cleos/httpc.cpp @@ -89,17 +89,22 @@ namespace eosio { namespace client { namespace http { if(std::regex_search(header, match, clregex)) response_content_length = std::stoi(match[1]); } - EOS_ASSERT(response_content_length >= 0, invalid_http_response, "Invalid content-length response"); std::stringstream re; - // Write whatever content we already have to output. - response_content_length -= response.size(); - if (response.size() > 0) - re << &response; + if( response_content_length >= 0 ) { + // Write whatever content we already have to output. + response_content_length -= response.size(); + if (response.size() > 0) + re << &response; - boost::asio::read(socket, response, boost::asio::transfer_exactly(response_content_length)); - re << &response; + boost::asio::read(socket, response, boost::asio::transfer_exactly(response_content_length)); + } else { + boost::system::error_code ec; + boost::asio::read(socket, response, boost::asio::transfer_all(), ec); + EOS_ASSERT(!ec || ec == boost::asio::ssl::error::stream_truncated, http_exception, "Unable to read http response: ${err}", ("err",ec.message())); + } + re << &response; return re.str(); } From 8ce4166f38849ca9255562f5ba23aeec9467ddea Mon Sep 17 00:00:00 2001 From: Matias Romeo Date: Tue, 9 Apr 2019 21:57:25 -0300 Subject: [PATCH 343/680] cleos: simplify http response body reading --- programs/cleos/httpc.cpp | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/programs/cleos/httpc.cpp b/programs/cleos/httpc.cpp index 9a9cdb8f866..7d9326b9ed7 100644 --- a/programs/cleos/httpc.cpp +++ b/programs/cleos/httpc.cpp @@ -90,20 +90,19 @@ namespace eosio { namespace client { namespace http { response_content_length = std::stoi(match[1]); } - std::stringstream re; - if( response_content_length >= 0 ) { - // Write whatever content we already have to output. + // Attempt to read the response body using the length indicated by the + // Content-length header. If the header was not present just read all available bytes. 
+ if( response_content_length != -1 ) { response_content_length -= response.size(); - if (response.size() > 0) - re << &response; - - boost::asio::read(socket, response, boost::asio::transfer_exactly(response_content_length)); + if( response_content_length > 0 ) + boost::asio::read(socket, response, boost::asio::transfer_exactly(response_content_length)); } else { boost::system::error_code ec; boost::asio::read(socket, response, boost::asio::transfer_all(), ec); EOS_ASSERT(!ec || ec == boost::asio::ssl::error::stream_truncated, http_exception, "Unable to read http response: ${err}", ("err",ec.message())); } + std::stringstream re; re << &response; return re.str(); } From dd226ab98f6c44baaf6ab8ee5b35588f55a1522d Mon Sep 17 00:00:00 2001 From: Bucky Kittinger Date: Thu, 11 Apr 2019 11:11:57 -0400 Subject: [PATCH 344/680] revert missed commented out mongo stuff and revert chainbase change --- libraries/chainbase | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/chainbase b/libraries/chainbase index d4a6fd350a9..8ade03c693f 160000 --- a/libraries/chainbase +++ b/libraries/chainbase @@ -1 +1 @@ -Subproject commit d4a6fd350a9f4a812a3e33019c864d198bdb2e10 +Subproject commit 8ade03c693ff4ccde3debb6ccfd0fda2b37b0c8a From 03259699ace1763dd824e7eb7f844ec751a993b7 Mon Sep 17 00:00:00 2001 From: Todd Fleming Date: Thu, 11 Apr 2019 11:31:21 -0400 Subject: [PATCH 345/680] ship: don't truncate log from future versions --- .../eosio/state_history_plugin/state_history_log.hpp | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/plugins/state_history_plugin/include/eosio/state_history_plugin/state_history_log.hpp b/plugins/state_history_plugin/include/eosio/state_history_plugin/state_history_log.hpp index c8634894d2c..60fff908c47 100644 --- a/plugins/state_history_plugin/include/eosio/state_history_plugin/state_history_log.hpp +++ b/plugins/state_history_plugin/include/eosio/state_history_plugin/state_history_log.hpp @@ -35,6 +35,7 @@ inline uint64_t ship_magic(uint32_t version) { return N(ship) | version; } inline bool is_ship(uint64_t magic) { return (magic & 0xffff'ffff'0000'0000) == N(ship); } inline uint32_t get_ship_version(uint64_t magic) { return magic; } inline bool is_ship_supported_version(uint64_t magic) { return is_ship(magic) && get_ship_version(magic) == 0; } +inline bool is_ship_unsupported_version(uint64_t magic) { return is_ship(magic) && get_ship_version(magic) != 0; } struct state_history_log_header { uint64_t magic = ship_magic(0); @@ -176,8 +177,11 @@ class state_history_log { read_header(header, false); uint64_t suffix; if (!is_ship_supported_version(header.magic) || header.payload_size > size || - pos + state_history_log_header_serial_size + header.payload_size + sizeof(suffix) > size) + pos + state_history_log_header_serial_size + header.payload_size + sizeof(suffix) > size) { + EOS_ASSERT(!is_ship_unsupported_version(header.magic), chain::plugin_exception, + "${name}.log has an unsupported version", ("name", name)); break; + } log.seekg(pos + state_history_log_header_serial_size + header.payload_size); log.read((char*)&suffix, sizeof(suffix)); if (suffix != pos) From 62109ed6702bfeb7c449e8639e7a2f32da995cf3 Mon Sep 17 00:00:00 2001 From: Bucky Kittinger Date: Thu, 11 Apr 2019 11:44:18 -0400 Subject: [PATCH 346/680] use boost 1.68 --- scripts/eosio_build.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/eosio_build.sh b/scripts/eosio_build.sh index f813b4f99eb..2b46049151c 100755 --- a/scripts/eosio_build.sh 
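[Editor's sketch] A note on the version check in commit 345 above: the log's magic number packs the name "ship" into the high 32 bits and the format version into the low 32 bits, which is what lets a reader tell a foreign file apart from a ship log written by a newer nodeos. A standalone sketch of the arithmetic follows; the name constant here is a stand-in, since the real value comes from eosio's N(ship) name encoding.

    #include <cassert>
    #include <cstdint>

    // Placeholder for N(ship): the encoded name occupies the high bits and the
    // low 32 bits are zero, leaving room to OR in a version number.
    constexpr uint64_t ship_name = 0xc300'0000'0000'0000ull; // illustrative value only

    constexpr uint64_t ship_magic(uint32_t version) { return ship_name | version; }
    constexpr bool     is_ship(uint64_t magic)      { return (magic & 0xffff'ffff'0000'0000ull) == ship_name; }
    constexpr uint32_t get_ship_version(uint64_t m) { return static_cast<uint32_t>(m); }

    int main() {
       uint64_t m = ship_magic(1); // a hypothetical future format version
       assert(is_ship(m) && get_ship_version(m) == 1);
       // Commit 345's point: a valid 'ship' magic with an unknown version must
       // abort loading rather than silently truncating the log at that entry.
       bool supported = is_ship(m) && get_ship_version(m) == 0;
       assert(!supported);
    }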
+++ b/scripts/eosio_build.sh @@ -63,7 +63,7 @@ export MONGO_C_DRIVER_ROOT=${SRC_LOCATION}/mongo-c-driver-${MONGO_C_DRIVER_VERSI export MONGO_CXX_DRIVER_VERSION=3.4.0 export MONGO_CXX_DRIVER_ROOT=${SRC_LOCATION}/mongo-cxx-driver-r${MONGO_CXX_DRIVER_VERSION} export BOOST_VERSION_MAJOR=1 -export BOOST_VERSION_MINOR=67 +export BOOST_VERSION_MINOR=68 export BOOST_VERSION_PATCH=0 export BOOST_VERSION=${BOOST_VERSION_MAJOR}_${BOOST_VERSION_MINOR}_${BOOST_VERSION_PATCH} export BOOST_ROOT=${SRC_LOCATION}/boost_${BOOST_VERSION} @@ -290,7 +290,7 @@ printf "## ENABLE_COVERAGE_TESTING=%s\\n" "${ENABLE_COVERAGE_TESTING}" mkdir -p $BUILD_DIR cd $BUILD_DIR -$CMAKE -DCMAKE_BUILD_TYPE="${CMAKE_BUILD_TYPE}" -DCMAKE_CXX_COMPILER="${CXX}" -DOPENSSL_ROOT_DIR="${OPENSSL_ROOT_DIR}" -DBUILD_MONGO_DB_PLUGIN=false \ +$CMAKE -DCMAKE_BUILD_TYPE="${CMAKE_BUILD_TYPE}" -DCMAKE_CXX_COMPILER="${CXX}" -DOPENSSL_ROOT_DIR="${OPENSSL_ROOT_DIR}" -DBUILD_MONGO_DB_PLUGIN=true \ -DCORE_SYMBOL_NAME="${CORE_SYMBOL_NAME}" -DENABLE_COVERAGE_TESTING="${ENABLE_COVERAGE_TESTING}" -DBUILD_DOXYGEN="${DOXYGEN}" \ -DCMAKE_INSTALL_PREFIX=$OPT_LOCATION/eosio $LOCAL_CMAKE_FLAGS "${REPO_ROOT}" From 8697e70fb0a9745df284ee679f6390fcaecdc401 Mon Sep 17 00:00:00 2001 From: Todd Fleming Date: Thu, 11 Apr 2019 11:46:57 -0400 Subject: [PATCH 347/680] #6980: memory leak when --trace-history not used --- plugins/state_history_plugin/state_history_plugin.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugins/state_history_plugin/state_history_plugin.cpp b/plugins/state_history_plugin/state_history_plugin.cpp index 0df317198d7..2a2a1664126 100644 --- a/plugins/state_history_plugin/state_history_plugin.cpp +++ b/plugins/state_history_plugin/state_history_plugin.cpp @@ -345,7 +345,7 @@ struct state_history_plugin_impl : std::enable_shared_from_thisreceipt) { + if (p->receipt && trace_log) { if (is_onblock(p)) onblock_trace = p; else if (p->failed_dtrx_trace) From 7eebf69574bca5a093563751174a52ba24d68c7f Mon Sep 17 00:00:00 2001 From: arhag Date: Thu, 11 Apr 2019 13:35:05 -0400 Subject: [PATCH 348/680] store uint64_t error_code in eosio_assert_code_exception rather than extracting it from the string message #6898 --- libraries/chain/apply_context.cpp | 7 +-- libraries/chain/controller.cpp | 43 ++++--------------- .../chain/include/eosio/chain/controller.hpp | 2 +- .../chain/include/eosio/chain/exceptions.hpp | 38 +++++++++++++++- libraries/chain/wasm_interface.cpp | 8 +++- 5 files changed, 53 insertions(+), 45 deletions(-) diff --git a/libraries/chain/apply_context.cpp b/libraries/chain/apply_context.cpp index 616c8e31dac..5badd7185d0 100644 --- a/libraries/chain/apply_context.cpp +++ b/libraries/chain/apply_context.cpp @@ -82,14 +82,9 @@ void apply_context::exec_one() } catch( const wasm_exit& ) {} } } FC_RETHROW_EXCEPTIONS( warn, "pending console output: ${console}", ("console", _pending_console_output) ) - } catch( const eosio_assert_code_exception& e ) { - action_trace& trace = trx_context.get_action_trace( action_ordinal ); - trace.error_code = controller::convert_exception_to_error_code( e ); - trace.except = e; - finalize_trace( trace, start ); - throw; } catch( const fc::exception& e ) { action_trace& trace = trx_context.get_action_trace( action_ordinal ); + trace.error_code = controller::convert_exception_to_error_code( e ); trace.except = e; finalize_trace( trace, start ); throw; diff --git a/libraries/chain/controller.cpp b/libraries/chain/controller.cpp index bccad2c784b..e3721ef4726 100644 --- 
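[Editor's sketch] The controller.cpp hunks that follow all make the same move: the dedicated eosio_assert_code_exception catch clauses disappear, and a generic catch of fc::exception recovers the code through RTTI instead. A distilled sketch of that conversion, using simplified stand-ins for the fc types rather than the real class hierarchy:

    #include <cstdint>
    #include <optional>

    // Simplified stand-ins for fc::exception and eosio_assert_code_exception.
    struct fc_exception { virtual ~fc_exception() = default; };
    struct assert_code_exception : fc_exception {
       std::optional<uint64_t> error_code; // carried as data, not parsed from text
    };

    // The dynamic_cast replaces the old strtoull-based re-parsing of the code
    // out of the formatted log message.
    std::optional<uint64_t> convert_exception_to_error_code(const fc_exception& e) {
       auto* p = dynamic_cast<const assert_code_exception*>(&e);
       if (!p) return std::nullopt;
       return p->error_code;
    }

    // Usage shape inside each catch block:
    //    } catch (const fc_exception& e) {
    //       trace.error_code = convert_exception_to_error_code(e);
    //       ...
    //    }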
a/libraries/chain/controller.cpp +++ b/libraries/chain/controller.cpp @@ -981,13 +981,9 @@ struct controller_impl { return trace; } catch( const protocol_feature_bad_block_exception& ) { throw; - } catch( const eosio_assert_code_exception& e ) { - cpu_time_to_bill_us = trx_context.update_billed_cpu_time( fc::time_point::now() ); - trace->error_code = controller::convert_exception_to_error_code( e ); - trace->except = e; - trace->except_ptr = std::current_exception(); } catch( const fc::exception& e ) { cpu_time_to_bill_us = trx_context.update_billed_cpu_time( fc::time_point::now() ); + trace->error_code = controller::convert_exception_to_error_code( e ); trace->except = e; trace->except_ptr = std::current_exception(); } @@ -1128,13 +1124,9 @@ struct controller_impl { return trace; } catch( const protocol_feature_bad_block_exception& ) { throw; - } catch( const eosio_assert_code_exception& e ) { - trace->error_code = controller::convert_exception_to_error_code( e ); - trace->except = e; - trace->except_ptr = std::current_exception(); - trace->elapsed = fc::time_point::now() - trx_context.start; } catch( const fc::exception& e ) { cpu_time_to_bill_us = trx_context.update_billed_cpu_time( fc::time_point::now() ); + trace->error_code = controller::convert_exception_to_error_code( e ); trace->except = e; trace->except_ptr = std::current_exception(); trace->elapsed = fc::time_point::now() - trx_context.start; @@ -1324,11 +1316,8 @@ struct controller_impl { unapplied_transactions.erase( trx->signed_id ); } return trace; - } catch( const eosio_assert_code_exception& e ) { - trace->error_code = controller::convert_exception_to_error_code( e ); - trace->except = e; - trace->except_ptr = std::current_exception(); } catch( const fc::exception& e ) { + trace->error_code = controller::convert_exception_to_error_code( e ); trace->except = e; trace->except_ptr = std::current_exception(); } @@ -2997,28 +2986,12 @@ bool controller::all_subjective_mitigations_disabled()const { return my->conf.disable_all_subjective_mitigations; } -fc::optional controller::convert_exception_to_error_code( const eosio_assert_code_exception& e ) { - const auto& logs = e.get_log(); - - if( logs.size() == 0 ) return {}; - - const auto msg = logs[0].get_message(); - - auto pos = msg.find( ": " ); - - if( pos == std::string::npos || (pos + 2) >= msg.size() ) return {}; - - pos += 2; - - uint64_t error_code = 0; - - try { - error_code = std::strtoull( msg.c_str() + pos, nullptr, 10 ); - } catch( ... 
) { - return {}; - } +fc::optional controller::convert_exception_to_error_code( const fc::exception& e ) { + const eosio_assert_code_exception* e_ptr = dynamic_cast( &e ); + + if( e_ptr == nullptr ) return {}; - return error_code; + return e_ptr->error_code; } /// Protocol feature activation handlers: diff --git a/libraries/chain/include/eosio/chain/controller.hpp b/libraries/chain/include/eosio/chain/controller.hpp index 31b6ae4b830..43d1f1637c9 100644 --- a/libraries/chain/include/eosio/chain/controller.hpp +++ b/libraries/chain/include/eosio/chain/controller.hpp @@ -275,7 +275,7 @@ namespace eosio { namespace chain { void add_to_ram_correction( account_name account, uint64_t ram_bytes ); bool all_subjective_mitigations_disabled()const; - static fc::optional convert_exception_to_error_code( const eosio_assert_code_exception& e ); + static fc::optional convert_exception_to_error_code( const fc::exception& e ); signal pre_accepted_block; signal accepted_block_header; diff --git a/libraries/chain/include/eosio/chain/exceptions.hpp b/libraries/chain/include/eosio/chain/exceptions.hpp index a80213e0425..51ab7e9f6c6 100644 --- a/libraries/chain/include/eosio/chain/exceptions.hpp +++ b/libraries/chain/include/eosio/chain/exceptions.hpp @@ -68,6 +68,42 @@ { throw( effect_type( e.what(), e.get_log() ) ); } +#define FC_DECLARE_DERIVED_EXCEPTION_WITH_ERROR_CODE( TYPE, BASE, CODE, WHAT ) \ + class TYPE : public BASE \ + { \ + public: \ + enum code_enum { \ + code_value = CODE, \ + }; \ + explicit TYPE( int64_t code, const std::string& name_value, const std::string& what_value ) \ + :BASE( code, name_value, what_value ){} \ + explicit TYPE( fc::log_message&& m, int64_t code, const std::string& name_value, const std::string& what_value ) \ + :BASE( std::move(m), code, name_value, what_value ){} \ + explicit TYPE( fc::log_messages&& m, int64_t code, const std::string& name_value, const std::string& what_value )\ + :BASE( std::move(m), code, name_value, what_value ){}\ + explicit TYPE( const fc::log_messages& m, int64_t code, const std::string& name_value, const std::string& what_value )\ + :BASE( m, code, name_value, what_value ){}\ + TYPE( const std::string& what_value, const fc::log_messages& m ) \ + :BASE( m, CODE, BOOST_PP_STRINGIZE(TYPE), what_value ){} \ + TYPE( fc::log_message&& m ) \ + :BASE( fc::move(m), CODE, BOOST_PP_STRINGIZE(TYPE), WHAT ){}\ + TYPE( fc::log_messages msgs ) \ + :BASE( fc::move( msgs ), CODE, BOOST_PP_STRINGIZE(TYPE), WHAT ) {} \ + TYPE( const TYPE& c ) \ + :BASE(c),error_code(c.error_code) {} \ + TYPE( const BASE& c ) \ + :BASE(c){} \ + TYPE():BASE(CODE, BOOST_PP_STRINGIZE(TYPE), WHAT){}\ + \ + virtual std::shared_ptr dynamic_copy_exception()const\ + { return std::make_shared( *this ); } \ + virtual NO_RETURN void dynamic_rethrow_exception()const \ + { if( code() == CODE ) throw *this;\ + else fc::exception::dynamic_rethrow_exception(); \ + } \ + fc::optional error_code; \ + }; + namespace eosio { namespace chain { FC_DECLARE_EXCEPTION( chain_exception, @@ -207,7 +243,7 @@ namespace eosio { namespace chain { 3050002, "Invalid Action Arguments" ) FC_DECLARE_DERIVED_EXCEPTION( eosio_assert_message_exception, action_validate_exception, 3050003, "eosio_assert_message assertion failure" ) - FC_DECLARE_DERIVED_EXCEPTION( eosio_assert_code_exception, action_validate_exception, + FC_DECLARE_DERIVED_EXCEPTION_WITH_ERROR_CODE( eosio_assert_code_exception, action_validate_exception, 3050004, "eosio_assert_code assertion failure" ) FC_DECLARE_DERIVED_EXCEPTION( 
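[Editor's sketch] The macro above gives the exception type an optional error_code data member, and the wasm_interface.cpp hunk just below populates it at the throw site instead of formatting the code into the message text. Schematically, with standard-library stand-ins rather than the fc macro expansion:

    #include <cstdint>
    #include <optional>
    #include <stdexcept>

    // Stand-in for what FC_DECLARE_DERIVED_EXCEPTION_WITH_ERROR_CODE produces:
    // an ordinary derived exception plus an optional error_code member.
    struct eosio_assert_code_error : std::runtime_error {
       using std::runtime_error::runtime_error;
       std::optional<uint64_t> error_code;
    };

    // Shape of the new eosio_assert_code host function: build the exception,
    // attach the raw code as data, then throw. No string round trip involved.
    inline void eosio_assert_code(bool condition, uint64_t code) {
       if (!condition) {
          eosio_assert_code_error e("assertion failure with error code");
          e.error_code = code;
          throw e;
       }
    }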
action_not_found_exception, action_validate_exception, 3050005, "Action can not be found" ) diff --git a/libraries/chain/wasm_interface.cpp b/libraries/chain/wasm_interface.cpp index 004a7326e83..54feb1b6a87 100644 --- a/libraries/chain/wasm_interface.cpp +++ b/libraries/chain/wasm_interface.cpp @@ -960,8 +960,12 @@ class context_free_system_api : public context_aware_api { void eosio_assert_code( bool condition, uint64_t error_code ) { if( BOOST_UNLIKELY( !condition ) ) { - EOS_THROW( eosio_assert_code_exception, - "assertion failure with error code: ${error_code}", ("error_code", error_code) ); + eosio_assert_code_exception e( FC_LOG_MESSAGE( error, + "assertion failure with error code: ${error_code}", + ("error_code", error_code) + ) ); + e.error_code = error_code; + throw e; } } From 4e4179a074fbc3d5e322f0bcdcde2ba7b053ded8 Mon Sep 17 00:00:00 2001 From: Bucky Kittinger Date: Thu, 11 Apr 2019 13:46:37 -0400 Subject: [PATCH 349/680] revert back to boost 1.67 for now --- scripts/eosio_build.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/eosio_build.sh b/scripts/eosio_build.sh index 2b46049151c..8f52b41404b 100755 --- a/scripts/eosio_build.sh +++ b/scripts/eosio_build.sh @@ -63,7 +63,7 @@ export MONGO_C_DRIVER_ROOT=${SRC_LOCATION}/mongo-c-driver-${MONGO_C_DRIVER_VERSI export MONGO_CXX_DRIVER_VERSION=3.4.0 export MONGO_CXX_DRIVER_ROOT=${SRC_LOCATION}/mongo-cxx-driver-r${MONGO_CXX_DRIVER_VERSION} export BOOST_VERSION_MAJOR=1 -export BOOST_VERSION_MINOR=68 +export BOOST_VERSION_MINOR=67 export BOOST_VERSION_PATCH=0 export BOOST_VERSION=${BOOST_VERSION_MAJOR}_${BOOST_VERSION_MINOR}_${BOOST_VERSION_PATCH} export BOOST_ROOT=${SRC_LOCATION}/boost_${BOOST_VERSION} From 517713090db14b5b200b0cfedd47d296140b6037 Mon Sep 17 00:00:00 2001 From: Todd Fleming Date: Thu, 11 Apr 2019 15:11:12 -0400 Subject: [PATCH 350/680] remove is_ship_unsupported_version --- .../state_history_log.hpp | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/plugins/state_history_plugin/include/eosio/state_history_plugin/state_history_log.hpp b/plugins/state_history_plugin/include/eosio/state_history_plugin/state_history_log.hpp index 60fff908c47..11f3665bc35 100644 --- a/plugins/state_history_plugin/include/eosio/state_history_plugin/state_history_log.hpp +++ b/plugins/state_history_plugin/include/eosio/state_history_plugin/state_history_log.hpp @@ -34,8 +34,7 @@ namespace eosio { inline uint64_t ship_magic(uint32_t version) { return N(ship) | version; } inline bool is_ship(uint64_t magic) { return (magic & 0xffff'ffff'0000'0000) == N(ship); } inline uint32_t get_ship_version(uint64_t magic) { return magic; } -inline bool is_ship_supported_version(uint64_t magic) { return is_ship(magic) && get_ship_version(magic) == 0; } -inline bool is_ship_unsupported_version(uint64_t magic) { return is_ship(magic) && get_ship_version(magic) != 0; } +inline bool is_ship_supported_version(uint64_t magic) { return get_ship_version(magic) == 0; } struct state_history_log_header { uint64_t magic = ship_magic(0); @@ -75,8 +74,9 @@ class state_history_log { fc::datastream ds(bytes, sizeof(bytes)); fc::raw::unpack(ds, header); EOS_ASSERT(!ds.remaining(), chain::plugin_exception, "state_history_log_header_serial_size mismatch"); - EOS_ASSERT(!assert_version || is_ship_supported_version(header.magic), chain::plugin_exception, - "corrupt ${name}.log (0)", ("name", name)); + if (assert_version) + EOS_ASSERT(is_ship(header.magic) && is_ship_supported_version(header.magic), 
chain::plugin_exception, + "corrupt ${name}.log (0)", ("name", name)); } void write_header(const state_history_log_header& header) { @@ -151,7 +151,7 @@ class state_history_log { } log.seekg(suffix); read_header(header, false); - if (!is_ship_supported_version(header.magic) || + if (!is_ship(header.magic) || !is_ship_supported_version(header.magic) || suffix + state_history_log_header_serial_size + header.payload_size + sizeof(suffix) != size) { elog("corrupt ${name}.log (3)", ("name", name)); return false; @@ -176,9 +176,9 @@ class state_history_log { log.seekg(pos); read_header(header, false); uint64_t suffix; - if (!is_ship_supported_version(header.magic) || header.payload_size > size || + if (!is_ship(header.magic) || !is_ship_supported_version(header.magic) || header.payload_size > size || pos + state_history_log_header_serial_size + header.payload_size + sizeof(suffix) > size) { - EOS_ASSERT(!is_ship_unsupported_version(header.magic), chain::plugin_exception, + EOS_ASSERT(!is_ship(header.magic) || is_ship_supported_version(header.magic), chain::plugin_exception, "${name}.log has an unsupported version", ("name", name)); break; } @@ -206,7 +206,7 @@ class state_history_log { state_history_log_header header; log.seekg(0); read_header(header, false); - EOS_ASSERT(is_ship_supported_version(header.magic) && + EOS_ASSERT(is_ship(header.magic) && is_ship_supported_version(header.magic) && state_history_log_header_serial_size + header.payload_size + sizeof(uint64_t) <= size, chain::plugin_exception, "corrupt ${name}.log (1)", ("name", name)); _begin_block = chain::block_header::num_from_id(header.block_id); @@ -241,7 +241,8 @@ class state_history_log { read_header(header, false); uint64_t suffix_pos = pos + state_history_log_header_serial_size + header.payload_size; uint64_t suffix; - EOS_ASSERT(is_ship_supported_version(header.magic) && suffix_pos + sizeof(suffix) <= size, + EOS_ASSERT(is_ship(header.magic) && is_ship_supported_version(header.magic) && + suffix_pos + sizeof(suffix) <= size, chain::plugin_exception, "corrupt ${name}.log (7)", ("name", name)); log.seekg(suffix_pos); log.read((char*)&suffix, sizeof(suffix)); From 1ef7e839b9ea669c4172a00e3b825515a3c7537e Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Thu, 11 Apr 2019 14:05:10 -0500 Subject: [PATCH 351/680] Fix corner case of no connections when trying to determine who to sync to --- plugins/net_plugin/net_plugin.cpp | 36 +++++++++++++++++-------------- 1 file changed, 20 insertions(+), 16 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index 7b2f2cd10bf..b224cf2ad0f 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -1320,43 +1320,47 @@ namespace eosio { source = conn; } else { - if (my_impl->connections.size() == 1) { + if( my_impl->connections.size() == 0 ) { + sync_source.reset(); + } else if( my_impl->connections.size() == 1 ) { if (!source) { source = *my_impl->connections.begin(); } - } - else { + } else { // init to a linear array search auto cptr = my_impl->connections.begin(); auto cend = my_impl->connections.end(); // do we remember the previous source? - if (source) { + if( source ) { //try to find it in the list - cptr = my_impl->connections.find(source); + cptr = my_impl->connections.find( source ); cend = cptr; - if (cptr == my_impl->connections.end()) { + if( cptr == my_impl->connections.end() ) { //not there - must have been closed! cend is now connections.end, so just flatten the ring. 
source.reset(); cptr = my_impl->connections.begin(); } else { //was found - advance the start to the next. cend is the old source. - if (++cptr == my_impl->connections.end() && cend != my_impl->connections.end() ) { + if( ++cptr == my_impl->connections.end() && cend != my_impl->connections.end() ) { cptr = my_impl->connections.begin(); } } } //scan the list of peers looking for another able to provide sync blocks. - auto cstart_it = cptr; - do { - //select the first one which is current and break out. - if((*cptr)->current()) { - source = *cptr; - break; - } - if(++cptr == my_impl->connections.end()) + if( cptr != my_impl->connections.end() ) { + auto cstart_it = cptr; + do { + //select the first one which is current and break out. + if( (*cptr)->current() ) { + source = *cptr; + break; + } + if( ++cptr == my_impl->connections.end() ) cptr = my_impl->connections.begin(); - } while(cptr != cstart_it); + } while( cptr != cstart_it ); + } + } // no need to check the result, either source advanced or the whole list was checked and the old source is reused. } } From cda07dad47559ea1a17b22a8cfccf0d404fc3da2 Mon Sep 17 00:00:00 2001 From: Kevin Heifner Date: Thu, 11 Apr 2019 15:09:16 -0500 Subject: [PATCH 352/680] Fix merge --- plugins/net_plugin/net_plugin.cpp | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/plugins/net_plugin/net_plugin.cpp b/plugins/net_plugin/net_plugin.cpp index b224cf2ad0f..f5776351410 100644 --- a/plugins/net_plugin/net_plugin.cpp +++ b/plugins/net_plugin/net_plugin.cpp @@ -1321,7 +1321,7 @@ namespace eosio { } else { if( my_impl->connections.size() == 0 ) { - sync_source.reset(); + source.reset(); } else if( my_impl->connections.size() == 1 ) { if (!source) { source = *my_impl->connections.begin(); @@ -1360,7 +1360,6 @@ namespace eosio { cptr = my_impl->connections.begin(); } while( cptr != cstart_it ); } - } // no need to check the result, either source advanced or the whole list was checked and the old source is reused. 
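[Editor's sketch] The guarded do/while above is the heart of commit 351. In isolation, the circular scan looks like the sketch below, with std::set standing in for the plugin's connection container and current() for its peer-readiness test:

    #include <memory>
    #include <set>

    struct connection {
       bool healthy = false;
       bool current() const { return healthy; }
    };
    using connection_ptr = std::shared_ptr<connection>;

    // Scan once around the set starting at `cptr`, wrapping at the end, and
    // stop at the first peer able to serve sync blocks. The iterator guard is
    // commit 351's fix: with no connections, the loop is never entered and no
    // end() iterator is dereferenced.
    connection_ptr pick_sync_source(const std::set<connection_ptr>& conns,
                                    std::set<connection_ptr>::const_iterator cptr) {
       if (cptr == conns.end()) return nullptr;
       auto cstart_it = cptr;
       do {
          if ((*cptr)->current()) return *cptr;
          if (++cptr == conns.end()) cptr = conns.begin();
       } while (cptr != cstart_it);
       return nullptr; // whole list checked, nothing usable
    }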
} } From aad7db93b8af9c6a37502fb566e95538c73e6186 Mon Sep 17 00:00:00 2001 From: Todd Fleming Date: Thu, 11 Apr 2019 17:06:05 -0400 Subject: [PATCH 353/680] gcc 7 --- plugins/state_history_plugin/state_history_plugin.cpp | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/plugins/state_history_plugin/state_history_plugin.cpp b/plugins/state_history_plugin/state_history_plugin.cpp index 2680165b5c0..032b0ff0349 100644 --- a/plugins/state_history_plugin/state_history_plugin.cpp +++ b/plugins/state_history_plugin/state_history_plugin.cpp @@ -392,7 +392,8 @@ struct state_history_plugin_impl : std::enable_shared_from_thisblock->id(), + state_history_log_header header{.magic = ship_magic(0), + .block_id = block_state->block->id(), .payload_size = sizeof(uint32_t) + traces_bin.size()}; trace_log->write_entry(header, block_state->block->previous, [&](auto& stream) { uint32_t s = (uint32_t)traces_bin.size(); @@ -486,7 +487,8 @@ struct state_history_plugin_impl : std::enable_shared_from_thisblock->id(), + state_history_log_header header{.magic = ship_magic(0), + .block_id = block_state->block->id(), .payload_size = sizeof(uint32_t) + deltas_bin.size()}; chain_state_log->write_entry(header, block_state->block->previous, [&](auto& stream) { uint32_t s = (uint32_t)deltas_bin.size(); From b9e37aaedfd9a378221a4158665c384b49d9de9a Mon Sep 17 00:00:00 2001 From: Bucky Kittinger Date: Thu, 11 Apr 2019 17:31:30 -0400 Subject: [PATCH 354/680] Addressed some change requests --- CMakeLists.txt | 8 ++--- scripts/eosio_build.sh | 62 ++++++++++++++++++++++++++--------- scripts/eosio_build_amazon.sh | 29 ++++++++++------ scripts/eosio_build_centos.sh | 29 ++++++++++------ scripts/eosio_build_darwin.sh | 29 ++++++++++------ scripts/eosio_build_fedora.sh | 29 ++++++++++------ scripts/eosio_build_ubuntu.sh | 29 ++++++++++------ 7 files changed, 146 insertions(+), 69 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 0aea9883ac5..0a1d8530b0f 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -49,15 +49,15 @@ set( INSTALLER_APP_ID "68ad7005-8eee-49c9-95ce-9eed97e5b347" ) # http://stackoverflow.com/a/18369825 if("${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU") - if (CMAKE_CXX_COMPILER_VERSION VERSION_LESS 6.0) - message(FATAL_ERROR "GCC version must be at least 6.0!") + if (CMAKE_CXX_COMPILER_VERSION VERSION_LESS 7.0) + message(FATAL_ERROR "GCC version must be at least 7.0!") endif() if ("${CMAKE_GENERATOR}" STREQUAL "Ninja") add_compile_options(-fdiagnostics-color=always) endif() elseif ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang" OR "${CMAKE_CXX_COMPILER_ID}" STREQUAL "AppleClang") - if (CMAKE_CXX_COMPILER_VERSION VERSION_LESS 4.0) - message(FATAL_ERROR "Clang version must be at least 4.0!") + if (CMAKE_CXX_COMPILER_VERSION VERSION_LESS 5.0) + message(FATAL_ERROR "Clang version must be at least 5.0!") endif() if ("${CMAKE_GENERATOR}" STREQUAL "Ninja") add_compile_options(-fcolor-diagnostics) diff --git a/scripts/eosio_build.sh b/scripts/eosio_build.sh index 8f52b41404b..b4735214824 100755 --- a/scripts/eosio_build.sh +++ b/scripts/eosio_build.sh @@ -72,7 +72,16 @@ export LLVM_VERSION=release_40 export LLVM_ROOT=${OPT_LOCATION}/llvm export LLVM_DIR=${LLVM_ROOT}/lib/cmake/llvm export CLANG8_ROOT=${OPT_LOCATION}/clang8 -export PINNED_COMPILER_VERSION=release_80 +export PINNED_COMPILER_BRANCH=release_80 +export PINNED_COMPILER_LLVM_COMMIT=18e41dc +export PINNED_COMPILER_CLANG_COMMIT=a03da8b +export PINNED_COMPILER_LLD_COMMIT=d60a035 +export PINNED_COMPILER_POLLY_COMMIT=1bc06e5 +export 
PINNED_COMPILER_CLANG_TOOLS_EXTRA_COMMIT=6b34834 +export PINNED_COMPILER_LIBCXX_COMMIT=1853712 +export PINNED_COMPILER_LIBCXXABI_COMMIT=d7338a4 +export PINNED_COMPILER_LIBUNWIND_COMMIT=57f6739 +export PINNED_COMPILER_COMPILER_RT_COMMIT=5bc7979 export DOXYGEN_VERSION=1_8_14 export DOXYGEN_ROOT=${SRC_LOCATION}/doxygen-${DOXYGEN_VERSION} export TINI_VERSION=0.18.0 @@ -183,22 +192,45 @@ fi BUILD_CLANG8=false if [ $NONINTERACTIVE -eq 0 ]; then - printf "#include \nint main(){ std::cout << \"Hello, World!\" << std::endl; }" &> $TEMP_DIR/test.cpp - `c++ -c -std=c++17 $TEMP_DIR/test.cpp -o $TEMP_DIR/test.o &> /dev/null` - if [ $? -ne 0 ]; then - `CXX -c -std=c++17 $TEMP_DIR/test.cpp -o $TEMP_DIR/test.o &> /dev/null` - if [ $? -ne 0 ]; then - printf "Error no C++17 support.\\nEnter Y/y or N/n to continue with downloading and building a viable compiler or exit now.\\nIf you already have a C++17 compiler installed or would like to install your own, export CXX to point to the compiler of your choosing." - read -p "Enter Y/y or N/n to continue with downloading and building a viable compiler or exit now." yn - case $yn in - [Yy]* ) BUILD_CLANG8=true; break;; - [Nn]* ) exit 1;; - * ) echo "Improper input"; exit 1;; - esac + if [ ! -z $CXX ]; then + CPP_COMP=$CXX + else + CPP_COMP=c++ + fi + + NO_CPP17=false + + WHICH_CPP=`which $CPP_COMP` + COMPILER_TYPE=`readlink $WHICH_CPP` + if [[ $COMPILER_TYPE == "clang++" ]]; then + if [[ `c++ --version | cut -d ' ' -f 1 | head -n 1` == "Apple" ]]; then + ### Apple clang version 10 + if [[ `c++ --version | cut -d ' ' -f 4 | cut -d '.' -f 1 | head -n 1` -lt 10 ]]; then + NO_CPP17=true + fi + else + ### clang version 5 + if [[ `c++ --version | cut -d ' ' -f 4 | cut -d '.' -f 1 | head -n 1` -lt 5 ]]; then + NO_CPP17=true + fi fi else - CXX=c++ + ### gcc version 7 + if [[ `c++ -dumpversion | cut -d '.' -f 1` -lt 7 ]]; then + NO_CPP17=true + fi + fi + + if $NO_CPP17; then + printf "Error no C++17 support.\\nEnter Y/y or N/n to continue with downloading and building a viable compiler or exit now.\\nIf you already have a C++17 compiler installed or would like to install your own, export CXX to point to the compiler of your choosing." + read -p "Enter Y/y or N/n to continue with downloading and building a viable compiler or exit now. " yn + case $yn in + [Yy]* ) BUILD_CLANG8=true; break;; + [Nn]* ) exit 1;; + * ) echo "Improper input"; exit 1;; + esac fi + CXX=$CPP_COMP else BUILD_CLANG8=true CXX=${OPT_LOCATION}/clang8/bin/clang++ @@ -292,7 +324,7 @@ cd $BUILD_DIR $CMAKE -DCMAKE_BUILD_TYPE="${CMAKE_BUILD_TYPE}" -DCMAKE_CXX_COMPILER="${CXX}" -DOPENSSL_ROOT_DIR="${OPENSSL_ROOT_DIR}" -DBUILD_MONGO_DB_PLUGIN=true \ -DCORE_SYMBOL_NAME="${CORE_SYMBOL_NAME}" -DENABLE_COVERAGE_TESTING="${ENABLE_COVERAGE_TESTING}" -DBUILD_DOXYGEN="${DOXYGEN}" \ - -DCMAKE_INSTALL_PREFIX=$OPT_LOCATION/eosio $LOCAL_CMAKE_FLAGS "${REPO_ROOT}" + -DCMAKE_INSTALL_PREFIX=$OPT_LOCATION/eosio -DCMAKE_EXE_LINKER_FLAGS="-stdlib=libc++ -lc++abi" $LOCAL_CMAKE_FLAGS "${REPO_ROOT}" if [ $? -ne 0 ]; then exit -1; fi make -j"${JOBS}" diff --git a/scripts/eosio_build_amazon.sh b/scripts/eosio_build_amazon.sh index f0214df603e..e769917a790 100755 --- a/scripts/eosio_build_amazon.sh +++ b/scripts/eosio_build_amazon.sh @@ -248,23 +248,32 @@ if [ $? -ne 0 ]; then exit -1; fi cd .. printf "\\n" -if [ $BUILD_CLANG8 ]; then +if $BUILD_CLANG8; then printf "Checking Clang 8 support...\\n" if [ ! 
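[Editor's sketch] The version-string parsing above replaces the earlier strategy of compiling a throwaway hello-world with -std=c++17, which only proved that the flag was accepted. If a feature probe were preferred over parsing version numbers, a translation unit that actually exercises C++17 would be more discriminating. The probe below is illustrative and not part of the patch:

    // cxx17_probe.cpp -- compile with: $CXX -c -std=c++17 cxx17_probe.cpp
    #include <optional>
    #include <string_view>
    #include <tuple>
    #include <type_traits>

    template <typename T>
    constexpr auto describe(T v) {
       if constexpr (std::is_integral_v<T>) // C++17 constexpr if
          return v + 1;
       else
          return v;
    }

    int main() {
       std::optional<std::string_view> s{"c++17"};  // C++17 library types
       auto [a, b] = std::tuple{describe(41), *s};  // structured bindings + CTAD
       return (a == 42 && b == *s) ? 0 : 1;
    }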
-d $CLANG8_ROOT ]; then
printf "Installing Clang 8...\\n"
cd ${OPT_LOCATION} \
- && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_VERSION https://git.llvm.org/git/llvm.git clang8 && cd clang8 \
+ && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/llvm.git clang8 && cd clang8 \
+ && git checkout $PINNED_COMPILER_LLVM_COMMIT \
&& cd tools \
- && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_VERSION https://git.llvm.org/git/lld.git \
- && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_VERSION https://git.llvm.org/git/polly.git \
- && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_VERSION https://git.llvm.org/git/clang.git clang && cd clang/tools \
+ && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/lld.git \
+ && cd lld && git checkout $PINNED_COMPILER_LLD_COMMIT && cd ../ \
+ && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/polly.git \
+ && cd polly && git checkout $PINNED_COMPILER_POLLY_COMMIT && cd ../ \
+ && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/clang.git clang && cd clang/tools \
+ && git checkout $PINNED_COMPILER_CLANG_COMMIT \
&& mkdir extra && cd extra \
- && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_VERSION https://git.llvm.org/git/clang-tools-extra.git \
+ && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/clang-tools-extra.git \
+ && cd clang-tools-extra && git checkout $PINNED_COMPILER_CLANG_TOOLS_EXTRA_COMMIT && cd .. \
&& cd ../../../../projects \
- && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_VERSION https://git.llvm.org/git/libcxx.git \
- && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_VERSION https://git.llvm.org/git/libcxxabi.git \
- && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_VERSION https://git.llvm.org/git/libunwind.git \
- && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_VERSION https://git.llvm.org/git/compiler-rt.git \
+ && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/libcxx.git \
+ && cd libcxx && git checkout $PINNED_COMPILER_LIBCXX_COMMIT && cd ../ \
+ && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/libcxxabi.git \
+ && cd libcxxabi && git checkout $PINNED_COMPILER_LIBCXXABI_COMMIT && cd ../ \
+ && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/libunwind.git \
+ && cd libunwind && git checkout $PINNED_COMPILER_LIBUNWIND_COMMIT && cd ../ \
+ && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/compiler-rt.git \
+ && cd compiler-rt && git checkout $PINNED_COMPILER_COMPILER_RT_COMMIT && cd ../ \
&& cd ${OPT_LOCATION}/clang8 \
&& mkdir build && cd build \
&& $CMAKE -G "Unix Makefiles" -DCMAKE_INSTALL_PREFIX="${CLANG8_ROOT}" -DLLVM_BUILD_EXTERNAL_COMPILER_RT=ON -DLLVM_BUILD_LLVM_DYLIB=ON -DLLVM_ENABLE_LIBCXX=ON -DLLVM_ENABLE_RTTI=ON -DLLVM_INCLUDE_DOCS=OFF -DLLVM_OPTIMIZED_TABLEGEN=ON -DLLVM_TARGETS_TO_BUILD=all -DCMAKE_BUILD_TYPE=Release .. \
diff --git a/scripts/eosio_build_centos.sh b/scripts/eosio_build_centos.sh
index e2f98fd9214..7a11c7ee257 100755
--- a/scripts/eosio_build_centos.sh
+++ b/scripts/eosio_build_centos.sh
@@ -299,23 +299,32 @@ if [ $? -ne 0 ]; then exit -1; fi
cd ..
printf "\\n"
-if [ $BUILD_CLANG8 ]; then
+if $BUILD_CLANG8; then
printf "Checking Clang 8 support...\\n"
if [ ! -d $CLANG8_ROOT ]; then
printf "Installing Clang 8...\\n"
cd ${OPT_LOCATION} \
- && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_VERSION https://git.llvm.org/git/llvm.git clang8 && cd clang8 \
+ && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/llvm.git clang8 && cd clang8 \
+ && git checkout $PINNED_COMPILER_LLVM_COMMIT \
&& cd tools \
- && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_VERSION https://git.llvm.org/git/lld.git \
- && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_VERSION https://git.llvm.org/git/polly.git \
- && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_VERSION https://git.llvm.org/git/clang.git clang && cd clang/tools \
+ && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/lld.git \
+ && cd lld && git checkout $PINNED_COMPILER_LLD_COMMIT && cd ../ \
+ && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/polly.git \
+ && cd polly && git checkout $PINNED_COMPILER_POLLY_COMMIT && cd ../ \
+ && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/clang.git clang && cd clang/tools \
+ && git checkout $PINNED_COMPILER_CLANG_COMMIT \
&& mkdir extra && cd extra \
- && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_VERSION https://git.llvm.org/git/clang-tools-extra.git \
+ && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/clang-tools-extra.git \
+ && cd clang-tools-extra && git checkout $PINNED_COMPILER_CLANG_TOOLS_EXTRA_COMMIT && cd .. \
&& cd ../../../../projects \
- && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_VERSION https://git.llvm.org/git/libcxx.git \
- && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_VERSION https://git.llvm.org/git/libcxxabi.git \
- && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_VERSION https://git.llvm.org/git/libunwind.git \
- && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_VERSION https://git.llvm.org/git/compiler-rt.git \
+ && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/libcxx.git \
+ && cd libcxx && git checkout $PINNED_COMPILER_LIBCXX_COMMIT && cd ../ \
+ && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/libcxxabi.git \
+ && cd libcxxabi && git checkout $PINNED_COMPILER_LIBCXXABI_COMMIT && cd ../ \
+ && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/libunwind.git \
+ && cd libunwind && git checkout $PINNED_COMPILER_LIBUNWIND_COMMIT && cd ../ \
+ && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/compiler-rt.git \
+ && cd compiler-rt && git checkout $PINNED_COMPILER_COMPILER_RT_COMMIT && cd ../ \
&& cd ${OPT_LOCATION}/clang8 \
&& mkdir build && cd build \
&& $CMAKE -G "Unix Makefiles" -DCMAKE_INSTALL_PREFIX="${CLANG8_ROOT}" -DLLVM_BUILD_EXTERNAL_COMPILER_RT=ON -DLLVM_BUILD_LLVM_DYLIB=ON -DLLVM_ENABLE_LIBCXX=ON -DLLVM_ENABLE_RTTI=ON -DLLVM_INCLUDE_DOCS=OFF -DLLVM_OPTIMIZED_TABLEGEN=ON -DLLVM_TARGETS_TO_BUILD=all -DCMAKE_BUILD_TYPE=Release .. \
diff --git a/scripts/eosio_build_darwin.sh b/scripts/eosio_build_darwin.sh
index 07fa2f53cac..1335540d64b 100755
--- a/scripts/eosio_build_darwin.sh
+++ b/scripts/eosio_build_darwin.sh
@@ -262,23 +262,32 @@ fi
cd ..
printf "\\n"
-if [ $BUILD_CLANG8 ]; then
+if $BUILD_CLANG8; then
printf "Checking Clang 8 support...\\n"
if [ ! -d $CLANG8_ROOT ]; then
printf "Installing Clang 8...\\n"
cd ${OPT_LOCATION} \
- && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_VERSION https://git.llvm.org/git/llvm.git clang8 && cd clang8 \
+ && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/llvm.git clang8 \
+ && cd clang8 && git checkout $PINNED_COMPILER_LLVM_COMMIT \
&& cd tools \
- && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_VERSION https://git.llvm.org/git/lld.git \
- && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_VERSION https://git.llvm.org/git/polly.git \
- && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_VERSION https://git.llvm.org/git/clang.git clang && cd clang/tools \
+ && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/lld.git \
+ && cd lld && git checkout $PINNED_COMPILER_LLD_COMMIT && cd ../ \
+ && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/polly.git \
+ && cd polly && git checkout $PINNED_COMPILER_POLLY_COMMIT && cd ../ \
+ && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/clang.git clang && cd clang/tools \
+ && git checkout $PINNED_COMPILER_CLANG_COMMIT \
&& mkdir extra && cd extra \
- && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_VERSION https://git.llvm.org/git/clang-tools-extra.git \
+ && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/clang-tools-extra.git \
+ && cd clang-tools-extra && git checkout $PINNED_COMPILER_CLANG_TOOLS_EXTRA_COMMIT && cd ..
\ && cd ../../../../projects \ - && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_VERSION https://git.llvm.org/git/libcxx.git \ - && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_VERSION https://git.llvm.org/git/libcxxabi.git \ - && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_VERSION https://git.llvm.org/git/libunwind.git \ - && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_VERSION https://git.llvm.org/git/compiler-rt.git \ + && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/libcxx.git \ + && cd libcxx && git checkout $PINNED_COMPILER_LIBCXX_COMMIT && cd ../ \ + && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/libcxxabi.git \ + && cd libcxxabi && git checkout $PINNED_COMPILER_LIBCXXABI_COMMIT && cd ../ \ + && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/libunwind.git \ + && cd libunwind && git checkout $PINNED_COMPILER_LIBUNWIND_COMMIT && cd ../ \ + && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/compiler-rt.git \ + && cd compiler-rt && git checkout $PINNED_COMPILER_COMPILER_RT_COMMIT && cd ../ \ && cd ${OPT_LOCATION}/clang8 \ && mkdir build && cd build \ && $CMAKE -G "Unix Makefiles" -DCMAKE_INSTALL_PREFIX="${CLANG8_ROOT}" -DLLVM_BUILD_EXTERNAL_COMPILER_RT=ON -DLLVM_BUILD_LLVM_DYLIB=ON -DLLVM_ENABLE_LIBCXX=ON -DLLVM_ENABLE_RTTI=ON -DLLVM_INCLUDE_DOCS=OFF -DLLVM_OPTIMIZED_TABLEGEN=ON -DLLVM_TARGETS_TO_BUILD=all -DCMAKE_BUILD_TYPE=Release .. \ diff --git a/scripts/eosio_build_fedora.sh b/scripts/eosio_build_fedora.sh index 49591fcf105..8fab815109c 100755 --- a/scripts/eosio_build_fedora.sh +++ b/scripts/eosio_build_fedora.sh @@ -231,23 +231,32 @@ if [ $? -ne 0 ]; then exit -1; fi cd .. printf "\\n" -if [ $BUILD_CLANG8 ]; then +if $BUILD_CLANG8; then printf "Checking Clang 8 support...\\n" if [ ! 
-d $CLANG8_ROOT ]; then
printf "Installing Clang 8...\\n"
cd ${OPT_LOCATION} \
- && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_VERSION https://git.llvm.org/git/llvm.git clang8 && cd clang8 \
+ && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/llvm.git clang8 && cd clang8 \
+ && git checkout $PINNED_COMPILER_LLVM_COMMIT \
&& cd tools \
- && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_VERSION https://git.llvm.org/git/lld.git \
- && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_VERSION https://git.llvm.org/git/polly.git \
- && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_VERSION https://git.llvm.org/git/clang.git clang && cd clang/tools \
+ && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/lld.git \
+ && cd lld && git checkout $PINNED_COMPILER_LLD_COMMIT && cd ../ \
+ && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/polly.git \
+ && cd polly && git checkout $PINNED_COMPILER_POLLY_COMMIT && cd ../ \
+ && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/clang.git clang && cd clang/tools \
+ && git checkout $PINNED_COMPILER_CLANG_COMMIT \
&& mkdir extra && cd extra \
- && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_VERSION https://git.llvm.org/git/clang-tools-extra.git \
+ && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/clang-tools-extra.git \
+ && cd clang-tools-extra && git checkout $PINNED_COMPILER_CLANG_TOOLS_EXTRA_COMMIT && cd .. \
&& cd ../../../../projects \
- && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_VERSION https://git.llvm.org/git/libcxx.git \
- && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_VERSION https://git.llvm.org/git/libcxxabi.git \
- && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_VERSION https://git.llvm.org/git/libunwind.git \
- && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_VERSION https://git.llvm.org/git/compiler-rt.git \
+ && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/libcxx.git \
+ && cd libcxx && git checkout $PINNED_COMPILER_LIBCXX_COMMIT && cd ../ \
+ && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/libcxxabi.git \
+ && cd libcxxabi && git checkout $PINNED_COMPILER_LIBCXXABI_COMMIT && cd ../ \
+ && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/libunwind.git \
+ && cd libunwind && git checkout $PINNED_COMPILER_LIBUNWIND_COMMIT && cd ../ \
+ && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/compiler-rt.git \
+ && cd compiler-rt && git checkout $PINNED_COMPILER_COMPILER_RT_COMMIT && cd ../ \
&& cd ${OPT_LOCATION}/clang8 \
&& mkdir build && cd build \
&& $CMAKE -G "Unix Makefiles" -DCMAKE_INSTALL_PREFIX="${CLANG8_ROOT}" -DLLVM_BUILD_EXTERNAL_COMPILER_RT=ON -DLLVM_BUILD_LLVM_DYLIB=ON -DLLVM_ENABLE_LIBCXX=ON -DLLVM_ENABLE_RTTI=ON -DLLVM_INCLUDE_DOCS=OFF -DLLVM_OPTIMIZED_TABLEGEN=ON -DLLVM_TARGETS_TO_BUILD=all -DCMAKE_BUILD_TYPE=Release .. \
diff --git a/scripts/eosio_build_ubuntu.sh b/scripts/eosio_build_ubuntu.sh
index dd5136b6446..514c5f7a9b6 100755
--- a/scripts/eosio_build_ubuntu.sh
+++ b/scripts/eosio_build_ubuntu.sh
@@ -254,23 +254,32 @@ if [ $? -ne 0 ]; then exit -1; fi
cd ..
printf "\\n"
-if [ $BUILD_CLANG8 ]; then
+if $BUILD_CLANG8; then
printf "Checking Clang 8 support...\\n"
if [ ! -d $CLANG8_ROOT ]; then
printf "Installing Clang 8...\\n"
cd ${OPT_LOCATION} \
- && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_VERSION https://git.llvm.org/git/llvm.git clang8 && cd clang8 \
+ && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/llvm.git clang8 && cd clang8 \
+ && git checkout $PINNED_COMPILER_LLVM_COMMIT \
&& cd tools \
- && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_VERSION https://git.llvm.org/git/lld.git \
- && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_VERSION https://git.llvm.org/git/polly.git \
- && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_VERSION https://git.llvm.org/git/clang.git clang && cd clang/tools \
+ && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/lld.git \
+ && cd lld && git checkout $PINNED_COMPILER_LLD_COMMIT && cd ../ \
+ && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/polly.git \
+ && cd polly && git checkout $PINNED_COMPILER_POLLY_COMMIT && cd ../ \
+ && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/clang.git clang && cd clang/tools \
+ && git checkout $PINNED_COMPILER_CLANG_COMMIT \
&& mkdir extra && cd extra \
- && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_VERSION https://git.llvm.org/git/clang-tools-extra.git \
+ && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/clang-tools-extra.git \
+ && cd clang-tools-extra && git checkout $PINNED_COMPILER_CLANG_TOOLS_EXTRA_COMMIT && cd .. \
&& cd ../../../../projects \
- && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_VERSION https://git.llvm.org/git/libcxx.git \
- && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_VERSION https://git.llvm.org/git/libcxxabi.git \
- && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_VERSION https://git.llvm.org/git/libunwind.git \
- && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_VERSION https://git.llvm.org/git/compiler-rt.git \
+ && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/libcxx.git \
+ && cd libcxx && git checkout $PINNED_COMPILER_LIBCXX_COMMIT && cd ../ \
+ && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/libcxxabi.git \
+ && cd libcxxabi && git checkout $PINNED_COMPILER_LIBCXXABI_COMMIT && cd ../ \
+ && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/libunwind.git \
+ && cd libunwind && git checkout $PINNED_COMPILER_LIBUNWIND_COMMIT && cd ../ \
+ && git clone --depth 1 --single-branch --branch $PINNED_COMPILER_BRANCH https://git.llvm.org/git/compiler-rt.git \
+ && cd compiler-rt && git checkout $PINNED_COMPILER_COMPILER_RT_COMMIT && cd ../ \
&& cd ${OPT_LOCATION}/clang8 \
&& mkdir build && cd build \
&& $CMAKE -G "Unix Makefiles" -DCMAKE_INSTALL_PREFIX="${CLANG8_ROOT}" -DLLVM_BUILD_EXTERNAL_COMPILER_RT=ON -DLLVM_BUILD_LLVM_DYLIB=ON -DLLVM_ENABLE_LIBCXX=ON -DLLVM_ENABLE_RTTI=ON -DLLVM_INCLUDE_DOCS=OFF -DLLVM_OPTIMIZED_TABLEGEN=ON -DLLVM_TARGETS_TO_BUILD=all -DCMAKE_BUILD_TYPE=Release ..
\ From d58a0612572dce809e4792450d864339d885918b Mon Sep 17 00:00:00 2001 From: Todd Fleming Date: Thu, 11 Apr 2019 19:05:01 -0400 Subject: [PATCH 355/680] ship_current_version --- .../eosio/state_history_plugin/state_history_log.hpp | 11 ++++++----- plugins/state_history_plugin/state_history_plugin.cpp | 4 ++-- 2 files changed, 8 insertions(+), 7 deletions(-) diff --git a/plugins/state_history_plugin/include/eosio/state_history_plugin/state_history_log.hpp b/plugins/state_history_plugin/include/eosio/state_history_plugin/state_history_log.hpp index 11f3665bc35..12d43d82139 100644 --- a/plugins/state_history_plugin/include/eosio/state_history_plugin/state_history_log.hpp +++ b/plugins/state_history_plugin/include/eosio/state_history_plugin/state_history_log.hpp @@ -31,13 +31,14 @@ namespace eosio { * payload */ -inline uint64_t ship_magic(uint32_t version) { return N(ship) | version; } -inline bool is_ship(uint64_t magic) { return (magic & 0xffff'ffff'0000'0000) == N(ship); } -inline uint32_t get_ship_version(uint64_t magic) { return magic; } -inline bool is_ship_supported_version(uint64_t magic) { return get_ship_version(magic) == 0; } +inline uint64_t ship_magic(uint32_t version) { return N(ship) | version; } +inline bool is_ship(uint64_t magic) { return (magic & 0xffff'ffff'0000'0000) == N(ship); } +inline uint32_t get_ship_version(uint64_t magic) { return magic; } +inline bool is_ship_supported_version(uint64_t magic) { return get_ship_version(magic) == 0; } +static const uint32_t ship_current_version = 0; struct state_history_log_header { - uint64_t magic = ship_magic(0); + uint64_t magic = ship_magic(ship_current_version); chain::block_id_type block_id = {}; uint64_t payload_size = 0; }; diff --git a/plugins/state_history_plugin/state_history_plugin.cpp b/plugins/state_history_plugin/state_history_plugin.cpp index 032b0ff0349..716ba92019a 100644 --- a/plugins/state_history_plugin/state_history_plugin.cpp +++ b/plugins/state_history_plugin/state_history_plugin.cpp @@ -392,7 +392,7 @@ struct state_history_plugin_impl : std::enable_shared_from_thisblock->id(), .payload_size = sizeof(uint32_t) + traces_bin.size()}; trace_log->write_entry(header, block_state->block->previous, [&](auto& stream) { @@ -487,7 +487,7 @@ struct state_history_plugin_impl : std::enable_shared_from_thisblock->id(), .payload_size = sizeof(uint32_t) + deltas_bin.size()}; chain_state_log->write_entry(header, block_state->block->previous, [&](auto& stream) { From 4597fed8775ac30809a510f17c80c7364c03bd29 Mon Sep 17 00:00:00 2001 From: Bucky Kittinger Date: Thu, 11 Apr 2019 19:20:04 -0400 Subject: [PATCH 356/680] Add new option for pinning --- CMakeLists.txt | 3 ++ scripts/eosio_build.sh | 74 ++++++++++++++++++++++++------------------ 2 files changed, 45 insertions(+), 32 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 0a1d8530b0f..234965baf48 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -47,6 +47,9 @@ set( GUI_CLIENT_EXECUTABLE_NAME eosio ) set( CUSTOM_URL_SCHEME "gcs" ) set( INSTALLER_APP_ID "68ad7005-8eee-49c9-95ce-9eed97e5b347" ) +if("$ENV{PIN_COMPILER}") + message("Pinning compiler") +endif() # http://stackoverflow.com/a/18369825 if("${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU") if (CMAKE_CXX_COMPILER_VERSION VERSION_LESS 7.0) diff --git a/scripts/eosio_build.sh b/scripts/eosio_build.sh index b4735214824..eb8b2960c8b 100755 --- a/scripts/eosio_build.sh +++ b/scripts/eosio_build.sh @@ -119,7 +119,7 @@ function usage() NONINTERACTIVE=0 if [ $# -ne 0 ]; then - while getopts ":cdo:s:ahy" opt; do + 
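[Editor's sketch] Commit 355 above funnels every magic-number construction through a single ship_current_version constant, so a future format bump becomes a one-line change. Condensed, the scheme it leaves behind looks like this; the name constant is again an illustrative stand-in for the real N(ship) value:

    #include <cstdint>

    constexpr uint64_t ship_name_bits = 0xc300'0000'0000'0000ull; // placeholder
    constexpr uint64_t ship_magic(uint32_t v) { return ship_name_bits | v; }

    static const uint32_t ship_current_version = 0; // bump here on format changes

    struct state_history_log_header {
       uint64_t magic        = ship_magic(ship_current_version);
       uint64_t payload_size = 0; // block_id elided for brevity
    };

    int main() {
       state_history_log_header h;
       return (h.magic & 0xffff'ffffull) == ship_current_version ? 0 : 1;
    }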
while getopts ":cdo:s:ahpy" opt; do case "${opt}" in o ) options=( "Debug" "Release" "RelWithDebInfo" "MinSizeRel" ) @@ -153,6 +153,9 @@ if [ $# -ne 0 ]; then usage exit 1 ;; + p) + PIN_COMPILER=true + ;; y) NONINTERACTIVE=1 ;; @@ -191,51 +194,56 @@ if [ $STALE_SUBMODS -gt 0 ]; then fi BUILD_CLANG8=false -if [ $NONINTERACTIVE -eq 0 ]; then - if [ ! -z $CXX ]; then - CPP_COMP=$CXX - else - CPP_COMP=c++ - fi +if [ ! -z $CXX ]; then + CPP_COMP=$CXX +else + CPP_COMP=c++ +fi + +NO_CPP17=false - NO_CPP17=false - - WHICH_CPP=`which $CPP_COMP` - COMPILER_TYPE=`readlink $WHICH_CPP` - if [[ $COMPILER_TYPE == "clang++" ]]; then - if [[ `c++ --version | cut -d ' ' -f 1 | head -n 1` == "Apple" ]]; then - ### Apple clang version 10 - if [[ `c++ --version | cut -d ' ' -f 4 | cut -d '.' -f 1 | head -n 1` -lt 10 ]]; then - NO_CPP17=true - fi - else - ### clang version 5 - if [[ `c++ --version | cut -d ' ' -f 4 | cut -d '.' -f 1 | head -n 1` -lt 5 ]]; then - NO_CPP17=true - fi +WHICH_CPP=`which $CPP_COMP` +COMPILER_TYPE=`readlink $WHICH_CPP` +if [[ $COMPILER_TYPE == "clang++" ]]; then + if [[ `c++ --version | cut -d ' ' -f 1 | head -n 1` == "Apple" ]]; then + ### Apple clang version 10 + if [[ `c++ --version | cut -d ' ' -f 4 | cut -d '.' -f 1 | head -n 1` -lt 10 ]]; then + NO_CPP17=true fi else - ### gcc version 7 - if [[ `c++ -dumpversion | cut -d '.' -f 1` -lt 7 ]]; then + ### clang version 5 + if [[ `c++ --version | cut -d ' ' -f 4 | cut -d '.' -f 1 | head -n 1` -lt 5 ]]; then NO_CPP17=true fi fi +else + ### gcc version 7 + if [[ `c++ -dumpversion | cut -d '.' -f 1` -lt 7 ]]; then + NO_CPP17=true + fi +fi - if $NO_CPP17; then +if $PIN_COMPILER; then + BUILD_CLANG8=true + CPP_COMP=${OPT_LOCATION}/clang8/bin/clang++ +elif $NO_CPP17; then + if [ $NONINTERACTIVE -eq 0 ]; then + BUILD_CLANG8=true + CPP_COMP=${OPT_LOCATION}/clang8/bin/clang++ + else + echo "PIN ${PIN_COMPILER}" printf "Error no C++17 support.\\nEnter Y/y or N/n to continue with downloading and building a viable compiler or exit now.\\nIf you already have a C++17 compiler installed or would like to install your own, export CXX to point to the compiler of your choosing." read -p "Enter Y/y or N/n to continue with downloading and building a viable compiler or exit now. " yn case $yn in - [Yy]* ) BUILD_CLANG8=true; break;; + [Yy]* ) BUILD_CLANG8=true; CPP_COMP=${OPT_LOCATION}/clang8/bin/clang++; break;; [Nn]* ) exit 1;; * ) echo "Improper input"; exit 1;; esac fi - CXX=$CPP_COMP -else - BUILD_CLANG8=true - CXX=${OPT_LOCATION}/clang8/bin/clang++ fi +CXX=$CPP_COMP + export BUILD_CLANG8=$BUILD_CLANG8 printf "\\nBeginning build version: %s\\n" "${VERSION}" @@ -322,9 +330,11 @@ printf "## ENABLE_COVERAGE_TESTING=%s\\n" "${ENABLE_COVERAGE_TESTING}" mkdir -p $BUILD_DIR cd $BUILD_DIR +export PIN_COMPILER=$PIN_COMPILER + $CMAKE -DCMAKE_BUILD_TYPE="${CMAKE_BUILD_TYPE}" -DCMAKE_CXX_COMPILER="${CXX}" -DOPENSSL_ROOT_DIR="${OPENSSL_ROOT_DIR}" -DBUILD_MONGO_DB_PLUGIN=true \ - -DCORE_SYMBOL_NAME="${CORE_SYMBOL_NAME}" -DENABLE_COVERAGE_TESTING="${ENABLE_COVERAGE_TESTING}" -DBUILD_DOXYGEN="${DOXYGEN}" \ - -DCMAKE_INSTALL_PREFIX=$OPT_LOCATION/eosio -DCMAKE_EXE_LINKER_FLAGS="-stdlib=libc++ -lc++abi" $LOCAL_CMAKE_FLAGS "${REPO_ROOT}" + -DCORE_SYMBOL_NAME="${CORE_SYMBOL_NAME}" -DENABLE_COVERAGE_TESTING="${ENABLE_COVERAGE_TESTING}" -DBUILD_DOXYGEN="${DOXYGEN}" \ + -DCMAKE_INSTALL_PREFIX=$OPT_LOCATION/eosio $LOCAL_CMAKE_FLAGS "${REPO_ROOT}" if [ $? 
-ne 0 ]; then exit -1; fi make -j"${JOBS}" From 14dc23476172d6526d93136359681689ed4f4790 Mon Sep 17 00:00:00 2001 From: Bucky Kittinger Date: Thu, 11 Apr 2019 19:41:45 -0400 Subject: [PATCH 357/680] undo mongo comment out --- CMakeLists.txt | 4 +- scripts/eosio_build_darwin.sh | 116 +++++++++++++++++----------------- 2 files changed, 61 insertions(+), 59 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 234965baf48..c809edae3c8 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -48,7 +48,9 @@ set( CUSTOM_URL_SCHEME "gcs" ) set( INSTALLER_APP_ID "68ad7005-8eee-49c9-95ce-9eed97e5b347" ) if("$ENV{PIN_COMPILER}") - message("Pinning compiler") + message(STATUS "Pinning compiler to Clang 8") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -stdlib=libc++ -v") + set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -stdlib=libc++ -lc++abi") endif() # http://stackoverflow.com/a/18369825 if("${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU") diff --git a/scripts/eosio_build_darwin.sh b/scripts/eosio_build_darwin.sh index 1335540d64b..ebda9b2ae8e 100755 --- a/scripts/eosio_build_darwin.sh +++ b/scripts/eosio_build_darwin.sh @@ -188,64 +188,64 @@ if [ $? -ne 0 ]; then exit -1; fi printf "\\n" -#printf "Checking MongoDB installation...\\n" -#if [ ! -d $MONGODB_ROOT ]; then -# printf "Installing MongoDB into ${MONGODB_ROOT}...\\n" -# curl -OL https://fastdl.mongodb.org/osx/mongodb-osx-ssl-x86_64-$MONGODB_VERSION.tgz \ -# && tar -xzf mongodb-osx-ssl-x86_64-$MONGODB_VERSION.tgz \ -# && mv $SRC_LOCATION/mongodb-osx-x86_64-$MONGODB_VERSION $MONGODB_ROOT \ -# && touch $MONGODB_LOG_LOCATION/mongod.log \ -# && rm -f mongodb-osx-ssl-x86_64-$MONGODB_VERSION.tgz \ -# && cp -f $REPO_ROOT/scripts/mongod.conf $MONGODB_CONF \ -# && mkdir -p $MONGODB_DATA_LOCATION \ -# && rm -rf $MONGODB_LINK_LOCATION \ -# && rm -rf $BIN_LOCATION/mongod \ -# && ln -s $MONGODB_ROOT $MONGODB_LINK_LOCATION \ -# && ln -s $MONGODB_LINK_LOCATION/bin/mongod $BIN_LOCATION/mongod \ -# || exit 1 -# printf " - MongoDB successfully installed @ ${MONGODB_ROOT}\\n" -#else -# printf " - MongoDB found with correct version @ ${MONGODB_ROOT}.\\n" -#fi -#if [ $? -ne 0 ]; then exit -1; fi -#printf "Checking MongoDB C driver installation...\\n" -#if [ ! -d $MONGO_C_DRIVER_ROOT ]; then -# printf "Installing MongoDB C driver...\\n" -# curl -LO https://github.com/mongodb/mongo-c-driver/releases/download/$MONGO_C_DRIVER_VERSION/mongo-c-driver-$MONGO_C_DRIVER_VERSION.tar.gz \ -# && tar -xzf mongo-c-driver-$MONGO_C_DRIVER_VERSION.tar.gz \ -# && cd mongo-c-driver-$MONGO_C_DRIVER_VERSION \ -# && mkdir -p cmake-build \ -# && cd cmake-build \ -# && $CMAKE -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$HOME -DENABLE_BSON=ON -DENABLE_SSL=DARWIN -DENABLE_AUTOMATIC_INIT_AND_CLEANUP=OFF -DENABLE_STATIC=ON .. \ -# && make -j"${JOBS}" \ -# && make install \ -# && cd ../.. \ -# && rm mongo-c-driver-$MONGO_C_DRIVER_VERSION.tar.gz \ -# || exit 1 -# printf " - MongoDB C driver successfully installed @ ${MONGO_C_DRIVER_ROOT}.\\n" -#else -# printf " - MongoDB C driver found with correct version @ ${MONGO_C_DRIVER_ROOT}.\\n" -#fi -#if [ $? 
-ne 0 ]; then exit -1; fi -#printf "Checking MongoDB C++ driver installation...\\n" -#if [ "$(grep "Version:" $HOME/lib/pkgconfig/libmongocxx-static.pc 2>/dev/null | tr -s ' ' | awk '{print $2}')" != $MONGO_CXX_DRIVER_VERSION ]; then -# printf "Installing MongoDB C++ driver...\\n" -# curl -L https://github.com/mongodb/mongo-cxx-driver/archive/r$MONGO_CXX_DRIVER_VERSION.tar.gz -o mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION.tar.gz \ -# && tar -xzf mongo-cxx-driver-r${MONGO_CXX_DRIVER_VERSION}.tar.gz \ -# && cd mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION/build \ -# && $CMAKE -DBUILD_SHARED_LIBS=OFF -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$HOME .. \ -# && make -j"${JOBS}" VERBOSE=1 \ -# && make install \ -# && cd ../.. \ -# && rm -f mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION.tar.gz \ -# || exit 1 -# printf " - MongoDB C++ driver successfully installed @ ${MONGO_CXX_DRIVER_ROOT}.\\n" -#else -# printf " - MongoDB C++ driver found with correct version @ ${MONGO_CXX_DRIVER_ROOT}.\\n" -#fi -#if [ $? -ne 0 ]; then exit -1; fi -# -#printf "\\n" +printf "Checking MongoDB installation...\\n" +if [ ! -d $MONGODB_ROOT ]; then + printf "Installing MongoDB into ${MONGODB_ROOT}...\\n" + curl -OL https://fastdl.mongodb.org/osx/mongodb-osx-ssl-x86_64-$MONGODB_VERSION.tgz \ + && tar -xzf mongodb-osx-ssl-x86_64-$MONGODB_VERSION.tgz \ + && mv $SRC_LOCATION/mongodb-osx-x86_64-$MONGODB_VERSION $MONGODB_ROOT \ + && touch $MONGODB_LOG_LOCATION/mongod.log \ + && rm -f mongodb-osx-ssl-x86_64-$MONGODB_VERSION.tgz \ + && cp -f $REPO_ROOT/scripts/mongod.conf $MONGODB_CONF \ + && mkdir -p $MONGODB_DATA_LOCATION \ + && rm -rf $MONGODB_LINK_LOCATION \ + && rm -rf $BIN_LOCATION/mongod \ + && ln -s $MONGODB_ROOT $MONGODB_LINK_LOCATION \ + && ln -s $MONGODB_LINK_LOCATION/bin/mongod $BIN_LOCATION/mongod \ + || exit 1 + printf " - MongoDB successfully installed @ ${MONGODB_ROOT}\\n" +else + printf " - MongoDB found with correct version @ ${MONGODB_ROOT}.\\n" +fi +if [ $? -ne 0 ]; then exit -1; fi +printf "Checking MongoDB C driver installation...\\n" +if [ ! -d $MONGO_C_DRIVER_ROOT ]; then + printf "Installing MongoDB C driver...\\n" + curl -LO https://github.com/mongodb/mongo-c-driver/releases/download/$MONGO_C_DRIVER_VERSION/mongo-c-driver-$MONGO_C_DRIVER_VERSION.tar.gz \ + && tar -xzf mongo-c-driver-$MONGO_C_DRIVER_VERSION.tar.gz \ + && cd mongo-c-driver-$MONGO_C_DRIVER_VERSION \ + && mkdir -p cmake-build \ + && cd cmake-build \ + && $CMAKE -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$HOME -DENABLE_BSON=ON -DENABLE_SSL=DARWIN -DENABLE_AUTOMATIC_INIT_AND_CLEANUP=OFF -DENABLE_STATIC=ON .. \ + && make -j"${JOBS}" \ + && make install \ + && cd ../.. \ + && rm mongo-c-driver-$MONGO_C_DRIVER_VERSION.tar.gz \ + || exit 1 + printf " - MongoDB C driver successfully installed @ ${MONGO_C_DRIVER_ROOT}.\\n" +else + printf " - MongoDB C driver found with correct version @ ${MONGO_C_DRIVER_ROOT}.\\n" +fi +if [ $? 
-ne 0 ]; then exit -1; fi +printf "Checking MongoDB C++ driver installation...\\n" +if [ "$(grep "Version:" $HOME/lib/pkgconfig/libmongocxx-static.pc 2>/dev/null | tr -s ' ' | awk '{print $2}')" != $MONGO_CXX_DRIVER_VERSION ]; then + printf "Installing MongoDB C++ driver...\\n" + curl -L https://github.com/mongodb/mongo-cxx-driver/archive/r$MONGO_CXX_DRIVER_VERSION.tar.gz -o mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION.tar.gz \ + && tar -xzf mongo-cxx-driver-r${MONGO_CXX_DRIVER_VERSION}.tar.gz \ + && cd mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION/build \ + && $CMAKE -DBUILD_SHARED_LIBS=OFF -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$HOME .. \ + && make -j"${JOBS}" VERBOSE=1 \ + && make install \ + && cd ../.. \ + && rm -f mongo-cxx-driver-r$MONGO_CXX_DRIVER_VERSION.tar.gz \ + || exit 1 + printf " - MongoDB C++ driver successfully installed @ ${MONGO_CXX_DRIVER_ROOT}.\\n" +else + printf " - MongoDB C++ driver found with correct version @ ${MONGO_CXX_DRIVER_ROOT}.\\n" +fi +if [ $? -ne 0 ]; then exit -1; fi + +printf "\\n" # We install llvm into /usr/local/opt using brew install llvm@4 From e146709864a917af76d45d8a370070ae0fd9a16c Mon Sep 17 00:00:00 2001 From: Bucky Kittinger Date: Thu, 11 Apr 2019 19:47:10 -0400 Subject: [PATCH 358/680] add cmake variable for compiler pinning --- CMakeLists.txt | 3 ++- scripts/eosio_build.sh | 6 ++---- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index c809edae3c8..e35c50a286a 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -47,11 +47,12 @@ set( GUI_CLIENT_EXECUTABLE_NAME eosio ) set( CUSTOM_URL_SCHEME "gcs" ) set( INSTALLER_APP_ID "68ad7005-8eee-49c9-95ce-9eed97e5b347" ) -if("$ENV{PIN_COMPILER}") +if(${EOSIO_PIN_COMPILER}) message(STATUS "Pinning compiler to Clang 8") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -stdlib=libc++ -v") set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -stdlib=libc++ -lc++abi") endif() + # http://stackoverflow.com/a/18369825 if("${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU") if (CMAKE_CXX_COMPILER_VERSION VERSION_LESS 7.0) diff --git a/scripts/eosio_build.sh b/scripts/eosio_build.sh index eb8b2960c8b..684f4d4c870 100755 --- a/scripts/eosio_build.sh +++ b/scripts/eosio_build.sh @@ -226,12 +226,12 @@ fi if $PIN_COMPILER; then BUILD_CLANG8=true CPP_COMP=${OPT_LOCATION}/clang8/bin/clang++ + PIN_COMPILER_CMAKE="-DEOSIO_PIN_COMPILER=1" elif $NO_CPP17; then if [ $NONINTERACTIVE -eq 0 ]; then BUILD_CLANG8=true CPP_COMP=${OPT_LOCATION}/clang8/bin/clang++ else - echo "PIN ${PIN_COMPILER}" printf "Error no C++17 support.\\nEnter Y/y or N/n to continue with downloading and building a viable compiler or exit now.\\nIf you already have a C++17 compiler installed or would like to install your own, export CXX to point to the compiler of your choosing." read -p "Enter Y/y or N/n to continue with downloading and building a viable compiler or exit now. 
" yn case $yn in @@ -330,9 +330,7 @@ printf "## ENABLE_COVERAGE_TESTING=%s\\n" "${ENABLE_COVERAGE_TESTING}" mkdir -p $BUILD_DIR cd $BUILD_DIR -export PIN_COMPILER=$PIN_COMPILER - -$CMAKE -DCMAKE_BUILD_TYPE="${CMAKE_BUILD_TYPE}" -DCMAKE_CXX_COMPILER="${CXX}" -DOPENSSL_ROOT_DIR="${OPENSSL_ROOT_DIR}" -DBUILD_MONGO_DB_PLUGIN=true \ +$CMAKE -DCMAKE_BUILD_TYPE="${CMAKE_BUILD_TYPE}" -DCMAKE_CXX_COMPILER="${CXX}" -DOPENSSL_ROOT_DIR="${OPENSSL_ROOT_DIR}" -DBUILD_MONGO_DB_PLUGIN=true $PIN_COMPILER_CMAKE \ -DCORE_SYMBOL_NAME="${CORE_SYMBOL_NAME}" -DENABLE_COVERAGE_TESTING="${ENABLE_COVERAGE_TESTING}" -DBUILD_DOXYGEN="${DOXYGEN}" \ -DCMAKE_INSTALL_PREFIX=$OPT_LOCATION/eosio $LOCAL_CMAKE_FLAGS "${REPO_ROOT}" From a95b594ccc43a72b928c843224e6fbcdc6d4d566 Mon Sep 17 00:00:00 2001 From: Matt Witherspoon <32485495+spoonincode@users.noreply.github.com> Date: Thu, 11 Apr 2019 22:02:30 -0400 Subject: [PATCH 359/680] bump chainbase submodule to new DB file format --- libraries/chainbase | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/chainbase b/libraries/chainbase index eb2d0c28bc1..8a153c42842 160000 --- a/libraries/chainbase +++ b/libraries/chainbase @@ -1 +1 @@ -Subproject commit eb2d0c28bc1f1328e8a5fc899291336ad487b084 +Subproject commit 8a153c428429a62ce727814a1ba04d3fcdc2bc83 From fee8d96ad1f4abaf4f054418101b2540296c5f7b Mon Sep 17 00:00:00 2001 From: Kayan Date: Fri, 12 Apr 2019 11:51:22 +0800 Subject: [PATCH 360/680] get_sender always returns creator's contract name --- libraries/chain/apply_context.cpp | 18 ++++-------------- .../include/eosio/chain/apply_context.hpp | 1 - 2 files changed, 4 insertions(+), 15 deletions(-) diff --git a/libraries/chain/apply_context.cpp b/libraries/chain/apply_context.cpp index 5636d233163..4b945906999 100644 --- a/libraries/chain/apply_context.cpp +++ b/libraries/chain/apply_context.cpp @@ -131,8 +131,6 @@ void apply_context::exec() { _notified.emplace_back( receiver, action_ordinal ); exec_one(); - - _in_notification = true; for( uint32_t i = 1; i < _notified.size(); ++i ) { std::tie( receiver, action_ordinal ) = _notified[i]; exec_one(); @@ -816,18 +814,10 @@ void apply_context::add_ram_usage( account_name account, int64_t ram_delta ) { } action_name apply_context::get_sender() const { - action_trace& trace = trx_context.get_action_trace( action_ordinal ); - if (_in_notification) { - action_trace& parent_trace = trx_context.get_action_trace( trace.parent_action_ordinal ); - if (parent_trace.creator_action_ordinal > 0) { - action_trace& creator_trace = trx_context.get_action_trace( parent_trace.creator_action_ordinal ); - return creator_trace.receiver; - } - } else { - if (trace.creator_action_ordinal > 0) { - action_trace& creator_trace = trx_context.get_action_trace( trace.creator_action_ordinal ); - return creator_trace.receiver; - } + const action_trace& trace = trx_context.get_action_trace( action_ordinal ); + if (trace.creator_action_ordinal > 0) { + const action_trace& creator_trace = trx_context.get_action_trace( trace.creator_action_ordinal ); + return creator_trace.receiver; } return 0; } diff --git a/libraries/chain/include/eosio/chain/apply_context.hpp b/libraries/chain/include/eosio/chain/apply_context.hpp index a28d8696ff0..bf85e843ca3 100644 --- a/libraries/chain/include/eosio/chain/apply_context.hpp +++ b/libraries/chain/include/eosio/chain/apply_context.hpp @@ -572,7 +572,6 @@ class apply_context { uint32_t action_ordinal = 0; bool privileged = false; bool context_free = false; - bool _in_notification = false; // executing 
notification public: generic_index idx64; From 98eb3ac77185ac409c1902de95b9e128e87535bd Mon Sep 17 00:00:00 2001 From: Jeeyong Um Date: Fri, 12 Apr 2019 14:37:04 +0900 Subject: [PATCH 361/680] Make executable names configurable --- programs/cleos/CMakeLists.txt | 5 +++-- programs/cleos/{help_text.cpp => help_text.cpp.in} | 6 +++--- programs/keosd/main.cpp | 2 +- programs/nodeos/config.hpp.in | 1 + programs/nodeos/main.cpp | 12 ++++++------ 5 files changed, 14 insertions(+), 12 deletions(-) rename programs/cleos/{help_text.cpp => help_text.cpp.in} (98%) diff --git a/programs/cleos/CMakeLists.txt b/programs/cleos/CMakeLists.txt index 0d98fdcf63d..0787c5fe937 100644 --- a/programs/cleos/CMakeLists.txt +++ b/programs/cleos/CMakeLists.txt @@ -1,4 +1,5 @@ -add_executable( ${CLI_CLIENT_EXECUTABLE_NAME} main.cpp httpc.cpp help_text.cpp localize.hpp config.hpp CLI11.hpp) +configure_file(help_text.cpp.in help_text.cpp @ONLY) +add_executable( ${CLI_CLIENT_EXECUTABLE_NAME} main.cpp httpc.cpp ${CMAKE_CURRENT_BINARY_DIR}/help_text.cpp localize.hpp config.hpp CLI11.hpp) if( UNIX AND NOT APPLE ) set(rt_library rt ) endif() @@ -32,7 +33,7 @@ set(LOCALEDIR ${CMAKE_INSTALL_PREFIX}/share/locale) set(LOCALEDOMAIN ${CLI_CLIENT_EXECUTABLE_NAME}) configure_file(config.hpp.in config.hpp ESCAPE_QUOTES) -target_include_directories(${CLI_CLIENT_EXECUTABLE_NAME} PUBLIC ${Intl_INCLUDE_DIRS} ${CMAKE_CURRENT_BINARY_DIR}) +target_include_directories(${CLI_CLIENT_EXECUTABLE_NAME} PUBLIC ${Intl_INCLUDE_DIRS} ${CMAKE_CURRENT_BINARY_DIR} ${CMAKE_CURRENT_SOURCE_DIR}) target_link_libraries( ${CLI_CLIENT_EXECUTABLE_NAME} PRIVATE appbase chain_api_plugin producer_plugin chain_plugin http_plugin eosio_chain fc ${CMAKE_DL_LIBS} ${PLATFORM_SPECIFIC_LIBS} ${Intl_LIBRARIES} ) diff --git a/programs/cleos/help_text.cpp b/programs/cleos/help_text.cpp.in similarity index 98% rename from programs/cleos/help_text.cpp rename to programs/cleos/help_text.cpp.in index 0f46f33114f..a133e17cade 100644 --- a/programs/cleos/help_text.cpp +++ b/programs/cleos/help_text.cpp.in @@ -65,7 +65,7 @@ const char* duplicate_key_import_help_text = _("This key is already imported int const char* unknown_abi_table_help_text = _(R"text(The ABI for the code on account "${1}" does not specify table "${2}". Please check the account and table name, and verify that the account has the expected code using: - cleos get code ${1})text"); + @CLI_CLIENT_EXECUTABLE_NAME@ get code ${1})text"); const char* failed_to_find_transaction_text = _("Failed to fetch information for transaction: \033[1m${1}\033[0m from the history plugin\n\n" "\033[32mIf you know the block number which included this transaction you providing it with the \033[2m--block-hint\033[22m option may help\033[0m"); @@ -201,7 +201,7 @@ const char* error_advice_invalid_ref_block_exception = "Ensure that the referen const char* error_advice_tx_duplicate = "You can try embedding eosio nonce action inside your transaction to ensure uniqueness."; const char* error_advice_invalid_action_args_exception = R"=====(Ensure that your arguments follow the contract abi! 
-You can check the contract's abi by using 'cleos get code' command.)====="; +You can check the contract's abi by using '@CLI_CLIENT_EXECUTABLE_NAME@ get code' command.)====="; const char* error_advice_permission_query_exception = "Most likely, the given account/ permission doesn't exist in the blockchain."; const char* error_advice_account_query_exception = "Most likely, the given account doesn't exist in the blockchain."; @@ -211,7 +211,7 @@ const char* error_advice_contract_query_exception = "Most likely, the given con const char* error_advice_tx_irrelevant_sig = "Please remove the unnecessary signature from your transaction!"; const char* error_advice_unsatisfied_authorization = "Ensure that you have the related private keys inside your wallet and your wallet is unlocked."; const char* error_advice_missing_auth_exception = R"=====(Ensure that you have the related authority inside your transaction!; -If you are currently using 'cleos push action' command, try to add the relevant authority using -p option.)====="; +If you are currently using '@CLI_CLIENT_EXECUTABLE_NAME@ push action' command, try to add the relevant authority using -p option.)====="; const char* error_advice_irrelevant_auth_exception = "Please remove the unnecessary authority from your action!"; const char* error_advice_missing_chain_api_plugin_exception = "Ensure that you have \033[2meosio::chain_api_plugin\033[0m\033[32m added to your node's configuration!"; diff --git a/programs/keosd/main.cpp b/programs/keosd/main.cpp index 196b722e5a5..626ef6bd0f8 100644 --- a/programs/keosd/main.cpp +++ b/programs/keosd/main.cpp @@ -48,7 +48,7 @@ int main(int argc, char** argv) if(!app().initialize(argc, argv)) return -1; auto& http = app().get_plugin(); - http.add_handler("/v1/keosd/stop", [](string, string, url_response_callback cb) { cb(200, fc::variant(fc::variant_object())); std::raise(SIGTERM); } ); + http.add_handler("/v1/" + keosd::config::key_store_executable_name + "/stop", [](string, string, url_response_callback cb) { cb(200, fc::variant(fc::variant_object())); std::raise(SIGTERM); } ); app().startup(); app().exec(); } catch (const fc::exception& e) { diff --git a/programs/nodeos/config.hpp.in b/programs/nodeos/config.hpp.in index dbeba72a3d1..821477e3270 100644 --- a/programs/nodeos/config.hpp.in +++ b/programs/nodeos/config.hpp.in @@ -11,6 +11,7 @@ namespace eosio { namespace nodeos { namespace config { constexpr uint64_t version = 0x${nodeos_BUILD_VERSION}; + const string node_executable_name = "${NODE_EXECUTABLE_NAME}"; }}} #endif // CONFIG_HPP_IN diff --git a/programs/nodeos/main.cpp b/programs/nodeos/main.cpp index 7034a03858a..8b3b2d9478c 100644 --- a/programs/nodeos/main.cpp +++ b/programs/nodeos/main.cpp @@ -87,8 +87,8 @@ int main(int argc, char** argv) app().set_version(eosio::nodeos::config::version); auto root = fc::app_path(); - app().set_default_data_dir(root / "eosio/nodeos/data" ); - app().set_default_config_dir(root / "eosio/nodeos/config" ); + app().set_default_data_dir(root / "eosio" / nodeos::config::node_executable_name / "data" ); + app().set_default_config_dir(root / "eosio" / nodeos::config::node_executable_name / "config" ); http_plugin::set_defaults({ .default_unix_socket_path = "", .default_http_port = 8888 @@ -96,10 +96,10 @@ int main(int argc, char** argv) if(!app().initialize(argc, argv)) return INITIALIZE_FAIL; initialize_logging(); - ilog("nodeos version ${ver}", ("ver", app().version_string())); + ilog("${name} version ${ver}", ("name", nodeos::config::node_executable_name)("ver", 
app().version_string())); ilog("eosio root is ${root}", ("root", root.string())); - ilog("nodeos using configuration file ${c}", ("c", app().full_config_file_path().string())); - ilog("nodeos data directory is ${d}", ("d", app().data_dir().string())); + ilog("${name} using configuration file ${c}", ("name", nodeos::config::node_executable_name)("c", app().full_config_file_path().string())); + ilog("${name} data directory is ${d}", ("name", nodeos::config::node_executable_name)("d", app().data_dir().string())); app().startup(); app().exec(); } catch( const extract_genesis_state_exception& e ) { @@ -145,6 +145,6 @@ int main(int argc, char** argv) return OTHER_FAIL; } - ilog("nodeos successfully exiting"); + ilog("${name} successfully exiting", ("name", nodeos::config::node_executable_name)); return SUCCESS; } From 8f3f41d870c212a3295e8d0b51ce18dcae75ebf8 Mon Sep 17 00:00:00 2001 From: Kayan Date: Fri, 12 Apr 2019 18:23:15 +0800 Subject: [PATCH 362/680] add test cases --- unittests/api_tests.cpp | 36 ++++++++++++ .../test-contracts/test_api/test_action.cpp | 55 ++++++++++++++++++ .../test-contracts/test_api/test_api.cpp | 7 ++- .../test-contracts/test_api/test_api.hpp | 3 + .../test-contracts/test_api/test_api.wasm | Bin 69149 -> 69868 bytes 5 files changed, 100 insertions(+), 1 deletion(-) diff --git a/unittests/api_tests.cpp b/unittests/api_tests.cpp index 04d2a69fffb..b1eb0a14da6 100644 --- a/unittests/api_tests.cpp +++ b/unittests/api_tests.cpp @@ -2532,4 +2532,40 @@ BOOST_FIXTURE_TEST_CASE(action_ordinal_failtest3, TESTER) { try { } FC_LOG_AND_RETHROW() } +/************************************************************************************* ++ * get_sender_test test cases ++ *************************************************************************************/ +BOOST_FIXTURE_TEST_CASE(get_sender_test, TESTER) { try { + + produce_blocks(1); + create_account(N(testapi) ); + create_account(N(testapi2), N(testapi), true, true ); + + set_code( N(testapi), contracts::test_api_wasm() ); + produce_blocks(1); + set_code( N(testapi2), contracts::test_api_wasm() ); + produce_blocks(1); + + using uint128_t = eosio::chain::uint128_t; + + uint128_t data = (N(testapi2) | ((uint128_t)(N(testapi)) << 64)); + CALL_TEST_FUNCTION( *this, "test_action", "get_sender_send_inline", fc::raw::pack(data) ); + + data = (N(testapi2) | ((uint128_t)(N(testapi2)) << 64)); + BOOST_CHECK_THROW(CALL_TEST_FUNCTION( *this, "test_action", "get_sender_send_inline", fc::raw::pack(data)), eosio_assert_message_exception); + + data = (N(testapi2) | ((uint128_t)(N(testapi)) << 64)); + CALL_TEST_FUNCTION( *this, "test_action", "get_sender_notify", fc::raw::pack(data) ); + + data = (N(testapi2) | ((uint128_t)(N(testapi2)) << 64)); + BOOST_CHECK_THROW(CALL_TEST_FUNCTION( *this, "test_action", "get_sender_notify", fc::raw::pack(data)), eosio_assert_message_exception); + + data = ((uint128_t)1 | N(testapi2) | ((uint128_t)(N(testapi2)) << 64)); + CALL_TEST_FUNCTION( *this, "test_action", "get_sender_notify", fc::raw::pack(data) ); + + data = ((uint128_t)1 | N(testapi2) | ((uint128_t)(N(testapi)) << 64)); + BOOST_CHECK_THROW(CALL_TEST_FUNCTION( *this, "test_action", "get_sender_notify", fc::raw::pack(data)), eosio_assert_message_exception); + +} FC_LOG_AND_RETHROW() } + BOOST_AUTO_TEST_SUITE_END() diff --git a/unittests/test-contracts/test_api/test_action.cpp b/unittests/test-contracts/test_api/test_action.cpp index e371f336cc0..ae906b620c1 100644 --- a/unittests/test-contracts/test_api/test_action.cpp +++ 
b/unittests/test-contracts/test_api/test_action.cpp @@ -14,6 +14,15 @@ #include "test_api.hpp" +namespace eosio { + namespace internal_use_do_not_use { + extern "C" { + __attribute__((eosio_wasm_import)) + uint64_t get_sender(); + } + } +} + using namespace eosio; void test_action::read_action_normal() { @@ -342,3 +351,49 @@ void test_action::test_action_ordinal_foo(uint64_t receiver, uint64_t code, uint void test_action::test_action_ordinal_bar(uint64_t receiver, uint64_t code, uint64_t action) { print("exec 11"); } + +void test_action::get_sender_send_inline() { + + eosio_assert(internal_use_do_not_use::get_sender() == 0, "assert_sender failed"); + + uint128_t tmp; + read_action_data( &tmp, sizeof(tmp) ); + + uint64_t to_acc = (uint64_t)tmp; + uint64_t sender_acc = (uint64_t)(tmp >> 64); + + eosio::action act1(std::vector(), name(to_acc), + name(WASM_TEST_ACTION("test_action", "assert_sender")), + std::tuple(sender_acc)); + act1.send(); +} + +void test_action::assert_sender() { + uint64_t sender; + read_action_data( &sender, sizeof(sender) ); + eosio_assert(internal_use_do_not_use::get_sender() == sender, "assert_sender failed"); +} + +void test_action::get_sender_notify(uint64_t receiver, uint64_t code, uint64_t action) { + uint128_t tmp; + read_action_data( &tmp, sizeof(tmp) ); + + uint64_t to_acc = ((uint64_t)tmp & 0xfffffffffffffffeull); + uint64_t sender_acc = (uint64_t)(tmp >> 64); + bool send_inline = (tmp & 1); + + if (receiver == code) { // main + eosio_assert(internal_use_do_not_use::get_sender() == 0, "assert_sender failed 1"); + eosio::require_recipient(name(to_acc)); + } else { // in notification + if (!send_inline) { + eosio_assert(internal_use_do_not_use::get_sender() == sender_acc, "assert_sender failed 2"); + } else { + eosio::action act1(std::vector(), name(to_acc), + name(WASM_TEST_ACTION("test_action", "assert_sender")), + std::tuple(sender_acc)); + act1.send(); + } + } +} + diff --git a/unittests/test-contracts/test_api/test_api.cpp b/unittests/test-contracts/test_api/test_api.cpp index 241d4762a00..9679a8e6b77 100644 --- a/unittests/test-contracts/test_api/test_api.cpp +++ b/unittests/test-contracts/test_api/test_api.cpp @@ -41,7 +41,9 @@ extern "C" { } WASM_TEST_HANDLER( test_action, assert_true_cf ); - if ( action != WASM_TEST_ACTION("test_transaction", "stateful_api") && action != WASM_TEST_ACTION("test_transaction", "context_free_api") ) + if ( action != WASM_TEST_ACTION("test_transaction", "stateful_api") && + action != WASM_TEST_ACTION("test_transaction", "context_free_api") && + action != WASM_TEST_ACTION("test_action", "assert_sender")) require_auth(code); //test_types @@ -70,6 +72,9 @@ extern "C" { WASM_TEST_HANDLER_EX( test_action, test_action_ordinal4 ); WASM_TEST_HANDLER_EX( test_action, test_action_ordinal_foo ); WASM_TEST_HANDLER_EX( test_action, test_action_ordinal_bar ); + WASM_TEST_HANDLER ( test_action, get_sender_send_inline ); + WASM_TEST_HANDLER ( test_action, assert_sender ); + WASM_TEST_HANDLER_EX( test_action, get_sender_notify ); // test named actions // We enforce action name matches action data type name, so name mangling will not work for these tests. 
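A quick note on the packed payload used by these tests. Both api_tests.cpp and test_action.cpp shuttle the test arguments through a single uint128_t. The layout implied by the masks and shifts in get_sender_notify() is: the high 64 bits hold the account that get_sender() is expected to report, the low 64 bits hold the account to notify or send to, and bit 0 of the low word doubles as the "send a further inline action from the notification" flag. The following stand-alone sketch is an illustration, not part of the patch; it uses the compiler's unsigned __int128 extension in place of eosio::chain::uint128_t.

#include <cstdint>

using uint128_t = unsigned __int128; // stand-in for eosio::chain::uint128_t

struct sender_payload {
   uint64_t to_acc;      // account to notify / send the inline action to
   uint64_t sender_acc;  // account get_sender() is expected to report
   bool     send_inline; // from the notification, send a further inline action
};

// Mirrors the encoding used in the get_sender_test cases: sender in the
// high 64 bits, target in the low 64 bits, flag in bit 0.
inline uint128_t pack( const sender_payload& p ) {
   return ( uint128_t(p.sender_acc) << 64 )
        | ( p.to_acc & 0xfffffffffffffffeull )
        | ( p.send_inline ? 1u : 0u );
}

// Mirrors the decoding in test_action::get_sender_notify().
inline sender_payload unpack( uint128_t tmp ) {
   return { uint64_t(tmp) & 0xfffffffffffffffeull,   // to_acc, bit 0 cleared
            uint64_t(tmp >> 64),                     // sender_acc
            (tmp & 1) != 0 };                        // send_inline
}
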
diff --git a/unittests/test-contracts/test_api/test_api.hpp b/unittests/test-contracts/test_api/test_api.hpp
index bbcf9965352..6de8c87149d 100644
--- a/unittests/test-contracts/test_api/test_api.hpp
+++ b/unittests/test-contracts/test_api/test_api.hpp
@@ -75,6 +75,9 @@ struct test_action {
    static void test_action_ordinal4(uint64_t receiver, uint64_t code, uint64_t action);
    static void test_action_ordinal_foo(uint64_t receiver, uint64_t code, uint64_t action);
    static void test_action_ordinal_bar(uint64_t receiver, uint64_t code, uint64_t action);
+   static void get_sender_send_inline();
+   static void assert_sender();
+   static void get_sender_notify(uint64_t receiver, uint64_t code, uint64_t action);
 };
 
 struct test_db {
diff --git a/unittests/test-contracts/test_api/test_api.wasm b/unittests/test-contracts/test_api/test_api.wasm
index dc9a7125d82e9770c5724b76b05ef779bc39d1ba..1ff6ec5b96155d4788de43e71ca1586b67657436 100755
GIT binary patch
delta 13047
[base85-encoded binary delta omitted]
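Taken together with the apply_context change in PATCH 360/680, these tests pin down the intended get_sender() semantics: it reports the receiver of the action that created the current action, whether the current contract is the target of an inline action or a notified recipient, and it reports 0 when the action came directly from the transaction. Below is a minimal contract-side sketch of that rule, assuming eosio.cdt-style headers and importing the intrinsic the same way test_action.cpp does; the sender_guard contract and the "trusted" account are hypothetical.

#include <eosio/eosio.hpp>

extern "C" {
   // Host intrinsic, imported as in test_action.cpp above.
   __attribute__((eosio_wasm_import))
   uint64_t get_sender();
}

class [[eosio::contract]] sender_guard : public eosio::contract {
public:
   using eosio::contract::contract;

   // Hypothetical action: accept only actions created by the "trusted"_n
   // contract, i.e. require a non-zero sender equal to that account.
   [[eosio::action]]
   void guarded() {
      eosio::name sender{ get_sender() };
      eosio::check( sender != eosio::name{}, "must be invoked inline, not directly from a transaction" );
      eosio::check( sender == "trusted"_n, "unexpected sender contract" );
   }
};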