Skip to content
This repository has been archived by the owner on Aug 2, 2022. It is now read-only.

Commit

Permalink
Merge pull request #5943 from EOSIO/release/1.3.x
Browse files Browse the repository at this point in the history
Release 1.3.2
  • Loading branch information
b1bart authored Oct 9, 2018
2 parents 8f0f54c + 76635ba commit 1e9ca55
Show file tree
Hide file tree
Showing 8 changed files with 38 additions and 19 deletions.
2 changes: 1 addition & 1 deletion CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -27,7 +27,7 @@ set( CXX_STANDARD_REQUIRED ON)

set(VERSION_MAJOR 1)
set(VERSION_MINOR 3)
set(VERSION_PATCH 1)
set(VERSION_PATCH 2)

set( CLI_CLIENT_EXECUTABLE_NAME cleos )
set( NODE_EXECUTABLE_NAME nodeos )
Expand Down
4 changes: 2 additions & 2 deletions Docker/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -20,10 +20,10 @@ cd eos/Docker
docker build . -t eosio/eos
```

The above will build off the most recent commit to the master branch by default. If you would like to target a specific branch/tag, you may use a build argument. For example, if you wished to generate a docker image based off of the v1.3.1 tag, you could do the following:
The above will build off the most recent commit to the master branch by default. If you would like to target a specific branch/tag, you may use a build argument. For example, if you wished to generate a docker image based off of the v1.3.2 tag, you could do the following:

```bash
docker build -t eosio/eos:v1.3.1 --build-arg branch=v1.3.1 .
docker build -t eosio/eos:v1.3.2 --build-arg branch=v1.3.2 .
```

By default, the symbol in eosio.system is set to SYS. You can override this using the symbol argument while building the docker image.
Expand Down
12 changes: 6 additions & 6 deletions libraries/chain/abi_serializer.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -118,7 +118,7 @@ namespace eosio { namespace chain {
structs[st.name] = st;

for( const auto& td : abi.types ) {
EOS_ASSERT(_is_type(td.type, 0, deadline, max_serialization_time), invalid_type_inside_abi, "invalid type", ("type",td.type));
EOS_ASSERT(_is_type(td.type, 0, deadline, max_serialization_time), invalid_type_inside_abi, "invalid type ${type}", ("type",td.type));
EOS_ASSERT(!_is_type(td.new_type_name, 0, deadline, max_serialization_time), duplicate_abi_type_def_exception, "type already exists", ("new_type_name",td.new_type_name));
typedefs[td.new_type_name] = td.type;
}
Expand Down Expand Up @@ -226,7 +226,7 @@ namespace eosio { namespace chain {
}
} FC_CAPTURE_AND_RETHROW( (t) ) }
for( const auto& t : typedefs ) { try {
EOS_ASSERT(_is_type(t.second, 0, deadline, max_serialization_time), invalid_type_inside_abi, "", ("type",t.second) );
EOS_ASSERT(_is_type(t.second, 0, deadline, max_serialization_time), invalid_type_inside_abi, "${type}", ("type",t.second) );
} FC_CAPTURE_AND_RETHROW( (t) ) }
for( const auto& s : structs ) { try {
if( s.second.base != type_name() ) {
Expand All @@ -242,23 +242,23 @@ namespace eosio { namespace chain {
}
for( const auto& field : s.second.fields ) { try {
EOS_ASSERT( fc::time_point::now() < deadline, abi_serialization_deadline_exception, "serialization time limit ${t}us exceeded", ("t", max_serialization_time) );
EOS_ASSERT(_is_type(_remove_bin_extension(field.type), 0, deadline, max_serialization_time), invalid_type_inside_abi, "", ("type",field.type) );
EOS_ASSERT(_is_type(_remove_bin_extension(field.type), 0, deadline, max_serialization_time), invalid_type_inside_abi, "${type}", ("type",field.type) );
} FC_CAPTURE_AND_RETHROW( (field) ) }
} FC_CAPTURE_AND_RETHROW( (s) ) }
for( const auto& s : variants ) { try {
for( const auto& type : s.second.types ) { try {
EOS_ASSERT( fc::time_point::now() < deadline, abi_serialization_deadline_exception, "serialization time limit ${t}us exceeded", ("t", max_serialization_time) );
EOS_ASSERT(_is_type(type, 0, deadline, max_serialization_time), invalid_type_inside_abi, "", ("type",type) );
EOS_ASSERT(_is_type(type, 0, deadline, max_serialization_time), invalid_type_inside_abi, "${type}", ("type",type) );
} FC_CAPTURE_AND_RETHROW( (type) ) }
} FC_CAPTURE_AND_RETHROW( (s) ) }
for( const auto& a : actions ) { try {
EOS_ASSERT( fc::time_point::now() < deadline, abi_serialization_deadline_exception, "serialization time limit ${t}us exceeded", ("t", max_serialization_time) );
EOS_ASSERT(_is_type(a.second, 0, deadline, max_serialization_time), invalid_type_inside_abi, "", ("type",a.second) );
EOS_ASSERT(_is_type(a.second, 0, deadline, max_serialization_time), invalid_type_inside_abi, "${type}", ("type",a.second) );
} FC_CAPTURE_AND_RETHROW( (a) ) }

for( const auto& t : tables ) { try {
EOS_ASSERT( fc::time_point::now() < deadline, abi_serialization_deadline_exception, "serialization time limit ${t}us exceeded", ("t", max_serialization_time) );
EOS_ASSERT(_is_type(t.second, 0, deadline, max_serialization_time), invalid_type_inside_abi, "", ("type",t.second) );
EOS_ASSERT(_is_type(t.second, 0, deadline, max_serialization_time), invalid_type_inside_abi, "${type}", ("type",t.second) );
} FC_CAPTURE_AND_RETHROW( (t) ) }
}

Expand Down
1 change: 1 addition & 0 deletions plugins/COMMUNITY.md
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,7 @@ Third parties are encouraged to make pull requests to this file (`develop` branc
| Kafka | https://github.com/TP-Lab/kafka_plugin |
| SQL | https://github.com/asiniscalchi/eosio_sql_plugin |
| ElasticSearch | https://github.com/EOSLaoMao/elasticsearch_plugin |
| ZMQ / history | https://github.com/cc32d9/eos_zmq_plugin |

## DISCLAIMER:

Expand Down
11 changes: 9 additions & 2 deletions plugins/chain_plugin/chain_plugin.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -28,6 +28,7 @@
#include <fc/io/json.hpp>
#include <fc/variant.hpp>
#include <signal.h>
#include <cstdlib>

namespace eosio {

Expand Down Expand Up @@ -939,6 +940,12 @@ void chain_plugin::handle_guard_exception(const chain::guard_exception& e) const
app().quit();
}

// Last-resort handler for chainbase shared-memory exhaustion (boost::interprocess::bad_alloc):
// log remediation advice, then terminate the process immediately.
// std::_Exit terminates without running destructors or atexit handlers — deliberate here,
// since the database state is unusable and any further cleanup could itself fail to allocate.
void chain_plugin::handle_db_exhaustion() {
elog("database memory exhausted: increase chain-state-db-size-mb and/or reversible-blocks-db-size-mb");
//return 1 -- exit status 1 is what programs/nodeos/main.cpp considers "BAD_ALLOC"
std::_Exit(1);
}

namespace chain_apis {

const string read_only::KEYi64 = "i64";
Expand Down Expand Up @@ -1440,7 +1447,7 @@ void read_write::push_block(const read_write::push_block_params& params, next_fu
app().get_method<incoming::methods::block_sync>()(std::make_shared<signed_block>(params));
next(read_write::push_block_results{});
} catch ( boost::interprocess::bad_alloc& ) {
raise(SIGUSR1);
chain_plugin::handle_db_exhaustion();
} CATCH_AND_CALL(next);
}

Expand Down Expand Up @@ -1475,7 +1482,7 @@ void read_write::push_transaction(const read_write::push_transaction_params& par


} catch ( boost::interprocess::bad_alloc& ) {
raise(SIGUSR1);
chain_plugin::handle_db_exhaustion();
} CATCH_AND_CALL(next);
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -648,6 +648,8 @@ class chain_plugin : public plugin<chain_plugin> {
fc::microseconds get_abi_serializer_max_time() const;

void handle_guard_exception(const chain::guard_exception& e) const;

static void handle_db_exhaustion();
private:
void log_guard_exception(const chain::guard_exception& e) const;

Expand Down
16 changes: 8 additions & 8 deletions plugins/producer_plugin/producer_plugin.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -311,7 +311,7 @@ class producer_plugin_impl : public std::enable_shared_from_this<producer_plugin
elog((e.to_detail_string()));
except = true;
} catch ( boost::interprocess::bad_alloc& ) {
raise(SIGUSR1);
chain_plugin::handle_db_exhaustion();
return;
}

Expand Down Expand Up @@ -349,7 +349,7 @@ class producer_plugin_impl : public std::enable_shared_from_this<producer_plugin
if (response.contains<fc::exception_ptr>()) {
_transaction_ack_channel.publish(std::pair<fc::exception_ptr, packed_transaction_ptr>(response.get<fc::exception_ptr>(), trx));
if (_pending_block_mode == pending_block_mode::producing) {
fc_dlog(_trx_trace_log, "[TRX_TRACE] Block ${block num} for producer ${prod} is REJECTING tx: ${txid} : ${why} ",
fc_dlog(_trx_trace_log, "[TRX_TRACE] Block ${block_num} for producer ${prod} is REJECTING tx: ${txid} : ${why} ",
("block_num", chain.head_block_num() + 1)
("prod", chain.pending_block_state()->header.producer)
("txid", trx->id())
Expand All @@ -362,7 +362,7 @@ class producer_plugin_impl : public std::enable_shared_from_this<producer_plugin
} else {
_transaction_ack_channel.publish(std::pair<fc::exception_ptr, packed_transaction_ptr>(nullptr, trx));
if (_pending_block_mode == pending_block_mode::producing) {
fc_dlog(_trx_trace_log, "[TRX_TRACE] Block ${block num} for producer ${prod} is ACCEPTING tx: ${txid}",
fc_dlog(_trx_trace_log, "[TRX_TRACE] Block ${block_num} for producer ${prod} is ACCEPTING tx: ${txid}",
("block_num", chain.head_block_num() + 1)
("prod", chain.pending_block_state()->header.producer)
("txid", trx->id()));
Expand Down Expand Up @@ -397,7 +397,7 @@ class producer_plugin_impl : public std::enable_shared_from_this<producer_plugin
if (failure_is_subjective(*trace->except, deadline_is_subjective)) {
_pending_incoming_transactions.emplace_back(trx, persist_until_expired, next);
if (_pending_block_mode == pending_block_mode::producing) {
fc_dlog(_trx_trace_log, "[TRX_TRACE] Block ${block num} for producer ${prod} COULD NOT FIT, tx: ${txid} RETRYING ",
fc_dlog(_trx_trace_log, "[TRX_TRACE] Block ${block_num} for producer ${prod} COULD NOT FIT, tx: ${txid} RETRYING ",
("block_num", chain.head_block_num() + 1)
("prod", chain.pending_block_state()->header.producer)
("txid", trx->id()));
Expand All @@ -421,7 +421,7 @@ class producer_plugin_impl : public std::enable_shared_from_this<producer_plugin
} catch ( const guard_exception& e ) {
app().get_plugin<chain_plugin>().handle_guard_exception(e);
} catch ( boost::interprocess::bad_alloc& ) {
raise(SIGUSR1);
chain_plugin::handle_db_exhaustion();
} CATCH_AND_CALL(send_response);
}

Expand Down Expand Up @@ -1027,7 +1027,7 @@ producer_plugin_impl::start_block_result producer_plugin_impl::start_block(bool
while(!persisted_by_expiry.empty() && persisted_by_expiry.begin()->expiry <= pbs->header.timestamp.to_time_point()) {
auto const& txid = persisted_by_expiry.begin()->trx_id;
if (_pending_block_mode == pending_block_mode::producing) {
fc_dlog(_trx_trace_log, "[TRX_TRACE] Block ${block num} for producer ${prod} is EXPIRING PERSISTED tx: ${txid}",
fc_dlog(_trx_trace_log, "[TRX_TRACE] Block ${block_num} for producer ${prod} is EXPIRING PERSISTED tx: ${txid}",
("block_num", chain.head_block_num() + 1)
("prod", chain.pending_block_state()->header.producer)
("txid", txid));
Expand Down Expand Up @@ -1240,7 +1240,7 @@ producer_plugin_impl::start_block_result producer_plugin_impl::start_block(bool
}

} catch ( boost::interprocess::bad_alloc& ) {
raise(SIGUSR1);
chain_plugin::handle_db_exhaustion();
return start_block_result::failed;
}

Expand Down Expand Up @@ -1363,7 +1363,7 @@ bool producer_plugin_impl::maybe_produce_block() {
app().get_plugin<chain_plugin>().handle_guard_exception(e);
return false;
} catch ( boost::interprocess::bad_alloc& ) {
raise(SIGUSR1);
chain_plugin::handle_db_exhaustion();
return false;
} FC_LOG_AND_DROP();

Expand Down
9 changes: 9 additions & 0 deletions tests/nodeos_under_min_avail_ram.py
Original file line number Diff line number Diff line change
Expand Up @@ -48,6 +48,7 @@ def setName(self, num):
Print("NamedAccounts Name for %d is %s" % (temp, retStr))
return retStr


###############################################################
# nodeos_voting_test
# --dump-error-details <Upon error print etc/eosio/node_*/config.ini and var/lib/node_*/stderr.log to stdout>
Expand Down Expand Up @@ -155,6 +156,7 @@ def setName(self, num):
count=0
while keepProcessing:
numAmount+=1
timeOutCount=0
for fromIndex in range(namedAccounts.numAccounts):
count+=1
toIndex=fromIndex+1
Expand All @@ -167,8 +169,15 @@ def setName(self, num):
try:
trans=nodes[0].pushMessage(contract, action, data, opts)
if trans is None or not trans[0]:
timeOutCount+=1
if timeOutCount>=3:
Print("Failed to push create action to eosio contract for %d consecutive times, looks like nodeos already exited." % (timeOutCount))
keepProcessing=False
break
Print("Failed to push create action to eosio contract. sleep for 60 seconds")
time.sleep(60)
else:
timeOutCount=0
time.sleep(1)
except TypeError as ex:
keepProcessing=False
Expand Down

0 comments on commit 1e9ca55

Please sign in to comment.