Skip to content

Commit

Permalink
Merge pull request EOSIO#41 from EOSIO/master
Browse files Browse the repository at this point in the history
Merge from eosio master
  • Loading branch information
jchung00 authored Jun 27, 2018
2 parents 94aff61 + 90fefdd commit 1509de2
Show file tree
Hide file tree
Showing 19 changed files with 525 additions and 169 deletions.
1 change: 1 addition & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -63,6 +63,7 @@ tests/plugin_test
unittests/unit_test

doxygen
eos.doxygen

wallet.json
witness_node_data_dir
Expand Down
2 changes: 1 addition & 1 deletion CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,7 @@ set( CXX_STANDARD_REQUIRED ON)

set(VERSION_MAJOR 1)
set(VERSION_MINOR 0)
set(VERSION_PATCH 6)
set(VERSION_PATCH 7)

set( CLI_CLIENT_EXECUTABLE_NAME cleos )
set( GUI_CLIENT_EXECUTABLE_NAME eosio )
Expand Down
6 changes: 3 additions & 3 deletions Docker/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -20,10 +20,10 @@ cd eos/Docker
docker build . -t eosio/eos
```

The above will build off the most recent commit to the master branch by default. If you would like to target a specific branch/tag, you may use a build argument. For example, if you wished to generate a docker image based off of the v1.0.6 tag, you could do the following:
The above will build off the most recent commit to the master branch by default. If you would like to target a specific branch/tag, you may use a build argument. For example, if you wished to generate a docker image based off of the v1.0.7 tag, you could do the following:

```bash
docker build -t eosio/eos:v1.0.6 --build-arg branch=v1.0.6 .
docker build -t eosio/eos:v1.0.7 --build-arg branch=v1.0.7 .
```

By default, the symbol in eosio.system is set to SYS. You can override this using the symbol argument while building the docker image.
Expand Down Expand Up @@ -181,7 +181,7 @@ Note: if you want to use the mongo db plugin, you have to enable it in your `dat

```
# pull images
docker pull eosio/eos:v1.0.6
docker pull eosio/eos:v1.0.7
# create volume
docker volume create --name=nodeos-data-volume
Expand Down
4 changes: 2 additions & 2 deletions Docker/docker-compose-eosio1.0.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@ version: "3"

services:
nodeosd:
image: eosio/eos:v1.0.6
image: eosio/eos:v1.0.7
command: /opt/eosio/bin/nodeosd.sh --data-dir /opt/eosio/bin/data-dir -e
hostname: nodeosd
ports:
Expand All @@ -14,7 +14,7 @@ services:
- nodeos-data-volume:/opt/eosio/bin/data-dir

keosd:
image: eosio/eos:v1.0.6
image: eosio/eos:v1.0.7
command: /opt/eosio/bin/keosd --wallet-dir /opt/eosio/bin/data-dir --http-server-address=127.0.0.1:8900
hostname: keosd
links:
Expand Down
57 changes: 39 additions & 18 deletions contracts/eosio.system/delegate_bandwidth.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -154,9 +154,10 @@ namespace eosiosystem {


/**
* While buying ram uses the current market price according to the bancor-algorithm, selling ram only
* refunds the purchase price to the account. In this way there is no profit to be made through buying
* and selling ram.
* The system contract now buys and sells RAM allocations at prevailing market prices.
* This may result in traders buying RAM today in anticipation of potential shortages
* tomorrow. Overall this will result in the market balancing the supply and demand
* for RAM over time.
*/
void system_contract::sellram( account_name account, int64_t bytes ) {
require_auth( account );
Expand All @@ -174,6 +175,8 @@ namespace eosiosystem {
tokens_out = es.convert( asset(bytes,S(0,RAM)), CORE_SYMBOL);
});

eosio_assert( tokens_out.amount > 1, "token amount received from selling ram is too low" );

_gstate.total_ram_bytes_reserved -= static_cast<decltype(_gstate.total_ram_bytes_reserved)>(bytes); // bytes > 0 is asserted above
_gstate.total_ram_stake -= tokens_out.amount;

Expand All @@ -187,7 +190,10 @@ namespace eosiosystem {

INLINE_ACTION_SENDER(eosio::token, transfer)( N(eosio.token), {N(eosio.ram),N(active)},
{ N(eosio.ram), account, asset(tokens_out), std::string("sell ram") } );
auto fee = tokens_out.amount / 200;

auto fee = ( tokens_out.amount + 199 ) / 200; /// .5% fee (round up)
// since tokens_out.amount was asserted to be at least 2 earlier, fee < tokens_out.amount

if( fee > 0 ) {
INLINE_ACTION_SENDER(eosio::token, transfer)( N(eosio.token), {account,N(active)},
{ account, N(eosio.ramfee), asset(fee), std::string("sell ram fee") } );
Expand All @@ -207,6 +213,9 @@ namespace eosiosystem {
{
require_auth( from );
eosio_assert( stake_net_delta != asset(0) || stake_cpu_delta != asset(0), "should stake non-zero amount" );
eosio_assert( std::abs( (stake_net_delta + stake_cpu_delta).amount )
>= std::max( std::abs( stake_net_delta.amount ), std::abs( stake_cpu_delta.amount ) ),
"net and cpu deltas cannot be opposite signs" );

account_name source_stake_from = from;
if ( transfer ) {
Expand Down Expand Up @@ -273,8 +282,16 @@ namespace eosiosystem {
auto net_balance = stake_net_delta;
auto cpu_balance = stake_cpu_delta;
bool need_deferred_trx = false;
if ( req != refunds_tbl.end() ) { //need to update refund
refunds_tbl.modify( req, 0, [&]( refund_request& r ) {


// net and cpu are same sign by assertions in delegatebw and undelegatebw
// redundant assertion also at start of changebw to protect against misuse of changebw
bool is_undelegating = (net_balance.amount + cpu_balance.amount ) < 0;
bool is_delegating_to_self = (!transfer && from == receiver);

if( is_delegating_to_self || is_undelegating ) {
if ( req != refunds_tbl.end() ) { //need to update refund
refunds_tbl.modify( req, 0, [&]( refund_request& r ) {
if ( net_balance < asset(0) || cpu_balance < asset(0) ) {
r.request_time = now();
}
Expand All @@ -293,17 +310,19 @@ namespace eosiosystem {
cpu_balance = asset(0);
}
});
eosio_assert( asset(0) <= req->net_amount, "negative net refund amount" ); //should never happen
eosio_assert( asset(0) <= req->cpu_amount, "negative cpu refund amount" ); //should never happen

if ( req->net_amount == asset(0) && req->cpu_amount == asset(0) ) {
refunds_tbl.erase( req );
need_deferred_trx = false;
} else {
need_deferred_trx = true;
}
} else if ( net_balance < asset(0) || cpu_balance < asset(0) ) { //need to create refund
refunds_tbl.emplace( from, [&]( refund_request& r ) {
eosio_assert( asset(0) <= req->net_amount, "negative net refund amount" ); //should never happen
eosio_assert( asset(0) <= req->cpu_amount, "negative cpu refund amount" ); //should never happen

if ( req->net_amount == asset(0) && req->cpu_amount == asset(0) ) {
refunds_tbl.erase( req );
need_deferred_trx = false;
} else {
need_deferred_trx = true;
}

} else if ( net_balance < asset(0) || cpu_balance < asset(0) ) { //need to create refund
refunds_tbl.emplace( from, [&]( refund_request& r ) {
r.owner = from;
if ( net_balance < asset(0) ) {
r.net_amount = -net_balance;
Expand All @@ -315,8 +334,9 @@ namespace eosiosystem {
} // else r.cpu_amount = 0 by default constructor
r.request_time = now();
});
need_deferred_trx = true;
} // else stake increase requested with no existing row in refunds_tbl -> nothing to do with refunds_tbl
need_deferred_trx = true;
} // else stake increase requested with no existing row in refunds_tbl -> nothing to do with refunds_tbl
} /// end if is_delegating_to_self || is_undelegating

if ( need_deferred_trx ) {
eosio::transaction out;
Expand Down Expand Up @@ -367,6 +387,7 @@ namespace eosiosystem {
eosio_assert( stake_cpu_quantity >= asset(0), "must stake a positive amount" );
eosio_assert( stake_net_quantity >= asset(0), "must stake a positive amount" );
eosio_assert( stake_net_quantity + stake_cpu_quantity > asset(0), "must stake a positive amount" );
eosio_assert( !transfer || from != receiver, "cannot use transfer flag if delegating to self" );

changebw( from, receiver, stake_net_quantity, stake_cpu_quantity, transfer);
} // delegatebw
Expand Down
3 changes: 3 additions & 0 deletions contracts/eosio.system/eosio.system.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -97,6 +97,9 @@ namespace eosiosystem {
void system_contract::bidname( account_name bidder, account_name newname, asset bid ) {
require_auth( bidder );
eosio_assert( eosio::name_suffix(newname) == newname, "you can only bid on top-level suffix" );
eosio_assert( newname != 0, "the empty name is not a valid account name to bid on" );
eosio_assert( (newname & 0xFull) == 0, "13 character names are not valid account names to bid on" );
eosio_assert( (newname & 0x1F0ull) == 0, "accounts with 12 character names and no dots can be created without bidding required" );
eosio_assert( !is_account( newname ), "account already exists" );
eosio_assert( bid.symbol == asset().symbol, "asset must be system token" );
eosio_assert( bid.amount > 0, "insufficient bid" );
Expand Down
73 changes: 47 additions & 26 deletions libraries/chain/abi_serializer.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,9 @@ using namespace boost;

namespace eosio { namespace chain {

const size_t abi_serializer::max_recursion_depth;
fc::microseconds abi_serializer::max_serialization_time = fc::microseconds(15*1000); // 15 ms

using boost::algorithm::ends_with;
using std::string;

Expand Down Expand Up @@ -238,21 +241,26 @@ namespace eosio { namespace chain {
return type;
}

void abi_serializer::_binary_to_variant(const type_name& type, fc::datastream<const char *>& stream,
fc::mutable_variant_object& obj, size_t recursion_depth)const {
FC_ASSERT( ++recursion_depth < max_recursion_depth, "recursive definition, max_recursion_depth" );
void abi_serializer::_binary_to_variant( const type_name& type, fc::datastream<const char *>& stream,
fc::mutable_variant_object& obj, size_t recursion_depth,
const fc::time_point& deadline )const
{
FC_ASSERT( ++recursion_depth < max_recursion_depth, "recursive definition, max_recursion_depth ${r} ", ("r", max_recursion_depth) );
FC_ASSERT( fc::time_point::now() < deadline, "serialization time limit ${t}us exceeded", ("t", max_serialization_time) );
const auto& st = get_struct(type);
if( st.base != type_name() ) {
_binary_to_variant(resolve_type(st.base), stream, obj, recursion_depth);
_binary_to_variant(resolve_type(st.base), stream, obj, recursion_depth, deadline);
}
for( const auto& field : st.fields ) {
obj( field.name, _binary_to_variant(resolve_type(field.type), stream, recursion_depth) );
obj( field.name, _binary_to_variant(resolve_type(field.type), stream, recursion_depth, deadline) );
}
}

fc::variant abi_serializer::_binary_to_variant(const type_name& type, fc::datastream<const char *>& stream, size_t recursion_depth)const
fc::variant abi_serializer::_binary_to_variant( const type_name& type, fc::datastream<const char *>& stream,
size_t recursion_depth, const fc::time_point& deadline )const
{
FC_ASSERT( ++recursion_depth < max_recursion_depth, "recursive definition, max_recursion_depth" );
FC_ASSERT( ++recursion_depth < max_recursion_depth, "recursive definition, max_recursion_depth ${r} ", ("r", max_recursion_depth) );
FC_ASSERT( fc::time_point::now() < deadline, "serialization time limit ${t}us exceeded", ("t", max_serialization_time) );
type_name rtype = resolve_type(type);
auto ftype = fundamental_type(rtype);
auto btype = built_in_types.find(ftype );
Expand All @@ -263,31 +271,41 @@ namespace eosio { namespace chain {
fc::unsigned_int size;
fc::raw::unpack(stream, size);
vector<fc::variant> vars;
vars.resize(size);
for (auto& var : vars) {
var = _binary_to_variant(ftype, stream, recursion_depth);
for( decltype(size.value) i = 0; i < size; ++i ) {
auto v = _binary_to_variant(ftype, stream, recursion_depth, deadline);
FC_ASSERT( !v.is_null(), "Invalid packed array" );
vars.emplace_back(std::move(v));
}
FC_ASSERT( vars.size() == size.value,
"packed size does not match unpacked array size, packed size ${p} actual size ${a}",
("p", size)("a", vars.size()) );
return fc::variant( std::move(vars) );
} else if ( is_optional(rtype) ) {
char flag;
fc::raw::unpack(stream, flag);
return flag ? _binary_to_variant(ftype, stream, recursion_depth) : fc::variant();
return flag ? _binary_to_variant(ftype, stream, recursion_depth, deadline) : fc::variant();
}

fc::mutable_variant_object mvo;
_binary_to_variant(rtype, stream, mvo, recursion_depth);
_binary_to_variant(rtype, stream, mvo, recursion_depth, deadline);
FC_ASSERT( mvo.size() > 0, "Unable to unpack stream ${type}", ("type", type) );
return fc::variant( std::move(mvo) );
}

fc::variant abi_serializer::_binary_to_variant(const type_name& type, const bytes& binary, size_t recursion_depth)const{
FC_ASSERT( ++recursion_depth < max_recursion_depth, "recursive definition, max_recursion_depth" );
fc::variant abi_serializer::_binary_to_variant( const type_name& type, const bytes& binary,
size_t recursion_depth, const fc::time_point& deadline )const
{
FC_ASSERT( ++recursion_depth < max_recursion_depth, "recursive definition, max_recursion_depth ${r} ", ("r", max_recursion_depth) );
FC_ASSERT( fc::time_point::now() < deadline, "serialization time limit ${t}us exceeded", ("t", max_serialization_time) );
fc::datastream<const char*> ds( binary.data(), binary.size() );
return _binary_to_variant(type, ds, recursion_depth);
return _binary_to_variant(type, ds, recursion_depth, deadline);
}

void abi_serializer::_variant_to_binary(const type_name& type, const fc::variant& var, fc::datastream<char *>& ds, size_t recursion_depth)const
void abi_serializer::_variant_to_binary( const type_name& type, const fc::variant& var, fc::datastream<char *>& ds,
size_t recursion_depth, const fc::time_point& deadline )const
{ try {
FC_ASSERT( ++recursion_depth < max_recursion_depth, "recursive definition, max_recursion_depth" );
FC_ASSERT( ++recursion_depth < max_recursion_depth, "recursive definition, max_recursion_depth ${r} ", ("r", max_recursion_depth) );
FC_ASSERT( fc::time_point::now() < deadline, "serialization time limit ${t}us exceeded", ("t", max_serialization_time) );
auto rtype = resolve_type(type);

auto btype = built_in_types.find(fundamental_type(rtype));
Expand All @@ -297,7 +315,7 @@ namespace eosio { namespace chain {
vector<fc::variant> vars = var.get_array();
fc::raw::pack(ds, (fc::unsigned_int)vars.size());
for (const auto& var : vars) {
_variant_to_binary(fundamental_type(rtype), var, ds, recursion_depth);
_variant_to_binary(fundamental_type(rtype), var, ds, recursion_depth, deadline);
}
} else {
const auto& st = get_struct(rtype);
Expand All @@ -306,14 +324,14 @@ namespace eosio { namespace chain {
const auto& vo = var.get_object();

if( st.base != type_name() ) {
_variant_to_binary(resolve_type(st.base), var, ds, recursion_depth);
_variant_to_binary(resolve_type(st.base), var, ds, recursion_depth, deadline);
}
for( const auto& field : st.fields ) {
if( vo.contains( string(field.name).c_str() ) ) {
_variant_to_binary(field.type, vo[field.name], ds, recursion_depth);
_variant_to_binary(field.type, vo[field.name], ds, recursion_depth, deadline);
}
else {
_variant_to_binary(field.type, fc::variant(), ds, recursion_depth);
_variant_to_binary(field.type, fc::variant(), ds, recursion_depth, deadline);
/// TODO: default construct field and write it out
FC_THROW( "Missing '${f}' in variant object", ("f",field.name) );
}
Expand All @@ -330,25 +348,28 @@ namespace eosio { namespace chain {
if (va.size() > 0) {
for( const auto& field : st.fields ) {
if( va.size() > i )
_variant_to_binary(field.type, va[i], ds, recursion_depth);
_variant_to_binary(field.type, va[i], ds, recursion_depth, deadline);
else
_variant_to_binary(field.type, fc::variant(), ds, recursion_depth);
_variant_to_binary(field.type, fc::variant(), ds, recursion_depth, deadline);
++i;
}
}
}
}
} FC_CAPTURE_AND_RETHROW( (type)(var) ) }

bytes abi_serializer::_variant_to_binary(const type_name& type, const fc::variant& var, size_t recursion_depth)const { try {
FC_ASSERT( ++recursion_depth < max_recursion_depth, "recursive definition, max_recursion_depth" );
bytes abi_serializer::_variant_to_binary( const type_name& type, const fc::variant& var,
size_t recursion_depth, const fc::time_point& deadline )const
{ try {
FC_ASSERT( ++recursion_depth < max_recursion_depth, "recursive definition, max_recursion_depth ${r} ", ("r", max_recursion_depth) );
FC_ASSERT( fc::time_point::now() < deadline, "serialization time limit ${t}us exceeded", ("t", max_serialization_time) );
if( !is_type(type) ) {
return var.as<bytes>();
}

bytes temp( 1024*1024 );
fc::datastream<char*> ds(temp.data(), temp.size() );
_variant_to_binary(type, var, ds, recursion_depth);
_variant_to_binary(type, var, ds, recursion_depth, deadline);
temp.resize(ds.tellp());
return temp;
} FC_CAPTURE_AND_RETHROW( (type)(var) ) }
Expand Down
12 changes: 11 additions & 1 deletion libraries/chain/controller.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -57,6 +57,7 @@ struct controller_impl {
controller::config conf;
chain_id_type chain_id;
bool replaying = false;
bool in_trx_requiring_checks = false; ///< if true, checks that are normally skipped on replay (e.g. auth checks) cannot be skipped

typedef pair<scope_name,action_name> handler_key;
map< account_name, map<handler_key, apply_handler> > apply_handlers;
Expand Down Expand Up @@ -532,6 +533,11 @@ struct controller_impl {
signed_transaction dtrx;
fc::raw::unpack(ds,static_cast<transaction&>(dtrx) );

auto reset_in_trx_requiring_checks = fc::make_scoped_exit([old_value=in_trx_requiring_checks,this](){
in_trx_requiring_checks = old_value;
});
in_trx_requiring_checks = true;

transaction_context trx_context( self, dtrx, gto.trx_id );
trx_context.deadline = deadline;
trx_context.billed_cpu_time_us = billed_cpu_time_us;
Expand Down Expand Up @@ -760,6 +766,10 @@ struct controller_impl {

try {
auto onbtrx = std::make_shared<transaction_metadata>( get_on_block_transaction() );
auto reset_in_trx_requiring_checks = fc::make_scoped_exit([old_value=in_trx_requiring_checks,this](){
in_trx_requiring_checks = old_value;
});
in_trx_requiring_checks = true;
push_transaction( onbtrx, fc::time_point::maximum(), true, self.get_global_properties().configuration.min_transaction_cpu_usage );
} catch( const boost::interprocess::bad_alloc& e ) {
elog( "on block transaction failed due to a bad allocation" );
Expand Down Expand Up @@ -1395,7 +1405,7 @@ optional<producer_schedule_type> controller::proposed_producers()const {
}

bool controller::skip_auth_check()const {
return my->replaying && !my->conf.force_all_checks;
return my->replaying && !my->conf.force_all_checks && !my->in_trx_requiring_checks;
}

bool controller::contracts_console()const {
Expand Down
Loading

0 comments on commit 1509de2

Please sign in to comment.