add london fork tests #979

Merged · 1 commit · May 18, 2022
91 changes: 46 additions & 45 deletions src/init.cpp
@@ -1645,6 +1645,7 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info)

do {
const int64_t load_block_index_start_time = GetTimeMillis();
dev::eth::ChainParams cp(chainparams.EVMGenesisInfo());
try {
LOCK(cs_main);
chainman.Reset();
@@ -1704,6 +1705,51 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info)
break;
}

/////////////////////////////////////////////////////////// qtum
if((args.IsArgSet("-dgpstorage") && args.IsArgSet("-dgpevm")) || (!args.IsArgSet("-dgpstorage") && args.IsArgSet("-dgpevm")) ||
(!args.IsArgSet("-dgpstorage") && !args.IsArgSet("-dgpevm"))){
fGettingValuesDGP = true;
} else {
fGettingValuesDGP = false;
}
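A note on the condition above: the three OR'd cases cover every input combination except "-dgpstorage set without -dgpevm", so the whole expression collapses to two terms. A quick truth-table check (illustrative only, not part of the patch):

```python
# Check that the three-way condition is equivalent to:
#   fGettingValuesDGP = (-dgpevm set) or (-dgpstorage not set)
for dgpstorage in (False, True):
    for dgpevm in (False, True):
        original = ((dgpstorage and dgpevm)
                    or (not dgpstorage and dgpevm)
                    or (not dgpstorage and not dgpevm))
        assert original == (dgpevm or not dgpstorage)
print("equivalent for all four combinations")
```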

dev::eth::NoProof::init();
fs::path qtumStateDir = gArgs.GetDataDirNet() / "stateQtum";
bool fStatus = fs::exists(qtumStateDir);
const std::string dirQtum(qtumStateDir.string());
const dev::h256 hashDB(dev::sha3(dev::rlp("")));
dev::eth::BaseState existsQtumstate = fStatus ? dev::eth::BaseState::PreExisting : dev::eth::BaseState::Empty;
globalState = std::unique_ptr<QtumState>(new QtumState(dev::u256(0), QtumState::openDB(dirQtum, hashDB, dev::WithExisting::Trust), dirQtum, existsQtumstate));
globalSealEngine = std::unique_ptr<dev::eth::SealEngineFace>(cp.createSealEngine());
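hashDB above is the canonical empty-trie root: keccak-256 of the RLP encoding of the empty string, i.e. of the single byte 0x80. That is the root an Ethereum-style state database carries before any account has been written, which is why it is the right value to open a fresh stateQtum directory with. A quick check, assuming the pycryptodome package for keccak:

```python
from Crypto.Hash import keccak

# RLP("") encodes to the single byte 0x80; its keccak-256 hash is the
# well-known empty Merkle-Patricia-trie root.
empty_root = keccak.new(digest_bits=256, data=b"\x80").hexdigest()
assert empty_root == "56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"
```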

pstorageresult.reset(new StorageResults(qtumStateDir.string()));
if (fReset) {
pstorageresult->wipeResults();
}

fRecordLogOpcodes = args.IsArgSet("-record-log-opcodes");
fIsVMlogFile = fs::exists(gArgs.GetDataDirNet() / "vmExecLogs.json");

if (fAddressIndex != args.GetBoolArg("-addrindex", DEFAULT_ADDRINDEX)) {
strLoadError = _("You need to rebuild the database using -reindex to change -addrindex");
break;
}

// Check for changed -logevents state
if (fLogEvents != args.GetBoolArg("-logevents", DEFAULT_LOGEVENTS) && !fLogEvents) {
strLoadError = _("You need to rebuild the database using -reindex to enable -logevents");
break;
}

if (!args.GetBoolArg("-logevents", DEFAULT_LOGEVENTS))
{
pstorageresult->wipeResults();
pblocktree->WipeHeightIndex();
fLogEvents = false;
pblocktree->WriteFlag("logevents", fLogEvents);
}
///////////////////////////////////////////////////////////////
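The -logevents handling above has two sides: enabling the flag over an existing database requires -reindex (the strLoadError branch), while running without it wipes any previously stored results and height index. A hypothetical functional-test fragment for the first branch; assert_start_raises_init_error and ErrorMatch are existing framework helpers, but this exact test is a sketch, not part of this PR:

```python
from test_framework.test_node import ErrorMatch

# Inside a BitcoinTestFramework subclass: enabling -logevents on a datadir
# that was built without it should abort init with the message above.
self.stop_node(0)
self.nodes[0].assert_start_raises_init_error(
    ["-logevents"],
    "You need to rebuild the database using -reindex to enable -logevents",
    match=ErrorMatch.PARTIAL_REGEX,
)
# Supplying -reindex together with -logevents should start cleanly.
self.start_node(0, extra_args=["-logevents", "-reindex"])
```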

// At this point we're either in reindex or we've loaded a useful
// block tree into BlockIndex()!

@@ -1761,28 +1807,6 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info)
}

/////////////////////////////////////////////////////////// qtum
if((args.IsArgSet("-dgpstorage") && args.IsArgSet("-dgpevm")) || (!args.IsArgSet("-dgpstorage") && args.IsArgSet("-dgpevm")) ||
(!args.IsArgSet("-dgpstorage") && !args.IsArgSet("-dgpevm"))){
fGettingValuesDGP = true;
} else {
fGettingValuesDGP = false;
}

dev::eth::NoProof::init();
fs::path qtumStateDir = gArgs.GetDataDirNet() / "stateQtum";
bool fStatus = fs::exists(qtumStateDir);
const std::string dirQtum(qtumStateDir.string());
const dev::h256 hashDB(dev::sha3(dev::rlp("")));
dev::eth::BaseState existsQtumstate = fStatus ? dev::eth::BaseState::PreExisting : dev::eth::BaseState::Empty;
globalState = std::unique_ptr<QtumState>(new QtumState(dev::u256(0), QtumState::openDB(dirQtum, hashDB, dev::WithExisting::Trust), dirQtum, existsQtumstate));
dev::eth::ChainParams cp(chainparams.EVMGenesisInfo());
globalSealEngine = std::unique_ptr<dev::eth::SealEngineFace>(cp.createSealEngine());

pstorageresult.reset(new StorageResults(qtumStateDir.string()));
if (fReset) {
pstorageresult->wipeResults();
}

{
LOCK(cs_main);
CChain& active_chain = chainman.ActiveChain();
@@ -1797,30 +1821,7 @@
globalState->db().commit();
globalState->dbUtxo().commit();
}

fRecordLogOpcodes = args.IsArgSet("-record-log-opcodes");
fIsVMlogFile = fs::exists(gArgs.GetDataDirNet() / "vmExecLogs.json");
///////////////////////////////////////////////////////////

/////////////////////////////////////////////////////////////// // qtum
if (fAddressIndex != args.GetBoolArg("-addrindex", DEFAULT_ADDRINDEX)) {
strLoadError = _("You need to rebuild the database using -reindex to change -addrindex");
break;
}
///////////////////////////////////////////////////////////////
// Check for changed -logevents state
if (fLogEvents != args.GetBoolArg("-logevents", DEFAULT_LOGEVENTS) && !fLogEvents) {
strLoadError = _("You need to rebuild the database using -reindex to enable -logevents");
break;
}

if (!args.GetBoolArg("-logevents", DEFAULT_LOGEVENTS))
{
pstorageresult->wipeResults();
pblocktree->WipeHeightIndex();
fLogEvents = false;
pblocktree->WriteFlag("logevents", fLogEvents);
}

if (!fReset) {
LOCK(cs_main);
1 change: 0 additions & 1 deletion src/validation.cpp
@@ -1798,7 +1798,6 @@ int ApplyTxInUndo(Coin&& undo, CCoinsViewCache& view, const COutPoint& out)
* When FAILED is returned, view is left in an indeterminate state. */
DisconnectResult CChainState::DisconnectBlock(const CBlock& block, const CBlockIndex* pindex, CCoinsViewCache& view, bool* pfClean)
{
assert(pindex->GetBlockHash() == view.GetBestBlock());
if (pfClean)
*pfClean = false;
bool fClean = true;
10 changes: 5 additions & 5 deletions test/functional/feature_dbcrash.py
@@ -48,13 +48,13 @@
class ChainstateWriteCrashTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 4
self.rpc_timeout = 480
self.rpc_timeout = 960
self.supports_cli = False

# Set -maxmempool=0 to turn off mempool memory sharing with dbcache
# Set -rpcservertimeout=1800 to reduce socket disconnects in this
# long-running test
self.base_args = ["-limitdescendantsize=0", "-maxmempool=0", "-rpcservertimeout=900", "-dbbatchsize=200000"]
self.base_args = ["-limitdescendantsize=0", "-maxmempool=0", "-rpcservertimeout=1800", "-dbbatchsize=200000"]

# Set different crash ratios and cache sizes. Note that not all of
# -dbcache goes to the in-memory coins cache.
@@ -83,7 +83,7 @@ def restart_node(self, node_index, expected_tip):
after 720 seconds. Returns the utxo hash of the given node."""

time_start = time.time()
while time.time() - time_start < 120:
while time.time() - time_start < 720:
try:
# Any of these RPC calls could throw due to node crash
self.start_node(node_index)
@@ -150,7 +150,7 @@ def sync_node3blocks(self, block_hashes):
if not self.submit_block_catch_error(i, block):
# TODO: more carefully check that the crash is due to -dbcrashratio
# (change the exit code perhaps, and check that here?)
self.wait_for_node_exit(i, timeout=30)
self.wait_for_node_exit(i, timeout=120)
self.log.debug("Restarting node %d after block hash %s", i, block_hash)
nodei_utxo_hash = self.restart_node(i, block_hash)
assert nodei_utxo_hash is not None
@@ -187,7 +187,7 @@ def verify_utxo_hash(self):
assert_equal(nodei_utxo_hash, node3_utxo_hash)

def generate_small_transactions(self, node, count, utxo_list):
FEE = 1000 # TODO: replace this with node relay fee based calculation
FEE = 400000 # TODO: replace this with node relay fee based calculation
num_transactions = 0
random.shuffle(utxo_list)
while len(utxo_list) >= 2 and num_transactions < count:
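The FEE bump above (1000 to 400000 satoshis) tracks Qtum's much higher default relay fee, and the TODO about deriving it from the node still stands. One possible shape for that calculation, as a hypothetical sketch; the 300-vbyte transaction size is an assumption for a small 2-in/2-out spend:

```python
from decimal import Decimal

COIN = 100_000_000  # satoshis per coin, as in test_framework.messages

def fee_for_tx(node, tx_vsize=300):
    """Derive a flat fee (in satoshis) from the node's advertised relay fee
    (coin/kvB) instead of hardcoding a constant."""
    relay_fee = Decimal(str(node.getnetworkinfo()["relayfee"]))
    return int(relay_fee * COIN * tx_vsize / 1000)
```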
37 changes: 20 additions & 17 deletions test/functional/feature_pruning.py
@@ -23,6 +23,7 @@
assert_greater_than,
assert_raises_rpc_error,
)
from test_framework.qtum import generatesynchronized

# Rescans start at the earliest block up to 2 hours before a key timestamp, so
# the manual prune RPC avoids pruning blocks in the same window to be
# RPC-compatible with pruning based on key creation time.
@@ -41,7 +42,7 @@ def mine_large_blocks(node, n):
mine_large_blocks.nTime = 0

# Get the block parameters for the first block
big_script = CScript([OP_RETURN] + [OP_NOP] * 950000)
big_script = CScript([OP_RETURN] + [OP_NOP] * 440000)
best_block = node.getblock(node.getbestblockhash())
height = int(best_block["height"]) + 1
mine_large_blocks.nTime = max(mine_large_blocks.nTime, int(best_block["time"])) + 1
@@ -118,12 +119,14 @@ def setup_nodes(self):

def create_big_chain(self):
# Start by creating some coinbases we can spend later
self.nodes[1].generate(200)
# self.nodes[1].generate(1200)
generatesynchronized(self.nodes[1], 2100, None, self.nodes[0:2])
self.sync_blocks(self.nodes[0:2])
self.nodes[0].generate(150)
# self.nodes[0].generate(1150)
generatesynchronized(self.nodes[0], 150, None)
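generatesynchronized is imported above from test_framework.qtum. Judging from the call sites, it takes (node, count, address, sync_nodes) and mines count blocks while keeping the listed peers in sync. A rough sketch of what such a helper might look like, as an assumption about its behavior rather than the actual implementation:

```python
import time

def generatesynchronized(node, count, address=None, sync_nodes=None):
    """Sketch: mine `count` blocks in chunks, letting the listed peers catch
    up between chunks so very long regtest chains don't trip p2p timeouts."""
    CHUNK = 100
    mined = 0
    while mined < count:
        n = min(CHUNK, count - mined)
        if address is None:
            node.generate(n)
        else:
            node.generatetoaddress(n, address)
        mined += n
        tip = node.getbestblockhash()
        for peer in sync_nodes or []:
            while peer.getbestblockhash() != tip:
                time.sleep(0.5)
```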

# Then mine enough full blocks to create more than 550MiB of data
mine_large_blocks(self.nodes[0], 645)
mine_large_blocks(self.nodes[0], 1290)

self.sync_blocks(self.nodes[0:5])
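A rough sanity check on the 1290 figure (assuming each large block carries ~440 kB of OP_NOP script, per the 440000 constant in mine_large_blocks):

```python
blocks = 1290
script_bytes = blocks * 440_000      # one byte per OP_NOP
print(script_bytes / 2**20)          # ~541 MiB of raw script
# Headers, coinbases and rev*.dat undo data add a few kB per block, which
# should push total usage of the prune dir just past the 550 MiB target.
```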

@@ -161,7 +164,7 @@ def create_chain_with_staleblocks(self):
# Create connections in the order so both nodes can see the reorg at the same time
self.connect_nodes(0, 1)
self.connect_nodes(0, 2)
self.sync_blocks(self.nodes[0:3])
self.sync_blocks(self.nodes[0:3], timeout=360)

self.log.info("Usage can be over target because of high stale rate: %d" % calc_usage(self.prunedir))

@@ -205,7 +208,7 @@ def reorg_test(self):

self.log.info("Mine 220 more large blocks so we have requisite history")

mine_large_blocks(self.nodes[0], 220)
mine_large_blocks(self.nodes[0], 1020)
self.sync_blocks(self.nodes[0:3], timeout=120)

usage = calc_usage(self.prunedir)
@@ -259,13 +262,13 @@ def manual_test(self, node_number, use_timestamp):
# at this point, node has 3540 blocks and has not yet run in prune mode
self.start_node(node_number)
node = self.nodes[node_number]
assert_equal(node.getblockcount(), 995)
assert_equal(node.getblockcount(), 3540)
assert_raises_rpc_error(-1, "not in prune mode", node.pruneblockchain, 500)

# now re-start in manual pruning mode
self.restart_node(node_number, extra_args=["-prune=1"])
node = self.nodes[node_number]
assert_equal(node.getblockcount(), 995)
assert_equal(node.getblockcount(), 3540)

def height(index):
if use_timestamp:
@@ -281,15 +284,15 @@ def has_block(index):
return os.path.isfile(os.path.join(self.nodes[node_number].datadir, self.chain, "blocks", "blk{:05}.dat".format(index)))

# the "Blockchain is too short for pruning" check is disabled here: the qtum
# regtest chain tip (3540) is already above PruneAfterHeight
assert_raises_rpc_error(-1, "Blockchain is too short for pruning", node.pruneblockchain, height(500))
# assert_raises_rpc_error(-1, "Blockchain is too short for pruning", node.pruneblockchain, height(3550))

# Save block transaction count before pruning, assert value
block1_details = node.getblock(node.getblockhash(1))
assert_equal(block1_details["nTx"], len(block1_details["tx"]))

# mine 6 blocks so we are at height 3546 (i.e., above PruneAfterHeight)
node.generate(6)
assert_equal(node.getblockchaininfo()["blocks"], 1001)
assert_equal(node.getblockchaininfo()["blocks"], 3546)

# Pruned block should still know the number of transactions
assert_equal(node.getblockheader(node.getblockhash(1))["nTx"], block1_details["nTx"])
@@ -306,23 +309,23 @@ def has_block(index):
assert has_block(0), "blk00000.dat is missing when should still be there"

# height=2800 should prune first file
prune(500)
prune(2800)
assert not has_block(0), "blk00000.dat is still there, should be pruned by now"
assert has_block(1), "blk00001.dat is missing when should still be there"

# height=3200 should prune second file
prune(650)
prune(3200)
assert not has_block(1), "blk00001.dat is still there, should be pruned by now"

# height=3545 should not prune anything more, because tip-288 is in blk00002.dat (disabled below)
prune(1000)
assert has_block(2), "blk00002.dat is missing when should still be there"
# prune(3545)
# assert has_block(2), "blk00002.dat is missing when should still be there"

# advance the tip so blk00002.dat and blk00003.dat can be pruned (the last 288 blocks should now be in blk00004.dat)
node.generate(288)
prune(1000)
assert not has_block(2), "blk00002.dat is still there, should be pruned by now"
assert not has_block(3), "blk00003.dat is still there, should be pruned by now"
# prune(3545)
# assert not has_block(2), "blk00002.dat is still there, should be pruned by now"
# assert not has_block(3), "blk00003.dat is still there, should be pruned by now"

# stop node, start back up with auto-prune at 550 MiB, make sure still runs
self.restart_node(node_number, extra_args=["-prune=550"])