From 193a846591cf4090baf1e19510ea213aeb6e39e2 Mon Sep 17 00:00:00 2001 From: Jakub Fornadel Date: Fri, 26 Jul 2024 22:45:58 -0700 Subject: [PATCH 001/105] improve pillar block unit tests --- tests/pillar_chain_test.cpp | 138 +++++++++++------- .../test_util/include/test_util/test_util.hpp | 6 + tests/test_util/src/test_util.cpp | 16 ++ 3 files changed, 108 insertions(+), 52 deletions(-) diff --git a/tests/pillar_chain_test.cpp b/tests/pillar_chain_test.cpp index 34d2d03c6d..3924eedbe1 100644 --- a/tests/pillar_chain_test.cpp +++ b/tests/pillar_chain_test.cpp @@ -100,7 +100,7 @@ TEST_F(PillarChainTest, pillar_blocks_create) { } TEST_F(PillarChainTest, votes_count_changes) { - const auto validators_count = 3; + const auto validators_count = 5; auto node_cfgs = make_node_cfgs(validators_count, validators_count, 10); for (auto& node_cfg : node_cfgs) { @@ -108,18 +108,60 @@ TEST_F(PillarChainTest, votes_count_changes) { node_cfg.genesis.state.hardforks.ficus_hf.block_num = 0; node_cfg.genesis.state.hardforks.ficus_hf.pillar_blocks_interval = 4; } + auto nodes = launch_nodes(node_cfgs); + + auto wait_for_next_pillar_block = [&](size_t txs_count) -> PbftPeriod { + EXPECT_HAPPENS({20s, 100ms}, [&](auto& ctx) { + for (auto& node : nodes) { + if (ctx.fail_if(node->getDB()->getNumTransactionExecuted() != txs_count)) { + return; + } + } + }); + auto chain_size = nodes[0]->getPbftChain()->getPbftChainSize(); + + // Wait until new pillar block with changed validators vote_counts is created + auto new_pillar_block_period = chain_size - + chain_size % node_cfgs[0].genesis.state.hardforks.ficus_hf.pillar_blocks_interval + + node_cfgs[0].genesis.state.hardforks.ficus_hf.pillar_blocks_interval; + EXPECT_HAPPENS({20s, 250ms}, [&](auto& ctx) { + for (const auto& node : nodes) { + if (ctx.fail_if(node->getPbftChain()->getPbftChainSize() < new_pillar_block_period + 1)) { + return; + } + } + }); + + return new_pillar_block_period; + }; + + auto checkPillarBlockData = [&](size_t 
pillar_block_period, + std::unordered_map expected_validators_vote_counts_changes) { + // Check if vote_counts changes in new pillar block changed according to new delegations + for (auto& node : nodes) { + // Check if right amount of pillar blocks were created + const auto new_pillar_block = node->getDB()->getPillarBlock(pillar_block_period); + ASSERT_TRUE(new_pillar_block); + ASSERT_EQ(new_pillar_block->getPeriod(), pillar_block_period); + ASSERT_EQ(new_pillar_block->getValidatorsVoteCountsChanges().size(), + expected_validators_vote_counts_changes.size()); + for (const auto& vote_count_change : new_pillar_block->getValidatorsVoteCountsChanges()) { + EXPECT_TRUE(expected_validators_vote_counts_changes.contains(vote_count_change.addr_)); + ASSERT_EQ(vote_count_change.vote_count_change_, + expected_validators_vote_counts_changes[vote_count_change.addr_]); + } + } + }; - std::vector validators_vote_counts; - validators_vote_counts.reserve(node_cfgs.size()); + // Initial stakes of all validators + std::unordered_map expected_validators_vote_counts_changes; for (const auto& validator : node_cfgs[0].genesis.state.dpos.initial_validators) { - auto& vote_count = validators_vote_counts.emplace_back(0); + auto& vote_count = expected_validators_vote_counts_changes[validator.address]; for (const auto& delegation : validator.delegations) { vote_count += delegation.second / node_cfgs[0].genesis.state.dpos.vote_eligibility_balance_step; } } - auto nodes = launch_nodes(node_cfgs); - // Wait until nodes create first pillar block const auto first_pillar_block_period = node_cfgs[0].genesis.state.hardforks.ficus_hf.firstPillarBlockPeriod(); ASSERT_HAPPENS({20s, 250ms}, [&](auto& ctx) { @@ -128,61 +170,53 @@ TEST_F(PillarChainTest, votes_count_changes) { } }); - // Check if vote_counts changes in first pillar block == initial validators vote_counts - for (auto& node : nodes) { - // Check if right amount of pillar blocks were created - const auto first_pillar_block = 
node->getDB()->getPillarBlock(first_pillar_block_period); - ASSERT_TRUE(first_pillar_block); - - ASSERT_EQ(first_pillar_block->getPeriod(), first_pillar_block_period); - ASSERT_EQ(first_pillar_block->getValidatorsVoteCountsChanges().size(), validators_count); - size_t idx = 0; - for (const auto& vote_count_change : first_pillar_block->getValidatorsVoteCountsChanges()) { - ASSERT_EQ(vote_count_change.vote_count_change_, validators_vote_counts[idx]); - idx++; - } - } + checkPillarBlockData(first_pillar_block_period, expected_validators_vote_counts_changes); - // Change validators delegation - const auto delegation_value = 2 * node_cfgs[0].genesis.state.dpos.eligibility_balance_threshold; + // Delegate to validators + expected_validators_vote_counts_changes.clear(); + size_t txs_count = 0; for (size_t i = 0; i < validators_count; i++) { + const auto delegation_value = (i + 1) * node_cfgs[0].genesis.state.dpos.eligibility_balance_threshold; + expected_validators_vote_counts_changes[toAddress(node_cfgs[i].node_secret)] = i + 1; const auto trx = make_delegate_tx(node_cfgs[i], delegation_value, 1, 1000); nodes[0]->getTransactionManager()->insertTransaction(trx); + txs_count++; } - EXPECT_HAPPENS({20s, 100ms}, [&](auto& ctx) { - for (auto& node : nodes) { - if (ctx.fail_if(node->getDB()->getNumTransactionExecuted() != validators_count)) { - return; - } - } - }); - const auto chain_size = nodes[0]->getPbftChain()->getPbftChainSize(); + auto new_pillar_block_period = wait_for_next_pillar_block(txs_count); + checkPillarBlockData(new_pillar_block_period, expected_validators_vote_counts_changes); - // Wait until new pillar block with changed validators vote_counts is created - const auto new_pillar_block_period = - chain_size - chain_size % node_cfgs[0].genesis.state.hardforks.ficus_hf.pillar_blocks_interval + - node_cfgs[0].genesis.state.hardforks.ficus_hf.pillar_blocks_interval; - ASSERT_HAPPENS({20s, 250ms}, [&](auto& ctx) { - for (const auto& node : nodes) { - 
WAIT_EXPECT_GE(ctx, node->getPbftChain()->getPbftChainSize(), new_pillar_block_period + 1) - } - }); + // Undelegate from validators + expected_validators_vote_counts_changes.clear(); + for (size_t i = 0; i < validators_count - 1; i++) { + const auto undelegation_value = (i + 1) * node_cfgs[0].genesis.state.dpos.eligibility_balance_threshold; + expected_validators_vote_counts_changes[toAddress(node_cfgs[i].node_secret)] = dev::s256(i + 1) * -1; + const auto trx = make_undelegate_tx(node_cfgs[i], undelegation_value, 2, 1000); + nodes[0]->getTransactionManager()->insertTransaction(trx); + txs_count++; + } - // Check if vote_counts changes in new pillar block changed according to new delegations - for (auto& node : nodes) { - // Check if right amount of pillar blocks were created - const auto new_pillar_block = node->getDB()->getPillarBlock(new_pillar_block_period); - ASSERT_TRUE(new_pillar_block); - ASSERT_EQ(new_pillar_block->getPeriod(), new_pillar_block_period); - ASSERT_EQ(new_pillar_block->getValidatorsVoteCountsChanges().size(), validators_count); - size_t idx = 0; - for (const auto& vote_count_change : new_pillar_block->getValidatorsVoteCountsChanges()) { - ASSERT_EQ(vote_count_change.vote_count_change_, - delegation_value / node_cfgs[0].genesis.state.dpos.eligibility_balance_threshold); - idx++; - } + new_pillar_block_period = wait_for_next_pillar_block(txs_count); + checkPillarBlockData(new_pillar_block_period, expected_validators_vote_counts_changes); + + // Redelegate + const auto redelegate_to_addr = toAddress(node_cfgs[node_cfgs.size() - 1].node_secret); + expected_validators_vote_counts_changes.clear(); + expected_validators_vote_counts_changes[redelegate_to_addr] = 0; + for (size_t i = 0; i < validators_count - 3; i++) { + const auto node_addr = toAddress(node_cfgs[i].node_secret); + const auto node_vote_count = + nodes[0]->getFinalChain()->dpos_eligible_vote_count(new_pillar_block_period, node_addr); + const auto redelegation_value = node_vote_count * 
node_cfgs[0].genesis.state.dpos.eligibility_balance_threshold; + expected_validators_vote_counts_changes[node_addr] = dev::s256(node_vote_count) * -1; + expected_validators_vote_counts_changes[redelegate_to_addr] += dev::s256(node_vote_count); + const auto trx = make_redelegate_tx(node_cfgs[i], redelegation_value, redelegate_to_addr, 3, 1000); + nodes[0]->getTransactionManager()->insertTransaction(trx); + txs_count++; } + + new_pillar_block_period = wait_for_next_pillar_block(txs_count); + checkPillarBlockData(new_pillar_block_period, expected_validators_vote_counts_changes); } TEST_F(PillarChainTest, pillar_chain_syncing) { diff --git a/tests/test_util/include/test_util/test_util.hpp b/tests/test_util/include/test_util/test_util.hpp index 7bb1e82c67..87fcb03e16 100644 --- a/tests/test_util/include/test_util/test_util.hpp +++ b/tests/test_util/include/test_util/test_util.hpp @@ -165,6 +165,12 @@ SharedTransaction make_dpos_trx(const FullNodeConfig& sender_node_cfg, const u25 SharedTransaction make_delegate_tx(const FullNodeConfig& sender_node_cfg, const u256& value, uint64_t nonce, const u256& gas_price); +SharedTransaction make_undelegate_tx(const FullNodeConfig& sender_node_cfg, const u256& value, uint64_t nonce, + const u256& gas_price); + +SharedTransaction make_redelegate_tx(const FullNodeConfig& sender_node_cfg, const u256& value, const Address& to, + uint64_t nonce, const u256& gas_price); + u256 own_balance(const std::shared_ptr& node); state_api::BalanceMap effective_initial_balances(const state_api::Config& cfg); diff --git a/tests/test_util/src/test_util.cpp b/tests/test_util/src/test_util.cpp index c0eda415de..295e036a26 100644 --- a/tests/test_util/src/test_util.cpp +++ b/tests/test_util/src/test_util.cpp @@ -92,6 +92,22 @@ SharedTransaction make_delegate_tx(const FullNodeConfig& sender_node_cfg, const sender_node_cfg.node_secret, kContractAddress, sender_node_cfg.genesis.chain_id); } +SharedTransaction make_undelegate_tx(const FullNodeConfig& 
sender_node_cfg, const u256& value, uint64_t nonce, + const u256& gas_price) { + const auto addr = dev::toAddress(sender_node_cfg.node_secret); + const auto input = util::EncodingSolidity::packFunctionCall("undelegate(address,uint256)", addr, value); + return std::make_shared(nonce, 0, gas_price, TEST_TX_GAS_LIMIT, std::move(input), + sender_node_cfg.node_secret, kContractAddress, sender_node_cfg.genesis.chain_id); +} + +SharedTransaction make_redelegate_tx(const FullNodeConfig& sender_node_cfg, const u256& value, const Address& to, + uint64_t nonce, const u256& gas_price) { + const auto addr = dev::toAddress(sender_node_cfg.node_secret); + const auto input = util::EncodingSolidity::packFunctionCall("reDelegate(address,address,uint256)", addr, to, value); + return std::make_shared(nonce, 0, gas_price, TEST_TX_GAS_LIMIT, std::move(input), + sender_node_cfg.node_secret, kContractAddress, sender_node_cfg.genesis.chain_id); +} + u256 own_balance(const std::shared_ptr& node) { return node->getFinalChain()->getBalance(node->getAddress()).first; } From fc449278e7ada4f513cb40f0d2de35df7ec615d4 Mon Sep 17 00:00:00 2001 From: Jakub Fornadel Date: Mon, 29 Jul 2024 12:16:34 -0700 Subject: [PATCH 002/105] save the minimum amount of pillar votes into db --- .../pillar_chain/pillar_chain_manager.hpp | 3 +- .../include/pillar_chain/pillar_votes.hpp | 9 ++-- .../src/pillar_chain/pillar_votes.cpp | 41 ++++++++++++++++--- 3 files changed, 43 insertions(+), 10 deletions(-) diff --git a/libraries/core_libs/consensus/include/pillar_chain/pillar_chain_manager.hpp b/libraries/core_libs/consensus/include/pillar_chain/pillar_chain_manager.hpp index f1ff32d638..8d333a193a 100644 --- a/libraries/core_libs/consensus/include/pillar_chain/pillar_chain_manager.hpp +++ b/libraries/core_libs/consensus/include/pillar_chain/pillar_chain_manager.hpp @@ -128,7 +128,8 @@ class PillarChainManager { * @param pillar_block_hash * @param above_threshold * - * @return all pillar votes for specified period and 
pillar block hash + * @return all pillar votes for specified period and pillar block hash. In case above_threshold == true, votes + * are sorted based on vote weight and the minimum number of votes above threshold are returned */ std::vector> getVerifiedPillarVotes(PbftPeriod period, const blk_hash_t pillar_block_hash, bool above_threshold = false) const; diff --git a/libraries/core_libs/consensus/include/pillar_chain/pillar_votes.hpp b/libraries/core_libs/consensus/include/pillar_chain/pillar_votes.hpp index 3400509b6e..2f98f3f6c7 100644 --- a/libraries/core_libs/consensus/include/pillar_chain/pillar_votes.hpp +++ b/libraries/core_libs/consensus/include/pillar_chain/pillar_votes.hpp @@ -10,8 +10,8 @@ namespace taraxa::pillar_chain { class PillarVotes { public: struct WeightVotes { - std::unordered_map> votes; - uint64_t weight{0}; // votes weight + std::unordered_map, uint64_t /* vote weight */>> votes; + uint64_t weight{0}; // votes accumulated weight }; struct PeriodVotes { @@ -62,7 +62,7 @@ class PillarVotes { * * @return true if vote was successfully added, otherwise false */ - bool addVerifiedVote(const std::shared_ptr& vote, u_int64_t validator_vote_count); + bool addVerifiedVote(const std::shared_ptr& vote, uint64_t validator_vote_count); /** * @brief Get all pillar block votes for specified pillar block @@ -71,7 +71,8 @@ class PillarVotes { * @param pillar_block_hash * @param above_threshold if true, return only if there is > threshold verified votes * - * @return all pillar block votes for specified period and pillar block hash + * @return all pillar block votes for specified period and pillar block hash. 
In case above_threshold == true, votes + * are sorted based on vote weight and the minimum number of votes above threshold are returned */ std::vector> getVerifiedVotes(PbftPeriod period, const blk_hash_t& pillar_block_hash, bool above_threshold = false) const; diff --git a/libraries/core_libs/consensus/src/pillar_chain/pillar_votes.cpp b/libraries/core_libs/consensus/src/pillar_chain/pillar_votes.cpp index f60a49dc9e..a127eba448 100644 --- a/libraries/core_libs/consensus/src/pillar_chain/pillar_votes.cpp +++ b/libraries/core_libs/consensus/src/pillar_chain/pillar_votes.cpp @@ -52,14 +52,45 @@ std::vector> PillarVotes::getVerifiedVotes(PbftPerio return {}; } - if (above_threshold && found_pillar_block_votes->second.weight < found_period_votes->second.threshold) { - return {}; + // Return minimum amount of >threshold sorted votes based on their weight + if (above_threshold) { + const auto threshold = found_period_votes->second.threshold; + if (found_pillar_block_votes->second.weight < threshold) { + return {}; + } + + // Sort votes using multiset + auto customComparator = [](const std::pair, uint64_t>& a, + const std::pair, uint64_t>& b) { + return a.second > b.second; + }; + std::multiset, uint64_t>, decltype(customComparator)> votes_set( + customComparator); + std::transform(found_pillar_block_votes->second.votes.begin(), found_pillar_block_votes->second.votes.end(), + std::inserter(votes_set, votes_set.end()), [](const auto& el) { return el.second; }); + + // Move minimum amount of > threshold votes with the highest vote counts + std::vector> sorted_votes; + sorted_votes.reserve(votes_set.size()); + uint64_t tmp_votes_count = 0; + for (auto it = votes_set.begin(); it != votes_set.end();) { + auto&& vote_pair = votes_set.extract(it++); + tmp_votes_count += vote_pair.value().second; + sorted_votes.push_back(std::move(vote_pair.value().first)); + + if (tmp_votes_count >= threshold) { + break; + } + } + + return sorted_votes; } + // Return all votes std::vector> 
votes; votes.reserve(found_pillar_block_votes->second.votes.size()); for (const auto& sig : found_pillar_block_votes->second.votes) { - votes.push_back(sig.second); + votes.push_back(sig.second.first); } return votes; @@ -70,7 +101,7 @@ bool PillarVotes::periodDataInitialized(PbftPeriod period) const { return votes_.contains(period); } -bool PillarVotes::addVerifiedVote(const std::shared_ptr& vote, u_int64_t validator_vote_count) { +bool PillarVotes::addVerifiedVote(const std::shared_ptr& vote, uint64_t validator_vote_count) { std::scoped_lock lock(mutex_); auto found_period_votes = votes_.find(vote->getPeriod()); @@ -92,7 +123,7 @@ bool PillarVotes::addVerifiedVote(const std::shared_ptr& vote, u_int auto pillar_block_votes = found_period_votes->second.pillar_block_votes.insert({vote->getBlockHash(), {}}).first; // Add validator vote count only if the vote is new - if (pillar_block_votes->second.votes.emplace(vote->getHash(), vote).second) { + if (pillar_block_votes->second.votes.emplace(vote->getHash(), std::make_pair(vote, validator_vote_count)).second) { pillar_block_votes->second.weight += validator_vote_count; } From 11cf86fd45c82d7829f2f5edd385ae40c82e0dc1 Mon Sep 17 00:00:00 2001 From: Jakub Fornadel Date: Tue, 6 Aug 2024 11:13:00 -0700 Subject: [PATCH 003/105] improve proposed pbft blocks cleanup test --- tests/pbft_manager_test.cpp | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/tests/pbft_manager_test.cpp b/tests/pbft_manager_test.cpp index f5c455d084..bb5feef278 100644 --- a/tests/pbft_manager_test.cpp +++ b/tests/pbft_manager_test.cpp @@ -674,14 +674,17 @@ TEST_F(PbftManagerWithDagCreation, proposed_blocks) { ProposedBlocks proposed_blocks(db); std::map> blocks; - const uint32_t block_count = 100; // Create blocks - for (uint32_t i = 1; i <= block_count; i++) { - std::vector reward_votes_hashes; - auto block = std::make_shared(blk_hash_t(1), kNullBlockHash, kNullBlockHash, kNullBlockHash, 2, addr_t(), - 
dev::KeyPair::create().secret(), std::move(reward_votes_hashes)); - blocks.insert({block->getBlockHash(), block}); + for (PbftPeriod period = 1; period <= 3; period++) { + for (uint32_t i = 1; i <= 40; i++) { + std::vector reward_votes_hashes; + auto block = + std::make_shared(blk_hash_t(1), kNullBlockHash, kNullBlockHash, kNullBlockHash, period, addr_t(), + dev::KeyPair::create().secret(), std::move(reward_votes_hashes)); + blocks.insert({block->getBlockHash(), block}); + } } + const uint32_t block_count = blocks.size(); auto now = std::chrono::steady_clock::now(); for (auto b : blocks) { proposed_blocks.pushProposedPbftBlock(b.second); @@ -695,7 +698,7 @@ TEST_F(PbftManagerWithDagCreation, proposed_blocks) { EXPECT_TRUE(blocks.find(b->getBlockHash()) != blocks.end()); } now = std::chrono::steady_clock::now(); - proposed_blocks.cleanupProposedPbftBlocksByPeriod(3); + proposed_blocks.cleanupProposedPbftBlocksByPeriod(4); std::cout << "Time to erase " << block_count << " blocks: " << duration_cast(std::chrono::steady_clock::now() - now).count() << " microseconds" << std::endl; From 153ae462038caf3fd8d538e532acfd75b24eb2ce Mon Sep 17 00:00:00 2001 From: mfrankovi Date: Mon, 12 Aug 2024 09:12:32 +0200 Subject: [PATCH 004/105] chore: dag block proposal limit --- .../cli/config_jsons/default/default_config.json | 2 +- .../cli/config_jsons/devnet/devnet_config.json | 2 +- .../cli/config_jsons/mainnet/mainnet_config.json | 2 +- .../cli/config_jsons/testnet/testnet_config.json | 2 +- libraries/common/include/common/constants.hpp | 1 + .../consensus/src/dag/dag_block_proposer.cpp | 13 ++++++++++--- .../core_libs/network/include/network/network.hpp | 7 +++++++ libraries/core_libs/network/src/network.cpp | 6 ++++++ 8 files changed, 28 insertions(+), 7 deletions(-) diff --git a/libraries/cli/include/cli/config_jsons/default/default_config.json b/libraries/cli/include/cli/config_jsons/default/default_config.json index 72e9409c3f..0f72c0bc9b 100644 --- 
a/libraries/cli/include/cli/config_jsons/default/default_config.json +++ b/libraries/cli/include/cli/config_jsons/default/default_config.json @@ -24,7 +24,7 @@ "packets_stats_time_period_ms": 60000, "peer_max_packets_processing_time_us": 10000000, "peer_max_packets_queue_size_limit": 100000, - "max_packets_queue_size": 200000 + "max_packets_queue_size": 100 }, "listen_ip": "0.0.0.0", "listen_port": 10002, diff --git a/libraries/cli/include/cli/config_jsons/devnet/devnet_config.json b/libraries/cli/include/cli/config_jsons/devnet/devnet_config.json index 47d0713725..2f812df6f4 100644 --- a/libraries/cli/include/cli/config_jsons/devnet/devnet_config.json +++ b/libraries/cli/include/cli/config_jsons/devnet/devnet_config.json @@ -24,7 +24,7 @@ "packets_stats_time_period_ms": 60000, "peer_max_packets_processing_time_us": 10000000, "peer_max_packets_queue_size_limit": 100000, - "max_packets_queue_size": 200000 + "max_packets_queue_size": 100 }, "listen_ip": "0.0.0.0", "listen_port": 10002, diff --git a/libraries/cli/include/cli/config_jsons/mainnet/mainnet_config.json b/libraries/cli/include/cli/config_jsons/mainnet/mainnet_config.json index 66cb1b4499..f82d06c4f2 100644 --- a/libraries/cli/include/cli/config_jsons/mainnet/mainnet_config.json +++ b/libraries/cli/include/cli/config_jsons/mainnet/mainnet_config.json @@ -24,7 +24,7 @@ "packets_stats_time_period_ms": 60000, "peer_max_packets_processing_time_us": 0, "peer_max_packets_queue_size_limit": 0, - "max_packets_queue_size": 200000 + "max_packets_queue_size": 100 }, "listen_ip": "0.0.0.0", "listen_port": 10002, diff --git a/libraries/cli/include/cli/config_jsons/testnet/testnet_config.json b/libraries/cli/include/cli/config_jsons/testnet/testnet_config.json index d444117b3c..676a6428c8 100644 --- a/libraries/cli/include/cli/config_jsons/testnet/testnet_config.json +++ b/libraries/cli/include/cli/config_jsons/testnet/testnet_config.json @@ -24,7 +24,7 @@ "packets_stats_time_period_ms": 60000, 
"peer_max_packets_processing_time_us": 0, "peer_max_packets_queue_size_limit": 0, - "max_packets_queue_size": 200000 + "max_packets_queue_size": 100 }, "listen_ip": "0.0.0.0", "listen_port": 10002, diff --git a/libraries/common/include/common/constants.hpp b/libraries/common/include/common/constants.hpp index 5d7e4540e1..9122c34c96 100644 --- a/libraries/common/include/common/constants.hpp +++ b/libraries/common/include/common/constants.hpp @@ -29,6 +29,7 @@ const uint64_t kMinTxGas{21000}; constexpr uint32_t kMinTransactionPoolSize{30000}; constexpr uint32_t kDefaultTransactionPoolSize{200000}; +constexpr uint32_t kMaxNonFinalizedTransactions{1000000}; const size_t kV2NetworkVersion = 2; diff --git a/libraries/core_libs/consensus/src/dag/dag_block_proposer.cpp b/libraries/core_libs/consensus/src/dag/dag_block_proposer.cpp index cad7733b8c..9587b11daf 100644 --- a/libraries/core_libs/consensus/src/dag/dag_block_proposer.cpp +++ b/libraries/core_libs/consensus/src/dag/dag_block_proposer.cpp @@ -51,6 +51,11 @@ bool DagBlockProposer::proposeDagBlock() { return false; } + // Do not propose dag blocks if number of non finalized transactions is over the limit + if (trx_mgr_->getNonfinalizedTrxSize() > kMaxNonFinalizedTransactions) { + return false; + } + auto frontier = dag_mgr_->getDagFrontier(); LOG(log_dg_) << "Get frontier with pivot: " << frontier.pivot << " tips: " << frontier.tips; assert(!frontier.pivot.isZero()); @@ -183,12 +188,14 @@ void DagBlockProposer::start() { while (!stopped_) { // Blocks are not proposed if we are behind the network and still syncing auto syncing = false; + auto packets_over_the_limit = false; if (auto net = network_.lock()) { syncing = net->pbft_syncing(); + packets_over_the_limit = net->packetQueueOverLimit(); } - // Only sleep if block was not proposed or if we are syncing, if block is proposed try to propose another block - // immediately - if (syncing || !proposeDagBlock()) { + // Only sleep if block was not proposed or if we are 
syncing or if packets queue is over the limit, if block is + // proposed try to propose another block immediately + if (syncing || packets_over_the_limit || !proposeDagBlock()) { thisThreadSleepForMilliSeconds(min_proposal_delay); } } diff --git a/libraries/core_libs/network/include/network/network.hpp b/libraries/core_libs/network/include/network/network.hpp index 8c654eaaa7..30fd397944 100644 --- a/libraries/core_libs/network/include/network/network.hpp +++ b/libraries/core_libs/network/include/network/network.hpp @@ -77,6 +77,13 @@ class Network { */ void requestPillarBlockVotesBundle(PbftPeriod period, const blk_hash_t &pillar_block_hash); + /** + * @brief Get packets queue status + * + * @return true if packets queue is over the limit + */ + bool packetQueueOverLimit() const; + // METHODS USED IN TESTS ONLY template std::shared_ptr getSpecificHandler() const; diff --git a/libraries/core_libs/network/src/network.cpp b/libraries/core_libs/network/src/network.cpp index 9091c68571..248f773951 100644 --- a/libraries/core_libs/network/src/network.cpp +++ b/libraries/core_libs/network/src/network.cpp @@ -132,6 +132,12 @@ void Network::start() { bool Network::isStarted() { return tp_.is_running(); } +bool Network::packetQueueOverLimit() const { + auto [hp_queue_size, mp_queue_size, lp_queue_size] = packets_tp_->getQueueSize(); + auto total_size = hp_queue_size + mp_queue_size + lp_queue_size; + return total_size > kConf.network.ddos_protection.max_packets_queue_size; +} + std::list Network::getAllNodes() const { return host_->getNodes(); } size_t Network::getPeerCount() { return host_->peer_count(); } From 7afb5454082426f093dd4d8e921b9b2ce57a8586 Mon Sep 17 00:00:00 2001 From: Matus Kysel Date: Mon, 19 Aug 2024 16:41:03 +0200 Subject: [PATCH 005/105] first try of optimization --- libraries/core_libs/node/src/node.cpp | 1 - .../include/storage/migration/dag_block.hpp | 12 +++ .../storage/src/migration/dag_block.cpp | 75 ++++++++++++++ 
.../src/migration/migration_manager.cpp | 7 +- libraries/core_libs/storage/src/storage.cpp | 6 +- libraries/types/dag_block/CMakeLists.txt | 2 + .../types/dag_block/include/dag/dag_block.hpp | 5 +- .../include/dag/dag_block_bundle_rlp.hpp | 44 +++++++++ libraries/types/dag_block/src/dag_block.cpp | 28 +++++- .../dag_block/src/dag_block_bundle_rlp.cpp | 98 +++++++++++++++++++ .../pbft_block/include/pbft/period_data.hpp | 2 + .../types/pbft_block/src/period_data.cpp | 38 +++++-- submodules/taraxa-evm | 2 +- 13 files changed, 298 insertions(+), 22 deletions(-) create mode 100644 libraries/core_libs/storage/include/storage/migration/dag_block.hpp create mode 100644 libraries/core_libs/storage/src/migration/dag_block.cpp create mode 100644 libraries/types/dag_block/include/dag/dag_block_bundle_rlp.hpp create mode 100644 libraries/types/dag_block/src/dag_block_bundle_rlp.cpp diff --git a/libraries/core_libs/node/src/node.cpp b/libraries/core_libs/node/src/node.cpp index cb65d26a3a..c7ea97b6ad 100644 --- a/libraries/core_libs/node/src/node.cpp +++ b/libraries/core_libs/node/src/node.cpp @@ -95,7 +95,6 @@ void FullNode::init() { if (conf_.db_config.fix_trx_period) { migration_manager.applyTransactionPeriod(); } - if (db_->getDagBlocksCount() == 0) { db_->setGenesisHash(conf_.genesis.genesisHash()); } diff --git a/libraries/core_libs/storage/include/storage/migration/dag_block.hpp b/libraries/core_libs/storage/include/storage/migration/dag_block.hpp new file mode 100644 index 0000000000..3a7ac8069a --- /dev/null +++ b/libraries/core_libs/storage/include/storage/migration/dag_block.hpp @@ -0,0 +1,12 @@ +#pragma once +#include "storage/migration/migration_base.hpp" + +namespace taraxa::storage::migration { +class DagBlockData : public migration::Base { + public: + DagBlockData(std::shared_ptr db); + std::string id() override; + uint32_t dbVersion() override; + void migrate(logger::Logger& log) override; +}; +} // namespace taraxa::storage::migration \ No newline at end of 
file diff --git a/libraries/core_libs/storage/src/migration/dag_block.cpp b/libraries/core_libs/storage/src/migration/dag_block.cpp new file mode 100644 index 0000000000..3dab017c74 --- /dev/null +++ b/libraries/core_libs/storage/src/migration/dag_block.cpp @@ -0,0 +1,75 @@ +#include "storage/migration/dag_block.hpp" + +#include + +#include + +#include "common/thread_pool.hpp" +#include "pbft/period_data.hpp" + +namespace taraxa::storage::migration { + +DagBlockData::DagBlockData(std::shared_ptr db) : migration::Base(db) {} + +std::string DagBlockData::id() { return "DagBlockData"; } + +uint32_t DagBlockData::dbVersion() { return 1; } + +void DagBlockData::migrate(logger::Logger& log) { + auto orig_col = DB::Columns::period_data; + auto copied_col = db_->copyColumn(db_->handle(orig_col), orig_col.name() + "-copy"); + + if (copied_col == nullptr) { + LOG(log) << "Migration " << id() << " skipped: Unable to copy " << orig_col.name() << " column"; + return; + } + + auto it = db_->getColumnIterator(copied_col.get()); + it->SeekToFirst(); + if (!it->Valid()) { + return; + } + + uint64_t start_period, end_period; + memcpy(&start_period, it->key().data(), sizeof(uint64_t)); + + it->SeekToLast(); + if (!it->Valid()) { + it->Prev(); + } + memcpy(&end_period, it->key().data(), sizeof(uint64_t)); + util::ThreadPool executor{std::thread::hardware_concurrency()}; + + const auto diff = (end_period - start_period) ? 
(end_period - start_period) : 1; + uint64_t curr_progress = 0; + + // Get and save data in new format for all blocks + for (uint64_t i = start_period; i <= end_period; ++i) { + executor.post([this, i, &copied_col]() { + const auto bytes = db_->getPeriodDataRaw(i); + const auto period_data_old_rlp = dev::RLP(bytes); + assert(period_data_old_rlp.itemCount() == 4); + + auto period_data = ::taraxa::PeriodData::FromOldPeriodData(period_data_old_rlp); + + db_->insert(copied_col.get(), i, period_data.rlp()); + }); + // This should slow down main loop so we are not using so much memory + while (executor.num_pending_tasks() > (executor.capacity() * 3)) { + taraxa::thisThreadSleepForMilliSeconds(50); + } + auto percentage = (i - start_period) * 100 / diff; + if (percentage > curr_progress) { + curr_progress = percentage; + LOG(log) << "Migration " << id() << " progress " << curr_progress << "%"; + } + } + + // It's not perfect to check with sleep, but it's just migration that should be run once + do { + taraxa::thisThreadSleepForMilliSeconds(100); + } while (executor.num_pending_tasks()); + + db_->replaceColumn(orig_col, std::move(copied_col)); +} +} // namespace taraxa::storage::migration \ No newline at end of file diff --git a/libraries/core_libs/storage/src/migration/migration_manager.cpp b/libraries/core_libs/storage/src/migration/migration_manager.cpp index a7d2ea6924..b73ecab839 100644 --- a/libraries/core_libs/storage/src/migration/migration_manager.cpp +++ b/libraries/core_libs/storage/src/migration/migration_manager.cpp @@ -1,9 +1,12 @@ #include "storage/migration/migration_manager.hpp" +#include "storage/migration/dag_block.hpp" #include "storage/migration/transaction_period.hpp" - namespace taraxa::storage::migration { -Manager::Manager(std::shared_ptr db, const addr_t& node_addr) : db_(db) { LOG_OBJECTS_CREATE("MIGRATIONS"); } +Manager::Manager(std::shared_ptr db, const addr_t& node_addr) : db_(db) { + LOG_OBJECTS_CREATE("MIGRATIONS"); + registerMigration(); +} 
void Manager::applyMigration(std::shared_ptr m) { if (m->isApplied()) { LOG(log_si_) << "Skip \"" << m->id() << "\" migration. It was already applied"; diff --git a/libraries/core_libs/storage/src/storage.cpp b/libraries/core_libs/storage/src/storage.cpp index 0ef686f139..8965b0cf55 100644 --- a/libraries/core_libs/storage/src/storage.cpp +++ b/libraries/core_libs/storage/src/storage.cpp @@ -149,7 +149,7 @@ std::unique_ptr DbStorage::copyColumn(rocksdb::Colu return nullptr; } - rocksdb::Checkpoint* checkpoint_raw; + rocksdb::Checkpoint* checkpoint_raw = nullptr; auto status = rocksdb::Checkpoint::Create(db_.get(), &checkpoint_raw); std::unique_ptr checkpoint(checkpoint_raw); checkStatus(status); @@ -160,7 +160,7 @@ std::unique_ptr DbStorage::copyColumn(rocksdb::Colu // Export dir should not exist before exporting the column family fs::remove_all(export_dir); - rocksdb::ExportImportFilesMetaData* metadata_raw; + rocksdb::ExportImportFilesMetaData* metadata_raw = nullptr; status = checkpoint->ExportColumnFamily(orig_column, export_dir, &metadata_raw); std::unique_ptr metadata(metadata_raw); checkStatus(status); @@ -174,7 +174,7 @@ std::unique_ptr DbStorage::copyColumn(rocksdb::Colu rocksdb::ImportColumnFamilyOptions import_options; import_options.move_files = move_data; - rocksdb::ColumnFamilyHandle* copied_column_raw; + rocksdb::ColumnFamilyHandle* copied_column_raw = nullptr; status = db_->CreateColumnFamilyWithImport(options, new_col_name, import_options, *metadata, &copied_column_raw); std::unique_ptr copied_column(copied_column_raw); checkStatus(status); diff --git a/libraries/types/dag_block/CMakeLists.txt b/libraries/types/dag_block/CMakeLists.txt index c3f2271ca6..95192fed27 100644 --- a/libraries/types/dag_block/CMakeLists.txt +++ b/libraries/types/dag_block/CMakeLists.txt @@ -1,8 +1,10 @@ set(HEADERS include/dag/dag_block.hpp + include/dag/dag_block_bundle_rlp.hpp ) set(SOURCES src/dag_block.cpp + src/dag_block_bundle_rlp.cpp ) add_library(dag_block 
${SOURCES} ${HEADERS}) diff --git a/libraries/types/dag_block/include/dag/dag_block.hpp b/libraries/types/dag_block/include/dag/dag_block.hpp index 91fa27fb58..376507e786 100644 --- a/libraries/types/dag_block/include/dag/dag_block.hpp +++ b/libraries/types/dag_block/include/dag/dag_block.hpp @@ -50,6 +50,7 @@ class DagBlock { explicit DagBlock(Json::Value const &doc); explicit DagBlock(string const &json); explicit DagBlock(dev::RLP const &_rlp); + explicit DagBlock(dev::RLP const &_rlp, vec_trx_t &&trxs); explicit DagBlock(dev::bytes const &_rlp) : DagBlock(dev::RLP(_rlp)) {} /** @@ -102,7 +103,7 @@ class DagBlock { bool verifySig() const; void verifyVdf(const SortitionParams &vdf_config, const h256 &proposal_period_hash, const vrf_wrapper::vrf_pk_t &pk, uint64_t vote_count, uint64_t total_vote_count) const; - bytes rlp(bool include_sig) const; + bytes rlp(bool include_sig, bool include_trxs = true) const; /** * @brief Returns dag block data rlp stream @@ -110,7 +111,7 @@ class DagBlock { * @param include_sig * @return dev::RLPStream */ - dev::RLPStream streamRLP(bool include_sig) const; + dev::RLPStream streamRLP(bool include_sig, bool include_trxs = true) const; private: blk_hash_t sha3(bool include_sig) const; diff --git a/libraries/types/dag_block/include/dag/dag_block_bundle_rlp.hpp b/libraries/types/dag_block/include/dag/dag_block_bundle_rlp.hpp new file mode 100644 index 0000000000..828399ec7d --- /dev/null +++ b/libraries/types/dag_block/include/dag/dag_block_bundle_rlp.hpp @@ -0,0 +1,44 @@ +#pragma once + +#include +#include + +#include + +namespace taraxa { + +class DagBlock; + +/** @addtogroup DAG + * @{ + */ + +constexpr static size_t kDAGBlocksBundleRlpSize{3}; + +/** + * @brief Encodes pbft blocks into optimized blocks bundle rlp + * + * @param blocks + * @return blocks bundle rlp bytes + */ +dev::bytes encodeDAGBlocksBundleRlp(const std::vector& blocks); + +/** + * @brief Decodes pbft blocks from optimized blocks bundle rlp + * + * @param 
blocks_bundle_rlp + * @return blocks + */ +std::vector decodeDAGBlocksBundleRlp(const dev::RLP& blocks_bundle_rlp); + +/** + * @brief Decodes single dag block from optimized blocks bundle rlp + * + * @param blocks_bundle_rlp + * @return block + */ +std::shared_ptr decodeDAGBlockBundleRlp(uint64_t index, const dev::RLP& blocks_bundle_rlp); + +/** @}*/ + +} // namespace taraxa diff --git a/libraries/types/dag_block/src/dag_block.cpp b/libraries/types/dag_block/src/dag_block.cpp index d28605f988..5d91639dd1 100644 --- a/libraries/types/dag_block/src/dag_block.cpp +++ b/libraries/types/dag_block/src/dag_block.cpp @@ -75,6 +75,13 @@ DagBlock::DagBlock(dev::RLP const &rlp) { vdf_ = vdf_sortition::VdfSortition(vdf_bytes); } +DagBlock::DagBlock(dev::RLP const &rlp, vec_trx_t &&trxs) { + dev::bytes vdf_bytes; + util::rlp_tuple(util::RLPDecoderRef(rlp, true), pivot_, level_, timestamp_, vdf_bytes, tips_, sig_, gas_estimation_); + vdf_ = vdf_sortition::VdfSortition(vdf_bytes); + trxs_ = std::move(trxs); +} + level_t DagBlock::extract_dag_level_from_rlp(const dev::RLP &rlp) { return rlp[kLevelPosInRlp].toInt(); } sig_t DagBlock::extract_signature_from_rlp(const dev::RLP &rlp) { return rlp[kSigPosInRlp].toHash(); } @@ -158,17 +165,26 @@ addr_t const &DagBlock::getSender() const { return cached_sender_; } -dev::RLPStream DagBlock::streamRLP(bool include_sig) const { +dev::RLPStream DagBlock::streamRLP(bool include_sig, bool include_trxs) const { dev::RLPStream s; - constexpr auto base_field_count = 7; - s.appendList(include_sig ? 
base_field_count + 1 : base_field_count); + auto base_field_count = 6; + if (include_sig) { + base_field_count += 1; + } + if (include_trxs) { + base_field_count += 1; + } + + s.appendList(base_field_count); s << pivot_; s << level_; s << timestamp_; s << vdf_.rlp(); s.appendVector(tips_); - s.appendVector(trxs_); + if (include_trxs) { + s.appendVector(trxs_); + } if (include_sig) { s << sig_; } @@ -177,7 +193,9 @@ dev::RLPStream DagBlock::streamRLP(bool include_sig) const { return s; } -bytes DagBlock::rlp(bool include_sig) const { return streamRLP(include_sig).invalidate(); } +bytes DagBlock::rlp(bool include_sig, bool include_trxs) const { + return streamRLP(include_sig, include_trxs).invalidate(); +} blk_hash_t DagBlock::sha3(bool include_sig) const { return dev::sha3(rlp(include_sig)); } diff --git a/libraries/types/dag_block/src/dag_block_bundle_rlp.cpp b/libraries/types/dag_block/src/dag_block_bundle_rlp.cpp new file mode 100644 index 0000000000..097b508611 --- /dev/null +++ b/libraries/types/dag_block/src/dag_block_bundle_rlp.cpp @@ -0,0 +1,98 @@ +#include "dag/dag_block_bundle_rlp.hpp" + +#include + +#include "common/types.hpp" +#include "dag/dag_block.hpp" + +namespace taraxa { + +dev::bytes encodeDAGBlocksBundleRlp(const std::vector& blocks) { + if (blocks.empty()) { + return {}; + } + + std::unordered_map trx_hash_map; // Map to store transaction hash and its index + std::vector ordered_trx_hashes; + std::vector> indexes; + + for (const auto& block : blocks) { + std::vector idx; + idx.reserve(block.getTrxs().size()); + + for (const auto& trx : block.getTrxs()) { + if (trx_hash_map.find(trx) == trx_hash_map.end()) { + trx_hash_map[trx] = static_cast(trx_hash_map.size()); + ordered_trx_hashes.push_back(trx); // Track the insertion order + } + idx.push_back(trx_hash_map[trx]); + } + indexes.push_back(idx); + } + + dev::RLPStream blocks_bundle_rlp(kDAGBlocksBundleRlpSize); + blocks_bundle_rlp.appendList(ordered_trx_hashes.size()); + for (const auto& 
trx_hash : ordered_trx_hashes) { + blocks_bundle_rlp.append(trx_hash); + } + blocks_bundle_rlp.appendList(indexes.size()); + for (const auto& idx : indexes) { + blocks_bundle_rlp.appendList(idx.size()); + for (const auto& i : idx) { + blocks_bundle_rlp.append(i); + } + } + blocks_bundle_rlp.appendList(blocks.size()); + for (const auto& block : blocks) { + blocks_bundle_rlp.appendRaw(block.rlp(true, false)); + } + return blocks_bundle_rlp.invalidate(); +} + +std::vector decodeDAGBlocksBundleRlp(const dev::RLP& blocks_bundle_rlp) { + if (blocks_bundle_rlp.itemCount() != kDAGBlocksBundleRlpSize) { + return {}; + } + + std::vector ordered_trx_hashes; + std::vector> dags_trx_hashes; + + // Decode transaction hashes and + ordered_trx_hashes.reserve(blocks_bundle_rlp[0].itemCount()); + for (const auto& trx_hash_rlp : blocks_bundle_rlp[0]) { + ordered_trx_hashes.push_back(trx_hash_rlp.toHash()); + } + + for (const auto& idx_rlp : blocks_bundle_rlp[1]) { + std::vector hashes; + hashes.reserve(idx_rlp.itemCount()); + for (const auto& i : idx_rlp) { + hashes.push_back(ordered_trx_hashes[i.toInt()]); + } + dags_trx_hashes.push_back(std::move(hashes)); + } + + std::vector blocks; + blocks.reserve(blocks_bundle_rlp[2].itemCount()); + + for (size_t i = 0; i < blocks_bundle_rlp[2].itemCount(); i++) { + auto block = DagBlock(blocks_bundle_rlp[2][i], std::move(dags_trx_hashes[i])); + blocks.push_back(std::move(block)); + } + + return blocks; +} + +// // constexpr static size_t kPillarblocksBundleRlpSize{3}; + +// /** +// * @brief Decodes single dag block from optimized blocks bundle rlp +// * +// * @param blocks_bundle_rlp +// * @return block +// */ +// std::shared_ptr decodeDAGBlockBundleRlp(uint64_t index, const dev::RLP& blocks_bundle_rlp); + +/** @}*/ + +} // namespace taraxa diff --git a/libraries/types/pbft_block/include/pbft/period_data.hpp b/libraries/types/pbft_block/include/pbft/period_data.hpp index 5ea23e863d..408f448c08 100644 --- 
a/libraries/types/pbft_block/include/pbft/period_data.hpp +++ b/libraries/types/pbft_block/include/pbft/period_data.hpp @@ -31,6 +31,8 @@ class PeriodData { explicit PeriodData(const dev::RLP& all_rlp); explicit PeriodData(bytes const& all_rlp); + static PeriodData FromOldPeriodData(const dev::RLP& rlp) ; + std::shared_ptr pbft_blk; std::vector> previous_block_cert_votes; // These votes are the cert votes of previous block // which match reward votes in current pbft block diff --git a/libraries/types/pbft_block/src/period_data.cpp b/libraries/types/pbft_block/src/period_data.cpp index 8bb8ba795a..809841f163 100644 --- a/libraries/types/pbft_block/src/period_data.cpp +++ b/libraries/types/pbft_block/src/period_data.cpp @@ -2,7 +2,7 @@ #include -#include "dag/dag_block.hpp" +#include "dag/dag_block_bundle_rlp.hpp" #include "pbft/pbft_block.hpp" #include "transaction/transaction.hpp" #include "vote/pbft_vote.hpp" @@ -28,9 +28,8 @@ PeriodData::PeriodData(const dev::RLP& rlp) { previous_block_cert_votes = decodePbftVotesBundleRlp(votes_bundle_rlp); } - for (auto const dag_block_rlp : *it++) { - dag_blocks.emplace_back(dag_block_rlp); - } + const auto block_bundle_rlp = *it++; + dag_blocks = decodeDAGBlocksBundleRlp(block_bundle_rlp); for (auto const trx_rlp : *it++) { transactions.emplace_back(std::make_shared(trx_rlp)); @@ -55,10 +54,7 @@ bytes PeriodData::rlp() const { s.append(""); } - s.appendList(dag_blocks.size()); - for (auto const& b : dag_blocks) { - s.appendRaw(b.rlp(true)); - } + s.appendRaw(encodeDAGBlocksBundleRlp(dag_blocks)); s.appendList(transactions.size()); for (auto const& t : transactions) { @@ -81,6 +77,32 @@ void PeriodData::clear() { pillar_votes_.reset(); } +PeriodData PeriodData::FromOldPeriodData(const dev::RLP& rlp) { + PeriodData period_data; + auto it = rlp.begin(); + period_data.pbft_blk = std::make_shared(*it++); + + const auto votes_bundle_rlp = *it++; + if (period_data.pbft_blk->getPeriod() > 1) [[likely]] { + 
period_data.previous_block_cert_votes = decodePbftVotesBundleRlp(votes_bundle_rlp); + } + + for (auto const dag_block_rlp : *it++) { + period_data.dag_blocks.emplace_back(dag_block_rlp); + } + + for (auto const trx_rlp : *it++) { + period_data.transactions.emplace_back(std::make_shared(trx_rlp)); + } + + // Pillar votes are optional data of period data since ficus hardfork + if (rlp.itemCount() == 5) { + period_data.pillar_votes_ = decodePillarVotesBundleRlp(*it); + } + return period_data; + +} + std::ostream& operator<<(std::ostream& strm, PeriodData const& b) { strm << "[PeriodData] : " << b.pbft_blk << " , num of votes " << b.previous_block_cert_votes.size() << std::endl; return strm; diff --git a/submodules/taraxa-evm b/submodules/taraxa-evm index 2edc2f91df..476ac57228 160000 --- a/submodules/taraxa-evm +++ b/submodules/taraxa-evm @@ -1 +1 @@ -Subproject commit 2edc2f91df972511e1ccba6440a38efd32812ee2 +Subproject commit 476ac5722850410018f7ff6038fb9e3ef505fd2a From cb49ed8bf136ab083dc6e3a93fad1bc007af750f Mon Sep 17 00:00:00 2001 From: Matus Kysel Date: Tue, 20 Aug 2024 13:12:57 +0200 Subject: [PATCH 006/105] add more functions to fully sync on mainnet --- .../latest/pbft_sync_packet_handler.cpp | 2 +- libraries/core_libs/storage/src/storage.cpp | 3 +- .../dag_block/src/dag_block_bundle_rlp.cpp | 33 ++++++++++++------- .../pbft_block/include/pbft/period_data.hpp | 2 +- .../types/pbft_block/src/period_data.cpp | 1 - 5 files changed, 26 insertions(+), 15 deletions(-) diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/pbft_sync_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/pbft_sync_packet_handler.cpp index 561217b6e6..96f5217775 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/pbft_sync_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/pbft_sync_packet_handler.cpp @@ -231,7 +231,7 @@ void PbftSyncPacketHandler::process(const
threadpool::PacketData &packet_data, } PeriodData PbftSyncPacketHandler::decodePeriodData(const dev::RLP &period_data_rlp) const { - return PeriodData(period_data_rlp); + return PeriodData::FromOldPeriodData(period_data_rlp); } std::vector> PbftSyncPacketHandler::decodeVotesBundle( diff --git a/libraries/core_libs/storage/src/storage.cpp b/libraries/core_libs/storage/src/storage.cpp index 8965b0cf55..f1e5188ad6 100644 --- a/libraries/core_libs/storage/src/storage.cpp +++ b/libraries/core_libs/storage/src/storage.cpp @@ -7,6 +7,7 @@ #include #include "config/version.hpp" +#include "dag/dag_block_bundle_rlp.hpp" #include "dag/sortition_params_manager.hpp" #include "final_chain/final_chain.hpp" #include "pillar_chain/pillar_block.hpp" @@ -438,7 +439,7 @@ std::shared_ptr DbStorage::getDagBlock(blk_hash_t const& hash) { if (period_data.size() > 0) { auto period_data_rlp = dev::RLP(period_data); auto dag_blocks_data = period_data_rlp[DAG_BLOCKS_POS_IN_PERIOD_DATA]; - return std::make_shared(dag_blocks_data[data->second]); + return decodeDAGBlockBundleRlp(data->second, dag_blocks_data); } } return nullptr; diff --git a/libraries/types/dag_block/src/dag_block_bundle_rlp.cpp b/libraries/types/dag_block/src/dag_block_bundle_rlp.cpp index 097b508611..569a8457f5 100644 --- a/libraries/types/dag_block/src/dag_block_bundle_rlp.cpp +++ b/libraries/types/dag_block/src/dag_block_bundle_rlp.cpp @@ -59,9 +59,8 @@ std::vector decodeDAGBlocksBundleRlp(const dev::RLP& blocks_bundle_rlp // Decode transaction hashes and ordered_trx_hashes.reserve(blocks_bundle_rlp[0].itemCount()); - for (const auto& trx_hash_rlp : blocks_bundle_rlp[0]) { - ordered_trx_hashes.push_back(trx_hash_rlp.toHash()); - } + std::transform(blocks_bundle_rlp[0].begin(), blocks_bundle_rlp[0].end(), std::back_inserter(ordered_trx_hashes), + [](const auto& trx_hash_rlp) { return trx_hash_rlp.template toHash(); }); for (const auto& idx_rlp : blocks_bundle_rlp[1]) { std::vector hashes; @@ -83,15 +82,27 @@ std::vector 
decodeDAGBlocksBundleRlp(const dev::RLP& blocks_bundle_rlp return blocks; } -// // constexpr static size_t kPillarblocksBundleRlpSize{3}; +std::shared_ptr decodeDAGBlockBundleRlp(uint64_t index, const dev::RLP& blocks_bundle_rlp) { + if (blocks_bundle_rlp.itemCount() != kDAGBlocksBundleRlpSize) { + return {}; + } + if (index >= blocks_bundle_rlp[2].itemCount()) { + return {}; + } -// /** -// * @brief Decodes single dag block from optimized blocks bundle rlp -// * -// * @param blocks_bundle_rlp -// * @return block -// */ -// std::shared_ptr decodeDAGBlockBundleRlp(uint64_t index, const dev::RLP& blocks_bundle_rlp); + std::vector ordered_trx_hashes; + ordered_trx_hashes.reserve(blocks_bundle_rlp[0].itemCount()); + std::transform(blocks_bundle_rlp[0].begin(), blocks_bundle_rlp[0].end(), std::back_inserter(ordered_trx_hashes), + [](const auto& trx_hash_rlp) { return trx_hash_rlp.template toHash(); }); + + const auto idx_rlp = blocks_bundle_rlp[1][index]; + std::vector trx_hashes; + trx_hashes.reserve(idx_rlp.itemCount()); + for (const auto& i : idx_rlp) { + trx_hashes.push_back(ordered_trx_hashes[i.toInt()]); + } + return std::make_shared(blocks_bundle_rlp[2][index], std::move(trx_hashes)); +} /** @}*/ diff --git a/libraries/types/pbft_block/include/pbft/period_data.hpp b/libraries/types/pbft_block/include/pbft/period_data.hpp index 408f448c08..134af25056 100644 --- a/libraries/types/pbft_block/include/pbft/period_data.hpp +++ b/libraries/types/pbft_block/include/pbft/period_data.hpp @@ -31,7 +31,7 @@ class PeriodData { explicit PeriodData(const dev::RLP& all_rlp); explicit PeriodData(bytes const& all_rlp); - static PeriodData FromOldPeriodData(const dev::RLP& rlp) ; + static PeriodData FromOldPeriodData(const dev::RLP& rlp); std::shared_ptr pbft_blk; std::vector> previous_block_cert_votes; // These votes are the cert votes of previous block diff --git a/libraries/types/pbft_block/src/period_data.cpp b/libraries/types/pbft_block/src/period_data.cpp index 
809841f163..20014e7bc7 100644 --- a/libraries/types/pbft_block/src/period_data.cpp +++ b/libraries/types/pbft_block/src/period_data.cpp @@ -100,7 +100,6 @@ PeriodData PeriodData::FromOldPeriodData(const dev::RLP& rlp) { period_data.pillar_votes_ = decodePillarVotesBundleRlp(*it); } return period_data; - } std::ostream& operator<<(std::ostream& strm, PeriodData const& b) { From f76821deb41556eec8a6e814fff5a26e7a28cc9e Mon Sep 17 00:00:00 2001 From: Matus Kysel Date: Tue, 20 Aug 2024 13:41:31 +0200 Subject: [PATCH 007/105] cppcheck improvements --- .../dag_block/src/dag_block_bundle_rlp.cpp | 20 +++++++++---------- 1 file changed, 9 insertions(+), 11 deletions(-) diff --git a/libraries/types/dag_block/src/dag_block_bundle_rlp.cpp b/libraries/types/dag_block/src/dag_block_bundle_rlp.cpp index 569a8457f5..af34357631 100644 --- a/libraries/types/dag_block/src/dag_block_bundle_rlp.cpp +++ b/libraries/types/dag_block/src/dag_block_bundle_rlp.cpp @@ -21,8 +21,7 @@ dev::bytes encodeDAGBlocksBundleRlp(const std::vector& blocks) { idx.reserve(block.getTrxs().size()); for (const auto& trx : block.getTrxs()) { - if (trx_hash_map.find(trx) == trx_hash_map.end()) { - trx_hash_map[trx] = static_cast(trx_hash_map.size()); + if (const auto [_, ok] = trx_hash_map.try_emplace(trx, static_cast(trx_hash_map.size())); ok) { ordered_trx_hashes.push_back(trx); // Track the insertion order } idx.push_back(trx_hash_map[trx]); @@ -65,9 +64,9 @@ std::vector decodeDAGBlocksBundleRlp(const dev::RLP& blocks_bundle_rlp for (const auto& idx_rlp : blocks_bundle_rlp[1]) { std::vector hashes; hashes.reserve(idx_rlp.itemCount()); - for (const auto& i : idx_rlp) { - hashes.push_back(ordered_trx_hashes[i.toInt()]); - } + std::transform(idx_rlp.begin(), idx_rlp.end(), std::back_inserter(hashes), + [&ordered_trx_hashes](const auto& i) { return ordered_trx_hashes[i.template toInt()]; }); + dags_trx_hashes.push_back(std::move(hashes)); } @@ -96,12 +95,11 @@ std::shared_ptr decodeDAGBlockBundleRlp(uint64_t 
index, const dev::RLP [](const auto& trx_hash_rlp) { return trx_hash_rlp.template toHash(); }); const auto idx_rlp = blocks_bundle_rlp[1][index]; - std::vector trx_hashes; - trx_hashes.reserve(idx_rlp.itemCount()); - for (const auto& i : idx_rlp) { - trx_hashes.push_back(ordered_trx_hashes[i.toInt()]); - } - return std::make_shared(blocks_bundle_rlp[2][index], std::move(trx_hashes)); + std::vector hashes; + hashes.reserve(idx_rlp.itemCount()); + std::transform(idx_rlp.begin(), idx_rlp.end(), std::back_inserter(hashes), + [&ordered_trx_hashes](const auto& i) { return ordered_trx_hashes[i.template toInt()]; }); + return std::make_shared(blocks_bundle_rlp[2][index], std::move(hashes)); } /** @}*/ From c91c17d8ffbfd0de45173724f04b0766dac8b06e Mon Sep 17 00:00:00 2001 From: mfrankovi Date: Mon, 12 Aug 2024 14:18:55 +0200 Subject: [PATCH 008/105] chore: add rpc for retrieving nodes versions --- libraries/core_libs/network/rpc/Taraxa.cpp | 52 +++++++++++++++++++ libraries/core_libs/network/rpc/Taraxa.h | 1 + .../core_libs/network/rpc/Taraxa.jsonrpc.json | 6 +++ .../core_libs/network/rpc/TaraxaClient.h | 9 ++++ libraries/core_libs/network/rpc/TaraxaFace.h | 8 +++ libraries/core_libs/node/src/node.cpp | 8 +-- 6 files changed, 80 insertions(+), 4 deletions(-) diff --git a/libraries/core_libs/network/rpc/Taraxa.cpp b/libraries/core_libs/network/rpc/Taraxa.cpp index 53198f04a1..f17723605a 100644 --- a/libraries/core_libs/network/rpc/Taraxa.cpp +++ b/libraries/core_libs/network/rpc/Taraxa.cpp @@ -110,6 +110,58 @@ Json::Value Taraxa::taraxa_getScheduleBlockByPeriod(const std::string& _period) } } +Json::Value Taraxa::taraxa_getNodeVersions() { + try { + Json::Value res; + auto node = tryGetNode(); + auto db = node->getDB(); + auto period = node->getFinalChain()->last_block_number(); + const uint64_t max_blocks_to_process = 6000; + std::map node_version_map; + std::multimap> version_node_map; + std::map> version_count; + for (uint64_t i = period; i > 0 && period - i < 
max_blocks_to_process; i--) { + auto blk = db->getPbftBlock(i); + if (!blk.has_value()) { + break; + } + if (!node_version_map.contains(blk->getBeneficiary())) { + node_version_map[blk->getBeneficiary()] = blk->getExtraData()->getJson()["major_version"].asString() + "." + + blk->getExtraData()->getJson()["minor_version"].asString() + "." + + blk->getExtraData()->getJson()["patch_version"].asString(); + } + } + + auto total_vote_count = node->getFinalChain()->dpos_eligible_total_vote_count(period); + for (auto nv : node_version_map) { + auto vote_count = node->getFinalChain()->dpos_eligible_vote_count(period, nv.first); + version_node_map.insert({nv.second, {nv.first, vote_count}}); + version_count[nv.second].first++; + version_count[nv.second].second += vote_count; + } + + res["nodes"] = Json::Value(Json::arrayValue); + for (auto vn : version_node_map) { + Json::Value node_json; + node_json["node"] = vn.second.first.toString(); + node_json["version"] = vn.first; + node_json["vote_count"] = vn.second.second; + res["nodes"].append(node_json); + } + res["versions"] = Json::Value(Json::arrayValue); + for (auto vc : version_count) { + Json::Value version_json; + version_json["version"] = vc.first; + version_json["node_count"] = vc.second.first; + version_json["vote_percentage"] = vc.second.second * 100 / total_vote_count; + res["versions"].append(version_json); + } + return res; + } catch (...) 
{ + BOOST_THROW_EXCEPTION(JsonRpcException(Errors::ERROR_RPC_INVALID_PARAMS)); + } +} + Json::Value Taraxa::taraxa_getDagBlockByLevel(const string& _blockLevel, bool _includeTransactions) { try { auto node = tryGetNode(); diff --git a/libraries/core_libs/network/rpc/Taraxa.h b/libraries/core_libs/network/rpc/Taraxa.h index 52fc9d8b25..4e5e7c2f3f 100644 --- a/libraries/core_libs/network/rpc/Taraxa.h +++ b/libraries/core_libs/network/rpc/Taraxa.h @@ -26,6 +26,7 @@ class Taraxa : public TaraxaFace { virtual std::string taraxa_dagBlockLevel() override; virtual std::string taraxa_dagBlockPeriod() override; virtual Json::Value taraxa_getScheduleBlockByPeriod(const std::string& _period) override; + virtual Json::Value taraxa_getNodeVersions() override; virtual std::string taraxa_pbftBlockHashByPeriod(const std::string& _period) override; virtual Json::Value taraxa_getConfig() override; virtual Json::Value taraxa_getChainStats() override; diff --git a/libraries/core_libs/network/rpc/Taraxa.jsonrpc.json b/libraries/core_libs/network/rpc/Taraxa.jsonrpc.json index ca7576933d..255da4cd2b 100644 --- a/libraries/core_libs/network/rpc/Taraxa.jsonrpc.json +++ b/libraries/core_libs/network/rpc/Taraxa.jsonrpc.json @@ -47,6 +47,12 @@ "order": [], "returns": {} }, + { + "name": "taraxa_getNodeVersions", + "params": [], + "order": [], + "returns": {} + }, { "name": "taraxa_getConfig", "params": [], diff --git a/libraries/core_libs/network/rpc/TaraxaClient.h b/libraries/core_libs/network/rpc/TaraxaClient.h index e0b318774c..49a6a978d7 100644 --- a/libraries/core_libs/network/rpc/TaraxaClient.h +++ b/libraries/core_libs/network/rpc/TaraxaClient.h @@ -79,6 +79,15 @@ class TaraxaClient : public jsonrpc::Client { else throw jsonrpc::JsonRpcException(jsonrpc::Errors::ERROR_CLIENT_INVALID_RESPONSE, result.toStyledString()); } + Json::Value taraxa_getNodeVersions() throw(jsonrpc::JsonRpcException) { + Json::Value p; + p = Json::nullValue; + Json::Value result = 
this->CallMethod("taraxa_getNodeVersions", p); + if (result.isObject()) + return result; + else + throw jsonrpc::JsonRpcException(jsonrpc::Errors::ERROR_CLIENT_INVALID_RESPONSE, result.toStyledString()); + } Json::Value taraxa_getConfig() throw(jsonrpc::JsonRpcException) { Json::Value p; p = Json::nullValue; diff --git a/libraries/core_libs/network/rpc/TaraxaFace.h b/libraries/core_libs/network/rpc/TaraxaFace.h index 797e6af5f0..4db4e97472 100644 --- a/libraries/core_libs/network/rpc/TaraxaFace.h +++ b/libraries/core_libs/network/rpc/TaraxaFace.h @@ -37,6 +37,9 @@ class TaraxaFace : public ServerInterface { this->bindAndAddMethod(jsonrpc::Procedure("taraxa_getScheduleBlockByPeriod", jsonrpc::PARAMS_BY_POSITION, jsonrpc::JSON_OBJECT, "param1", jsonrpc::JSON_STRING, NULL), &taraxa::net::TaraxaFace::taraxa_getScheduleBlockByPeriodI); + this->bindAndAddMethod( + jsonrpc::Procedure("taraxa_getNodeVersions", jsonrpc::PARAMS_BY_POSITION, jsonrpc::JSON_OBJECT, NULL), + &taraxa::net::TaraxaFace::taraxa_getNodeVersionsI); this->bindAndAddMethod( jsonrpc::Procedure("taraxa_getConfig", jsonrpc::PARAMS_BY_POSITION, jsonrpc::JSON_OBJECT, NULL), &taraxa::net::TaraxaFace::taraxa_getConfigI); @@ -83,6 +86,10 @@ class TaraxaFace : public ServerInterface { inline virtual void taraxa_getScheduleBlockByPeriodI(const Json::Value &request, Json::Value &response) { response = this->taraxa_getScheduleBlockByPeriod(request[0u].asString()); } + inline virtual void taraxa_getNodeVersionsI(const Json::Value &request, Json::Value &response) { + (void)request; + response = this->taraxa_getNodeVersions(); + } inline virtual void taraxa_getConfigI(const Json::Value &request, Json::Value &response) { (void)request; response = this->taraxa_getConfig(); @@ -114,6 +121,7 @@ class TaraxaFace : public ServerInterface { virtual std::string taraxa_dagBlockLevel() = 0; virtual std::string taraxa_dagBlockPeriod() = 0; virtual Json::Value taraxa_getScheduleBlockByPeriod(const std::string ¶m1) = 0; + virtual 
Json::Value taraxa_getNodeVersions() = 0; virtual Json::Value taraxa_getConfig() = 0; virtual Json::Value taraxa_getChainStats() = 0; virtual std::string taraxa_pbftBlockHashByPeriod(const std::string ¶m1) = 0; diff --git a/libraries/core_libs/node/src/node.cpp b/libraries/core_libs/node/src/node.cpp index cb65d26a3a..4285b15d65 100644 --- a/libraries/core_libs/node/src/node.cpp +++ b/libraries/core_libs/node/src/node.cpp @@ -214,10 +214,10 @@ void FullNode::start() { auto eth_json_rpc = net::rpc::eth::NewEth(std::move(eth_rpc_params)); std::shared_ptr test_json_rpc; - if (conf_.enable_test_rpc) { - // TODO Because this object refers to FullNode, the lifecycle/dependency management is more complicated); - test_json_rpc = std::make_shared(shared_from_this()); - } + // if (conf_.enable_test_rpc) { + // TODO Because this object refers to FullNode, the lifecycle/dependency management is more complicated); + test_json_rpc = std::make_shared(shared_from_this()); + //} std::shared_ptr debug_json_rpc; if (conf_.enable_debug) { From a52a6d271e6bdaf53cdd2c5c4dd46f9df4a7a24d Mon Sep 17 00:00:00 2001 From: Jakub Fornadel Date: Tue, 6 Aug 2024 13:07:10 -0700 Subject: [PATCH 009/105] refactor FinalChain class --- .../consensus/include/dag/dag_manager.hpp | 4 +- .../include/final_chain/final_chain.hpp | 147 ++++++++---- .../include/final_chain/final_chain_impl.hpp | 109 --------- .../include/key_manager/key_manager.hpp | 4 +- .../consensus/include/pbft/pbft_manager.hpp | 4 +- .../include/rewards/rewards_stats.hpp | 2 + .../slashing_manager/slashing_manager.hpp | 4 +- .../transaction/transaction_manager.hpp | 6 +- .../include/vote_manager/vote_manager.hpp | 4 +- .../consensus/src/dag/dag_block_proposer.cpp | 10 +- .../consensus/src/dag/dag_manager.cpp | 20 +- .../{final_chai_impl.cpp => final_chain.cpp} | 215 +++++++++--------- .../consensus/src/key_manager/key_manager.cpp | 4 +- .../consensus/src/pbft/pbft_manager.cpp | 50 ++-- .../src/pillar_chain/pillar_chain_manager.cpp | 8 +- 
.../src/slashing_manager/slashing_manager.cpp | 4 +- .../src/transaction/transaction_manager.cpp | 12 +- .../src/transaction/transaction_queue.cpp | 4 +- .../src/vote_manager/vote_manager.cpp | 31 ++- .../core_libs/network/graphql/src/account.cpp | 8 +- .../core_libs/network/graphql/src/query.cpp | 10 +- .../network/graphql/src/sync_state.cpp | 2 +- .../network/graphql/src/transaction.cpp | 18 +- .../graphql/src/types/current_state.cpp | 2 +- libraries/core_libs/network/rpc/Debug.cpp | 18 +- libraries/core_libs/network/rpc/Taraxa.cpp | 12 +- libraries/core_libs/network/rpc/Test.cpp | 2 +- libraries/core_libs/network/rpc/eth/Eth.cpp | 38 ++-- libraries/core_libs/network/rpc/eth/Eth.h | 2 +- .../core_libs/network/rpc/eth/LogFilter.cpp | 22 +- .../core_libs/network/rpc/eth/LogFilter.hpp | 2 +- .../core_libs/node/include/node/node.hpp | 2 +- libraries/core_libs/node/src/node.cpp | 6 +- tests/final_chain_test.cpp | 73 +++--- tests/full_node_test.cpp | 22 +- tests/pbft_manager_test.cpp | 20 +- tests/pillar_chain_test.cpp | 10 +- tests/rpc_test.cpp | 8 +- tests/state_api_test.cpp | 18 +- .../src/node_dag_creation_fixture.cpp | 6 +- tests/test_util/src/test_util.cpp | 4 +- tests/transaction_test.cpp | 18 +- 42 files changed, 453 insertions(+), 512 deletions(-) delete mode 100644 libraries/core_libs/consensus/include/final_chain/final_chain_impl.hpp rename libraries/core_libs/consensus/src/final_chain/{final_chai_impl.cpp => final_chain.cpp} (69%) diff --git a/libraries/core_libs/consensus/include/dag/dag_manager.hpp b/libraries/core_libs/consensus/include/dag/dag_manager.hpp index 30d025ba1a..d6683848a1 100644 --- a/libraries/core_libs/consensus/include/dag/dag_manager.hpp +++ b/libraries/core_libs/consensus/include/dag/dag_manager.hpp @@ -46,7 +46,7 @@ class DagManager : public std::enable_shared_from_this { explicit DagManager(const DagBlock &dag_genesis_block, addr_t node_addr, const SortitionConfig &sortition_config, const DagConfig &dag_config, std::shared_ptr 
trx_mgr, - std::shared_ptr pbft_chain, std::shared_ptr final_chain, + std::shared_ptr pbft_chain, std::shared_ptr final_chain, std::shared_ptr db, std::shared_ptr key_manager, uint64_t pbft_gas_limit, const state_api::Config &state_config, bool is_light_node = false, uint64_t light_node_history = 0, uint32_t max_levels_per_period = kMaxLevelsPerPeriod, @@ -281,7 +281,7 @@ class DagManager : public std::enable_shared_from_this { const uint32_t cache_max_size_ = 10000; const uint32_t cache_delete_step_ = 100; ExpirationCacheMap seen_blocks_; - std::shared_ptr final_chain_; + std::shared_ptr final_chain_; const uint64_t kPbftGasLimit; const HardforksConfig kHardforks; const uint64_t kValidatorMaxVote; diff --git a/libraries/core_libs/consensus/include/final_chain/final_chain.hpp b/libraries/core_libs/consensus/include/final_chain/final_chain.hpp index eeaef817a2..d66cd6bae4 100644 --- a/libraries/core_libs/consensus/include/final_chain/final_chain.hpp +++ b/libraries/core_libs/consensus/include/final_chain/final_chain.hpp @@ -5,9 +5,11 @@ #include "common/event.hpp" #include "common/types.hpp" #include "config/config.hpp" +#include "final_chain/cache.hpp" #include "final_chain/data.hpp" #include "final_chain/state_api.hpp" #include "final_chain/state_api_data.hpp" +#include "rewards/rewards_stats.hpp" #include "storage/storage.hpp" namespace taraxa::final_chain { @@ -35,15 +37,16 @@ class FinalChain { decltype(block_applying_emitter_)::Subscriber const& block_applying_ = block_applying_emitter_; FinalChain() = default; - virtual ~FinalChain() = default; + ~FinalChain() = default; + FinalChain(const std::shared_ptr& db, const taraxa::FullNodeConfig& config, const addr_t& node_addr); FinalChain(const FinalChain&) = delete; FinalChain(FinalChain&&) = delete; FinalChain& operator=(const FinalChain&) = delete; FinalChain& operator=(FinalChain&&) = delete; - virtual void stop() = 0; + void stop(); - virtual EthBlockNumber delegation_delay() const = 0; + EthBlockNumber 
delegationDelay() const; /** * @brief Method which finalizes a block and executes it in EVM @@ -53,9 +56,9 @@ class FinalChain { * @param precommit_ext * @return finalization result */ - virtual std::future> finalize( - PeriodData&& period_data, std::vector&& finalized_dag_blk_hashes, - std::shared_ptr&& anchor = nullptr) = 0; + std::future> finalize(PeriodData&& period_data, + std::vector&& finalized_dag_blk_hashes, + std::shared_ptr&& anchor = nullptr); /** * @brief Method to get block header by block number @@ -63,13 +66,13 @@ class FinalChain { * @param n block number of header to get. If not specified then it returns latest * @return BlockHeader */ - virtual std::shared_ptr block_header(std::optional n = {}) const = 0; + std::shared_ptr blockHeader(std::optional n = {}) const; /** * @brief Method to get last block number(chain size) * @return EthBlockNumber */ - virtual EthBlockNumber last_block_number() const = 0; + EthBlockNumber lastBlockNumber() const; /** * @brief Method to get block number by hash @@ -77,7 +80,7 @@ class FinalChain { * @param h block hash * @return EthBlockNumber */ - virtual std::optional block_number(h256 const& h) const = 0; + std::optional blockNumber(h256 const& h) const; /** * @brief Method to get block hash by block number @@ -85,48 +88,48 @@ class FinalChain { * @param n EthBlockNumber * @return BlockHash h256 */ - virtual std::optional block_hash(std::optional n = {}) const = 0; + std::optional blockHash(std::optional n = {}) const; /** * @brief Needed if we are changing params with hardfork and it affects Go part of code. 
For example DPOS contract * @param new_config state_api::Config */ - virtual void update_state_config(const state_api::Config& new_config) = 0; + void updateStateConfig(const state_api::Config& new_config); /** * @brief Method to get all transaction hashes from the block * @param n EthBlockNumber * @return TransactionHashes list if transaction hashes */ - virtual std::shared_ptr transaction_hashes(std::optional n = {}) const = 0; + std::shared_ptr transactionHashes(std::optional n = {}) const; /** * @brief Method to get all transactions from the block * @param n EthBlockNumber * @return SharedTransactions vector of shared_ptrs to Transaction */ - virtual const SharedTransactions transactions(std::optional n = {}) const = 0; + const SharedTransactions transactions(std::optional n = {}) const; /** * @brief Method to get transaction location by hash * @param trx_hash hash of transaction to get location for * @return std::optional transaction location or nullopt */ - virtual std::optional transaction_location(h256 const& trx_hash) const = 0; + std::optional transactionLocation(h256 const& trx_hash) const; /** * @brief Method to get transaction receipt by hash * @param _transactionHash hash of transaction to get receipt for * @return std::optional transaction receipt or nullopt */ - virtual std::optional transaction_receipt(h256 const& _transactionHash) const = 0; + std::optional transactionReceipt(h256 const& _transactionHash) const; /** * @brief Method to get transactions count in block * @param n block number * @return count of transactions in block */ - virtual uint64_t transactionCount(std::optional n = {}) const = 0; + uint64_t transactionCount(std::optional n = {}) const; /** * @brief Method used to search for contract logs with bloom filter @@ -135,8 +138,7 @@ class FinalChain { * @param to EthBlockNumber block to end search * @return block that matches specified bloom filter */ - virtual std::vector withBlockBloom(LogBloom const& b, EthBlockNumber from, - 
EthBlockNumber to) const = 0; + std::vector withBlockBloom(LogBloom const& b, EthBlockNumber from, EthBlockNumber to) const; /** * @brief Method to get account information @@ -145,8 +147,7 @@ class FinalChain { * @param blk_n number of block we are getting state from * @return std::optional account object or nullopt if account wasn't found */ - virtual std::optional get_account(addr_t const& addr, - std::optional blk_n = {}) const = 0; + std::optional getAccount(addr_t const& addr, std::optional blk_n = {}) const; /** * @brief Returns the value from a storage position at a given address. @@ -155,15 +156,14 @@ class FinalChain { * @param blk_n number of block we are getting state from * @return the value at this storage position */ - virtual h256 get_account_storage(addr_t const& addr, u256 const& key, - std::optional blk_n = {}) const = 0; + h256 getAccountStorage(addr_t const& addr, u256 const& key, std::optional blk_n = {}) const; /** * @brief Returns code at a given address. * @param addr account address * @param blk_n number of block we are getting state from * @return code at a given address. */ - virtual bytes get_code(addr_t const& addr, std::optional blk_n = {}) const = 0; + bytes getCode(addr_t const& addr, std::optional blk_n = {}) const; /** * @brief Executes a new message call immediately without creating a transaction on the block chain. That means that @@ -172,8 +172,7 @@ class FinalChain { * @param blk_n EthBlockNumber number of block we are getting state from * @return state_api::ExecutionResult */ - virtual state_api::ExecutionResult call(state_api::EVMTransaction const& trx, - std::optional blk_n = {}) const = 0; + state_api::ExecutionResult call(state_api::EVMTransaction const& trx, std::optional blk_n = {}) const; /** * @brief Trace execution of a new message call immediately without creating a transactions on the block chain. 
That @@ -182,15 +181,15 @@ class FinalChain { * @param blk_n EthBlockNumber number of block we are getting state from * @return std::string */ - virtual std::string trace(std::vector trx, EthBlockNumber blk_n, - std::optional params = {}) const = 0; + std::string trace(std::vector trx, EthBlockNumber blk_n, + std::optional params = {}) const; /** * @brief total count of eligible votes are in DPOS precompiled contract * @param blk_num EthBlockNumber number of block we are getting state from * @return total count of eligible votes */ - virtual uint64_t dpos_eligible_total_vote_count(EthBlockNumber blk_num) const = 0; + uint64_t dposEligibleTotalVoteCount(EthBlockNumber blk_num) const; /** * @brief total count of eligible votes account has in DPOS precompiled contract @@ -198,7 +197,7 @@ class FinalChain { * @param addr account address * @return address eligible votes count */ - virtual uint64_t dpos_eligible_vote_count(EthBlockNumber blk_num, addr_t const& addr) const = 0; + uint64_t dposEligibleVoteCount(EthBlockNumber blk_num, addr_t const& addr) const; /** * @brief method to check if address have enough votes to participate in consensus @@ -206,7 +205,7 @@ class FinalChain { * @param addr account address * @return is address eligible */ - virtual bool dpos_is_eligible(EthBlockNumber blk_num, addr_t const& addr) const = 0; + bool dposIsEligible(EthBlockNumber blk_num, addr_t const& addr) const; /** * @brief Get the vrf key object from DPOS state @@ -214,66 +213,114 @@ class FinalChain { * @param blk_n number of block we are getting state from * @return vrf_wrapper::vrf_pk_t */ - virtual vrf_wrapper::vrf_pk_t dpos_get_vrf_key(EthBlockNumber blk_n, const addr_t& addr) const = 0; + vrf_wrapper::vrf_pk_t dposGetVrfKey(EthBlockNumber blk_n, const addr_t& addr) const; /** * @brief Prune state db for all blocks older than blk_n * @param blk_n number of block we are getting state from */ - virtual void prune(EthBlockNumber blk_n) = 0; + void prune(EthBlockNumber blk_n); 
/** * @brief Wait until next block is finalized */ - virtual void wait_for_finalized() = 0; + void waitForFinalized(); - virtual std::vector dpos_validators_total_stakes(EthBlockNumber blk_num) const = 0; + std::vector dposValidatorsTotalStakes(EthBlockNumber blk_num) const; - virtual uint256_t dpos_total_amount_delegated(EthBlockNumber blk_num) const = 0; + uint256_t dposTotalAmountDelegated(EthBlockNumber blk_num) const; /** * @param blk_num * @return vector of validators vote counts for provided blk_num */ - virtual std::vector dpos_validators_vote_counts(EthBlockNumber blk_num) const = 0; + std::vector dposValidatorsVoteCounts(EthBlockNumber blk_num) const; /** * @param blk_num * @return yield */ - virtual uint64_t dpos_yield(EthBlockNumber blk_num) const = 0; + uint64_t dposYield(EthBlockNumber blk_num) const; /** * @param blk_num * @return total supply */ - virtual u256 dpos_total_supply(EthBlockNumber blk_num) const = 0; + u256 dposTotalSupply(EthBlockNumber blk_num) const; /** * @param blk_num * @return bridge root */ - virtual h256 get_bridge_root(EthBlockNumber blk_num) const = 0; + h256 getBridgeRoot(EthBlockNumber blk_num) const; /** * @param blk_num * @return bridge epoch */ - virtual h256 get_bridge_epoch(EthBlockNumber blk_num) const = 0; + h256 getBridgeEpoch(EthBlockNumber blk_num) const; // TODO move out of here: - std::pair getBalance(addr_t const& addr) const { - if (auto acc = get_account(addr)) { - return {acc->balance, true}; - } - return {0, false}; - } + std::pair getBalance(addr_t const& addr) const; + SharedTransaction makeBridgeFinalizationTransaction(); + bool isNeedToFinalize(EthBlockNumber blk_num) const; + std::vector makeSystemTransactions(PbftPeriod blk_num); + std::shared_ptr finalize_(PeriodData&& new_blk, + std::vector&& finalized_dag_blk_hashes, + std::shared_ptr&& anchor); + std::shared_ptr appendBlock(DB::Batch& batch, const addr_t& author, uint64_t timestamp, + uint64_t gas_limit, const h256& state_root, u256 total_reward, + 
const SharedTransactions& transactions = {}, + const TransactionReceipts& receipts = {}, const bytes& extra_data = {}); + + private: + std::shared_ptr getTransactionHashes(std::optional n = {}) const; + const SharedTransactions getTransactions(std::optional n = {}) const; + std::shared_ptr getBlockHeader(EthBlockNumber n) const; + std::optional getBlockHash(EthBlockNumber n) const; + EthBlockNumber lastIfAbsent(const std::optional& client_blk_n) const; + static state_api::EVMTransaction toEvmTransaction(const SharedTransaction& trx); + static void appendEvmTransactions(std::vector& evm_trxs, const SharedTransactions& trxs); + BlocksBlooms blockBlooms(const h256& chunk_id) const; + static h256 blockBloomsChunkId(EthBlockNumber level, EthBlockNumber index); + std::vector withBlockBloom(const LogBloom& b, EthBlockNumber from, EthBlockNumber to, + EthBlockNumber level, EthBlockNumber index) const; + + private: + std::shared_ptr db_; + const uint64_t kBlockGasLimit; + StateAPI state_api_; + const bool kLightNode = false; + const uint32_t kMaxLevelsPerPeriod; + rewards::Stats rewards_; + + // It is not prepared to use more then 1 thread. 
Examine it if you want to change threads count + boost::asio::thread_pool executor_thread_{1}; + + std::atomic num_executed_dag_blk_ = 0; + std::atomic num_executed_trx_ = 0; + + EthBlockNumber delegation_delay_; + + ValueByBlockCache> block_headers_cache_; + ValueByBlockCache> block_hashes_cache_; + ValueByBlockCache transactions_cache_; + ValueByBlockCache> transaction_hashes_cache_; + MapByBlockCache> accounts_cache_; + + ValueByBlockCache total_vote_count_cache_; + MapByBlockCache dpos_vote_count_cache_; + MapByBlockCache dpos_is_eligible_cache_; + + std::condition_variable finalized_cv_; + std::mutex finalized_mtx_; + + std::atomic last_block_number_; + + const HardforksConfig& kHardforksConfig; + LOG_OBJECTS_DEFINE }; /** @} */ } // namespace taraxa::final_chain - -namespace taraxa { -using final_chain::FinalChain; -} // namespace taraxa diff --git a/libraries/core_libs/consensus/include/final_chain/final_chain_impl.hpp b/libraries/core_libs/consensus/include/final_chain/final_chain_impl.hpp deleted file mode 100644 index fc462ab093..0000000000 --- a/libraries/core_libs/consensus/include/final_chain/final_chain_impl.hpp +++ /dev/null @@ -1,109 +0,0 @@ -#pragma once - -#include "final_chain/cache.hpp" -#include "final_chain/final_chain.hpp" -#include "rewards/rewards_stats.hpp" - -namespace taraxa::final_chain { - -class FinalChainImpl final : public FinalChain { - std::shared_ptr db_; - const uint64_t kBlockGasLimit; - StateAPI state_api_; - const bool kLightNode = false; - const uint32_t kMaxLevelsPerPeriod; - rewards::Stats rewards_; - - // It is not prepared to use more then 1 thread. 
Examine it if you want to change threads count - boost::asio::thread_pool executor_thread_{1}; - - std::atomic num_executed_dag_blk_ = 0; - std::atomic num_executed_trx_ = 0; - - EthBlockNumber delegation_delay_; - - ValueByBlockCache> block_headers_cache_; - ValueByBlockCache> block_hashes_cache_; - ValueByBlockCache transactions_cache_; - ValueByBlockCache> transaction_hashes_cache_; - MapByBlockCache> accounts_cache_; - - ValueByBlockCache total_vote_count_cache_; - MapByBlockCache dpos_vote_count_cache_; - MapByBlockCache dpos_is_eligible_cache_; - - std::condition_variable finalized_cv_; - std::mutex finalized_mtx_; - - std::atomic last_block_number_; - - const HardforksConfig& kHardforksConfig; - LOG_OBJECTS_DEFINE - - public: - FinalChainImpl(const std::shared_ptr& db, const taraxa::FullNodeConfig& config, const addr_t& node_addr); - - void stop() override; - std::future> finalize( - PeriodData&& new_blk, std::vector&& finalized_dag_blk_hashes, - std::shared_ptr&& anchor = nullptr) override; - EthBlockNumber delegation_delay() const override; - SharedTransaction make_bridge_finalization_transaction(); - bool isNeedToFinalize(EthBlockNumber blk_num) const; - std::vector makeSystemTransactions(PbftPeriod blk_num); - std::shared_ptr finalize_(PeriodData&& new_blk, - std::vector&& finalized_dag_blk_hashes, - std::shared_ptr&& anchor); - void prune(EthBlockNumber blk_n) override; - std::shared_ptr append_block(DB::Batch& batch, const addr_t& author, uint64_t timestamp, - uint64_t gas_limit, const h256& state_root, u256 total_reward, - const SharedTransactions& transactions = {}, - const TransactionReceipts& receipts = {}, const bytes& extra_data = {}); - EthBlockNumber last_block_number() const override; - std::optional block_number(const h256& h) const override; - std::optional block_hash(std::optional n = {}) const override; - std::shared_ptr block_header(std::optional n = {}) const override; - std::optional transaction_location(const h256& trx_hash) const 
override; - std::optional transaction_receipt(const h256& trx_h) const override; - uint64_t transactionCount(std::optional n = {}) const override; - std::shared_ptr transaction_hashes(std::optional n = {}) const override; - const SharedTransactions transactions(std::optional n = {}) const override; - std::vector withBlockBloom(const LogBloom& b, EthBlockNumber from, EthBlockNumber to) const override; - std::optional get_account(const addr_t& addr, - std::optional blk_n = {}) const override; - void update_state_config(const state_api::Config& new_config) override; - h256 get_account_storage(const addr_t& addr, const u256& key, - std::optional blk_n = {}) const override; - bytes get_code(const addr_t& addr, std::optional blk_n = {}) const override; - state_api::ExecutionResult call(const state_api::EVMTransaction& trx, - std::optional blk_n = {}) const override; - std::string trace(std::vector trxs, EthBlockNumber blk_n, - std::optional params = {}) const override; - uint64_t dpos_eligible_total_vote_count(EthBlockNumber blk_num) const override; - uint64_t dpos_eligible_vote_count(EthBlockNumber blk_num, const addr_t& addr) const override; - bool dpos_is_eligible(EthBlockNumber blk_num, const addr_t& addr) const override; - vrf_wrapper::vrf_pk_t dpos_get_vrf_key(EthBlockNumber blk_n, const addr_t& addr) const override; - std::vector dpos_validators_total_stakes(EthBlockNumber blk_num) const override; - virtual uint256_t dpos_total_amount_delegated(EthBlockNumber blk_num) const override; - std::vector dpos_validators_vote_counts(EthBlockNumber blk_num) const override; - void wait_for_finalized() override; - uint64_t dpos_yield(EthBlockNumber blk_num) const override; - u256 dpos_total_supply(EthBlockNumber blk_num) const override; - h256 get_bridge_root(EthBlockNumber blk_num) const override; - h256 get_bridge_epoch(EthBlockNumber blk_num) const override; - - private: - std::shared_ptr get_transaction_hashes(std::optional n = {}) const; - const SharedTransactions 
get_transactions(std::optional n = {}) const; - std::shared_ptr get_block_header(EthBlockNumber n) const; - std::optional get_block_hash(EthBlockNumber n) const; - EthBlockNumber last_if_absent(const std::optional& client_blk_n) const; - static state_api::EVMTransaction to_evm_transaction(const SharedTransaction& trx); - static void append_evm_transactions(std::vector& evm_trxs, const SharedTransactions& trxs); - BlocksBlooms block_blooms(const h256& chunk_id) const; - static h256 block_blooms_chunk_id(EthBlockNumber level, EthBlockNumber index); - std::vector withBlockBloom(const LogBloom& b, EthBlockNumber from, EthBlockNumber to, - EthBlockNumber level, EthBlockNumber index) const; -}; - -} // namespace taraxa::final_chain \ No newline at end of file diff --git a/libraries/core_libs/consensus/include/key_manager/key_manager.hpp b/libraries/core_libs/consensus/include/key_manager/key_manager.hpp index f698dcb5f9..7c44f58bca 100644 --- a/libraries/core_libs/consensus/include/key_manager/key_manager.hpp +++ b/libraries/core_libs/consensus/include/key_manager/key_manager.hpp @@ -10,7 +10,7 @@ namespace taraxa { class KeyManager { public: - KeyManager(std::shared_ptr final_chain); + KeyManager(std::shared_ptr final_chain); KeyManager(const KeyManager &) = delete; KeyManager(KeyManager &&) = delete; KeyManager &operator=(const KeyManager &) = delete; @@ -22,7 +22,7 @@ class KeyManager { std::shared_mutex vrf_keys_mutex_; std::unordered_map> vrf_keys_; - std::shared_ptr final_chain_; + std::shared_ptr final_chain_; }; } // namespace taraxa \ No newline at end of file diff --git a/libraries/core_libs/consensus/include/pbft/pbft_manager.hpp b/libraries/core_libs/consensus/include/pbft/pbft_manager.hpp index 04ef10314b..5d3c73f973 100644 --- a/libraries/core_libs/consensus/include/pbft/pbft_manager.hpp +++ b/libraries/core_libs/consensus/include/pbft/pbft_manager.hpp @@ -58,7 +58,7 @@ class PbftManager { PbftManager(const GenesisConfig &conf, addr_t node_addr, 
std::shared_ptr db, std::shared_ptr pbft_chain, std::shared_ptr vote_mgr, std::shared_ptr dag_mgr, std::shared_ptr trx_mgr, - std::shared_ptr final_chain, + std::shared_ptr final_chain, std::shared_ptr pillar_chain_mgr, secret_t node_sk); ~PbftManager(); PbftManager(const PbftManager &) = delete; @@ -568,7 +568,7 @@ class PbftManager { std::shared_ptr dag_mgr_; std::weak_ptr network_; std::shared_ptr trx_mgr_; - std::shared_ptr final_chain_; + std::shared_ptr final_chain_; std::shared_ptr pillar_chain_mgr_; const addr_t node_addr_; diff --git a/libraries/core_libs/consensus/include/rewards/rewards_stats.hpp b/libraries/core_libs/consensus/include/rewards/rewards_stats.hpp index d9668ea1c7..c4d58babb3 100644 --- a/libraries/core_libs/consensus/include/rewards/rewards_stats.hpp +++ b/libraries/core_libs/consensus/include/rewards/rewards_stats.hpp @@ -1,3 +1,5 @@ +#pragma once + #include "config/hardfork.hpp" #include "rewards/block_stats.hpp" #include "storage/storage.hpp" diff --git a/libraries/core_libs/consensus/include/slashing_manager/slashing_manager.hpp b/libraries/core_libs/consensus/include/slashing_manager/slashing_manager.hpp index b6d1278045..f49b4a7136 100644 --- a/libraries/core_libs/consensus/include/slashing_manager/slashing_manager.hpp +++ b/libraries/core_libs/consensus/include/slashing_manager/slashing_manager.hpp @@ -7,7 +7,7 @@ namespace taraxa { class SlashingManager { public: - SlashingManager(std::shared_ptr final_chain, std::shared_ptr trx_manager, + SlashingManager(std::shared_ptr final_chain, std::shared_ptr trx_manager, std::shared_ptr gas_pricer, const FullNodeConfig &config, secret_t node_sk); SlashingManager(const SlashingManager &) = delete; SlashingManager(SlashingManager &&) = delete; @@ -17,7 +17,7 @@ class SlashingManager { bool submitDoubleVotingProof(const std::shared_ptr &vote_a, const std::shared_ptr &vote_b); private: - std::shared_ptr final_chain_; + std::shared_ptr final_chain_; std::shared_ptr trx_manager_; std::shared_ptr 
gas_pricer_; diff --git a/libraries/core_libs/consensus/include/transaction/transaction_manager.hpp b/libraries/core_libs/consensus/include/transaction/transaction_manager.hpp index 0d5f272f5d..e6140f511e 100644 --- a/libraries/core_libs/consensus/include/transaction/transaction_manager.hpp +++ b/libraries/core_libs/consensus/include/transaction/transaction_manager.hpp @@ -50,8 +50,8 @@ class FullNode; */ class TransactionManager : public std::enable_shared_from_this { public: - TransactionManager(const FullNodeConfig &conf, std::shared_ptr db, std::shared_ptr final_chain, - addr_t node_addr); + TransactionManager(const FullNodeConfig &conf, std::shared_ptr db, + std::shared_ptr final_chain, addr_t node_addr); /** * @brief Estimates required gas value to execute transaction @@ -245,7 +245,7 @@ class TransactionManager : public std::enable_shared_from_this db_{nullptr}; - std::shared_ptr final_chain_{nullptr}; + std::shared_ptr final_chain_{nullptr}; LOG_OBJECTS_DEFINE }; diff --git a/libraries/core_libs/consensus/include/vote_manager/vote_manager.hpp b/libraries/core_libs/consensus/include/vote_manager/vote_manager.hpp index bf666d3a1e..0272600dcc 100644 --- a/libraries/core_libs/consensus/include/vote_manager/vote_manager.hpp +++ b/libraries/core_libs/consensus/include/vote_manager/vote_manager.hpp @@ -28,7 +28,7 @@ class VoteManager { public: VoteManager(const addr_t& node_addr, const PbftConfig& pbft_config, const secret_t& node_sk, const vrf_wrapper::vrf_sk_t& vrf_sk, std::shared_ptr db, std::shared_ptr pbft_chain, - std::shared_ptr final_chain, std::shared_ptr key_manager, + std::shared_ptr final_chain, std::shared_ptr key_manager, std::shared_ptr slashing_manager); ~VoteManager() = default; VoteManager(const VoteManager&) = delete; @@ -278,7 +278,7 @@ class VoteManager { std::shared_ptr db_; std::shared_ptr pbft_chain_; - std::shared_ptr final_chain_; + std::shared_ptr final_chain_; std::shared_ptr key_manager_; std::weak_ptr network_; std::shared_ptr 
slashing_manager_; diff --git a/libraries/core_libs/consensus/src/dag/dag_block_proposer.cpp b/libraries/core_libs/consensus/src/dag/dag_block_proposer.cpp index 9587b11daf..dd1ccc3893 100644 --- a/libraries/core_libs/consensus/src/dag/dag_block_proposer.cpp +++ b/libraries/core_libs/consensus/src/dag/dag_block_proposer.cpp @@ -78,9 +78,9 @@ bool DagBlockProposer::proposeDagBlock() { } uint64_t max_vote_count = 0; - const auto vote_count = final_chain_->dpos_eligible_vote_count(*proposal_period, node_addr_); + const auto vote_count = final_chain_->dposEligibleVoteCount(*proposal_period, node_addr_); if (*proposal_period < kHardforks.magnolia_hf.block_num) { - max_vote_count = final_chain_->dpos_eligible_total_vote_count(*proposal_period); + max_vote_count = final_chain_->dposEligibleTotalVoteCount(*proposal_period); } else { max_vote_count = kValidatorMaxVote; } @@ -345,14 +345,14 @@ DagBlock DagBlockProposer::createDagBlock(DagFrontier&& frontier, level_t level, } bool DagBlockProposer::isValidDposProposer(PbftPeriod propose_period) const { - if (final_chain_->last_block_number() < propose_period) { - LOG(log_wr_) << "Last finalized block period " << final_chain_->last_block_number() << " < propose_period " + if (final_chain_->lastBlockNumber() < propose_period) { + LOG(log_wr_) << "Last finalized block period " << final_chain_->lastBlockNumber() << " < propose_period " << propose_period; return false; } try { - return final_chain_->dpos_is_eligible(propose_period, node_addr_); + return final_chain_->dposIsEligible(propose_period, node_addr_); } catch (state_api::ErrFutureBlock& c) { LOG(log_wr_) << "Proposal period " << propose_period << " is too far ahead of DPOS. 
" << c.what(); return false; diff --git a/libraries/core_libs/consensus/src/dag/dag_manager.cpp b/libraries/core_libs/consensus/src/dag/dag_manager.cpp index fd5853dc48..03575dd742 100644 --- a/libraries/core_libs/consensus/src/dag/dag_manager.cpp +++ b/libraries/core_libs/consensus/src/dag/dag_manager.cpp @@ -17,7 +17,7 @@ namespace taraxa { DagManager::DagManager(const DagBlock &dag_genesis_block, addr_t node_addr, const SortitionConfig &sortition_config, const DagConfig &dag_config, std::shared_ptr trx_mgr, - std::shared_ptr pbft_chain, std::shared_ptr final_chain, + std::shared_ptr pbft_chain, std::shared_ptr final_chain, std::shared_ptr db, std::shared_ptr key_manager, uint64_t pbft_gas_limit, const state_api::Config &state_config, bool is_light_node, uint64_t light_node_history, uint32_t max_levels_per_period, uint32_t dag_expiry_limit) try @@ -506,9 +506,9 @@ void DagManager::recoverDag() { // Verify VDF solution try { uint64_t max_vote_count = 0; - const auto vote_count = final_chain_->dpos_eligible_vote_count(*propose_period, blk.getSender()); + const auto vote_count = final_chain_->dposEligibleVoteCount(*propose_period, blk.getSender()); if (*propose_period < kHardforks.magnolia_hf.block_num) { - max_vote_count = final_chain_->dpos_eligible_total_vote_count(*propose_period); + max_vote_count = final_chain_->dposEligibleTotalVoteCount(*propose_period); } else { max_vote_count = kValidatorMaxVote; } @@ -662,9 +662,9 @@ std::pair DagManager::ver try { const auto proposal_period_hash = db_->getPeriodBlockHash(*propose_period); uint64_t max_vote_count = 0; - const auto vote_count = final_chain_->dpos_eligible_vote_count(*propose_period, blk.getSender()); + const auto vote_count = final_chain_->dposEligibleVoteCount(*propose_period, blk.getSender()); if (*propose_period < kHardforks.magnolia_hf.block_num) { - max_vote_count = final_chain_->dpos_eligible_total_vote_count(*propose_period); + max_vote_count = 
final_chain_->dposEligibleTotalVoteCount(*propose_period); } else { max_vote_count = kValidatorMaxVote; } @@ -680,7 +680,7 @@ std::pair DagManager::ver auto dag_block_sender = blk.getSender(); bool dpos_qualified; try { - dpos_qualified = final_chain_->dpos_is_eligible(*propose_period, dag_block_sender); + dpos_qualified = final_chain_->dposIsEligible(*propose_period, dag_block_sender); } catch (state_api::ErrFutureBlock &c) { LOG(log_er_) << "Verify proposal period " << *propose_period << " is too far ahead of DPOS. " << c.what(); return {VerifyBlockReturnType::FutureBlock, {}}; @@ -688,7 +688,7 @@ std::pair DagManager::ver if (!dpos_qualified) { LOG(log_er_) << "Invalid DAG block DPOS. DAG block " << blk << " is not eligible for DPOS at period " << *propose_period << " for sender " << dag_block_sender.toString() << " current period " - << final_chain_->last_block_number(); + << final_chain_->lastBlockNumber(); return {VerifyBlockReturnType::NotEligible, {}}; } { @@ -701,14 +701,14 @@ std::pair DagManager::ver if (total_block_weight != block_gas_estimation) { LOG(log_er_) << "Invalid block_gas_estimation. DAG block " << blk.getHash() << " block_gas_estimation: " << block_gas_estimation << " total_block_weight " << total_block_weight - << " current period " << final_chain_->last_block_number(); + << " current period " << final_chain_->lastBlockNumber(); return {VerifyBlockReturnType::IncorrectTransactionsEstimation, {}}; } if (total_block_weight > getDagConfig().gas_limit) { LOG(log_er_) << "BlockTooBig. DAG block " << blk.getHash() << " gas_limit: " << getDagConfig().gas_limit << " total_block_weight " << total_block_weight << " current period " - << final_chain_->last_block_number(); + << final_chain_->lastBlockNumber(); return {VerifyBlockReturnType::BlockTooBig, {}}; } @@ -724,7 +724,7 @@ std::pair DagManager::ver if (block_gas_estimation > kPbftGasLimit) { LOG(log_er_) << "BlockTooBig. 
DAG block " << blk.getHash() << " with tips has limit: " << kPbftGasLimit << " block_gas_estimation " << block_gas_estimation << " current period " - << final_chain_->last_block_number(); + << final_chain_->lastBlockNumber(); return {VerifyBlockReturnType::BlockTooBig, {}}; } } diff --git a/libraries/core_libs/consensus/src/final_chain/final_chai_impl.cpp b/libraries/core_libs/consensus/src/final_chain/final_chain.cpp similarity index 69% rename from libraries/core_libs/consensus/src/final_chain/final_chai_impl.cpp rename to libraries/core_libs/consensus/src/final_chain/final_chain.cpp index 4c77e95324..ebe285fefb 100644 --- a/libraries/core_libs/consensus/src/final_chain/final_chai_impl.cpp +++ b/libraries/core_libs/consensus/src/final_chain/final_chain.cpp @@ -1,14 +1,14 @@ +#include "final_chain/final_chain.hpp" + #include "common/encoding_solidity.hpp" -#include "final_chain/final_chain_impl.hpp" #include "final_chain/trie_common.hpp" namespace taraxa::final_chain { -FinalChainImpl::FinalChainImpl(const std::shared_ptr& db, const taraxa::FullNodeConfig& config, - const addr_t& node_addr) +FinalChain::FinalChain(const std::shared_ptr& db, const taraxa::FullNodeConfig& config, const addr_t& node_addr) : db_(db), kBlockGasLimit(config.genesis.pbft.gas_limit), - state_api_([this](auto n) { return block_hash(n).value_or(ZeroHash()); }, // + state_api_([this](auto n) { return blockHash(n).value_or(ZeroHash()); }, // config.genesis.state, config.opts_final_chain, { db->stateDbStoragePath().string(), @@ -17,13 +17,13 @@ FinalChainImpl::FinalChainImpl(const std::shared_ptr& db, const taraxa::Full kMaxLevelsPerPeriod(config.max_levels_per_period), rewards_( config.genesis.pbft.committee_size, config.genesis.state.hardforks, db_, - [this](EthBlockNumber n) { return dpos_eligible_total_vote_count(n); }, + [this](EthBlockNumber n) { return dposEligibleTotalVoteCount(n); }, state_api_.get_last_committed_state_descriptor().blk_num), - 
block_headers_cache_(config.final_chain_cache_in_blocks, [this](uint64_t blk) { return get_block_header(blk); }), - block_hashes_cache_(config.final_chain_cache_in_blocks, [this](uint64_t blk) { return get_block_hash(blk); }), - transactions_cache_(config.final_chain_cache_in_blocks, [this](uint64_t blk) { return get_transactions(blk); }), + block_headers_cache_(config.final_chain_cache_in_blocks, [this](uint64_t blk) { return getBlockHeader(blk); }), + block_hashes_cache_(config.final_chain_cache_in_blocks, [this](uint64_t blk) { return getBlockHash(blk); }), + transactions_cache_(config.final_chain_cache_in_blocks, [this](uint64_t blk) { return getTransactions(blk); }), transaction_hashes_cache_(config.final_chain_cache_in_blocks, - [this](uint64_t blk) { return get_transaction_hashes(blk); }), + [this](uint64_t blk) { return getTransactionHashes(blk); }), accounts_cache_(config.final_chain_cache_in_blocks, [this](uint64_t blk, const addr_t& addr) { return state_api_.get_account(blk, addr); }), total_vote_count_cache_(config.final_chain_cache_in_blocks, @@ -43,8 +43,8 @@ FinalChainImpl::FinalChainImpl(const std::shared_ptr& db, const taraxa::Full // If we don't have genesis block in db then create and push it if (!last_blk_num) [[unlikely]] { auto batch = db_->createWriteBatch(); - auto header = append_block(batch, addr_t(), config.genesis.dag_genesis_block.getTimestamp(), kBlockGasLimit, - state_db_descriptor.state_root, u256(0)); + auto header = appendBlock(batch, addr_t(), config.genesis.dag_genesis_block.getTimestamp(), kBlockGasLimit, + state_db_descriptor.state_root, u256(0)); block_headers_cache_.append(header->number, header); last_block_number_ = header->number; @@ -92,9 +92,9 @@ FinalChainImpl::FinalChainImpl(const std::shared_ptr& db, const taraxa::Full } } -void FinalChainImpl::stop() { executor_thread_.join(); } +void FinalChain::stop() { executor_thread_.join(); } -std::future> FinalChainImpl::finalize( +std::future> FinalChain::finalize( 
PeriodData&& new_blk, std::vector&& finalized_dag_blk_hashes, std::shared_ptr&& anchor) { auto p = std::make_shared>>(); boost::asio::post(executor_thread_, [this, new_blk = std::move(new_blk), @@ -106,18 +106,18 @@ std::future> FinalChainImpl::finalize( return p->get_future(); } -EthBlockNumber FinalChainImpl::delegation_delay() const { return delegation_delay_; } +EthBlockNumber FinalChain::delegationDelay() const { return delegation_delay_; } -SharedTransaction FinalChainImpl::make_bridge_finalization_transaction() { +SharedTransaction FinalChain::makeBridgeFinalizationTransaction() { const static auto finalize_method = util::EncodingSolidity::packFunctionCall("finalizeEpoch()"); - auto account = get_account(kTaraxaSystemAccount).value_or(state_api::ZeroAccount); + auto account = getAccount(kTaraxaSystemAccount).value_or(state_api::ZeroAccount); auto trx = std::make_shared(account.nonce, 0, 0, kBlockGasLimit, finalize_method, kHardforksConfig.ficus_hf.bridge_contract_address); return trx; } -bool FinalChainImpl::isNeedToFinalize(EthBlockNumber blk_num) const { +bool FinalChain::isNeedToFinalize(EthBlockNumber blk_num) const { const static auto get_bridge_root_method = util::EncodingSolidity::packFunctionCall("shouldFinalizeEpoch()"); return u256(call(state_api::EVMTransaction{dev::ZeroAddress, 1, kHardforksConfig.ficus_hf.bridge_contract_address, state_api::ZeroAccount.nonce, 0, 10000000, get_bridge_root_method}, @@ -126,15 +126,15 @@ bool FinalChainImpl::isNeedToFinalize(EthBlockNumber blk_num) const { .convert_to(); } -std::vector FinalChainImpl::makeSystemTransactions(PbftPeriod blk_num) { +std::vector FinalChain::makeSystemTransactions(PbftPeriod blk_num) { std::vector system_transactions; - // Make system transactions blocks sooner than next pillar block period, - // e.g.: if pillar block period is 100, this will return true for period 100 - delegation_delay() == 95, 195, 295, + // Make system transactions blocks sooner than next pillar block period, + // 
e.g.: if pillar block period is 100, this will return true for period 100 - delegationDelay() == 95, 195, 295, // etc... - if (kHardforksConfig.ficus_hf.isPillarBlockPeriod(blk_num + delegation_delay())) { - if (const auto bridge_contract = get_account(kHardforksConfig.ficus_hf.bridge_contract_address); bridge_contract) { + if (kHardforksConfig.ficus_hf.isPillarBlockPeriod(blk_num + delegationDelay())) { + if (const auto bridge_contract = getAccount(kHardforksConfig.ficus_hf.bridge_contract_address); bridge_contract) { if (bridge_contract->code_size && isNeedToFinalize(blk_num - 1)) { - auto finalize_trx = make_bridge_finalization_transaction(); + auto finalize_trx = makeBridgeFinalizationTransaction(); system_transactions.push_back(finalize_trx); } } @@ -143,12 +143,12 @@ std::vector FinalChainImpl::makeSystemTransactions(PbftPeriod return system_transactions; } -std::shared_ptr FinalChainImpl::finalize_(PeriodData&& new_blk, - std::vector&& finalized_dag_blk_hashes, - std::shared_ptr&& anchor) { +std::shared_ptr FinalChain::finalize_(PeriodData&& new_blk, + std::vector&& finalized_dag_blk_hashes, + std::shared_ptr&& anchor) { auto batch = db_->createWriteBatch(); - block_applying_emitter_.emit(block_header()->number + 1); + block_applying_emitter_.emit(blockHeader()->number + 1); /* // Any dag block producer producing duplicate dag blocks on same level should be slashed @@ -172,7 +172,7 @@ std::shared_ptr FinalChainImpl::finalize_(PeriodData&& auto all_transactions = new_blk.transactions; all_transactions.insert(all_transactions.end(), system_transactions.begin(), system_transactions.end()); std::vector evm_trxs; - append_evm_transactions(evm_trxs, all_transactions); + appendEvmTransactions(evm_trxs, all_transactions); const auto& [exec_results] = state_api_.execute_transactions( {new_blk.pbft_blk->getBeneficiary(), kBlockGasLimit, new_blk.pbft_blk->getTimestamp(), BlockHeader::difficulty()}, @@ -203,8 +203,8 @@ std::shared_ptr 
FinalChainImpl::finalize_(PeriodData&& const auto& [state_root, total_reward] = state_api_.distribute_rewards(rewards_stats); auto blk_header = - append_block(batch, new_blk.pbft_blk->getBeneficiary(), new_blk.pbft_blk->getTimestamp(), kBlockGasLimit, - state_root, total_reward, all_transactions, receipts, new_blk.pbft_blk->getExtraDataRlp()); + appendBlock(batch, new_blk.pbft_blk->getBeneficiary(), new_blk.pbft_blk->getTimestamp(), kBlockGasLimit, + state_root, total_reward, all_transactions, receipts, new_blk.pbft_blk->getExtraDataRlp()); // Update number of executed DAG blocks and transactions auto num_executed_dag_blk = num_executed_dag_blk_ + finalized_dag_blk_hashes.size(); @@ -268,22 +268,22 @@ std::shared_ptr FinalChainImpl::finalize_(PeriodData&& return result; } -void FinalChainImpl::prune(EthBlockNumber blk_n) { +void FinalChain::prune(EthBlockNumber blk_n) { LOG(log_nf_) << "Pruning data older than " << blk_n; - auto last_block_to_keep = get_block_header(blk_n); + auto last_block_to_keep = getBlockHeader(blk_n); if (last_block_to_keep) { auto block_to_keep = last_block_to_keep; std::vector state_root_to_keep; while (block_to_keep) { state_root_to_keep.push_back(block_to_keep->state_root); - block_to_keep = get_block_header(block_to_keep->number + 1); + block_to_keep = getBlockHeader(block_to_keep->number + 1); } - auto block_to_prune = get_block_header(last_block_to_keep->number - 1); + auto block_to_prune = getBlockHeader(last_block_to_keep->number - 1); while (block_to_prune && block_to_prune->number > 0) { db_->remove(DB::Columns::final_chain_blk_by_number, block_to_prune->number); db_->remove(DB::Columns::final_chain_blk_hash_by_number, block_to_prune->number); db_->remove(DB::Columns::final_chain_blk_number_by_hash, block_to_prune->hash); - block_to_prune = get_block_header(block_to_prune->number - 1); + block_to_prune = getBlockHeader(block_to_prune->number - 1); } db_->compactColumn(DB::Columns::final_chain_blk_by_number); @@ -294,14 +294,13 @@ 
void FinalChainImpl::prune(EthBlockNumber blk_n) { } } -std::shared_ptr FinalChainImpl::append_block(DB::Batch& batch, const addr_t& author, uint64_t timestamp, - uint64_t gas_limit, const h256& state_root, u256 total_reward, - const SharedTransactions& transactions, - const TransactionReceipts& receipts, - const bytes& extra_data) { +std::shared_ptr FinalChain::appendBlock(DB::Batch& batch, const addr_t& author, uint64_t timestamp, + uint64_t gas_limit, const h256& state_root, u256 total_reward, + const SharedTransactions& transactions, + const TransactionReceipts& receipts, const bytes& extra_data) { auto blk_header_ptr = std::make_shared(); auto& blk_header = *blk_header_ptr; - auto last_block = block_header(); + auto last_block = blockHeader(); blk_header.number = last_block ? last_block->number + 1 : 0; blk_header.parent_hash = last_block ? last_block->hash : h256(); blk_header.author = author; @@ -333,8 +332,8 @@ std::shared_ptr FinalChainImpl::append_block(DB::Batch& batch, cons auto log_bloom_for_index = blk_header.log_bloom; log_bloom_for_index.shiftBloom<3>(sha3(blk_header.author.ref())); for (uint64_t level = 0, index = blk_header.number; level < c_bloomIndexLevels; ++level, index /= c_bloomIndexSize) { - auto chunk_id = block_blooms_chunk_id(level, index / c_bloomIndexSize); - auto chunk_to_alter = block_blooms(chunk_id); + auto chunk_id = blockBloomsChunkId(level, index / c_bloomIndexSize); + auto chunk_to_alter = blockBlooms(chunk_id); chunk_to_alter[index % c_bloomIndexSize] |= log_bloom_for_index; db_->insert(batch, DB::Columns::final_chain_log_blooms_index, chunk_id, util::rlp_enc(rlp_strm, chunk_to_alter)); } @@ -345,28 +344,28 @@ std::shared_ptr FinalChainImpl::append_block(DB::Batch& batch, cons return blk_header_ptr; } -EthBlockNumber FinalChainImpl::last_block_number() const { return last_block_number_; } +EthBlockNumber FinalChain::lastBlockNumber() const { return last_block_number_; } -std::optional FinalChainImpl::block_number(const h256& 
h) const { +std::optional FinalChain::blockNumber(const h256& h) const { return db_->lookup_int(h, DB::Columns::final_chain_blk_number_by_hash); } -std::optional FinalChainImpl::block_hash(std::optional n) const { - return block_hashes_cache_.get(last_if_absent(n)); +std::optional FinalChain::blockHash(std::optional n) const { + return block_hashes_cache_.get(lastIfAbsent(n)); } -std::shared_ptr FinalChainImpl::block_header(std::optional n) const { +std::shared_ptr FinalChain::blockHeader(std::optional n) const { if (!n) { return block_headers_cache_.last(); } return block_headers_cache_.get(*n); } -std::optional FinalChainImpl::transaction_location(const h256& trx_hash) const { +std::optional FinalChain::transactionLocation(const h256& trx_hash) const { return db_->getTransactionLocation(trx_hash); } -std::optional FinalChainImpl::transaction_receipt(const h256& trx_h) const { +std::optional FinalChain::transactionReceipt(const h256& trx_h) const { auto raw = db_->lookup(trx_h, DB::Columns::final_chain_receipt_by_trx_hash); if (raw.empty()) { return {}; @@ -376,20 +375,20 @@ std::optional FinalChainImpl::transaction_receipt(const h256 return ret; } -uint64_t FinalChainImpl::transactionCount(std::optional n) const { - return db_->getTransactionCount(last_if_absent(n)); +uint64_t FinalChain::transactionCount(std::optional n) const { + return db_->getTransactionCount(lastIfAbsent(n)); } -std::shared_ptr FinalChainImpl::transaction_hashes(std::optional n) const { - return transaction_hashes_cache_.get(last_if_absent(n)); +std::shared_ptr FinalChain::transactionHashes(std::optional n) const { + return transaction_hashes_cache_.get(lastIfAbsent(n)); } -const SharedTransactions FinalChainImpl::transactions(std::optional n) const { - return transactions_cache_.get(last_if_absent(n)); +const SharedTransactions FinalChain::transactions(std::optional n) const { + return transactions_cache_.get(lastIfAbsent(n)); } -std::vector FinalChainImpl::withBlockBloom(const LogBloom& b, 
EthBlockNumber from, - EthBlockNumber to) const { +std::vector FinalChain::withBlockBloom(const LogBloom& b, EthBlockNumber from, + EthBlockNumber to) const { std::vector ret; // start from the top-level auto u = int_pow(c_bloomIndexSize, c_bloomIndexLevels); @@ -400,28 +399,27 @@ std::vector FinalChainImpl::withBlockBloom(const LogBloom& b, Et return ret; } -std::optional FinalChainImpl::get_account(const addr_t& addr, - std::optional blk_n) const { - return accounts_cache_.get(last_if_absent(blk_n), addr); +std::optional FinalChain::getAccount(const addr_t& addr, + std::optional blk_n) const { + return accounts_cache_.get(lastIfAbsent(blk_n), addr); } -void FinalChainImpl::update_state_config(const state_api::Config& new_config) { +void FinalChain::updateStateConfig(const state_api::Config& new_config) { delegation_delay_ = new_config.dpos.delegation_delay; state_api_.update_state_config(new_config); } -h256 FinalChainImpl::get_account_storage(const addr_t& addr, const u256& key, - std::optional blk_n) const { - return state_api_.get_account_storage(last_if_absent(blk_n), addr, key); +h256 FinalChain::getAccountStorage(const addr_t& addr, const u256& key, std::optional blk_n) const { + return state_api_.get_account_storage(lastIfAbsent(blk_n), addr, key); } -bytes FinalChainImpl::get_code(const addr_t& addr, std::optional blk_n) const { - return state_api_.get_code_by_address(last_if_absent(blk_n), addr); +bytes FinalChain::getCode(const addr_t& addr, std::optional blk_n) const { + return state_api_.get_code_by_address(lastIfAbsent(blk_n), addr); } -state_api::ExecutionResult FinalChainImpl::call(const state_api::EVMTransaction& trx, - std::optional blk_n) const { - auto const blk_header = block_header(last_if_absent(blk_n)); +state_api::ExecutionResult FinalChain::call(const state_api::EVMTransaction& trx, + std::optional blk_n) const { + auto const blk_header = blockHeader(lastIfAbsent(blk_n)); if (!blk_header) { throw std::runtime_error("Future block"); } @@ 
-435,9 +433,9 @@ state_api::ExecutionResult FinalChainImpl::call(const state_api::EVMTransaction& trx); } -std::string FinalChainImpl::trace(std::vector trxs, EthBlockNumber blk_n, - std::optional params) const { - const auto blk_header = block_header(last_if_absent(blk_n)); +std::string FinalChain::trace(std::vector trxs, EthBlockNumber blk_n, + std::optional params) const { + const auto blk_header = blockHeader(lastIfAbsent(blk_n)); if (!blk_header) { throw std::runtime_error("Future block"); } @@ -451,44 +449,44 @@ std::string FinalChainImpl::trace(std::vector trxs, E trxs, params)); } -uint64_t FinalChainImpl::dpos_eligible_total_vote_count(EthBlockNumber blk_num) const { +uint64_t FinalChain::dposEligibleTotalVoteCount(EthBlockNumber blk_num) const { return total_vote_count_cache_.get(blk_num); } -uint64_t FinalChainImpl::dpos_eligible_vote_count(EthBlockNumber blk_num, const addr_t& addr) const { +uint64_t FinalChain::dposEligibleVoteCount(EthBlockNumber blk_num, const addr_t& addr) const { return dpos_vote_count_cache_.get(blk_num, addr); } -bool FinalChainImpl::dpos_is_eligible(EthBlockNumber blk_num, const addr_t& addr) const { +bool FinalChain::dposIsEligible(EthBlockNumber blk_num, const addr_t& addr) const { return dpos_is_eligible_cache_.get(blk_num, addr); } -vrf_wrapper::vrf_pk_t FinalChainImpl::dpos_get_vrf_key(EthBlockNumber blk_n, const addr_t& addr) const { +vrf_wrapper::vrf_pk_t FinalChain::dposGetVrfKey(EthBlockNumber blk_n, const addr_t& addr) const { return state_api_.dpos_get_vrf_key(blk_n, addr); } -std::vector FinalChainImpl::dpos_validators_total_stakes(EthBlockNumber blk_num) const { +std::vector FinalChain::dposValidatorsTotalStakes(EthBlockNumber blk_num) const { return state_api_.dpos_validators_total_stakes(blk_num); } -uint256_t FinalChainImpl::dpos_total_amount_delegated(EthBlockNumber blk_num) const { +uint256_t FinalChain::dposTotalAmountDelegated(EthBlockNumber blk_num) const { return 
state_api_.dpos_total_amount_delegated(blk_num); } -std::vector FinalChainImpl::dpos_validators_vote_counts(EthBlockNumber blk_num) const { +std::vector FinalChain::dposValidatorsVoteCounts(EthBlockNumber blk_num) const { return state_api_.dpos_validators_vote_counts(blk_num); } -void FinalChainImpl::wait_for_finalized() { +void FinalChain::waitForFinalized() { std::unique_lock lck(finalized_mtx_); finalized_cv_.wait_for(lck, std::chrono::milliseconds(10)); } -uint64_t FinalChainImpl::dpos_yield(EthBlockNumber blk_num) const { return state_api_.dpos_yield(blk_num); } +uint64_t FinalChain::dposYield(EthBlockNumber blk_num) const { return state_api_.dpos_yield(blk_num); } -u256 FinalChainImpl::dpos_total_supply(EthBlockNumber blk_num) const { return state_api_.dpos_total_supply(blk_num); } +u256 FinalChain::dposTotalSupply(EthBlockNumber blk_num) const { return state_api_.dpos_total_supply(blk_num); } -h256 FinalChainImpl::get_bridge_root(EthBlockNumber blk_num) const { +h256 FinalChain::getBridgeRoot(EthBlockNumber blk_num) const { const static auto get_bridge_root_method = util::EncodingSolidity::packFunctionCall("getBridgeRoot()"); return h256(call(state_api::EVMTransaction{dev::ZeroAddress, 1, kHardforksConfig.ficus_hf.bridge_contract_address, state_api::ZeroAccount.nonce, 0, 10000000, get_bridge_root_method}, @@ -496,16 +494,23 @@ h256 FinalChainImpl::get_bridge_root(EthBlockNumber blk_num) const { .code_retval); } -h256 FinalChainImpl::get_bridge_epoch(EthBlockNumber blk_num) const { - const static auto get_bridge_epoch_method = util::EncodingSolidity::packFunctionCall("finalizedEpoch()"); +h256 FinalChain::getBridgeEpoch(EthBlockNumber blk_num) const { + const static auto getBridgeEpoch_method = util::EncodingSolidity::packFunctionCall("finalizedEpoch()"); return h256(call(state_api::EVMTransaction{dev::ZeroAddress, 1, kHardforksConfig.ficus_hf.bridge_contract_address, - state_api::ZeroAccount.nonce, 0, 10000000, get_bridge_epoch_method}, + 
state_api::ZeroAccount.nonce, 0, 10000000, getBridgeEpoch_method}, blk_num) .code_retval); } -std::shared_ptr FinalChainImpl::get_transaction_hashes(std::optional n) const { - const auto& trxs = db_->getPeriodTransactions(last_if_absent(n)); +std::pair FinalChain::getBalance(addr_t const& addr) const { + if (auto acc = getAccount(addr)) { + return {acc->balance, true}; + } + return {0, false}; +} + +std::shared_ptr FinalChain::getTransactionHashes(std::optional n) const { + const auto& trxs = db_->getPeriodTransactions(lastIfAbsent(n)); auto ret = std::make_shared(); if (!trxs) { return ret; @@ -516,14 +521,14 @@ std::shared_ptr FinalChainImpl::get_transaction_hashes(std::o return ret; } -const SharedTransactions FinalChainImpl::get_transactions(std::optional n) const { - if (auto trxs = db_->getPeriodTransactions(last_if_absent(n))) { +const SharedTransactions FinalChain::getTransactions(std::optional n) const { + if (auto trxs = db_->getPeriodTransactions(lastIfAbsent(n))) { return *trxs; } return {}; } -std::shared_ptr FinalChainImpl::get_block_header(EthBlockNumber n) const { +std::shared_ptr FinalChain::getBlockHeader(EthBlockNumber n) const { if (auto raw = db_->lookup(n, DB::Columns::final_chain_blk_by_number); !raw.empty()) { auto ret = std::make_shared(); ret->rlp(dev::RLP(raw)); @@ -532,7 +537,7 @@ std::shared_ptr FinalChainImpl::get_block_header(EthBlockNumb return {}; } -std::optional FinalChainImpl::get_block_hash(EthBlockNumber n) const { +std::optional FinalChain::getBlockHash(EthBlockNumber n) const { auto raw = db_->lookup(n, DB::Columns::final_chain_blk_hash_by_number); if (raw.empty()) { return {}; @@ -540,42 +545,40 @@ std::optional FinalChainImpl::get_block_hash(EthBlockNumber n) const { return h256(raw, h256::FromBinary); } -EthBlockNumber FinalChainImpl::last_if_absent(const std::optional& client_blk_n) const { - return client_blk_n ? 
*client_blk_n : last_block_number(); +EthBlockNumber FinalChain::lastIfAbsent(const std::optional& client_blk_n) const { + return client_blk_n ? *client_blk_n : lastBlockNumber(); } -state_api::EVMTransaction FinalChainImpl::to_evm_transaction(const SharedTransaction& trx) { +state_api::EVMTransaction FinalChain::toEvmTransaction(const SharedTransaction& trx) { return state_api::EVMTransaction{ trx->getSender(), trx->getGasPrice(), trx->getReceiver(), trx->getNonce(), trx->getValue(), trx->getGas(), trx->getData(), }; } -void FinalChainImpl::append_evm_transactions(std::vector& evm_trxs, - const SharedTransactions& trxs) { +void FinalChain::appendEvmTransactions(std::vector& evm_trxs, + const SharedTransactions& trxs) { std::transform(trxs.cbegin(), trxs.cend(), std::back_inserter(evm_trxs), - [](const auto& trx) { return to_evm_transaction(trx); }); + [](const auto& trx) { return toEvmTransaction(trx); }); } -BlocksBlooms FinalChainImpl::block_blooms(const h256& chunk_id) const { +BlocksBlooms FinalChain::blockBlooms(const h256& chunk_id) const { if (auto raw = db_->lookup(chunk_id, DB::Columns::final_chain_log_blooms_index); !raw.empty()) { return dev::RLP(raw).toArray(); } return {}; } -h256 FinalChainImpl::block_blooms_chunk_id(EthBlockNumber level, EthBlockNumber index) { - return h256(index * 0xff + level); -} +h256 FinalChain::blockBloomsChunkId(EthBlockNumber level, EthBlockNumber index) { return h256(index * 0xff + level); } -std::vector FinalChainImpl::withBlockBloom(const LogBloom& b, EthBlockNumber from, EthBlockNumber to, - EthBlockNumber level, EthBlockNumber index) const { +std::vector FinalChain::withBlockBloom(const LogBloom& b, EthBlockNumber from, EthBlockNumber to, + EthBlockNumber level, EthBlockNumber index) const { std::vector ret; auto uCourse = int_pow(c_bloomIndexSize, level + 1); auto uFine = int_pow(c_bloomIndexSize, level); auto obegin = index == from / uCourse ? from / uFine % c_bloomIndexSize : 0; auto oend = index == to / uCourse ? 
(to / uFine) % c_bloomIndexSize + 1 : c_bloomIndexSize; - auto bb = block_blooms(block_blooms_chunk_id(level, index)); + auto bb = blockBlooms(blockBloomsChunkId(level, index)); for (auto o = obegin; o < oend; ++o) { if (bb[o].contains(b)) { // This level has something like what we want. diff --git a/libraries/core_libs/consensus/src/key_manager/key_manager.cpp b/libraries/core_libs/consensus/src/key_manager/key_manager.cpp index 24d7eaa948..ad968d4a6c 100644 --- a/libraries/core_libs/consensus/src/key_manager/key_manager.cpp +++ b/libraries/core_libs/consensus/src/key_manager/key_manager.cpp @@ -4,7 +4,7 @@ namespace taraxa { static const vrf_wrapper::vrf_pk_t kEmptyVrfKey; -KeyManager::KeyManager(std::shared_ptr final_chain) : final_chain_(std::move(final_chain)) {} +KeyManager::KeyManager(std::shared_ptr final_chain) : final_chain_(std::move(final_chain)) {} std::shared_ptr KeyManager::getVrfKey(EthBlockNumber blk_n, const addr_t& addr) { { @@ -15,7 +15,7 @@ std::shared_ptr KeyManager::getVrfKey(EthBlockNumber blk_ } try { - if (auto key = final_chain_->dpos_get_vrf_key(blk_n, addr); key != kEmptyVrfKey) { + if (auto key = final_chain_->dposGetVrfKey(blk_n, addr); key != kEmptyVrfKey) { std::unique_lock lock(vrf_keys_mutex_); return vrf_keys_.insert_or_assign(addr, std::make_shared(std::move(key))).first->second; } diff --git a/libraries/core_libs/consensus/src/pbft/pbft_manager.cpp b/libraries/core_libs/consensus/src/pbft/pbft_manager.cpp index 9d413f5916..d4b41dcfb7 100644 --- a/libraries/core_libs/consensus/src/pbft/pbft_manager.cpp +++ b/libraries/core_libs/consensus/src/pbft/pbft_manager.cpp @@ -25,7 +25,7 @@ constexpr PbftStep kMaxSteps{13}; // Need to be a odd number PbftManager::PbftManager(const GenesisConfig &conf, addr_t node_addr, std::shared_ptr db, std::shared_ptr pbft_chain, std::shared_ptr vote_mgr, std::shared_ptr dag_mgr, std::shared_ptr trx_mgr, - std::shared_ptr final_chain, + std::shared_ptr final_chain, std::shared_ptr pillar_chain_mgr, 
secret_t node_sk) : db_(std::move(db)), pbft_chain_(std::move(pbft_chain)), @@ -42,7 +42,7 @@ PbftManager::PbftManager(const GenesisConfig &conf, addr_t node_addr, std::share proposed_blocks_(db_) { LOG_OBJECTS_CREATE("PBFT_MGR"); - for (auto period = final_chain_->last_block_number() + 1, curr_period = pbft_chain_->getPbftChainSize(); + for (auto period = final_chain_->lastBlockNumber() + 1, curr_period = pbft_chain_->getPbftChainSize(); period <= curr_period; ++period) { auto period_raw = db_->getPeriodDataRaw(period); if (period_raw.size() == 0) { @@ -68,7 +68,7 @@ PbftManager::PbftManager(const GenesisConfig &conf, addr_t node_addr, std::share PbftPeriod start_period = 1; const auto recently_finalized_transactions_periods = - kRecentlyFinalizedTransactionsFactor * final_chain_->delegation_delay(); + kRecentlyFinalizedTransactionsFactor * final_chain_->delegationDelay(); if (pbft_chain_->getPbftChainSize() > recently_finalized_transactions_periods) { start_period = pbft_chain_->getPbftChainSize() - recently_finalized_transactions_periods; } @@ -201,7 +201,7 @@ void PbftManager::setPbftRound(PbftRound round) { void PbftManager::waitForPeriodFinalization() { do { // we need to be sure we finalized at least block block with num lower by delegation_delay - if (pbft_chain_->getPbftChainSize() <= final_chain_->last_block_number() + final_chain_->delegation_delay()) { + if (pbft_chain_->getPbftChainSize() <= final_chain_->lastBlockNumber() + final_chain_->delegationDelay()) { break; } thisThreadSleepForMilliSeconds(kPollingIntervalMs.count()); @@ -210,11 +210,11 @@ void PbftManager::waitForPeriodFinalization() { std::optional PbftManager::getCurrentDposTotalVotesCount() const { try { - return final_chain_->dpos_eligible_total_vote_count(pbft_chain_->getPbftChainSize()); + return final_chain_->dposEligibleTotalVoteCount(pbft_chain_->getPbftChainSize()); } catch (state_api::ErrFutureBlock &e) { LOG(log_wr_) << "Unable to get CurrentDposTotalVotesCount for period: " << 
pbft_chain_->getPbftChainSize() - << ". Period is too far ahead of actual finalized pbft chain size (" - << final_chain_->last_block_number() << "). Err msg: " << e.what(); + << ". Period is too far ahead of actual finalized pbft chain size (" << final_chain_->lastBlockNumber() + << "). Err msg: " << e.what(); } return {}; @@ -222,11 +222,11 @@ std::optional PbftManager::getCurrentDposTotalVotesCount() const { std::optional PbftManager::getCurrentNodeVotesCount() const { try { - return final_chain_->dpos_eligible_vote_count(pbft_chain_->getPbftChainSize(), node_addr_); + return final_chain_->dposEligibleVoteCount(pbft_chain_->getPbftChainSize(), node_addr_); } catch (state_api::ErrFutureBlock &e) { LOG(log_wr_) << "Unable to get CurrentNodeVotesCount for period: " << pbft_chain_->getPbftChainSize() - << ". Period is too far ahead of actual finalized pbft chain size (" - << final_chain_->last_block_number() << "). Err msg: " << e.what(); + << ". Period is too far ahead of actual finalized pbft chain size (" << final_chain_->lastBlockNumber() + << "). 
Err msg: " << e.what(); } return {}; @@ -1126,8 +1126,8 @@ PbftManager::generatePbftBlock(PbftPeriod propose_period, const blk_hash_t &prev [](const auto &v) { return v->getHash(); }); h256 last_state_root; - if (propose_period > final_chain_->delegation_delay()) { - if (const auto header = final_chain_->block_header(propose_period - final_chain_->delegation_delay())) { + if (propose_period > final_chain_->delegationDelay()) { + if (const auto header = final_chain_->blockHeader(propose_period - final_chain_->delegationDelay())) { last_state_root = header->state_root; } else { LOG(log_wr_) << "Block for period " << propose_period << " could not be proposed as we are behind"; @@ -1387,8 +1387,8 @@ PbftStateRootValidation PbftManager::validatePbftBlockStateRoot(const std::share auto const &pbft_block_hash = pbft_block->getBlockHash(); { h256 prev_state_root_hash; - if (period > final_chain_->delegation_delay()) { - if (const auto header = final_chain_->block_header(period - final_chain_->delegation_delay())) { + if (period > final_chain_->delegationDelay()) { + if (const auto header = final_chain_->blockHeader(period - final_chain_->delegationDelay())) { prev_state_root_hash = header->state_root; } else { LOG(log_wr_) << "Block " << pbft_block_hash << " could not be validated as we are behind"; @@ -1796,16 +1796,16 @@ bool PbftManager::pushPbftBlock_(PeriodData &&period_data, std::vectordelegation_delay(), e.g. block with period 32 + // Pillar block use state from current_pbft_chain_size - final_chain_->delegationDelay(), e.g. block with period 32 // uses state from period 27. 
- PbftPeriod request_period = current_pbft_chain_size - final_chain_->delegation_delay(); + PbftPeriod request_period = current_pbft_chain_size - final_chain_->delegationDelay(); // advancePeriod() -> resetConsensus() -> waitForPeriodFinalization() makes sure block request_period was already // finalized - assert(final_chain_->last_block_number() >= request_period); + assert(final_chain_->lastBlockNumber() >= request_period); - const auto block_header = final_chain_->block_header(request_period); - const auto bridge_root = final_chain_->get_bridge_root(request_period); - const auto bridge_epoch = final_chain_->get_bridge_epoch(request_period); + const auto block_header = final_chain_->blockHeader(request_period); + const auto bridge_root = final_chain_->getBridgeRoot(request_period); + const auto bridge_epoch = final_chain_->getBridgeEpoch(request_period); // Create pillar block const auto pillar_block = @@ -1814,7 +1814,7 @@ void PbftManager::processPillarBlock(PbftPeriod current_pbft_chain_size) { // Optimization - creates pillar vote right after pillar block was created, otherwise pillar votes are created during // next period pbft voting Check if node is eligible to vote for pillar block No need to catch ErrFutureBlock, // waitForPeriodFinalization() makes sure it does not happen - if (final_chain_->dpos_is_eligible(current_pbft_chain_size, node_addr_)) { + if (final_chain_->dposIsEligible(current_pbft_chain_size, node_addr_)) { if (pillar_block) { // Pillar votes are created in the next period (+ 1), this is optimization to create & broadcast it a bit faster const auto pillar_vote = pillar_chain_mgr_->genAndPlacePillarVote( @@ -1882,7 +1882,7 @@ std::optional>>> Pbf break; } // If syncing and pbft manager is faster than execution a delay might be needed to allow EVM to catch up - final_chain_->wait_for_finalized(); + final_chain_->waitForFinalized(); if (!retry_logged) { LOG(log_wr_) << "PBFT block " << pbft_block_hash << " validation delayed, state root 
missing, execution is behind"; @@ -2112,11 +2112,11 @@ bool PbftManager::validatePbftBlockPillarVotes(const PeriodData &period_data) co bool PbftManager::canParticipateInConsensus(PbftPeriod period) const { try { - return final_chain_->dpos_is_eligible(period, node_addr_); + return final_chain_->dposIsEligible(period, node_addr_); } catch (state_api::ErrFutureBlock &e) { LOG(log_er_) << "Unable to decide if node is consensus node or not for period: " << period - << ". Period is too far ahead of actual finalized pbft chain size (" - << final_chain_->last_block_number() << "). Err msg: " << e.what() + << ". Period is too far ahead of actual finalized pbft chain size (" << final_chain_->lastBlockNumber() + << "). Err msg: " << e.what() << ". Node is considered as not eligible to participate in consensus for period " << period; } diff --git a/libraries/core_libs/consensus/src/pillar_chain/pillar_chain_manager.cpp b/libraries/core_libs/consensus/src/pillar_chain/pillar_chain_manager.cpp index a822c50539..f0dc86b5c5 100644 --- a/libraries/core_libs/consensus/src/pillar_chain/pillar_chain_manager.cpp +++ b/libraries/core_libs/consensus/src/pillar_chain/pillar_chain_manager.cpp @@ -52,7 +52,7 @@ std::shared_ptr PillarChainManager::createPillarBlock( PbftPeriod period, const std::shared_ptr& block_header, const h256& bridge_root, const h256& bridge_epoch) { blk_hash_t previous_pillar_block_hash{}; // null block hash - auto new_vote_counts = final_chain_->dpos_validators_vote_counts(period); + auto new_vote_counts = final_chain_->dposValidatorsVoteCounts(period); std::vector votes_count_changes; // First ever pillar block @@ -257,7 +257,7 @@ bool PillarChainManager::validatePillarVote(const std::shared_ptr vo // Check if signer is eligible validator try { - if (!final_chain_->dpos_is_eligible(period - 1, validator)) { + if (!final_chain_->dposIsEligible(period - 1, validator)) { LOG(log_er_) << "Validator is not eligible. 
Pillar vote " << vote->getHash(); return false; } @@ -278,7 +278,7 @@ bool PillarChainManager::validatePillarVote(const std::shared_ptr vo uint64_t PillarChainManager::addVerifiedPillarVote(const std::shared_ptr& vote) { uint64_t validator_vote_count = 0; try { - validator_vote_count = final_chain_->dpos_eligible_vote_count(vote->getPeriod() - 1, vote->getVoterAddr()); + validator_vote_count = final_chain_->dposEligibleVoteCount(vote->getPeriod() - 1, vote->getVoterAddr()); } catch (state_api::ErrFutureBlock& e) { LOG(log_er_) << "Pillar vote " << vote->getHash() << " with period " << vote->getPeriod() << " is too far ahead of DPOS. " << e.what(); @@ -352,7 +352,7 @@ std::optional PillarChainManager::getPillarConsensusThreshold(PbftPeri try { // Pillar chain consensus threshold = total votes count / 2 + 1 - threshold = final_chain_->dpos_eligible_total_vote_count(period) / 2 + 1; + threshold = final_chain_->dposEligibleTotalVoteCount(period) / 2 + 1; } catch (state_api::ErrFutureBlock& e) { LOG(log_er_) << "Unable to get dpos total votes count for period " << period << " to calculate pillar consensus threshold: " << e.what(); diff --git a/libraries/core_libs/consensus/src/slashing_manager/slashing_manager.cpp b/libraries/core_libs/consensus/src/slashing_manager/slashing_manager.cpp index 309edeb93d..33310e6fba 100644 --- a/libraries/core_libs/consensus/src/slashing_manager/slashing_manager.cpp +++ b/libraries/core_libs/consensus/src/slashing_manager/slashing_manager.cpp @@ -8,7 +8,7 @@ namespace taraxa { const auto kContractAddress = addr_t("0x00000000000000000000000000000000000000EE"); -SlashingManager::SlashingManager(std::shared_ptr final_chain, +SlashingManager::SlashingManager(std::shared_ptr final_chain, std::shared_ptr trx_manager, std::shared_ptr gas_pricer, const FullNodeConfig &config, secret_t node_sk) : final_chain_(std::move(final_chain)), @@ -52,7 +52,7 @@ bool SlashingManager::submitDoubleVotingProof(const std::shared_ptr &v } // Check the balance - 
const auto account = final_chain_->get_account(kAddress).value_or(taraxa::state_api::ZeroAccount); + const auto account = final_chain_->getAccount(kAddress).value_or(taraxa::state_api::ZeroAccount); if (account.balance == 0) { return false; } diff --git a/libraries/core_libs/consensus/src/transaction/transaction_manager.cpp b/libraries/core_libs/consensus/src/transaction/transaction_manager.cpp index 64fac73911..303daf57d9 100644 --- a/libraries/core_libs/consensus/src/transaction/transaction_manager.cpp +++ b/libraries/core_libs/consensus/src/transaction/transaction_manager.cpp @@ -10,7 +10,7 @@ namespace taraxa { TransactionManager::TransactionManager(FullNodeConfig const &conf, std::shared_ptr db, - std::shared_ptr final_chain, addr_t node_addr) + std::shared_ptr final_chain, addr_t node_addr) : kConf(conf), transactions_pool_(final_chain, kConf.transactions_pool_size), kDagBlockGasLimit(kConf.genesis.dag.gas_limit), @@ -121,7 +121,7 @@ TransactionStatus TransactionManager::insertValidatedTransaction(std::shared_ptr return TransactionStatus::Known; } - const auto account = final_chain_->get_account(tx->getSender()).value_or(taraxa::state_api::ZeroAccount); + const auto account = final_chain_->getAccount(tx->getSender()).value_or(taraxa::state_api::ZeroAccount); bool proposable = true; // Ensure the transaction adheres to nonce ordering @@ -141,7 +141,7 @@ TransactionStatus TransactionManager::insertValidatedTransaction(std::shared_ptr proposable = false; } - const auto last_block_number = final_chain_->last_block_number(); + const auto last_block_number = final_chain_->lastBlockNumber(); LOG(log_dg_) << "Transaction " << trx_hash << " inserted in trx pool"; return transactions_pool_.insert(std::move(tx), proposable, last_block_number); } @@ -189,7 +189,7 @@ void TransactionManager::saveTransactionsFromDagBlock(SharedTransactions const & std::unique_lock transactions_lock(transactions_mutex_); for (auto t : trxs) { - const auto account = 
final_chain_->get_account(t->getSender()).value_or(taraxa::state_api::ZeroAccount); + const auto account = final_chain_->getAccount(t->getSender()).value_or(taraxa::state_api::ZeroAccount); const auto tx_hash = t->getHash(); // Cheacking nonce in cheaper than checking db, verify with nonce if possible @@ -336,7 +336,7 @@ void TransactionManager::initializeRecentlyFinalizedTransactions(const PeriodDat void TransactionManager::updateFinalizedTransactionsStatus(PeriodData const &period_data) { // !!! There is no lock because it is called under std::unique_lock trx_lock(trx_mgr_->getTransactionsMutex()); const auto recently_finalized_transactions_periods = - kRecentlyFinalizedTransactionsFactor * final_chain_->delegation_delay(); + kRecentlyFinalizedTransactionsFactor * final_chain_->delegationDelay(); if (period_data.transactions.size() > 0) { // Delete transactions older than recently_finalized_transactions_periods if (period_data.pbft_blk->getPeriod() > recently_finalized_transactions_periods) { @@ -419,7 +419,7 @@ SharedTransactions TransactionManager::getTransactions(const vec_trx_t &trxs_has for (auto trx : finalizedTransactions) { // Only include transactions with valid nonce at proposal period - auto acc = final_chain_->get_account(trx->getSender(), proposal_period); + auto acc = final_chain_->getAccount(trx->getSender(), proposal_period); if (acc.has_value() && acc->nonce > trx->getNonce()) { LOG(log_er_) << "Old transaction: " << trx->getHash(); } else { diff --git a/libraries/core_libs/consensus/src/transaction/transaction_queue.cpp b/libraries/core_libs/consensus/src/transaction/transaction_queue.cpp index 235acd14ba..448541e495 100644 --- a/libraries/core_libs/consensus/src/transaction/transaction_queue.cpp +++ b/libraries/core_libs/consensus/src/transaction/transaction_queue.cpp @@ -4,7 +4,7 @@ namespace taraxa { -TransactionQueue::TransactionQueue(std::shared_ptr final_chain, size_t max_size) +TransactionQueue::TransactionQueue(std::shared_ptr 
final_chain, size_t max_size) : known_txs_(max_size * 2, max_size / 5), kNonProposableTransactionsMaxSize(max_size * kNonProposableTransactionsLimitPercentage / 100), kMaxSize(max_size), @@ -190,7 +190,7 @@ void TransactionQueue::blockFinalized(uint64_t block_number) { void TransactionQueue::purge() { for (auto account_it = account_nonce_transactions_.begin(); account_it != account_nonce_transactions_.end();) { - const auto account = final_chain_->get_account(account_it->first); + const auto account = final_chain_->getAccount(account_it->first); if (account.has_value()) { for (auto nonce_it = account_it->second.begin(); nonce_it != account_it->second.end();) { if (nonce_it->first < account->nonce) { diff --git a/libraries/core_libs/consensus/src/vote_manager/vote_manager.cpp b/libraries/core_libs/consensus/src/vote_manager/vote_manager.cpp index 0ce0835c93..c05467d0aa 100644 --- a/libraries/core_libs/consensus/src/vote_manager/vote_manager.cpp +++ b/libraries/core_libs/consensus/src/vote_manager/vote_manager.cpp @@ -15,7 +15,7 @@ namespace taraxa { VoteManager::VoteManager(const addr_t& node_addr, const PbftConfig& pbft_config, const secret_t& node_sk, const vrf_wrapper::vrf_sk_t& vrf_sk, std::shared_ptr db, - std::shared_ptr pbft_chain, std::shared_ptr final_chain, + std::shared_ptr pbft_chain, std::shared_ptr final_chain, std::shared_ptr key_manager, std::shared_ptr slashing_manager) : kNodeAddr(node_addr), kPbftConfig(pbft_config), @@ -840,19 +840,19 @@ std::shared_ptr VoteManager::generateVoteWithWeight(const taraxa::blk_ uint64_t pbft_sortition_threshold = 0; try { - voter_dpos_votes_count = final_chain_->dpos_eligible_vote_count(period - 1, kNodeAddr); + voter_dpos_votes_count = final_chain_->dposEligibleVoteCount(period - 1, kNodeAddr); if (!voter_dpos_votes_count) { // No delegation return nullptr; } - total_dpos_votes_count = final_chain_->dpos_eligible_total_vote_count(period - 1); + total_dpos_votes_count = final_chain_->dposEligibleTotalVoteCount(period 
- 1); pbft_sortition_threshold = getPbftSortitionThreshold(total_dpos_votes_count, vote_type); } catch (state_api::ErrFutureBlock& e) { LOG(log_er_) << "Unable to place vote for period: " << period << ", round: " << round << ", step: " << step << ", voted block hash: " << blockhash.abridged() << ". " - << "Period is too far ahead of actual finalized pbft chain size (" << final_chain_->last_block_number() + << "Period is too far ahead of actual finalized pbft chain size (" << final_chain_->lastBlockNumber() << "). Err msg: " << e.what(); return nullptr; @@ -881,10 +881,9 @@ std::pair VoteManager::validateVote(const std::shared_ptrgetPeriod(); try { - const uint64_t voter_dpos_votes_count = - final_chain_->dpos_eligible_vote_count(vote_period - 1, vote->getVoterAddr()); + const uint64_t voter_dpos_votes_count = final_chain_->dposEligibleVoteCount(vote_period - 1, vote->getVoterAddr()); - // Mark vote as validated only after getting dpos_eligible_vote_count and other values from dpos contract. It is + // Mark vote as validated only after getting dposEligibleVoteCount and other values from dpos contract. It is // possible that we are behind in processing pbft blocks, in which case we wont be able to get values from dpos // contract and validation fails due to this, not due to the fact that vote is invalid... already_validated_votes_.insert(vote->getHash()); @@ -910,7 +909,7 @@ std::pair VoteManager::validateVote(const std::shared_ptrdpos_eligible_total_vote_count(vote_period - 1); + const uint64_t total_dpos_votes_count = final_chain_->dposEligibleTotalVoteCount(vote_period - 1); const uint64_t pbft_sortition_threshold = getPbftSortitionThreshold(total_dpos_votes_count, vote->getType()); if (!vote->calculateWeight(voter_dpos_votes_count, total_dpos_votes_count, pbft_sortition_threshold)) { err_msg << "Invalid vote " << vote->getHash() << ": zero weight"; @@ -918,7 +917,7 @@ std::pair VoteManager::validateVote(const std::shared_ptrgetHash() << " against dpos contract. 
It's period (" << vote_period - << ") is too far ahead of actual finalized pbft chain size (" << final_chain_->last_block_number() + << ") is too far ahead of actual finalized pbft chain size (" << final_chain_->lastBlockNumber() << "). Err msg: " << e.what(); return {false, err_msg.str()}; } catch (...) { @@ -944,11 +943,11 @@ std::optional VoteManager::getPbftTwoTPlusOne(PbftPeriod pbft_period, uint64_t total_dpos_votes_count = 0; try { - total_dpos_votes_count = final_chain_->dpos_eligible_total_vote_count(pbft_period); + total_dpos_votes_count = final_chain_->dposEligibleTotalVoteCount(pbft_period); } catch (state_api::ErrFutureBlock& e) { LOG(log_er_) << "Unable to calculate 2t + 1 for period: " << pbft_period - << ". Period is too far ahead of actual finalized pbft chain size (" - << final_chain_->last_block_number() << "). Err msg: " << e.what(); + << ". Period is too far ahead of actual finalized pbft chain size (" << final_chain_->lastBlockNumber() + << "). Err msg: " << e.what(); return {}; } @@ -971,14 +970,14 @@ bool VoteManager::genAndValidateVrfSortition(PbftPeriod pbft_period, PbftRound p VrfPbftSortition vrf_sortition(kVrfSk, {PbftVoteTypes::propose_vote, pbft_period, pbft_round, 1}); try { - const uint64_t voter_dpos_votes_count = final_chain_->dpos_eligible_vote_count(pbft_period - 1, kNodeAddr); + const uint64_t voter_dpos_votes_count = final_chain_->dposEligibleVoteCount(pbft_period - 1, kNodeAddr); if (!voter_dpos_votes_count) { LOG(log_er_) << "Generated vrf sortition for period " << pbft_period << ", round " << pbft_round << " is invalid. 
Voter dpos vote count is zero"; return false; } - const uint64_t total_dpos_votes_count = final_chain_->dpos_eligible_total_vote_count(pbft_period - 1); + const uint64_t total_dpos_votes_count = final_chain_->dposEligibleTotalVoteCount(pbft_period - 1); const uint64_t pbft_sortition_threshold = getPbftSortitionThreshold(total_dpos_votes_count, PbftVoteTypes::propose_vote); @@ -990,8 +989,8 @@ bool VoteManager::genAndValidateVrfSortition(PbftPeriod pbft_period, PbftRound p } } catch (state_api::ErrFutureBlock& e) { LOG(log_er_) << "Unable to generate vrf sorititon for period " << pbft_period << ", round " << pbft_round - << ". Period is too far ahead of actual finalized pbft chain size (" - << final_chain_->last_block_number() << "). Err msg: " << e.what(); + << ". Period is too far ahead of actual finalized pbft chain size (" << final_chain_->lastBlockNumber() + << "). Err msg: " << e.what(); return false; } diff --git a/libraries/core_libs/network/graphql/src/account.cpp b/libraries/core_libs/network/graphql/src/account.cpp index cd413bfe31..ebaa93bfec 100644 --- a/libraries/core_libs/network/graphql/src/account.cpp +++ b/libraries/core_libs/network/graphql/src/account.cpp @@ -9,12 +9,12 @@ namespace graphql::taraxa { Account::Account(std::shared_ptr<::taraxa::final_chain::FinalChain> final_chain, dev::Address address, ::taraxa::EthBlockNumber blk_n) : kAddress(std::move(address)), final_chain_(std::move(final_chain)) { - account_ = final_chain_->get_account(kAddress, blk_n); + account_ = final_chain_->getAccount(kAddress, blk_n); } Account::Account(std::shared_ptr<::taraxa::final_chain::FinalChain> final_chain, dev::Address address) : kAddress(std::move(address)), final_chain_(std::move(final_chain)) { - account_ = final_chain_->get_account(kAddress); + account_ = final_chain_->getAccount(kAddress); } response::Value Account::getAddress() const noexcept { return response::Value(kAddress.toString()); } @@ -34,11 +34,11 @@ response::Value 
Account::getTransactionCount() const noexcept { } response::Value Account::getCode() const noexcept { - return response::Value(dev::toJS(final_chain_->get_code(kAddress, final_chain_->last_block_number()))); + return response::Value(dev::toJS(final_chain_->getCode(kAddress, final_chain_->lastBlockNumber()))); } response::Value Account::getStorage(response::Value&& slotArg) const { - return response::Value(dev::toJS(final_chain_->get_account_storage(kAddress, dev::u256(slotArg.get())))); + return response::Value(dev::toJS(final_chain_->getAccountStorage(kAddress, dev::u256(slotArg.get())))); } } // namespace graphql::taraxa \ No newline at end of file diff --git a/libraries/core_libs/network/graphql/src/query.cpp b/libraries/core_libs/network/graphql/src/query.cpp index e8705353e4..2a14b9ed8a 100644 --- a/libraries/core_libs/network/graphql/src/query.cpp +++ b/libraries/core_libs/network/graphql/src/query.cpp @@ -37,17 +37,17 @@ std::shared_ptr Query::getBlock(std::optional&& std::optional<::taraxa::EthBlockNumber> block_number; if (number) { block_number = number->get(); - if (const auto last_block_number = final_chain_->last_block_number(); last_block_number < block_number) { + if (const auto last_block_number = final_chain_->lastBlockNumber(); last_block_number < block_number) { return nullptr; } } if (hash) { - block_number = final_chain_->block_number(dev::h256(hash->get())); + block_number = final_chain_->blockNumber(dev::h256(hash->get())); if (!block_number) { return nullptr; } } - auto block_header = final_chain_->block_header(block_number); + auto block_header = final_chain_->blockHeader(block_number); if (!block_header) { return nullptr; } @@ -85,7 +85,7 @@ std::vector> Query::getBlocks(response::Value&& f end_block_num = start_block_num + Query::kMaxPropagationLimit; } - const int last_block_number = final_chain_->last_block_number(); + const int last_block_number = final_chain_->lastBlockNumber(); if (start_block_num > last_block_number) { return 
blocks; } else if (end_block_num > last_block_number) { @@ -156,7 +156,7 @@ std::vector> Query::getPeriodDagBlocks( if (periodArg) { period = periodArg->get(); } else { - period = final_chain_->last_block_number(); + period = final_chain_->lastBlockNumber(); } auto dag_blocks = db_->getFinalizedDagBlockByPeriod(period); if (dag_blocks.size()) { diff --git a/libraries/core_libs/network/graphql/src/sync_state.cpp b/libraries/core_libs/network/graphql/src/sync_state.cpp index 4a41e83dad..af3b7b5800 100644 --- a/libraries/core_libs/network/graphql/src/sync_state.cpp +++ b/libraries/core_libs/network/graphql/src/sync_state.cpp @@ -12,7 +12,7 @@ SyncState::SyncState(std::shared_ptr<::taraxa::final_chain::FinalChain> final_ch response::Value SyncState::getStartingBlock() const noexcept { return response::Value(0); } response::Value SyncState::getCurrentBlock() const noexcept { - return response::Value(static_cast(final_chain_->last_block_number())); + return response::Value(static_cast(final_chain_->lastBlockNumber())); } response::Value SyncState::getHighestBlock() const noexcept { diff --git a/libraries/core_libs/network/graphql/src/transaction.cpp b/libraries/core_libs/network/graphql/src/transaction.cpp index d2dd870980..8ba1353daf 100644 --- a/libraries/core_libs/network/graphql/src/transaction.cpp +++ b/libraries/core_libs/network/graphql/src/transaction.cpp @@ -26,7 +26,7 @@ response::Value Transaction::getNonce() const noexcept { return response::Value( std::optional Transaction::getIndex() const noexcept { if (!location_) { - location_ = final_chain_->transaction_location(transaction_->getHash()); + location_ = final_chain_->transactionLocation(transaction_->getHash()); if (!location_) return std::nullopt; } return {location_->position}; @@ -34,7 +34,7 @@ std::optional Transaction::getIndex() const noexcept { std::shared_ptr Transaction::getFrom(std::optional&&) const { if (!location_) { - location_ = final_chain_->transaction_location(transaction_->getHash()); + 
location_ = final_chain_->transactionLocation(transaction_->getHash()); if (!location_) { return std::make_shared(std::make_shared(final_chain_, transaction_->getSender())); } @@ -46,7 +46,7 @@ std::shared_ptr Transaction::getFrom(std::optional Transaction::getTo(std::optional&&) const { if (!transaction_->getReceiver()) return nullptr; if (!location_) { - location_ = final_chain_->transaction_location(transaction_->getHash()); + location_ = final_chain_->transactionLocation(transaction_->getHash()); if (!location_) { return std::make_shared(std::make_shared(final_chain_, *transaction_->getReceiver())); } @@ -69,7 +69,7 @@ response::Value Transaction::getInputData() const noexcept { std::shared_ptr Transaction::getBlock() const { if (!location_) { - location_ = final_chain_->transaction_location(transaction_->getHash()); + location_ = final_chain_->transactionLocation(transaction_->getHash()); if (!location_) return nullptr; } return get_block_by_num_(location_->period); @@ -77,7 +77,7 @@ std::shared_ptr Transaction::getBlock() const { std::optional Transaction::getStatus() const noexcept { if (!receipt_) { - receipt_ = final_chain_->transaction_receipt(transaction_->getHash()); + receipt_ = final_chain_->transactionReceipt(transaction_->getHash()); if (!receipt_) return std::nullopt; } return response::Value(static_cast(receipt_->status_code)); @@ -85,7 +85,7 @@ std::optional Transaction::getStatus() const noexcept { std::optional Transaction::getGasUsed() const noexcept { if (!receipt_) { - receipt_ = final_chain_->transaction_receipt(transaction_->getHash()); + receipt_ = final_chain_->transactionReceipt(transaction_->getHash()); if (!receipt_) return std::nullopt; } return response::Value(static_cast(receipt_->gas_used)); @@ -93,7 +93,7 @@ std::optional Transaction::getGasUsed() const noexcept { std::optional Transaction::getCumulativeGasUsed() const noexcept { if (!receipt_) { - receipt_ = final_chain_->transaction_receipt(transaction_->getHash()); + receipt_ 
= final_chain_->transactionReceipt(transaction_->getHash()); if (!receipt_) return std::nullopt; } return response::Value(static_cast(receipt_->cumulative_gas_used)); @@ -101,7 +101,7 @@ std::optional Transaction::getCumulativeGasUsed() const noexcep std::shared_ptr Transaction::getCreatedContract(std::optional&&) const noexcept { if (!receipt_) { - receipt_ = final_chain_->transaction_receipt(transaction_->getHash()); + receipt_ = final_chain_->transactionReceipt(transaction_->getHash()); if (!receipt_) return nullptr; } if (!receipt_->new_contract_address) return nullptr; @@ -111,7 +111,7 @@ std::shared_ptr Transaction::getCreatedContract(std::optional>> Transaction::getLogs() const noexcept { std::vector> logs; if (!receipt_) { - receipt_ = final_chain_->transaction_receipt(transaction_->getHash()); + receipt_ = final_chain_->transactionReceipt(transaction_->getHash()); if (!receipt_) return std::nullopt; } diff --git a/libraries/core_libs/network/graphql/src/types/current_state.cpp b/libraries/core_libs/network/graphql/src/types/current_state.cpp index f7dc510266..7432978312 100644 --- a/libraries/core_libs/network/graphql/src/types/current_state.cpp +++ b/libraries/core_libs/network/graphql/src/types/current_state.cpp @@ -7,7 +7,7 @@ CurrentState::CurrentState(std::shared_ptr<::taraxa::final_chain::FinalChain> fi : final_chain_(std::move(final_chain)), dag_manager_(std::move(dag_manager)) {} response::Value CurrentState::getFinalBlock() const noexcept { - return response::Value(static_cast(final_chain_->last_block_number())); + return response::Value(static_cast(final_chain_->lastBlockNumber())); } response::Value CurrentState::getDagBlockLevel() const noexcept { diff --git a/libraries/core_libs/network/rpc/Debug.cpp b/libraries/core_libs/network/rpc/Debug.cpp index 9f45d79e98..11cf10699f 100644 --- a/libraries/core_libs/network/rpc/Debug.cpp +++ b/libraries/core_libs/network/rpc/Debug.cpp @@ -129,7 +129,7 @@ Json::Value 
Debug::debug_getPeriodTransactionsWithReceipts(const std::string& _p } auto final_chain = node->getFinalChain(); auto period = dev::jsToInt(_period); - auto block_hash = final_chain->block_hash(period); + auto block_hash = final_chain->blockHash(period); auto trxs = node->getDB()->getPeriodTransactions(period); if (!trxs.has_value()) { return Json::Value(Json::arrayValue); @@ -137,9 +137,9 @@ Json::Value Debug::debug_getPeriodTransactionsWithReceipts(const std::string& _p return transformToJsonParallel(*trxs, [&final_chain, &block_hash](const auto& trx) { auto hash = trx->getHash(); - auto r = final_chain->transaction_receipt(hash); + auto r = final_chain->transactionReceipt(hash); auto location = - rpc::eth::ExtendedTransactionLocation{{*final_chain->transaction_location(hash), *block_hash}, hash}; + rpc::eth::ExtendedTransactionLocation{{*final_chain->transactionLocation(hash), *block_hash}, hash}; auto transaction = rpc::eth::LocalisedTransaction{trx, location}; auto receipt = rpc::eth::LocalisedTransactionReceipt{*r, location, trx->getSender(), trx->getReceiver()}; auto receipt_json = rpc::eth::toJson(receipt); @@ -191,7 +191,7 @@ Json::Value Debug::debug_getPreviousBlockCertVotes(const std::string& _period) { } const auto votes_period = votes.front()->getPeriod(); - const uint64_t total_dpos_votes_count = final_chain->dpos_eligible_total_vote_count(votes_period - 1); + const uint64_t total_dpos_votes_count = final_chain->dposEligibleTotalVoteCount(votes_period - 1); res["total_votes_count"] = total_dpos_votes_count; res["votes"] = transformToJsonParallel(votes, [&](const auto& vote) { vote_manager->validateVote(vote); @@ -214,7 +214,7 @@ Json::Value Debug::debug_dposValidatorTotalStakes(const std::string& _period) { auto vote_manager = node->getVoteManager(); auto period = dev::jsToInt(_period); - auto validatorsStakes = final_chain->dpos_validators_total_stakes(period); + auto validatorsStakes = final_chain->dposValidatorsTotalStakes(period); Json::Value 
res(Json::arrayValue); @@ -240,7 +240,7 @@ Json::Value Debug::debug_dposTotalAmountDelegated(const std::string& _period) { auto final_chain = node->getFinalChain(); auto period = dev::jsToInt(_period); - auto totalAmountDelegated = final_chain->dpos_total_amount_delegated(period); + auto totalAmountDelegated = final_chain->dposTotalAmountDelegated(period); return toJS(totalAmountDelegated); } catch (...) { @@ -310,7 +310,7 @@ state_api::EVMTransaction Debug::to_eth_trx(const Json::Value& json, EthBlockNum trx.nonce = jsToU256(json["nonce"].asString()); } else { if (auto node = full_node_.lock()) { - trx.nonce = node->getFinalChain()->get_account(trx.from, blk_num).value_or(state_api::ZeroAccount).nonce; + trx.nonce = node->getFinalChain()->getAccount(trx.from, blk_num).value_or(state_api::ZeroAccount).nonce; } } @@ -320,7 +320,7 @@ state_api::EVMTransaction Debug::to_eth_trx(const Json::Value& json, EthBlockNum EthBlockNumber Debug::parse_blk_num(const string& blk_num_str) { if (blk_num_str == "latest" || blk_num_str == "pending" || blk_num_str.empty()) { if (auto node = full_node_.lock()) { - return node->getFinalChain()->last_block_number(); + return node->getFinalChain()->lastBlockNumber(); } } else if (blk_num_str == "earliest") { return 0; @@ -342,7 +342,7 @@ std::pair, std::optional(transaction_hash); - return {node->getDB()->getTransaction(hash), node->getFinalChain()->transaction_location(hash)}; + return {node->getDB()->getTransaction(hash), node->getFinalChain()->transactionLocation(hash)}; } return {}; } diff --git a/libraries/core_libs/network/rpc/Taraxa.cpp b/libraries/core_libs/network/rpc/Taraxa.cpp index f17723605a..04eb8a47da 100644 --- a/libraries/core_libs/network/rpc/Taraxa.cpp +++ b/libraries/core_libs/network/rpc/Taraxa.cpp @@ -115,7 +115,7 @@ Json::Value Taraxa::taraxa_getNodeVersions() { Json::Value res; auto node = tryGetNode(); auto db = node->getDB(); - auto period = node->getFinalChain()->last_block_number(); + auto period = 
node->getFinalChain()->lastBlockNumber(); const uint64_t max_blocks_to_process = 6000; std::map node_version_map; std::multimap> version_node_map; @@ -132,9 +132,9 @@ Json::Value Taraxa::taraxa_getNodeVersions() { } } - auto total_vote_count = node->getFinalChain()->dpos_eligible_total_vote_count(period); + auto total_vote_count = node->getFinalChain()->dposEligibleTotalVoteCount(period); for (auto nv : node_version_map) { - auto vote_count = node->getFinalChain()->dpos_eligible_vote_count(period, nv.first); + auto vote_count = node->getFinalChain()->dposEligibleVoteCount(period, nv.first); version_node_map.insert({nv.second, {nv.first, vote_count}}); version_count[nv.second].first++; version_count[nv.second].second += vote_count; @@ -194,7 +194,7 @@ Json::Value Taraxa::taraxa_getConfig() { return enc_json(tryGetNode()->getConfig Json::Value Taraxa::taraxa_getChainStats() { Json::Value res; if (auto node = full_node_.lock()) { - res["pbft_period"] = Json::UInt64(node->getFinalChain()->last_block_number()); + res["pbft_period"] = Json::UInt64(node->getFinalChain()->lastBlockNumber()); res["dag_blocks_executed"] = Json::UInt64(node->getDB()->getNumBlockExecuted()); res["transactions_executed"] = Json::UInt64(node->getDB()->getNumTransactionExecuted()); } @@ -210,7 +210,7 @@ std::string Taraxa::taraxa_yield(const std::string& _period) { } auto period = dev::jsToInt(_period); - return toJS(node->getFinalChain()->dpos_yield(period)); + return toJS(node->getFinalChain()->dposYield(period)); } catch (...) { BOOST_THROW_EXCEPTION(JsonRpcException(Errors::ERROR_RPC_INVALID_PARAMS)); } @@ -224,7 +224,7 @@ std::string Taraxa::taraxa_totalSupply(const std::string& _period) { } auto period = dev::jsToInt(_period); - return toJS(node->getFinalChain()->dpos_total_supply(period)); + return toJS(node->getFinalChain()->dposTotalSupply(period)); } catch (...) 
{ BOOST_THROW_EXCEPTION(JsonRpcException(Errors::ERROR_RPC_INVALID_PARAMS)); } diff --git a/libraries/core_libs/network/rpc/Test.cpp b/libraries/core_libs/network/rpc/Test.cpp index 6f69dc1445..4f243f26c2 100644 --- a/libraries/core_libs/network/rpc/Test.cpp +++ b/libraries/core_libs/network/rpc/Test.cpp @@ -38,7 +38,7 @@ Json::Value Test::send_coin_transaction(const Json::Value ¶m1) { secret_t sk = secret_t(param1["secret"].asString()); uint64_t nonce = 0; if (!param1["nonce"]) { - auto acc = node->getFinalChain()->get_account(toAddress(sk)); + auto acc = node->getFinalChain()->getAccount(toAddress(sk)); nonce = acc->nonce.convert_to() + 1; } else { nonce = dev::jsToInt(param1["nonce"].asString()); diff --git a/libraries/core_libs/network/rpc/eth/Eth.cpp b/libraries/core_libs/network/rpc/eth/Eth.cpp index 517b7b1fa1..1f0ae169fc 100644 --- a/libraries/core_libs/network/rpc/eth/Eth.cpp +++ b/libraries/core_libs/network/rpc/eth/Eth.cpp @@ -127,27 +127,27 @@ class EthImpl : public Eth, EthParams { Json::Value eth_accounts() override { return toJsonArray(vector{address}); } - string eth_blockNumber() override { return toJS(final_chain->last_block_number()); } + string eth_blockNumber() override { return toJS(final_chain->lastBlockNumber()); } string eth_getBalance(const string& _address, const Json::Value& _json) override { const auto block_number = get_block_number_from_json(_json); - return toJS(final_chain->get_account(toAddress(_address), block_number).value_or(ZeroAccount).balance); + return toJS(final_chain->getAccount(toAddress(_address), block_number).value_or(ZeroAccount).balance); } string eth_getStorageAt(const string& _address, const string& _position, const Json::Value& _json) override { const auto block_number = get_block_number_from_json(_json); - return toJS(final_chain->get_account_storage(toAddress(_address), jsToU256(_position), block_number)); + return toJS(final_chain->getAccountStorage(toAddress(_address), jsToU256(_position), block_number)); } 
string eth_getStorageRoot(const string& _address, const string& _blockNumber) override { - return toJS(final_chain->get_account(toAddress(_address), parse_blk_num(_blockNumber)) + return toJS(final_chain->getAccount(toAddress(_address), parse_blk_num(_blockNumber)) .value_or(ZeroAccount) .storage_root_eth()); } string eth_getCode(const string& _address, const Json::Value& _json) override { const auto block_number = get_block_number_from_json(_json); - return toJS(final_chain->get_code(toAddress(_address), block_number)); + return toJS(final_chain->getCode(toAddress(_address), block_number)); } string eth_call(const Json::Value& _json, const Json::Value& _jsonBlock) override { @@ -167,7 +167,7 @@ class EthImpl : public Eth, EthParams { if (!blockNumber.empty()) { blk_n = parse_blk_num(blockNumber); } else { - blk_n = final_chain->last_block_number(); + blk_n = final_chain->lastBlockNumber(); } prepare_transaction_for_call(t, blk_n); @@ -229,7 +229,7 @@ class EthImpl : public Eth, EthParams { } Json::Value eth_getBlockByHash(const string& _blockHash, bool _includeTransactions) override { - if (auto blk_n = final_chain->block_number(jsToFixed<32>(_blockHash)); blk_n) { + if (auto blk_n = final_chain->blockNumber(jsToFixed<32>(_blockHash)); blk_n) { return get_block_by_number(*blk_n, _includeTransactions); } return Json::Value(); @@ -313,7 +313,7 @@ class EthImpl : public Eth, EthParams { void note_pending_transaction(const h256& trx_hash) override { watches_.new_transactions_.process_update(trx_hash); } Json::Value get_block_by_number(EthBlockNumber blk_n, bool include_transactions) { - auto blk_header = final_chain->block_header(blk_n); + auto blk_header = final_chain->blockHeader(blk_n); if (!blk_header) { return Json::Value(); } @@ -328,7 +328,7 @@ class EthImpl : public Eth, EthParams { ++loc.position; } } else { - auto hashes = final_chain->transaction_hashes(blk_n); + auto hashes = final_chain->transactionHashes(blk_n); trxs_json = toJsonArray(*hashes); } return 
ret; @@ -339,12 +339,12 @@ class EthImpl : public Eth, EthParams { if (!trx) { return {}; } - auto loc = final_chain->transaction_location(h); + auto loc = final_chain->transactionLocation(h); return LocalisedTransaction{ trx, TransactionLocationWithBlockHash{ *loc, - *final_chain->block_hash(loc->period), + *final_chain->blockHash(loc->period), }, }; } @@ -358,18 +358,18 @@ class EthImpl : public Eth, EthParams { trxs[trx_pos], TransactionLocationWithBlockHash{ {blk_n, trx_pos}, - *final_chain->block_hash(blk_n), + *final_chain->blockHash(blk_n), }, }; } optional get_transaction(const h256& blk_h, uint64_t _i) const { - auto blk_n = final_chain->block_number(blk_h); + auto blk_n = final_chain->blockNumber(blk_h); return blk_n ? get_transaction(_i, *blk_n) : nullopt; } optional get_transaction_receipt(const h256& trx_h) const { - auto r = final_chain->transaction_receipt(trx_h); + auto r = final_chain->transactionReceipt(trx_h); if (!r) { return {}; } @@ -384,12 +384,12 @@ class EthImpl : public Eth, EthParams { } uint64_t transactionCount(const h256& block_hash) const { - auto n = final_chain->block_number(block_hash); + auto n = final_chain->blockNumber(block_hash); return n ? final_chain->transactionCount(n) : 0; } trx_nonce_t transaction_count(EthBlockNumber n, const Address& addr) { - return final_chain->get_account(addr, n).value_or(ZeroAccount).nonce; + return final_chain->getAccount(addr, n).value_or(ZeroAccount).nonce; } state_api::ExecutionResult call(EthBlockNumber blk_n, const TransactionSkeleton& trx) { @@ -478,7 +478,7 @@ class EthImpl : public Eth, EthParams { EthBlockNumber parse_blk_num(const string& blk_num_str) { auto ret = parse_blk_num_specific(blk_num_str); - return ret ? *ret : final_chain->last_block_number(); + return ret ? 
*ret : final_chain->lastBlockNumber(); } EthBlockNumber get_block_number_from_json(const Json::Value& json) { @@ -487,7 +487,7 @@ class EthImpl : public Eth, EthParams { return parse_blk_num(json["blockNumber"].asString()); } if (!json["blockHash"].empty()) { - if (auto ret = final_chain->block_number(jsToFixed<32>(json["blockHash"].asString()))) { + if (auto ret = final_chain->blockNumber(jsToFixed<32>(json["blockHash"].asString()))) { return *ret; } throw std::runtime_error("Resource not found"); @@ -504,7 +504,7 @@ class EthImpl : public Eth, EthParams { if (const auto& fromBlock = json["fromBlock"]; !fromBlock.empty()) { from_block = parse_blk_num(fromBlock.asString()); } else { - from_block = final_chain->last_block_number(); + from_block = final_chain->lastBlockNumber(); } if (const auto& toBlock = json["toBlock"]; !toBlock.empty()) { to_block = parse_blk_num_specific(toBlock.asString()); diff --git a/libraries/core_libs/network/rpc/eth/Eth.h b/libraries/core_libs/network/rpc/eth/Eth.h index c36285bf82..262f0af34a 100644 --- a/libraries/core_libs/network/rpc/eth/Eth.h +++ b/libraries/core_libs/network/rpc/eth/Eth.h @@ -11,7 +11,7 @@ struct EthParams { Address address; uint64_t chain_id = 0; uint64_t gas_limit = ((uint64_t)1 << 53) - 1; - std::shared_ptr final_chain; + std::shared_ptr final_chain; std::function(const h256&)> get_trx; std::function& trx)> send_trx; std::function gas_pricer = [] { return u256(0); }; diff --git a/libraries/core_libs/network/rpc/eth/LogFilter.cpp b/libraries/core_libs/network/rpc/eth/LogFilter.cpp index 7723998bcf..3042b0d618 100644 --- a/libraries/core_libs/network/rpc/eth/LogFilter.cpp +++ b/libraries/core_libs/network/rpc/eth/LogFilter.cpp @@ -119,25 +119,25 @@ void LogFilter::match_one(const ExtendedTransactionLocation& trx_loc, const Tran } } -std::vector LogFilter::match_all(const FinalChain& final_chain) const { +std::vector LogFilter::match_all(const final_chain::FinalChain& final_chain) const { std::vector ret; - // 
to_block can't be greater than the last executed block number - const auto last_block_number = final_chain.last_block_number(); - auto to_blk_n = to_block_ ? *to_block_ : last_block_number; - if (to_blk_n > last_block_number) { - to_blk_n = last_block_number; - } - auto action = [&, this](EthBlockNumber blk_n) { - ExtendedTransactionLocation trx_loc{{{blk_n}, final_chain.block_hash(blk_n).value()}}; - auto hashes = final_chain.transaction_hashes(trx_loc.period); + ExtendedTransactionLocation trx_loc{{{blk_n}, *final_chain.blockHash(blk_n)}}; + auto hashes = final_chain.transactionHashes(trx_loc.period); for (const auto& hash : *hashes) { trx_loc.trx_hash = hash; - match_one(trx_loc, final_chain.transaction_receipt(hash).value(), [&](const auto& lle) { ret.push_back(lle); }); + match_one(trx_loc, *final_chain.transactionReceipt(hash), [&](const auto& lle) { ret.push_back(lle); }); ++trx_loc.position; } }; + // to_block can't be greater than the last executed block number + const auto last_block_number = final_chain.lastBlockNumber(); + auto to_blk_n = to_block_ ? 
*to_block_ : last_block_number; + if (to_blk_n > last_block_number) { + to_blk_n = last_block_number; + } + if (is_range_only_) { for (auto blk_n = from_block_; blk_n <= to_blk_n; ++blk_n) { action(blk_n); diff --git a/libraries/core_libs/network/rpc/eth/LogFilter.hpp b/libraries/core_libs/network/rpc/eth/LogFilter.hpp index 830717c319..08232c7a0f 100644 --- a/libraries/core_libs/network/rpc/eth/LogFilter.hpp +++ b/libraries/core_libs/network/rpc/eth/LogFilter.hpp @@ -27,7 +27,7 @@ struct LogFilter { bool blk_number_matches(EthBlockNumber blk_n) const; void match_one(ExtendedTransactionLocation const& trx_loc, TransactionReceipt const& r, std::function const& cb) const; - std::vector match_all(FinalChain const& final_chain) const; + std::vector match_all(final_chain::FinalChain const& final_chain) const; }; } // namespace taraxa::net::rpc::eth \ No newline at end of file diff --git a/libraries/core_libs/node/include/node/node.hpp b/libraries/core_libs/node/include/node/node.hpp index a0b9aa87b5..5e8ba477d5 100644 --- a/libraries/core_libs/node/include/node/node.hpp +++ b/libraries/core_libs/node/include/node/node.hpp @@ -114,7 +114,7 @@ class FullNode : public std::enable_shared_from_this { std::shared_ptr pbft_chain_; std::shared_ptr pillar_chain_mgr_; std::shared_ptr key_manager_; - std::shared_ptr final_chain_; + std::shared_ptr final_chain_; std::shared_ptr jsonrpc_http_; std::shared_ptr graphql_http_; std::shared_ptr jsonrpc_ws_; diff --git a/libraries/core_libs/node/src/node.cpp b/libraries/core_libs/node/src/node.cpp index 4285b15d65..864afa988d 100644 --- a/libraries/core_libs/node/src/node.cpp +++ b/libraries/core_libs/node/src/node.cpp @@ -11,7 +11,7 @@ #include "dag/dag.hpp" #include "dag/dag_block.hpp" #include "dag/dag_block_proposer.hpp" -#include "final_chain/final_chain_impl.hpp" +#include "final_chain/final_chain.hpp" #include "graphql/http_processor.hpp" #include "graphql/ws_server.hpp" #include "key_manager/key_manager.hpp" @@ -113,7 +113,7 @@ 
void FullNode::init() { } gas_pricer_ = std::make_shared(conf_.genesis.gas_price, conf_.is_light_node, db_); - final_chain_ = std::make_shared(db_, conf_, node_addr); + final_chain_ = std::make_shared(db_, conf_, node_addr); key_manager_ = std::make_shared(final_chain_); trx_mgr_ = std::make_shared(conf_, db_, final_chain_, node_addr); @@ -423,7 +423,7 @@ void FullNode::rebuildDb() { LOG(log_nf_) << "Adding PBFT block " << period_data->pbft_blk->getBlockHash().toString() << " from old DB into syncing queue for processing, final chain size: " - << final_chain_->last_block_number(); + << final_chain_->lastBlockNumber(); pbft_mgr_->periodDataQueuePush(std::move(*period_data), dev::p2p::NodeID(), std::move(cert_votes)); pbft_mgr_->waitForPeriodFinalization(); diff --git a/tests/final_chain_test.cpp b/tests/final_chain_test.cpp index 283d016813..ef59a96cae 100644 --- a/tests/final_chain_test.cpp +++ b/tests/final_chain_test.cpp @@ -1,10 +1,11 @@ +#include "final_chain/final_chain.hpp" + #include #include #include "common/constants.hpp" #include "common/vrf_wrapper.hpp" #include "config/config.hpp" -#include "final_chain/final_chain_impl.hpp" #include "final_chain/trie_common.hpp" #include "libdevcore/CommonJS.h" #include "network/rpc/eth/Eth.h" @@ -25,7 +26,7 @@ struct advance_check_opts { struct FinalChainTest : WithDataDir { std::shared_ptr db{new DbStorage(data_dir / "db")}; FullNodeConfig cfg = FullNodeConfig(); - std::shared_ptr SUT; + std::shared_ptr SUT; bool assume_only_toplevel_transfers = true; std::unordered_map expected_balances; uint64_t expected_blk_num = 0; @@ -45,11 +46,11 @@ struct FinalChainTest : WithDataDir { } void init() { - SUT = std::make_shared(db, cfg, addr_t{}); + SUT = std::make_shared(db, cfg, addr_t{}); const auto& effective_balances = effective_initial_balances(cfg.genesis.state); cfg.genesis.state.dpos.yield_percentage = 0; for (const auto& [addr, _] : cfg.genesis.state.initial_balances) { - auto acc_actual = SUT->get_account(addr); + 
auto acc_actual = SUT->getAccount(addr); ASSERT_TRUE(acc_actual); const auto expected_bal = effective_balances.at(addr); ASSERT_EQ(acc_actual->balance, expected_bal); @@ -87,17 +88,17 @@ struct FinalChainTest : WithDataDir { auto result = SUT->finalize(std::move(period_data), {dag_blk.getHash()}).get(); const auto& blk_h = *result->final_chain_blk; - EXPECT_EQ(util::rlp_enc(blk_h), util::rlp_enc(*SUT->block_header(blk_h.number))); - EXPECT_EQ(util::rlp_enc(blk_h), util::rlp_enc(*SUT->block_header())); + EXPECT_EQ(util::rlp_enc(blk_h), util::rlp_enc(*SUT->blockHeader(blk_h.number))); + EXPECT_EQ(util::rlp_enc(blk_h), util::rlp_enc(*SUT->blockHeader())); const auto& receipts = result->trx_receipts; - EXPECT_EQ(blk_h.hash, SUT->block_header()->hash); - EXPECT_EQ(blk_h.hash, SUT->block_hash()); - EXPECT_EQ(blk_h.parent_hash, SUT->block_header(expected_blk_num - 1)->hash); + EXPECT_EQ(blk_h.hash, SUT->blockHeader()->hash); + EXPECT_EQ(blk_h.hash, SUT->blockHash()); + EXPECT_EQ(blk_h.parent_hash, SUT->blockHeader(expected_blk_num - 1)->hash); EXPECT_EQ(blk_h.number, expected_blk_num); - EXPECT_EQ(blk_h.number, SUT->last_block_number()); + EXPECT_EQ(blk_h.number, SUT->lastBlockNumber()); EXPECT_EQ(SUT->transactionCount(blk_h.number), trxs.size()); for (size_t i = 0; i < trxs.size(); i++) EXPECT_EQ(*SUT->transactions(blk_h.number)[i], *trxs[i]); - EXPECT_EQ(*SUT->block_number(*SUT->block_hash(blk_h.number)), expected_blk_num); + EXPECT_EQ(*SUT->blockNumber(*SUT->blockHash(blk_h.number)), expected_blk_num); EXPECT_EQ(blk_h.author, pbft_block->getBeneficiary()); EXPECT_EQ(blk_h.timestamp, pbft_block->getTimestamp()); EXPECT_EQ(receipts.size(), trxs.size()); @@ -124,7 +125,7 @@ struct FinalChainTest : WithDataDir { if (!opts.expect_to_fail) { EXPECT_TRUE(r.gas_used != 0); } - EXPECT_EQ(util::rlp_enc(r), util::rlp_enc(*SUT->transaction_receipt(trx->getHash()))); + EXPECT_EQ(util::rlp_enc(r), util::rlp_enc(*SUT->transactionReceipt(trx->getHash()))); cumulative_gas_used_actual 
+= r.gas_used; if (assume_only_toplevel_transfers && trx->getValue() != 0 && r.status_code == 1) { const auto& sender = trx->getSender(); @@ -134,10 +135,10 @@ struct FinalChainTest : WithDataDir { all_addrs_w_changed_balance.insert(sender); all_addrs_w_changed_balance.insert(receiver); expected_balances[receiver] += trx->getValue(); - if (SUT->get_account(sender)->code_size == 0) { + if (SUT->getAccount(sender)->code_size == 0) { expected_balance_changes[sender] = expected_balances[sender]; } - if (SUT->get_account(receiver)->code_size == 0) { + if (SUT->getAccount(receiver)->code_size == 0) { expected_balance_changes[receiver] = expected_balances[receiver]; } } @@ -152,7 +153,7 @@ struct FinalChainTest : WithDataDir { EXPECT_EQ(r.bloom(), LogBloom()); } expected_block_log_bloom |= r.bloom(); - auto trx_loc = *SUT->transaction_location(trx->getHash()); + auto trx_loc = *SUT->transactionLocation(trx->getHash()); EXPECT_EQ(trx_loc.period, blk_h.number); EXPECT_EQ(trx_loc.position, i); } @@ -163,7 +164,7 @@ struct FinalChainTest : WithDataDir { EXPECT_EQ(blk_h.log_bloom, expected_block_log_bloom); if (assume_only_toplevel_transfers) { for (const auto& addr : all_addrs_w_changed_balance) { - EXPECT_EQ(SUT->get_account(addr)->balance, expected_balances[addr]); + EXPECT_EQ(SUT->getAccount(addr)->balance, expected_balances[addr]); } } return result; @@ -307,9 +308,9 @@ TEST_F(FinalChainTest, initial_validators) { init(); const auto votes_per_address = cfg.genesis.state.dpos.validator_maximum_stake / cfg.genesis.state.dpos.vote_eligibility_balance_step; - const auto total_votes = SUT->dpos_eligible_total_vote_count(SUT->last_block_number()); + const auto total_votes = SUT->dposEligibleTotalVoteCount(SUT->lastBlockNumber()); for (const auto& vk : validator_keys) { - const auto address_votes = SUT->dpos_eligible_vote_count(SUT->last_block_number(), vk.address()); + const auto address_votes = SUT->dposEligibleVoteCount(SUT->lastBlockNumber(), vk.address()); 
EXPECT_EQ(votes_per_address, address_votes); EXPECT_EQ(validator_keys.size() * votes_per_address, total_votes); } @@ -334,13 +335,13 @@ TEST_F(FinalChainTest, nonce_test) { advance({trx3}); advance({trx4}); - ASSERT_EQ(SUT->get_account(addr)->nonce.convert_to(), 4); + ASSERT_EQ(SUT->getAccount(addr)->nonce.convert_to(), 4); // nonce_skipping is enabled, ok auto trx6 = std::make_shared(6, 100, 0, 100000, dev::bytes(), sk, receiver_addr); advance({trx6}); - ASSERT_EQ(SUT->get_account(addr)->nonce.convert_to(), 7); + ASSERT_EQ(SUT->getAccount(addr)->nonce.convert_to(), 7); // nonce is lower, fail auto trx5 = std::make_shared(5, 101, 0, 100000, dev::bytes(), sk, receiver_addr); @@ -362,10 +363,10 @@ TEST_F(FinalChainTest, nonce_skipping) { auto trx4 = std::make_shared(3, 100, 0, 100000, dev::bytes(), sk, receiver_addr); advance({trx1}); - ASSERT_EQ(SUT->get_account(addr)->nonce.convert_to(), 1); + ASSERT_EQ(SUT->getAccount(addr)->nonce.convert_to(), 1); advance({trx3}); - ASSERT_EQ(SUT->get_account(addr)->nonce.convert_to(), 3); + ASSERT_EQ(SUT->getAccount(addr)->nonce.convert_to(), 3); // fail transaction with the same nonce advance({trx3}, {false, false, true}); @@ -373,10 +374,10 @@ TEST_F(FinalChainTest, nonce_skipping) { // fail transaction with lower nonce advance({trx2}, {false, false, true}); - ASSERT_EQ(SUT->get_account(addr)->nonce.convert_to(), 3); + ASSERT_EQ(SUT->getAccount(addr)->nonce.convert_to(), 3); advance({trx4}); - ASSERT_EQ(SUT->get_account(addr)->nonce.convert_to(), 4); + ASSERT_EQ(SUT->getAccount(addr)->nonce.convert_to(), 4); } TEST_F(FinalChainTest, exec_trx_with_nonce_from_api) { @@ -398,7 +399,7 @@ TEST_F(FinalChainTest, exec_trx_with_nonce_from_api) { auto trx = std::make_shared(nonce, 1, 0, 1000000, dev::fromHex(samples::greeter_contract_code), sk); auto result = advance({trx}, {false, false, true}); } - auto account = SUT->get_account(addr); + auto account = SUT->getAccount(addr); ASSERT_EQ(account->nonce, nonce + 1); auto trx = 
std::make_shared(account->nonce, 1, 0, 1000000, dev::fromHex(samples::greeter_contract_code), sk); @@ -469,7 +470,7 @@ TEST_F(FinalChainTest, failed_transaction_fee) { auto trx2_1 = std::make_shared(2, 101, 1, gas, dev::bytes(), sk, receiver); advance({trx1}); - auto blk = SUT->block_header(expected_blk_num); + auto blk = SUT->blockHeader(expected_blk_num); auto proposer_balance = SUT->getBalance(blk->author); EXPECT_EQ(proposer_balance.first, 21000); advance({trx2}); @@ -477,24 +478,24 @@ TEST_F(FinalChainTest, failed_transaction_fee) { { // low nonce trx should fail and consume all gas - auto balance_before = SUT->get_account(addr)->balance; + auto balance_before = SUT->getAccount(addr)->balance; advance({trx2_1}, {false, false, true}); - auto receipt = SUT->transaction_receipt(trx2_1->getHash()); + auto receipt = SUT->transactionReceipt(trx2_1->getHash()); EXPECT_EQ(receipt->gas_used, gas); - EXPECT_EQ(balance_before - SUT->get_account(addr)->balance, receipt->gas_used * trx2_1->getGasPrice()); + EXPECT_EQ(balance_before - SUT->getAccount(addr)->balance, receipt->gas_used * trx2_1->getGasPrice()); } { // transaction gas is bigger then current account balance. 
Use closest int as gas used and decrease sender balance // by gas_used * gas_price - ASSERT_GE(gas, SUT->get_account(addr)->balance); - auto balance_before = SUT->get_account(addr)->balance; + ASSERT_GE(gas, SUT->getAccount(addr)->balance); + auto balance_before = SUT->getAccount(addr)->balance; auto gas_price = 3; auto trx4 = std::make_shared(4, 100, gas_price, gas, dev::bytes(), sk, receiver); advance({trx4}, {false, false, true}); - auto receipt = SUT->transaction_receipt(trx4->getHash()); + auto receipt = SUT->transactionReceipt(trx4->getHash()); EXPECT_GT(balance_before % gas_price, 0); EXPECT_EQ(receipt->gas_used, balance_before / gas_price); - EXPECT_EQ(SUT->get_account(addr)->balance, balance_before % gas_price); + EXPECT_EQ(SUT->getAccount(addr)->balance, balance_before % gas_price); } } @@ -832,9 +833,9 @@ TEST_F(FinalChainTest, remove_jailed_validator_votes_from_total) { init(); const auto votes_per_address = cfg.genesis.state.dpos.validator_maximum_stake / cfg.genesis.state.dpos.vote_eligibility_balance_step; - const auto total_votes_before = SUT->dpos_eligible_total_vote_count(SUT->last_block_number()); + const auto total_votes_before = SUT->dposEligibleTotalVoteCount(SUT->lastBlockNumber()); for (const auto& vk : validator_keys) { - const auto address_votes = SUT->dpos_eligible_vote_count(SUT->last_block_number(), vk.address()); + const auto address_votes = SUT->dposEligibleVoteCount(SUT->lastBlockNumber(), vk.address()); EXPECT_EQ(votes_per_address, address_votes); EXPECT_EQ(validator_keys.size() * votes_per_address, total_votes_before); } @@ -853,7 +854,7 @@ TEST_F(FinalChainTest, remove_jailed_validator_votes_from_total) { advance({}); } - const auto total_votes = SUT->dpos_eligible_total_vote_count(SUT->last_block_number()); + const auto total_votes = SUT->dposEligibleTotalVoteCount(SUT->lastBlockNumber()); EXPECT_EQ(total_votes_before - votes_per_address, total_votes); } diff --git a/tests/full_node_test.cpp b/tests/full_node_test.cpp index 
b5aa407e1d..57b358e14b 100644 --- a/tests/full_node_test.cpp +++ b/tests/full_node_test.cpp @@ -417,7 +417,7 @@ TEST_F(FullNodeTest, sync_five_nodes) { void assert_all_transactions_known() { for (auto &n : nodes_) { for (auto &t : transactions) { - auto location = n->getFinalChain()->transaction_location(t); + auto location = n->getFinalChain()->transactionLocation(t); ASSERT_EQ(location.has_value(), true); } } @@ -426,7 +426,7 @@ TEST_F(FullNodeTest, sync_five_nodes) { void assert_all_transactions_success() { for (auto &n : nodes_) { for (auto &t : transactions) { - auto receipt = n->getFinalChain()->transaction_receipt(t); + auto receipt = n->getFinalChain()->transactionReceipt(t); if (receipt->status_code != 1) { auto trx = n->getTransactionManager()->getTransaction(t); std::cout << "failed: " << t.toString() << " sender: " << trx->getSender() << " nonce: " << trx->getNonce() @@ -442,7 +442,7 @@ TEST_F(FullNodeTest, sync_five_nodes) { wait(wait_for, [this](auto &ctx) { for (auto &n : nodes_) { for (auto &t : transactions) { - if (!n->getFinalChain()->transaction_location(t)) { + if (!n->getFinalChain()->transactionLocation(t)) { ctx.fail(); } } @@ -1285,7 +1285,7 @@ TEST_F(FullNodeTest, db_rebuild) { nodes[0]->getTransactionManager()->insertTransaction(dummy_trx); trxs_count++; thisThreadSleepForMilliSeconds(100); - executed_chain_size = nodes[0]->getFinalChain()->last_block_number(); + executed_chain_size = nodes[0]->getFinalChain()->lastBlockNumber(); if (executed_chain_size == 5) { trxs_count_at_pbft_size_5 = nodes[0]->getDB()->getNumTransactionExecuted(); } @@ -1303,7 +1303,7 @@ TEST_F(FullNodeTest, db_rebuild) { ctx.fail(); } }); - executed_chain_size = nodes[0]->getFinalChain()->last_block_number(); + executed_chain_size = nodes[0]->getFinalChain()->lastBlockNumber(); std::cout << "Executed transactions " << trxs_count_at_pbft_size_5 << " at chain size 5" << std::endl; std::cout << "Total executed transactions " << executed_trxs << std::endl; std::cout << 
"Executed chain size " << executed_chain_size << std::endl; @@ -1316,7 +1316,7 @@ TEST_F(FullNodeTest, db_rebuild) { auto nodes = launch_nodes(node_cfgs); ASSERT_HAPPENS({10s, 100ms}, [&](auto &ctx) { WAIT_EXPECT_EQ(ctx, nodes[0]->getDB()->getNumTransactionExecuted(), trxs_count) - WAIT_EXPECT_EQ(ctx, nodes[0]->getFinalChain()->last_block_number(), executed_chain_size) + WAIT_EXPECT_EQ(ctx, nodes[0]->getFinalChain()->lastBlockNumber(), executed_chain_size) }); } @@ -1326,7 +1326,7 @@ TEST_F(FullNodeTest, db_rebuild) { auto nodes = launch_nodes(node_cfgs); EXPECT_HAPPENS({10s, 100ms}, [&](auto &ctx) { WAIT_EXPECT_EQ(ctx, nodes[0]->getDB()->getNumTransactionExecuted(), trxs_count) - WAIT_EXPECT_EQ(ctx, nodes[0]->getFinalChain()->last_block_number(), executed_chain_size) + WAIT_EXPECT_EQ(ctx, nodes[0]->getFinalChain()->lastBlockNumber(), executed_chain_size) }); } @@ -1338,7 +1338,7 @@ TEST_F(FullNodeTest, db_rebuild) { auto nodes = launch_nodes(node_cfgs); EXPECT_HAPPENS({10s, 100ms}, [&](auto &ctx) { WAIT_EXPECT_EQ(ctx, nodes[0]->getDB()->getNumTransactionExecuted(), trxs_count_at_pbft_size_5) - WAIT_EXPECT_EQ(ctx, nodes[0]->getFinalChain()->last_block_number(), 5) + WAIT_EXPECT_EQ(ctx, nodes[0]->getFinalChain()->lastBlockNumber(), 5) }); } @@ -1348,7 +1348,7 @@ TEST_F(FullNodeTest, db_rebuild) { auto nodes = launch_nodes(node_cfgs); EXPECT_HAPPENS({10s, 100ms}, [&](auto &ctx) { WAIT_EXPECT_EQ(ctx, nodes[0]->getDB()->getNumTransactionExecuted(), trxs_count_at_pbft_size_5) - WAIT_EXPECT_EQ(ctx, nodes[0]->getFinalChain()->last_block_number(), 5) + WAIT_EXPECT_EQ(ctx, nodes[0]->getFinalChain()->lastBlockNumber(), 5) }); } } @@ -1639,7 +1639,7 @@ TEST_F(FullNodeTest, graphql_test) { data = service::ScalarArgument::require("data", result); block = service::ScalarArgument::require("block", data); const auto hash = service::StringArgument::require("hash", block); - EXPECT_EQ(nodes[0]->getFinalChain()->block_header(3)->hash.toString(), hash); + 
EXPECT_EQ(nodes[0]->getFinalChain()->blockHeader(3)->hash.toString(), hash); // Get block hash by number query = R"({ block(number: 2) { transactionAt(index: 0) { hash } } })"_graphql; @@ -1656,7 +1656,7 @@ TEST_F(FullNodeTest, graphql_test) { block = service::ScalarArgument::require("block", data); auto transactionAt = service::ScalarArgument::require("transactionAt", block); const auto hash2 = service::StringArgument::require("hash", transactionAt); - EXPECT_EQ(nodes[0]->getFinalChain()->transaction_hashes(2)->at(0).toString(), hash2); + EXPECT_EQ(nodes[0]->getFinalChain()->transactionHashes(2)->at(0).toString(), hash2); } } // namespace taraxa::core_tests diff --git a/tests/pbft_manager_test.cpp b/tests/pbft_manager_test.cpp index f5c455d084..34db8087ce 100644 --- a/tests/pbft_manager_test.cpp +++ b/tests/pbft_manager_test.cpp @@ -518,7 +518,7 @@ TEST_F(PbftManagerWithDagCreation, dag_generation) { generateAndApplyInitialDag(); EXPECT_HAPPENS({10s, 250ms}, [&](auto &ctx) { - WAIT_EXPECT_EQ(ctx, node->getFinalChain()->get_account(node->getAddress())->nonce, nonce); + WAIT_EXPECT_EQ(ctx, node->getFinalChain()->getAccount(node->getAddress())->nonce, nonce); }); auto nonce_before = nonce; @@ -531,7 +531,7 @@ TEST_F(PbftManagerWithDagCreation, dag_generation) { EXPECT_EQ(nonce, nonce_before + tx_count); EXPECT_HAPPENS({60s, 250ms}, [&](auto &ctx) { - WAIT_EXPECT_EQ(ctx, node->getFinalChain()->get_account(node->getAddress())->nonce, nonce); + WAIT_EXPECT_EQ(ctx, node->getFinalChain()->getAccount(node->getAddress())->nonce, nonce); WAIT_EXPECT_EQ(ctx, node->getDB()->getNumTransactionExecuted(), nonce - 1); }); } @@ -606,7 +606,7 @@ TEST_F(PbftManagerWithDagCreation, limit_pbft_block) { EXPECT_HAPPENS({10s, 500ms}, [&](auto &ctx) { WAIT_EXPECT_EQ(ctx, trxs_before, node->getDB()->getNumTransactionExecuted()); }); - auto starting_block_number = node->getFinalChain()->last_block_number(); + auto starting_block_number = node->getFinalChain()->lastBlockNumber(); auto 
trx_in_block = 5; insertBlocks(generateDagBlocks(20, 5, trx_in_block)); @@ -617,7 +617,7 @@ TEST_F(PbftManagerWithDagCreation, limit_pbft_block) { }); auto max_pbft_block_capacity = node_cfgs.front().genesis.pbft.gas_limit / (trxEstimation() * 5); - for (size_t i = starting_block_number; i < node->getFinalChain()->last_block_number(); ++i) { + for (size_t i = starting_block_number; i < node->getFinalChain()->lastBlockNumber(); ++i) { const auto &blk_hash = node->getDB()->getPeriodBlockHash(i); ASSERT_TRUE(blk_hash != kNullBlockHash); const auto &pbft_block = node->getPbftChain()->getPbftBlockInChain(blk_hash); @@ -641,13 +641,13 @@ TEST_F(PbftManagerWithDagCreation, produce_overweighted_block) { EXPECT_HAPPENS({10s, 500ms}, [&](auto &ctx) { WAIT_EXPECT_EQ(ctx, trx_count, node->getDB()->getNumTransactionExecuted()); }); - auto starting_block_number = node->getFinalChain()->last_block_number(); + auto starting_block_number = node->getFinalChain()->lastBlockNumber(); const auto trx_in_block = dag_gas_limit / trxEstimation() + 2; insertBlocks(generateDagBlocks(1, 5, trx_in_block)); // We need to move one block forward when we will start applying those generated DAGs and transactions EXPECT_HAPPENS({10s, 100ms}, [&](auto &ctx) { - WAIT_EXPECT_EQ(ctx, node->getFinalChain()->last_block_number(), starting_block_number + 1); + WAIT_EXPECT_EQ(ctx, node->getFinalChain()->lastBlockNumber(), starting_block_number + 1); }); // check that new created transaction wasn't executed in that previous block ASSERT_EQ(trx_count, node->getDB()->getNumTransactionExecuted()); @@ -658,11 +658,11 @@ TEST_F(PbftManagerWithDagCreation, produce_overweighted_block) { EXPECT_HAPPENS({10s, 100ms}, [&](auto &ctx) { // all transactions should be included in 2 blocks WAIT_EXPECT_EQ(ctx, node->getDB()->getNumTransactionExecuted(), trx_count); - WAIT_EXPECT_EQ(ctx, node->getFinalChain()->last_block_number(), starting_block_number + 2); + WAIT_EXPECT_EQ(ctx, node->getFinalChain()->lastBlockNumber(), 
starting_block_number + 2); }); // verify that last block is overweighted, but it is in chain - const auto period = node->getFinalChain()->last_block_number(); + const auto period = node->getFinalChain()->lastBlockNumber(); auto period_raw = node->getDB()->getPeriodDataRaw(period); ASSERT_FALSE(period_raw.empty()); PeriodData period_data(period_raw); @@ -721,7 +721,7 @@ TEST_F(PbftManagerWithDagCreation, state_root_hash) { } EXPECT_HAPPENS({5s, 500ms}, [&](auto &ctx) { - WAIT_EXPECT_EQ(ctx, node->getFinalChain()->get_account(node->getAddress())->nonce, nonce); + WAIT_EXPECT_EQ(ctx, node->getFinalChain()->getAccount(node->getAddress())->nonce, nonce); WAIT_EXPECT_EQ(ctx, node->getDB()->getNumTransactionExecuted(), nonce - 1); }); @@ -733,7 +733,7 @@ TEST_F(PbftManagerWithDagCreation, state_root_hash) { auto period = pbft_block.getPeriod(); h256 state_root; if (period > state_root_delay) { - state_root = node->getFinalChain()->block_header(period - state_root_delay)->state_root; + state_root = node->getFinalChain()->blockHeader(period - state_root_delay)->state_root; } EXPECT_EQ(pbft_block.getPrevStateRoot(), state_root); diff --git a/tests/pillar_chain_test.cpp b/tests/pillar_chain_test.cpp index 34d2d03c6d..8f933445b3 100644 --- a/tests/pillar_chain_test.cpp +++ b/tests/pillar_chain_test.cpp @@ -200,7 +200,7 @@ TEST_F(PillarChainTest, pillar_chain_syncing) { // Wait until node1 creates at least 3 pillar blocks const auto pillar_blocks_count = 3; ASSERT_HAPPENS({20s, 250ms}, [&](auto& ctx) { - WAIT_EXPECT_EQ(ctx, node1->getFinalChain()->last_block_number(), + WAIT_EXPECT_EQ(ctx, node1->getFinalChain()->lastBlockNumber(), pillar_blocks_count * node_cfgs[0].genesis.state.hardforks.ficus_hf.pillar_blocks_interval) }); node1->getPbftManager()->stop(); @@ -209,7 +209,7 @@ TEST_F(PillarChainTest, pillar_chain_syncing) { auto node2 = launch_nodes({node_cfgs[1]})[0]; // Wait until node2 syncs pbft chain with node1 ASSERT_HAPPENS({20s, 200ms}, [&](auto& ctx) { - 
WAIT_EXPECT_EQ(ctx, node2->getFinalChain()->last_block_number(), node1->getFinalChain()->last_block_number()) + WAIT_EXPECT_EQ(ctx, node2->getFinalChain()->lastBlockNumber(), node1->getFinalChain()->lastBlockNumber()) }); node2->getPbftManager()->stop(); @@ -246,7 +246,7 @@ TEST_F(PillarChainTest, pillar_chain_syncing) { ASSERT_EQ(pillar_vote->getPeriod() - 1, node2_current_pillar_block->getPeriod()); ASSERT_EQ(pillar_vote->getBlockHash(), node2_current_pillar_block->getHash()); votes_count += - node2->getFinalChain()->dpos_eligible_vote_count(pillar_vote->getPeriod() - 1, pillar_vote->getVoterAddr()); + node2->getFinalChain()->dposEligibleVoteCount(pillar_vote->getPeriod() - 1, pillar_vote->getVoterAddr()); } ASSERT_GE(votes_count, threshold); } @@ -524,7 +524,7 @@ TEST_F(PillarChainTest, finalize_root_in_pillar_block) { ASSERT_EQ(trx->getSender(), kTaraxaSystemAccount); ASSERT_EQ(trx->getReceiver(), node_cfgs[0].genesis.state.hardforks.ficus_hf.bridge_contract_address); // check that correct hash is returned - auto hashes = node->getFinalChain()->transaction_hashes(period - 1); + auto hashes = node->getFinalChain()->transactionHashes(period - 1); ASSERT_EQ(hashes->size(), 1); ASSERT_EQ(hashes->at(0), trx->getHash()); // check that location by hash exists and is_system set to true @@ -540,7 +540,7 @@ TEST_F(PillarChainTest, finalize_root_in_pillar_block) { ASSERT_EQ(trx_by_hash->getReceiver(), node_cfgs[0].genesis.state.hardforks.ficus_hf.bridge_contract_address); ASSERT_EQ(trx_by_hash->getSender(), kTaraxaSystemAccount); // check that receipt exists - const auto& trx_receipt = node->getFinalChain()->transaction_receipt(trx->getHash()); + const auto& trx_receipt = node->getFinalChain()->transactionReceipt(trx->getHash()); ASSERT_TRUE(trx_receipt.has_value()); ASSERT_EQ(trx_receipt->status_code, 1); } diff --git a/tests/rpc_test.cpp b/tests/rpc_test.cpp index f6b6c90dbc..80aab92d75 100644 --- a/tests/rpc_test.cpp +++ b/tests/rpc_test.cpp @@ -76,8 +76,8 @@ 
TEST_F(RPCTest, eth_call) { eth_rpc_params.final_chain = final_chain; auto eth_json_rpc = net::rpc::eth::NewEth(std::move(eth_rpc_params)); - const auto last_block_num = final_chain->last_block_number(); - const u256 total_eligible = final_chain->dpos_eligible_total_vote_count(last_block_num); + const auto last_block_num = final_chain->lastBlockNumber(); + const u256 total_eligible = final_chain->dposEligibleTotalVoteCount(last_block_num); const auto total_eligible_str = dev::toHexPrefixed(dev::toBigEndian(total_eligible)); const auto empty_address = dev::KeyPair::create().address().toString(); @@ -227,7 +227,7 @@ TEST_F(RPCTest, eth_getBlock) { eth_rpc_params.final_chain = nodes.front()->getFinalChain(); auto eth_json_rpc = net::rpc::eth::NewEth(std::move(eth_rpc_params)); - wait({10s, 500ms}, [&](auto& ctx) { WAIT_EXPECT_EQ(ctx, 5, nodes[0]->getFinalChain()->last_block_number()); }); + wait({10s, 500ms}, [&](auto& ctx) { WAIT_EXPECT_EQ(ctx, 5, nodes[0]->getFinalChain()->lastBlockNumber()); }); auto block = eth_json_rpc->eth_getBlockByNumber("0x4", false); EXPECT_EQ(4, dev::jsToU256(block["number"].asString())); @@ -250,7 +250,7 @@ TEST_F(RPCTest, eip_1898) { EXPECT_EQ(eth_json_rpc->eth_getBalance(from, "0x0"), eth_json_rpc->eth_getBalance(from, zero_block)); Json::Value genesis_block(Json::objectValue); - genesis_block["blockHash"] = dev::toJS(*nodes.front()->getFinalChain()->block_hash(0)); + genesis_block["blockHash"] = dev::toJS(*nodes.front()->getFinalChain()->blockHash(0)); EXPECT_EQ(eth_json_rpc->eth_getBalance(from, "0x0"), eth_json_rpc->eth_getBalance(from, genesis_block)); } diff --git a/tests/state_api_test.cpp b/tests/state_api_test.cpp index 723ac88150..15d562d065 100644 --- a/tests/state_api_test.cpp +++ b/tests/state_api_test.cpp @@ -80,12 +80,12 @@ TEST_F(StateAPITest, DISABLED_dpos_integration) { // exp_q_acc_res[addr].is_eligible = true; // } // string meta = "at block " + to_string(curr_blk); - // EXPECT_EQ(addr_1_bal_expected, 
SUT.get_account(curr_blk, make_addr(1))->balance) << meta; + // EXPECT_EQ(addr_1_bal_expected, SUT.getAccount(curr_blk, make_addr(1))->balance) << meta; // for (auto const& addr : expected_eligible_set) { - // EXPECT_TRUE(SUT.dpos_is_eligible(curr_blk, addr)) << meta; - // EXPECT_EQ(SUT.dpos_eligible_vote_count(curr_blk, addr), 1) << meta; + // EXPECT_TRUE(SUT.dposIsEligible(curr_blk, addr)) << meta; + // EXPECT_EQ(SUT.dposEligibleVoteCount(curr_blk, addr), 1) << meta; // } - // EXPECT_EQ(SUT.dpos_eligible_total_vote_count(curr_blk), expected_eligible_set.size()) << meta; + // EXPECT_EQ(SUT.dposEligibleTotalVoteCount(curr_blk), expected_eligible_set.size()) << meta; // // auto q_res = SUT.dpos_query(curr_blk, q); // EXPECT_EQ(q_res.eligible_count, expected_eligible_set.size()) << meta; // for (auto& [addr, res_exp] : exp_q_acc_res) { @@ -222,8 +222,7 @@ TEST_F(StateAPITest, slashing) { auto nodes = launch_nodes(node_cfgs); auto node = nodes.begin()->get(); auto node_cfg = node_cfgs.begin(); - ASSERT_EQ(true, - node->getFinalChain()->dpos_is_eligible(node->getFinalChain()->last_block_number(), node->getAddress())); + ASSERT_EQ(true, node->getFinalChain()->dposIsEligible(node->getFinalChain()->lastBlockNumber(), node->getAddress())); // Generate 2 cert votes for 2 different blocks auto vote_a = node->getVoteManager()->generateVote(blk_hash_t{1}, PbftVoteTypes::cert_vote, 1, 1, 3); @@ -236,9 +235,8 @@ TEST_F(StateAPITest, slashing) { // After few blocks malicious validator should be jailed ASSERT_HAPPENS({10s, 100ms}, [&](auto& ctx) { - WAIT_EXPECT_EQ( - ctx, false, - node->getFinalChain()->dpos_is_eligible(node->getFinalChain()->last_block_number(), node->getAddress())) + WAIT_EXPECT_EQ(ctx, false, + node->getFinalChain()->dposIsEligible(node->getFinalChain()->lastBlockNumber(), node->getAddress())) }); // Option 2: more sophisticated and longer test @@ -246,7 +244,7 @@ TEST_F(StateAPITest, slashing) { // ASSERT_HAPPENS({5s, 100ms}, [&](auto& ctx) { // 
WAIT_EXPECT_EQ( // ctx, true, - // node->getFinalChain()->dpos_is_eligible(node->getFinalChain()->last_block_number(), node->getAddress())) + // node->getFinalChain()->dposIsEligible(node->getFinalChain()->lastBlockNumber(), node->getAddress())) // }); } diff --git a/tests/test_util/src/node_dag_creation_fixture.cpp b/tests/test_util/src/node_dag_creation_fixture.cpp index 729ef730db..5adc2624f9 100644 --- a/tests/test_util/src/node_dag_creation_fixture.cpp +++ b/tests/test_util/src/node_dag_creation_fixture.cpp @@ -46,14 +46,14 @@ void NodeDagCreationFixture::deployContract() { WAIT_EXPECT_TRUE(ctx, node->getDB()->transactionFinalized(trx->getHash())); if (!contract_addr) { - auto receipt = node->getFinalChain()->transaction_receipt(trx->getHash()); + auto receipt = node->getFinalChain()->transactionReceipt(trx->getHash()); WAIT_EXPECT_TRUE(ctx, receipt.has_value()); WAIT_EXPECT_TRUE(ctx, receipt->new_contract_address.has_value()); contract_addr = receipt->new_contract_address; } - auto r = node->getFinalChain()->transaction_receipt(trx->getHash()); + auto r = node->getFinalChain()->transactionReceipt(trx->getHash()); - WAIT_EXPECT_TRUE(ctx, !node->getFinalChain()->get_code(contract_addr.value()).empty()); + WAIT_EXPECT_TRUE(ctx, !node->getFinalChain()->getCode(contract_addr.value()).empty()); }); ASSERT_TRUE(contract_addr.has_value()); std::cout << "Contract deployed: " << contract_addr.value() << std::endl; diff --git a/tests/test_util/src/test_util.cpp b/tests/test_util/src/test_util.cpp index c0eda415de..9650568676 100644 --- a/tests/test_util/src/test_util.cpp +++ b/tests/test_util/src/test_util.cpp @@ -53,8 +53,8 @@ TransactionClient::Context TransactionClient::process(const std::shared_ptrgetHash(); if (wait_executed) { - auto success = wait(wait_opts_, - [&, this](auto& ctx) { ctx.fail_if(!node_->getFinalChain()->transaction_location(trx_hash)); }); + auto success = + wait(wait_opts_, [&, this](auto& ctx) { 
ctx.fail_if(!node_->getFinalChain()->transactionLocation(trx_hash)); }); if (success) { ctx.stage = TransactionStage::executed; } diff --git a/tests/transaction_test.cpp b/tests/transaction_test.cpp index 897ba5a63f..73bbd45a5d 100644 --- a/tests/transaction_test.cpp +++ b/tests/transaction_test.cpp @@ -8,7 +8,7 @@ #include "common/static_init.hpp" #include "config/genesis.hpp" -#include "final_chain/final_chain_impl.hpp" +#include "final_chain/final_chain.hpp" #include "final_chain/trie_common.hpp" #include "logger/logger.hpp" #include "pbft/pbft_manager.hpp" @@ -122,7 +122,7 @@ TEST_F(TransactionTest, sig) { TEST_F(TransactionTest, verifiers) { auto db = std::make_shared(data_dir); auto cfg = node_cfgs.front(); - auto final_chain = std::make_shared(db, cfg, addr_t{}); + auto final_chain = std::make_shared(db, cfg, addr_t{}); TransactionManager trx_mgr(cfg, db, final_chain, addr_t()); // insert trx std::thread t([&trx_mgr]() { @@ -143,7 +143,7 @@ TEST_F(TransactionTest, verifiers) { TEST_F(TransactionTest, transaction_limit) { auto db = std::make_shared(data_dir); auto cfg = node_cfgs.front(); - TransactionManager trx_mgr(cfg, db, std::make_shared(db, cfg, addr_t{}), addr_t()); + TransactionManager trx_mgr(cfg, db, std::make_shared(db, cfg, addr_t{}), addr_t()); // insert trx std::thread t([&trx_mgr]() { for (auto const& t : *g_signed_trx_samples) { @@ -164,7 +164,7 @@ TEST_F(TransactionTest, transaction_limit) { TEST_F(TransactionTest, prepare_signed_trx_for_propose) { auto db = std::make_shared(data_dir); auto cfg = node_cfgs.front(); - TransactionManager trx_mgr(cfg, db, std::make_shared(db, cfg, addr_t{}), addr_t()); + TransactionManager trx_mgr(cfg, db, std::make_shared(db, cfg, addr_t{}), addr_t()); std::thread insertTrx([&trx_mgr]() { for (auto const& t : *g_signed_trx_samples) { trx_mgr.insertTransaction(t); @@ -194,7 +194,7 @@ TEST_F(TransactionTest, prepare_signed_trx_for_propose) { TEST_F(TransactionTest, transaction_low_nonce) { auto db = 
std::make_shared(data_dir); auto cfg = node_cfgs.front(); - auto final_chain = std::make_shared(db, cfg, addr_t{}); + auto final_chain = std::make_shared(db, cfg, addr_t{}); TransactionManager trx_mgr(cfg, db, final_chain, addr_t()); const auto& trx_2 = g_signed_trx_samples[1]; auto& trx_1 = g_signed_trx_samples[0]; @@ -234,7 +234,7 @@ TEST_F(TransactionTest, transaction_low_nonce) { // Verify insufficient balance transaction is detected in verification auto trx_insufficient_balance = - std::make_shared(3, final_chain->get_account(dev::toAddress(g_secret))->balance + 1, 0, 100000, + std::make_shared(3, final_chain->getAccount(dev::toAddress(g_secret))->balance + 1, 0, 100000, dev::bytes(), g_secret, addr_t::random()); result = trx_mgr.verifyTransaction(trx_insufficient_balance); EXPECT_EQ(result.first, true); @@ -266,7 +266,7 @@ TEST_F(TransactionTest, transaction_low_nonce) { TEST_F(TransactionTest, transaction_concurrency) { auto db = std::make_shared(data_dir); auto cfg = node_cfgs.front(); - TransactionManager trx_mgr(cfg, db, std::make_shared(db, cfg, addr_t{}), addr_t()); + TransactionManager trx_mgr(cfg, db, std::make_shared(db, cfg, addr_t{}), addr_t()); bool stopped = false; // Insert transactions to memory pool and keep trying to insert them again on separate thread, it should always fail std::thread insertTrx([&trx_mgr, &stopped]() { @@ -650,7 +650,7 @@ TEST_F(TransactionTest, typed_deserialization) { TEST_F(TransactionTest, zero_gas_price_limit) { auto db = std::make_shared(data_dir); auto cfg = node_cfgs.front(); - auto final_chain = std::make_shared(db, cfg, addr_t{}); + auto final_chain = std::make_shared(db, cfg, addr_t{}); TransactionManager trx_mgr(cfg, db, final_chain, addr_t()); auto make_trx_with_price = [](uint64_t price) { return std::make_shared(1, 100, price, 100000, dev::bytes(), g_secret, addr_t::random()); @@ -673,7 +673,7 @@ TEST_F(TransactionTest, gas_price_limiting) { auto db = std::make_shared(data_dir); auto cfg = node_cfgs.front(); 
auto minimum_price = cfg.genesis.gas_price.minimum_price = 10; - auto final_chain = std::make_shared(db, cfg, addr_t{}); + auto final_chain = std::make_shared(db, cfg, addr_t{}); TransactionManager trx_mgr(cfg, db, final_chain, addr_t()); auto make_trx_with_price = [](uint64_t price) { return std::make_shared(1, 100, price, 100000, dev::bytes(), g_secret, addr_t::random()); From 8bdefadc47b4c0fa484d0d1a10a632aa53df60f2 Mon Sep 17 00:00:00 2001 From: kstdl Date: Mon, 19 Aug 2024 12:57:17 +0200 Subject: [PATCH 010/105] chore: use values from config for classes constructing --- .../include/dag/dag_block_proposer.hpp | 6 +-- .../consensus/include/dag/dag_manager.hpp | 15 ++---- .../consensus/include/pbft/pbft_manager.hpp | 9 ++-- .../slashing_manager/slashing_manager.hpp | 6 +-- .../include/vote_manager/vote_manager.hpp | 3 +- .../consensus/src/dag/dag_block_proposer.cpp | 24 ++++----- .../consensus/src/dag/dag_manager.cpp | 50 +++++++++---------- .../consensus/src/pbft/pbft_manager.cpp | 15 +++--- .../src/slashing_manager/slashing_manager.cpp | 9 ++-- .../src/vote_manager/vote_manager.cpp | 12 ++--- .../latest/status_packet_handler.cpp | 2 +- libraries/core_libs/node/src/node.cpp | 18 +++---- tests/dag_test.cpp | 34 ++++++------- tests/full_node_test.cpp | 2 +- tests/state_api_test.cpp | 4 +- 15 files changed, 91 insertions(+), 118 deletions(-) diff --git a/libraries/core_libs/consensus/include/dag/dag_block_proposer.hpp b/libraries/core_libs/consensus/include/dag/dag_block_proposer.hpp index 0b79a617ed..6745a603b5 100644 --- a/libraries/core_libs/consensus/include/dag/dag_block_proposer.hpp +++ b/libraries/core_libs/consensus/include/dag/dag_block_proposer.hpp @@ -35,11 +35,9 @@ class FinalChain; */ class DagBlockProposer { public: - DagBlockProposer(const DagBlockProposerConfig& bp_config, std::shared_ptr dag_mgr, + DagBlockProposer(const FullNodeConfig& config, std::shared_ptr dag_mgr, std::shared_ptr trx_mgr, std::shared_ptr final_chain, - std::shared_ptr db, 
std::shared_ptr key_manager, addr_t node_addr, - secret_t node_sk, vrf_wrapper::vrf_sk_t vrf_sk, uint64_t pbft_gas_limit, uint64_t dag_gas_limit, - const state_api::Config& state_config); + std::shared_ptr db, std::shared_ptr key_manager); ~DagBlockProposer() { stop(); } DagBlockProposer(const DagBlockProposer&) = delete; DagBlockProposer(DagBlockProposer&&) = delete; diff --git a/libraries/core_libs/consensus/include/dag/dag_manager.hpp b/libraries/core_libs/consensus/include/dag/dag_manager.hpp index d6683848a1..8c9c59200b 100644 --- a/libraries/core_libs/consensus/include/dag/dag_manager.hpp +++ b/libraries/core_libs/consensus/include/dag/dag_manager.hpp @@ -44,13 +44,9 @@ class DagManager : public std::enable_shared_from_this { MissingTip }; - explicit DagManager(const DagBlock &dag_genesis_block, addr_t node_addr, const SortitionConfig &sortition_config, - const DagConfig &dag_config, std::shared_ptr trx_mgr, + explicit DagManager(const FullNodeConfig &config, addr_t node_addr, std::shared_ptr trx_mgr, std::shared_ptr pbft_chain, std::shared_ptr final_chain, - std::shared_ptr db, std::shared_ptr key_manager, uint64_t pbft_gas_limit, - const state_api::Config &state_config, bool is_light_node = false, - uint64_t light_node_history = 0, uint32_t max_levels_per_period = kMaxLevelsPerPeriod, - uint32_t dag_expiry_limit = kDagExpiryLevelLimit); + std::shared_ptr db, std::shared_ptr key_manager); DagManager(const DagManager &) = delete; DagManager(DagManager &&) = delete; @@ -123,9 +119,6 @@ class DagManager : public std::enable_shared_from_this { */ uint setDagBlockOrder(blk_hash_t const &anchor, PbftPeriod period, vec_blk_t const &dag_order); - uint64_t getLightNodeHistory() const { return light_node_history_; } - bool isLightNode() const { return is_light_node_; } - std::optional>> getLatestPivotAndTips() const; /** @@ -237,7 +230,7 @@ class DagManager : public std::enable_shared_from_this { * @brief Clears light node history * */ - void clearLightNodeHistory(); 
+ void clearLightNodeHistory(uint64_t light_node_history); private: void recoverDag(); @@ -267,8 +260,6 @@ class DagManager : public std::enable_shared_from_this { SortitionParamsManager sortition_params_manager_; const DagConfig dag_config_; const std::shared_ptr genesis_block_; - const bool is_light_node_ = false; - const uint64_t light_node_history_ = 0; const uint32_t max_levels_per_period_; const uint32_t dag_expiry_limit_; // Any non finalized dag block with a level smaller by // dag_expiry_limit_ than the current period anchor level is considered diff --git a/libraries/core_libs/consensus/include/pbft/pbft_manager.hpp b/libraries/core_libs/consensus/include/pbft/pbft_manager.hpp index 5d3c73f973..26dd980255 100644 --- a/libraries/core_libs/consensus/include/pbft/pbft_manager.hpp +++ b/libraries/core_libs/consensus/include/pbft/pbft_manager.hpp @@ -55,11 +55,10 @@ class PbftManager { public: using time_point = std::chrono::system_clock::time_point; - PbftManager(const GenesisConfig &conf, addr_t node_addr, std::shared_ptr db, - std::shared_ptr pbft_chain, std::shared_ptr vote_mgr, - std::shared_ptr dag_mgr, std::shared_ptr trx_mgr, - std::shared_ptr final_chain, - std::shared_ptr pillar_chain_mgr, secret_t node_sk); + PbftManager(const FullNodeConfig &conf, std::shared_ptr db, std::shared_ptr pbft_chain, + std::shared_ptr vote_mgr, std::shared_ptr dag_mgr, + std::shared_ptr trx_mgr, std::shared_ptr final_chain, + std::shared_ptr pillar_chain_mgr); ~PbftManager(); PbftManager(const PbftManager &) = delete; PbftManager(PbftManager &&) = delete; diff --git a/libraries/core_libs/consensus/include/slashing_manager/slashing_manager.hpp b/libraries/core_libs/consensus/include/slashing_manager/slashing_manager.hpp index f49b4a7136..87787f3879 100644 --- a/libraries/core_libs/consensus/include/slashing_manager/slashing_manager.hpp +++ b/libraries/core_libs/consensus/include/slashing_manager/slashing_manager.hpp @@ -7,8 +7,8 @@ namespace taraxa { class SlashingManager 
{ public: - SlashingManager(std::shared_ptr final_chain, std::shared_ptr trx_manager, - std::shared_ptr gas_pricer, const FullNodeConfig &config, secret_t node_sk); + SlashingManager(const FullNodeConfig &config, std::shared_ptr final_chain, + std::shared_ptr trx_manager, std::shared_ptr gas_pricer); SlashingManager(const SlashingManager &) = delete; SlashingManager(SlashingManager &&) = delete; SlashingManager &operator=(const SlashingManager &) = delete; @@ -24,7 +24,7 @@ class SlashingManager { // Already processed double voting proofs ExpirationCache double_voting_proofs_; - const FullNodeConfig kConfig; + const FullNodeConfig &kConfig; const addr_t kAddress; const secret_t kPrivateKey; }; diff --git a/libraries/core_libs/consensus/include/vote_manager/vote_manager.hpp b/libraries/core_libs/consensus/include/vote_manager/vote_manager.hpp index 0272600dcc..45d516eb9c 100644 --- a/libraries/core_libs/consensus/include/vote_manager/vote_manager.hpp +++ b/libraries/core_libs/consensus/include/vote_manager/vote_manager.hpp @@ -26,8 +26,7 @@ class TaraxaPeer; */ class VoteManager { public: - VoteManager(const addr_t& node_addr, const PbftConfig& pbft_config, const secret_t& node_sk, - const vrf_wrapper::vrf_sk_t& vrf_sk, std::shared_ptr db, std::shared_ptr pbft_chain, + VoteManager(const FullNodeConfig& config, std::shared_ptr db, std::shared_ptr pbft_chain, std::shared_ptr final_chain, std::shared_ptr key_manager, std::shared_ptr slashing_manager); ~VoteManager() = default; diff --git a/libraries/core_libs/consensus/src/dag/dag_block_proposer.cpp b/libraries/core_libs/consensus/src/dag/dag_block_proposer.cpp index dd1ccc3893..9f61026790 100644 --- a/libraries/core_libs/consensus/src/dag/dag_block_proposer.cpp +++ b/libraries/core_libs/consensus/src/dag/dag_block_proposer.cpp @@ -13,27 +13,27 @@ namespace taraxa { using namespace vdf_sortition; -DagBlockProposer::DagBlockProposer(const DagBlockProposerConfig& bp_config, std::shared_ptr dag_mgr, 
+DagBlockProposer::DagBlockProposer(const FullNodeConfig& config, std::shared_ptr dag_mgr, std::shared_ptr trx_mgr, std::shared_ptr final_chain, std::shared_ptr db, - std::shared_ptr key_manager, addr_t node_addr, secret_t node_sk, - vrf_wrapper::vrf_sk_t vrf_sk, uint64_t pbft_gas_limit, uint64_t dag_gas_limit, - const state_api::Config& state_config) - : bp_config_(bp_config), + std::shared_ptr key_manager) + : bp_config_(config.genesis.dag.block_proposer), total_trx_shards_(std::max(bp_config_.shard, uint16_t(1))), dag_mgr_(std::move(dag_mgr)), trx_mgr_(std::move(trx_mgr)), final_chain_(std::move(final_chain)), key_manager_(std::move(key_manager)), db_(std::move(db)), - node_addr_(node_addr), - node_sk_(std::move(node_sk)), - vrf_sk_(std::move(vrf_sk)), + node_addr_(dev::toAddress(config.node_secret)), + node_sk_(config.node_secret), + vrf_sk_(config.vrf_secret), vrf_pk_(vrf_wrapper::getVrfPublicKey(vrf_sk_)), - kPbftGasLimit(pbft_gas_limit), - kDagGasLimit(dag_gas_limit), - kHardforks(state_config.hardforks), - kValidatorMaxVote(state_config.dpos.validator_maximum_stake / state_config.dpos.vote_eligibility_balance_step) { + kPbftGasLimit(config.genesis.pbft.gas_limit), + kDagGasLimit(config.genesis.dag.gas_limit), + kHardforks(config.genesis.state.hardforks), + kValidatorMaxVote(config.genesis.state.dpos.validator_maximum_stake / + config.genesis.state.dpos.vote_eligibility_balance_step) { + const auto& node_addr = node_addr_; LOG_OBJECTS_CREATE("DAG_PROPOSER"); // Add a random component in proposing stale blocks so that not all nodes propose stale blocks at the same time diff --git a/libraries/core_libs/consensus/src/dag/dag_manager.cpp b/libraries/core_libs/consensus/src/dag/dag_manager.cpp index 03575dd742..a381965515 100644 --- a/libraries/core_libs/consensus/src/dag/dag_manager.cpp +++ b/libraries/core_libs/consensus/src/dag/dag_manager.cpp @@ -3,7 +3,6 @@ #include #include -#include #include #include #include @@ -15,33 +14,30 @@ #include 
"transaction/transaction_manager.hpp" namespace taraxa { -DagManager::DagManager(const DagBlock &dag_genesis_block, addr_t node_addr, const SortitionConfig &sortition_config, - const DagConfig &dag_config, std::shared_ptr trx_mgr, + +DagManager::DagManager(const FullNodeConfig &config, addr_t node_addr, std::shared_ptr trx_mgr, std::shared_ptr pbft_chain, std::shared_ptr final_chain, - std::shared_ptr db, std::shared_ptr key_manager, uint64_t pbft_gas_limit, - const state_api::Config &state_config, bool is_light_node, uint64_t light_node_history, - uint32_t max_levels_per_period, uint32_t dag_expiry_limit) try + std::shared_ptr db, std::shared_ptr key_manager) try : max_level_(db->getLastBlocksLevel()), - pivot_tree_(std::make_shared(dag_genesis_block.getHash(), node_addr)), - total_dag_(std::make_shared(dag_genesis_block.getHash(), node_addr)), + pivot_tree_(std::make_shared(config.genesis.dag_genesis_block.getHash(), node_addr)), + total_dag_(std::make_shared(config.genesis.dag_genesis_block.getHash(), node_addr)), trx_mgr_(std::move(trx_mgr)), pbft_chain_(std::move(pbft_chain)), db_(std::move(db)), key_manager_(std::move(key_manager)), - anchor_(dag_genesis_block.getHash()), + anchor_(config.genesis.dag_genesis_block.getHash()), period_(0), - sortition_params_manager_(node_addr, sortition_config, db_), - dag_config_(dag_config), - genesis_block_(std::make_shared(dag_genesis_block)), - is_light_node_(is_light_node), - light_node_history_(light_node_history), - max_levels_per_period_(max_levels_per_period), - dag_expiry_limit_(dag_expiry_limit), + sortition_params_manager_(node_addr, config.genesis.sortition, db_), + dag_config_(config.genesis.dag), + genesis_block_(std::make_shared(config.genesis.dag_genesis_block)), + max_levels_per_period_(config.max_levels_per_period), + dag_expiry_limit_(config.dag_expiry_limit), seen_blocks_(cache_max_size_, cache_delete_step_), final_chain_(std::move(final_chain)), - kPbftGasLimit(pbft_gas_limit), - 
kHardforks(state_config.hardforks), - kValidatorMaxVote(state_config.dpos.validator_maximum_stake / state_config.dpos.vote_eligibility_balance_step) { + kPbftGasLimit(config.genesis.pbft.gas_limit), + kHardforks(config.genesis.state.hardforks), + kValidatorMaxVote(config.genesis.state.dpos.validator_maximum_stake / + config.genesis.state.dpos.vote_eligibility_balance_step) { LOG_OBJECTS_CREATE("DAGMGR"); if (auto ret = getLatestPivotAndTips(); ret) { frontier_.pivot = ret->first; @@ -50,13 +46,13 @@ DagManager::DagManager(const DagBlock &dag_genesis_block, addr_t node_addr, cons } } // Set DAG level proposal period map - if (!db_->getProposalPeriodForDagLevel(max_levels_per_period)) { + if (!db_->getProposalPeriodForDagLevel(max_levels_per_period_)) { // Node start from scratch - db_->saveProposalPeriodDagLevelsMap(max_levels_per_period, 0); + db_->saveProposalPeriodDagLevelsMap(max_levels_per_period_, 0); } recoverDag(); - if (is_light_node_) { - clearLightNodeHistory(); + if (config.is_light_node) { + clearLightNodeHistory(config.light_node_history); } } catch (std::exception &e) { std::cerr << e.what() << std::endl; @@ -284,9 +280,9 @@ std::vector DagManager::getDagBlockOrder(blk_hash_t const &anchor, P return blk_orders; } -void DagManager::clearLightNodeHistory() { +void DagManager::clearLightNodeHistory(uint64_t light_node_history) { bool dag_expiry_level_condition = dag_expiry_level_ > max_levels_per_period_ + 1; - bool period_over_history_condition = period_ > light_node_history_; + bool period_over_history_condition = period_ > light_node_history; if (period_over_history_condition && dag_expiry_level_condition) { const auto proposal_period = db_->getProposalPeriodForDagLevel(dag_expiry_level_ - max_levels_per_period_ - 1); assert(proposal_period); @@ -294,8 +290,8 @@ void DagManager::clearLightNodeHistory() { const uint64_t start = 0; // This prevents deleting any data needed for dag blocks proposal period, we only delete periods for the expired dag // 
blocks - const uint64_t end = std::min(period_ - light_node_history_, *proposal_period); - LOG(log_tr_) << "period_ - light_node_history_ " << period_ - light_node_history_; + const uint64_t end = std::min(period_ - light_node_history, *proposal_period); + LOG(log_tr_) << "period_ - light_node_history_ " << period_ - light_node_history; LOG(log_tr_) << "dag_expiry_level - max_levels_per_period_ - 1: " << dag_expiry_level_ - max_levels_per_period_ - 1 << " *proposal_period " << *proposal_period; LOG(log_tr_) << "Delete period history from: " << start << " to " << end; diff --git a/libraries/core_libs/consensus/src/pbft/pbft_manager.cpp b/libraries/core_libs/consensus/src/pbft/pbft_manager.cpp index d4b41dcfb7..646c65ee13 100644 --- a/libraries/core_libs/consensus/src/pbft/pbft_manager.cpp +++ b/libraries/core_libs/consensus/src/pbft/pbft_manager.cpp @@ -22,11 +22,11 @@ using namespace std::chrono_literals; constexpr std::chrono::milliseconds kPollingIntervalMs{100}; constexpr PbftStep kMaxSteps{13}; // Need to be a odd number -PbftManager::PbftManager(const GenesisConfig &conf, addr_t node_addr, std::shared_ptr db, +PbftManager::PbftManager(const FullNodeConfig &conf, std::shared_ptr db, std::shared_ptr pbft_chain, std::shared_ptr vote_mgr, std::shared_ptr dag_mgr, std::shared_ptr trx_mgr, std::shared_ptr final_chain, - std::shared_ptr pillar_chain_mgr, secret_t node_sk) + std::shared_ptr pillar_chain_mgr) : db_(std::move(db)), pbft_chain_(std::move(pbft_chain)), vote_mgr_(std::move(vote_mgr)), @@ -34,12 +34,13 @@ PbftManager::PbftManager(const GenesisConfig &conf, addr_t node_addr, std::share trx_mgr_(std::move(trx_mgr)), final_chain_(std::move(final_chain)), pillar_chain_mgr_(std::move(pillar_chain_mgr)), - node_addr_(std::move(node_addr)), - node_sk_(std::move(node_sk)), - kMinLambda(conf.pbft.lambda_ms), - dag_genesis_block_hash_(conf.dag_genesis_block.getHash()), - kGenesisConfig(conf), + node_addr_(dev::toAddress(conf.node_secret)), + 
node_sk_(conf.node_secret), + kMinLambda(conf.genesis.pbft.lambda_ms), + dag_genesis_block_hash_(conf.genesis.dag_genesis_block.getHash()), + kGenesisConfig(conf.genesis), proposed_blocks_(db_) { + const auto &node_addr = node_addr_; LOG_OBJECTS_CREATE("PBFT_MGR"); for (auto period = final_chain_->lastBlockNumber() + 1, curr_period = pbft_chain_->getPbftChainSize(); diff --git a/libraries/core_libs/consensus/src/slashing_manager/slashing_manager.cpp b/libraries/core_libs/consensus/src/slashing_manager/slashing_manager.cpp index 33310e6fba..e3b7ec51b6 100644 --- a/libraries/core_libs/consensus/src/slashing_manager/slashing_manager.cpp +++ b/libraries/core_libs/consensus/src/slashing_manager/slashing_manager.cpp @@ -8,16 +8,15 @@ namespace taraxa { const auto kContractAddress = addr_t("0x00000000000000000000000000000000000000EE"); -SlashingManager::SlashingManager(std::shared_ptr final_chain, - std::shared_ptr trx_manager, std::shared_ptr gas_pricer, - const FullNodeConfig &config, secret_t node_sk) +SlashingManager::SlashingManager(const FullNodeConfig &config, std::shared_ptr final_chain, + std::shared_ptr trx_manager, std::shared_ptr gas_pricer) : final_chain_(std::move(final_chain)), trx_manager_(std::move(trx_manager)), gas_pricer_(std::move(gas_pricer)), double_voting_proofs_(1000, 100), kConfig(config), - kAddress(toAddress(node_sk)), - kPrivateKey(std::move(node_sk)) {} + kAddress(toAddress(kConfig.node_secret)), + kPrivateKey(kConfig.node_secret) {} bool SlashingManager::submitDoubleVotingProof(const std::shared_ptr &vote_a, const std::shared_ptr &vote_b) { diff --git a/libraries/core_libs/consensus/src/vote_manager/vote_manager.cpp b/libraries/core_libs/consensus/src/vote_manager/vote_manager.cpp index c05467d0aa..9e20441966 100644 --- a/libraries/core_libs/consensus/src/vote_manager/vote_manager.cpp +++ b/libraries/core_libs/consensus/src/vote_manager/vote_manager.cpp @@ -13,14 +13,13 @@ namespace taraxa { -VoteManager::VoteManager(const addr_t& node_addr, 
const PbftConfig& pbft_config, const secret_t& node_sk, - const vrf_wrapper::vrf_sk_t& vrf_sk, std::shared_ptr db, +VoteManager::VoteManager(const FullNodeConfig& config, std::shared_ptr db, std::shared_ptr pbft_chain, std::shared_ptr final_chain, std::shared_ptr key_manager, std::shared_ptr slashing_manager) - : kNodeAddr(node_addr), - kPbftConfig(pbft_config), - kVrfSk(vrf_sk), - kNodeSk(node_sk), + : kNodeAddr(dev::toAddress(config.node_secret)), + kPbftConfig(config.genesis.pbft), + kVrfSk(config.vrf_secret), + kNodeSk(config.node_secret), kNodePub(dev::toPublic(kNodeSk)), db_(std::move(db)), pbft_chain_(std::move(pbft_chain)), @@ -28,6 +27,7 @@ VoteManager::VoteManager(const addr_t& node_addr, const PbftConfig& pbft_config, key_manager_(std::move(key_manager)), slashing_manager_(std::move(slashing_manager)), already_validated_votes_(1000000, 1000) { + const auto& node_addr = kNodeAddr; LOG_OBJECTS_CREATE("VOTE_MGR"); auto addVerifiedVotes = [this](const std::vector>& votes, diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/status_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/status_packet_handler.cpp index 855df89d43..8ab85c0b84 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/status_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/status_packet_handler.cpp @@ -172,7 +172,7 @@ bool StatusPacketHandler::sendStatus(const dev::p2p::NodeID& node_id, bool initi std::move(dev::RLPStream(kInitialStatusPacketItemsCount) << kConf.genesis.chain_id << dag_max_level << kGenesisHash << pbft_chain_size << pbft_syncing_state_->isPbftSyncing() << pbft_round << TARAXA_MAJOR_VERSION << TARAXA_MINOR_VERSION - << TARAXA_PATCH_VERSION << dag_mgr_->isLightNode() << dag_mgr_->getLightNodeHistory())); + << TARAXA_PATCH_VERSION << kConf.is_light_node << kConf.light_node_history)); } else { success = sealAndSend( node_id, StatusPacket, diff --git 
a/libraries/core_libs/node/src/node.cpp b/libraries/core_libs/node/src/node.cpp index 864afa988d..a0e5751b69 100644 --- a/libraries/core_libs/node/src/node.cpp +++ b/libraries/core_libs/node/src/node.cpp @@ -130,20 +130,14 @@ void FullNode::init() { } pbft_chain_ = std::make_shared(node_addr, db_); - dag_mgr_ = std::make_shared( - conf_.genesis.dag_genesis_block, node_addr, conf_.genesis.sortition, conf_.genesis.dag, trx_mgr_, pbft_chain_, - final_chain_, db_, key_manager_, conf_.genesis.pbft.gas_limit, conf_.genesis.state, conf_.is_light_node, - conf_.light_node_history, conf_.max_levels_per_period, conf_.dag_expiry_limit); - auto slashing_manager = std::make_shared(final_chain_, trx_mgr_, gas_pricer_, conf_, kp_.secret()); - vote_mgr_ = std::make_shared(node_addr, conf_.genesis.pbft, kp_.secret(), conf_.vrf_secret, db_, - pbft_chain_, final_chain_, key_manager_, slashing_manager); + dag_mgr_ = std::make_shared(conf_, node_addr, trx_mgr_, pbft_chain_, final_chain_, db_, key_manager_); + auto slashing_manager = std::make_shared(conf_, final_chain_, trx_mgr_, gas_pricer_); + vote_mgr_ = std::make_shared(conf_, db_, pbft_chain_, final_chain_, key_manager_, slashing_manager); pillar_chain_mgr_ = std::make_shared(conf_.genesis.state.hardforks.ficus_hf, db_, final_chain_, key_manager_, node_addr); - pbft_mgr_ = std::make_shared(conf_.genesis, node_addr, db_, pbft_chain_, vote_mgr_, dag_mgr_, trx_mgr_, - final_chain_, pillar_chain_mgr_, kp_.secret()); - dag_block_proposer_ = std::make_shared( - conf_.genesis.dag.block_proposer, dag_mgr_, trx_mgr_, final_chain_, db_, key_manager_, node_addr, getSecretKey(), - getVrfSecretKey(), conf_.genesis.pbft.gas_limit, conf_.genesis.dag.gas_limit, conf_.genesis.state); + pbft_mgr_ = std::make_shared(conf_, db_, pbft_chain_, vote_mgr_, dag_mgr_, trx_mgr_, final_chain_, + pillar_chain_mgr_); + dag_block_proposer_ = std::make_shared(conf_, dag_mgr_, trx_mgr_, final_chain_, db_, key_manager_); network_ = std::make_shared(conf_, 
genesis_hash, conf_.net_file_path().string(), kp_, db_, pbft_mgr_, pbft_chain_, diff --git a/tests/dag_test.cpp b/tests/dag_test.cpp index 9a74fad8dc..6885b82624 100644 --- a/tests/dag_test.cpp +++ b/tests/dag_test.cpp @@ -135,9 +135,8 @@ TEST_F(DagTest, compute_epoch) { auto trx_mgr = std::make_shared(FullNodeConfig(), db_ptr, nullptr, addr_t()); auto pbft_chain = std::make_shared(addr_t(), db_ptr); const blk_hash_t GENESIS = node_cfgs[0].genesis.dag_genesis_block.getHash(); - auto mgr = std::make_shared(node_cfgs[0].genesis.dag_genesis_block, addr_t(), - node_cfgs[0].genesis.sortition, node_cfgs[0].genesis.dag, trx_mgr, pbft_chain, - nullptr, db_ptr, nullptr, 100000, node_cfgs[0].genesis.state); + node_cfgs[0].genesis.pbft.gas_limit = 100000; + auto mgr = std::make_shared(node_cfgs[0], addr_t(), trx_mgr, pbft_chain, nullptr, db_ptr, nullptr); DagBlock blkA(GENESIS, 1, {}, {trx_hash_t(2)}, sig_t(1), blk_hash_t(2), addr_t(1)); DagBlock blkB(GENESIS, 1, {}, {trx_hash_t(3), trx_hash_t(4)}, sig_t(1), blk_hash_t(3), addr_t(1)); @@ -228,9 +227,10 @@ TEST_F(DagTest, dag_expiry) { auto trx_mgr = std::make_shared(FullNodeConfig(), db_ptr, nullptr, addr_t()); auto pbft_chain = std::make_shared(addr_t(), db_ptr); const blk_hash_t GENESIS = node_cfgs[0].genesis.dag_genesis_block.getHash(); - auto mgr = std::make_shared( - node_cfgs[0].genesis.dag_genesis_block, addr_t(), node_cfgs[0].genesis.sortition, node_cfgs[0].genesis.dag, - trx_mgr, pbft_chain, nullptr, db_ptr, nullptr, 100000, node_cfgs[0].genesis.state, false, 0, 3, EXPIRY_LIMIT); + node_cfgs[0].max_levels_per_period = 3; + node_cfgs[0].dag_expiry_limit = EXPIRY_LIMIT; + node_cfgs[0].genesis.pbft.gas_limit = 100000; + auto mgr = std::make_shared(node_cfgs[0], addr_t(), trx_mgr, pbft_chain, nullptr, db_ptr, nullptr); DagBlock blkA(GENESIS, 1, {}, {trx_hash_t(2)}, sig_t(1), blk_hash_t(2), addr_t(1)); DagBlock blkB(GENESIS, 1, {}, {trx_hash_t(3), trx_hash_t(4)}, sig_t(1), blk_hash_t(3), addr_t(1)); @@ -304,9 +304,8 @@ 
TEST_F(DagTest, receive_block_in_order) { auto pbft_chain = std::make_shared(addr_t(), db_ptr); auto trx_mgr = std::make_shared(FullNodeConfig(), db_ptr, nullptr, addr_t()); const blk_hash_t GENESIS = node_cfgs[0].genesis.dag_genesis_block.getHash(); - auto mgr = std::make_shared(node_cfgs[0].genesis.dag_genesis_block, addr_t(), - node_cfgs[0].genesis.sortition, node_cfgs[0].genesis.dag, trx_mgr, pbft_chain, - nullptr, db_ptr, nullptr, 100000, node_cfgs[0].genesis.state); + node_cfgs[0].genesis.pbft.gas_limit = 100000; + auto mgr = std::make_shared(node_cfgs[0], addr_t(), trx_mgr, pbft_chain, nullptr, db_ptr, nullptr); DagBlock blk1(GENESIS, 1, {}, {}, sig_t(777), blk_hash_t(1), addr_t(15)); DagBlock blk2(blk_hash_t(1), 2, {}, {}, sig_t(777), blk_hash_t(2), addr_t(15)); @@ -336,9 +335,8 @@ TEST_F(DagTest, compute_epoch_2) { auto pbft_chain = std::make_shared(addr_t(), db_ptr); auto trx_mgr = std::make_shared(FullNodeConfig(), db_ptr, nullptr, addr_t()); const blk_hash_t GENESIS = node_cfgs[0].genesis.dag_genesis_block.getHash(); - auto mgr = std::make_shared(node_cfgs[0].genesis.dag_genesis_block, addr_t(), - node_cfgs[0].genesis.sortition, node_cfgs[0].genesis.dag, trx_mgr, pbft_chain, - nullptr, db_ptr, nullptr, 100000, node_cfgs[0].genesis.state); + node_cfgs[0].genesis.pbft.gas_limit = 100000; + auto mgr = std::make_shared(node_cfgs[0], addr_t(), trx_mgr, pbft_chain, nullptr, db_ptr, nullptr); DagBlock blkA(GENESIS, 1, {}, {trx_hash_t(2)}, sig_t(1), blk_hash_t(2), addr_t(1)); DagBlock blkB(GENESIS, 1, {}, {trx_hash_t(3), trx_hash_t(4)}, sig_t(1), blk_hash_t(3), addr_t(1)); @@ -419,9 +417,8 @@ TEST_F(DagTest, get_latest_pivot_tips) { auto trx_mgr = std::make_shared(FullNodeConfig(), db_ptr, nullptr, addr_t()); auto pbft_chain = std::make_shared(addr_t(), db_ptr); const blk_hash_t GENESIS = node_cfgs[0].genesis.dag_genesis_block.getHash(); - auto mgr = std::make_shared(node_cfgs[0].genesis.dag_genesis_block, addr_t(), - node_cfgs[0].genesis.sortition, 
node_cfgs[0].genesis.dag, trx_mgr, pbft_chain, - nullptr, db_ptr, nullptr, 100000, node_cfgs[0].genesis.state); + node_cfgs[0].genesis.pbft.gas_limit = 100000; + auto mgr = std::make_shared(node_cfgs[0], addr_t(), trx_mgr, pbft_chain, nullptr, db_ptr, nullptr); DagBlock blk2(GENESIS, 1, {}, {}, sig_t(1), blk_hash_t(2), addr_t(15)); DagBlock blk3(blk_hash_t(2), 2, {}, {}, sig_t(1), blk_hash_t(3), addr_t(15)); @@ -446,9 +443,8 @@ TEST_F(DagTest, initial_pivot) { auto db_ptr = std::make_shared(data_dir / "db"); auto trx_mgr = std::make_shared(FullNodeConfig(), db_ptr, nullptr, addr_t()); auto pbft_chain = std::make_shared(addr_t(), db_ptr); - auto mgr = std::make_shared(node_cfgs[0].genesis.dag_genesis_block, addr_t(), - node_cfgs[0].genesis.sortition, node_cfgs[0].genesis.dag, trx_mgr, pbft_chain, - nullptr, db_ptr, nullptr, 100000, node_cfgs[0].genesis.state); + node_cfgs[0].genesis.pbft.gas_limit = 100000; + auto mgr = std::make_shared(node_cfgs[0], addr_t(), trx_mgr, pbft_chain, nullptr, db_ptr, nullptr); auto pt = mgr->getLatestPivotAndTips(); @@ -458,7 +454,7 @@ TEST_F(DagTest, initial_pivot) { } // namespace taraxa::core_tests using namespace taraxa; -int main(int argc, char** argv) { +int main(int argc, char **argv) { static_init(); auto logging = logger::createDefaultLoggingConfig(); logging.verbosity = logger::Verbosity::Error; diff --git a/tests/full_node_test.cpp b/tests/full_node_test.cpp index 57b358e14b..8ea1d088de 100644 --- a/tests/full_node_test.cpp +++ b/tests/full_node_test.cpp @@ -1437,7 +1437,7 @@ TEST_F(FullNodeTest, light_node) { // broadcast dummy transaction nodes[1]->getTransactionManager()->insertTransaction(dummy_trx); thisThreadSleepForMilliSeconds(200); - nodes[1]->getDagManager()->clearLightNodeHistory(); + nodes[1]->getDagManager()->clearLightNodeHistory(node_cfgs[1].light_node_history); } EXPECT_HAPPENS({10s, 1s}, [&](auto &ctx) { // Verify full node and light node sync without any issues diff --git a/tests/state_api_test.cpp 
b/tests/state_api_test.cpp index 15d562d065..d2ded6b457 100644 --- a/tests/state_api_test.cpp +++ b/tests/state_api_test.cpp @@ -229,8 +229,8 @@ TEST_F(StateAPITest, slashing) { auto vote_b = node->getVoteManager()->generateVote(blk_hash_t{2}, PbftVoteTypes::cert_vote, 1, 1, 3); // Commit double voting proof - auto slashing_manager = std::make_shared(node->getFinalChain(), node->getTransactionManager(), - node->getGasPricer(), *node_cfg, node->getSecretKey()); + auto slashing_manager = std::make_shared(*node_cfg, node->getFinalChain(), + node->getTransactionManager(), node->getGasPricer()); ASSERT_EQ(true, slashing_manager->submitDoubleVotingProof(vote_a, vote_b)); // After few blocks malicious validator should be jailed From 9becf1facee13a7017ebd2eb51f02257200ca90f Mon Sep 17 00:00:00 2001 From: kstdl Date: Tue, 20 Aug 2024 15:37:39 +0200 Subject: [PATCH 011/105] chore: optimize includes and usings --- libraries/aleth/libdevcore/Base64.h | 2 +- libraries/aleth/libdevcore/Common.cpp | 1 - libraries/aleth/libdevcore/Common.h | 1 - libraries/aleth/libdevcore/CommonData.cpp | 4 +- libraries/aleth/libdevcore/CommonIO.h | 2 - libraries/aleth/libdevcore/CommonJS.h | 1 - libraries/aleth/libdevcore/Guards.cpp | 9 -- libraries/aleth/libdevcore/Guards.h | 1 - libraries/aleth/libdevcore/Log.h | 1 - libraries/aleth/libdevcore/RLP.cpp | 8 ++ libraries/aleth/libdevcore/RLP.h | 2 - libraries/aleth/libdevcore/SHA3.cpp | 2 + libraries/aleth/libdevcrypto/AES.cpp | 2 +- libraries/aleth/libdevcrypto/AES.h | 2 +- libraries/aleth/libdevcrypto/Common.cpp | 3 - libraries/aleth/libdevcrypto/CryptoPP.cpp | 2 - libraries/aleth/libp2p/All.h | 6 -- libraries/aleth/libp2p/Common.cpp | 1 + libraries/aleth/libp2p/Common.h | 3 +- libraries/aleth/libp2p/Host.cpp | 9 -- libraries/aleth/libp2p/Host.h | 7 +- libraries/aleth/libp2p/Network.cpp | 4 +- libraries/aleth/libp2p/Network.h | 7 -- libraries/aleth/libp2p/NodeTable.cpp | 96 ++++++++++--------- libraries/aleth/libp2p/NodeTable.h | 1 - 
libraries/aleth/libp2p/Peer.cpp | 2 - libraries/aleth/libp2p/RLPXFrameCoder.cpp | 11 +-- libraries/aleth/libp2p/RLPxHandshake.cpp | 18 ++-- libraries/aleth/libp2p/RLPxHandshake.h | 3 - libraries/aleth/libp2p/Session.cpp | 33 +++---- libraries/aleth/libp2p/Session.h | 4 - libraries/aleth/libp2p/UDP.cpp | 7 +- libraries/aleth/libp2p/UDP.h | 3 - libraries/aleth/libp2p/UPnP.cpp | 5 +- libraries/aleth/libp2p/UPnP.h | 1 - libraries/aleth/libp2p/taraxa.hpp | 2 +- libraries/cli/include/cli/config.hpp | 1 - libraries/cli/include/cli/config_updater.hpp | 5 +- libraries/cli/src/config.cpp | 4 +- libraries/cli/src/config_updater.cpp | 4 +- libraries/cli/src/tools.cpp | 3 +- libraries/common/include/common/constants.hpp | 1 - .../common/include/common/encoding_rlp.hpp | 12 +-- .../common/include/common/global_const.hpp | 2 - .../common/include/common/thread_pool.hpp | 1 - libraries/common/include/common/types.hpp | 6 -- libraries/common/include/common/util.hpp | 6 -- .../common/include/common/vrf_wrapper.hpp | 7 +- libraries/common/src/constants.cpp | 1 - libraries/common/src/jsoncpp.cpp | 10 +- libraries/common/src/util.cpp | 3 - libraries/config/include/config/config.hpp | 4 +- .../config/include/config/config_utils.hpp | 8 +- libraries/config/include/config/genesis.hpp | 7 -- libraries/config/include/config/hardfork.hpp | 2 + .../config/include/config/state_config.hpp | 1 - libraries/config/src/config.cpp | 2 +- libraries/config/src/config_utils.cpp | 8 ++ libraries/config/src/genesis.cpp | 3 - libraries/config/src/hardfork.cpp | 4 +- libraries/config/src/network.cpp | 1 + libraries/config/src/state_config.cpp | 2 - .../core_libs/consensus/include/dag/dag.hpp | 9 +- .../include/dag/dag_block_proposer.hpp | 4 +- .../consensus/include/dag/dag_manager.hpp | 4 +- .../include/dag/sortition_params_manager.hpp | 2 +- .../consensus/include/final_chain/cache.hpp | 1 - .../consensus/include/final_chain/data.hpp | 4 - .../include/final_chain/final_chain.hpp | 10 +- 
.../include/final_chain/state_api.hpp | 5 +- .../include/final_chain/state_api_data.hpp | 2 - .../consensus/include/pbft/pbft_chain.hpp | 4 +- .../consensus/include/pbft/pbft_manager.hpp | 7 +- .../include/pillar_chain/pillar_block.hpp | 3 + .../pillar_chain/pillar_chain_manager.hpp | 4 +- .../consensus/include/rewards/block_stats.hpp | 2 - .../include/rewards/rewards_stats.hpp | 8 +- .../include/transaction/gas_pricer.hpp | 5 +- .../transaction/transaction_manager.hpp | 16 ++-- .../include/vote_manager/verified_votes.hpp | 2 + .../include/vote_manager/vote_manager.hpp | 14 +-- libraries/core_libs/consensus/src/dag/dag.cpp | 7 +- .../consensus/src/dag/dag_block_proposer.cpp | 2 - .../consensus/src/dag/dag_manager.cpp | 3 +- .../src/dag/sortition_params_manager.cpp | 22 ++--- .../consensus/src/final_chain/data.cpp | 2 + .../consensus/src/final_chain/final_chain.cpp | 56 ++++++----- .../consensus/src/final_chain/state_api.cpp | 1 + .../consensus/src/pbft/pbft_chain.cpp | 1 - .../consensus/src/pbft/pbft_manager.cpp | 2 +- .../src/pillar_chain/pillar_block.cpp | 1 + .../src/pillar_chain/pillar_chain_manager.cpp | 1 + .../consensus/src/rewards/rewards_stats.cpp | 12 +-- .../src/slashing_manager/slashing_manager.cpp | 2 + .../consensus/src/transaction/gas_pricer.cpp | 5 +- .../src/transaction/transaction_manager.cpp | 12 +-- .../src/transaction/transaction_queue.cpp | 2 +- .../src/vote_manager/vote_manager.cpp | 8 +- .../network/include/network/network.hpp | 6 -- .../common/ext_syncing_packet_handler.hpp | 1 - .../latest/common/packet_handler.hpp | 1 - .../latest/get_dag_sync_packet_handler.hpp | 1 + ...get_pillar_votes_bundle_packet_handler.hpp | 1 - .../latest/pbft_sync_packet_handler.hpp | 1 + .../latest/transaction_packet_handler.hpp | 1 - .../shared_states/pbft_syncing_state.hpp | 5 +- .../tarcap/shared_states/peers_state.hpp | 2 - .../network/tarcap/stats/max_stats.hpp | 3 +- .../network/tarcap/stats/node_stats.hpp | 3 +- .../network/tarcap/stats/packet_stats.hpp | 
3 +- .../network/tarcap/stats/packets_stats.hpp | 6 +- .../include/network/tarcap/taraxa_peer.hpp | 1 + .../network/threadpool/packet_data.hpp | 2 +- .../threadpool/packets_blocking_mask.hpp | 2 - .../network/threadpool/priority_queue.hpp | 1 - libraries/core_libs/network/rpc/Taraxa.cpp | 3 +- .../core_libs/network/src/http_server.cpp | 2 +- libraries/core_libs/network/src/network.cpp | 1 - .../common/ext_votes_packet_handler.cpp | 1 + .../latest/common/packet_handler.cpp | 1 + .../network/src/tarcap/stats/node_stats.cpp | 1 + .../stats/time_period_packets_stats.cpp | 5 +- .../network/src/tarcap/taraxa_capability.cpp | 1 - libraries/core_libs/network/src/ws_server.cpp | 4 +- .../core_libs/node/include/node/node.hpp | 9 -- libraries/core_libs/node/src/node.cpp | 2 - .../storage/migration/migration_base.hpp | 6 +- .../storage/migration/transaction_period.hpp | 2 - .../storage/include/storage/storage.hpp | 17 ++-- .../src/migration/transaction_period.cpp | 5 +- libraries/core_libs/storage/src/storage.cpp | 11 ++- libraries/logger/include/logger/logger.hpp | 1 - .../logger/include/logger/logger_config.hpp | 7 +- libraries/metrics/src/metrics_service.cpp | 5 +- libraries/types/dag_block/src/dag_block.cpp | 2 +- .../pbft_block/include/pbft/pbft_block.hpp | 2 - .../include/pbft/pbft_block_extra_data.hpp | 3 +- libraries/types/pbft_block/src/pbft_block.cpp | 5 +- .../pbft_block/src/pbft_block_extra_data.cpp | 2 + .../types/pbft_block/src/period_data.cpp | 5 - .../transaction/system_transaction.hpp | 1 - .../include/transaction/transaction.hpp | 3 +- .../types/vote/include/vote/pbft_vote.hpp | 6 +- .../types/vote/include/vote/vrf_sortition.hpp | 1 + libraries/types/vote/src/pbft_vote.cpp | 9 ++ libraries/types/vote/src/vote.cpp | 4 +- libraries/vdf/include/vdf/config.hpp | 1 - libraries/vdf/include/vdf/sortition.hpp | 6 -- libraries/vdf/src/config.cpp | 2 + libraries/vdf/src/sortition.cpp | 6 +- programs/taraxa-bootnode/main.cpp | 2 +- programs/taraxad/main.cpp | 3 +- 
tests/abi_test.cpp | 1 + tests/crypto_test.cpp | 7 +- tests/final_chain_test.cpp | 1 + tests/network_test.cpp | 2 +- tests/p2p_test.cpp | 7 +- tests/pillar_chain_test.cpp | 3 +- tests/rewards_stats_test.cpp | 2 +- tests/rpc_test.cpp | 2 - tests/state_api_test.cpp | 2 +- tests/tarcap_threadpool_test.cpp | 3 +- .../test_util/include/test_util/test_util.hpp | 6 -- tests/test_util/src/test_util.cpp | 3 + 164 files changed, 365 insertions(+), 480 deletions(-) delete mode 100644 libraries/aleth/libdevcore/Guards.cpp delete mode 100644 libraries/aleth/libp2p/All.h diff --git a/libraries/aleth/libdevcore/Base64.h b/libraries/aleth/libdevcore/Base64.h index 61e7010682..cb67f2d812 100644 --- a/libraries/aleth/libdevcore/Base64.h +++ b/libraries/aleth/libdevcore/Base64.h @@ -31,7 +31,7 @@ #include -#include "FixedHash.h" +#include "Common.h" namespace dev { std::string toBase64(bytesConstRef _in); diff --git a/libraries/aleth/libdevcore/Common.cpp b/libraries/aleth/libdevcore/Common.cpp index 4600bb2998..c7365a2c4e 100644 --- a/libraries/aleth/libdevcore/Common.cpp +++ b/libraries/aleth/libdevcore/Common.cpp @@ -4,7 +4,6 @@ #include "Common.h" -#include "Exceptions.h" #include "Log.h" #if defined(_WIN32) diff --git a/libraries/aleth/libdevcore/Common.h b/libraries/aleth/libdevcore/Common.h index e7dfe89035..6c6b1b550b 100644 --- a/libraries/aleth/libdevcore/Common.h +++ b/libraries/aleth/libdevcore/Common.h @@ -12,7 +12,6 @@ #include #include #include -#include #include #pragma warning(push) #pragma GCC diagnostic push diff --git a/libraries/aleth/libdevcore/CommonData.cpp b/libraries/aleth/libdevcore/CommonData.cpp index 1137c1f97d..cc2fa0f876 100644 --- a/libraries/aleth/libdevcore/CommonData.cpp +++ b/libraries/aleth/libdevcore/CommonData.cpp @@ -4,7 +4,9 @@ #include "CommonData.h" -#include +#include + +#include #include "Exceptions.h" diff --git a/libraries/aleth/libdevcore/CommonIO.h b/libraries/aleth/libdevcore/CommonIO.h index 989bfc7c58..3dda2965a3 100644 --- 
a/libraries/aleth/libdevcore/CommonIO.h +++ b/libraries/aleth/libdevcore/CommonIO.h @@ -10,8 +10,6 @@ #include #include #include -#include -#include #include #include #include diff --git a/libraries/aleth/libdevcore/CommonJS.h b/libraries/aleth/libdevcore/CommonJS.h index d0c17b8595..e7bb61b363 100644 --- a/libraries/aleth/libdevcore/CommonJS.h +++ b/libraries/aleth/libdevcore/CommonJS.h @@ -8,7 +8,6 @@ #include #include "CommonData.h" -#include "CommonIO.h" #include "FixedHash.h" namespace dev { diff --git a/libraries/aleth/libdevcore/Guards.cpp b/libraries/aleth/libdevcore/Guards.cpp deleted file mode 100644 index 3f852d4c0a..0000000000 --- a/libraries/aleth/libdevcore/Guards.cpp +++ /dev/null @@ -1,9 +0,0 @@ -// Aleth: Ethereum C++ client, tools and libraries. -// Copyright 2014-2019 Aleth Authors. -// Licensed under the GNU General Public License, Version 3. - -#include "Guards.h" -using namespace std; -using namespace dev; - -namespace dev {} diff --git a/libraries/aleth/libdevcore/Guards.h b/libraries/aleth/libdevcore/Guards.h index 57cb71f5f3..9dc94c4c77 100644 --- a/libraries/aleth/libdevcore/Guards.h +++ b/libraries/aleth/libdevcore/Guards.h @@ -4,7 +4,6 @@ #pragma once -#include #include #include #pragma warning(push) diff --git a/libraries/aleth/libdevcore/Log.h b/libraries/aleth/libdevcore/Log.h index f91a640627..518f430f7a 100644 --- a/libraries/aleth/libdevcore/Log.h +++ b/libraries/aleth/libdevcore/Log.h @@ -11,7 +11,6 @@ #include #include -#include "CommonIO.h" #include "FixedHash.h" #include "Terminal.h" diff --git a/libraries/aleth/libdevcore/RLP.cpp b/libraries/aleth/libdevcore/RLP.cpp index a8a34966cb..23118b0dd8 100644 --- a/libraries/aleth/libdevcore/RLP.cpp +++ b/libraries/aleth/libdevcore/RLP.cpp @@ -2,6 +2,14 @@ // Copyright 2013-2019 Aleth Authors. // Licensed under the GNU General Public License, Version 3. 
#include "RLP.h" + +#include +#include + +#include +#include +#include + using namespace std; using namespace dev; diff --git a/libraries/aleth/libdevcore/RLP.h b/libraries/aleth/libdevcore/RLP.h index 4a7ea2bce1..45301cf5b9 100644 --- a/libraries/aleth/libdevcore/RLP.h +++ b/libraries/aleth/libdevcore/RLP.h @@ -7,8 +7,6 @@ #pragma once #include -#include -#include #include #include #include diff --git a/libraries/aleth/libdevcore/SHA3.cpp b/libraries/aleth/libdevcore/SHA3.cpp index 1ffd34890f..77da7e8016 100644 --- a/libraries/aleth/libdevcore/SHA3.cpp +++ b/libraries/aleth/libdevcore/SHA3.cpp @@ -6,6 +6,8 @@ #include +#include "ethash/hash_types.hpp" + namespace dev { bool sha3(bytesConstRef _input, bytesRef o_output) noexcept { diff --git a/libraries/aleth/libdevcrypto/AES.cpp b/libraries/aleth/libdevcrypto/AES.cpp index a7da21c779..473694acd9 100644 --- a/libraries/aleth/libdevcrypto/AES.cpp +++ b/libraries/aleth/libdevcrypto/AES.cpp @@ -9,9 +9,9 @@ #include #include #include +#include using namespace dev; -using namespace dev::crypto; bytes dev::aesDecrypt(bytesConstRef _ivCipher, std::string const& _password, unsigned _rounds, bytesConstRef _salt) { bytes pw = asBytes(_password); diff --git a/libraries/aleth/libdevcrypto/AES.h b/libraries/aleth/libdevcrypto/AES.h index c2ec6fcfd5..fb5589bf2c 100644 --- a/libraries/aleth/libdevcrypto/AES.h +++ b/libraries/aleth/libdevcrypto/AES.h @@ -8,7 +8,7 @@ #pragma once -#include "Common.h" +#include namespace dev { diff --git a/libraries/aleth/libdevcrypto/Common.cpp b/libraries/aleth/libdevcrypto/Common.cpp index c6ca11776d..f3908b6f55 100644 --- a/libraries/aleth/libdevcrypto/Common.cpp +++ b/libraries/aleth/libdevcrypto/Common.cpp @@ -6,8 +6,6 @@ #include #include -#include -#include #include // conflicts with #include #include @@ -17,7 +15,6 @@ #include "AES.h" #include "CryptoPP.h" -#include "Exceptions.h" using namespace std; using namespace dev; using namespace dev::crypto; diff --git 
a/libraries/aleth/libdevcrypto/CryptoPP.cpp b/libraries/aleth/libdevcrypto/CryptoPP.cpp index 42ebf71d89..5a432d9514 100644 --- a/libraries/aleth/libdevcrypto/CryptoPP.cpp +++ b/libraries/aleth/libdevcrypto/CryptoPP.cpp @@ -7,9 +7,7 @@ #include #include #include -#include #include // conflicts with -#include static_assert(CRYPTOPP_VERSION >= 565, "Wrong Crypto++ version"); diff --git a/libraries/aleth/libp2p/All.h b/libraries/aleth/libp2p/All.h deleted file mode 100644 index 626ff2582f..0000000000 --- a/libraries/aleth/libp2p/All.h +++ /dev/null @@ -1,6 +0,0 @@ -#pragma once - -#include "Capability.h" -#include "Common.h" -#include "Host.h" -#include "Session.h" diff --git a/libraries/aleth/libp2p/Common.cpp b/libraries/aleth/libp2p/Common.cpp index 011c443ae0..5959fc2d71 100644 --- a/libraries/aleth/libp2p/Common.cpp +++ b/libraries/aleth/libp2p/Common.cpp @@ -7,6 +7,7 @@ #include #include "Network.h" +#include "libdevcore/CommonIO.h" namespace dev { namespace p2p { diff --git a/libraries/aleth/libp2p/Common.h b/libraries/aleth/libp2p/Common.h index 43548c338a..591639f004 100644 --- a/libraries/aleth/libp2p/Common.h +++ b/libraries/aleth/libp2p/Common.h @@ -25,8 +25,9 @@ #include #include #include + namespace ba = boost::asio; -namespace bi = boost::asio::ip; +namespace bi = ba::ip; namespace dev { diff --git a/libraries/aleth/libp2p/Host.cpp b/libraries/aleth/libp2p/Host.cpp index 1e773acf73..582682f3b9 100644 --- a/libraries/aleth/libp2p/Host.cpp +++ b/libraries/aleth/libp2p/Host.cpp @@ -4,24 +4,15 @@ #include "Host.h" -#include -#include -#include -#include - -#include #include #include #include -#include #include -#include #include "Capability.h" #include "Common.h" #include "RLPxHandshake.h" #include "Session.h" -#include "UPnP.h" using namespace std; using namespace dev; diff --git a/libraries/aleth/libp2p/Host.h b/libraries/aleth/libp2p/Host.h index c7299252a7..628e12d53b 100644 --- a/libraries/aleth/libp2p/Host.h +++ b/libraries/aleth/libp2p/Host.h @@ 
-4,16 +4,14 @@ #pragma once +#include #include #include #include #include -#include #include -#include #include -#include #include #include @@ -26,9 +24,6 @@ #include "Session.h" #include "taraxa.hpp" -namespace io = boost::asio; -namespace bi = io::ip; - namespace std { template <> struct hash> { diff --git a/libraries/aleth/libp2p/Network.cpp b/libraries/aleth/libp2p/Network.cpp index 41b98c5c3d..7561aec69a 100644 --- a/libraries/aleth/libp2p/Network.cpp +++ b/libraries/aleth/libp2p/Network.cpp @@ -2,17 +2,15 @@ // Copyright 2014-2019 Aleth Authors. // Licensed under the GNU General Public License, Version 3. -#include #ifndef _WIN32 #include #endif #include -#include #include #include -#include +#include #include #include "Common.h" diff --git a/libraries/aleth/libp2p/Network.h b/libraries/aleth/libp2p/Network.h index aee0f7ad4a..47e008aa81 100644 --- a/libraries/aleth/libp2p/Network.h +++ b/libraries/aleth/libp2p/Network.h @@ -7,14 +7,7 @@ #include #include -#include -#include -#include -#include - #include "Common.h" -namespace ba = boost::asio; -namespace bi = ba::ip; namespace dev { namespace p2p { diff --git a/libraries/aleth/libp2p/NodeTable.cpp b/libraries/aleth/libp2p/NodeTable.cpp index 49575dff69..82fba3a91b 100644 --- a/libraries/aleth/libp2p/NodeTable.cpp +++ b/libraries/aleth/libp2p/NodeTable.cpp @@ -5,7 +5,6 @@ #include "NodeTable.h" #include -using namespace std; namespace dev { namespace p2p { @@ -15,9 +14,9 @@ BOOST_LOG_INLINE_GLOBAL_LOGGER_CTOR_ARGS(g_discoveryWarnLogger, boost::log::sour (boost::log::keywords::severity = 0)(boost::log::keywords::channel = "discov")) // Cadence at which we timeout sent pings and evict unresponsive nodes -constexpr chrono::milliseconds c_handleTimeoutsIntervalMs{5000}; +constexpr std::chrono::milliseconds c_handleTimeoutsIntervalMs{5000}; // Cadence at which we remove old records from EndpointTracker -constexpr chrono::milliseconds c_removeOldEndpointStatementsIntervalMs{5000}; +constexpr 
std::chrono::milliseconds c_removeOldEndpointStatementsIntervalMs{5000}; // Change external endpoint after this number of peers report new one constexpr size_t c_minEndpointTrackStatements{10}; // Interval during which each endpoint statement is kept @@ -25,12 +24,12 @@ constexpr std::chrono::minutes c_endpointStatementTimeToLiveMin{5}; } // namespace -constexpr chrono::seconds DiscoveryDatagram::c_timeToLiveS; -constexpr chrono::milliseconds NodeTable::c_reqTimeoutMs; -constexpr chrono::milliseconds NodeTable::c_bucketRefreshMs; -constexpr chrono::milliseconds NodeTable::c_discoveryRoundIntervalMs; +constexpr std::chrono::seconds DiscoveryDatagram::c_timeToLiveS; +constexpr std::chrono::milliseconds NodeTable::c_reqTimeoutMs; +constexpr std::chrono::milliseconds NodeTable::c_bucketRefreshMs; +constexpr std::chrono::milliseconds NodeTable::c_discoveryRoundIntervalMs; -inline bool operator==(weak_ptr const& _weak, shared_ptr const& _shared) { +inline bool operator==(std::weak_ptr const& _weak, std::shared_ptr const& _shared) { return !_weak.owner_before(_shared) && !_shared.owner_before(_weak); } @@ -100,8 +99,8 @@ bool NodeTable::addKnownNode(Node const& _node, uint32_t _lastPongReceivedTime, return true; } - auto entry = make_shared(m_hostNodeIDHash, _node.id, _node.get_endpoint(), _lastPongReceivedTime, - _lastPongSentTime); + auto entry = std::make_shared(m_hostNodeIDHash, _node.id, _node.get_endpoint(), _lastPongReceivedTime, + _lastPongSentTime); if (entry->hasValidEndpointProof()) { LOG(m_logger) << "Known " << _node; @@ -137,16 +136,16 @@ bool NodeTable::isValidNode(Node const& _node) const { return true; } -list NodeTable::nodes() const { - list nodes; +std::list NodeTable::nodes() const { + std::list nodes; DEV_GUARDED(x_nodes) { for (auto& i : m_allNodes) nodes.push_back(i.second->id()); } return nodes; } -list NodeTable::snapshot() const { - list ret; +std::list NodeTable::snapshot() const { + std::list ret; DEV_GUARDED(x_state) { for (auto const& s : 
m_buckets) for (auto const& np : s.nodes) @@ -165,13 +164,14 @@ Node NodeTable::node(NodeID const& _id) { return UnspecifiedNode; } -shared_ptr NodeTable::nodeEntry(NodeID const& _id) { +std::shared_ptr NodeTable::nodeEntry(NodeID const& _id) { Guard l(x_nodes); auto const it = m_allNodes.find(_id); - return it != m_allNodes.end() ? it->second : shared_ptr(); + return it != m_allNodes.end() ? it->second : std::shared_ptr(); } -void NodeTable::doDiscoveryRound(NodeID _node, unsigned _round, shared_ptr>> _tried) { +void NodeTable::doDiscoveryRound(NodeID _node, unsigned _round, + std::shared_ptr>> _tried) { // NOTE: ONLY called by doDiscovery or "recursively" via lambda scheduled via // timer at the end of this function if (!m_socket->isOpen()) return; @@ -192,7 +192,7 @@ void NodeTable::doDiscoveryRound(NodeID _node, unsigned _round, shared_ptrendpoint(), _node); p.expiration = nextRequestExpirationTime(); p.sign(m_secret); - m_sentFindNodes.emplace_back(nodeEntry->id(), chrono::steady_clock::now()); + m_sentFindNodes.emplace_back(nodeEntry->id(), std::chrono::steady_clock::now()); LOG(m_logger) << p.typeName() << " to " << nodeEntry->node << " (target: " << _node << ")"; m_socket->send(p); @@ -225,15 +225,15 @@ void NodeTable::doDiscoveryRound(NodeID _node, unsigned _round, shared_ptr> NodeTable::nearestNodeEntries(NodeID const& _target) { - auto const distanceToTargetLess = [](pair> const& _node1, - pair> const& _node2) { +std::vector> NodeTable::nearestNodeEntries(NodeID const& _target) { + auto const distanceToTargetLess = [](std::pair> const& _node1, + std::pair> const& _node2) { return _node1.first < _node2.first; }; h256 const targetHash = sha3(_target); - std::multiset>, decltype(distanceToTargetLess)> nodesByDistanceToTarget( + std::multiset>, decltype(distanceToTargetLess)> nodesByDistanceToTarget( distanceToTargetLess); for (auto const& bucket : m_buckets) for (auto const& nodeWeakPtr : bucket.nodes) @@ -244,13 +244,13 @@ vector> 
NodeTable::nearestNodeEntries(NodeID const& _targe nodesByDistanceToTarget.erase(--nodesByDistanceToTarget.end()); } - vector> ret; + std::vector> ret; for (auto& distanceAndNode : nodesByDistanceToTarget) ret.emplace_back(std::move(distanceAndNode.second)); return ret; } -void NodeTable::ping(Node const& _node, shared_ptr _replacementNodeEntry) { +void NodeTable::ping(Node const& _node, std::shared_ptr _replacementNodeEntry) { if (!m_socket->isOpen()) return; // Don't send Ping if one is already sent @@ -267,7 +267,7 @@ void NodeTable::ping(Node const& _node, shared_ptr _replacementNodeEn m_socket->send(p); NodeValidation const validation{ - _node.id, _node.get_endpoint().tcpPort(), _node.get_endpoint().udpPort(), chrono::steady_clock::now(), + _node.id, _node.get_endpoint().tcpPort(), _node.get_endpoint().udpPort(), std::chrono::steady_clock::now(), pingHash, _replacementNodeEntry}; m_sentPings.insert({_node.get_endpoint(), validation}); } @@ -276,7 +276,7 @@ void NodeTable::schedulePing(Node const& _node) { post(strand_, [this, _node] { ping(_node, {}); }); } -void NodeTable::evict(NodeEntry const& _leastSeen, shared_ptr _replacement) { +void NodeTable::evict(NodeEntry const& _leastSeen, std::shared_ptr _replacement) { if (!m_socket->isOpen()) return; LOG(m_logger) << "Evicting node " << _leastSeen.node; ping(_leastSeen.node, std::move(_replacement)); @@ -284,7 +284,7 @@ void NodeTable::evict(NodeEntry const& _leastSeen, shared_ptr _replac if (m_nodeEventHandler) m_nodeEventHandler->appendEvent(_leastSeen.id(), NodeEntryScheduledForEviction); } -void NodeTable::noteActiveNode(shared_ptr _nodeEntry) { +void NodeTable::noteActiveNode(std::shared_ptr _nodeEntry) { assert(_nodeEntry); if (_nodeEntry->id() == m_hostNodeID) { @@ -300,7 +300,7 @@ void NodeTable::noteActiveNode(shared_ptr _nodeEntry) { LOG(m_logger) << "Active node " << _nodeEntry->node; - shared_ptr nodeToEvict; + std::shared_ptr nodeToEvict; { Guard l(x_state); // Find a bucket to put a node to @@ 
-349,12 +349,12 @@ void NodeTable::invalidateNode(NodeID const& _id) { sourceNodeEntry->lastPongReceivedTime = RLPXDatagramFace::secondsSinceEpoch() - NodeTable::c_bondingTimeSeconds; } -void NodeTable::dropNode(shared_ptr _n) { +void NodeTable::dropNode(std::shared_ptr _n) { // remove from nodetable { Guard l(x_state); NodeBucket& s = bucket_UNSAFE(_n.get()); - s.nodes.remove_if([_n](weak_ptr const& _bucketEntry) { return _bucketEntry == _n; }); + s.nodes.remove_if([_n](std::weak_ptr const& _bucketEntry) { return _bucketEntry == _n; }); } DEV_GUARDED(x_nodes) { m_allNodes.erase(_n->id()); } @@ -379,7 +379,7 @@ void NodeTable::onPacketReceived(UDPSocketFace*, bi::udp::endpoint const& _from, } } try { - unique_ptr packet = DiscoveryDatagram::interpretUDP(node_ip, _packet); + std::unique_ptr packet = DiscoveryDatagram::interpretUDP(node_ip, _packet); if (!packet) return; if (packet->isExpired()) { LOG(m_logger) << "Expired " << packet->typeName() << " from " << packet->sourceid << "@" << node_ip; @@ -387,7 +387,7 @@ void NodeTable::onPacketReceived(UDPSocketFace*, bi::udp::endpoint const& _from, } LOG(m_logger) << packet->typeName() << " from " << packet->sourceid << "@" << node_ip; - shared_ptr sourceNodeEntry; + std::shared_ptr sourceNodeEntry; switch (packet->packetType()) { case Pong::type: sourceNodeEntry = handlePong(node_ip, *packet); @@ -415,7 +415,7 @@ void NodeTable::onPacketReceived(UDPSocketFace*, bi::udp::endpoint const& _from, } if (sourceNodeEntry) noteActiveNode(std::move(sourceNodeEntry)); - } catch (exception const& _e) { + } catch (std::exception const& _e) { LOG(m_logger) << "Exception processing message from " << node_ip.address().to_string() << ":" << node_ip.port() << ": " << _e.what(); } catch (...) 
{ @@ -423,7 +423,7 @@ void NodeTable::onPacketReceived(UDPSocketFace*, bi::udp::endpoint const& _from, } } -shared_ptr NodeTable::handlePong(bi::udp::endpoint const& _from, DiscoveryDatagram const& _packet) { +std::shared_ptr NodeTable::handlePong(bi::udp::endpoint const& _from, DiscoveryDatagram const& _packet) { // validate pong auto const sentPing = m_sentPings.find(_from); if (sentPing == m_sentPings.end()) { @@ -447,13 +447,13 @@ shared_ptr NodeTable::handlePong(bi::udp::endpoint const& _from, Disc } // create or update nodeEntry with new Pong received time - shared_ptr sourceNodeEntry; + std::shared_ptr sourceNodeEntry; DEV_GUARDED(x_nodes) { auto it = m_allNodes.find(sourceId); if (it == m_allNodes.end()) { - sourceNodeEntry = make_shared(m_hostNodeIDHash, sourceId, - NodeIPEndpoint{_from.address(), _from.port(), nodeValidation.tcpPort}, - RLPXDatagramFace::secondsSinceEpoch(), 0 /* lastPongSentTime */); + sourceNodeEntry = std::make_shared( + m_hostNodeIDHash, sourceId, NodeIPEndpoint{_from.address(), _from.port(), nodeValidation.tcpPort}, + RLPXDatagramFace::secondsSinceEpoch(), 0 /* lastPongSentTime */); // We need to setup external port, as we where able to do ping-pong exchange and node is active sourceNodeEntry->node.external_udp_port = nodeValidation.udpPort; @@ -488,8 +488,9 @@ shared_ptr NodeTable::handlePong(bi::udp::endpoint const& _from, Disc return sourceNodeEntry; } -shared_ptr NodeTable::handleNeighbours(bi::udp::endpoint const& _from, DiscoveryDatagram const& _packet) { - shared_ptr sourceNodeEntry = nodeEntry(_packet.sourceid); +std::shared_ptr NodeTable::handleNeighbours(bi::udp::endpoint const& _from, + DiscoveryDatagram const& _packet) { + std::shared_ptr sourceNodeEntry = nodeEntry(_packet.sourceid); if (!sourceNodeEntry) { LOG(m_logger) << "Source node (" << _packet.sourceid << "@" << _from << ") not found in node table. 
Ignoring Neighbours packet."; @@ -504,7 +505,7 @@ shared_ptr NodeTable::handleNeighbours(bi::udp::endpoint const& _from auto const& in = dynamic_cast(_packet); bool expected = false; - auto now = chrono::steady_clock::now(); + auto now = std::chrono::steady_clock::now(); m_sentFindNodes.remove_if([&](NodeIdTimePoint const& _t) noexcept { if (_t.first != in.sourceid) return false; if (now - _t.second < c_reqTimeoutMs) expected = true; @@ -544,7 +545,7 @@ std::shared_ptr NodeTable::handleFindNode(bi::udp::endpoint const& _f } auto const& in = dynamic_cast(_packet); - vector> nearest = nearestNodeEntries(in.target); + std::vector> nearest = nearestNodeEntries(in.target); static unsigned constexpr nlimit = (NodeSocket::maxDatagramSize - 109) / 90; for (unsigned offset = 0; offset < nearest.size(); offset += nlimit) { Neighbours out(_from, nearest, offset, nlimit); @@ -696,15 +697,15 @@ void NodeTable::doDiscovery() { crypto::Nonce::get().ref().copyTo( randNodeId.ref().cropped(static_cast(h256::size), static_cast(h256::size))); LOG(m_logger) << "Starting discovery algorithm run for random node id: " << randNodeId; - doDiscoveryRound(randNodeId, 0 /* round */, make_shared>>()); + doDiscoveryRound(randNodeId, 0 /* round */, std::make_shared>>()); })); } void NodeTable::doHandleTimeouts() { runBackgroundTask(c_handleTimeoutsIntervalMs, m_timeoutsTimer, [this]() { - vector> nodesToActivate; + std::vector> nodesToActivate; for (auto it = m_sentPings.begin(); it != m_sentPings.end();) { - if (chrono::steady_clock::now() > it->second.pingSentTime + m_requestTimeToLive) { + if (std::chrono::steady_clock::now() > it->second.pingSentTime + m_requestTimeToLive) { if (auto node = nodeEntry(it->second.nodeID)) { if (node->lastPongReceivedTime < RLPXDatagramFace::secondsSinceEpoch() - m_requestTimeToLive.count()) { if (it->first == node->endpoint()) { @@ -765,8 +766,9 @@ void NodeTable::cancelTimer(std::shared_ptr _timer) { post(strand_, [_timer] { 
_timer->expires_at(c_steadyClockMin); }); } -unique_ptr DiscoveryDatagram::interpretUDP(bi::udp::endpoint const& _from, bytesConstRef _packet) { - unique_ptr decoded; +std::unique_ptr DiscoveryDatagram::interpretUDP(bi::udp::endpoint const& _from, + bytesConstRef _packet) { + std::unique_ptr decoded; // h256 + Signature + type + RLP (smallest possible packet is empty neighbours // packet which is 3 bytes) if (_packet.size() < static_cast(h256::size) + static_cast(Signature::size) + 1 + 3) { diff --git a/libraries/aleth/libp2p/NodeTable.h b/libraries/aleth/libp2p/NodeTable.h index fb61806d7d..a64cf071b4 100644 --- a/libraries/aleth/libp2p/NodeTable.h +++ b/libraries/aleth/libp2p/NodeTable.h @@ -10,7 +10,6 @@ #include #include -#include "Common.h" #include "ENR.h" #include "EndpointTracker.h" diff --git a/libraries/aleth/libp2p/Peer.cpp b/libraries/aleth/libp2p/Peer.cpp index 58d00c6548..6837e65e46 100644 --- a/libraries/aleth/libp2p/Peer.cpp +++ b/libraries/aleth/libp2p/Peer.cpp @@ -4,8 +4,6 @@ #include "Peer.h" using namespace std; -using namespace dev; -using namespace dev::p2p; namespace dev { diff --git a/libraries/aleth/libp2p/RLPXFrameCoder.cpp b/libraries/aleth/libp2p/RLPXFrameCoder.cpp index 4245a69211..6cc835b416 100644 --- a/libraries/aleth/libp2p/RLPXFrameCoder.cpp +++ b/libraries/aleth/libp2p/RLPXFrameCoder.cpp @@ -11,11 +11,8 @@ #include #include -#include "RLPXPacket.h" #include "RLPxHandshake.h" -using namespace std; -using namespace dev; using namespace dev::p2p; RLPXFrameInfo::RLPXFrameInfo(bytesConstRef _header) @@ -231,16 +228,16 @@ bool RLPXFrameCoder::authAndDecryptFrame(bytesRef io) { return true; } -h128 RLPXFrameCoder::egressDigest() { +dev::h128 RLPXFrameCoder::egressDigest() { CryptoPP::Keccak_256 h(m_impl->egressMac); - h128 digest; + dev::h128 digest; h.TruncatedFinal(digest.data(), h128::size); return digest; } -h128 RLPXFrameCoder::ingressDigest() { +dev::h128 RLPXFrameCoder::ingressDigest() { CryptoPP::Keccak_256 h(m_impl->ingressMac); 
- h128 digest; + dev::h128 digest; h.TruncatedFinal(digest.data(), h128::size); return digest; } diff --git a/libraries/aleth/libp2p/RLPxHandshake.cpp b/libraries/aleth/libp2p/RLPxHandshake.cpp index 4582a5ce5e..ea4792a10b 100644 --- a/libraries/aleth/libp2p/RLPxHandshake.cpp +++ b/libraries/aleth/libp2p/RLPxHandshake.cpp @@ -4,13 +4,9 @@ #include "RLPxHandshake.h" -#include "Host.h" -#include "Peer.h" -#include "Session.h" -using namespace std; -using namespace dev; +#include + using namespace dev::p2p; -using namespace dev::crypto; constexpr std::chrono::milliseconds RLPXHandshake::c_timeout; @@ -20,10 +16,10 @@ constexpr size_t c_ackCipherSizeBytes = 210; constexpr size_t c_authCipherSizeBytes = 307; } // namespace -RLPXHandshake::RLPXHandshake(shared_ptr ctx, std::shared_ptr const& _socket) +RLPXHandshake::RLPXHandshake(std::shared_ptr ctx, std::shared_ptr const& _socket) : RLPXHandshake(std::move(ctx), _socket, {}) {} -RLPXHandshake::RLPXHandshake(shared_ptr ctx, std::shared_ptr const& _socket, +RLPXHandshake::RLPXHandshake(std::shared_ptr ctx, std::shared_ptr const& _socket, NodeID _remote) : host_ctx_(std::move(ctx)), m_remote(_remote), @@ -35,7 +31,7 @@ RLPXHandshake::RLPXHandshake(shared_ptr ctx, std::shared_ptr< m_logger.add_attribute("Prefix", prefixAttr); m_errorLogger.add_attribute("Prefix", prefixAttr); - stringstream remoteInfoStream; + std::stringstream remoteInfoStream; remoteInfoStream << "(" << _remote; if (remoteSocketConnected()) remoteInfoStream << "@" << m_socket->remoteEndpoint(); remoteInfoStream << ")"; @@ -97,7 +93,7 @@ void RLPXHandshake::writeAckEIP8() { m_ack.resize(m_ack.size() + padAmount, 0); bytes prefix(2); - toBigEndian(m_ack.size() + c_eciesOverhead, prefix); + toBigEndian(m_ack.size() + crypto::c_eciesOverhead, prefix); encryptECIES(m_remote, bytesConstRef(&prefix), &m_ack, m_ackCipher); m_ackCipher.insert(m_ackCipher.begin(), prefix.begin(), prefix.end()); @@ -219,7 +215,7 @@ void RLPXHandshake::cancel() { void 
RLPXHandshake::error(boost::system::error_code _ech) { host_ctx_->on_failure(m_remote, m_failureReason); - stringstream errorStream; + std::stringstream errorStream; errorStream << "Handshake failed"; if (_ech) errorStream << " (I/O error: " << _ech.message() << ")"; if (remoteSocketConnected()) diff --git a/libraries/aleth/libp2p/RLPxHandshake.h b/libraries/aleth/libp2p/RLPxHandshake.h index ae16c796c2..668723cb8a 100644 --- a/libraries/aleth/libp2p/RLPxHandshake.h +++ b/libraries/aleth/libp2p/RLPxHandshake.h @@ -11,9 +11,6 @@ #include "RLPXFrameCoder.h" #include "RLPXSocket.h" -namespace ba = boost::asio; -namespace bi = boost::asio::ip; - namespace dev { namespace p2p { diff --git a/libraries/aleth/libp2p/Session.cpp b/libraries/aleth/libp2p/Session.cpp index 74228ed798..da2409fab8 100644 --- a/libraries/aleth/libp2p/Session.cpp +++ b/libraries/aleth/libp2p/Session.cpp @@ -5,21 +5,18 @@ #include "Session.h" #include -#include #include +#include #include -#include "Host.h" #include "RLPXFrameCoder.h" -using namespace std; -using namespace dev; using namespace dev::p2p; static constexpr uint32_t MIN_COMPRESSION_SIZE = 500; -Session::Session(SessionCapabilities caps, unique_ptr _io, std::shared_ptr _s, +Session::Session(SessionCapabilities caps, std::unique_ptr _io, std::shared_ptr _s, std::shared_ptr _n, PeerSessionInfo _info, std::optional immediate_disconnect_reason) : m_capabilities(std::move(caps)), @@ -27,9 +24,9 @@ Session::Session(SessionCapabilities caps, unique_ptr _io, std:: m_socket(std::move(_s)), m_peer(std::move(_n)), m_info(std::move(_info)), - m_ping(chrono::steady_clock::time_point::max()), + m_ping(std::chrono::steady_clock::time_point::max()), immediate_disconnect_reason_(immediate_disconnect_reason) { - stringstream remoteInfoStream; + std::stringstream remoteInfoStream; remoteInfoStream << "(" << m_info.id << "@" << m_socket->remoteEndpoint() << ")"; m_logSuffix = remoteInfoStream.str(); auto const attr = 
boost::log::attributes::constant{remoteInfoStream.str()}; @@ -45,8 +42,8 @@ Session::Session(SessionCapabilities caps, unique_ptr _io, std:: std::shared_ptr Session::make(SessionCapabilities caps, std::unique_ptr _io, std::shared_ptr _s, std::shared_ptr _n, PeerSessionInfo _info, std::optional immediate_disconnect_reason) { - shared_ptr ret(new Session(std::move(caps), std::move(_io), std::move(_s), std::move(_n), std::move(_info), - immediate_disconnect_reason)); + std::shared_ptr ret(new Session(std::move(caps), std::move(_io), std::move(_s), std::move(_n), + std::move(_info), immediate_disconnect_reason)); if (immediate_disconnect_reason) { ret->disconnect_(*immediate_disconnect_reason); return ret; @@ -64,7 +61,7 @@ std::shared_ptr Session::make(SessionCapabilities caps, std::unique_ptr Session::~Session() { cnetlog << "Closing peer session with " << m_logSuffix; - m_peer->m_lastConnected = m_peer->m_lastAttempted.load() - chrono::seconds(1); + m_peer->m_lastConnected = m_peer->m_lastAttempted.load() - std::chrono::seconds(1); drop(ClientQuit); } @@ -76,7 +73,7 @@ void Session::readPacket(unsigned _packetType, RLP const& _r) { auto packet_type_str = capabilityPacketTypeToString(cap, _packetType); LOG(m_netLoggerDetail) << "Received " << packet_type_str << " (" << _packetType << ") from"; if (_packetType < UserPacket) { - string err_msg; + std::string err_msg; try { interpretP2pPacket(static_cast(_packetType), _r); } catch (RLPException const& e) { @@ -103,7 +100,7 @@ void Session::interpretP2pPacket(P2pPacketType _t, RLP const& _r) { LOG(m_capLoggerDetail) << p2pPacketTypeToString(_t) << " from"; switch (_t) { case DisconnectPacket: { - string reason = "Unspecified"; + std::string reason = "Unspecified"; auto r = (DisconnectReason)_r[0].toInt(); if (!_r[0].isInt()) drop(BadProtocol); @@ -123,12 +120,12 @@ void Session::interpretP2pPacket(P2pPacketType _t, RLP const& _r) { case PongPacket: { std::unique_lock l(x_info); m_info.lastPing = 
std::chrono::steady_clock::now() - m_ping; - LOG(m_capLoggerDetail) << "Ping latency: " << chrono::duration_cast(m_info.lastPing).count() - << " ms"; + LOG(m_capLoggerDetail) << "Ping latency: " + << std::chrono::duration_cast(m_info.lastPing).count() << " ms"; break; } default: - stringstream ss; + std::stringstream ss; ss << "Unknown p2p packet type: " << _t; throw UnknownP2PPacketType(ss.str()); } @@ -141,7 +138,7 @@ void Session::ping_() { m_ping = std::chrono::steady_clock::now(); } -RLPStream& Session::prep(RLPStream& _s, P2pPacketType _id, unsigned _args) { +dev::RLPStream& Session::prep(RLPStream& _s, P2pPacketType _id, unsigned _args) { return _s.append((unsigned)_id).appendList(_args); } @@ -355,7 +352,7 @@ void Session::doRead() { << packetType << " (" << packet_type_str << "). Frame Size: " << frame.size() << ". Size encoded in RLP: " << RLP(frame.cropped(1), RLP::LaissezFaire).actualSize() - << ". Message: " << toHex(frame) << endl; + << ". Message: " << toHex(frame) << std::endl; disconnect_(BadProtocol); return; } @@ -372,7 +369,7 @@ void Session::doRead() { "corrupted): " << packetType << " (" << packet_type_str << "). Frame Size: " << frame.size() << ". Size encoded in RLP: " << RLP(frame.cropped(1), RLP::LaissezFaire).actualSize() - << ". Message: " << toHex(frame) << endl; + << ". 
Message: " << toHex(frame) << std::endl; disconnect_(BadProtocol); return; } diff --git a/libraries/aleth/libp2p/Session.h b/libraries/aleth/libp2p/Session.h index 2c2244e934..b84732c17e 100644 --- a/libraries/aleth/libp2p/Session.h +++ b/libraries/aleth/libp2p/Session.h @@ -8,15 +8,11 @@ #include #include -#include #include #include -#include -#include #include #include -#include "Capability.h" #include "Common.h" #include "Peer.h" #include "RLPXSocket.h" diff --git a/libraries/aleth/libp2p/UDP.cpp b/libraries/aleth/libp2p/UDP.cpp index a9a142c6b0..b199519096 100644 --- a/libraries/aleth/libp2p/UDP.cpp +++ b/libraries/aleth/libp2p/UDP.cpp @@ -3,11 +3,8 @@ // Licensed under the GNU General Public License, Version 3. #include "UDP.h" -using namespace std; -using namespace dev; -using namespace dev::p2p; -h256 RLPXDatagramFace::sign(Secret const& _k) { +dev::h256 dev::p2p::RLPXDatagramFace::sign(Secret const& _k) { assert(packetType()); RLPStream rlpxstream; @@ -35,7 +32,7 @@ h256 RLPXDatagramFace::sign(Secret const& _k) { return hash; } -Public RLPXDatagramFace::authenticate(bytesConstRef _sig, bytesConstRef _rlp) { +dev::Public dev::p2p::RLPXDatagramFace::authenticate(bytesConstRef _sig, bytesConstRef _rlp) { Signature const& sig = *(Signature const*)_sig.data(); return dev::recover(sig, sha3(_rlp)); } diff --git a/libraries/aleth/libp2p/UDP.h b/libraries/aleth/libp2p/UDP.h index d168cf4ad8..72310e5695 100644 --- a/libraries/aleth/libp2p/UDP.h +++ b/libraries/aleth/libp2p/UDP.h @@ -15,11 +15,8 @@ #include #include #include -#include #include "Common.h" -namespace ba = boost::asio; -namespace bi = ba::ip; namespace dev { namespace p2p { diff --git a/libraries/aleth/libp2p/UPnP.cpp b/libraries/aleth/libp2p/UPnP.cpp index bcda5f2c0d..733202b0c0 100644 --- a/libraries/aleth/libp2p/UPnP.cpp +++ b/libraries/aleth/libp2p/UPnP.cpp @@ -15,8 +15,7 @@ #include #include #include -using namespace std; -using namespace dev; + using namespace dev::p2p; UPnP::UPnP() @@ -82,7 +81,7 
@@ UPnP::~UPnP() { for (auto i : r) removeRedirect(i); } -string UPnP::externalIP() { +std::string UPnP::externalIP() { #if ETH_MINIUPNPC char addr[16]; if (!UPNP_GetExternalIPAddress(m_urls->controlURL, m_data->first.servicetype, addr)) return addr; diff --git a/libraries/aleth/libp2p/UPnP.h b/libraries/aleth/libp2p/UPnP.h index abd4243bb3..03fe6966ff 100644 --- a/libraries/aleth/libp2p/UPnP.h +++ b/libraries/aleth/libp2p/UPnP.h @@ -7,7 +7,6 @@ #include #include #include -#include struct UPNPUrls; struct IGDdatas; diff --git a/libraries/aleth/libp2p/taraxa.hpp b/libraries/aleth/libp2p/taraxa.hpp index 288a13b458..7250bb65e3 100644 --- a/libraries/aleth/libp2p/taraxa.hpp +++ b/libraries/aleth/libp2p/taraxa.hpp @@ -1,6 +1,6 @@ #pragma once -#include "libp2p/Common.h" +#include "Common.h" namespace dev::p2p { diff --git a/libraries/cli/include/cli/config.hpp b/libraries/cli/include/cli/config.hpp index 41aa55e7e4..c03478efb2 100644 --- a/libraries/cli/include/cli/config.hpp +++ b/libraries/cli/include/cli/config.hpp @@ -3,7 +3,6 @@ #include #include -#include "cli/configs.hpp" #include "config/config.hpp" namespace taraxa::cli { diff --git a/libraries/cli/include/cli/config_updater.hpp b/libraries/cli/include/cli/config_updater.hpp index a0b9d8577c..84f88edd5a 100644 --- a/libraries/cli/include/cli/config_updater.hpp +++ b/libraries/cli/include/cli/config_updater.hpp @@ -1,8 +1,9 @@ #pragma once -#include +#include -#include "config/config.hpp" +#include +#include namespace taraxa::cli { diff --git a/libraries/cli/src/config.cpp b/libraries/cli/src/config.cpp index 3023f3fbbe..35db2c4c13 100644 --- a/libraries/cli/src/config.cpp +++ b/libraries/cli/src/config.cpp @@ -117,8 +117,8 @@ Config::Config(int argc, const char* argv[]) { "Log channels to log in addition to log channels defined in config: [channel:level, ....]"); node_command_options.add_options()(LOG_CONFIGURATIONS, bpo::value>(&log_configurations)->multitoken(), - "Log confifugrations to use: 
[configuration_name, ....]"); - node_command_options.add_options()(NODE_SECRET, bpo::value(&node_secret), "Nose secret key to use"); + "Log configurations to use: [configuration_name, ....]"); + node_command_options.add_options()(NODE_SECRET, bpo::value(&node_secret), "Node secret key to use"); node_command_options.add_options()(VRF_SECRET, bpo::value(&vrf_secret), "Vrf secret key to use"); diff --git a/libraries/cli/src/config_updater.cpp b/libraries/cli/src/config_updater.cpp index 52fee01e70..bfd538f817 100644 --- a/libraries/cli/src/config_updater.cpp +++ b/libraries/cli/src/config_updater.cpp @@ -1,8 +1,6 @@ #include "cli/config_updater.hpp" #include "cli/tools.hpp" -#include "common/jsoncpp.hpp" -#include "config/version.hpp" namespace taraxa::cli { @@ -19,7 +17,7 @@ auto NetworkIPChange = [](Json::Value&, const Json::Value&) { ConfigUpdater::ConfigUpdater(int chain_id) { new_conf_ = tools::getConfig(static_cast(chain_id)); - // Regiser changes that should apply + // Register changes that should apply config_changes_.emplace_back(NetworkIPChange); } diff --git a/libraries/cli/src/tools.cpp b/libraries/cli/src/tools.cpp index 67a87e8263..26d03b0ad6 100644 --- a/libraries/cli/src/tools.cpp +++ b/libraries/cli/src/tools.cpp @@ -4,14 +4,13 @@ #include #include -#include #include "cli/config.hpp" +#include "cli/configs.hpp" #include "common/jsoncpp.hpp" using namespace std; using namespace dev; -namespace fs = std::filesystem; namespace taraxa::cli::tools { diff --git a/libraries/common/include/common/constants.hpp b/libraries/common/include/common/constants.hpp index 9122c34c96..2fa857ee91 100644 --- a/libraries/common/include/common/constants.hpp +++ b/libraries/common/include/common/constants.hpp @@ -8,7 +8,6 @@ namespace taraxa { GLOBAL_CONST(h256, ZeroHash); -GLOBAL_CONST(h256, EmptySHA3); GLOBAL_CONST(h256, EmptyRLPListSHA3); GLOBAL_CONST(h64, EmptyNonce); GLOBAL_CONST(u256, ZeroU256); diff --git a/libraries/common/include/common/encoding_rlp.hpp 
b/libraries/common/include/common/encoding_rlp.hpp index 90cb6918ee..8cd6fd8594 100644 --- a/libraries/common/include/common/encoding_rlp.hpp +++ b/libraries/common/include/common/encoding_rlp.hpp @@ -3,10 +3,6 @@ #include #include -#include -#include - -#include "common/util.hpp" namespace taraxa::util { using dev::RLP; @@ -41,7 +37,7 @@ inline auto rlp(RLPEncoderRef encoding, T const& target) -> decltype(target.rlp( inline auto rlp(RLPEncoderRef encoding, std::string const& target) { encoding.append(target); } -inline auto rlp(RLPEncoderRef encoding, bytes const& target) { encoding.append(target); } +inline auto rlp(RLPEncoderRef encoding, dev::bytes const& target) { encoding.append(target); } template void rlp(RLPEncoderRef encoding, std::optional const& target) { @@ -106,7 +102,7 @@ void rlp(RLPDecoderRef encoding, dev::FixedHash& target) { inline auto rlp(RLPDecoderRef encoding, std::string& target) { target = encoding.value.toString(encoding.strictness); } -inline auto rlp(RLPDecoderRef encoding, bytes& target) { target = encoding.value.toBytes(encoding.strictness); } +inline auto rlp(RLPDecoderRef encoding, dev::bytes& target) { target = encoding.value.toBytes(encoding.strictness); } template void rlp(RLPDecoderRef encoding, std::optional& target) { @@ -194,14 +190,14 @@ T rlp_dec(RLPDecoderRef encoding) { } template -bytes const& rlp_enc(RLPEncoderRef encoder_to_reuse, T const& obj) { +dev::bytes const& rlp_enc(RLPEncoderRef encoder_to_reuse, T const& obj) { encoder_to_reuse.clear(); rlp(encoder_to_reuse, obj); return encoder_to_reuse.out(); } template -bytes rlp_enc(T const& obj) { +dev::bytes rlp_enc(T const& obj) { dev::RLPStream s; rlp(s, obj); return std::move(s.invalidate()); diff --git a/libraries/common/include/common/global_const.hpp b/libraries/common/include/common/global_const.hpp index aca855643b..4abffacf6e 100644 --- a/libraries/common/include/common/global_const.hpp +++ b/libraries/common/include/common/global_const.hpp @@ -1,7 +1,5 @@ 
#pragma once -#include - #define GLOBAL_CONST(_type_, _name_) _type_ const &_name_() #define GLOBAL_CONST_DEF(_name_, _init_) \ diff --git a/libraries/common/include/common/thread_pool.hpp b/libraries/common/include/common/thread_pool.hpp index 7c7b40d44e..49ca42f286 100644 --- a/libraries/common/include/common/thread_pool.hpp +++ b/libraries/common/include/common/thread_pool.hpp @@ -1,7 +1,6 @@ #pragma once #include -#include #include "common/functional.hpp" diff --git a/libraries/common/include/common/types.hpp b/libraries/common/include/common/types.hpp index 661e82f3dd..b04a376a3a 100644 --- a/libraries/common/include/common/types.hpp +++ b/libraries/common/include/common/types.hpp @@ -6,15 +6,9 @@ #include #include -#include -#include -#include -#include namespace taraxa { -namespace fs = std::filesystem; - using dev::Address; using dev::AddressSet; using dev::bytes; diff --git a/libraries/common/include/common/util.hpp b/libraries/common/include/common/util.hpp index 737d84d317..82fc75bcf2 100644 --- a/libraries/common/include/common/util.hpp +++ b/libraries/common/include/common/util.hpp @@ -3,7 +3,6 @@ #include #include #include -#include #include #include @@ -11,18 +10,13 @@ #include #include #include -#include #include #include -#include -#include #include #include #include #include -#include "common/types.hpp" - namespace taraxa { /** diff --git a/libraries/common/include/common/vrf_wrapper.hpp b/libraries/common/include/common/vrf_wrapper.hpp index d233b6de70..f11a5af956 100644 --- a/libraries/common/include/common/vrf_wrapper.hpp +++ b/libraries/common/include/common/vrf_wrapper.hpp @@ -2,11 +2,8 @@ #include -#include - #include "common/types.hpp" -#include "common/util.hpp" -#include "sodium.h" +#include "sodium/crypto_vrf.h" namespace taraxa::vrf_wrapper { @@ -21,7 +18,7 @@ vrf_pk_t getVrfPublicKey(vrf_sk_t const &sk); bool isValidVrfPublicKey(vrf_pk_t const &pk); // get proof if public is valid std::optional getVrfProof(vrf_sk_t const &pk, bytes 
const &msg); -// get output if proff is valid +// get output if proof is valid std::optional getVrfOutput(vrf_pk_t const &pk, vrf_proof_t const &proof, bytes const &msg, bool strict = true); diff --git a/libraries/common/src/constants.cpp b/libraries/common/src/constants.cpp index cd40cbf508..b7b9e009f9 100644 --- a/libraries/common/src/constants.cpp +++ b/libraries/common/src/constants.cpp @@ -6,7 +6,6 @@ namespace taraxa { GLOBAL_CONST_DEF(ZeroHash, {}) -GLOBAL_CONST_DEF(EmptySHA3, dev::sha3(dev::bytesConstRef())) GLOBAL_CONST_DEF(EmptyRLPListSHA3, dev::sha3(dev::RLPStream(0).out())) GLOBAL_CONST_DEF(EmptyNonce, {}) GLOBAL_CONST_DEF(ZeroU256, {}) diff --git a/libraries/common/src/jsoncpp.cpp b/libraries/common/src/jsoncpp.cpp index ec4a9c55f8..8371e25575 100644 --- a/libraries/common/src/jsoncpp.cpp +++ b/libraries/common/src/jsoncpp.cpp @@ -1,6 +1,14 @@ #include "common/jsoncpp.hpp" -#include "common/util.hpp" +#include +#include + +#include +#include +#include +#include +#include +#include namespace taraxa::util { diff --git a/libraries/common/src/util.cpp b/libraries/common/src/util.cpp index a8c8190aa6..70845183f0 100644 --- a/libraries/common/src/util.cpp +++ b/libraries/common/src/util.cpp @@ -1,8 +1,5 @@ #include "common/util.hpp" -#include -#include -#include namespace taraxa { std::string jsonToUnstyledString(const Json::Value &value) { diff --git a/libraries/config/include/config/config.hpp b/libraries/config/include/config/config.hpp index 0ba2972caa..477c29019d 100644 --- a/libraries/config/include/config/config.hpp +++ b/libraries/config/include/config/config.hpp @@ -1,7 +1,5 @@ #pragma once -#include "common/config_exception.hpp" -#include "common/util.hpp" #include "common/vrf_wrapper.hpp" #include "config/genesis.hpp" #include "config/network.hpp" @@ -70,7 +68,7 @@ struct FullNodeConfig { auto net_file_path() const { return data_path / "net"; } /** - * @brief Validates config values, throws configexception if validation failes + * @brief Validates 
config values, throws configexception if validation fails * @return */ void validate() const; diff --git a/libraries/config/include/config/config_utils.hpp b/libraries/config/include/config/config_utils.hpp index e5b168d85f..2586973d00 100644 --- a/libraries/config/include/config/config_utils.hpp +++ b/libraries/config/include/config/config_utils.hpp @@ -1,10 +1,12 @@ #pragma once +#include #include +#include -#include "common/config_exception.hpp" -#include "common/types.hpp" -#include "common/util.hpp" +namespace Json { +class Value; +} // namespace Json namespace taraxa { std::string getConfigErr(const std::vector &path); diff --git a/libraries/config/include/config/genesis.hpp b/libraries/config/include/config/genesis.hpp index 4963214807..18d6c15c83 100644 --- a/libraries/config/include/config/genesis.hpp +++ b/libraries/config/include/config/genesis.hpp @@ -2,19 +2,12 @@ #include -#include -#include - -#include "common/lazy.hpp" #include "config/dag_config.hpp" #include "config/pbft_config.hpp" #include "config/state_config.hpp" #include "dag/dag_block.hpp" namespace taraxa { -using std::string; -using std::unordered_map; -using ::taraxa::util::lazy::LazyVal; struct GasPriceConfig { uint64_t percentile = 60; diff --git a/libraries/config/include/config/hardfork.hpp b/libraries/config/include/config/hardfork.hpp index 1e1c95c5b6..73c8419bc3 100644 --- a/libraries/config/include/config/hardfork.hpp +++ b/libraries/config/include/config/hardfork.hpp @@ -5,6 +5,7 @@ #include "common/encoding_rlp.hpp" #include "common/types.hpp" +namespace taraxa { struct Redelegation { taraxa::addr_t validator; taraxa::addr_t delegator; @@ -134,3 +135,4 @@ struct HardforksConfig { Json::Value enc_json(const HardforksConfig& obj); void dec_json(const Json::Value& json, HardforksConfig& obj); +} // namespace taraxa diff --git a/libraries/config/include/config/state_config.hpp b/libraries/config/include/config/state_config.hpp index 338f28e23c..a529073e26 100644 --- 
a/libraries/config/include/config/state_config.hpp +++ b/libraries/config/include/config/state_config.hpp @@ -1,7 +1,6 @@ #pragma once #include -#include #include "common/encoding_rlp.hpp" #include "common/types.hpp" diff --git a/libraries/config/src/config.cpp b/libraries/config/src/config.cpp index 26c915b210..9e9bffdd19 100644 --- a/libraries/config/src/config.cpp +++ b/libraries/config/src/config.cpp @@ -4,7 +4,7 @@ #include -#include "common/jsoncpp.hpp" +#include "common/config_exception.hpp" #include "common/thread_pool.hpp" #include "config/config_utils.hpp" diff --git a/libraries/config/src/config_utils.cpp b/libraries/config/src/config_utils.cpp index bdef058854..2daab50d43 100644 --- a/libraries/config/src/config_utils.cpp +++ b/libraries/config/src/config_utils.cpp @@ -1,5 +1,13 @@ #include "config/config_utils.hpp" +#include +#include +#include + +#include + +#include "common/config_exception.hpp" + namespace taraxa { std::string getConfigErr(const std::vector &path) { diff --git a/libraries/config/src/genesis.cpp b/libraries/config/src/genesis.cpp index a48b66c013..fe24c5fc2f 100644 --- a/libraries/config/src/genesis.cpp +++ b/libraries/config/src/genesis.cpp @@ -2,13 +2,10 @@ #include -#include - #include "common/config_exception.hpp" #include "libdevcore/SHA3.h" namespace taraxa { -using std::stringstream; Json::Value enc_json(GasPriceConfig const& obj) { Json::Value json(Json::objectValue); diff --git a/libraries/config/src/hardfork.cpp b/libraries/config/src/hardfork.cpp index be7768564b..70332394bf 100644 --- a/libraries/config/src/hardfork.cpp +++ b/libraries/config/src/hardfork.cpp @@ -2,6 +2,7 @@ #include "common/config_exception.hpp" +namespace taraxa { Json::Value enc_json(const Redelegation& obj) { Json::Value json(Json::objectValue); json["validator"] = dev::toJS(obj.validator); @@ -191,4 +192,5 @@ void dec_json(const Json::Value& json, HardforksConfig& obj) { } RLP_FIELDS_DEFINE(HardforksConfig, fix_redelegate_block_num, redelegations, 
rewards_distribution_frequency, magnolia_hf, - phalaenopsis_hf_block_num, fix_claim_all_block_num, aspen_hf, ficus_hf) \ No newline at end of file + phalaenopsis_hf_block_num, fix_claim_all_block_num, aspen_hf, ficus_hf) +} // namespace taraxa diff --git a/libraries/config/src/network.cpp b/libraries/config/src/network.cpp index 0509e3a68c..98031b74bf 100644 --- a/libraries/config/src/network.cpp +++ b/libraries/config/src/network.cpp @@ -1,5 +1,6 @@ #include "config/network.hpp" +#include "common/config_exception.hpp" #include "config/config_utils.hpp" namespace taraxa { diff --git a/libraries/config/src/state_config.cpp b/libraries/config/src/state_config.cpp index e938d3d000..4abccf6eb1 100644 --- a/libraries/config/src/state_config.cpp +++ b/libraries/config/src/state_config.cpp @@ -2,8 +2,6 @@ #include -#include - #include "common/vrf_wrapper.hpp" namespace taraxa::state_api { diff --git a/libraries/core_libs/consensus/include/dag/dag.hpp b/libraries/core_libs/consensus/include/dag/dag.hpp index 9f10ac9fa1..c414a03d3f 100644 --- a/libraries/core_libs/consensus/include/dag/dag.hpp +++ b/libraries/core_libs/consensus/include/dag/dag.hpp @@ -1,6 +1,5 @@ #pragma once -#include #include #include #include @@ -12,15 +11,11 @@ #include #include #include -#include -#include -#include -#include #include #include "common/types.hpp" -#include "common/util.hpp" -#include "dag/dag_block.hpp" +#include "logger/logger.hpp" + namespace taraxa { /** @addtogroup DAG diff --git a/libraries/core_libs/consensus/include/dag/dag_block_proposer.hpp b/libraries/core_libs/consensus/include/dag/dag_block_proposer.hpp index 6745a603b5..5ceb738401 100644 --- a/libraries/core_libs/consensus/include/dag/dag_block_proposer.hpp +++ b/libraries/core_libs/consensus/include/dag/dag_block_proposer.hpp @@ -1,12 +1,9 @@ #pragma once #include -#include #include #include -#include "boost/thread.hpp" -#include "config/config.hpp" #include "dag/dag_block.hpp" #include "logger/logger.hpp" #include 
"network/network.hpp" @@ -19,6 +16,7 @@ namespace taraxa { class TransactionManager; class KeyManager; class DagManager; +struct FullNodeConfig; namespace final_chain { class FinalChain; diff --git a/libraries/core_libs/consensus/include/dag/dag_manager.hpp b/libraries/core_libs/consensus/include/dag/dag_manager.hpp index 8c9c59200b..62232a57e9 100644 --- a/libraries/core_libs/consensus/include/dag/dag_manager.hpp +++ b/libraries/core_libs/consensus/include/dag/dag_manager.hpp @@ -6,6 +6,7 @@ #include "sortition_params_manager.hpp" #include "storage/storage.hpp" #include "transaction/transaction_manager.hpp" + namespace taraxa { /** @addtogroup DAG @@ -15,6 +16,7 @@ class Network; class DagBuffer; class FullNode; class KeyManager; +struct DagConfig; /** * @brief DagManager class contains in memory representation of part of the DAG that is not yet finalized in a pbft @@ -258,7 +260,7 @@ class DagManager : public std::enable_shared_from_this { std::map> non_finalized_blks_; DagFrontier frontier_; SortitionParamsManager sortition_params_manager_; - const DagConfig dag_config_; + const DagConfig &dag_config_; const std::shared_ptr genesis_block_; const uint32_t max_levels_per_period_; const uint32_t dag_expiry_limit_; // Any non finalized dag block with a level smaller by diff --git a/libraries/core_libs/consensus/include/dag/sortition_params_manager.hpp b/libraries/core_libs/consensus/include/dag/sortition_params_manager.hpp index 8039bc3d17..244897aea3 100644 --- a/libraries/core_libs/consensus/include/dag/sortition_params_manager.hpp +++ b/libraries/core_libs/consensus/include/dag/sortition_params_manager.hpp @@ -55,7 +55,7 @@ class SortitionParamsManager { * @param batch DB batch in which all changes will be added * @param non_empty_pbft_chain_size PBFT chain size excluding pbft blocks with null anchor */ - void pbftBlockPushed(const PeriodData& block, DbStorage::Batch& batch, PbftPeriod non_empty_pbft_chain_size); + void pbftBlockPushed(const PeriodData& block, 
Batch& batch, PbftPeriod non_empty_pbft_chain_size); /** * Calculate average DAG efficiency from dag_efficiencies_. Used at the end of interval. diff --git a/libraries/core_libs/consensus/include/final_chain/cache.hpp b/libraries/core_libs/consensus/include/final_chain/cache.hpp index 740c41b490..477c90b63d 100644 --- a/libraries/core_libs/consensus/include/final_chain/cache.hpp +++ b/libraries/core_libs/consensus/include/final_chain/cache.hpp @@ -1,7 +1,6 @@ #pragma once #include -#include #include #include #include diff --git a/libraries/core_libs/consensus/include/final_chain/data.hpp b/libraries/core_libs/consensus/include/final_chain/data.hpp index 8c9c1f1105..1410cfeaaf 100644 --- a/libraries/core_libs/consensus/include/final_chain/data.hpp +++ b/libraries/core_libs/consensus/include/final_chain/data.hpp @@ -5,12 +5,8 @@ #include #include -#include - -#include "common/constants.hpp" #include "common/encoding_rlp.hpp" #include "common/types.hpp" -#include "final_chain/state_api_data.hpp" #include "transaction/transaction.hpp" namespace taraxa { diff --git a/libraries/core_libs/consensus/include/final_chain/final_chain.hpp b/libraries/core_libs/consensus/include/final_chain/final_chain.hpp index d66cd6bae4..bdd630a149 100644 --- a/libraries/core_libs/consensus/include/final_chain/final_chain.hpp +++ b/libraries/core_libs/consensus/include/final_chain/final_chain.hpp @@ -5,10 +5,10 @@ #include "common/event.hpp" #include "common/types.hpp" #include "config/config.hpp" +#include "config/state_config.hpp" #include "final_chain/cache.hpp" #include "final_chain/data.hpp" #include "final_chain/state_api.hpp" -#include "final_chain/state_api_data.hpp" #include "rewards/rewards_stats.hpp" #include "storage/storage.hpp" @@ -38,7 +38,7 @@ class FinalChain { FinalChain() = default; ~FinalChain() = default; - FinalChain(const std::shared_ptr& db, const taraxa::FullNodeConfig& config, const addr_t& node_addr); + FinalChain(const std::shared_ptr& db, const 
taraxa::FullNodeConfig& config, const addr_t& node_addr); FinalChain(const FinalChain&) = delete; FinalChain(FinalChain&&) = delete; FinalChain& operator=(const FinalChain&) = delete; @@ -268,8 +268,8 @@ class FinalChain { std::shared_ptr finalize_(PeriodData&& new_blk, std::vector&& finalized_dag_blk_hashes, std::shared_ptr&& anchor); - std::shared_ptr appendBlock(DB::Batch& batch, const addr_t& author, uint64_t timestamp, - uint64_t gas_limit, const h256& state_root, u256 total_reward, + std::shared_ptr appendBlock(Batch& batch, const addr_t& author, uint64_t timestamp, uint64_t gas_limit, + const h256& state_root, u256 total_reward, const SharedTransactions& transactions = {}, const TransactionReceipts& receipts = {}, const bytes& extra_data = {}); @@ -287,7 +287,7 @@ class FinalChain { EthBlockNumber level, EthBlockNumber index) const; private: - std::shared_ptr db_; + std::shared_ptr db_; const uint64_t kBlockGasLimit; StateAPI state_api_; const bool kLightNode = false; diff --git a/libraries/core_libs/consensus/include/final_chain/state_api.hpp b/libraries/core_libs/consensus/include/final_chain/state_api.hpp index 3075825abb..cffffad3f0 100644 --- a/libraries/core_libs/consensus/include/final_chain/state_api.hpp +++ b/libraries/core_libs/consensus/include/final_chain/state_api.hpp @@ -6,10 +6,13 @@ #include "final_chain/state_api_data.hpp" #include "rewards/block_stats.hpp" -#include "storage/storage.hpp" namespace taraxa::state_api { +struct Config; +struct Opts; +struct OptsDB; + /** @addtogroup FinalChain * @{ */ diff --git a/libraries/core_libs/consensus/include/final_chain/state_api_data.hpp b/libraries/core_libs/consensus/include/final_chain/state_api_data.hpp index fae4aeb841..1a9daacdcb 100644 --- a/libraries/core_libs/consensus/include/final_chain/state_api_data.hpp +++ b/libraries/core_libs/consensus/include/final_chain/state_api_data.hpp @@ -3,12 +3,10 @@ #include #include #include -#include #include #include "common/encoding_rlp.hpp" #include 
"common/types.hpp" -#include "config/state_config.hpp" namespace taraxa::state_api { diff --git a/libraries/core_libs/consensus/include/pbft/pbft_chain.hpp b/libraries/core_libs/consensus/include/pbft/pbft_chain.hpp index 2a4bf2ad63..1f9ea8e84a 100644 --- a/libraries/core_libs/consensus/include/pbft/pbft_chain.hpp +++ b/libraries/core_libs/consensus/include/pbft/pbft_chain.hpp @@ -1,11 +1,9 @@ #pragma once -#include #include +#include #include -#include -#include "config/pbft_config.hpp" #include "logger/logger.hpp" #include "pbft/pbft_block.hpp" diff --git a/libraries/core_libs/consensus/include/pbft/pbft_manager.hpp b/libraries/core_libs/consensus/include/pbft/pbft_manager.hpp index 26dd980255..d963e67cd6 100644 --- a/libraries/core_libs/consensus/include/pbft/pbft_manager.hpp +++ b/libraries/core_libs/consensus/include/pbft/pbft_manager.hpp @@ -1,6 +1,5 @@ #pragma once -#include #include #include "common/types.hpp" @@ -257,7 +256,7 @@ class PbftManager { /** * @brief Test/enforce broadcastVotes() to actually send votes */ - void testBroadcatVotesFunctionality(); + void testBroadcastVotesFunctionality(); /** * @brief Check PBFT blocks syncing queue. 
If there are synced PBFT blocks in queue, push it to PBFT chain @@ -305,7 +304,7 @@ class PbftManager { /** * @brief Check if there is 2t+1 cert votes for some valid block, if yes - push it into the chain - * @return true if new cert voted block was pushed into the chain, otheriwse false + * @return true if new cert voted block was pushed into the chain, otherwise false */ bool tryPushCertVotesBlock(); @@ -529,7 +528,7 @@ class PbftManager { const std::vector> &cert_votes) const; /** - @brief Validates PBFT block [illar] votes + @brief Validates PBFT block pillar votes * * @param period_data * @return diff --git a/libraries/core_libs/consensus/include/pillar_chain/pillar_block.hpp b/libraries/core_libs/consensus/include/pillar_chain/pillar_block.hpp index f6a3b3a5ff..4aa58d1903 100644 --- a/libraries/core_libs/consensus/include/pillar_chain/pillar_block.hpp +++ b/libraries/core_libs/consensus/include/pillar_chain/pillar_block.hpp @@ -1,7 +1,10 @@ #pragma once +#include #include +#include + #include "common/encoding_rlp.hpp" #include "common/types.hpp" #include "final_chain/state_api_data.hpp" diff --git a/libraries/core_libs/consensus/include/pillar_chain/pillar_chain_manager.hpp b/libraries/core_libs/consensus/include/pillar_chain/pillar_chain_manager.hpp index f1ff32d638..bc6a29a45e 100644 --- a/libraries/core_libs/consensus/include/pillar_chain/pillar_chain_manager.hpp +++ b/libraries/core_libs/consensus/include/pillar_chain/pillar_chain_manager.hpp @@ -3,7 +3,6 @@ #include #include "common/event.hpp" -#include "config/config.hpp" #include "final_chain/data.hpp" #include "logger/logger.hpp" #include "pillar_chain/pillar_block.hpp" @@ -13,6 +12,7 @@ namespace taraxa { class DbStorage; class Network; class KeyManager; +struct FicusHardforkConfig; } // namespace taraxa namespace taraxa::final_chain { @@ -168,7 +168,7 @@ class PillarChainManager { private: // Node config - const FicusHardforkConfig kFicusHfConfig; + const FicusHardforkConfig& kFicusHfConfig; 
std::shared_ptr db_; std::weak_ptr network_; diff --git a/libraries/core_libs/consensus/include/rewards/block_stats.hpp b/libraries/core_libs/consensus/include/rewards/block_stats.hpp index cc80cd1b8e..513a8a40fe 100644 --- a/libraries/core_libs/consensus/include/rewards/block_stats.hpp +++ b/libraries/core_libs/consensus/include/rewards/block_stats.hpp @@ -1,7 +1,5 @@ #pragma once -#include - #include "common/encoding_rlp.hpp" #include "common/types.hpp" #include "pbft/period_data.hpp" diff --git a/libraries/core_libs/consensus/include/rewards/rewards_stats.hpp b/libraries/core_libs/consensus/include/rewards/rewards_stats.hpp index c4d58babb3..1b74dfe71c 100644 --- a/libraries/core_libs/consensus/include/rewards/rewards_stats.hpp +++ b/libraries/core_libs/consensus/include/rewards/rewards_stats.hpp @@ -12,7 +12,7 @@ namespace taraxa::rewards { */ class Stats { public: - Stats(uint32_t committee_size, const HardforksConfig& hardforks, std::shared_ptr db, + Stats(uint32_t committee_size, const HardforksConfig& hardforks, std::shared_ptr db, std::function&& dpos_eligible_total_vote_count, EthBlockNumber last_blk_num = 0); /** @@ -21,7 +21,7 @@ class Stats { * @return vector that should be processed at current block */ std::vector processStats(const PeriodData& current_blk, const std::vector& trxs_gas_used, - DbStorage::Batch& write_batch); + Batch& write_batch); /** * @brief called on start of new rewards interval. 
clears blocks_stats_ collection * and removes all data saved in db column @@ -46,11 +46,11 @@ class Stats { /** * @brief saves stats to database to not lose this data in case of node restart */ - void saveBlockStats(uint64_t number, const BlockStats& stats, DbStorage::Batch& write_batch); + void saveBlockStats(uint64_t number, const BlockStats& stats, Batch& write_batch); const uint32_t kCommitteeSize; const HardforksConfig kHardforksConfig; - std::shared_ptr db_; + std::shared_ptr db_; const std::function dpos_eligible_total_vote_count_; std::unordered_map blocks_stats_; }; diff --git a/libraries/core_libs/consensus/include/transaction/gas_pricer.hpp b/libraries/core_libs/consensus/include/transaction/gas_pricer.hpp index 4335588f00..0034345825 100644 --- a/libraries/core_libs/consensus/include/transaction/gas_pricer.hpp +++ b/libraries/core_libs/consensus/include/transaction/gas_pricer.hpp @@ -1,12 +1,15 @@ #pragma once #include +#include #include "config/genesis.hpp" -#include "final_chain/final_chain.hpp" +#include "transaction/transaction.hpp" namespace taraxa { +class DbStorage; + /** @addtogroup Transaction * @{ */ diff --git a/libraries/core_libs/consensus/include/transaction/transaction_manager.hpp b/libraries/core_libs/consensus/include/transaction/transaction_manager.hpp index e6140f511e..9909d79695 100644 --- a/libraries/core_libs/consensus/include/transaction/transaction_manager.hpp +++ b/libraries/core_libs/consensus/include/transaction/transaction_manager.hpp @@ -1,7 +1,6 @@ #pragma once #include "common/event.hpp" -#include "config/config.hpp" #include "final_chain/final_chain.hpp" #include "logger/logger.hpp" #include "storage/storage.hpp" @@ -22,14 +21,15 @@ enum class TransactionStatus { Inserted = 0, InsertedNonProposable, Known, Overf class DagBlock; class DagManager; class FullNode; +class FullNodeConfig; /** * @brief TransactionManager class verifies and inserts incoming transactions in memory pool and handles saving * transactions and all 
transactions state change * - * Incoming new transactions can be verified with verifyTransaction functions and than inserted in the transaction pool - * with insertValidatedTransaction. Transactions are kept in transactions memory pool until they are included in a - * proposed dag block or received in an incoming dag block. Transaction verification consist of: + * Incoming new transactions can be verified with verifyTransaction functions and than inserted in the transaction + * pool with insertValidatedTransaction. Transactions are kept in transactions memory pool until they are included + * in a proposed dag block or received in an incoming dag block. Transaction verification consist of: * - Verifying the format * - Verifying signature * - Verifying chan id @@ -116,11 +116,11 @@ class TransactionManager : public std::enable_shared_from_this const transaction_accepted_{}; private: - const FullNodeConfig kConf; + const FullNodeConfig &kConf; // Guards updating transaction status // Transactions can be in one of three states: // 1. In transactions pool; 2. In non-finalized Dag block 3. 
Executed diff --git a/libraries/core_libs/consensus/include/vote_manager/verified_votes.hpp b/libraries/core_libs/consensus/include/vote_manager/verified_votes.hpp index 3fe2cacaed..046062b0ab 100644 --- a/libraries/core_libs/consensus/include/vote_manager/verified_votes.hpp +++ b/libraries/core_libs/consensus/include/vote_manager/verified_votes.hpp @@ -6,6 +6,8 @@ namespace taraxa { +class PbftVote; + enum class TwoTPlusOneVotedBlockType { SoftVotedBlock, CertVotedBlock, NextVotedBlock, NextVotedNullBlock }; struct VerifiedVotes { diff --git a/libraries/core_libs/consensus/include/vote_manager/vote_manager.hpp b/libraries/core_libs/consensus/include/vote_manager/vote_manager.hpp index 45d516eb9c..4b77962837 100644 --- a/libraries/core_libs/consensus/include/vote_manager/vote_manager.hpp +++ b/libraries/core_libs/consensus/include/vote_manager/vote_manager.hpp @@ -16,6 +16,9 @@ namespace taraxa { class Network; class SlashingManager; +class PbftVote; +struct PbftConfig; +struct FullNodeConfig; namespace network::tarcap { class TaraxaPeer; @@ -105,8 +108,7 @@ class VoteManager { * @param block_hash * @param batch */ - void resetRewardVotes(PbftPeriod period, PbftRound round, PbftStep step, const blk_hash_t& block_hash, - DbStorage::Batch& batch); + void resetRewardVotes(PbftPeriod period, PbftRound round, PbftStep step, const blk_hash_t& block_hash, Batch& batch); /** * @brief Check reward votes for specified pbft block @@ -149,7 +151,7 @@ class VoteManager { * * @param write_batch */ - void clearOwnVerifiedVotes(DbStorage::Batch& write_batch); + void clearOwnVerifiedVotes(Batch& write_batch); /** * @brief Place a vote, save it in the verified votes queue, and gossip to peers @@ -199,7 +201,7 @@ class VoteManager { bool voteAlreadyValidated(const vote_hash_t& vote_hash) const; /** - * @brief Generates vrf sorition and calculates its weight + * @brief Generates vrf sortition and calculates its weight * @return true if sortition weight > 0, otherwise false */ bool 
genAndValidateVrfSortition(PbftPeriod pbft_period, PbftRound pbft_round) const; @@ -210,7 +212,7 @@ class VoteManager { * @param period * @param round * @param votes_type - * @return emoty optional if no 2t+1 voted block was found, otherwise initialized optional with block hash + * @return empty optional if no 2t+1 voted block was found, otherwise initialized optional with block hash */ std::optional getTwoTPlusOneVotedBlock(PbftPeriod period, PbftRound round, TwoTPlusOneVotedBlockType type) const; @@ -227,7 +229,7 @@ class VoteManager { TwoTPlusOneVotedBlockType type) const; /** - * @brief Sets current pbft period & round. It also checks if we dont alredy have 2t+1 vote bundles(pf any type) for + * @brief Sets current pbft period & round. It also checks if we dont already have 2t+1 vote bundles(pf any type) for * the provided period & round and if so, it saves these bundles into db * * @param pbft_period diff --git a/libraries/core_libs/consensus/src/dag/dag.cpp b/libraries/core_libs/consensus/src/dag/dag.cpp index 227aeda47f..05adb111f3 100644 --- a/libraries/core_libs/consensus/src/dag/dag.cpp +++ b/libraries/core_libs/consensus/src/dag/dag.cpp @@ -4,7 +4,6 @@ #include #include -#include #include #include #include @@ -12,10 +11,6 @@ #include #include "dag/dag.hpp" -#include "key_manager/key_manager.hpp" -#include "network/network.hpp" -#include "network/tarcap/packets_handlers/latest/dag_block_packet_handler.hpp" -#include "transaction/transaction_manager.hpp" namespace taraxa { @@ -53,7 +48,7 @@ bool Dag::addVEEs(blk_hash_t const &new_vertex, blk_hash_t const &pivot, std::ve // Note: add edges, // *** important - // Add a new block, edges are pointing from pivot to new_veretx + // Add a new block, edges are pointing from pivot to new_vertex if (!pivot.isZero()) { if (hasVertex(pivot)) { std::tie(edge, res) = boost::add_edge_by_label(pivot, new_vertex, graph_); diff --git a/libraries/core_libs/consensus/src/dag/dag_block_proposer.cpp 
b/libraries/core_libs/consensus/src/dag/dag_block_proposer.cpp index 9f61026790..cdc137af23 100644 --- a/libraries/core_libs/consensus/src/dag/dag_block_proposer.cpp +++ b/libraries/core_libs/consensus/src/dag/dag_block_proposer.cpp @@ -1,7 +1,5 @@ #include "dag/dag_block_proposer.hpp" -#include - #include "common/util.hpp" #include "dag/dag_manager.hpp" #include "final_chain/final_chain.hpp" diff --git a/libraries/core_libs/consensus/src/dag/dag_manager.cpp b/libraries/core_libs/consensus/src/dag/dag_manager.cpp index a381965515..27d178fe08 100644 --- a/libraries/core_libs/consensus/src/dag/dag_manager.cpp +++ b/libraries/core_libs/consensus/src/dag/dag_manager.cpp @@ -8,6 +8,7 @@ #include #include +#include "config/config.hpp" #include "dag/dag.hpp" #include "key_manager/key_manager.hpp" #include "network/network.hpp" @@ -174,7 +175,7 @@ std::pair> DagManager::addDagBlock(DagBlock &&blk, } if (save) { block_verified_.emit(blk); - if (auto net = network_.lock()) { + if (std::shared_ptr net = network_.lock()) { net->gossipDagBlock(blk, proposed, trxs); } } diff --git a/libraries/core_libs/consensus/src/dag/sortition_params_manager.cpp b/libraries/core_libs/consensus/src/dag/sortition_params_manager.cpp index 6d4015bc2d..aa37a3a6b0 100644 --- a/libraries/core_libs/consensus/src/dag/sortition_params_manager.cpp +++ b/libraries/core_libs/consensus/src/dag/sortition_params_manager.cpp @@ -1,7 +1,5 @@ #include "dag/sortition_params_manager.hpp" -#include "pbft/pbft_block.hpp" - namespace taraxa { SortitionParamsChange::SortitionParamsChange(PbftPeriod period, uint16_t efficiency, const VrfParams& vrf) @@ -100,7 +98,7 @@ void SortitionParamsManager::cleanup() { } } -void SortitionParamsManager::pbftBlockPushed(const PeriodData& block, DbStorage::Batch& batch, +void SortitionParamsManager::pbftBlockPushed(const PeriodData& block, Batch& batch, PbftPeriod non_empty_pbft_chain_size) { if (config_.changing_interval == 0) { return; @@ -162,18 +160,18 @@ int32_t 
getClosestThreshold(const EfficienciesMap& efficiencies, uint16_t target EfficienciesMap SortitionParamsManager::getEfficienciesToUpperRange(uint16_t efficiency, int32_t last_threshold_upper) const { - // efficiencies_to_uppper_range provide mapping from efficiency to VRF upper threshold, params_changes contain + // efficiencies_to_upper_range provide mapping from efficiency to VRF upper threshold, params_changes contain // efficiency for previous setting so mapping is done efficiency of i relates to VRF upper threshold of (i + 1) - EfficienciesMap efficiencies_to_uppper_range; + EfficienciesMap efficiencies_to_upper_range; for (uint32_t i = 1; i < params_changes_.size(); i++) { - efficiencies_to_uppper_range[params_changes_[i].interval_efficiency] = + efficiencies_to_upper_range[params_changes_[i].interval_efficiency] = params_changes_[i - 1].vrf_params.threshold_upper; } if (params_changes_.size() > 1) { - efficiencies_to_uppper_range[efficiency] = last_threshold_upper; + efficiencies_to_upper_range[efficiency] = last_threshold_upper; } - return efficiencies_to_uppper_range; + return efficiencies_to_upper_range; } int32_t SortitionParamsManager::getNewUpperRange(uint16_t efficiency) const { @@ -192,17 +190,17 @@ int32_t SortitionParamsManager::getNewUpperRange(uint16_t efficiency) const { threshold_change *= -1; } - auto efficiencies_to_uppper_range = getEfficienciesToUpperRange(efficiency, last_threshold_upper); + auto efficiencies_to_upper_range = getEfficienciesToUpperRange(efficiency, last_threshold_upper); // Check if all params are below, over target efficiency or empty. 
If so target is still not reached and change it by // calculated amount - if (efficiencies_to_uppper_range.empty() || (efficiencies_to_uppper_range.rbegin()->first < target_efficiency) || - (efficiencies_to_uppper_range.begin()->first >= target_efficiency)) { + if (efficiencies_to_upper_range.empty() || (efficiencies_to_upper_range.rbegin()->first < target_efficiency) || + (efficiencies_to_upper_range.begin()->first >= target_efficiency)) { return last_threshold_upper + threshold_change; } const auto closest_threshold = - getClosestThreshold(efficiencies_to_uppper_range, target_efficiency, is_over_target_efficiency); + getClosestThreshold(efficiencies_to_upper_range, target_efficiency, is_over_target_efficiency); const bool is_over_last_threshold = closest_threshold >= last_threshold_upper; diff --git a/libraries/core_libs/consensus/src/final_chain/data.cpp b/libraries/core_libs/consensus/src/final_chain/data.cpp index 73b2525ece..f0a09e7b16 100644 --- a/libraries/core_libs/consensus/src/final_chain/data.cpp +++ b/libraries/core_libs/consensus/src/final_chain/data.cpp @@ -2,6 +2,8 @@ #include +#include "common/constants.hpp" + namespace taraxa::final_chain { h256 const& BlockHeader::uncles_hash() { return EmptyRLPListSHA3(); } diff --git a/libraries/core_libs/consensus/src/final_chain/final_chain.cpp b/libraries/core_libs/consensus/src/final_chain/final_chain.cpp index ebe285fefb..060842c2c2 100644 --- a/libraries/core_libs/consensus/src/final_chain/final_chain.cpp +++ b/libraries/core_libs/consensus/src/final_chain/final_chain.cpp @@ -1,11 +1,15 @@ #include "final_chain/final_chain.hpp" #include "common/encoding_solidity.hpp" +#include "common/util.hpp" #include "final_chain/trie_common.hpp" +#include "storage/storage.hpp" +#include "transaction/system_transaction.hpp" namespace taraxa::final_chain { -FinalChain::FinalChain(const std::shared_ptr& db, const taraxa::FullNodeConfig& config, const addr_t& node_addr) +FinalChain::FinalChain(const std::shared_ptr& db, 
const taraxa::FullNodeConfig& config, + const addr_t& node_addr) : db_(db), kBlockGasLimit(config.genesis.pbft.gas_limit), state_api_([this](auto n) { return blockHash(n).value_or(ZeroHash()); }, // @@ -39,7 +43,7 @@ FinalChain::FinalChain(const std::shared_ptr& db, const taraxa::FullNodeConf num_executed_dag_blk_ = db_->getStatusField(taraxa::StatusDbField::ExecutedBlkCount); num_executed_trx_ = db_->getStatusField(taraxa::StatusDbField::ExecutedTrxCount); auto state_db_descriptor = state_api_.get_last_committed_state_descriptor(); - auto last_blk_num = db_->lookup_int(DBMetaKeys::LAST_NUMBER, DB::Columns::final_chain_meta); + auto last_blk_num = db_->lookup_int(DBMetaKeys::LAST_NUMBER, DbStorage::Columns::final_chain_meta); // If we don't have genesis block in db then create and push it if (!last_blk_num) [[unlikely]] { auto batch = db_->createWriteBatch(); @@ -65,9 +69,9 @@ FinalChain::FinalChain(const std::shared_ptr& db, const taraxa::FullNodeConf auto period_system_transactions = db_->getPeriodSystemTransactionsHashes(block_n); num_executed_trx_ -= period_system_transactions.size(); } - db_->insert(batch, DB::Columns::status, StatusDbField::ExecutedBlkCount, num_executed_dag_blk_.load()); - db_->insert(batch, DB::Columns::status, StatusDbField::ExecutedTrxCount, num_executed_trx_.load()); - db_->insert(batch, DB::Columns::final_chain_meta, DBMetaKeys::LAST_NUMBER, state_db_descriptor.blk_num); + db_->insert(batch, DbStorage::Columns::status, StatusDbField::ExecutedBlkCount, num_executed_dag_blk_.load()); + db_->insert(batch, DbStorage::Columns::status, StatusDbField::ExecutedTrxCount, num_executed_trx_.load()); + db_->insert(batch, DbStorage::Columns::final_chain_meta, DBMetaKeys::LAST_NUMBER, state_db_descriptor.blk_num); db_->commitWriteBatch(batch); last_blk_num = state_db_descriptor.blk_num; } @@ -210,8 +214,8 @@ std::shared_ptr FinalChain::finalize_(PeriodData&& new auto num_executed_dag_blk = num_executed_dag_blk_ + finalized_dag_blk_hashes.size(); auto 
num_executed_trx = num_executed_trx_ + all_transactions.size(); if (!finalized_dag_blk_hashes.empty()) { - db_->insert(batch, DB::Columns::status, StatusDbField::ExecutedBlkCount, num_executed_dag_blk); - db_->insert(batch, DB::Columns::status, StatusDbField::ExecutedTrxCount, num_executed_trx); + db_->insert(batch, DbStorage::Columns::status, StatusDbField::ExecutedBlkCount, num_executed_dag_blk); + db_->insert(batch, DbStorage::Columns::status, StatusDbField::ExecutedTrxCount, num_executed_trx); LOG(log_nf_) << "Executed dag blocks #" << num_executed_dag_blk_ - finalized_dag_blk_hashes.size() << "-" << num_executed_dag_blk_ - 1 << " , Transactions count: " << all_transactions.size(); } @@ -280,21 +284,21 @@ void FinalChain::prune(EthBlockNumber blk_n) { } auto block_to_prune = getBlockHeader(last_block_to_keep->number - 1); while (block_to_prune && block_to_prune->number > 0) { - db_->remove(DB::Columns::final_chain_blk_by_number, block_to_prune->number); - db_->remove(DB::Columns::final_chain_blk_hash_by_number, block_to_prune->number); - db_->remove(DB::Columns::final_chain_blk_number_by_hash, block_to_prune->hash); + db_->remove(DbStorage::Columns::final_chain_blk_by_number, block_to_prune->number); + db_->remove(DbStorage::Columns::final_chain_blk_hash_by_number, block_to_prune->number); + db_->remove(DbStorage::Columns::final_chain_blk_number_by_hash, block_to_prune->hash); block_to_prune = getBlockHeader(block_to_prune->number - 1); } - db_->compactColumn(DB::Columns::final_chain_blk_by_number); - db_->compactColumn(DB::Columns::final_chain_blk_hash_by_number); - db_->compactColumn(DB::Columns::final_chain_blk_number_by_hash); + db_->compactColumn(DbStorage::Columns::final_chain_blk_by_number); + db_->compactColumn(DbStorage::Columns::final_chain_blk_hash_by_number); + db_->compactColumn(DbStorage::Columns::final_chain_blk_number_by_hash); state_api_.prune(state_root_to_keep, last_block_to_keep->number); } } -std::shared_ptr 
FinalChain::appendBlock(DB::Batch& batch, const addr_t& author, uint64_t timestamp, +std::shared_ptr FinalChain::appendBlock(Batch& batch, const addr_t& author, uint64_t timestamp, uint64_t gas_limit, const h256& state_root, u256 total_reward, const SharedTransactions& transactions, const TransactionReceipts& receipts, const bytes& extra_data) { @@ -320,7 +324,7 @@ std::shared_ptr FinalChain::appendBlock(DB::Batch& batch, const add const auto& receipt = receipts[trx_idx]; receipts_trie[i_rlp] = util::rlp_enc(rlp_strm, receipt); - db_->insert(batch, DB::Columns::final_chain_receipt_by_trx_hash, trx->getHash(), rlp_strm.out()); + db_->insert(batch, DbStorage::Columns::final_chain_receipt_by_trx_hash, trx->getHash(), rlp_strm.out()); blk_header.log_bloom |= receipt.bloom(); } @@ -328,18 +332,20 @@ std::shared_ptr FinalChain::appendBlock(DB::Batch& batch, const add blk_header.receipts_root = hash256(receipts_trie); rlp_strm.clear(), blk_header.ethereum_rlp(rlp_strm); blk_header.hash = dev::sha3(rlp_strm.out()); - db_->insert(batch, DB::Columns::final_chain_blk_by_number, blk_header.number, util::rlp_enc(rlp_strm, blk_header)); + db_->insert(batch, DbStorage::Columns::final_chain_blk_by_number, blk_header.number, + util::rlp_enc(rlp_strm, blk_header)); auto log_bloom_for_index = blk_header.log_bloom; log_bloom_for_index.shiftBloom<3>(sha3(blk_header.author.ref())); for (uint64_t level = 0, index = blk_header.number; level < c_bloomIndexLevels; ++level, index /= c_bloomIndexSize) { auto chunk_id = blockBloomsChunkId(level, index / c_bloomIndexSize); auto chunk_to_alter = blockBlooms(chunk_id); chunk_to_alter[index % c_bloomIndexSize] |= log_bloom_for_index; - db_->insert(batch, DB::Columns::final_chain_log_blooms_index, chunk_id, util::rlp_enc(rlp_strm, chunk_to_alter)); + db_->insert(batch, DbStorage::Columns::final_chain_log_blooms_index, chunk_id, + util::rlp_enc(rlp_strm, chunk_to_alter)); } - db_->insert(batch, DB::Columns::final_chain_blk_hash_by_number, 
blk_header.number, blk_header.hash); - db_->insert(batch, DB::Columns::final_chain_blk_number_by_hash, blk_header.hash, blk_header.number); - db_->insert(batch, DB::Columns::final_chain_meta, DBMetaKeys::LAST_NUMBER, blk_header.number); + db_->insert(batch, DbStorage::Columns::final_chain_blk_hash_by_number, blk_header.number, blk_header.hash); + db_->insert(batch, DbStorage::Columns::final_chain_blk_number_by_hash, blk_header.hash, blk_header.number); + db_->insert(batch, DbStorage::Columns::final_chain_meta, DBMetaKeys::LAST_NUMBER, blk_header.number); return blk_header_ptr; } @@ -347,7 +353,7 @@ std::shared_ptr FinalChain::appendBlock(DB::Batch& batch, const add EthBlockNumber FinalChain::lastBlockNumber() const { return last_block_number_; } std::optional FinalChain::blockNumber(const h256& h) const { - return db_->lookup_int(h, DB::Columns::final_chain_blk_number_by_hash); + return db_->lookup_int(h, DbStorage::Columns::final_chain_blk_number_by_hash); } std::optional FinalChain::blockHash(std::optional n) const { @@ -366,7 +372,7 @@ std::optional FinalChain::transactionLocation(const h256& t } std::optional FinalChain::transactionReceipt(const h256& trx_h) const { - auto raw = db_->lookup(trx_h, DB::Columns::final_chain_receipt_by_trx_hash); + auto raw = db_->lookup(trx_h, DbStorage::Columns::final_chain_receipt_by_trx_hash); if (raw.empty()) { return {}; } @@ -529,7 +535,7 @@ const SharedTransactions FinalChain::getTransactions(std::optional FinalChain::getBlockHeader(EthBlockNumber n) const { - if (auto raw = db_->lookup(n, DB::Columns::final_chain_blk_by_number); !raw.empty()) { + if (auto raw = db_->lookup(n, DbStorage::Columns::final_chain_blk_by_number); !raw.empty()) { auto ret = std::make_shared(); ret->rlp(dev::RLP(raw)); return ret; @@ -538,7 +544,7 @@ std::shared_ptr FinalChain::getBlockHeader(EthBlockNumber n) } std::optional FinalChain::getBlockHash(EthBlockNumber n) const { - auto raw = db_->lookup(n, 
DB::Columns::final_chain_blk_hash_by_number); + auto raw = db_->lookup(n, DbStorage::Columns::final_chain_blk_hash_by_number); if (raw.empty()) { return {}; } @@ -563,7 +569,7 @@ void FinalChain::appendEvmTransactions(std::vector& e } BlocksBlooms FinalChain::blockBlooms(const h256& chunk_id) const { - if (auto raw = db_->lookup(chunk_id, DB::Columns::final_chain_log_blooms_index); !raw.empty()) { + if (auto raw = db_->lookup(chunk_id, DbStorage::Columns::final_chain_log_blooms_index); !raw.empty()) { return dev::RLP(raw).toArray(); } return {}; diff --git a/libraries/core_libs/consensus/src/final_chain/state_api.cpp b/libraries/core_libs/consensus/src/final_chain/state_api.cpp index eba9cf45dc..9366ae901b 100644 --- a/libraries/core_libs/consensus/src/final_chain/state_api.cpp +++ b/libraries/core_libs/consensus/src/final_chain/state_api.cpp @@ -7,6 +7,7 @@ #include #include "common/encoding_rlp.hpp" +#include "config/state_config.hpp" static_assert(sizeof(char) == sizeof(uint8_t)); diff --git a/libraries/core_libs/consensus/src/pbft/pbft_chain.cpp b/libraries/core_libs/consensus/src/pbft/pbft_chain.cpp index f728b85673..b34fd7cdaa 100644 --- a/libraries/core_libs/consensus/src/pbft/pbft_chain.cpp +++ b/libraries/core_libs/consensus/src/pbft/pbft_chain.cpp @@ -2,7 +2,6 @@ #include -#include "common/jsoncpp.hpp" #include "pbft/pbft_manager.hpp" using namespace std; diff --git a/libraries/core_libs/consensus/src/pbft/pbft_manager.cpp b/libraries/core_libs/consensus/src/pbft/pbft_manager.cpp index 646c65ee13..d5c2f5562a 100644 --- a/libraries/core_libs/consensus/src/pbft/pbft_manager.cpp +++ b/libraries/core_libs/consensus/src/pbft/pbft_manager.cpp @@ -632,7 +632,7 @@ void PbftManager::broadcastVotes() { } } -void PbftManager::testBroadcatVotesFunctionality() { +void PbftManager::testBroadcastVotesFunctionality() { // Set these variables to force broadcastVotes() send votes current_round_start_datetime_ = time_point{}; current_period_start_datetime_ = time_point{}; 
diff --git a/libraries/core_libs/consensus/src/pillar_chain/pillar_block.cpp b/libraries/core_libs/consensus/src/pillar_chain/pillar_block.cpp index bcd0c359db..c35c896389 100644 --- a/libraries/core_libs/consensus/src/pillar_chain/pillar_block.cpp +++ b/libraries/core_libs/consensus/src/pillar_chain/pillar_block.cpp @@ -1,5 +1,6 @@ #include "pillar_chain/pillar_block.hpp" +#include #include #include "common/encoding_rlp.hpp" diff --git a/libraries/core_libs/consensus/src/pillar_chain/pillar_chain_manager.cpp b/libraries/core_libs/consensus/src/pillar_chain/pillar_chain_manager.cpp index f0dc86b5c5..d582f94050 100644 --- a/libraries/core_libs/consensus/src/pillar_chain/pillar_chain_manager.cpp +++ b/libraries/core_libs/consensus/src/pillar_chain/pillar_chain_manager.cpp @@ -2,6 +2,7 @@ #include +#include "config/hardfork.hpp" #include "final_chain/final_chain.hpp" #include "key_manager/key_manager.hpp" #include "network/network.hpp" diff --git a/libraries/core_libs/consensus/src/rewards/rewards_stats.cpp b/libraries/core_libs/consensus/src/rewards/rewards_stats.cpp index b3bf47d6c6..f7c624603e 100644 --- a/libraries/core_libs/consensus/src/rewards/rewards_stats.cpp +++ b/libraries/core_libs/consensus/src/rewards/rewards_stats.cpp @@ -5,7 +5,7 @@ #include "storage/storage.hpp" namespace taraxa::rewards { -Stats::Stats(uint32_t committee_size, const HardforksConfig& hardforks, std::shared_ptr db, +Stats::Stats(uint32_t committee_size, const HardforksConfig& hardforks, std::shared_ptr db, std::function&& dpos_eligible_total_vote_count, EthBlockNumber last_blk_num) : kCommitteeSize(committee_size), kHardforksConfig(hardforks), @@ -19,7 +19,7 @@ void Stats::recoverFromDb(EthBlockNumber lastBlockNumber) { clear(lastBlockNumber); } - auto iterator = db_->getColumnIterator(DB::Columns::block_rewards_stats); + auto iterator = db_->getColumnIterator(DbStorage::Columns::block_rewards_stats); for (iterator->SeekToFirst(); iterator->Valid(); iterator->Next()) { PbftPeriod 
period; memcpy(&period, iterator->key().data(), sizeof(PbftPeriod)); @@ -27,10 +27,10 @@ void Stats::recoverFromDb(EthBlockNumber lastBlockNumber) { } } -void Stats::saveBlockStats(uint64_t period, const BlockStats& stats, DbStorage::Batch& write_batch) { +void Stats::saveBlockStats(uint64_t period, const BlockStats& stats, Batch& write_batch) { dev::RLPStream encoding; stats.rlp(encoding); - db_->insert(write_batch, DB::Columns::block_rewards_stats, period, encoding.out()); + db_->insert(write_batch, DbStorage::Columns::block_rewards_stats, period, encoding.out()); } uint32_t Stats::getCurrentDistributionFrequency(uint64_t current_block) const { @@ -47,7 +47,7 @@ void Stats::clear(uint64_t current_period) { if (frequency > 1 && current_period % frequency == 0) { // clear need to be called on vector because it was moved before blocks_stats_.clear(); - db_->deleteColumnData(DB::Columns::block_rewards_stats); + db_->deleteColumnData(DbStorage::Columns::block_rewards_stats); } } @@ -67,7 +67,7 @@ BlockStats Stats::getBlockStats(const PeriodData& blk, const std::vector& } std::vector Stats::processStats(const PeriodData& current_blk, const std::vector& trxs_gas_used, - DbStorage::Batch& write_batch) { + Batch& write_batch) { const auto current_period = current_blk.pbft_blk->getPeriod(); const auto frequency = getCurrentDistributionFrequency(current_period); auto block_stats = getBlockStats(current_blk, trxs_gas_used); diff --git a/libraries/core_libs/consensus/src/slashing_manager/slashing_manager.cpp b/libraries/core_libs/consensus/src/slashing_manager/slashing_manager.cpp index e3b7ec51b6..8aa4e2f531 100644 --- a/libraries/core_libs/consensus/src/slashing_manager/slashing_manager.cpp +++ b/libraries/core_libs/consensus/src/slashing_manager/slashing_manager.cpp @@ -2,7 +2,9 @@ #include "common/encoding_solidity.hpp" #include "common/types.hpp" +#include "config/config.hpp" #include "transaction/transaction_manager.hpp" +#include "vote/pbft_vote.hpp" namespace taraxa { 
diff --git a/libraries/core_libs/consensus/src/transaction/gas_pricer.cpp b/libraries/core_libs/consensus/src/transaction/gas_pricer.cpp index 84f2611f4d..4fcf9c7439 100644 --- a/libraries/core_libs/consensus/src/transaction/gas_pricer.cpp +++ b/libraries/core_libs/consensus/src/transaction/gas_pricer.cpp @@ -1,5 +1,8 @@ #include "transaction/gas_pricer.hpp" +#include "final_chain/final_chain.hpp" +#include "storage/storage.hpp" + namespace taraxa { GasPricer::GasPricer(const GasPriceConfig& config, bool is_light_node, std::shared_ptr db) @@ -25,7 +28,7 @@ u256 GasPricer::bid() const { void GasPricer::init(const std::shared_ptr& db) { const auto last_blk_num = - db->lookup_int(final_chain::DBMetaKeys::LAST_NUMBER, DB::Columns::final_chain_meta); + db->lookup_int(final_chain::DBMetaKeys::LAST_NUMBER, DbStorage::Columns::final_chain_meta); if (!last_blk_num || *last_blk_num == 0) return; auto block_num = *last_blk_num; diff --git a/libraries/core_libs/consensus/src/transaction/transaction_manager.cpp b/libraries/core_libs/consensus/src/transaction/transaction_manager.cpp index 303daf57d9..c737efcfb1 100644 --- a/libraries/core_libs/consensus/src/transaction/transaction_manager.cpp +++ b/libraries/core_libs/consensus/src/transaction/transaction_manager.cpp @@ -4,12 +4,12 @@ #include #include -#include "dag/dag.hpp" +#include "config/config.hpp" #include "logger/logger.hpp" #include "transaction/transaction.hpp" namespace taraxa { -TransactionManager::TransactionManager(FullNodeConfig const &conf, std::shared_ptr db, +TransactionManager::TransactionManager(const FullNodeConfig &conf, std::shared_ptr db, std::shared_ptr final_chain, addr_t node_addr) : kConf(conf), transactions_pool_(final_chain, kConf.transactions_pool_size), @@ -192,7 +192,7 @@ void TransactionManager::saveTransactionsFromDagBlock(SharedTransactions const & const auto account = final_chain_->getAccount(t->getSender()).value_or(taraxa::state_api::ZeroAccount); const auto tx_hash = t->getHash(); - // 
Cheacking nonce in cheaper than checking db, verify with nonce if possible + // Checking nonce in cheaper than checking db, verify with nonce if possible bool trx_not_executed = account.nonce < t->getNonce() || !db_->transactionFinalized(tx_hash); if (trx_not_executed) { @@ -235,7 +235,7 @@ void TransactionManager::recoverNonfinalizedTransactions() { // line can be removed or replaced with an assert db_->removeTransactionToBatch(trx_hash, write_batch); } else { - // Cache sender now by caling getSender since getting sender later on proposing blocks can affect performance + // Cache sender now by calling getSender since getting sender later on proposing blocks can affect performance trxs[i]->getSender(); nonfinalized_transactions_in_dag_.emplace(trx_hash, std::move(trxs[i])); } @@ -253,9 +253,9 @@ bool TransactionManager::nonProposableTransactionsOverTheLimit() const { return transactions_pool_.nonProposableTransactionsOverTheLimit(); } -bool TransactionManager::isTransactionPoolFull(size_t precentage) const { +bool TransactionManager::isTransactionPoolFull(size_t percentage) const { std::shared_lock transactions_lock(transactions_mutex_); - return transactions_pool_.size() >= (kConf.transactions_pool_size * precentage / 100); + return transactions_pool_.size() >= (kConf.transactions_pool_size * percentage / 100); } size_t TransactionManager::getNonfinalizedTrxSize() const { diff --git a/libraries/core_libs/consensus/src/transaction/transaction_queue.cpp b/libraries/core_libs/consensus/src/transaction/transaction_queue.cpp index 448541e495..ba7360f774 100644 --- a/libraries/core_libs/consensus/src/transaction/transaction_queue.cpp +++ b/libraries/core_libs/consensus/src/transaction/transaction_queue.cpp @@ -135,7 +135,7 @@ TransactionStatus TransactionQueue::insert(std::shared_ptr &&transa assert(nonce_it->second->getHash() != tx_hash); // Replace transaction if gas price higher if (transaction->getGasPrice() > nonce_it->second->getGasPrice()) { - // Place same 
nonce transaction with lower gas price in non propsable transactions since it could be + // Place same nonce transaction with lower gas price in non proposable transactions since it could be // possible that some dag block might contain it non_proposable_transactions_[nonce_it->second->getHash()] = {last_block_number, nonce_it->second}; queue_transactions_.erase(nonce_it->second->getHash()); diff --git a/libraries/core_libs/consensus/src/vote_manager/vote_manager.cpp b/libraries/core_libs/consensus/src/vote_manager/vote_manager.cpp index 9e20441966..1a1e031f69 100644 --- a/libraries/core_libs/consensus/src/vote_manager/vote_manager.cpp +++ b/libraries/core_libs/consensus/src/vote_manager/vote_manager.cpp @@ -7,8 +7,6 @@ #include #include "network/network.hpp" -#include "network/tarcap/packets_handlers/latest/vote_packet_handler.hpp" -#include "network/tarcap/packets_handlers/latest/votes_bundle_packet_handler.hpp" #include "pbft/pbft_manager.hpp" namespace taraxa { @@ -583,7 +581,7 @@ PbftPeriod VoteManager::getRewardVotesPbftBlockPeriod() { } void VoteManager::resetRewardVotes(PbftPeriod period, PbftRound round, PbftStep step, const blk_hash_t& block_hash, - DbStorage::Batch& batch) { + Batch& batch) { // Save 2t+1 cert votes to database, remove old reward votes { std::scoped_lock lock(reward_votes_info_mutex_); @@ -815,7 +813,7 @@ void VoteManager::saveOwnVerifiedVote(const std::shared_ptr& vote) { std::vector> VoteManager::getOwnVerifiedVotes() { return own_verified_votes_; } -void VoteManager::clearOwnVerifiedVotes(DbStorage::Batch& write_batch) { +void VoteManager::clearOwnVerifiedVotes(Batch& write_batch) { db_->clearOwnVerifiedVotes(write_batch, own_verified_votes_); own_verified_votes_.clear(); } @@ -988,7 +986,7 @@ bool VoteManager::genAndValidateVrfSortition(PbftPeriod pbft_period, PbftRound p return false; } } catch (state_api::ErrFutureBlock& e) { - LOG(log_er_) << "Unable to generate vrf sorititon for period " << pbft_period << ", round " << pbft_round 
+ LOG(log_er_) << "Unable to generate vrf sortition for period " << pbft_period << ", round " << pbft_round << ". Period is too far ahead of actual finalized pbft chain size (" << final_chain_->lastBlockNumber() << "). Err msg: " << e.what(); return false; diff --git a/libraries/core_libs/network/include/network/network.hpp b/libraries/core_libs/network/include/network/network.hpp index 30fd397944..36d55064b5 100644 --- a/libraries/core_libs/network/include/network/network.hpp +++ b/libraries/core_libs/network/include/network/network.hpp @@ -7,15 +7,9 @@ #include #include -#include #include -#include -#include -#include -#include #include "common/thread_pool.hpp" -#include "common/util.hpp" #include "config/config.hpp" #include "network/tarcap/taraxa_capability.hpp" #include "network/tarcap/tarcap_version.hpp" diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/common/ext_syncing_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/common/ext_syncing_packet_handler.hpp index 16b9058076..fa79bffaad 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/common/ext_syncing_packet_handler.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/common/ext_syncing_packet_handler.hpp @@ -1,6 +1,5 @@ #pragma once -#include "dag/dag_block.hpp" #include "dag/dag_manager.hpp" #include "packet_handler.hpp" diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/common/packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/common/packet_handler.hpp index 6262b6b477..0eb23e7d34 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/common/packet_handler.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/common/packet_handler.hpp @@ -5,7 +5,6 @@ #include #include -#include "common/thread_pool.hpp" 
#include "exceptions.hpp" #include "logger/logger.hpp" #include "network/tarcap/packet_types.hpp" diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/get_dag_sync_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/get_dag_sync_packet_handler.hpp index 4c48aa779b..5c7c01e7ea 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/get_dag_sync_packet_handler.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/get_dag_sync_packet_handler.hpp @@ -1,6 +1,7 @@ #pragma once #include "common/packet_handler.hpp" +#include "transaction/transaction.hpp" namespace taraxa { class DagManager; diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/get_pillar_votes_bundle_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/get_pillar_votes_bundle_packet_handler.hpp index f487db5e30..9d85989729 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/get_pillar_votes_bundle_packet_handler.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/get_pillar_votes_bundle_packet_handler.hpp @@ -1,7 +1,6 @@ #pragma once #include "common/packet_handler.hpp" -#include "pillar_chain/pillar_block.hpp" #include "pillar_chain/pillar_chain_manager.hpp" namespace taraxa::network::tarcap { diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/pbft_sync_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/pbft_sync_packet_handler.hpp index 2fc0fb5a25..263b326ad7 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/pbft_sync_packet_handler.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/pbft_sync_packet_handler.hpp @@ -1,6 +1,7 @@ #pragma once #include 
"common/ext_syncing_packet_handler.hpp" +#include "common/thread_pool.hpp" #include "vote_manager/vote_manager.hpp" namespace taraxa::network::tarcap { diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/transaction_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/transaction_packet_handler.hpp index ed9d7e8a3d..47ba929275 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/transaction_packet_handler.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/transaction_packet_handler.hpp @@ -1,7 +1,6 @@ #pragma once #include "common/packet_handler.hpp" -#include "dag/dag_block.hpp" #include "transaction/transaction.hpp" namespace taraxa { diff --git a/libraries/core_libs/network/include/network/tarcap/shared_states/pbft_syncing_state.hpp b/libraries/core_libs/network/include/network/tarcap/shared_states/pbft_syncing_state.hpp index 0c6145a77d..c6681cbfdb 100644 --- a/libraries/core_libs/network/include/network/tarcap/shared_states/pbft_syncing_state.hpp +++ b/libraries/core_libs/network/include/network/tarcap/shared_states/pbft_syncing_state.hpp @@ -1,9 +1,10 @@ #pragma once #include +#include +#include -#include "common/util.hpp" -#include "libp2p/Common.h" +#include "common/types.hpp" namespace taraxa::network::tarcap { diff --git a/libraries/core_libs/network/include/network/tarcap/shared_states/peers_state.hpp b/libraries/core_libs/network/include/network/tarcap/shared_states/peers_state.hpp index ec89429fdc..a8411b3e72 100644 --- a/libraries/core_libs/network/include/network/tarcap/shared_states/peers_state.hpp +++ b/libraries/core_libs/network/include/network/tarcap/shared_states/peers_state.hpp @@ -2,13 +2,11 @@ #include "common/util.hpp" #include "config/config.hpp" -#include "dag/dag_block.hpp" #include "libp2p/Common.h" #include "libp2p/Host.h" #include "network/tarcap/packet_types.hpp" #include 
"network/tarcap/stats/time_period_packets_stats.hpp" #include "network/tarcap/taraxa_peer.hpp" -#include "transaction/transaction.hpp" namespace taraxa::network::tarcap { diff --git a/libraries/core_libs/network/include/network/tarcap/stats/max_stats.hpp b/libraries/core_libs/network/include/network/tarcap/stats/max_stats.hpp index 1cabc4bd3b..dadb180309 100644 --- a/libraries/core_libs/network/include/network/tarcap/stats/max_stats.hpp +++ b/libraries/core_libs/network/include/network/tarcap/stats/max_stats.hpp @@ -1,6 +1,7 @@ #pragma once -#include "json/value.h" +#include + #include "network/tarcap/stats/packet_stats.hpp" namespace taraxa::network::tarcap { diff --git a/libraries/core_libs/network/include/network/tarcap/stats/node_stats.hpp b/libraries/core_libs/network/include/network/tarcap/stats/node_stats.hpp index c6feffb50e..bd66040358 100644 --- a/libraries/core_libs/network/include/network/tarcap/stats/node_stats.hpp +++ b/libraries/core_libs/network/include/network/tarcap/stats/node_stats.hpp @@ -1,7 +1,8 @@ #pragma once +#include + #include "common/types.hpp" -#include "json/value.h" #include "logger/logger.hpp" #include "network/tarcap/tarcap_version.hpp" diff --git a/libraries/core_libs/network/include/network/tarcap/stats/packet_stats.hpp b/libraries/core_libs/network/include/network/tarcap/stats/packet_stats.hpp index bc00fa9fb3..32c962f05c 100644 --- a/libraries/core_libs/network/include/network/tarcap/stats/packet_stats.hpp +++ b/libraries/core_libs/network/include/network/tarcap/stats/packet_stats.hpp @@ -1,11 +1,10 @@ #pragma once +#include #include #include -#include "json/value.h" - namespace taraxa::network::tarcap { /** diff --git a/libraries/core_libs/network/include/network/tarcap/stats/packets_stats.hpp b/libraries/core_libs/network/include/network/tarcap/stats/packets_stats.hpp index fd1db55f78..45fce26778 100644 --- a/libraries/core_libs/network/include/network/tarcap/stats/packets_stats.hpp +++ 
b/libraries/core_libs/network/include/network/tarcap/stats/packets_stats.hpp @@ -1,9 +1,9 @@ #pragma once -#include +#include -#include "network/tarcap/stats/max_stats.hpp" -#include "network/tarcap/stats/packets_stats.hpp" +#include "max_stats.hpp" +#include "packets_stats.hpp" namespace taraxa::network::tarcap { diff --git a/libraries/core_libs/network/include/network/tarcap/taraxa_peer.hpp b/libraries/core_libs/network/include/network/tarcap/taraxa_peer.hpp index 08b009eefe..424e1b787b 100644 --- a/libraries/core_libs/network/include/network/tarcap/taraxa_peer.hpp +++ b/libraries/core_libs/network/include/network/tarcap/taraxa_peer.hpp @@ -5,6 +5,7 @@ #include #include +#include "common/types.hpp" #include "common/util.hpp" #include "network/tarcap/stats/packets_stats.hpp" diff --git a/libraries/core_libs/network/include/network/threadpool/packet_data.hpp b/libraries/core_libs/network/include/network/threadpool/packet_data.hpp index af521b17ac..ab530a19b6 100644 --- a/libraries/core_libs/network/include/network/threadpool/packet_data.hpp +++ b/libraries/core_libs/network/include/network/threadpool/packet_data.hpp @@ -1,10 +1,10 @@ #pragma once +#include #include #include -#include "json/value.h" #include "network/tarcap/packet_types.hpp" namespace taraxa::network::threadpool { diff --git a/libraries/core_libs/network/include/network/threadpool/packets_blocking_mask.hpp b/libraries/core_libs/network/include/network/threadpool/packets_blocking_mask.hpp index be5823cec5..10fce1493a 100644 --- a/libraries/core_libs/network/include/network/threadpool/packets_blocking_mask.hpp +++ b/libraries/core_libs/network/include/network/threadpool/packets_blocking_mask.hpp @@ -2,9 +2,7 @@ #include -#include #include -#include #include "network/tarcap/packet_types.hpp" #include "network/threadpool/packet_data.hpp" diff --git a/libraries/core_libs/network/include/network/threadpool/priority_queue.hpp b/libraries/core_libs/network/include/network/threadpool/priority_queue.hpp index 
196b8407a7..365bda2dd9 100644 --- a/libraries/core_libs/network/include/network/threadpool/priority_queue.hpp +++ b/libraries/core_libs/network/include/network/threadpool/priority_queue.hpp @@ -6,7 +6,6 @@ #include #include "logger/logger.hpp" -#include "network/tarcap/packet_types.hpp" #include "network/tarcap/tarcap_version.hpp" #include "network/threadpool/packets_blocking_mask.hpp" #include "packets_queue.hpp" diff --git a/libraries/core_libs/network/rpc/Taraxa.cpp b/libraries/core_libs/network/rpc/Taraxa.cpp index 04eb8a47da..92aaef506a 100644 --- a/libraries/core_libs/network/rpc/Taraxa.cpp +++ b/libraries/core_libs/network/rpc/Taraxa.cpp @@ -1,12 +1,13 @@ #include "Taraxa.h" +#include #include #include #include #include +#include "config/version.hpp" #include "dag/dag_manager.hpp" -#include "json/reader.h" #include "pbft/pbft_manager.hpp" #include "transaction/transaction_manager.hpp" diff --git a/libraries/core_libs/network/src/http_server.cpp b/libraries/core_libs/network/src/http_server.cpp index 7b681ac3fb..1c82a131db 100644 --- a/libraries/core_libs/network/src/http_server.cpp +++ b/libraries/core_libs/network/src/http_server.cpp @@ -101,7 +101,7 @@ void HttpConnection::read() { response_ = server_->request_processor_->process(request_); boost::beast::http::async_write( socket_, response_, - [this_sp = getShared()](auto const & /*ec*/, auto /*bytes_transfered*/) { this_sp->stop(); }); + [this_sp = getShared()](auto const & /*ec*/, auto /*bytes_transferred*/) { this_sp->stop(); }); } }); } diff --git a/libraries/core_libs/network/src/network.cpp b/libraries/core_libs/network/src/network.cpp index 248f773951..07ad0d602e 100644 --- a/libraries/core_libs/network/src/network.cpp +++ b/libraries/core_libs/network/src/network.cpp @@ -5,7 +5,6 @@ #include #include -#include #include "config/version.hpp" #include "network/tarcap/packets_handlers/latest/dag_block_packet_handler.hpp" diff --git 
a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/common/ext_votes_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/common/ext_votes_packet_handler.cpp index 21431f602c..37ed74f413 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/common/ext_votes_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/common/ext_votes_packet_handler.cpp @@ -1,5 +1,6 @@ #include "network/tarcap/packets_handlers/latest/common/ext_votes_packet_handler.hpp" +#include "network/tarcap/packets_handlers/latest/common/exceptions.hpp" #include "pbft/pbft_manager.hpp" #include "vote/pbft_vote.hpp" #include "vote/votes_bundle_rlp.hpp" diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/common/packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/common/packet_handler.cpp index 5e540af594..0113a486aa 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/common/packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/common/packet_handler.cpp @@ -1,5 +1,6 @@ #include "network/tarcap/packets_handlers/latest/common/packet_handler.hpp" +#include "network/tarcap/packets_handlers/latest/common/exceptions.hpp" #include "network/tarcap/stats/time_period_packets_stats.hpp" namespace taraxa::network::tarcap { diff --git a/libraries/core_libs/network/src/tarcap/stats/node_stats.cpp b/libraries/core_libs/network/src/tarcap/stats/node_stats.cpp index f9e1a175b3..d1854de97a 100644 --- a/libraries/core_libs/network/src/tarcap/stats/node_stats.cpp +++ b/libraries/core_libs/network/src/tarcap/stats/node_stats.cpp @@ -11,6 +11,7 @@ #include "pbft/pbft_manager.hpp" #include "transaction/transaction_manager.hpp" #include "vote_manager/vote_manager.hpp" + namespace taraxa::network::tarcap { NodeStats::NodeStats(std::shared_ptr pbft_syncing_state, std::shared_ptr pbft_chain, diff --git 
a/libraries/core_libs/network/src/tarcap/stats/time_period_packets_stats.cpp b/libraries/core_libs/network/src/tarcap/stats/time_period_packets_stats.cpp index d59a767285..9301af99d9 100644 --- a/libraries/core_libs/network/src/tarcap/stats/time_period_packets_stats.cpp +++ b/libraries/core_libs/network/src/tarcap/stats/time_period_packets_stats.cpp @@ -1,8 +1,9 @@ #include "network/tarcap/stats/time_period_packets_stats.hpp" +#include + #include "common/util.hpp" -#include "json/writer.h" -#include "network/tarcap/shared_states/peers_state.hpp" +#include "network/tarcap/taraxa_peer.hpp" namespace taraxa::network::tarcap { diff --git a/libraries/core_libs/network/src/tarcap/taraxa_capability.cpp b/libraries/core_libs/network/src/tarcap/taraxa_capability.cpp index 562feb5f38..079143cb9f 100644 --- a/libraries/core_libs/network/src/tarcap/taraxa_capability.cpp +++ b/libraries/core_libs/network/src/tarcap/taraxa_capability.cpp @@ -23,7 +23,6 @@ #include "pillar_chain/pillar_chain_manager.hpp" #include "slashing_manager/slashing_manager.hpp" #include "transaction/transaction_manager.hpp" -#include "vote/pbft_vote.hpp" namespace taraxa::network::tarcap { diff --git a/libraries/core_libs/network/src/ws_server.cpp b/libraries/core_libs/network/src/ws_server.cpp index d8f74f4470..a2029556f9 100644 --- a/libraries/core_libs/network/src/ws_server.cpp +++ b/libraries/core_libs/network/src/ws_server.cpp @@ -7,9 +7,7 @@ #include #include "common/jsoncpp.hpp" -#include "common/util.hpp" -#include "config/config.hpp" -#include "network/rpc/eth/Eth.h" +#include "network/rpc/eth/data.hpp" namespace taraxa::net { diff --git a/libraries/core_libs/node/include/node/node.hpp b/libraries/core_libs/node/include/node/node.hpp index 5e8ba477d5..77573b7d78 100644 --- a/libraries/core_libs/node/include/node/node.hpp +++ b/libraries/core_libs/node/include/node/node.hpp @@ -5,18 +5,10 @@ #include #include -#include #include -#include -#include -#include -#include #include 
"common/thread_pool.hpp" -#include "common/util.hpp" -#include "common/vrf_wrapper.hpp" #include "config/config.hpp" -#include "config/version.hpp" #include "network/http_server.hpp" #include "network/rpc/DebugFace.h" #include "network/rpc/EthFace.h" @@ -26,7 +18,6 @@ #include "network/ws_server.hpp" #include "pbft/pbft_chain.hpp" #include "storage/storage.hpp" -#include "transaction/transaction.hpp" #include "vote_manager/vote_manager.hpp" namespace taraxa { diff --git a/libraries/core_libs/node/src/node.cpp b/libraries/core_libs/node/src/node.cpp index a0e5751b69..f7f854c705 100644 --- a/libraries/core_libs/node/src/node.cpp +++ b/libraries/core_libs/node/src/node.cpp @@ -5,7 +5,6 @@ #include #include #include -#include #include #include "dag/dag.hpp" @@ -30,7 +29,6 @@ #include "pillar_chain/pillar_chain_manager.hpp" #include "slashing_manager/slashing_manager.hpp" #include "storage/migration/migration_manager.hpp" -#include "storage/migration/transaction_period.hpp" #include "transaction/gas_pricer.hpp" #include "transaction/transaction_manager.hpp" diff --git a/libraries/core_libs/storage/include/storage/migration/migration_base.hpp b/libraries/core_libs/storage/include/storage/migration/migration_base.hpp index 5102d2541c..be2c20f433 100644 --- a/libraries/core_libs/storage/include/storage/migration/migration_base.hpp +++ b/libraries/core_libs/storage/include/storage/migration/migration_base.hpp @@ -10,7 +10,7 @@ class Base { // We need to specify version here, so in case of major version change(db reindex) we won't apply unneeded migrations virtual uint32_t dbVersion() = 0; - bool isApplied() { return db_->lookup_int(id(), DB::Columns::migrations).has_value(); } + bool isApplied() { return db_->lookup_int(id(), DbStorage::Columns::migrations).has_value(); } void apply(logger::Logger& log) { migrate(log); @@ -22,9 +22,9 @@ class Base { // Method with custom logic. 
All db changes should be made using `batch_` virtual void migrate(logger::Logger& log) = 0; - void setApplied() { db_->insert(batch_, DB::Columns::migrations, id(), true); } + void setApplied() { db_->insert(batch_, DbStorage::Columns::migrations, id(), true); } std::shared_ptr db_; - DB::Batch batch_; + Batch batch_; }; } // namespace taraxa::storage::migration \ No newline at end of file diff --git a/libraries/core_libs/storage/include/storage/migration/transaction_period.hpp b/libraries/core_libs/storage/include/storage/migration/transaction_period.hpp index 4baf0a6ad7..b29786e785 100644 --- a/libraries/core_libs/storage/include/storage/migration/transaction_period.hpp +++ b/libraries/core_libs/storage/include/storage/migration/transaction_period.hpp @@ -1,8 +1,6 @@ #pragma once #include -#include "common/thread_pool.hpp" -#include "pbft/period_data.hpp" #include "storage/migration/migration_base.hpp" namespace taraxa::storage::migration { diff --git a/libraries/core_libs/storage/include/storage/storage.hpp b/libraries/core_libs/storage/include/storage/storage.hpp index 4f33fa06c5..6be77acc4f 100644 --- a/libraries/core_libs/storage/include/storage/storage.hpp +++ b/libraries/core_libs/storage/include/storage/storage.hpp @@ -7,7 +7,7 @@ #include #include -#include +#include #include "common/types.hpp" #include "dag/dag_block.hpp" @@ -16,7 +16,6 @@ #include "pbft/period_data.hpp" #include "pillar_chain/pillar_block.hpp" #include "storage/uint_comparator.hpp" -#include "transaction/system_transaction.hpp" #include "transaction/transaction.hpp" #include "vote/pillar_vote.hpp" #include "vote_manager/verified_votes.hpp" @@ -69,12 +68,12 @@ class DbException : public std::exception { const std::string desc_; }; +using Batch = rocksdb::WriteBatch; +using Slice = rocksdb::Slice; +using OnEntry = std::function; + class DbStorage : public std::enable_shared_from_this { public: - using Slice = rocksdb::Slice; - using Batch = rocksdb::WriteBatch; - using OnEntry = 
std::function; - class Column { string const name_; @@ -251,7 +250,7 @@ class DbStorage : public std::enable_shared_from_this { void removeDagBlockBatch(Batch& write_batch, blk_hash_t const& hash); void removeDagBlock(blk_hash_t const& hash); // Sortition params - void saveSortitionParamsChange(PbftPeriod period, const SortitionParamsChange& params, DbStorage::Batch& batch); + void saveSortitionParamsChange(PbftPeriod period, const SortitionParamsChange& params, Batch& batch); std::deque getLastSortitionParams(size_t count); std::optional getParamsChangeForPeriod(PbftPeriod period); @@ -350,7 +349,7 @@ class DbStorage : public std::enable_shared_from_this { std::vector getFinalizedDagBlockHashesByPeriod(PbftPeriod period); std::vector> getFinalizedDagBlockByPeriod(PbftPeriod period); - std::pair>> getLastPbftblockHashAndFinalizedDagBlockByPeriod( + std::pair>> getLastPbftBlockHashAndFinalizedDagBlockByPeriod( PbftPeriod period); // DPOS level to proposal period map @@ -470,6 +469,4 @@ class DbStorage : public std::enable_shared_from_this { void forEach(Column const& col, OnEntry const& f); }; -using DB = DbStorage; - } // namespace taraxa diff --git a/libraries/core_libs/storage/src/migration/transaction_period.cpp b/libraries/core_libs/storage/src/migration/transaction_period.cpp index f0dd0a6248..d2d20f107b 100644 --- a/libraries/core_libs/storage/src/migration/transaction_period.cpp +++ b/libraries/core_libs/storage/src/migration/transaction_period.cpp @@ -2,7 +2,8 @@ #include -#include "pbft/pbft_manager.hpp" +#include "common/thread_pool.hpp" +#include "common/util.hpp" namespace taraxa::storage::migration { @@ -13,7 +14,7 @@ std::string TransactionPeriod::id() { return "TransactionPeriod"; } uint32_t TransactionPeriod::dbVersion() { return 1; } void TransactionPeriod::migrate(logger::Logger& log) { - auto it = db_->getColumnIterator(DB::Columns::period_data); + auto it = db_->getColumnIterator(DbStorage::Columns::period_data); it->SeekToFirst(); if 
(!it->Valid()) { return; diff --git a/libraries/core_libs/storage/src/storage.cpp b/libraries/core_libs/storage/src/storage.cpp index fc5d2c1a62..1f91d616e8 100644 --- a/libraries/core_libs/storage/src/storage.cpp +++ b/libraries/core_libs/storage/src/storage.cpp @@ -8,10 +8,11 @@ #include "config/version.hpp" #include "dag/sortition_params_manager.hpp" -#include "final_chain/final_chain.hpp" +#include "final_chain/data.hpp" #include "pillar_chain/pillar_block.hpp" #include "rocksdb/utilities/checkpoint.h" #include "storage/uint_comparator.hpp" +#include "transaction/system_transaction.hpp" #include "vote/pbft_vote.hpp" #include "vote/votes_bundle_rlp.hpp" @@ -306,7 +307,7 @@ bool DbStorage::createSnapshot(PbftPeriod period) { LOG(log_nf_) << "Creating DB snapshot on period: " << period; - // Create rocskd checkpoint/snapshot + // Create rocksdb checkpoint/snapshot rocksdb::Checkpoint* checkpoint; auto status = rocksdb::Checkpoint::Create(db_.get(), &checkpoint); // Scope is to delete checkpoint object as soon as we don't need it anymore @@ -419,7 +420,7 @@ void DbStorage::checkStatus(rocksdb::Status const& status) { " SubCode: " + std::to_string(status.subcode()) + " Message:" + status.ToString()); } -DbStorage::Batch DbStorage::createWriteBatch() { return DbStorage::Batch(); } +Batch DbStorage::createWriteBatch() { return Batch(); } void DbStorage::commitWriteBatch(Batch& write_batch, rocksdb::WriteOptions const& opts) { auto status = db_->Write(opts, write_batch.GetWriteBatch()); @@ -615,7 +616,7 @@ void DbStorage::clearPeriodDataHistory(PbftPeriod end_period, uint64_t dag_level for (auto period = start_period; period < end_period; period++) { // Find transactions included in the old blocks and delete data related to these transactions to free // disk space - const auto& [pbft_block_hash, dag_blocks] = getLastPbftblockHashAndFinalizedDagBlockByPeriod(period); + const auto& [pbft_block_hash, dag_blocks] = getLastPbftBlockHashAndFinalizedDagBlockByPeriod(period); 
for (const auto& dag_block : dag_blocks) { for (const auto& trx_hash : dag_block->getTrxs()) { @@ -1267,7 +1268,7 @@ std::vector> DbStorage::getFinalizedDagBlockByPeriod(P } std::pair>> -DbStorage::getLastPbftblockHashAndFinalizedDagBlockByPeriod(PbftPeriod period) { +DbStorage::getLastPbftBlockHashAndFinalizedDagBlockByPeriod(PbftPeriod period) { std::vector> ret; blk_hash_t last_pbft_block_hash; if (auto period_data = getPeriodDataRaw(period); period_data.size() > 0) { diff --git a/libraries/logger/include/logger/logger.hpp b/libraries/logger/include/logger/logger.hpp index 380420bb31..da66005fa5 100644 --- a/libraries/logger/include/logger/logger.hpp +++ b/libraries/logger/include/logger/logger.hpp @@ -3,7 +3,6 @@ #include #include -#include "common/types.hpp" #include "logger/logger_config.hpp" namespace taraxa::logger { diff --git a/libraries/logger/include/logger/logger_config.hpp b/libraries/logger/include/logger/logger_config.hpp index 4330c9f3c0..5df90de654 100644 --- a/libraries/logger/include/logger/logger_config.hpp +++ b/libraries/logger/include/logger/logger_config.hpp @@ -3,16 +3,17 @@ #include #include #include -#include +#include #include -#include #include "common/types.hpp" +namespace fs = std::filesystem; + namespace taraxa::logger { // Logger verbosity -// this enum must match enum in aleth logs to corectly support aleths library +// this enum must match enum in aleth logs to correctly support aleth's library enum Verbosity { Silent = -1, Error = 0, diff --git a/libraries/metrics/src/metrics_service.cpp b/libraries/metrics/src/metrics_service.cpp index 9e7ca0989a..a6f3e24c74 100644 --- a/libraries/metrics/src/metrics_service.cpp +++ b/libraries/metrics/src/metrics_service.cpp @@ -1,8 +1,9 @@ #include "metrics/metrics_service.hpp" -#include +#include +#include -#include +#include #include #include diff --git a/libraries/types/dag_block/src/dag_block.cpp b/libraries/types/dag_block/src/dag_block.cpp index d28605f988..fb05d352db 100644 --- 
a/libraries/types/dag_block/src/dag_block.cpp +++ b/libraries/types/dag_block/src/dag_block.cpp @@ -7,11 +7,11 @@ #include +#include "common/encoding_rlp.hpp" #include "common/util.hpp" namespace taraxa { -using std::to_string; using vrf_wrapper::VrfSortitionBase; DagBlock::DagBlock(blk_hash_t pivot, level_t level, vec_blk_t tips, vec_trx_t trxs, uint64_t est, sig_t sig, diff --git a/libraries/types/pbft_block/include/pbft/pbft_block.hpp b/libraries/types/pbft_block/include/pbft/pbft_block.hpp index 42e5a7b203..cab5c843cc 100644 --- a/libraries/types/pbft_block/include/pbft/pbft_block.hpp +++ b/libraries/types/pbft_block/include/pbft/pbft_block.hpp @@ -6,9 +6,7 @@ #include #include "common/types.hpp" -#include "dag/dag_block.hpp" #include "pbft_block_extra_data.hpp" -#include "vote/pbft_vote.hpp" namespace taraxa { diff --git a/libraries/types/pbft_block/include/pbft/pbft_block_extra_data.hpp b/libraries/types/pbft_block/include/pbft/pbft_block_extra_data.hpp index 765069b45d..2e1bc4db20 100644 --- a/libraries/types/pbft_block/include/pbft/pbft_block_extra_data.hpp +++ b/libraries/types/pbft_block/include/pbft/pbft_block_extra_data.hpp @@ -1,13 +1,12 @@ #pragma once +#include #include #include #include #include #include "common/types.hpp" -#include "dag/dag_block.hpp" -#include "vote/vote.hpp" namespace taraxa { diff --git a/libraries/types/pbft_block/src/pbft_block.cpp b/libraries/types/pbft_block/src/pbft_block.cpp index e0deec66e0..6fd2de9062 100644 --- a/libraries/types/pbft_block/src/pbft_block.cpp +++ b/libraries/types/pbft_block/src/pbft_block.cpp @@ -4,6 +4,9 @@ #include +#include "common/encoding_rlp.hpp" +#include "common/util.hpp" + namespace taraxa { PbftBlock::PbftBlock(bytes const& b) : PbftBlock(dev::RLP(b)) {} @@ -56,7 +59,7 @@ void PbftBlock::calculateHash_() { if (!block_hash_) { block_hash_ = dev::sha3(rlp(true)); } else { - // Hash sould only be calculated once + // Hash should only be calculated once assert(false); } auto p = 
dev::recover(signature_, sha3(false)); diff --git a/libraries/types/pbft_block/src/pbft_block_extra_data.cpp b/libraries/types/pbft_block/src/pbft_block_extra_data.cpp index 6a14c99c61..1ded6113dd 100644 --- a/libraries/types/pbft_block/src/pbft_block_extra_data.cpp +++ b/libraries/types/pbft_block/src/pbft_block_extra_data.cpp @@ -1,5 +1,7 @@ #include "pbft/pbft_block_extra_data.hpp" +#include "common/encoding_rlp.hpp" + namespace taraxa { PbftBlockExtraData::PbftBlockExtraData(const uint16_t major_version, const uint16_t minor_version, diff --git a/libraries/types/pbft_block/src/period_data.cpp b/libraries/types/pbft_block/src/period_data.cpp index 8bb8ba795a..47e81dc413 100644 --- a/libraries/types/pbft_block/src/period_data.cpp +++ b/libraries/types/pbft_block/src/period_data.cpp @@ -1,11 +1,6 @@ #include "pbft/period_data.hpp" -#include - -#include "dag/dag_block.hpp" #include "pbft/pbft_block.hpp" -#include "transaction/transaction.hpp" -#include "vote/pbft_vote.hpp" #include "vote/votes_bundle_rlp.hpp" namespace taraxa { diff --git a/libraries/types/transaction/include/transaction/system_transaction.hpp b/libraries/types/transaction/include/transaction/system_transaction.hpp index e254abe678..d3c8f33c24 100644 --- a/libraries/types/transaction/include/transaction/system_transaction.hpp +++ b/libraries/types/transaction/include/transaction/system_transaction.hpp @@ -1,6 +1,5 @@ #pragma once -#include "common/constants.hpp" #include "transaction/transaction.hpp" namespace taraxa { diff --git a/libraries/types/transaction/include/transaction/transaction.hpp b/libraries/types/transaction/include/transaction/transaction.hpp index abc7eec047..a79c6fb271 100644 --- a/libraries/types/transaction/include/transaction/transaction.hpp +++ b/libraries/types/transaction/include/transaction/transaction.hpp @@ -1,10 +1,9 @@ #pragma once -#include +#include #include #include -#include "common/default_construct_copyable_movable.hpp" #include "common/types.hpp" namespace 
taraxa { diff --git a/libraries/types/vote/include/vote/pbft_vote.hpp b/libraries/types/vote/include/vote/pbft_vote.hpp index de2e3edd86..f051213ecc 100644 --- a/libraries/types/vote/include/vote/pbft_vote.hpp +++ b/libraries/types/vote/include/vote/pbft_vote.hpp @@ -1,5 +1,7 @@ #pragma once +#include + #include "common/vrf_wrapper.hpp" #include "vote.hpp" #include "vrf_sortition.hpp" @@ -87,7 +89,7 @@ class PbftVote : public Vote { bytes rlp(bool inc_sig = true, bool inc_weight = false) const; /** - * @brief Optimed Recursive Length Prefix + * @brief Optimized Recursive Length Prefix * @note Encode only vote's signature and vrf proof into the rlp * * @return bytes of RLP stream @@ -116,7 +118,7 @@ class PbftVote : public Vote { strm << " vote_signature: " << vote.vote_signature_ << std::endl; strm << " blockhash: " << vote.block_hash_ << std::endl; if (vote.weight_) strm << " weight: " << vote.weight_.value() << std::endl; - strm << " vrf_sorition: " << vote.vrf_sortition_ << std::endl; + strm << " vrf_sortition: " << vote.vrf_sortition_ << std::endl; return strm; } diff --git a/libraries/types/vote/include/vote/vrf_sortition.hpp b/libraries/types/vote/include/vote/vrf_sortition.hpp index 4019308cf1..2137f769bd 100644 --- a/libraries/types/vote/include/vote/vrf_sortition.hpp +++ b/libraries/types/vote/include/vote/vrf_sortition.hpp @@ -1,3 +1,4 @@ +#pragma once #include #include diff --git a/libraries/types/vote/src/pbft_vote.cpp b/libraries/types/vote/src/pbft_vote.cpp index 3884f3fbe4..2167be8e25 100644 --- a/libraries/types/vote/src/pbft_vote.cpp +++ b/libraries/types/vote/src/pbft_vote.cpp @@ -1,8 +1,17 @@ #include "vote/pbft_vote.hpp" +#include #include +#include +#include + +#include +#include +#include #include "common/encoding_rlp.hpp" +#include "vote/vote.hpp" +#include "vote/vrf_sortition.hpp" namespace taraxa { diff --git a/libraries/types/vote/src/vote.cpp b/libraries/types/vote/src/vote.cpp index 8df1b12765..bfabf6b079 100644 --- 
a/libraries/types/vote/src/vote.cpp +++ b/libraries/types/vote/src/vote.cpp @@ -2,8 +2,6 @@ #include -#include "common/encoding_rlp.hpp" - namespace taraxa { Vote::Vote(const blk_hash_t& block_hash) : block_hash_(block_hash) {} @@ -35,7 +33,7 @@ const blk_hash_t& Vote::getBlockHash() const { return block_hash_; } bool Vote::verifyVote() const { auto pk = getVoter(); - return !pk.isZero(); // recoverd public key means that it was verified + return !pk.isZero(); // recovered public key means that it was verified } } // namespace taraxa \ No newline at end of file diff --git a/libraries/vdf/include/vdf/config.hpp b/libraries/vdf/include/vdf/config.hpp index b4fee1a8b8..e68f02acbd 100644 --- a/libraries/vdf/include/vdf/config.hpp +++ b/libraries/vdf/include/vdf/config.hpp @@ -3,7 +3,6 @@ #include #include "common/constants.hpp" -#include "common/encoding_rlp.hpp" namespace taraxa { diff --git a/libraries/vdf/include/vdf/sortition.hpp b/libraries/vdf/include/vdf/sortition.hpp index dfe51942e8..d24822cd09 100644 --- a/libraries/vdf/include/vdf/sortition.hpp +++ b/libraries/vdf/include/vdf/sortition.hpp @@ -1,18 +1,12 @@ #pragma once -#include - -#include "ProverWesolowski.h" #include "common/types.hpp" #include "common/vrf_wrapper.hpp" #include "libdevcore/CommonData.h" -#include "logger/logger.hpp" -#include "openssl/bn.h" #include "vdf/config.hpp" namespace taraxa::vdf_sortition { -using namespace vdf; using namespace vrf_wrapper; // It includes a vrf for difficulty adjustment diff --git a/libraries/vdf/src/config.cpp b/libraries/vdf/src/config.cpp index 5a01d4a0d1..f56c543c93 100644 --- a/libraries/vdf/src/config.cpp +++ b/libraries/vdf/src/config.cpp @@ -2,6 +2,8 @@ #include +#include "libdevcore/RLP.h" + namespace taraxa { int32_t fixFromOverflow(uint16_t value, int32_t change, uint16_t limit) { diff --git a/libraries/vdf/src/sortition.cpp b/libraries/vdf/src/sortition.cpp index c9ba151bfb..6248e3d901 100644 --- a/libraries/vdf/src/sortition.cpp +++ 
b/libraries/vdf/src/sortition.cpp @@ -3,9 +3,11 @@ #include #include -#include - +#include "ProverWesolowski.h" +#include "common/encoding_rlp.hpp" +#include "common/util.hpp" namespace taraxa::vdf_sortition { +using namespace vdf; VdfSortition::VdfSortition(const SortitionParams& config, const vrf_sk_t& sk, const bytes& vrf_input, uint64_t vote_count, uint64_t total_vote_count) diff --git a/programs/taraxa-bootnode/main.cpp b/programs/taraxa-bootnode/main.cpp index 7d5b98684c..fc3680d8e0 100644 --- a/programs/taraxa-bootnode/main.cpp +++ b/programs/taraxa-bootnode/main.cpp @@ -12,12 +12,12 @@ #include #include #include -#include #include "cli/config.hpp" #include "cli/tools.hpp" #include "common/jsoncpp.hpp" #include "common/thread_pool.hpp" +#include "common/util.hpp" #include "config/version.hpp" namespace po = boost::program_options; diff --git a/programs/taraxad/main.cpp b/programs/taraxad/main.cpp index 76eb64ca7f..f09cede04a 100644 --- a/programs/taraxad/main.cpp +++ b/programs/taraxad/main.cpp @@ -2,13 +2,12 @@ #include #include "cli/config.hpp" +#include "common/config_exception.hpp" #include "common/static_init.hpp" #include "node/node.hpp" using namespace taraxa; -namespace bpo = boost::program_options; - int main(int argc, const char* argv[]) { static_init(); try { diff --git a/tests/abi_test.cpp b/tests/abi_test.cpp index b8ad6a9f63..4daf6322cd 100644 --- a/tests/abi_test.cpp +++ b/tests/abi_test.cpp @@ -1,6 +1,7 @@ #include #include "common/encoding_solidity.hpp" +#include "logger/logger.hpp" #include "test_util/gtest.hpp" namespace taraxa::core_tests { diff --git a/tests/crypto_test.cpp b/tests/crypto_test.cpp index ff820d9062..c33452f48f 100644 --- a/tests/crypto_test.cpp +++ b/tests/crypto_test.cpp @@ -10,11 +10,10 @@ #include "common/static_init.hpp" #include "common/vrf_wrapper.hpp" -#include "config/config.hpp" #include "logger/logger.hpp" #include "test_util/gtest.hpp" #include "vdf/sortition.hpp" -#include "vote/pbft_vote.hpp" +#include 
"vote/vrf_sortition.hpp" namespace taraxa::core_tests { using namespace vdf; @@ -66,7 +65,7 @@ TEST_F(CryptoTest, vrf_proof_verify) { EXPECT_TRUE(isValidVrfPublicKey(pk)); EXPECT_TRUE(isValidVrfPublicKey(pk2)); - auto msg = getRlpBytes("helloworld!"); + auto msg = getRlpBytes("hello world!"); auto proof = getVrfProof(sk, msg); EXPECT_TRUE(proof); auto output = getVrfOutput(pk, proof.value(), msg); @@ -312,7 +311,7 @@ TEST_F(CryptoTest, DISABLED_compute_vdf_solution_cost_time) { "0b6627a6680e01cea3d9f36fa797f7f34e8869c3a526d9ed63ed8170e35542aad05dc12c" "1df1edc9f3367fba550b7971fc2de6c5998d8784051c5be69abc9644"); level_t level = 1; - uint16_t threshold_upper = 0; // diffculty == diffuclty_stale + uint16_t threshold_upper = 0; // difficulty == difficulty_stale uint16_t difficulty_min = 0; uint16_t difficulty_max = 0; uint16_t lambda_bound = 100; diff --git a/tests/final_chain_test.cpp b/tests/final_chain_test.cpp index ef59a96cae..577a097ea0 100644 --- a/tests/final_chain_test.cpp +++ b/tests/final_chain_test.cpp @@ -4,6 +4,7 @@ #include #include "common/constants.hpp" +#include "common/encoding_solidity.hpp" #include "common/vrf_wrapper.hpp" #include "config/config.hpp" #include "final_chain/trie_common.hpp" diff --git a/tests/network_test.cpp b/tests/network_test.cpp index b831bc0a61..a60f16a6b2 100644 --- a/tests/network_test.cpp +++ b/tests/network_test.cpp @@ -856,7 +856,7 @@ TEST_F(NetworkTest, pbft_next_votes_sync_in_same_round) { node2->getPbftManager()->setPbftRound(2); // Node 1 broadcast his votes - node1_pbft_mgr->testBroadcatVotesFunctionality(); + node1_pbft_mgr->testBroadcastVotesFunctionality(); // Node 2 should receive votes from node 1, node 1 has its own 2 votes EXPECT_EQ(node1_vote_mgr->getVerifiedVotesSize(), 2); EXPECT_HAPPENS({5s, 100ms}, [&](auto& ctx) { WAIT_EXPECT_EQ(ctx, node2_vote_mgr->getVerifiedVotesSize(), 3) }); diff --git a/tests/p2p_test.cpp b/tests/p2p_test.cpp index 1f84dbdbfb..86f5aaf327 100644 --- a/tests/p2p_test.cpp +++ 
b/tests/p2p_test.cpp @@ -1,18 +1,15 @@ #include #include +#include #include #include #include #include -#include #include #include "common/static_init.hpp" #include "logger/logger.hpp" -#include "network/tarcap/packets_handlers/latest/dag_block_packet_handler.hpp" -#include "network/tarcap/packets_handlers/latest/transaction_packet_handler.hpp" -#include "network/tarcap/shared_states/pbft_syncing_state.hpp" #include "network/tarcap/tarcap_version.hpp" #include "test_util/samples.hpp" #include "test_util/test_util.hpp" @@ -146,7 +143,7 @@ TEST_F(P2PTest, multiple_capabilities) { { test_tarcaps({1}, {1}); } { test_tarcaps({1, 2, 3}, {3, 4, 5}); } - // No common tarcapm version, connection should not be established + // No common tarcap version, connection should not be established { auto nodes = test_tarcaps({1, 2, 3}, {4, 5, 6}, false); // check that connection wasn't established diff --git a/tests/pillar_chain_test.cpp b/tests/pillar_chain_test.cpp index 8f933445b3..8f85bb78de 100644 --- a/tests/pillar_chain_test.cpp +++ b/tests/pillar_chain_test.cpp @@ -1,7 +1,6 @@ #include -#include - +#include "common/encoding_solidity.hpp" #include "common/static_init.hpp" #include "logger/logger.hpp" #include "pbft/pbft_manager.hpp" diff --git a/tests/rewards_stats_test.cpp b/tests/rewards_stats_test.cpp index 6fec7173b2..b7052c2c45 100644 --- a/tests/rewards_stats_test.cpp +++ b/tests/rewards_stats_test.cpp @@ -19,7 +19,7 @@ struct RewardsStatsTest : NodesTest {}; class TestableRewardsStats : public rewards::Stats { public: - TestableRewardsStats(const HardforksConfig::RewardsDistributionMap& rdm, std::shared_ptr db) + TestableRewardsStats(const HardforksConfig::RewardsDistributionMap& rdm, std::shared_ptr db) : rewards::Stats( 100, HardforksConfig{0, {}, rdm, MagnoliaHardfork{0, 0}, 0, 0, AspenHardfork{0, 0}, FicusHardforkConfig{0, 0}}, diff --git a/tests/rpc_test.cpp b/tests/rpc_test.cpp index 80aab92d75..ad34833237 100644 --- a/tests/rpc_test.cpp +++ b/tests/rpc_test.cpp 
@@ -1,10 +1,8 @@ #include -#include #include #include #include "network/rpc/eth/Eth.h" -#include "test_util/gtest.hpp" #include "test_util/samples.hpp" namespace taraxa::core_tests { diff --git a/tests/state_api_test.cpp b/tests/state_api_test.cpp index d2ded6b457..56db16ced7 100644 --- a/tests/state_api_test.cpp +++ b/tests/state_api_test.cpp @@ -4,10 +4,10 @@ #include #include -#include #include #include "common/encoding_rlp.hpp" +#include "slashing_manager/slashing_manager.hpp" #include "test_util/test_util.hpp" namespace taraxa::state_api { diff --git a/tests/tarcap_threadpool_test.cpp b/tests/tarcap_threadpool_test.cpp index 4bca7fae80..35f57c6b09 100644 --- a/tests/tarcap_threadpool_test.cpp +++ b/tests/tarcap_threadpool_test.cpp @@ -1,8 +1,7 @@ #include -#include - #include "config/config.hpp" +#include "config/version.hpp" #include "dag/dag_block.hpp" #include "logger/logger.hpp" #include "network/tarcap/packets_handler.hpp" diff --git a/tests/test_util/include/test_util/test_util.hpp b/tests/test_util/include/test_util/test_util.hpp index 7bb1e82c67..99ebd85425 100644 --- a/tests/test_util/include/test_util/test_util.hpp +++ b/tests/test_util/include/test_util/test_util.hpp @@ -3,22 +3,16 @@ #include #include -#include #include #include #include #include #include -#include #include -#include #include #include "../../gtest.hpp" -#include "common/encoding_solidity.hpp" -#include "common/vrf_wrapper.hpp" #include "config/config.hpp" -#include "network/network.hpp" #include "node/node.hpp" #include "transaction/transaction_manager.hpp" diff --git a/tests/test_util/src/test_util.cpp b/tests/test_util/src/test_util.cpp index 9650568676..860e8baa33 100644 --- a/tests/test_util/src/test_util.cpp +++ b/tests/test_util/src/test_util.cpp @@ -1,5 +1,8 @@ #include "test_util/test_util.hpp" +#include + +#include "common/encoding_solidity.hpp" #include "pbft/pbft_manager.hpp" #include "vote_manager/vote_manager.hpp" From e9ed4bf77e175c02b9ab4dcb92c2ffa7ddf5e449 Mon Sep 
17 00:00:00 2001 From: Matus Kysel Date: Mon, 26 Aug 2024 13:14:10 +0200 Subject: [PATCH 012/105] add support of ranges --- .../storage/src/migration/dag_block.cpp | 1 - .../dag_block/src/dag_block_bundle_rlp.cpp | 68 ++++++++++++++----- 2 files changed, 50 insertions(+), 19 deletions(-) diff --git a/libraries/core_libs/storage/src/migration/dag_block.cpp b/libraries/core_libs/storage/src/migration/dag_block.cpp index 3dab017c74..7f7ae0ca4f 100644 --- a/libraries/core_libs/storage/src/migration/dag_block.cpp +++ b/libraries/core_libs/storage/src/migration/dag_block.cpp @@ -48,7 +48,6 @@ void DagBlockData::migrate(logger::Logger& log) { executor.post([this, i, &copied_col]() { const auto bytes = db_->getPeriodDataRaw(i); const auto period_data_old_rlp = dev::RLP(bytes); - assert(period_data_old_rlp.itemCount() == 4); auto period_data = ::taraxa::PeriodData::FromOldPeriodData(period_data_old_rlp); diff --git a/libraries/types/dag_block/src/dag_block_bundle_rlp.cpp b/libraries/types/dag_block/src/dag_block_bundle_rlp.cpp index af34357631..532eb7e5da 100644 --- a/libraries/types/dag_block/src/dag_block_bundle_rlp.cpp +++ b/libraries/types/dag_block/src/dag_block_bundle_rlp.cpp @@ -12,9 +12,9 @@ dev::bytes encodeDAGBlocksBundleRlp(const std::vector& blocks) { return {}; } - std::unordered_map trx_hash_map; // Map to store transaction hash and its index + std::unordered_map trx_hash_map; std::vector ordered_trx_hashes; - std::vector> indexes; + std::vector> flat_ranges; // Flat structure for each block for (const auto& block : blocks) { std::vector idx; @@ -22,11 +22,30 @@ dev::bytes encodeDAGBlocksBundleRlp(const std::vector& blocks) { for (const auto& trx : block.getTrxs()) { if (const auto [_, ok] = trx_hash_map.try_emplace(trx, static_cast(trx_hash_map.size())); ok) { - ordered_trx_hashes.push_back(trx); // Track the insertion order + ordered_trx_hashes.push_back(trx); } idx.push_back(trx_hash_map[trx]); } - indexes.push_back(idx); + + // Convert indexes into ranges 
and store in a flat structure + std::vector block_flat_ranges; + uint16_t range_start = idx[0]; + uint16_t range_length = 1; + + for (size_t i = 1; i < idx.size(); ++i) { + if (idx[i] == range_start + range_length) { + ++range_length; + } else { + block_flat_ranges.push_back(range_start); + block_flat_ranges.push_back(range_length); + range_start = idx[i]; + range_length = 1; + } + } + block_flat_ranges.push_back(range_start); + block_flat_ranges.push_back(range_length); + + flat_ranges.push_back(std::move(block_flat_ranges)); } dev::RLPStream blocks_bundle_rlp(kDAGBlocksBundleRlpSize); @@ -34,17 +53,20 @@ dev::bytes encodeDAGBlocksBundleRlp(const std::vector& blocks) { for (const auto& trx_hash : ordered_trx_hashes) { blocks_bundle_rlp.append(trx_hash); } - blocks_bundle_rlp.appendList(indexes.size()); - for (const auto& idx : indexes) { - blocks_bundle_rlp.appendList(idx.size()); - for (const auto& i : idx) { - blocks_bundle_rlp.append(i); + + blocks_bundle_rlp.appendList(flat_ranges.size()); + for (const auto& block_flat_ranges : flat_ranges) { + blocks_bundle_rlp.appendList(block_flat_ranges.size()); + for (const auto& range_value : block_flat_ranges) { + blocks_bundle_rlp.append(range_value); } } + blocks_bundle_rlp.appendList(blocks.size()); for (const auto& block : blocks) { blocks_bundle_rlp.appendRaw(block.rlp(true, false)); } + return blocks_bundle_rlp.invalidate(); } @@ -56,17 +78,20 @@ std::vector decodeDAGBlocksBundleRlp(const dev::RLP& blocks_bundle_rlp std::vector ordered_trx_hashes; std::vector> dags_trx_hashes; - // Decode transaction hashes and ordered_trx_hashes.reserve(blocks_bundle_rlp[0].itemCount()); std::transform(blocks_bundle_rlp[0].begin(), blocks_bundle_rlp[0].end(), std::back_inserter(ordered_trx_hashes), [](const auto& trx_hash_rlp) { return trx_hash_rlp.template toHash(); }); - for (const auto& idx_rlp : blocks_bundle_rlp[1]) { + for (const auto& block_ranges_rlp : blocks_bundle_rlp[1]) { std::vector hashes; - 
hashes.reserve(idx_rlp.itemCount()); - std::transform(idx_rlp.begin(), idx_rlp.end(), std::back_inserter(hashes), - [&ordered_trx_hashes](const auto& i) { return ordered_trx_hashes[i.template toInt()]; }); + for (size_t i = 0; i < block_ranges_rlp.itemCount(); i += 2) { + uint16_t start_index = block_ranges_rlp[i].toInt(); + uint16_t length = block_ranges_rlp[i + 1].toInt(); + for (uint16_t j = 0; j < length; ++j) { + hashes.push_back(ordered_trx_hashes[start_index + j]); + } + } dags_trx_hashes.push_back(std::move(hashes)); } @@ -94,11 +119,18 @@ std::shared_ptr decodeDAGBlockBundleRlp(uint64_t index, const dev::RLP std::transform(blocks_bundle_rlp[0].begin(), blocks_bundle_rlp[0].end(), std::back_inserter(ordered_trx_hashes), [](const auto& trx_hash_rlp) { return trx_hash_rlp.template toHash(); }); - const auto idx_rlp = blocks_bundle_rlp[1][index]; + const auto block_ranges_rlp = blocks_bundle_rlp[1][index]; std::vector hashes; - hashes.reserve(idx_rlp.itemCount()); - std::transform(idx_rlp.begin(), idx_rlp.end(), std::back_inserter(hashes), - [&ordered_trx_hashes](const auto& i) { return ordered_trx_hashes[i.template toInt()]; }); + + for (size_t i = 0; i < block_ranges_rlp.itemCount(); i += 2) { + uint16_t start_index = block_ranges_rlp[i].toInt(); + uint16_t length = block_ranges_rlp[i + 1].toInt(); + + for (uint16_t j = 0; j < length; ++j) { + hashes.push_back(ordered_trx_hashes[start_index + j]); + } + } + return std::make_shared(blocks_bundle_rlp[2][index], std::move(hashes)); } From c0a10c84d8291a56ac07e092ea05aeda593f6402 Mon Sep 17 00:00:00 2001 From: Matus Kysel Date: Mon, 26 Aug 2024 16:23:03 +0200 Subject: [PATCH 013/105] fix storage --- libraries/core_libs/storage/src/storage.cpp | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) diff --git a/libraries/core_libs/storage/src/storage.cpp b/libraries/core_libs/storage/src/storage.cpp index f1e5188ad6..e50f0ab0e5 100644 --- a/libraries/core_libs/storage/src/storage.cpp +++ 
b/libraries/core_libs/storage/src/storage.cpp @@ -1249,9 +1249,10 @@ std::vector DbStorage::getFinalizedDagBlockHashesByPeriod(PbftPeriod std::vector ret; if (auto period_data = getPeriodDataRaw(period); period_data.size() > 0) { auto dag_blocks_data = dev::RLP(period_data)[DAG_BLOCKS_POS_IN_PERIOD_DATA]; - ret.reserve(dag_blocks_data.size()); - std::transform(dag_blocks_data.begin(), dag_blocks_data.end(), std::back_inserter(ret), - [](const auto& dag_block) { return DagBlock(dag_block).getHash(); }); + const auto dag_blocks = decodeDAGBlocksBundleRlp(dag_blocks_data); + ret.reserve(dag_blocks.size()); + std::transform(dag_blocks.begin(), dag_blocks.end(), std::back_inserter(ret), + [](const auto& dag_block) { return dag_block.getHash(); }); } return ret; @@ -1261,9 +1262,10 @@ std::vector> DbStorage::getFinalizedDagBlockByPeriod(P std::vector> ret; if (auto period_data = getPeriodDataRaw(period); period_data.size() > 0) { auto dag_blocks_data = dev::RLP(period_data)[DAG_BLOCKS_POS_IN_PERIOD_DATA]; - ret.reserve(dag_blocks_data.size()); + auto dag_blocks = decodeDAGBlocksBundleRlp(dag_blocks_data); + ret.reserve(dag_blocks.size()); for (auto const block : dag_blocks_data) { - ret.emplace_back(std::make_shared(block)); + ret.emplace_back(std::make_shared(std::move(block))); } } return ret; @@ -1276,9 +1278,10 @@ DbStorage::getLastPbftblockHashAndFinalizedDagBlockByPeriod(PbftPeriod period) { if (auto period_data = getPeriodDataRaw(period); period_data.size() > 0) { auto const period_data_rlp = dev::RLP(period_data); auto dag_blocks_data = period_data_rlp[DAG_BLOCKS_POS_IN_PERIOD_DATA]; - ret.reserve(dag_blocks_data.size()); - for (auto const block : dag_blocks_data) { - ret.emplace_back(std::make_shared(block)); + auto dag_blocks = decodeDAGBlocksBundleRlp(dag_blocks_data); + ret.reserve(dag_blocks.size()); + for (auto const block : dag_blocks) { + ret.emplace_back(std::make_shared(std::move(block))); } last_pbft_block_hash = 
period_data_rlp[PBFT_BLOCK_POS_IN_PERIOD_DATA][PREV_BLOCK_HASH_POS_IN_PBFT_BLOCK].toHash(); From b53b5d911973fabb827d87fcf151821ecec880b6 Mon Sep 17 00:00:00 2001 From: Jakub Fornadel Date: Thu, 29 Aug 2024 16:51:09 -0700 Subject: [PATCH 014/105] refactor blocking dependencies processing so it is not possible to forget adding unblocking method --- .../network/threadpool/priority_queue.hpp | 14 ++ .../network/src/threadpool/priority_queue.cpp | 121 +++++++++--------- tests/tarcap_threadpool_test.cpp | 32 +++++ 3 files changed, 108 insertions(+), 59 deletions(-) diff --git a/libraries/core_libs/network/include/network/threadpool/priority_queue.hpp b/libraries/core_libs/network/include/network/threadpool/priority_queue.hpp index 365bda2dd9..aa18188e0a 100644 --- a/libraries/core_libs/network/include/network/threadpool/priority_queue.hpp +++ b/libraries/core_libs/network/include/network/threadpool/priority_queue.hpp @@ -56,6 +56,20 @@ class PriorityQueue { */ size_t getPrirotityQueueSize(PacketData::PacketPriority priority) const; + /** + * @param packet_type + * @return true for non-blocking packet types, otherwise false + */ + bool isNonBlockingPacket(SubprotocolPacketType packet_type) const; + + /** + * @brief Updates packet blocking dependency + * @param packet + * @param unblock_processing if true, unblock packet processing, otherwise block processing + * @return true if blocking dependency for provided packet was updated, otherwise false + */ + bool updateBlockingDependencies(const PacketData& packet, bool unblock_processing = false); + private: /** * @brief Queue can borrow reserved thread from one of the other priority queues but each queue must have diff --git a/libraries/core_libs/network/src/threadpool/priority_queue.cpp b/libraries/core_libs/network/src/threadpool/priority_queue.cpp index 8311626ddb..b8d006884a 100644 --- a/libraries/core_libs/network/src/threadpool/priority_queue.cpp +++ b/libraries/core_libs/network/src/threadpool/priority_queue.cpp @@ 
-125,11 +125,40 @@ void PriorityQueue::updateDependenciesStart(const PacketData& packet) { act_total_workers_count_++; packets_queues_[packet.priority_].incrementActWorkersCount(); + updateBlockingDependencies(packet); +} + +void PriorityQueue::updateDependenciesFinish(const PacketData& packet, std::mutex& queue_mutex, + std::condition_variable& cond_var) { + assert(act_total_workers_count_ > 0); + + if (!isNonBlockingPacket(packet.type_)) { + // Note: every blocking packet must lock queue_mutex !!! + std::unique_lock lock(queue_mutex); + updateBlockingDependencies(packet, true); + cond_var.notify_all(); + } + + act_total_workers_count_--; + packets_queues_[packet.priority_].decrementActWorkersCount(); +} + +bool PriorityQueue::isNonBlockingPacket(SubprotocolPacketType packet_type) const { + // Note: any packet type that is not in this switch should be processed in updateDependencies + switch (packet_type) { + case SubprotocolPacketType::VotePacket: + case SubprotocolPacketType::GetNextVotesSyncPacket: + case SubprotocolPacketType::VotesBundlePacket: + case SubprotocolPacketType::StatusPacket: + case SubprotocolPacketType::PillarVotePacket: + return true; + } - // Process all dependencies here - it is called when packet processing has started - // !!! 
Important - there is a "mirror" function updateDependenciesFinish and all dependencies that are set - // here should be unset in updateDependenciesFinish + return false; +} +bool PriorityQueue::updateBlockingDependencies(const PacketData& packet, bool unblock_processing) { + // Note: any packet type that is not in this switch should be processed in isNonBlockingPacket switch (packet.type_) { // Packets that can be processed only 1 at the time // GetDagSyncPacket -> serve dag syncing data to only 1 node at the time @@ -141,83 +170,57 @@ void PriorityQueue::updateDependenciesStart(const PacketData& packet) { case SubprotocolPacketType::GetPbftSyncPacket: case SubprotocolPacketType::GetPillarVotesBundlePacket: case SubprotocolPacketType::PillarVotesBundlePacket: // TODO[2744]: remove - case SubprotocolPacketType::PbftSyncPacket: - blocked_packets_mask_.markPacketAsHardBlocked(packet, packet.type_); + case SubprotocolPacketType::PbftSyncPacket: { + if (!unblock_processing) { + blocked_packets_mask_.markPacketAsHardBlocked(packet, packet.type_); + } else { + blocked_packets_mask_.markPacketAsHardUnblocked(packet, packet.type_); + } break; + } // When syncing dag blocks, process only 1 packet at a time: // DagSyncPacket -> process sync dag blocks synchronously // DagBlockPacket -> wait with processing of new dag blocks until old blocks are synced - case SubprotocolPacketType::DagSyncPacket: - blocked_packets_mask_.markPacketAsHardBlocked(packet, packet.type_); - blocked_packets_mask_.markPacketAsPeerOrderBlocked(packet, SubprotocolPacketType::DagBlockPacket); + case SubprotocolPacketType::DagSyncPacket: { + if (!unblock_processing) { + blocked_packets_mask_.markPacketAsHardBlocked(packet, packet.type_); + blocked_packets_mask_.markPacketAsPeerOrderBlocked(packet, SubprotocolPacketType::DagBlockPacket); + } else { + blocked_packets_mask_.markPacketAsHardUnblocked(packet, packet.type_); + blocked_packets_mask_.markPacketAsPeerOrderUnblocked(packet, 
SubprotocolPacketType::DagBlockPacket); + } break; + } // When processing TransactionPacket, processing of all dag block packets that were received after that (from the // same peer). No need to block processing of dag blocks packets received before as it should not be possible to // send dag block before sending txs it contains... - case SubprotocolPacketType::TransactionPacket: - blocked_packets_mask_.markPacketAsPeerOrderBlocked(packet, SubprotocolPacketType::DagBlockPacket); - break; - - case SubprotocolPacketType::DagBlockPacket: - blocked_packets_mask_.setDagBlockLevelBeingProcessed(packet); - blocked_packets_mask_.setDagBlockBeingProcessed(packet); - break; - - default: - break; - } -} - -void PriorityQueue::updateDependenciesFinish(const PacketData& packet, std::mutex& queue_mutex, - std::condition_variable& cond_var) { - assert(act_total_workers_count_ > 0); - - // Process all dependencies here - it is called when packet processing is finished - - // Note: every case in this switch must lock queue_mutex !!! 
- switch (packet.type_) { - case SubprotocolPacketType::GetDagSyncPacket: - case SubprotocolPacketType::GetPbftSyncPacket: - case SubprotocolPacketType::GetPillarVotesBundlePacket: - case SubprotocolPacketType::PillarVotesBundlePacket: // TODO[2744]: remove - case SubprotocolPacketType::PbftSyncPacket: { - std::unique_lock lock(queue_mutex); - blocked_packets_mask_.markPacketAsHardUnblocked(packet, packet.type_); - cond_var.notify_all(); - break; - } - - case SubprotocolPacketType::DagSyncPacket: { - std::unique_lock lock(queue_mutex); - blocked_packets_mask_.markPacketAsHardUnblocked(packet, packet.type_); - blocked_packets_mask_.markPacketAsPeerOrderUnblocked(packet, SubprotocolPacketType::DagBlockPacket); - cond_var.notify_all(); - break; - } - case SubprotocolPacketType::TransactionPacket: { - std::unique_lock lock(queue_mutex); - blocked_packets_mask_.markPacketAsPeerOrderUnblocked(packet, SubprotocolPacketType::DagBlockPacket); - cond_var.notify_all(); + if (!unblock_processing) { + blocked_packets_mask_.markPacketAsPeerOrderBlocked(packet, SubprotocolPacketType::DagBlockPacket); + } else { + blocked_packets_mask_.markPacketAsPeerOrderUnblocked(packet, SubprotocolPacketType::DagBlockPacket); + } break; } case SubprotocolPacketType::DagBlockPacket: { - std::unique_lock lock(queue_mutex); - blocked_packets_mask_.unsetDagBlockLevelBeingProcessed(packet); - blocked_packets_mask_.unsetDagBlockBeingProcessed(packet); - cond_var.notify_all(); + if (!unblock_processing) { + blocked_packets_mask_.setDagBlockLevelBeingProcessed(packet); + blocked_packets_mask_.setDagBlockBeingProcessed(packet); + } else { + blocked_packets_mask_.unsetDagBlockLevelBeingProcessed(packet); + blocked_packets_mask_.unsetDagBlockBeingProcessed(packet); + } break; } default: - break; + return false; } - act_total_workers_count_--; - packets_queues_[packet.priority_].decrementActWorkersCount(); + return true; } size_t PriorityQueue::getPrirotityQueueSize(PacketData::PacketPriority priority) 
const { diff --git a/tests/tarcap_threadpool_test.cpp b/tests/tarcap_threadpool_test.cpp index 35f57c6b09..e889013a47 100644 --- a/tests/tarcap_threadpool_test.cpp +++ b/tests/tarcap_threadpool_test.cpp @@ -307,6 +307,38 @@ size_t queuesSize(const threadpool::PacketsThreadPool& tp) { // Threshold for packets queue to be emptied constexpr std::chrono::milliseconds QUEUE_EMPTIED_WAIT_TRESHOLD_MS = 15ms; +// Test all packet types if they are either in non-blocking or blocking list of packets +TEST_F(TarcapTpTest, packets_blocking_dependencies) { + network::threadpool::PriorityQueue priority_queue(3); + + for (auto packet_type = SubprotocolPacketType{0}; packet_type != SubprotocolPacketType::PacketCount; + packet_type = static_cast(static_cast(packet_type) + 1)) { + // Skip unreal packet types + switch (packet_type) { + case SubprotocolPacketType::HighPriorityPackets: + case SubprotocolPacketType::MidPriorityPackets: + case SubprotocolPacketType::LowPriorityPackets: + case SubprotocolPacketType::PacketCount: + continue; + } + + std::vector packet_bytes; + + // Generate proper rlp for packets that need it for processing + if (packet_type == SubprotocolPacketType::DagBlockPacket) { + DagBlock blk(blk_hash_t(1), 1, {}, {trx_hash_t(1), trx_hash_t(2)}, sig_t(3), blk_hash_t(0x4), addr_t(5)); + packet_bytes = blk.rlp(true); + } + network::threadpool::PacketData packet_data{packet_type, {}, std::move(packet_bytes)}; + packet_data.id_ = static_cast(packet_type); + + bool is_non_blocking_packet = priority_queue.isNonBlockingPacket(packet_data.type_); + bool is_blocking_packet = priority_queue.updateBlockingDependencies(packet_data); + + EXPECT_TRUE(is_non_blocking_packet != is_blocking_packet); + } +} + // Test if all "block-free" packets are processed concurrently // Note: in case someone creates new blocking dependency and does not adjust tests, this test should fail TEST_F(TarcapTpTest, block_free_packets) { From e3767ebab069a968960e7c7889de36d9f1165417 Mon Sep 17 00:00:00 
2001 From: Matus Kysel Date: Fri, 30 Aug 2024 10:53:13 +0200 Subject: [PATCH 015/105] Revert "add support of ranges" This reverts commit e9ed4bf77e175c02b9ab4dcb92c2ffa7ddf5e449. --- .../storage/src/migration/dag_block.cpp | 1 + .../dag_block/src/dag_block_bundle_rlp.cpp | 68 +++++-------------- 2 files changed, 19 insertions(+), 50 deletions(-) diff --git a/libraries/core_libs/storage/src/migration/dag_block.cpp b/libraries/core_libs/storage/src/migration/dag_block.cpp index 7f7ae0ca4f..3dab017c74 100644 --- a/libraries/core_libs/storage/src/migration/dag_block.cpp +++ b/libraries/core_libs/storage/src/migration/dag_block.cpp @@ -48,6 +48,7 @@ void DagBlockData::migrate(logger::Logger& log) { executor.post([this, i, &copied_col]() { const auto bytes = db_->getPeriodDataRaw(i); const auto period_data_old_rlp = dev::RLP(bytes); + assert(period_data_old_rlp.itemCount() == 4); auto period_data = ::taraxa::PeriodData::FromOldPeriodData(period_data_old_rlp); diff --git a/libraries/types/dag_block/src/dag_block_bundle_rlp.cpp b/libraries/types/dag_block/src/dag_block_bundle_rlp.cpp index 532eb7e5da..af34357631 100644 --- a/libraries/types/dag_block/src/dag_block_bundle_rlp.cpp +++ b/libraries/types/dag_block/src/dag_block_bundle_rlp.cpp @@ -12,9 +12,9 @@ dev::bytes encodeDAGBlocksBundleRlp(const std::vector& blocks) { return {}; } - std::unordered_map trx_hash_map; + std::unordered_map trx_hash_map; // Map to store transaction hash and its index std::vector ordered_trx_hashes; - std::vector> flat_ranges; // Flat structure for each block + std::vector> indexes; for (const auto& block : blocks) { std::vector idx; @@ -22,30 +22,11 @@ dev::bytes encodeDAGBlocksBundleRlp(const std::vector& blocks) { for (const auto& trx : block.getTrxs()) { if (const auto [_, ok] = trx_hash_map.try_emplace(trx, static_cast(trx_hash_map.size())); ok) { - ordered_trx_hashes.push_back(trx); + ordered_trx_hashes.push_back(trx); // Track the insertion order } idx.push_back(trx_hash_map[trx]); } - 
- // Convert indexes into ranges and store in a flat structure - std::vector block_flat_ranges; - uint16_t range_start = idx[0]; - uint16_t range_length = 1; - - for (size_t i = 1; i < idx.size(); ++i) { - if (idx[i] == range_start + range_length) { - ++range_length; - } else { - block_flat_ranges.push_back(range_start); - block_flat_ranges.push_back(range_length); - range_start = idx[i]; - range_length = 1; - } - } - block_flat_ranges.push_back(range_start); - block_flat_ranges.push_back(range_length); - - flat_ranges.push_back(std::move(block_flat_ranges)); + indexes.push_back(idx); } dev::RLPStream blocks_bundle_rlp(kDAGBlocksBundleRlpSize); @@ -53,20 +34,17 @@ dev::bytes encodeDAGBlocksBundleRlp(const std::vector& blocks) { for (const auto& trx_hash : ordered_trx_hashes) { blocks_bundle_rlp.append(trx_hash); } - - blocks_bundle_rlp.appendList(flat_ranges.size()); - for (const auto& block_flat_ranges : flat_ranges) { - blocks_bundle_rlp.appendList(block_flat_ranges.size()); - for (const auto& range_value : block_flat_ranges) { - blocks_bundle_rlp.append(range_value); + blocks_bundle_rlp.appendList(indexes.size()); + for (const auto& idx : indexes) { + blocks_bundle_rlp.appendList(idx.size()); + for (const auto& i : idx) { + blocks_bundle_rlp.append(i); } } - blocks_bundle_rlp.appendList(blocks.size()); for (const auto& block : blocks) { blocks_bundle_rlp.appendRaw(block.rlp(true, false)); } - return blocks_bundle_rlp.invalidate(); } @@ -78,20 +56,17 @@ std::vector decodeDAGBlocksBundleRlp(const dev::RLP& blocks_bundle_rlp std::vector ordered_trx_hashes; std::vector> dags_trx_hashes; + // Decode transaction hashes and ordered_trx_hashes.reserve(blocks_bundle_rlp[0].itemCount()); std::transform(blocks_bundle_rlp[0].begin(), blocks_bundle_rlp[0].end(), std::back_inserter(ordered_trx_hashes), [](const auto& trx_hash_rlp) { return trx_hash_rlp.template toHash(); }); - for (const auto& block_ranges_rlp : blocks_bundle_rlp[1]) { + for (const auto& idx_rlp : 
blocks_bundle_rlp[1]) { std::vector hashes; - for (size_t i = 0; i < block_ranges_rlp.itemCount(); i += 2) { - uint16_t start_index = block_ranges_rlp[i].toInt(); - uint16_t length = block_ranges_rlp[i + 1].toInt(); + hashes.reserve(idx_rlp.itemCount()); + std::transform(idx_rlp.begin(), idx_rlp.end(), std::back_inserter(hashes), + [&ordered_trx_hashes](const auto& i) { return ordered_trx_hashes[i.template toInt()]; }); - for (uint16_t j = 0; j < length; ++j) { - hashes.push_back(ordered_trx_hashes[start_index + j]); - } - } dags_trx_hashes.push_back(std::move(hashes)); } @@ -119,18 +94,11 @@ std::shared_ptr decodeDAGBlockBundleRlp(uint64_t index, const dev::RLP std::transform(blocks_bundle_rlp[0].begin(), blocks_bundle_rlp[0].end(), std::back_inserter(ordered_trx_hashes), [](const auto& trx_hash_rlp) { return trx_hash_rlp.template toHash(); }); - const auto block_ranges_rlp = blocks_bundle_rlp[1][index]; + const auto idx_rlp = blocks_bundle_rlp[1][index]; std::vector hashes; - - for (size_t i = 0; i < block_ranges_rlp.itemCount(); i += 2) { - uint16_t start_index = block_ranges_rlp[i].toInt(); - uint16_t length = block_ranges_rlp[i + 1].toInt(); - - for (uint16_t j = 0; j < length; ++j) { - hashes.push_back(ordered_trx_hashes[start_index + j]); - } - } - + hashes.reserve(idx_rlp.itemCount()); + std::transform(idx_rlp.begin(), idx_rlp.end(), std::back_inserter(hashes), + [&ordered_trx_hashes](const auto& i) { return ordered_trx_hashes[i.template toInt()]; }); return std::make_shared(blocks_bundle_rlp[2][index], std::move(hashes)); } From 8f088dacf8e3fa9c1a4c6cbc8fcc8f15ddb11016 Mon Sep 17 00:00:00 2001 From: Matus Kysel Date: Fri, 30 Aug 2024 11:37:19 +0200 Subject: [PATCH 016/105] change to batches --- .../storage/include/storage/storage.hpp | 5 +++ .../storage/src/migration/dag_block.cpp | 43 ++++++++----------- 2 files changed, 24 insertions(+), 24 deletions(-) diff --git a/libraries/core_libs/storage/include/storage/storage.hpp 
b/libraries/core_libs/storage/include/storage/storage.hpp index 6be77acc4f..7247c562c7 100644 --- a/libraries/core_libs/storage/include/storage/storage.hpp +++ b/libraries/core_libs/storage/include/storage/storage.hpp @@ -456,6 +456,11 @@ class DbStorage : public std::enable_shared_from_this { checkStatus(batch.Put(handle(col), toSlice(k), toSlice(v))); } + template + void insert(Batch& batch, rocksdb::ColumnFamilyHandle* col, K const& k, V const& v) { + checkStatus(batch.Put(col, toSlice(k), toSlice(v))); + } + template void remove(Column const& col, K const& k) { checkStatus(db_->Delete(write_options_, handle(col), toSlice(k))); diff --git a/libraries/core_libs/storage/src/migration/dag_block.cpp b/libraries/core_libs/storage/src/migration/dag_block.cpp index 3dab017c74..e30b833125 100644 --- a/libraries/core_libs/storage/src/migration/dag_block.cpp +++ b/libraries/core_libs/storage/src/migration/dag_block.cpp @@ -5,6 +5,7 @@ #include #include "common/thread_pool.hpp" +#include "common/util.hpp" #include "pbft/period_data.hpp" namespace taraxa::storage::migration { @@ -16,7 +17,7 @@ std::string DagBlockData::id() { return "DagBlockData"; } uint32_t DagBlockData::dbVersion() { return 1; } void DagBlockData::migrate(logger::Logger& log) { - auto orig_col = DB::Columns::period_data; + auto orig_col = DbStorage::Columns::period_data; auto copied_col = db_->copyColumn(db_->handle(orig_col), orig_col.name() + "-copy"); if (copied_col == nullptr) { @@ -24,7 +25,7 @@ void DagBlockData::migrate(logger::Logger& log) { return; } - auto it = db_->getColumnIterator(copied_col.get()); + auto it = db_->getColumnIterator(orig_col); it->SeekToFirst(); if (!it->Valid()) { return; @@ -38,37 +39,31 @@ void DagBlockData::migrate(logger::Logger& log) { it->Prev(); } memcpy(&end_period, it->key().data(), sizeof(uint64_t)); - util::ThreadPool executor{std::thread::hardware_concurrency()}; - const auto diff = (end_period - start_period) ? 
(end_period - start_period) : 1; uint64_t curr_progress = 0; - + auto batch = db_->createWriteBatch(); + const size_t max_size = 500000000; + it->SeekToFirst(); // Get and save data in new format for all blocks - for (uint64_t i = start_period; i <= end_period; ++i) { - executor.post([this, i, &copied_col]() { - const auto bytes = db_->getPeriodDataRaw(i); - const auto period_data_old_rlp = dev::RLP(bytes); - assert(period_data_old_rlp.itemCount() == 4); - - auto period_data = ::taraxa::PeriodData::FromOldPeriodData(period_data_old_rlp); - - db_->insert(copied_col.get(), i, period_data.rlp()); - }); - // This should slow down main loop so we are not using so much memory - while (executor.num_pending_tasks() > (executor.capacity() * 3)) { - taraxa::thisThreadSleepForMilliSeconds(50); + for (; it->Valid(); it->Next()) { + uint64_t period; + memcpy(&period, it->key().data(), sizeof(uint64_t)); + std::string raw = it->value().ToString(); + const auto period_data_old_rlp = dev::RLP(raw); + auto period_data = ::taraxa::PeriodData::FromOldPeriodData(period_data_old_rlp); + db_->insert(batch, copied_col.get(), period, period_data.rlp()); + + if (batch.GetDataSize() > max_size) { + db_->commitWriteBatch(batch); } - auto percentage = (i - start_period) * 100 / diff; + + auto percentage = (period - start_period) * 100 / diff; if (percentage > curr_progress) { curr_progress = percentage; LOG(log) << "Migration " << id() << " progress " << curr_progress << "%"; } } - - // It's not perfect to check with sleep, but it's just migration that should be run once - do { - taraxa::thisThreadSleepForMilliSeconds(100); - } while (executor.num_pending_tasks()); + db_->commitWriteBatch(batch); db_->replaceColumn(orig_col, std::move(copied_col)); } From 9373f5671277f10666d1fad80d318b388a40ce61 Mon Sep 17 00:00:00 2001 From: Matus Kysel Date: Fri, 30 Aug 2024 14:19:21 +0200 Subject: [PATCH 017/105] add new handlers --- CMakeLists.txt | 2 +- libraries/common/include/common/constants.hpp | 2 
+- .../v3/get_pbft_sync_packet_handler.hpp | 41 +++ .../v3/pbft_sync_packet_handler.hpp | 43 +++ .../network/tarcap/taraxa_capability.hpp | 3 + libraries/core_libs/network/src/network.cpp | 11 +- .../latest/pbft_sync_packet_handler.cpp | 2 +- .../v3/get_pbft_sync_packet_handler.cpp | 117 +++++++ .../v3/pbft_sync_packet_handler.cpp | 296 ++++++++++++++++++ .../network/src/tarcap/taraxa_capability.cpp | 63 +++- .../pbft_block/include/pbft/period_data.hpp | 3 +- .../types/pbft_block/src/period_data.cpp | 30 ++ 12 files changed, 600 insertions(+), 13 deletions(-) create mode 100644 libraries/core_libs/network/include/network/tarcap/packets_handlers/v3/get_pbft_sync_packet_handler.hpp create mode 100644 libraries/core_libs/network/include/network/tarcap/packets_handlers/v3/pbft_sync_packet_handler.hpp create mode 100644 libraries/core_libs/network/src/tarcap/packets_handlers/v3/get_pbft_sync_packet_handler.cpp create mode 100644 libraries/core_libs/network/src/tarcap/packets_handlers/v3/pbft_sync_packet_handler.cpp diff --git a/CMakeLists.txt b/CMakeLists.txt index f6614691c1..6bb171e4e3 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -3,7 +3,7 @@ cmake_minimum_required(VERSION 3.20) # Set current version of the project set(TARAXA_MAJOR_VERSION 1) set(TARAXA_MINOR_VERSION 11) -set(TARAXA_PATCH_VERSION 3) +set(TARAXA_PATCH_VERSION 4) set(TARAXA_VERSION ${TARAXA_MAJOR_VERSION}.${TARAXA_MINOR_VERSION}.${TARAXA_PATCH_VERSION}) # Any time a change in the network protocol is introduced this version should be increased diff --git a/libraries/common/include/common/constants.hpp b/libraries/common/include/common/constants.hpp index 2fa857ee91..e9ba096485 100644 --- a/libraries/common/include/common/constants.hpp +++ b/libraries/common/include/common/constants.hpp @@ -30,7 +30,7 @@ constexpr uint32_t kMinTransactionPoolSize{30000}; constexpr uint32_t kDefaultTransactionPoolSize{200000}; constexpr uint32_t kMaxNonFinalizedTransactions{1000000}; -const size_t kV2NetworkVersion 
= 2; +const size_t kV3NetworkVersion = 3; const uint32_t kRecentlyFinalizedTransactionsFactor = 2; diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/v3/get_pbft_sync_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v3/get_pbft_sync_packet_handler.hpp new file mode 100644 index 0000000000..82d0506100 --- /dev/null +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v3/get_pbft_sync_packet_handler.hpp @@ -0,0 +1,41 @@ +#pragma once + +#include "../latest/common/packet_handler.hpp" + +namespace taraxa { +class PbftChain; +class DbStorage; +class VoteManager; +} // namespace taraxa + +namespace taraxa::network::tarcap { + class PbftSyncingState; +} + +namespace taraxa::network::tarcap::v3 { +class GetPbftSyncPacketHandler : public PacketHandler { + public: + GetPbftSyncPacketHandler(const FullNodeConfig& conf, std::shared_ptr peers_state, + std::shared_ptr packets_stats, + std::shared_ptr pbft_syncing_state, std::shared_ptr pbft_chain, + std::shared_ptr vote_mgr, std::shared_ptr db, + const addr_t& node_addr, const std::string& logs_prefix = "GET_PBFT_SYNC_PH"); + + // Packet type that is processed by this handler + static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::GetPbftSyncPacket; + + private: + virtual void validatePacketRlpFormat(const threadpool::PacketData& packet_data) const override; + virtual void process(const threadpool::PacketData& packet_data, const std::shared_ptr& peer) override; + + virtual void sendPbftBlocks(const std::shared_ptr& peer, PbftPeriod from_period, + size_t blocks_to_transfer, bool pbft_chain_synced); + + protected: + std::shared_ptr pbft_syncing_state_; + std::shared_ptr pbft_chain_; + std::shared_ptr vote_mgr_; + std::shared_ptr db_; +}; + +} // namespace taraxa::network::tarcap::v3 diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/v3/pbft_sync_packet_handler.hpp 
b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v3/pbft_sync_packet_handler.hpp new file mode 100644 index 0000000000..e2459081a1 --- /dev/null +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v3/pbft_sync_packet_handler.hpp @@ -0,0 +1,43 @@ +#pragma once + +#include "../latest/common/ext_syncing_packet_handler.hpp" +#include "common/thread_pool.hpp" +#include "vote_manager/vote_manager.hpp" + +namespace taraxa::network::tarcap::v3 { + +class PbftSyncPacketHandler : public ExtSyncingPacketHandler { + public: + PbftSyncPacketHandler(const FullNodeConfig& conf, std::shared_ptr peers_state, + std::shared_ptr packets_stats, + std::shared_ptr pbft_syncing_state, std::shared_ptr pbft_chain, + std::shared_ptr pbft_mgr, std::shared_ptr dag_mgr, + std::shared_ptr vote_mgr, std::shared_ptr db, const addr_t& node_addr, + const std::string& logs_prefix = "PBFT_SYNC_PH"); + + void handleMaliciousSyncPeer(const dev::p2p::NodeID& id); + + // Packet type that is processed by this handler + static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::PbftSyncPacket; + + private: + virtual void validatePacketRlpFormat(const threadpool::PacketData& packet_data) const override; + virtual void process(const threadpool::PacketData& packet_data, const std::shared_ptr& peer) override; + + protected: + virtual PeriodData decodePeriodData(const dev::RLP& period_data_rlp) const; + virtual std::vector> decodeVotesBundle(const dev::RLP& votes_bundle_rlp) const; + + void pbftSyncComplete(); + void delayedPbftSync(int counter); + + static constexpr uint32_t kDelayedPbftSyncDelayMs = 10; + + std::shared_ptr vote_mgr_; + util::ThreadPool periodic_events_tp_; + + static constexpr size_t kStandardPacketSize = 2; + static constexpr size_t kChainSyncedPacketSize = 3; +}; + +} // namespace taraxa::network::tarcap::v3 diff --git a/libraries/core_libs/network/include/network/tarcap/taraxa_capability.hpp 
b/libraries/core_libs/network/include/network/tarcap/taraxa_capability.hpp index 6dcf70c819..b38b7ad71d 100644 --- a/libraries/core_libs/network/include/network/tarcap/taraxa_capability.hpp +++ b/libraries/core_libs/network/include/network/tarcap/taraxa_capability.hpp @@ -59,6 +59,9 @@ class TaraxaCapability final : public dev::p2p::CapabilityFace { */ static const InitPacketsHandlers kInitLatestVersionHandlers; + // TODO: remove this once we pass HF + static const InitPacketsHandlers kInitV3Handlers; + public: TaraxaCapability(TarcapVersion version, const FullNodeConfig &conf, const h256 &genesis_hash, std::weak_ptr host, const dev::KeyPair &key, diff --git a/libraries/core_libs/network/src/network.cpp b/libraries/core_libs/network/src/network.cpp index 07ad0d602e..53f51eabec 100644 --- a/libraries/core_libs/network/src/network.cpp +++ b/libraries/core_libs/network/src/network.cpp @@ -73,15 +73,16 @@ Network::Network(const FullNodeConfig &config, const h256 &genesis_hash, std::fi dev::p2p::Host::CapabilitiesFactory constructCapabilities = [&](std::weak_ptr host) { assert(!host.expired()); - assert(kV2NetworkVersion < TARAXA_NET_VERSION); + assert(kV3NetworkVersion < TARAXA_NET_VERSION); dev::p2p::Host::CapabilityList capabilities; // Register old version (V2) of taraxa capability - auto v2_tarcap = std::make_shared( - kV2NetworkVersion, config, genesis_hash, host, key, packets_tp_, all_packets_stats_, pbft_syncing_state_, db, - pbft_mgr, pbft_chain, vote_mgr, dag_mgr, trx_mgr, slashing_manager, pillar_chain_mgr); - capabilities.emplace_back(v2_tarcap); + auto v3_tarcap = std::make_shared( + kV3NetworkVersion, config, genesis_hash, host, key, packets_tp_, all_packets_stats_, pbft_syncing_state_, db, + pbft_mgr, pbft_chain, vote_mgr, dag_mgr, trx_mgr, slashing_manager, pillar_chain_mgr, + network::tarcap::TaraxaCapability::kInitV3Handlers); + capabilities.emplace_back(v3_tarcap); // Register latest version of taraxa capability auto latest_tarcap = std::make_shared( 
diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/pbft_sync_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/pbft_sync_packet_handler.cpp index 96f5217775..561217b6e6 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/pbft_sync_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/pbft_sync_packet_handler.cpp @@ -231,7 +231,7 @@ void PbftSyncPacketHandler::process(const threadpool::PacketData &packet_data, } PeriodData PbftSyncPacketHandler::decodePeriodData(const dev::RLP &period_data_rlp) const { - return PeriodData::FromOldPeriodData(period_data_rlp); + return PeriodData(period_data_rlp); } std::vector> PbftSyncPacketHandler::decodeVotesBundle( diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/v3/get_pbft_sync_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/v3/get_pbft_sync_packet_handler.cpp new file mode 100644 index 0000000000..a9cd4ec9f6 --- /dev/null +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/v3/get_pbft_sync_packet_handler.cpp @@ -0,0 +1,117 @@ +#include "network/tarcap/packets_handlers/v3/get_pbft_sync_packet_handler.hpp" + +#include "network/tarcap/shared_states/pbft_syncing_state.hpp" +#include "pbft/pbft_chain.hpp" +#include "pbft/period_data.hpp" +#include "storage/storage.hpp" +#include "vote/pbft_vote.hpp" +#include "vote/votes_bundle_rlp.hpp" +#include "vote_manager/vote_manager.hpp" + +namespace taraxa::network::tarcap::v3 { + +GetPbftSyncPacketHandler::GetPbftSyncPacketHandler(const FullNodeConfig &conf, std::shared_ptr peers_state, + std::shared_ptr packets_stats, + std::shared_ptr pbft_syncing_state, + std::shared_ptr pbft_chain, + std::shared_ptr vote_mgr, std::shared_ptr db, + const addr_t &node_addr, const std::string &logs_prefix) + : PacketHandler(conf, std::move(peers_state), std::move(packets_stats), node_addr, + logs_prefix + "GET_PBFT_SYNC_PH"), + 
pbft_syncing_state_(std::move(pbft_syncing_state)), + pbft_chain_(std::move(pbft_chain)), + vote_mgr_(std::move(vote_mgr)), + db_(std::move(db)) {} + +void GetPbftSyncPacketHandler::validatePacketRlpFormat(const threadpool::PacketData &packet_data) const { + if (constexpr size_t required_size = 1; packet_data.rlp_.itemCount() != required_size) { + throw InvalidRlpItemsCountException(packet_data.type_str_, packet_data.rlp_.itemCount(), required_size); + } +} + +void GetPbftSyncPacketHandler::process(const threadpool::PacketData &packet_data, + const std::shared_ptr &peer) { + LOG(log_tr_) << "Received GetPbftSyncPacket Block"; + + const size_t height_to_sync = packet_data.rlp_[0].toInt(); + // Here need PBFT chain size, not synced period since synced blocks has not verified yet. + const size_t my_chain_size = pbft_chain_->getPbftChainSize(); + if (height_to_sync > my_chain_size) { + // Node update peers PBFT chain size in status packet. Should not request syncing period bigger than pbft chain size + std::ostringstream err_msg; + err_msg << "Peer " << packet_data.from_node_id_ << " request syncing period start at " << height_to_sync + << ". That's bigger than own PBFT chain size " << my_chain_size; + throw MaliciousPeerException(err_msg.str()); + } + + if (kConf.is_light_node && height_to_sync + kConf.light_node_history <= my_chain_size) { + std::ostringstream err_msg; + err_msg << "Peer " << packet_data.from_node_id_ << " request syncing period start at " << height_to_sync + << ". 
Light node does not have the data " << my_chain_size; + throw MaliciousPeerException(err_msg.str()); + } + + size_t blocks_to_transfer = 0; + auto pbft_chain_synced = false; + const auto total_period_data_size = my_chain_size - height_to_sync + 1; + if (total_period_data_size <= kConf.network.sync_level_size) { + blocks_to_transfer = total_period_data_size; + pbft_chain_synced = true; + } else { + blocks_to_transfer = kConf.network.sync_level_size; + } + LOG(log_tr_) << "Will send " << blocks_to_transfer << " PBFT blocks to " << packet_data.from_node_id_; + + sendPbftBlocks(peer, height_to_sync, blocks_to_transfer, pbft_chain_synced); +} + +// api for pbft syncing +void GetPbftSyncPacketHandler::sendPbftBlocks(const std::shared_ptr &peer, PbftPeriod from_period, + size_t blocks_to_transfer, bool pbft_chain_synced) { + const auto &peer_id = peer->getId(); + LOG(log_tr_) << "sendPbftBlocks: peer want to sync from pbft chain height " << from_period << ", will send at most " + << blocks_to_transfer << " pbft blocks to " << peer_id; + + for (auto block_period = from_period; block_period < from_period + blocks_to_transfer; block_period++) { + bool last_block = (block_period == from_period + blocks_to_transfer - 1); + auto data = db_->getPeriodDataRaw(block_period); + + if (data.size() == 0) { + // This can happen when switching from light node to full node setting + LOG(log_er_) << "DB corrupted. 
Cannot find period " << block_period << " PBFT block in db"; + return; + } + + data = PeriodData::ToOldPeriodData(data); + + dev::RLPStream s; + if (pbft_chain_synced && last_block) { + // Latest finalized block cert votes are saved in db as reward votes for new blocks + const auto reward_votes = vote_mgr_->getRewardVotes(); + assert(!reward_votes.empty()); + // It is possible that the node pushed another block to the chain in the meantime + if (reward_votes[0]->getPeriod() == block_period) { + s.appendList(3); + s << last_block; + s.appendRaw(data); + s.appendRaw(encodePbftVotesBundleRlp(reward_votes)); + } else { + s.appendList(2); + s << last_block; + s.appendRaw(data); + } + } else { + s.appendList(2); + s << last_block; + s.appendRaw(data); + } + + LOG(log_dg_) << "Sending PbftSyncPacket period " << block_period << " to " << peer_id; + sealAndSend(peer_id, SubprotocolPacketType::PbftSyncPacket, std::move(s)); + if (pbft_chain_synced && last_block) { + peer->syncing_ = false; + } + } +} + +} // namespace taraxa::network::tarcap::v3 diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/v3/pbft_sync_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/v3/pbft_sync_packet_handler.cpp new file mode 100644 index 0000000000..277fec6ad6 --- /dev/null +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/v3/pbft_sync_packet_handler.cpp @@ -0,0 +1,296 @@ +#include "network/tarcap/packets_handlers/v3/pbft_sync_packet_handler.hpp" + +#include "network/tarcap/shared_states/pbft_syncing_state.hpp" +#include "pbft/pbft_chain.hpp" +#include "pbft/pbft_manager.hpp" +#include "transaction/transaction_manager.hpp" +#include "vote/pbft_vote.hpp" +#include "vote/votes_bundle_rlp.hpp" + +namespace taraxa::network::tarcap::v3 { + +PbftSyncPacketHandler::PbftSyncPacketHandler(const FullNodeConfig &conf, std::shared_ptr peers_state, + std::shared_ptr packets_stats, + std::shared_ptr pbft_syncing_state, + std::shared_ptr pbft_chain, + 
std::shared_ptr pbft_mgr, std::shared_ptr dag_mgr, + std::shared_ptr vote_mgr, std::shared_ptr db, + const addr_t &node_addr, const std::string &logs_prefix) + : ExtSyncingPacketHandler(conf, std::move(peers_state), std::move(packets_stats), std::move(pbft_syncing_state), + std::move(pbft_chain), std::move(pbft_mgr), std::move(dag_mgr), std::move(db), node_addr, + logs_prefix + "PBFT_SYNC_PH"), + vote_mgr_(std::move(vote_mgr)), + periodic_events_tp_(1, true) {} + +void PbftSyncPacketHandler::validatePacketRlpFormat(const threadpool::PacketData &packet_data) const { + if (packet_data.rlp_.itemCount() != kStandardPacketSize && packet_data.rlp_.itemCount() != kChainSyncedPacketSize) { + throw InvalidRlpItemsCountException(packet_data.type_str_, packet_data.rlp_.itemCount(), kStandardPacketSize); + } + + // PeriodData rlp parsing cannot be done through util::rlp_tuple, which automatically checks the rlp size so it is + // checked here manually + if (packet_data.rlp_[1].itemCount() != PeriodData::kBaseRlpItemCount && + packet_data.rlp_[1].itemCount() != PeriodData::kExtendedRlpItemCount) { + throw InvalidRlpItemsCountException(packet_data.type_str_ + ":PeriodData", packet_data.rlp_[1].itemCount(), + PeriodData::kBaseRlpItemCount); + } +} + +void PbftSyncPacketHandler::process(const threadpool::PacketData &packet_data, + const std::shared_ptr &peer) { + // Note: no need to consider possible race conditions due to concurrent processing as it is + // disabled on priority_queue blocking dependencies level + const auto syncing_peer = pbft_syncing_state_->syncingPeer(); + if (!syncing_peer) { + LOG(log_wr_) << "PbftSyncPacket received from unexpected peer " << packet_data.from_node_id_.abridged() + << " but there is no current syncing peer set"; + return; + } + + if (syncing_peer->getId() != packet_data.from_node_id_) { + LOG(log_wr_) << "PbftSyncPacket received from unexpected peer " << packet_data.from_node_id_.abridged() + << " current syncing peer " << 
syncing_peer->getId().abridged(); + return; + } + + // Process received pbft blocks + // pbft_chain_synced is the flag to indicate own PBFT chain has synced with the peer's PBFT chain + const bool pbft_chain_synced = packet_data.rlp_.itemCount() == kChainSyncedPacketSize; + // last_block is the flag to indicate this is the last block in each syncing round, doesn't mean PBFT chain has synced + const bool last_block = packet_data.rlp_[0].toInt(); + PeriodData period_data; + try { + period_data = decodePeriodData(packet_data.rlp_[1]); + } catch (const std::runtime_error &e) { + throw MaliciousPeerException("Unable to parse PeriodData: " + std::string(e.what())); + } + + std::vector> current_block_cert_votes; + if (pbft_chain_synced) { + current_block_cert_votes = decodeVotesBundle(packet_data.rlp_[2]); + } + const auto pbft_blk_hash = period_data.pbft_blk->getBlockHash(); + + std::string received_dag_blocks_str; // This is just log related stuff + for (auto const &block : period_data.dag_blocks) { + received_dag_blocks_str += block.getHash().toString() + " "; + if (peer->dag_level_ < block.getLevel()) { + peer->dag_level_ = block.getLevel(); + } + } + + const auto pbft_block_period = period_data.pbft_blk->getPeriod(); + LOG(log_dg_) << "PbftSyncPacket received. 
Period: " << pbft_block_period + << ", dag Blocks: " << received_dag_blocks_str << " from " << packet_data.from_node_id_; + + peer->markPbftBlockAsKnown(pbft_blk_hash); + // Update peer's pbft period if outdated + if (peer->pbft_chain_size_ < pbft_block_period) { + peer->pbft_chain_size_ = pbft_block_period; + } + + LOG(log_tr_) << "Processing pbft block: " << pbft_blk_hash; + + if (pbft_chain_->findPbftBlockInChain(pbft_blk_hash)) { + LOG(log_wr_) << "PBFT block " << pbft_blk_hash << ", period: " << period_data.pbft_blk->getPeriod() << " from " + << packet_data.from_node_id_ << " already present in chain"; + } else { + if (pbft_block_period != pbft_mgr_->pbftSyncingPeriod() + 1) { + // This can happen if we just got synced and block was cert voted + if (pbft_chain_synced && pbft_block_period == pbft_mgr_->pbftSyncingPeriod()) { + pbftSyncComplete(); + return; + } + + LOG(log_er_) << "Block " << pbft_blk_hash << " period unexpected: " << pbft_block_period + << ". Expected period: " << pbft_mgr_->pbftSyncingPeriod() + 1; + return; + } + + // Check cert vote matches if final synced block + if (pbft_chain_synced) { + for (auto const &vote : current_block_cert_votes) { + if (vote->getBlockHash() != pbft_blk_hash) { + LOG(log_er_) << "Invalid cert votes block hash " << vote->getBlockHash() << " instead of " << pbft_blk_hash + << " from peer " << packet_data.from_node_id_.abridged() << " received, stop syncing."; + handleMaliciousSyncPeer(packet_data.from_node_id_); + return; + } + } + } + + // Check votes match the hash of previous block in the queue + auto last_pbft_block_hash = pbft_mgr_->lastPbftBlockHashFromQueueOrChain(); + // Check cert vote matches + for (auto const &vote : period_data.previous_block_cert_votes) { + if (vote->getBlockHash() != last_pbft_block_hash) { + LOG(log_er_) << "Invalid cert votes block hash " << vote->getBlockHash() << " instead of " + << last_pbft_block_hash << " from peer " << packet_data.from_node_id_.abridged() + << " received, stop 
syncing."; + handleMaliciousSyncPeer(packet_data.from_node_id_); + return; + } + } + + if (!pbft_mgr_->validatePillarDataInPeriodData(period_data)) { + handleMaliciousSyncPeer(packet_data.from_node_id_); + return; + } + + auto order_hash = PbftManager::calculateOrderHash(period_data.dag_blocks); + if (order_hash != period_data.pbft_blk->getOrderHash()) { + { // This is just log related stuff + std::vector trx_order; + trx_order.reserve(period_data.transactions.size()); + std::vector blk_order; + blk_order.reserve(period_data.dag_blocks.size()); + for (auto t : period_data.transactions) { + trx_order.push_back(t->getHash()); + } + for (auto b : period_data.dag_blocks) { + blk_order.push_back(b.getHash()); + } + LOG(log_er_) << "Order hash incorrect in period data " << pbft_blk_hash << " expected: " << order_hash + << " received " << period_data.pbft_blk->getOrderHash() << "; Dag order: " << blk_order + << "; Trx order: " << trx_order << "; from " << packet_data.from_node_id_.abridged() + << ", stop syncing."; + } + handleMaliciousSyncPeer(packet_data.from_node_id_); + return; + } + + // This is special case when queue is empty and we can not say for sure that all votes that are part of this block + // have been verified before + if (pbft_mgr_->periodDataQueueEmpty()) { + for (const auto &v : period_data.previous_block_cert_votes) { + if (auto vote_is_valid = vote_mgr_->validateVote(v); vote_is_valid.first == false) { + LOG(log_er_) << "Invalid reward votes in block " << period_data.pbft_blk->getBlockHash() << " from peer " + << packet_data.from_node_id_.abridged() + << " received, stop syncing. Validation failed. 
Err: " << vote_is_valid.second; + handleMaliciousSyncPeer(packet_data.from_node_id_); + return; + } + + vote_mgr_->addVerifiedVote(v); + } + + // And now we need to replace it with verified votes + if (auto votes = vote_mgr_->checkRewardVotes(period_data.pbft_blk, true); votes.first) { + period_data.previous_block_cert_votes = std::move(votes.second); + } else { + // checkRewardVotes could fail because we just cert voted this block and moved to next period, + // in that case we are probably fully synced + if (pbft_block_period <= vote_mgr_->getRewardVotesPbftBlockPeriod()) { + pbft_syncing_state_->setPbftSyncing(false); + return; + } + + LOG(log_er_) << "Invalid reward votes in block " << period_data.pbft_blk->getBlockHash() << " from peer " + << packet_data.from_node_id_.abridged() << " received, stop syncing."; + handleMaliciousSyncPeer(packet_data.from_node_id_); + return; + } + } + + LOG(log_tr_) << "Synced PBFT block hash " << pbft_blk_hash << " with " + << period_data.previous_block_cert_votes.size() << " cert votes"; + LOG(log_tr_) << "Synced PBFT block " << period_data; + pbft_mgr_->periodDataQueuePush(std::move(period_data), packet_data.from_node_id_, + std::move(current_block_cert_votes)); + } + + auto pbft_sync_period = pbft_mgr_->pbftSyncingPeriod(); + + // Reset last sync packet received time + pbft_syncing_state_->setLastSyncPacketTime(); + + if (pbft_chain_synced) { + pbftSyncComplete(); + return; + } + + if (last_block) { + // If current sync period is actually bigger than the block we just received we are probably synced + if (pbft_sync_period > pbft_block_period) { + pbft_syncing_state_->setPbftSyncing(false); + return; + } + if (pbft_syncing_state_->isPbftSyncing()) { + if (pbft_sync_period > pbft_chain_->getPbftChainSize() + (10 * kConf.network.sync_level_size)) { + LOG(log_tr_) << "Syncing pbft blocks too fast than processing. 
Has synced period " << pbft_sync_period + << ", PBFT chain size " << pbft_chain_->getPbftChainSize(); + periodic_events_tp_.post(kDelayedPbftSyncDelayMs, [this] { delayedPbftSync(1); }); + } else { + if (!syncPeerPbft(pbft_sync_period + 1)) { + pbft_syncing_state_->setPbftSyncing(false); + return; + } + } + } + } +} + +PeriodData PbftSyncPacketHandler::decodePeriodData(const dev::RLP &period_data_rlp) const { + return PeriodData::FromOldPeriodData(period_data_rlp); +} + +std::vector> PbftSyncPacketHandler::decodeVotesBundle( + const dev::RLP &votes_bundle_rlp) const { + return decodePbftVotesBundleRlp(votes_bundle_rlp); +} + +void PbftSyncPacketHandler::pbftSyncComplete() { + if (pbft_mgr_->periodDataQueueSize()) { + LOG(log_tr_) << "Syncing pbft blocks faster than processing. Remaining sync size " + << pbft_mgr_->periodDataQueueSize(); + periodic_events_tp_.post(kDelayedPbftSyncDelayMs, [this] { pbftSyncComplete(); }); + } else { + LOG(log_dg_) << "Syncing PBFT is completed"; + // We are pbft synced with the node we are connected to but + // calling startSyncingPbft will check if some nodes have + // greater pbft chain size and we should continue syncing with + // them, Or sync pending DAG blocks + pbft_syncing_state_->setPbftSyncing(false); + startSyncingPbft(); + if (!pbft_syncing_state_->isPbftSyncing()) { + requestPendingDagBlocks(); + } + } +} + +void PbftSyncPacketHandler::delayedPbftSync(int counter) { + const uint32_t max_delayed_pbft_sync_count = 60000 / kDelayedPbftSyncDelayMs; + auto pbft_sync_period = pbft_mgr_->pbftSyncingPeriod(); + if (counter > max_delayed_pbft_sync_count) { + LOG(log_er_) << "Pbft blocks stuck in queue, no new block processed in 60 seconds " << pbft_sync_period << " " + << pbft_chain_->getPbftChainSize(); + pbft_syncing_state_->setPbftSyncing(false); + LOG(log_tr_) << "Syncing PBFT is stopping"; + return; + } + + if (pbft_syncing_state_->isPbftSyncing()) { + if (pbft_sync_period > pbft_chain_->getPbftChainSize() + (10 * 
kConf.network.sync_level_size)) { + LOG(log_tr_) << "Syncing pbft blocks faster than processing " << pbft_sync_period << " " + << pbft_chain_->getPbftChainSize(); + periodic_events_tp_.post(kDelayedPbftSyncDelayMs, [this, counter] { delayedPbftSync(counter + 1); }); + } else { + if (!syncPeerPbft(pbft_sync_period + 1)) { + pbft_syncing_state_->setPbftSyncing(false); + } + } + } +} + +void PbftSyncPacketHandler::handleMaliciousSyncPeer(const dev::p2p::NodeID &id) { + peers_state_->set_peer_malicious(id); + + if (auto host = peers_state_->host_.lock(); host) { + LOG(log_nf_) << "Disconnect peer " << id; + host->disconnect(id, dev::p2p::UserReason); + } else { + LOG(log_er_) << "Unable to handleMaliciousSyncPeer, host == nullptr"; + } +} + +} // namespace taraxa::network::tarcap::v3 diff --git a/libraries/core_libs/network/src/tarcap/taraxa_capability.cpp b/libraries/core_libs/network/src/tarcap/taraxa_capability.cpp index 079143cb9f..0d79e71e5f 100644 --- a/libraries/core_libs/network/src/tarcap/taraxa_capability.cpp +++ b/libraries/core_libs/network/src/tarcap/taraxa_capability.cpp @@ -16,6 +16,8 @@ #include "network/tarcap/packets_handlers/latest/transaction_packet_handler.hpp" #include "network/tarcap/packets_handlers/latest/vote_packet_handler.hpp" #include "network/tarcap/packets_handlers/latest/votes_bundle_packet_handler.hpp" +#include "network/tarcap/packets_handlers/v3/get_pbft_sync_packet_handler.hpp" +#include "network/tarcap/packets_handlers/v3/pbft_sync_packet_handler.hpp" #include "network/tarcap/shared_states/pbft_syncing_state.hpp" #include "node/node.hpp" #include "pbft/pbft_chain.hpp" @@ -229,12 +231,10 @@ const TaraxaCapability::InitPacketsHandlers TaraxaCapability::kInitLatestVersion // Standard packets with mid processing priority packets_handlers->registerHandler(config, peers_state, packets_stats, pbft_syncing_state, pbft_chain, pbft_mgr, dag_mgr, trx_mgr, db, - version > kV2NetworkVersion, node_addr, logs_prefix); + version > 
kV3NetworkVersion, node_addr, logs_prefix); - // Support for transaition from V2 to V3, once all nodes update to V3 post next hardfork, V2 support can be - // removed packets_handlers->registerHandler(config, peers_state, packets_stats, trx_mgr, - node_addr, version > kV2NetworkVersion, logs_prefix); + node_addr, version > kV3NetworkVersion, logs_prefix); // Non critical packets with low processing priority packets_handlers->registerHandler(config, peers_state, packets_stats, pbft_syncing_state, @@ -263,4 +263,59 @@ const TaraxaCapability::InitPacketsHandlers TaraxaCapability::kInitLatestVersion return packets_handlers; }; +const TaraxaCapability::InitPacketsHandlers TaraxaCapability::kInitV3Handlers = + [](const std::string &logs_prefix, const FullNodeConfig &config, const h256 &genesis_hash, + const std::shared_ptr &peers_state, const std::shared_ptr &pbft_syncing_state, + const std::shared_ptr &packets_stats, const std::shared_ptr &db, + const std::shared_ptr &pbft_mgr, const std::shared_ptr &pbft_chain, + const std::shared_ptr &vote_mgr, const std::shared_ptr &dag_mgr, + const std::shared_ptr &trx_mgr, const std::shared_ptr &slashing_manager, + const std::shared_ptr &pillar_chain_mgr, TarcapVersion version, + const addr_t &node_addr) { + auto packets_handlers = std::make_shared(); + // Consensus packets with high processing priority + packets_handlers->registerHandler(config, peers_state, packets_stats, pbft_mgr, pbft_chain, + vote_mgr, slashing_manager, node_addr, logs_prefix); + packets_handlers->registerHandler( + config, peers_state, packets_stats, pbft_mgr, pbft_chain, vote_mgr, slashing_manager, node_addr, logs_prefix); + packets_handlers->registerHandler( + config, peers_state, packets_stats, pbft_mgr, pbft_chain, vote_mgr, slashing_manager, node_addr, logs_prefix); + + // Standard packets with mid processing priority + packets_handlers->registerHandler(config, peers_state, packets_stats, pbft_syncing_state, + pbft_chain, pbft_mgr, dag_mgr, trx_mgr, db, 
+ version > kV3NetworkVersion, node_addr, logs_prefix); + + // Support for transition from V2 to V3, once all nodes update to V3 post next hardfork, V2 support can be + // removed + packets_handlers->registerHandler(config, peers_state, packets_stats, trx_mgr, + node_addr, version > kV3NetworkVersion, logs_prefix); + + // Non critical packets with low processing priority + packets_handlers->registerHandler(config, peers_state, packets_stats, pbft_syncing_state, + pbft_chain, pbft_mgr, dag_mgr, db, genesis_hash, node_addr, + logs_prefix); + packets_handlers->registerHandler(config, peers_state, packets_stats, trx_mgr, dag_mgr, + db, node_addr, logs_prefix); + + packets_handlers->registerHandler(config, peers_state, packets_stats, pbft_syncing_state, + pbft_chain, pbft_mgr, dag_mgr, trx_mgr, db, node_addr, + logs_prefix); + + packets_handlers->registerHandler( + config, peers_state, packets_stats, pbft_syncing_state, pbft_chain, vote_mgr, db, node_addr, logs_prefix); + + packets_handlers->registerHandler(config, peers_state, packets_stats, + pbft_syncing_state, pbft_chain, pbft_mgr, dag_mgr, + vote_mgr, db, node_addr, logs_prefix); + packets_handlers->registerHandler(config, peers_state, packets_stats, pillar_chain_mgr, + node_addr, logs_prefix); + packets_handlers->registerHandler(config, peers_state, packets_stats, + pillar_chain_mgr, node_addr, logs_prefix); + packets_handlers->registerHandler(config, peers_state, packets_stats, + pillar_chain_mgr, node_addr, logs_prefix); + + return packets_handlers; + }; + } // namespace taraxa::network::tarcap diff --git a/libraries/types/pbft_block/include/pbft/period_data.hpp b/libraries/types/pbft_block/include/pbft/period_data.hpp index 134af25056..432fbe5900 100644 --- a/libraries/types/pbft_block/include/pbft/period_data.hpp +++ b/libraries/types/pbft_block/include/pbft/period_data.hpp @@ -29,9 +29,10 @@ class PeriodData { const std::vector>& previous_block_cert_votes, std::optional>>&& pillar_votes = {}); explicit 
PeriodData(const dev::RLP& all_rlp); - explicit PeriodData(bytes const& all_rlp); + explicit PeriodData(const bytes& all_rlp); static PeriodData FromOldPeriodData(const dev::RLP& rlp); + static bytes ToOldPeriodData(const bytes& rlp); std::shared_ptr pbft_blk; std::vector> previous_block_cert_votes; // These votes are the cert votes of previous block diff --git a/libraries/types/pbft_block/src/period_data.cpp b/libraries/types/pbft_block/src/period_data.cpp index f629e2d78e..befb80ff90 100644 --- a/libraries/types/pbft_block/src/period_data.cpp +++ b/libraries/types/pbft_block/src/period_data.cpp @@ -98,6 +98,36 @@ PeriodData PeriodData::FromOldPeriodData(const dev::RLP& rlp) { return period_data; } +bytes PeriodData::ToOldPeriodData(const bytes& rlp) { + PeriodData period_data(rlp); + const auto kRlpSize = period_data.pillar_votes_.has_value() ? kBaseRlpItemCount + 1 : kBaseRlpItemCount; + dev::RLPStream s(kRlpSize); + s.appendRaw(period_data.pbft_blk->rlp(true)); + + if (period_data.pbft_blk->getPeriod() > 1) [[likely]] { + s.appendRaw(encodePbftVotesBundleRlp(period_data.previous_block_cert_votes)); + } else { + s.append(""); + } + + s.appendList(period_data.dag_blocks.size()); + for (auto const& b : period_data.dag_blocks) { + s.appendRaw(b.rlp(true)); + } + + s.appendList(period_data.transactions.size()); + for (auto const& t : period_data.transactions) { + s.appendRaw(t->rlp()); + } + + // Pillar votes are optional data of period data since ficus hardfork + if (period_data.pillar_votes_.has_value()) { + s.appendRaw(encodePillarVotesBundleRlp(*period_data.pillar_votes_)); + } + + return s.invalidate(); +} + std::ostream& operator<<(std::ostream& strm, PeriodData const& b) { strm << "[PeriodData] : " << b.pbft_blk << " , num of votes " << b.previous_block_cert_votes.size() << std::endl; return strm; From 954ccd1c51276ce7b41895e0c1cdb1d99c35f9a1 Mon Sep 17 00:00:00 2001 From: Matus Kysel Date: Fri, 30 Aug 2024 14:30:51 +0200 Subject: [PATCH 018/105] fix tests 
and formating --- .../latest/dag_block_packet_handler.hpp | 4 +- .../latest/transaction_packet_handler.hpp | 12 +--- .../v3/get_pbft_sync_packet_handler.hpp | 2 +- .../latest/dag_block_packet_handler.cpp | 63 ++----------------- .../latest/transaction_packet_handler.cpp | 51 +-------------- .../network/src/tarcap/taraxa_capability.cpp | 14 ++--- tests/network_test.cpp | 2 +- 7 files changed, 17 insertions(+), 131 deletions(-) diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/dag_block_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/dag_block_packet_handler.hpp index c2e39296c0..4552b072af 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/dag_block_packet_handler.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/dag_block_packet_handler.hpp @@ -17,9 +17,8 @@ class DagBlockPacketHandler : public ExtSyncingPacketHandler { std::shared_ptr pbft_syncing_state, std::shared_ptr pbft_chain, std::shared_ptr pbft_mgr, std::shared_ptr dag_mgr, std::shared_ptr trx_mgr, std::shared_ptr db, - bool trxs_in_dag_packet, const addr_t &node_addr, const std::string &logs_prefix = ""); + const addr_t &node_addr, const std::string &logs_prefix = ""); - void sendBlock(dev::p2p::NodeID const &peer_id, DagBlock block, const SharedTransactions &trxs); void sendBlockWithTransactions(dev::p2p::NodeID const &peer_id, DagBlock block, const SharedTransactions &trxs); void onNewBlockReceived(DagBlock &&block, const std::shared_ptr &peer = nullptr, const std::unordered_map> &trxs = {}); @@ -34,7 +33,6 @@ class DagBlockPacketHandler : public ExtSyncingPacketHandler { protected: std::shared_ptr trx_mgr_{nullptr}; - const bool kTrxsInDagPacket; }; } // namespace taraxa::network::tarcap diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/transaction_packet_handler.hpp 
b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/transaction_packet_handler.hpp index 47ba929275..208b511174 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/transaction_packet_handler.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/transaction_packet_handler.hpp @@ -16,7 +16,7 @@ class TransactionPacketHandler : public PacketHandler { public: TransactionPacketHandler(const FullNodeConfig& conf, std::shared_ptr peers_state, std::shared_ptr packets_stats, - std::shared_ptr trx_mgr, const addr_t& node_addr, bool hash_gossip, + std::shared_ptr trx_mgr, const addr_t& node_addr, const std::string& logs_prefix = "TRANSACTION_PH"); /** @@ -48,15 +48,6 @@ class TransactionPacketHandler : public PacketHandler { virtual void process(const threadpool::PacketData& packet_data, const std::shared_ptr& peer) override; protected: - /** - * @brief Sends batch of transactions to all connected peers - * @note Support of the old V2 version, remove once most of the network is updated or after a hardfork. 
This method is - * used as periodic event to broadcast transactions to the other peers in network - * - * @param transactions to be sent - */ - void periodicSendTransactionsWithoutHashGossip(std::vector&& transactions); - /** * @brief select which transactions and hashes to send to which connected peer * @@ -82,7 +73,6 @@ class TransactionPacketHandler : public PacketHandler { std::atomic received_trx_count_{0}; std::atomic unique_received_trx_count_{0}; - const bool kHashGossip = true; }; } // namespace taraxa::network::tarcap diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/v3/get_pbft_sync_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v3/get_pbft_sync_packet_handler.hpp index 82d0506100..d30c50c649 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets_handlers/v3/get_pbft_sync_packet_handler.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v3/get_pbft_sync_packet_handler.hpp @@ -9,7 +9,7 @@ class VoteManager; } // namespace taraxa namespace taraxa::network::tarcap { - class PbftSyncingState; +class PbftSyncingState; } namespace taraxa::network::tarcap::v3 { diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/dag_block_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/dag_block_packet_handler.cpp index e856b8f209..a0b22ea34b 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/dag_block_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/dag_block_packet_handler.cpp @@ -13,22 +13,17 @@ DagBlockPacketHandler::DagBlockPacketHandler(const FullNodeConfig &conf, std::sh std::shared_ptr pbft_chain, std::shared_ptr pbft_mgr, std::shared_ptr dag_mgr, std::shared_ptr trx_mgr, std::shared_ptr db, - bool trxs_in_dag_packet, const addr_t &node_addr, - const std::string &logs_prefix) + const addr_t &node_addr, const std::string &logs_prefix) 
: ExtSyncingPacketHandler(conf, std::move(peers_state), std::move(packets_stats), std::move(pbft_syncing_state), std::move(pbft_chain), std::move(pbft_mgr), std::move(dag_mgr), std::move(db), node_addr, logs_prefix + "DAG_BLOCK_PH"), - trx_mgr_(std::move(trx_mgr)), - kTrxsInDagPacket(trxs_in_dag_packet) {} + trx_mgr_(std::move(trx_mgr)) {} void DagBlockPacketHandler::validatePacketRlpFormat(const threadpool::PacketData &packet_data) const { - constexpr size_t required_size_v2 = 8; constexpr size_t required_size = 2; // Only one dag block can be received - if (kTrxsInDagPacket && packet_data.rlp_.itemCount() != required_size) { + if (packet_data.rlp_.itemCount() != required_size) { throw InvalidRlpItemsCountException(packet_data.type_str_, packet_data.rlp_.itemCount(), required_size); - } else if (!kTrxsInDagPacket && packet_data.rlp_.itemCount() != required_size_v2) { - throw InvalidRlpItemsCountException(packet_data.type_str_, packet_data.rlp_.itemCount(), required_size_v2); } } @@ -69,51 +64,6 @@ void DagBlockPacketHandler::process(const threadpool::PacketData &packet_data, onNewBlockReceived(std::move(block), peer, transactions); } -void DagBlockPacketHandler::sendBlock(dev::p2p::NodeID const &peer_id, taraxa::DagBlock block, - const SharedTransactions &trxs) { - std::shared_ptr peer = peers_state_->getPeer(peer_id); - if (!peer) { - LOG(log_wr_) << "Send dag block " << block.getHash() << ". 
Failed to obtain peer " << peer_id; - return; - } - - // This lock prevents race condition between syncing and gossiping dag blocks - std::unique_lock lock(peer->mutex_for_sending_dag_blocks_); - - // Transactions are first sent in transactions packet before sending the block - uint32_t index = 0; - while (index < trxs.size()) { - const uint32_t trx_count_to_send = std::min(static_cast(kMaxTransactionsInPacket), trxs.size() - index); - - dev::RLPStream s(TransactionPacketHandler::kTransactionPacketItemCount); - s.appendList(trx_count_to_send); - - taraxa::bytes trx_bytes; - for (uint32_t i = index; i < index + trx_count_to_send; i++) { - auto trx_data = trxs[i]->rlp(); - s << trxs[i]->getHash(); - trx_bytes.insert(trx_bytes.end(), std::begin(trx_data), std::end(trx_data)); - } - - s.appendList(trx_count_to_send); - s.appendRaw(trx_bytes, trx_count_to_send); - sealAndSend(peer_id, TransactionPacket, std::move(s)); - - index += trx_count_to_send; - } - - if (!sealAndSend(peer_id, DagBlockPacket, block.streamRLP(true))) { - LOG(log_wr_) << "Sending DagBlock " << block.getHash() << " failed to " << peer_id; - return; - } - - // Mark data as known if sending was successful - peer->markDagBlockAsKnown(block.getHash()); - for (const auto &trx : trxs) { - peer->markTransactionAsKnown(trx->getHash()); - } -} - void DagBlockPacketHandler::sendBlockWithTransactions(dev::p2p::NodeID const &peer_id, taraxa::DagBlock block, const SharedTransactions &trxs) { std::shared_ptr peer = peers_state_->getPeer(peer_id); @@ -290,11 +240,8 @@ void DagBlockPacketHandler::onNewBlockVerified(const DagBlock &block, bool propo transactions_to_send.push_back(trx); peer_and_transactions_to_log += trx_hash.abridged(); } - if (kTrxsInDagPacket) { - sendBlockWithTransactions(peer_id, block, transactions_to_send); - } else { - sendBlock(peer_id, block, transactions_to_send); - } + + sendBlockWithTransactions(peer_id, block, transactions_to_send); peer->markDagBlockAsKnown(block_hash); } } diff --git 
a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/transaction_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/transaction_packet_handler.cpp index 63cb41449d..adf7178eaa 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/transaction_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/transaction_packet_handler.cpp @@ -10,10 +10,9 @@ namespace taraxa::network::tarcap { TransactionPacketHandler::TransactionPacketHandler(const FullNodeConfig &conf, std::shared_ptr peers_state, std::shared_ptr packets_stats, std::shared_ptr trx_mgr, const addr_t &node_addr, - bool hash_gossip, const std::string &logs_prefix) + const std::string &logs_prefix) : PacketHandler(conf, std::move(peers_state), std::move(packets_stats), node_addr, logs_prefix + "TRANSACTION_PH"), - trx_mgr_(std::move(trx_mgr)), - kHashGossip(hash_gossip) {} + trx_mgr_(std::move(trx_mgr)) {} void TransactionPacketHandler::validatePacketRlpFormat(const threadpool::PacketData &packet_data) const { auto items = packet_data.rlp_.itemCount(); @@ -100,46 +99,6 @@ inline void TransactionPacketHandler::process(const threadpool::PacketData &pack } } -void TransactionPacketHandler::periodicSendTransactionsWithoutHashGossip( - std::vector &&transactions) { - std::vector>>> - peers_with_transactions_to_send; - - auto peers = peers_state_->getAllPeers(); - for (const auto &peer : peers) { - // Confirm that status messages were exchanged otherwise message might be ignored and node would - // incorrectly markTransactionAsKnown - if (!peer.second->syncing_) { - SharedTransactions peer_trxs; - for (auto const &account_trx : transactions) { - for (auto const &trx : account_trx) { - auto trx_hash = trx->getHash(); - if (peer.second->isTransactionKnown(trx_hash)) { - continue; - } - peer_trxs.push_back(trx); - if (peer_trxs.size() == kMaxTransactionsInPacket) { - peers_with_transactions_to_send.push_back({peer.first, 
{peer_trxs, {}}}); - peer_trxs.clear(); - }; - } - } - if (peer_trxs.size() > 0) { - peers_with_transactions_to_send.push_back({peer.first, {peer_trxs, {}}}); - } - } - } - const auto peers_to_send_count = peers_with_transactions_to_send.size(); - if (peers_to_send_count > 0) { - // Sending it in same order favours some peers over others, always start with a different position - uint32_t start_with = rand() % peers_to_send_count; - for (uint32_t i = 0; i < peers_to_send_count; i++) { - auto peer_to_send = peers_with_transactions_to_send[(start_with + i) % peers_to_send_count]; - sendTransactions(peers[peer_to_send.first], std::move(peer_to_send.second)); - } - } -} - std::pair>> TransactionPacketHandler::transactionsToSendToPeer(std::shared_ptr peer, const std::vector &transactions, @@ -216,12 +175,6 @@ TransactionPacketHandler::transactionsToSendToPeers(std::vector &&transactions) { - // Support of old v2 net version. Remove once network is fully updated - if (!kHashGossip) { - periodicSendTransactionsWithoutHashGossip(std::move(transactions)); - return; - } - auto peers_with_transactions_to_send = transactionsToSendToPeers(std::move(transactions)); const auto peers_to_send_count = peers_with_transactions_to_send.size(); if (peers_to_send_count > 0) { diff --git a/libraries/core_libs/network/src/tarcap/taraxa_capability.cpp b/libraries/core_libs/network/src/tarcap/taraxa_capability.cpp index 0d79e71e5f..078a6e4b22 100644 --- a/libraries/core_libs/network/src/tarcap/taraxa_capability.cpp +++ b/libraries/core_libs/network/src/tarcap/taraxa_capability.cpp @@ -230,11 +230,11 @@ const TaraxaCapability::InitPacketsHandlers TaraxaCapability::kInitLatestVersion // Standard packets with mid processing priority packets_handlers->registerHandler(config, peers_state, packets_stats, pbft_syncing_state, - pbft_chain, pbft_mgr, dag_mgr, trx_mgr, db, - version > kV3NetworkVersion, node_addr, logs_prefix); + pbft_chain, pbft_mgr, dag_mgr, trx_mgr, db, node_addr, + logs_prefix); 
packets_handlers->registerHandler(config, peers_state, packets_stats, trx_mgr, - node_addr, version > kV3NetworkVersion, logs_prefix); + node_addr, logs_prefix); // Non critical packets with low processing priority packets_handlers->registerHandler(config, peers_state, packets_stats, pbft_syncing_state, @@ -283,13 +283,11 @@ const TaraxaCapability::InitPacketsHandlers TaraxaCapability::kInitV3Handlers = // Standard packets with mid processing priority packets_handlers->registerHandler(config, peers_state, packets_stats, pbft_syncing_state, - pbft_chain, pbft_mgr, dag_mgr, trx_mgr, db, - version > kV3NetworkVersion, node_addr, logs_prefix); + pbft_chain, pbft_mgr, dag_mgr, trx_mgr, db, node_addr, + logs_prefix); - // Support for transition from V2 to V3, once all nodes update to V3 post next hardfork, V2 support can be - // removed packets_handlers->registerHandler(config, peers_state, packets_stats, trx_mgr, - node_addr, version > kV3NetworkVersion, logs_prefix); + node_addr, logs_prefix); // Non critical packets with low processing priority packets_handlers->registerHandler(config, peers_state, packets_stats, pbft_syncing_state, diff --git a/tests/network_test.cpp b/tests/network_test.cpp index a60f16a6b2..fbc16bd7d6 100644 --- a/tests/network_test.cpp +++ b/tests/network_test.cpp @@ -1165,7 +1165,7 @@ TEST_F(NetworkTest, transaction_gossip_selection) { class TestTransactionPacketHandler : public network::tarcap::TransactionPacketHandler { public: TestTransactionPacketHandler(std::shared_ptr peers_state) - : TransactionPacketHandler({}, peers_state, {}, {}, {}, true) {} + : TransactionPacketHandler({}, peers_state, {}, {}, {}) {} std::vector< std::pair, std::pair>>> public_transactionsToSendToPeers(std::vector transactions) { From d5e77b16deb89fe8c605e254640629a8aa0aa226 Mon Sep 17 00:00:00 2001 From: Matus Kysel Date: Fri, 30 Aug 2024 14:33:12 +0200 Subject: [PATCH 019/105] fix tarcap version --- CMakeLists.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 
deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 6bb171e4e3..2ce8bc7515 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -3,11 +3,11 @@ cmake_minimum_required(VERSION 3.20) # Set current version of the project set(TARAXA_MAJOR_VERSION 1) set(TARAXA_MINOR_VERSION 11) -set(TARAXA_PATCH_VERSION 4) +set(TARAXA_PATCH_VERSION 3) set(TARAXA_VERSION ${TARAXA_MAJOR_VERSION}.${TARAXA_MINOR_VERSION}.${TARAXA_PATCH_VERSION}) # Any time a change in the network protocol is introduced this version should be increased -set(TARAXA_NET_VERSION 3) +set(TARAXA_NET_VERSION 4) # Major version is modified when DAG blocks, pbft blocks and any basic building blocks of our blockchain is modified # in the db set(TARAXA_DB_MAJOR_VERSION 1) From f51810c01b9f7834e2e9eddfa5dc5a72092ec9f3 Mon Sep 17 00:00:00 2001 From: Matus Kysel Date: Fri, 30 Aug 2024 15:51:10 +0200 Subject: [PATCH 020/105] fixed tests --- libraries/types/pbft_block/src/period_data.cpp | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/libraries/types/pbft_block/src/period_data.cpp b/libraries/types/pbft_block/src/period_data.cpp index befb80ff90..0ad9dcfabd 100644 --- a/libraries/types/pbft_block/src/period_data.cpp +++ b/libraries/types/pbft_block/src/period_data.cpp @@ -50,7 +50,11 @@ bytes PeriodData::rlp() const { s.append(""); } - s.appendRaw(encodeDAGBlocksBundleRlp(dag_blocks)); + if (dag_blocks.empty()) { + s.append(""); + } else { + s.appendRaw(encodeDAGBlocksBundleRlp(dag_blocks)); + } s.appendList(transactions.size()); for (auto const& t : transactions) { From c269e7d77101855680910f8ed5f996c6f8a65691 Mon Sep 17 00:00:00 2001 From: Matus Kysel Date: Fri, 30 Aug 2024 16:01:36 +0200 Subject: [PATCH 021/105] fix final chain tests --- libraries/core_libs/storage/src/storage.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/core_libs/storage/src/storage.cpp b/libraries/core_libs/storage/src/storage.cpp index 7415cf67a6..ec9a6edeac 100644 --- 
a/libraries/core_libs/storage/src/storage.cpp +++ b/libraries/core_libs/storage/src/storage.cpp @@ -1263,7 +1263,7 @@ std::vector> DbStorage::getFinalizedDagBlockByPeriod(P auto dag_blocks_data = dev::RLP(period_data)[DAG_BLOCKS_POS_IN_PERIOD_DATA]; auto dag_blocks = decodeDAGBlocksBundleRlp(dag_blocks_data); ret.reserve(dag_blocks.size()); - for (auto const block : dag_blocks_data) { + for (auto const block : dag_blocks) { ret.emplace_back(std::make_shared(std::move(block))); } } From 9b938658d0397057f4e711acc6d8ec162363e1a9 Mon Sep 17 00:00:00 2001 From: mfrankovi Date: Sun, 1 Sep 2024 20:50:57 +0200 Subject: [PATCH 022/105] chore: handle transactions with no account --- .../src/transaction/transaction_manager.cpp | 27 ++++++++++++------- tests/full_node_test.cpp | 9 ++++--- tests/test_util/include/test_util/samples.hpp | 2 +- .../test_util/include/test_util/test_util.hpp | 2 ++ tests/test_util/src/samples.cpp | 7 +++-- 5 files changed, 29 insertions(+), 18 deletions(-) diff --git a/libraries/core_libs/consensus/src/transaction/transaction_manager.cpp b/libraries/core_libs/consensus/src/transaction/transaction_manager.cpp index c737efcfb1..e5fd6efa6f 100644 --- a/libraries/core_libs/consensus/src/transaction/transaction_manager.cpp +++ b/libraries/core_libs/consensus/src/transaction/transaction_manager.cpp @@ -121,20 +121,27 @@ TransactionStatus TransactionManager::insertValidatedTransaction(std::shared_ptr return TransactionStatus::Known; } - const auto account = final_chain_->getAccount(tx->getSender()).value_or(taraxa::state_api::ZeroAccount); + const auto account = final_chain_->getAccount(tx->getSender()); bool proposable = true; - // Ensure the transaction adheres to nonce ordering - if (account.nonce > tx->getNonce()) { - if (!insert_non_proposable) { - return TransactionStatus::Known; + if (account.has_value()) { + // Ensure the transaction adheres to nonce ordering + if (account->nonce > tx->getNonce()) { + if (!insert_non_proposable) { + return 
TransactionStatus::Known; + } + proposable = false; } - proposable = false; - } - // Transactor should have enough funds to cover the costs - // cost == V + GP * GL - if (account.balance < tx->getCost()) { + // Transactor should have enough funds to cover the costs + // cost == V + GP * GL + if (account->balance < tx->getCost()) { + if (!insert_non_proposable) { + return TransactionStatus::Known; + } + proposable = false; + } + } else { if (!insert_non_proposable) { return TransactionStatus::Known; } diff --git a/tests/full_node_test.cpp b/tests/full_node_test.cpp index 8ea1d088de..b8816637df 100644 --- a/tests/full_node_test.cpp +++ b/tests/full_node_test.cpp @@ -371,6 +371,8 @@ TEST_F(FullNodeTest, sync_five_nodes) { } } + void dummy_initial_transfer() { coin_transfer(0, dummy_client.getAddress(), 1000000, true); } + auto getIssuedTrxCount() { shared_lock l(m); return issued_trx_count; @@ -455,8 +457,9 @@ TEST_F(FullNodeTest, sync_five_nodes) { std::vector all_transactions; // transfer some coins to your friends ... - auto init_bal = own_effective_genesis_bal(nodes[0]->getConfig()) / nodes.size(); + auto init_bal = own_effective_genesis_bal(nodes[0]->getConfig()) / (nodes.size() + 1); + context.dummy_initial_transfer(); { for (size_t i(1); i < nodes.size(); ++i) { // we shouldn't wait for transaction execution because it could be in alternative dag @@ -982,7 +985,7 @@ TEST_F(FullNodeTest, sync_two_nodes2) { // send 1000 trxs try { std::cout << "Sending 1000 trxs ..." << std::endl; - sendTrx(1000, 7778); + sendTrx(1000, 7778, nodes[0]->getSecretKey()); std::cout << "1000 trxs sent ..." 
<< std::endl; } catch (std::exception &e) { @@ -1109,7 +1112,7 @@ TEST_F(FullNodeTest, receive_send_transaction) { auto node = create_nodes(node_cfgs, true /*start*/).front(); try { - sendTrx(1000, 7778); + sendTrx(1000, 7778, node->getSecretKey()); } catch (std::exception &e) { std::cerr << e.what() << std::endl; } diff --git a/tests/test_util/include/test_util/samples.hpp b/tests/test_util/include/test_util/samples.hpp index 132370024c..828bdd93ee 100644 --- a/tests/test_util/include/test_util/samples.hpp +++ b/tests/test_util/include/test_util/samples.hpp @@ -99,7 +99,7 @@ class TxGenerator { inline auto const TX_GEN = Lazy([] { return TxGenerator(); }); -bool sendTrx(uint64_t count, unsigned port); +bool sendTrx(uint64_t count, unsigned port, dev::Secret secret); SharedTransactions createSignedTrxSamples(unsigned start, unsigned num, secret_t const &sk, bytes data = dev::fromHex("00FEDCBA9876543210000000")); diff --git a/tests/test_util/include/test_util/test_util.hpp b/tests/test_util/include/test_util/test_util.hpp index 99ebd85425..9a923f5932 100644 --- a/tests/test_util/include/test_util/test_util.hpp +++ b/tests/test_util/include/test_util/test_util.hpp @@ -151,6 +151,8 @@ struct TransactionClient { Context process(const std::shared_ptr& trx, bool wait_executed = true) const; Context coinTransfer(const addr_t& to, const val_t& val, bool wait_executed = true); + + addr_t getAddress() const { return dev::KeyPair(secret_).address(); } }; SharedTransaction make_dpos_trx(const FullNodeConfig& sender_node_cfg, const u256& value = 0, uint64_t nonce = 0, diff --git a/tests/test_util/src/samples.cpp b/tests/test_util/src/samples.cpp index 151beec493..c029a700b3 100644 --- a/tests/test_util/src/samples.cpp +++ b/tests/test_util/src/samples.cpp @@ -1,7 +1,7 @@ #include "test_util/samples.hpp" namespace taraxa::core_tests::samples { -bool sendTrx(uint64_t count, unsigned port) { +bool sendTrx(uint64_t count, unsigned port, dev::Secret secret) { auto pattern = R"( curl 
--silent -m 10 --output /dev/null -d \ '{ @@ -21,9 +21,8 @@ bool sendTrx(uint64_t count, unsigned port) { }' 0.0.0.0:%s )"; for (uint64_t i = 0; i < count; ++i) { - auto retcode = system(fmt(pattern, i + 1, val_t(TEST_TX_GAS_LIMIT), val_t(0), addr_t::random(), - samples::TX_GEN->getRandomUniqueSenderSecret().makeInsecure(), port) - .c_str()); + auto retcode = system( + fmt(pattern, i + 1, val_t(TEST_TX_GAS_LIMIT), val_t(0), addr_t::random(), secret.makeInsecure(), port).c_str()); if (retcode != 0) { return false; } From 25eea9da4a7d77bb37fad2302657be99388e9733 Mon Sep 17 00:00:00 2001 From: mfrankovi Date: Wed, 28 Aug 2024 15:54:27 +0200 Subject: [PATCH 023/105] chore:queue limit improvements --- libraries/config/include/config/network.hpp | 6 ++ .../network/tarcap/taraxa_capability.hpp | 7 ++ .../network/src/tarcap/taraxa_capability.cpp | 73 +++++++++++++------ 3 files changed, 64 insertions(+), 22 deletions(-) diff --git a/libraries/config/include/config/network.hpp b/libraries/config/include/config/network.hpp index 49253ca962..d9f1c07c19 100644 --- a/libraries/config/include/config/network.hpp +++ b/libraries/config/include/config/network.hpp @@ -53,6 +53,12 @@ struct DdosProtectionConfig { // Max packets queue size, 0 means unlimited uint64_t max_packets_queue_size{0}; + // Time of allowed queue over the limit + std::chrono::milliseconds queue_limit_time{5000}; + + // Time period between disconnecting peers + std::chrono::milliseconds peer_disconnect_interval{5000}; + void validate(uint32_t delegation_delay) const; }; diff --git a/libraries/core_libs/network/include/network/tarcap/taraxa_capability.hpp b/libraries/core_libs/network/include/network/tarcap/taraxa_capability.hpp index 6dcf70c819..8a1f50f969 100644 --- a/libraries/core_libs/network/include/network/tarcap/taraxa_capability.hpp +++ b/libraries/core_libs/network/include/network/tarcap/taraxa_capability.hpp @@ -93,6 +93,7 @@ class TaraxaCapability final : public dev::p2p::CapabilityFace { private: 
bool filterSyncIrrelevantPackets(SubprotocolPacketType packet_type) const; + void handlePacketQueueOverLimit(std::shared_ptr host, dev::p2p::NodeID node_id, size_t tp_queue_size); private: // Capability version @@ -116,6 +117,12 @@ class TaraxaCapability final : public dev::p2p::CapabilityFace { // Main Threadpool for processing packets std::shared_ptr thread_pool_; + // Last disconnect time and number of peers + std::chrono::_V2::system_clock::time_point last_ddos_disconnect_time_ = {}; + std::chrono::_V2::system_clock::time_point queue_over_limit_start_time_ = {}; + bool queue_over_limit_ = false; + uint32_t last_disconnect_number_of_peers_ = 0; + LOG_OBJECTS_DEFINE }; diff --git a/libraries/core_libs/network/src/tarcap/taraxa_capability.cpp b/libraries/core_libs/network/src/tarcap/taraxa_capability.cpp index 079143cb9f..568bcfbce6 100644 --- a/libraries/core_libs/network/src/tarcap/taraxa_capability.cpp +++ b/libraries/core_libs/network/src/tarcap/taraxa_capability.cpp @@ -77,6 +77,13 @@ void TaraxaCapability::onConnect(std::weak_ptr session, u256 return; } + // If queue is over the limit do not allow new nodes to connect until queue size is reduced + if (queue_over_limit_ && peers_state_->getPeersCount() >= last_disconnect_number_of_peers_) { + session_p->disconnect(dev::p2p::UserReason); + LOG(log_wr_) << "Node " << node_id << " connection dropped - queue over limit"; + return; + } + peers_state_->addPendingPeer(node_id, session_p->info().host + ":" + std::to_string(session_p->info().port)); LOG(log_nf_) << "Node " << node_id << " connected"; @@ -166,28 +173,11 @@ void TaraxaCapability::interpretCapabilityPacket(std::weak_ptr kConf.network.ddos_protection.max_packets_queue_size) { - const auto connected_peers = peers_state_->getAllPeers(); - // Always keep at least 5 connected peers - if (connected_peers.size() > 5) { - // Find peer with the highest processing time and disconnect him - std::pair peer_max_processing_time{std::chrono::microseconds(0), - 
dev::p2p::NodeID()}; - - for (const auto &connected_peer : connected_peers) { - const auto peer_packets_stats = connected_peer.second->getAllPacketsStatsCopy(); - - if (peer_packets_stats.second.processing_duration_ > peer_max_processing_time.first) { - peer_max_processing_time = {peer_packets_stats.second.processing_duration_, connected_peer.first}; - } - } - - // Disconnect peer with the highest processing time - LOG(log_er_) << "Max allowed packets queue size " << kConf.network.ddos_protection.max_packets_queue_size - << " exceeded: " << tp_queue_size << ". Peer with the highest processing time " - << peer_max_processing_time.second << " will be disconnected"; - host->disconnect(node_id, dev::p2p::UserReason); - return; - } + // Queue size is over the limit + handlePacketQueueOverLimit(host, node_id, tp_queue_size); + } else { + queue_over_limit_ = false; + last_disconnect_number_of_peers_ = 0; } // TODO: we are making a copy here for each packet bytes(toBytes()), which is pretty significant. 
Check why RLP does @@ -195,6 +185,45 @@ void TaraxaCapability::interpretCapabilityPacket(std::weak_ptrpush({version(), threadpool::PacketData(packet_type, node_id, _r.data().toBytes())}); } +void TaraxaCapability::handlePacketQueueOverLimit(std::shared_ptr host, dev::p2p::NodeID node_id, + size_t tp_queue_size) { + if (!queue_over_limit_) { + queue_over_limit_start_time_ = std::chrono::system_clock::now(); + queue_over_limit_ = true; + } + + // Check if Queue is over the limit for queue_limit_time + if ((std::chrono::system_clock::now() - queue_over_limit_start_time_) > + kConf.network.ddos_protection.queue_limit_time) { + // Only disconnect if there is more than peer_disconnect_interval since last disconnect + if ((std::chrono::system_clock::now() - last_ddos_disconnect_time_) > + kConf.network.ddos_protection.peer_disconnect_interval) { + auto connected_peers = peers_state_->getAllPeers(); + last_disconnect_number_of_peers_ = connected_peers.size(); + last_ddos_disconnect_time_ = std::chrono::system_clock::now(); + // Always keep at least 5 connected peers + if (connected_peers.size() > 5) { + // Find peers with the highest processing time and disconnect + std::pair peer_max_processing_time{std::chrono::microseconds(0), + dev::p2p::NodeID()}; + for (const auto &connected_peer : connected_peers) { + const auto peer_packets_stats = connected_peer.second->getAllPacketsStatsCopy(); + if (peer_packets_stats.second.processing_duration_ > peer_max_processing_time.first) { + peer_max_processing_time = {peer_packets_stats.second.processing_duration_, connected_peer.first}; + } + } + + // Disconnect peer with the highest processing time + LOG(log_er_) << "Max allowed packets queue size " << kConf.network.ddos_protection.max_packets_queue_size + << " exceeded: " << tp_queue_size << ". 
Peer with the highest processing time " + << peer_max_processing_time.second << " will be disconnected"; + host->disconnect(node_id, dev::p2p::UserReason); + connected_peers.erase(node_id); + } + } + } +} + inline bool TaraxaCapability::filterSyncIrrelevantPackets(SubprotocolPacketType packet_type) const { switch (packet_type) { case StatusPacket: From 0e1480d315bbd55d4699dc7cfe06d16efbea1da4 Mon Sep 17 00:00:00 2001 From: Matus Kysel Date: Thu, 5 Sep 2024 15:11:36 +0200 Subject: [PATCH 024/105] review comments --- .../{dag_block.hpp => dag_block_period_migration.hpp} | 4 ++-- .../{dag_block.cpp => dag_block_period_migration.cpp} | 10 +++++----- .../storage/src/migration/migration_manager.cpp | 2 +- 3 files changed, 8 insertions(+), 8 deletions(-) rename libraries/core_libs/storage/include/storage/migration/{dag_block.hpp => dag_block_period_migration.hpp} (69%) rename libraries/core_libs/storage/src/migration/{dag_block.cpp => dag_block_period_migration.cpp} (83%) diff --git a/libraries/core_libs/storage/include/storage/migration/dag_block.hpp b/libraries/core_libs/storage/include/storage/migration/dag_block_period_migration.hpp similarity index 69% rename from libraries/core_libs/storage/include/storage/migration/dag_block.hpp rename to libraries/core_libs/storage/include/storage/migration/dag_block_period_migration.hpp index 3a7ac8069a..418c57869c 100644 --- a/libraries/core_libs/storage/include/storage/migration/dag_block.hpp +++ b/libraries/core_libs/storage/include/storage/migration/dag_block_period_migration.hpp @@ -2,9 +2,9 @@ #include "storage/migration/migration_base.hpp" namespace taraxa::storage::migration { -class DagBlockData : public migration::Base { +class DagBlockPeriodMigration : public migration::Base { public: - DagBlockData(std::shared_ptr db); + DagBlockPeriodMigration(std::shared_ptr db); std::string id() override; uint32_t dbVersion() override; void migrate(logger::Logger& log) override; diff --git 
a/libraries/core_libs/storage/src/migration/dag_block.cpp b/libraries/core_libs/storage/src/migration/dag_block_period_migration.cpp similarity index 83% rename from libraries/core_libs/storage/src/migration/dag_block.cpp rename to libraries/core_libs/storage/src/migration/dag_block_period_migration.cpp index e30b833125..3ab2a5f7eb 100644 --- a/libraries/core_libs/storage/src/migration/dag_block.cpp +++ b/libraries/core_libs/storage/src/migration/dag_block_period_migration.cpp @@ -1,4 +1,4 @@ -#include "storage/migration/dag_block.hpp" +#include "storage/migration/dag_block_period_migration.hpp" #include @@ -10,13 +10,13 @@ namespace taraxa::storage::migration { -DagBlockData::DagBlockData(std::shared_ptr db) : migration::Base(db) {} +DagBlockPeriodMigration::DagBlockPeriodMigration(std::shared_ptr db) : migration::Base(db) {} -std::string DagBlockData::id() { return "DagBlockData"; } +std::string DagBlockPeriodMigration::id() { return "DagBlockPeriodMigration"; } -uint32_t DagBlockData::dbVersion() { return 1; } +uint32_t DagBlockPeriodMigration::dbVersion() { return 1; } -void DagBlockData::migrate(logger::Logger& log) { +void DagBlockPeriodMigration::migrate(logger::Logger& log) { auto orig_col = DbStorage::Columns::period_data; auto copied_col = db_->copyColumn(db_->handle(orig_col), orig_col.name() + "-copy"); diff --git a/libraries/core_libs/storage/src/migration/migration_manager.cpp b/libraries/core_libs/storage/src/migration/migration_manager.cpp index b73ecab839..0b15aa84df 100644 --- a/libraries/core_libs/storage/src/migration/migration_manager.cpp +++ b/libraries/core_libs/storage/src/migration/migration_manager.cpp @@ -5,7 +5,7 @@ namespace taraxa::storage::migration { Manager::Manager(std::shared_ptr db, const addr_t& node_addr) : db_(db) { LOG_OBJECTS_CREATE("MIGRATIONS"); - registerMigration(); + registerMigration(); } void Manager::applyMigration(std::shared_ptr m) { if (m->isApplied()) { From 611dcfb142730bf69a0fd1c09873034f157b6cf9 Mon Sep 17 
00:00:00 2001 From: Matus Kysel Date: Thu, 5 Sep 2024 15:13:04 +0200 Subject: [PATCH 025/105] fix review comments --- .../storage/migration/dag_block_period_migration.hpp | 4 ++-- .../storage/src/migration/dag_block_period_migration.cpp | 8 ++++---- .../core_libs/storage/src/migration/migration_manager.cpp | 4 ++-- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/libraries/core_libs/storage/include/storage/migration/dag_block_period_migration.hpp b/libraries/core_libs/storage/include/storage/migration/dag_block_period_migration.hpp index 418c57869c..59173cf862 100644 --- a/libraries/core_libs/storage/include/storage/migration/dag_block_period_migration.hpp +++ b/libraries/core_libs/storage/include/storage/migration/dag_block_period_migration.hpp @@ -2,9 +2,9 @@ #include "storage/migration/migration_base.hpp" namespace taraxa::storage::migration { -class DagBlockPeriodMigration : public migration::Base { +class PeriodDataDagBlockMigration : public migration::Base { public: - DagBlockPeriodMigration(std::shared_ptr db); + PeriodDataDagBlockMigration(std::shared_ptr db); std::string id() override; uint32_t dbVersion() override; void migrate(logger::Logger& log) override; diff --git a/libraries/core_libs/storage/src/migration/dag_block_period_migration.cpp b/libraries/core_libs/storage/src/migration/dag_block_period_migration.cpp index 3ab2a5f7eb..2cdade2db4 100644 --- a/libraries/core_libs/storage/src/migration/dag_block_period_migration.cpp +++ b/libraries/core_libs/storage/src/migration/dag_block_period_migration.cpp @@ -10,13 +10,13 @@ namespace taraxa::storage::migration { -DagBlockPeriodMigration::DagBlockPeriodMigration(std::shared_ptr db) : migration::Base(db) {} +PeriodDataDagBlockMigration::PeriodDataDagBlockMigration(std::shared_ptr db) : migration::Base(db) {} -std::string DagBlockPeriodMigration::id() { return "DagBlockPeriodMigration"; } +std::string PeriodDataDagBlockMigration::id() { return "PeriodDataDagBlockMigration"; } -uint32_t 
DagBlockPeriodMigration::dbVersion() { return 1; } +uint32_t PeriodDataDagBlockMigration::dbVersion() { return 1; } -void DagBlockPeriodMigration::migrate(logger::Logger& log) { +void PeriodDataDagBlockMigration::migrate(logger::Logger& log) { auto orig_col = DbStorage::Columns::period_data; auto copied_col = db_->copyColumn(db_->handle(orig_col), orig_col.name() + "-copy"); diff --git a/libraries/core_libs/storage/src/migration/migration_manager.cpp b/libraries/core_libs/storage/src/migration/migration_manager.cpp index 0b15aa84df..b0587b1639 100644 --- a/libraries/core_libs/storage/src/migration/migration_manager.cpp +++ b/libraries/core_libs/storage/src/migration/migration_manager.cpp @@ -1,11 +1,11 @@ #include "storage/migration/migration_manager.hpp" -#include "storage/migration/dag_block.hpp" +#include "storage/migration/dag_block_period_migration.hpp" #include "storage/migration/transaction_period.hpp" namespace taraxa::storage::migration { Manager::Manager(std::shared_ptr db, const addr_t& node_addr) : db_(db) { LOG_OBJECTS_CREATE("MIGRATIONS"); - registerMigration(); + registerMigration(); } void Manager::applyMigration(std::shared_ptr m) { if (m->isApplied()) { From a0b3c43c337d26620de22126e0c2fb7cf07ae1a0 Mon Sep 17 00:00:00 2001 From: kstdl Date: Tue, 27 Aug 2024 13:53:32 +0200 Subject: [PATCH 026/105] chore: dont save duplicate data in finalChainHeader --- .../consensus/include/final_chain/data.hpp | 28 +++-- .../include/final_chain/final_chain.hpp | 21 ++-- .../consensus/src/final_chain/data.cpp | 38 +++++-- .../consensus/src/final_chain/final_chain.cpp | 102 +++++++++++------- .../core_libs/network/graphql/src/block.cpp | 6 +- libraries/core_libs/network/rpc/eth/Eth.cpp | 4 +- .../storage/migration/final_chain_header.hpp | 16 +++ .../src/migration/final_chain_header.cpp | 61 +++++++++++ .../src/migration/migration_manager.cpp | 5 +- tests/final_chain_test.cpp | 4 +- 10 files changed, 212 insertions(+), 73 deletions(-) create mode 100644 
libraries/core_libs/storage/include/storage/migration/final_chain_header.hpp create mode 100644 libraries/core_libs/storage/src/migration/final_chain_header.cpp diff --git a/libraries/core_libs/consensus/include/final_chain/data.hpp b/libraries/core_libs/consensus/include/final_chain/data.hpp index 1410cfeaaf..5b0ff6d795 100644 --- a/libraries/core_libs/consensus/include/final_chain/data.hpp +++ b/libraries/core_libs/consensus/include/final_chain/data.hpp @@ -23,32 +23,42 @@ using LogBloom = dev::h2048; using LogBlooms = std::vector; using Nonce = dev::h64; -struct BlockHeader { +struct BlockHeaderData { h256 hash; h256 parent_hash; h256 state_root; h256 transactions_root; h256 receipts_root; LogBloom log_bloom; - EthBlockNumber number = 0; - uint64_t gas_limit = 0; uint64_t gas_used = 0; - bytes extra_data; - uint64_t timestamp = 0; - Address author; u256 total_reward; + dev::bytes serializeForDB() const; + HAS_RLP_FIELDS +}; + +struct BlockHeader : BlockHeaderData { + BlockHeader() = default; + BlockHeader(std::string&& raw_header_data); + BlockHeader(std::string&& raw_header_data, const PbftBlock& pbft, uint64_t gas_limit); + Address author; + uint64_t gas_limit = 0; + uint64_t timestamp = 0; + EthBlockNumber number = 0; + bytes extra_data; + + void setFromPbft(const PbftBlock& pbft); - static h256 const& uncles_hash(); + static h256 const& unclesHash(); static Nonce const& nonce(); static u256 const& difficulty(); - static h256 const& mix_hash(); + static h256 const& mixHash(); - void ethereum_rlp(dev::RLPStream& encoding) const; + dev::bytes&& ethereumRlp() const; }; static constexpr auto c_bloomIndexSize = 16; diff --git a/libraries/core_libs/consensus/include/final_chain/final_chain.hpp b/libraries/core_libs/consensus/include/final_chain/final_chain.hpp index bdd630a149..06a0bdd636 100644 --- a/libraries/core_libs/consensus/include/final_chain/final_chain.hpp +++ b/libraries/core_libs/consensus/include/final_chain/final_chain.hpp @@ -259,19 +259,12 @@ class 
FinalChain { * @return bridge epoch */ h256 getBridgeEpoch(EthBlockNumber blk_num) const; - // TODO move out of here: + // TODO move out of here: std::pair getBalance(addr_t const& addr) const; - SharedTransaction makeBridgeFinalizationTransaction(); - bool isNeedToFinalize(EthBlockNumber blk_num) const; - std::vector makeSystemTransactions(PbftPeriod blk_num); std::shared_ptr finalize_(PeriodData&& new_blk, std::vector&& finalized_dag_blk_hashes, std::shared_ptr&& anchor); - std::shared_ptr appendBlock(Batch& batch, const addr_t& author, uint64_t timestamp, uint64_t gas_limit, - const h256& state_root, u256 total_reward, - const SharedTransactions& transactions = {}, - const TransactionReceipts& receipts = {}, const bytes& extra_data = {}); private: std::shared_ptr getTransactionHashes(std::optional n = {}) const; @@ -285,6 +278,18 @@ class FinalChain { static h256 blockBloomsChunkId(EthBlockNumber level, EthBlockNumber index); std::vector withBlockBloom(const LogBloom& b, EthBlockNumber from, EthBlockNumber to, EthBlockNumber level, EthBlockNumber index) const; + bool isNeedToFinalize(EthBlockNumber blk_num) const; + + SharedTransaction makeBridgeFinalizationTransaction(); + std::vector makeSystemTransactions(PbftPeriod blk_num); + + std::shared_ptr makeGenesisHeader(std::string&& raw_header) const; + std::shared_ptr appendBlock(Batch& batch, const PbftBlock& pbft_blk, const h256& state_root, + u256 total_reward, const SharedTransactions& transactions = {}, + const TransactionReceipts& receipts = {}); + std::shared_ptr appendBlock(Batch& batch, std::shared_ptr header, + const SharedTransactions& transactions = {}, + const TransactionReceipts& receipts = {}); private: std::shared_ptr db_; diff --git a/libraries/core_libs/consensus/src/final_chain/data.cpp b/libraries/core_libs/consensus/src/final_chain/data.cpp index f0a09e7b16..1778d0f42e 100644 --- a/libraries/core_libs/consensus/src/final_chain/data.cpp +++ 
b/libraries/core_libs/consensus/src/final_chain/data.cpp @@ -1,26 +1,48 @@ #include "final_chain/data.hpp" +#include #include #include "common/constants.hpp" +#include "pbft/pbft_block.hpp" namespace taraxa::final_chain { -h256 const& BlockHeader::uncles_hash() { return EmptyRLPListSHA3(); } +dev::bytes BlockHeaderData::serializeForDB() const { return util::rlp_enc(*this); } + +RLP_FIELDS_DEFINE(BlockHeaderData, hash, parent_hash, state_root, transactions_root, receipts_root, log_bloom, gas_used, + total_reward) + +BlockHeader::BlockHeader(std::string&& raw_header_data) + : BlockHeaderData(util::rlp_dec(dev::RLP(raw_header_data))) {} + +BlockHeader::BlockHeader(std::string&& raw_header_data_, const PbftBlock& pbft_, uint64_t gas_limit_) + : BlockHeader(std::move(raw_header_data_)) { + setFromPbft(pbft_); + gas_limit = gas_limit_; +} + +void BlockHeader::setFromPbft(const PbftBlock& pbft) { + author = pbft.getBeneficiary(); + timestamp = pbft.getTimestamp(); + number = pbft.getPeriod(); + extra_data = pbft.getExtraDataRlp(); +} + +h256 const& BlockHeader::unclesHash() { return EmptyRLPListSHA3(); } Nonce const& BlockHeader::nonce() { return EmptyNonce(); } u256 const& BlockHeader::difficulty() { return ZeroU256(); } -h256 const& BlockHeader::mix_hash() { return ZeroHash(); } - -RLP_FIELDS_DEFINE(BlockHeader, hash, parent_hash, author, state_root, transactions_root, receipts_root, log_bloom, - number, gas_limit, gas_used, timestamp, total_reward, extra_data) +h256 const& BlockHeader::mixHash() { return ZeroHash(); } -void BlockHeader::ethereum_rlp(dev::RLPStream& encoding) const { - util::rlp_tuple(encoding, parent_hash, BlockHeader::uncles_hash(), author, state_root, transactions_root, +dev::bytes&& BlockHeader::ethereumRlp() const { + dev::RLPStream rlp_strm; + util::rlp_tuple(rlp_strm, parent_hash, BlockHeader::unclesHash(), author, state_root, transactions_root, receipts_root, log_bloom, BlockHeader::difficulty(), number, gas_limit, gas_used, timestamp, - 
extra_data, BlockHeader::mix_hash(), BlockHeader::nonce()); + extra_data, BlockHeader::mixHash(), BlockHeader::nonce()); + return rlp_strm.invalidate(); } RLP_FIELDS_DEFINE(LogEntry, address, topics, data) diff --git a/libraries/core_libs/consensus/src/final_chain/final_chain.cpp b/libraries/core_libs/consensus/src/final_chain/final_chain.cpp index 060842c2c2..f8a0ab9d9a 100644 --- a/libraries/core_libs/consensus/src/final_chain/final_chain.cpp +++ b/libraries/core_libs/consensus/src/final_chain/final_chain.cpp @@ -3,11 +3,10 @@ #include "common/encoding_solidity.hpp" #include "common/util.hpp" #include "final_chain/trie_common.hpp" -#include "storage/storage.hpp" +#include "pbft/pbft_block.hpp" #include "transaction/system_transaction.hpp" namespace taraxa::final_chain { - FinalChain::FinalChain(const std::shared_ptr& db, const taraxa::FullNodeConfig& config, const addr_t& node_addr) : db_(db), @@ -47,8 +46,10 @@ FinalChain::FinalChain(const std::shared_ptr& db, const taraxa::FullN // If we don't have genesis block in db then create and push it if (!last_blk_num) [[unlikely]] { auto batch = db_->createWriteBatch(); - auto header = appendBlock(batch, addr_t(), config.genesis.dag_genesis_block.getTimestamp(), kBlockGasLimit, - state_db_descriptor.state_root, u256(0)); + auto header = std::make_shared(); + header->timestamp = config.genesis.dag_genesis_block.getTimestamp(); + header->state_root = state_db_descriptor.state_root; + appendBlock(batch, header); block_headers_cache_.append(header->number, header); last_block_number_ = header->number; @@ -206,9 +207,7 @@ std::shared_ptr FinalChain::finalize_(PeriodData&& new auto rewards_stats = rewards_.processStats(new_blk, transactions_gas_used, batch); const auto& [state_root, total_reward] = state_api_.distribute_rewards(rewards_stats); - auto blk_header = - appendBlock(batch, new_blk.pbft_blk->getBeneficiary(), new_blk.pbft_blk->getTimestamp(), kBlockGasLimit, - state_root, total_reward, all_transactions, receipts, 
new_blk.pbft_blk->getExtraDataRlp()); + auto blk_header = appendBlock(batch, *new_blk.pbft_blk, state_root, total_reward, all_transactions, receipts); // Update number of executed DAG blocks and transactions auto num_executed_dag_blk = num_executed_dag_blk_ + finalized_dag_blk_hashes.size(); @@ -298,22 +297,29 @@ void FinalChain::prune(EthBlockNumber blk_n) { } } -std::shared_ptr FinalChain::appendBlock(Batch& batch, const addr_t& author, uint64_t timestamp, - uint64_t gas_limit, const h256& state_root, u256 total_reward, +std::shared_ptr FinalChain::appendBlock(Batch& batch, const PbftBlock& pbft_blk, const h256& state_root, + u256 total_reward, const SharedTransactions& transactions, + const TransactionReceipts& receipts) { + auto header = std::make_shared(); + header->setFromPbft(pbft_blk); + + if (auto last_block = blockHeader(); last_block) { + header->number = last_block->number + 1; + header->parent_hash = last_block->hash; + } + if (!receipts.empty()) { + header->gas_used = receipts.back().cumulative_gas_used; + } + header->state_root = state_root; + header->total_reward = total_reward; + header->gas_limit = kBlockGasLimit; + + return appendBlock(batch, header, transactions, receipts); +} + +std::shared_ptr FinalChain::appendBlock(Batch& batch, std::shared_ptr header, const SharedTransactions& transactions, - const TransactionReceipts& receipts, const bytes& extra_data) { - auto blk_header_ptr = std::make_shared(); - auto& blk_header = *blk_header_ptr; - auto last_block = blockHeader(); - blk_header.number = last_block ? last_block->number + 1 : 0; - blk_header.parent_hash = last_block ? last_block->hash : h256(); - blk_header.author = author; - blk_header.timestamp = timestamp; - blk_header.state_root = state_root; - blk_header.gas_used = receipts.empty() ? 
0 : receipts.back().cumulative_gas_used; - blk_header.gas_limit = gas_limit; - blk_header.total_reward = total_reward; - blk_header.extra_data = extra_data; + const TransactionReceipts& receipts) { dev::BytesMap trxs_trie, receipts_trie; dev::RLPStream rlp_strm; auto trx_idx = 0; @@ -326,28 +332,31 @@ std::shared_ptr FinalChain::appendBlock(Batch& batch, const addr_t& receipts_trie[i_rlp] = util::rlp_enc(rlp_strm, receipt); db_->insert(batch, DbStorage::Columns::final_chain_receipt_by_trx_hash, trx->getHash(), rlp_strm.out()); - blk_header.log_bloom |= receipt.bloom(); + header->log_bloom |= receipt.bloom(); } - blk_header.transactions_root = hash256(trxs_trie); - blk_header.receipts_root = hash256(receipts_trie); - rlp_strm.clear(), blk_header.ethereum_rlp(rlp_strm); - blk_header.hash = dev::sha3(rlp_strm.out()); - db_->insert(batch, DbStorage::Columns::final_chain_blk_by_number, blk_header.number, - util::rlp_enc(rlp_strm, blk_header)); - auto log_bloom_for_index = blk_header.log_bloom; - log_bloom_for_index.shiftBloom<3>(sha3(blk_header.author.ref())); - for (uint64_t level = 0, index = blk_header.number; level < c_bloomIndexLevels; ++level, index /= c_bloomIndexSize) { + + header->transactions_root = hash256(trxs_trie); + header->receipts_root = hash256(receipts_trie); + + header->hash = dev::sha3(header->ethereumRlp()); + + auto data = header->serializeForDB(); + db_->insert(batch, DbStorage::Columns::final_chain_blk_by_number, header->number, data); + + auto log_bloom_for_index = header->log_bloom; + log_bloom_for_index.shiftBloom<3>(sha3(header->author.ref())); + for (uint64_t level = 0, index = header->number; level < c_bloomIndexLevels; ++level, index /= c_bloomIndexSize) { auto chunk_id = blockBloomsChunkId(level, index / c_bloomIndexSize); auto chunk_to_alter = blockBlooms(chunk_id); chunk_to_alter[index % c_bloomIndexSize] |= log_bloom_for_index; db_->insert(batch, DbStorage::Columns::final_chain_log_blooms_index, chunk_id, util::rlp_enc(rlp_strm, 
chunk_to_alter)); } - db_->insert(batch, DbStorage::Columns::final_chain_blk_hash_by_number, blk_header.number, blk_header.hash); - db_->insert(batch, DbStorage::Columns::final_chain_blk_number_by_hash, blk_header.hash, blk_header.number); - db_->insert(batch, DbStorage::Columns::final_chain_meta, DBMetaKeys::LAST_NUMBER, blk_header.number); + db_->insert(batch, DbStorage::Columns::final_chain_blk_hash_by_number, header->number, header->hash); + db_->insert(batch, DbStorage::Columns::final_chain_blk_number_by_hash, header->hash, header->number); + db_->insert(batch, DbStorage::Columns::final_chain_meta, DBMetaKeys::LAST_NUMBER, header->number); - return blk_header_ptr; + return header; } EthBlockNumber FinalChain::lastBlockNumber() const { return last_block_number_; } @@ -425,6 +434,7 @@ bytes FinalChain::getCode(const addr_t& addr, std::optional blk_ state_api::ExecutionResult FinalChain::call(const state_api::EVMTransaction& trx, std::optional blk_n) const { + std::cout << blk_n.value_or(-1) << " " << lastIfAbsent(blk_n) << std::endl; auto const blk_header = blockHeader(lastIfAbsent(blk_n)); if (!blk_header) { throw std::runtime_error("Future block"); @@ -534,11 +544,25 @@ const SharedTransactions FinalChain::getTransactions(std::optional FinalChain::makeGenesisHeader(std::string&& raw_header) const { + auto bh = std::make_shared(std::move(raw_header)); + bh->gas_limit = kBlockGasLimit; + // bh->timestamp = config.genesis.dag_genesis_block.getTimestamp(); + bh->number = 0; + return bh; +} + std::shared_ptr FinalChain::getBlockHeader(EthBlockNumber n) const { if (auto raw = db_->lookup(n, DbStorage::Columns::final_chain_blk_by_number); !raw.empty()) { - auto ret = std::make_shared(); - ret->rlp(dev::RLP(raw)); - return ret; + if (n == 0) { + return makeGenesisHeader(std::move(raw)); + } + auto pbft = db_->getPbftBlock(n); + // we should usually have a pbft block for a final chain block + if (!pbft) { + return {}; + } + return std::make_shared(std::move(raw), 
*pbft, kBlockGasLimit); } return {}; } diff --git a/libraries/core_libs/network/graphql/src/block.cpp b/libraries/core_libs/network/graphql/src/block.cpp index 9158d37e1c..c8a4466202 100644 --- a/libraries/core_libs/network/graphql/src/block.cpp +++ b/libraries/core_libs/network/graphql/src/block.cpp @@ -77,7 +77,7 @@ response::Value Block::getTimestamp() const noexcept { response::Value Block::getLogsBloom() const noexcept { return response::Value(block_header_->log_bloom.toString()); } -response::Value Block::getMixHash() const noexcept { return response::Value(block_header_->mix_hash().toString()); } +response::Value Block::getMixHash() const noexcept { return response::Value(block_header_->mixHash().toString()); } response::Value Block::getDifficulty() const noexcept { return response::Value(block_header_->difficulty().str()); } @@ -91,9 +91,7 @@ std::optional>> Block::getOmmers() co std::shared_ptr Block::getOmmerAt(int&&) const noexcept { return nullptr; } -response::Value Block::getOmmerHash() const noexcept { - return response::Value(block_header_->uncles_hash().toString()); -} +response::Value Block::getOmmerHash() const noexcept { return response::Value(block_header_->unclesHash().toString()); } std::optional>> Block::getTransactions() const noexcept { std::vector> ret; diff --git a/libraries/core_libs/network/rpc/eth/Eth.cpp b/libraries/core_libs/network/rpc/eth/Eth.cpp index 1f0ae169fc..dcd9a24496 100644 --- a/libraries/core_libs/network/rpc/eth/Eth.cpp +++ b/libraries/core_libs/network/rpc/eth/Eth.cpp @@ -49,7 +49,7 @@ Json::Value toJson(const LocalisedTransaction& lt) { return toJson(*lt.trx, lt.t Json::Value toJson(const BlockHeader& obj) { Json::Value res(Json::objectValue); res["parentHash"] = toJS(obj.parent_hash); - res["sha3Uncles"] = toJS(BlockHeader::uncles_hash()); + res["sha3Uncles"] = toJS(BlockHeader::unclesHash()); res["stateRoot"] = toJS(obj.state_root); res["transactionsRoot"] = toJS(obj.transactions_root); res["receiptsRoot"] = 
toJS(obj.receipts_root); @@ -61,7 +61,7 @@ Json::Value toJson(const BlockHeader& obj) { res["timestamp"] = toJS(obj.timestamp); res["author"] = toJS(obj.author); res["miner"] = toJS(obj.author); - res["mixHash"] = toJS(BlockHeader::mix_hash()); + res["mixHash"] = toJS(BlockHeader::mixHash()); res["nonce"] = toJS(BlockHeader::nonce()); res["uncles"] = Json::Value(Json::arrayValue); res["hash"] = toJS(obj.hash); diff --git a/libraries/core_libs/storage/include/storage/migration/final_chain_header.hpp b/libraries/core_libs/storage/include/storage/migration/final_chain_header.hpp new file mode 100644 index 0000000000..a7b2c111b1 --- /dev/null +++ b/libraries/core_libs/storage/include/storage/migration/final_chain_header.hpp @@ -0,0 +1,16 @@ +#pragma once +#include + +#include "storage/migration/migration_base.hpp" + +namespace taraxa::storage::migration { +class FinalChainHeader : public migration::Base { + public: + FinalChainHeader(std::shared_ptr db); + std::string id() override; + uint32_t dbVersion() override; + + protected: + void migrate(logger::Logger& log) override; +}; +} // namespace taraxa::storage::migration \ No newline at end of file diff --git a/libraries/core_libs/storage/src/migration/final_chain_header.cpp b/libraries/core_libs/storage/src/migration/final_chain_header.cpp new file mode 100644 index 0000000000..7c91beb503 --- /dev/null +++ b/libraries/core_libs/storage/src/migration/final_chain_header.cpp @@ -0,0 +1,61 @@ +#include "storage/migration/final_chain_header.hpp" + +#include "common/thread_pool.hpp" +#include "final_chain/data.hpp" + +namespace taraxa::storage::migration { + +FinalChainHeader::FinalChainHeader(std::shared_ptr db) : migration::Base(db) {} + +std::string FinalChainHeader::id() { return "FinalChainHeader"; } + +uint32_t FinalChainHeader::dbVersion() { return 1; } + +struct OldHeader : final_chain::BlockHeaderData { + Address author; + EthBlockNumber number = 0; + uint64_t gas_limit = 0; + uint64_t timestamp = 0; + bytes 
extra_data; + + RLP_FIELDS_DEFINE_INPLACE(hash, parent_hash, author, state_root, transactions_root, receipts_root, log_bloom, number, + gas_limit, gas_used, timestamp, total_reward, extra_data) +}; + +void FinalChainHeader::migrate(logger::Logger& log) { + auto it = db_->getColumnIterator(DbStorage::Columns::final_chain_blk_by_number); + it->SeekToFirst(); + if (!it->Valid()) { + LOG(log) << "No blocks to migrate"; + return; + } + + uint64_t start_period, end_period; + memcpy(&start_period, it->key().data(), sizeof(uint64_t)); + + it->SeekToLast(); + if (!it->Valid()) { + it->Prev(); + } + memcpy(&end_period, it->key().data(), sizeof(uint64_t)); + util::ThreadPool executor{std::thread::hardware_concurrency()}; + + const auto diff = (end_period - start_period) ? (end_period - start_period) : 1; + uint64_t curr_progress = 0; + std::cout << "Migrating " << diff << " blocks" << std::endl; + std::cout << "Start period: " << start_period << ", end period: " << end_period << std::endl; + // Get and save data in new format for all blocks + it->SeekToFirst(); + for (; it->Valid(); it->Next()) { + uint64_t period; + memcpy(&period, it->key().data(), sizeof(uint64_t)); + std::string raw = it->value().ToString(); + executor.post([this, period, raw = std::move(raw), &copied_col]() { + auto header = std::make_shared(); + header->rlp(dev::RLP(raw)); + auto newBytes = header->serializeForDB(); + db_->insert(copied_col.get(), period, newBytes); + }); + } +} +} // namespace taraxa::storage::migration \ No newline at end of file diff --git a/libraries/core_libs/storage/src/migration/migration_manager.cpp b/libraries/core_libs/storage/src/migration/migration_manager.cpp index b0587b1639..6eddb23c30 100644 --- a/libraries/core_libs/storage/src/migration/migration_manager.cpp +++ b/libraries/core_libs/storage/src/migration/migration_manager.cpp @@ -1,11 +1,14 @@ #include "storage/migration/migration_manager.hpp" #include "storage/migration/dag_block_period_migration.hpp" +#include 
"storage/migration/final_chain_header.hpp" #include "storage/migration/transaction_period.hpp" namespace taraxa::storage::migration { + Manager::Manager(std::shared_ptr db, const addr_t& node_addr) : db_(db) { + registerMigration(); + registerMigration(); LOG_OBJECTS_CREATE("MIGRATIONS"); - registerMigration(); } void Manager::applyMigration(std::shared_ptr m) { if (m->isApplied()) { diff --git a/tests/final_chain_test.cpp b/tests/final_chain_test.cpp index 577a097ea0..451bfd970a 100644 --- a/tests/final_chain_test.cpp +++ b/tests/final_chain_test.cpp @@ -113,8 +113,8 @@ struct FinalChainTest : WithDataDir { EXPECT_EQ(blk_h.extra_data, pbft_block->getExtraDataRlp()); EXPECT_EQ(blk_h.nonce(), Nonce()); EXPECT_EQ(blk_h.difficulty(), 0); - EXPECT_EQ(blk_h.mix_hash(), h256()); - EXPECT_EQ(blk_h.uncles_hash(), EmptyRLPListSHA3()); + EXPECT_EQ(blk_h.mixHash(), h256()); + EXPECT_EQ(blk_h.unclesHash(), EmptyRLPListSHA3()); EXPECT_TRUE(!blk_h.state_root.isZero()); LogBloom expected_block_log_bloom; std::unordered_map expected_balance_changes; From 1378445aff8ffc1b5596d740d2ce4f8dd42b522a Mon Sep 17 00:00:00 2001 From: kstdl Date: Thu, 29 Aug 2024 10:20:46 +0200 Subject: [PATCH 027/105] chore: optimize migration --- .../consensus/include/final_chain/data.hpp | 4 +- .../include/final_chain/final_chain.hpp | 6 +-- .../consensus/src/final_chain/data.cpp | 5 +- .../consensus/src/final_chain/final_chain.cpp | 42 +++++++++------ .../consensus/src/transaction/gas_pricer.cpp | 3 +- .../network/tarcap/taraxa_capability.hpp | 4 +- .../storage/include/storage/storage.hpp | 2 + .../src/migration/final_chain_header.cpp | 53 ++++++++----------- 8 files changed, 61 insertions(+), 58 deletions(-) diff --git a/libraries/core_libs/consensus/include/final_chain/data.hpp b/libraries/core_libs/consensus/include/final_chain/data.hpp index 5b0ff6d795..b04ce77d3d 100644 --- a/libraries/core_libs/consensus/include/final_chain/data.hpp +++ b/libraries/core_libs/consensus/include/final_chain/data.hpp 
@@ -24,7 +24,6 @@ using LogBlooms = std::vector; using Nonce = dev::h64; struct BlockHeaderData { - h256 hash; h256 parent_hash; h256 state_root; h256 transactions_root; @@ -42,6 +41,7 @@ struct BlockHeader : BlockHeaderData { BlockHeader() = default; BlockHeader(std::string&& raw_header_data); BlockHeader(std::string&& raw_header_data, const PbftBlock& pbft, uint64_t gas_limit); + h256 hash; Address author; uint64_t gas_limit = 0; uint64_t timestamp = 0; @@ -58,7 +58,7 @@ struct BlockHeader : BlockHeaderData { static h256 const& mixHash(); - dev::bytes&& ethereumRlp() const; + dev::bytes ethereumRlp() const; }; static constexpr auto c_bloomIndexSize = 16; diff --git a/libraries/core_libs/consensus/include/final_chain/final_chain.hpp b/libraries/core_libs/consensus/include/final_chain/final_chain.hpp index 06a0bdd636..572347288c 100644 --- a/libraries/core_libs/consensus/include/final_chain/final_chain.hpp +++ b/libraries/core_libs/consensus/include/final_chain/final_chain.hpp @@ -18,8 +18,6 @@ namespace taraxa::final_chain { * @{ */ -enum class DBMetaKeys { LAST_NUMBER = 1 }; - /** * @brief main responsibility is blocks execution in EVM, getting data from EVM state * @@ -284,6 +282,8 @@ class FinalChain { std::vector makeSystemTransactions(PbftPeriod blk_num); std::shared_ptr makeGenesisHeader(std::string&& raw_header) const; + std::shared_ptr makeGenesisHeader(const h256& state_root) const; + std::shared_ptr appendBlock(Batch& batch, const PbftBlock& pbft_blk, const h256& state_root, u256 total_reward, const SharedTransactions& transactions = {}, const TransactionReceipts& receipts = {}); @@ -322,7 +322,7 @@ class FinalChain { std::atomic last_block_number_; - const HardforksConfig& kHardforksConfig; + const FullNodeConfig& kConfig; LOG_OBJECTS_DEFINE }; diff --git a/libraries/core_libs/consensus/src/final_chain/data.cpp b/libraries/core_libs/consensus/src/final_chain/data.cpp index 1778d0f42e..14c62fde62 100644 --- 
a/libraries/core_libs/consensus/src/final_chain/data.cpp +++ b/libraries/core_libs/consensus/src/final_chain/data.cpp @@ -10,7 +10,7 @@ namespace taraxa::final_chain { dev::bytes BlockHeaderData::serializeForDB() const { return util::rlp_enc(*this); } -RLP_FIELDS_DEFINE(BlockHeaderData, hash, parent_hash, state_root, transactions_root, receipts_root, log_bloom, gas_used, +RLP_FIELDS_DEFINE(BlockHeaderData, parent_hash, state_root, transactions_root, receipts_root, log_bloom, gas_used, total_reward) BlockHeader::BlockHeader(std::string&& raw_header_data) @@ -20,6 +20,7 @@ BlockHeader::BlockHeader(std::string&& raw_header_data_, const PbftBlock& pbft_, : BlockHeader(std::move(raw_header_data_)) { setFromPbft(pbft_); gas_limit = gas_limit_; + hash = dev::sha3(ethereumRlp()); } void BlockHeader::setFromPbft(const PbftBlock& pbft) { @@ -37,7 +38,7 @@ u256 const& BlockHeader::difficulty() { return ZeroU256(); } h256 const& BlockHeader::mixHash() { return ZeroHash(); } -dev::bytes&& BlockHeader::ethereumRlp() const { +dev::bytes BlockHeader::ethereumRlp() const { dev::RLPStream rlp_strm; util::rlp_tuple(rlp_strm, parent_hash, BlockHeader::unclesHash(), author, state_root, transactions_root, receipts_root, log_bloom, BlockHeader::difficulty(), number, gas_limit, gas_used, timestamp, diff --git a/libraries/core_libs/consensus/src/final_chain/final_chain.cpp b/libraries/core_libs/consensus/src/final_chain/final_chain.cpp index f8a0ab9d9a..c85e559593 100644 --- a/libraries/core_libs/consensus/src/final_chain/final_chain.cpp +++ b/libraries/core_libs/consensus/src/final_chain/final_chain.cpp @@ -37,7 +37,7 @@ FinalChain::FinalChain(const std::shared_ptr& db, const taraxa::FullN dpos_is_eligible_cache_( config.final_chain_cache_in_blocks, [this](uint64_t blk, const addr_t& addr) { return state_api_.dpos_is_eligible(blk, addr); }), - kHardforksConfig(config.genesis.state.hardforks) { + kConfig(config) { LOG_OBJECTS_CREATE("EXECUTOR"); num_executed_dag_blk_ = 
db_->getStatusField(taraxa::StatusDbField::ExecutedBlkCount); num_executed_trx_ = db_->getStatusField(taraxa::StatusDbField::ExecutedTrxCount); @@ -46,10 +46,8 @@ FinalChain::FinalChain(const std::shared_ptr& db, const taraxa::FullN // If we don't have genesis block in db then create and push it if (!last_blk_num) [[unlikely]] { auto batch = db_->createWriteBatch(); - auto header = std::make_shared(); - header->timestamp = config.genesis.dag_genesis_block.getTimestamp(); - header->state_root = state_db_descriptor.state_root; - appendBlock(batch, header); + auto header = makeGenesisHeader(state_db_descriptor.state_root); + appendBlock(batch, header, {}, {}); block_headers_cache_.append(header->number, header); last_block_number_ = header->number; @@ -118,13 +116,14 @@ SharedTransaction FinalChain::makeBridgeFinalizationTransaction() { auto account = getAccount(kTaraxaSystemAccount).value_or(state_api::ZeroAccount); auto trx = std::make_shared(account.nonce, 0, 0, kBlockGasLimit, finalize_method, - kHardforksConfig.ficus_hf.bridge_contract_address); + kConfig.genesis.state.hardforks.ficus_hf.bridge_contract_address); return trx; } bool FinalChain::isNeedToFinalize(EthBlockNumber blk_num) const { const static auto get_bridge_root_method = util::EncodingSolidity::packFunctionCall("shouldFinalizeEpoch()"); - return u256(call(state_api::EVMTransaction{dev::ZeroAddress, 1, kHardforksConfig.ficus_hf.bridge_contract_address, + return u256(call(state_api::EVMTransaction{dev::ZeroAddress, 1, + kConfig.genesis.state.hardforks.ficus_hf.bridge_contract_address, state_api::ZeroAccount.nonce, 0, 10000000, get_bridge_root_method}, blk_num) .code_retval) @@ -136,8 +135,9 @@ std::vector FinalChain::makeSystemTransactions(PbftPeriod blk // Make system transactions blocks sooner than next pillar block period, // e.g.: if pillar block period is 100, this will return true for period 100 - delegationDelay() == 95, 195, 295, // etc... 
- if (kHardforksConfig.ficus_hf.isPillarBlockPeriod(blk_num + delegationDelay())) { - if (const auto bridge_contract = getAccount(kHardforksConfig.ficus_hf.bridge_contract_address); bridge_contract) { + if (kConfig.genesis.state.hardforks.ficus_hf.isPillarBlockPeriod(blk_num + delegationDelay())) { + if (const auto bridge_contract = getAccount(kConfig.genesis.state.hardforks.ficus_hf.bridge_contract_address); + bridge_contract) { if (bridge_contract->code_size && isNeedToFinalize(blk_num - 1)) { auto finalize_trx = makeBridgeFinalizationTransaction(); system_transactions.push_back(finalize_trx); @@ -314,7 +314,7 @@ std::shared_ptr FinalChain::appendBlock(Batch& batch, const PbftBlo header->total_reward = total_reward; header->gas_limit = kBlockGasLimit; - return appendBlock(batch, header, transactions, receipts); + return appendBlock(batch, std::move(header), transactions, receipts); } std::shared_ptr FinalChain::appendBlock(Batch& batch, std::shared_ptr header, @@ -337,7 +337,6 @@ std::shared_ptr FinalChain::appendBlock(Batch& batch, std::shared_p header->transactions_root = hash256(trxs_trie); header->receipts_root = hash256(receipts_trie); - header->hash = dev::sha3(header->ethereumRlp()); auto data = header->serializeForDB(); @@ -504,7 +503,8 @@ u256 FinalChain::dposTotalSupply(EthBlockNumber blk_num) const { return state_ap h256 FinalChain::getBridgeRoot(EthBlockNumber blk_num) const { const static auto get_bridge_root_method = util::EncodingSolidity::packFunctionCall("getBridgeRoot()"); - return h256(call(state_api::EVMTransaction{dev::ZeroAddress, 1, kHardforksConfig.ficus_hf.bridge_contract_address, + return h256(call(state_api::EVMTransaction{dev::ZeroAddress, 1, + kConfig.genesis.state.hardforks.ficus_hf.bridge_contract_address, state_api::ZeroAccount.nonce, 0, 10000000, get_bridge_root_method}, blk_num) .code_retval); @@ -512,7 +512,8 @@ h256 FinalChain::getBridgeRoot(EthBlockNumber blk_num) const { h256 FinalChain::getBridgeEpoch(EthBlockNumber blk_num) 
const { const static auto getBridgeEpoch_method = util::EncodingSolidity::packFunctionCall("finalizedEpoch()"); - return h256(call(state_api::EVMTransaction{dev::ZeroAddress, 1, kHardforksConfig.ficus_hf.bridge_contract_address, + return h256(call(state_api::EVMTransaction{dev::ZeroAddress, 1, + kConfig.genesis.state.hardforks.ficus_hf.bridge_contract_address, state_api::ZeroAccount.nonce, 0, 10000000, getBridgeEpoch_method}, blk_num) .code_retval); @@ -546,12 +547,21 @@ const SharedTransactions FinalChain::getTransactions(std::optional FinalChain::makeGenesisHeader(std::string&& raw_header) const { auto bh = std::make_shared(std::move(raw_header)); - bh->gas_limit = kBlockGasLimit; - // bh->timestamp = config.genesis.dag_genesis_block.getTimestamp(); - bh->number = 0; + bh->gas_limit = kConfig.genesis.pbft.gas_limit; + bh->timestamp = kConfig.genesis.dag_genesis_block.getTimestamp(); + bh->hash = dev::sha3(bh->ethereumRlp()); return bh; } +std::shared_ptr FinalChain::makeGenesisHeader(const h256& state_root) const { + auto header = std::make_shared(); + header->timestamp = kConfig.genesis.dag_genesis_block.getTimestamp(); + header->state_root = state_root; + header->gas_limit = kConfig.genesis.pbft.gas_limit; + header->hash = dev::sha3(header->ethereumRlp()); + return header; +} + std::shared_ptr FinalChain::getBlockHeader(EthBlockNumber n) const { if (auto raw = db_->lookup(n, DbStorage::Columns::final_chain_blk_by_number); !raw.empty()) { if (n == 0) { diff --git a/libraries/core_libs/consensus/src/transaction/gas_pricer.cpp b/libraries/core_libs/consensus/src/transaction/gas_pricer.cpp index 4fcf9c7439..0095715306 100644 --- a/libraries/core_libs/consensus/src/transaction/gas_pricer.cpp +++ b/libraries/core_libs/consensus/src/transaction/gas_pricer.cpp @@ -1,6 +1,5 @@ #include "transaction/gas_pricer.hpp" -#include "final_chain/final_chain.hpp" #include "storage/storage.hpp" namespace taraxa { @@ -28,7 +27,7 @@ u256 GasPricer::bid() const { void 
GasPricer::init(const std::shared_ptr& db) { const auto last_blk_num = - db->lookup_int(final_chain::DBMetaKeys::LAST_NUMBER, DbStorage::Columns::final_chain_meta); + db->lookup_int(DBMetaKeys::LAST_NUMBER, DbStorage::Columns::final_chain_meta); if (!last_blk_num || *last_blk_num == 0) return; auto block_num = *last_blk_num; diff --git a/libraries/core_libs/network/include/network/tarcap/taraxa_capability.hpp b/libraries/core_libs/network/include/network/tarcap/taraxa_capability.hpp index 0fa960f748..095d8e3b2a 100644 --- a/libraries/core_libs/network/include/network/tarcap/taraxa_capability.hpp +++ b/libraries/core_libs/network/include/network/tarcap/taraxa_capability.hpp @@ -121,8 +121,8 @@ class TaraxaCapability final : public dev::p2p::CapabilityFace { std::shared_ptr thread_pool_; // Last disconnect time and number of peers - std::chrono::_V2::system_clock::time_point last_ddos_disconnect_time_ = {}; - std::chrono::_V2::system_clock::time_point queue_over_limit_start_time_ = {}; + std::chrono::system_clock::time_point last_ddos_disconnect_time_ = {}; + std::chrono::system_clock::time_point queue_over_limit_start_time_ = {}; bool queue_over_limit_ = false; uint32_t last_disconnect_number_of_peers_ = 0; diff --git a/libraries/core_libs/storage/include/storage/storage.hpp b/libraries/core_libs/storage/include/storage/storage.hpp index 7247c562c7..70582c4159 100644 --- a/libraries/core_libs/storage/include/storage/storage.hpp +++ b/libraries/core_libs/storage/include/storage/storage.hpp @@ -52,6 +52,8 @@ enum PbftMgrStatus : uint8_t { NextVotedNullBlockHash, }; +enum class DBMetaKeys { LAST_NUMBER = 1 }; + class DbException : public std::exception { public: explicit DbException(const std::string& desc) : desc_(desc) {} diff --git a/libraries/core_libs/storage/src/migration/final_chain_header.cpp b/libraries/core_libs/storage/src/migration/final_chain_header.cpp index 7c91beb503..0730e16bdb 100644 --- 
a/libraries/core_libs/storage/src/migration/final_chain_header.cpp +++ b/libraries/core_libs/storage/src/migration/final_chain_header.cpp @@ -1,6 +1,5 @@ #include "storage/migration/final_chain_header.hpp" -#include "common/thread_pool.hpp" #include "final_chain/data.hpp" namespace taraxa::storage::migration { @@ -11,51 +10,43 @@ std::string FinalChainHeader::id() { return "FinalChainHeader"; } uint32_t FinalChainHeader::dbVersion() { return 1; } -struct OldHeader : final_chain::BlockHeaderData { - Address author; - EthBlockNumber number = 0; - uint64_t gas_limit = 0; - uint64_t timestamp = 0; - bytes extra_data; - +struct OldHeader : final_chain::BlockHeader { RLP_FIELDS_DEFINE_INPLACE(hash, parent_hash, author, state_root, transactions_root, receipts_root, log_bloom, number, gas_limit, gas_used, timestamp, total_reward, extra_data) }; void FinalChainHeader::migrate(logger::Logger& log) { - auto it = db_->getColumnIterator(DbStorage::Columns::final_chain_blk_by_number); + auto orig_col = DbStorage::Columns::final_chain_blk_by_number; + auto copied_col = db_->copyColumn(db_->handle(orig_col), orig_col.name() + "-copy"); + + if (copied_col == nullptr) { + LOG(log) << "Migration " << id() << " skipped: Unable to copy " << orig_col.name() << " column"; + return; + } + + auto it = db_->getColumnIterator(copied_col.get()); it->SeekToFirst(); if (!it->Valid()) { LOG(log) << "No blocks to migrate"; return; } - uint64_t start_period, end_period; - memcpy(&start_period, it->key().data(), sizeof(uint64_t)); - - it->SeekToLast(); - if (!it->Valid()) { - it->Prev(); - } - memcpy(&end_period, it->key().data(), sizeof(uint64_t)); - util::ThreadPool executor{std::thread::hardware_concurrency()}; - - const auto diff = (end_period - start_period) ? 
(end_period - start_period) : 1; - uint64_t curr_progress = 0; - std::cout << "Migrating " << diff << " blocks" << std::endl; - std::cout << "Start period: " << start_period << ", end period: " << end_period << std::endl; - // Get and save data in new format for all blocks - it->SeekToFirst(); + uint64_t batch_size = 500000000; for (; it->Valid(); it->Next()) { uint64_t period; memcpy(&period, it->key().data(), sizeof(uint64_t)); std::string raw = it->value().ToString(); - executor.post([this, period, raw = std::move(raw), &copied_col]() { - auto header = std::make_shared(); - header->rlp(dev::RLP(raw)); - auto newBytes = header->serializeForDB(); - db_->insert(copied_col.get(), period, newBytes); - }); + auto header = std::make_shared(); + header->rlp(dev::RLP(raw)); + auto newBytes = header->serializeForDB(); + db_->insert(batch_, copied_col.get(), period, newBytes); + if (batch_.GetDataSize() > batch_size) { + db_->commitWriteBatch(batch_); + } } + // commit the left over batch + db_->commitWriteBatch(batch_); + + db_->replaceColumn(orig_col, std::move(copied_col)); } } // namespace taraxa::storage::migration \ No newline at end of file From 92d8def61c198abdcd1f736dbc937c9cb2ff7dc5 Mon Sep 17 00:00:00 2001 From: Jakub Fornadel Date: Sun, 16 Jun 2024 14:45:35 -0700 Subject: [PATCH 028/105] support multiple undelegagtions --- .../include/cli/config_jsons/default/default_genesis.json | 3 ++- .../include/cli/config_jsons/devnet/devnet_genesis.json | 3 ++- .../include/cli/config_jsons/mainnet/mainnet_genesis.json | 3 ++- .../include/cli/config_jsons/testnet/testnet_genesis.json | 3 ++- libraries/config/include/config/hardfork.hpp | 3 +++ libraries/config/src/hardfork.cpp | 7 +++++-- 6 files changed, 16 insertions(+), 6 deletions(-) diff --git a/libraries/cli/include/cli/config_jsons/default/default_genesis.json b/libraries/cli/include/cli/config_jsons/default/default_genesis.json index 322d177b10..ba63885f5a 100644 --- 
a/libraries/cli/include/cli/config_jsons/default/default_genesis.json +++ b/libraries/cli/include/cli/config_jsons/default/default_genesis.json @@ -119,6 +119,7 @@ "block_num": 20, "pillar_blocks_interval": 10, "bridge_contract_address": "0xcAF2b453FE8382a4B8110356DF0508f6d71F22BF" - } + }, + "cornus_hf_block_num": 0 } } \ No newline at end of file diff --git a/libraries/cli/include/cli/config_jsons/devnet/devnet_genesis.json b/libraries/cli/include/cli/config_jsons/devnet/devnet_genesis.json index 51fb60004b..d4afa4865c 100644 --- a/libraries/cli/include/cli/config_jsons/devnet/devnet_genesis.json +++ b/libraries/cli/include/cli/config_jsons/devnet/devnet_genesis.json @@ -284,6 +284,7 @@ "block_num": 100, "pillar_blocks_interval": 10, "bridge_contract_address": "0xcAF2b453FE8382a4B8110356DF0508f6d71F22BF" - } + }, + "cornus_hf_block_num": 0 } } \ No newline at end of file diff --git a/libraries/cli/include/cli/config_jsons/mainnet/mainnet_genesis.json b/libraries/cli/include/cli/config_jsons/mainnet/mainnet_genesis.json index 10a7c5e93a..801a7f781b 100644 --- a/libraries/cli/include/cli/config_jsons/mainnet/mainnet_genesis.json +++ b/libraries/cli/include/cli/config_jsons/mainnet/mainnet_genesis.json @@ -1653,6 +1653,7 @@ "block_num": 11616000, "pillar_blocks_interval": 4000, "bridge_contract_address": "0xe126E0BaeAE904b8Cfd619Be1A8667A173b763a1" - } + }, + "cornus_hf_block_num": -1 } } \ No newline at end of file diff --git a/libraries/cli/include/cli/config_jsons/testnet/testnet_genesis.json b/libraries/cli/include/cli/config_jsons/testnet/testnet_genesis.json index 9d34feb1c0..7f78edeef1 100644 --- a/libraries/cli/include/cli/config_jsons/testnet/testnet_genesis.json +++ b/libraries/cli/include/cli/config_jsons/testnet/testnet_genesis.json @@ -153,6 +153,7 @@ "block_num": 1000, "pillar_blocks_interval": 1000, "bridge_contract_address": "0xcAF2b453FE8382a4B8110356DF0508f6d71F22BF" - } + }, + "cornus_hf_block_num": -1 } } \ No newline at end of file diff --git 
a/libraries/config/include/config/hardfork.hpp b/libraries/config/include/config/hardfork.hpp index 73c8419bc3..5ffb43973e 100644 --- a/libraries/config/include/config/hardfork.hpp +++ b/libraries/config/include/config/hardfork.hpp @@ -130,6 +130,9 @@ struct HardforksConfig { // Ficus hardfork: implementation of pillar chain FicusHardforkConfig ficus_hf; + // Cornus hf - support multiple undelegations from the same validator at the same time + uint64_t cornus_hf_block_num{0}; + HAS_RLP_FIELDS }; diff --git a/libraries/config/src/hardfork.cpp b/libraries/config/src/hardfork.cpp index 70332394bf..dd0c85d5b8 100644 --- a/libraries/config/src/hardfork.cpp +++ b/libraries/config/src/hardfork.cpp @@ -158,6 +158,7 @@ Json::Value enc_json(const HardforksConfig& obj) { json["aspen_hf"] = enc_json(obj.aspen_hf); json["ficus_hf"] = enc_json(obj.ficus_hf); // json["bamboo_hf"] = enc_json(obj.bamboo_hf); + json["cornus_hf_block_num"] = dev::toJS(obj.cornus_hf_block_num); return json; } @@ -189,8 +190,10 @@ void dec_json(const Json::Value& json, HardforksConfig& obj) { dec_json(json["aspen_hf"], obj.aspen_hf); dec_json(json["ficus_hf"], obj.ficus_hf); // dec_json(json["bamboo_hf"], obj.bamboo_hf); + obj.cornus_hf_block_num = + json["cornus_hf_block_num"].isUInt64() ? 
dev::getUInt(json["cornus_hf_block_num"]) : uint64_t(-1); } RLP_FIELDS_DEFINE(HardforksConfig, fix_redelegate_block_num, redelegations, rewards_distribution_frequency, magnolia_hf, - phalaenopsis_hf_block_num, fix_claim_all_block_num, aspen_hf, ficus_hf) -} // namespace taraxa + phalaenopsis_hf_block_num, fix_claim_all_block_num, aspen_hf, ficus_hf, cornus_hf_block_num) +} // namespace taraxa \ No newline at end of file From d63083b8cf8e9633c06121763569daad6f06f298 Mon Sep 17 00:00:00 2001 From: Jakub Fornadel Date: Tue, 27 Aug 2024 11:44:46 -0700 Subject: [PATCH 029/105] update evm --- submodules/taraxa-evm | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/submodules/taraxa-evm b/submodules/taraxa-evm index 476ac57228..fd93fb52c8 160000 --- a/submodules/taraxa-evm +++ b/submodules/taraxa-evm @@ -1 +1 @@ -Subproject commit 476ac5722850410018f7ff6038fb9e3ef505fd2a +Subproject commit fd93fb52c8b5ce835a71b0686d656ebe0c6026b3 From d2937cf6ffcd7b0e404165ad4b3cc728a68bece0 Mon Sep 17 00:00:00 2001 From: Jakub Fornadel Date: Tue, 27 Aug 2024 12:32:36 -0700 Subject: [PATCH 030/105] check if dag transactions senders exist --- libraries/config/include/config/hardfork.hpp | 5 +++-- libraries/core_libs/consensus/include/dag/dag_manager.hpp | 3 ++- libraries/core_libs/consensus/src/dag/dag_manager.cpp | 6 ++++++ .../packets_handlers/latest/dag_block_packet_handler.cpp | 1 + 4 files changed, 12 insertions(+), 3 deletions(-) diff --git a/libraries/config/include/config/hardfork.hpp b/libraries/config/include/config/hardfork.hpp index 5ffb43973e..5e8a9a3965 100644 --- a/libraries/config/include/config/hardfork.hpp +++ b/libraries/config/include/config/hardfork.hpp @@ -125,14 +125,15 @@ struct HardforksConfig { // Aspen hardfork implements new yield curve AspenHardfork aspen_hf; - bool isAspenHardforkPartOne(uint64_t block_number) const { return block_number >= aspen_hf.block_num_part_one; } - // Ficus hardfork: implementation of pillar chain FicusHardforkConfig 
ficus_hf; // Cornus hf - support multiple undelegations from the same validator at the same time uint64_t cornus_hf_block_num{0}; + bool isAspenHardforkPartOne(uint64_t block_number) const { return block_number >= aspen_hf.block_num_part_one; } + bool isCornusHardfork(uint64_t block_number) const { return block_number >= cornus_hf_block_num; } + HAS_RLP_FIELDS }; diff --git a/libraries/core_libs/consensus/include/dag/dag_manager.hpp b/libraries/core_libs/consensus/include/dag/dag_manager.hpp index 62232a57e9..73d479392e 100644 --- a/libraries/core_libs/consensus/include/dag/dag_manager.hpp +++ b/libraries/core_libs/consensus/include/dag/dag_manager.hpp @@ -43,7 +43,8 @@ class DagManager : public std::enable_shared_from_this { IncorrectTransactionsEstimation, BlockTooBig, FailedTipsVerification, - MissingTip + MissingTip, + FailedTxSenderVerification }; explicit DagManager(const FullNodeConfig &config, addr_t node_addr, std::shared_ptr trx_mgr, diff --git a/libraries/core_libs/consensus/src/dag/dag_manager.cpp b/libraries/core_libs/consensus/src/dag/dag_manager.cpp index 27d178fe08..a734734128 100644 --- a/libraries/core_libs/consensus/src/dag/dag_manager.cpp +++ b/libraries/core_libs/consensus/src/dag/dag_manager.cpp @@ -639,6 +639,12 @@ std::pair DagManager::ver } for (auto t : transactions) { + if (kHardforks.isCornusHardfork(*propose_period) && !final_chain_->getAccount(t->getSender()).has_value()) { + LOG(log_nf_) << "Ignore dag block " << block_hash << " since tx's " << t->getHash() << " sender " + << t->getSender() << " does not exist"; + return {VerifyBlockReturnType::FailedTxSenderVerification, {}}; + } + all_block_trxs.emplace_back(std::move(t)); } diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/dag_block_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/dag_block_packet_handler.cpp index a0b22ea34b..7304f8bf13 100644 --- 
a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/dag_block_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/dag_block_packet_handler.cpp @@ -187,6 +187,7 @@ void DagBlockPacketHandler::onNewBlockReceived( } } break; case DagManager::VerifyBlockReturnType::ExpiredBlock: + case DagManager::VerifyBlockReturnType::FailedTxSenderVerification: break; } } From b9fdfedb490024ee8c28cabf8c1cbb9b81137bf5 Mon Sep 17 00:00:00 2001 From: Jakub Fornadel Date: Tue, 27 Aug 2024 13:36:00 -0700 Subject: [PATCH 031/105] check old proposed blocks presence --- .../consensus/include/pbft/proposed_blocks.hpp | 8 ++++++++ .../core_libs/consensus/src/pbft/pbft_manager.cpp | 5 +++++ .../core_libs/consensus/src/pbft/proposed_blocks.cpp | 10 ++++++++++ 3 files changed, 23 insertions(+) diff --git a/libraries/core_libs/consensus/include/pbft/proposed_blocks.hpp b/libraries/core_libs/consensus/include/pbft/proposed_blocks.hpp index 5a3772751a..e76d084d74 100644 --- a/libraries/core_libs/consensus/include/pbft/proposed_blocks.hpp +++ b/libraries/core_libs/consensus/include/pbft/proposed_blocks.hpp @@ -66,6 +66,14 @@ class ProposedBlocks { */ void cleanupProposedPbftBlocksByPeriod(PbftPeriod period); + /** + * @brief Check if there are any old proposed blocks that were supposed to be deleted + * @param current_period + * + * @return err msg in case there are some old blocks, otherwise empty optional + */ + std::optional checkOldBlocksPresence(PbftPeriod current_period) const; + private: // > std::map, bool>>> proposed_blocks_; diff --git a/libraries/core_libs/consensus/src/pbft/pbft_manager.cpp b/libraries/core_libs/consensus/src/pbft/pbft_manager.cpp index d5c2f5562a..6ca7fe5188 100644 --- a/libraries/core_libs/consensus/src/pbft/pbft_manager.cpp +++ b/libraries/core_libs/consensus/src/pbft/pbft_manager.cpp @@ -456,6 +456,11 @@ void PbftManager::initialState() { proposed_blocks_.pushProposedPbftBlock(block, false); } + // TODO[2840]: remove 
this check if case nodes do not log the err messages after restart + if (const auto &err_msg = proposed_blocks_.checkOldBlocksPresence(current_pbft_period); err_msg.has_value()) { + LOG(log_er_) << "Old proposed blocks saved in db -> : " << *err_msg; + } + // Process saved cert voted block from db if (auto cert_voted_block_data = db_->getCertVotedBlockInRound(); cert_voted_block_data.has_value()) { const auto [cert_voted_block_round, cert_voted_block] = *cert_voted_block_data; diff --git a/libraries/core_libs/consensus/src/pbft/proposed_blocks.cpp b/libraries/core_libs/consensus/src/pbft/proposed_blocks.cpp index fa3aec043e..814914a068 100644 --- a/libraries/core_libs/consensus/src/pbft/proposed_blocks.cpp +++ b/libraries/core_libs/consensus/src/pbft/proposed_blocks.cpp @@ -81,4 +81,14 @@ void ProposedBlocks::cleanupProposedPbftBlocksByPeriod(PbftPeriod period) { } } +std::optional ProposedBlocks::checkOldBlocksPresence(PbftPeriod current_period) const { + std::string msg; + for (auto period_it = proposed_blocks_.begin(); + period_it != proposed_blocks_.end() && period_it->first < current_period; period_it++) { + msg += std::to_string(period_it->first) + " -> " + std::to_string(period_it->second.size()) + ". "; + } + + return msg.empty() ? std::nullopt : std::make_optional(msg); +} + } // namespace taraxa From 16d306ab92910d2496173263114f08ecfa907b95 Mon Sep 17 00:00:00 2001 From: Jakub Fornadel Date: Thu, 29 Aug 2024 12:59:52 -0700 Subject: [PATCH 032/105] Revert "check if dag transactions senders exist" This reverts commit c2eb50cad0715c7007626f214fe9ee5f322e3e9c. 
--- libraries/config/include/config/hardfork.hpp | 5 ++--- libraries/core_libs/consensus/include/dag/dag_manager.hpp | 3 +-- libraries/core_libs/consensus/src/dag/dag_manager.cpp | 6 ------ .../packets_handlers/latest/dag_block_packet_handler.cpp | 1 - 4 files changed, 3 insertions(+), 12 deletions(-) diff --git a/libraries/config/include/config/hardfork.hpp b/libraries/config/include/config/hardfork.hpp index 5e8a9a3965..5ffb43973e 100644 --- a/libraries/config/include/config/hardfork.hpp +++ b/libraries/config/include/config/hardfork.hpp @@ -125,15 +125,14 @@ struct HardforksConfig { // Aspen hardfork implements new yield curve AspenHardfork aspen_hf; + bool isAspenHardforkPartOne(uint64_t block_number) const { return block_number >= aspen_hf.block_num_part_one; } + // Ficus hardfork: implementation of pillar chain FicusHardforkConfig ficus_hf; // Cornus hf - support multiple undelegations from the same validator at the same time uint64_t cornus_hf_block_num{0}; - bool isAspenHardforkPartOne(uint64_t block_number) const { return block_number >= aspen_hf.block_num_part_one; } - bool isCornusHardfork(uint64_t block_number) const { return block_number >= cornus_hf_block_num; } - HAS_RLP_FIELDS }; diff --git a/libraries/core_libs/consensus/include/dag/dag_manager.hpp b/libraries/core_libs/consensus/include/dag/dag_manager.hpp index 73d479392e..62232a57e9 100644 --- a/libraries/core_libs/consensus/include/dag/dag_manager.hpp +++ b/libraries/core_libs/consensus/include/dag/dag_manager.hpp @@ -43,8 +43,7 @@ class DagManager : public std::enable_shared_from_this { IncorrectTransactionsEstimation, BlockTooBig, FailedTipsVerification, - MissingTip, - FailedTxSenderVerification + MissingTip }; explicit DagManager(const FullNodeConfig &config, addr_t node_addr, std::shared_ptr trx_mgr, diff --git a/libraries/core_libs/consensus/src/dag/dag_manager.cpp b/libraries/core_libs/consensus/src/dag/dag_manager.cpp index a734734128..27d178fe08 100644 --- 
a/libraries/core_libs/consensus/src/dag/dag_manager.cpp +++ b/libraries/core_libs/consensus/src/dag/dag_manager.cpp @@ -639,12 +639,6 @@ std::pair DagManager::ver } for (auto t : transactions) { - if (kHardforks.isCornusHardfork(*propose_period) && !final_chain_->getAccount(t->getSender()).has_value()) { - LOG(log_nf_) << "Ignore dag block " << block_hash << " since tx's " << t->getHash() << " sender " - << t->getSender() << " does not exist"; - return {VerifyBlockReturnType::FailedTxSenderVerification, {}}; - } - all_block_trxs.emplace_back(std::move(t)); } diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/dag_block_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/dag_block_packet_handler.cpp index 7304f8bf13..a0b22ea34b 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/dag_block_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/dag_block_packet_handler.cpp @@ -187,7 +187,6 @@ void DagBlockPacketHandler::onNewBlockReceived( } } break; case DagManager::VerifyBlockReturnType::ExpiredBlock: - case DagManager::VerifyBlockReturnType::FailedTxSenderVerification: break; } } From 2e3dd01031e56b29d8f0cd9c10290b62d0f4e8bb Mon Sep 17 00:00:00 2001 From: Jakub Fornadel Date: Fri, 6 Sep 2024 13:38:15 -0700 Subject: [PATCH 033/105] use named constants in test --- tests/pbft_manager_test.cpp | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/tests/pbft_manager_test.cpp b/tests/pbft_manager_test.cpp index 356d880d53..e722673fd4 100644 --- a/tests/pbft_manager_test.cpp +++ b/tests/pbft_manager_test.cpp @@ -675,11 +675,13 @@ TEST_F(PbftManagerWithDagCreation, proposed_blocks) { std::map> blocks; // Create blocks - for (PbftPeriod period = 1; period <= 3; period++) { - for (uint32_t i = 1; i <= 40; i++) { + const auto max_period = 3; + const auto blocks_per_period = 40; + for (PbftPeriod period = 1; period <= max_period; period++) { + 
for (uint32_t i = 1; i <= blocks_per_period; i++) { std::vector reward_votes_hashes; auto block = - std::make_shared(blk_hash_t(1), kNullBlockHash, kNullBlockHash, kNullBlockHash, period, addr_t(), + std::make_shared(blk_hash_t(i), kNullBlockHash, kNullBlockHash, kNullBlockHash, period, addr_t(), dev::KeyPair::create().secret(), std::move(reward_votes_hashes)); blocks.insert({block->getBlockHash(), block}); } From 9e5f4a98768a8a018f36781a3ebfc08e7c186759 Mon Sep 17 00:00:00 2001 From: Jakub Fornadel Date: Fri, 6 Sep 2024 14:22:02 -0700 Subject: [PATCH 034/105] fix votes_count_changes pillar test --- tests/pillar_chain_test.cpp | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/tests/pillar_chain_test.cpp b/tests/pillar_chain_test.cpp index b5ac845e77..aef49243a0 100644 --- a/tests/pillar_chain_test.cpp +++ b/tests/pillar_chain_test.cpp @@ -204,8 +204,7 @@ TEST_F(PillarChainTest, votes_count_changes) { expected_validators_vote_counts_changes[redelegate_to_addr] = 0; for (size_t i = 0; i < validators_count - 3; i++) { const auto node_addr = toAddress(node_cfgs[i].node_secret); - const auto node_vote_count = - nodes[0]->getFinalChain()->dpos_eligible_vote_count(new_pillar_block_period, node_addr); + const auto node_vote_count = nodes[0]->getFinalChain()->dposEligibleVoteCount(new_pillar_block_period, node_addr); const auto redelegation_value = node_vote_count * node_cfgs[0].genesis.state.dpos.eligibility_balance_threshold; expected_validators_vote_counts_changes[node_addr] = dev::s256(node_vote_count) * -1; expected_validators_vote_counts_changes[redelegate_to_addr] += dev::s256(node_vote_count); From 492dcb4b22380fb8c57207e4dc198713e1de1269 Mon Sep 17 00:00:00 2001 From: Jakub Fornadel Date: Wed, 11 Sep 2024 18:29:36 -0700 Subject: [PATCH 035/105] fix undelegate compatibility --- submodules/taraxa-evm | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/submodules/taraxa-evm b/submodules/taraxa-evm index fd93fb52c8..4e476e4862 160000 --- 
a/submodules/taraxa-evm +++ b/submodules/taraxa-evm @@ -1 +1 @@ -Subproject commit fd93fb52c8b5ce835a71b0686d656ebe0c6026b3 +Subproject commit 4e476e4862a29ce765ea2306e1a28a11de02fcc1 From 473a820fab5667f873884f9c5a6d25d636ddd172 Mon Sep 17 00:00:00 2001 From: mfrankovi Date: Thu, 12 Sep 2024 12:37:50 +0200 Subject: [PATCH 036/105] fix: gossip votes to all capability version --- .../consensus/include/pbft/pbft_manager.hpp | 9 ++++++++- .../consensus/src/pbft/pbft_manager.cpp | 18 +++++++++++------- .../latest/vote_packet_handler.cpp | 3 ++- 3 files changed, 21 insertions(+), 9 deletions(-) diff --git a/libraries/core_libs/consensus/include/pbft/pbft_manager.hpp b/libraries/core_libs/consensus/include/pbft/pbft_manager.hpp index d963e67cd6..11c49d218b 100644 --- a/libraries/core_libs/consensus/include/pbft/pbft_manager.hpp +++ b/libraries/core_libs/consensus/include/pbft/pbft_manager.hpp @@ -279,6 +279,14 @@ class PbftManager { */ bool validatePillarDataInPeriodData(const PeriodData &period_data) const; + /** + * @brief Gossips vote to the other peers + * + * @param vote + * @param voted_block + */ + void gossipVote(const std::shared_ptr &vote, const std::shared_ptr &voted_block); + private: /** * @brief Broadcast or rebroadcast 2t+1 soft/reward/previous round next votes + all own votes if needed @@ -411,7 +419,6 @@ class PbftManager { * * @param vote * @param voted_block - * @return true if successful, otherwise false */ void gossipNewVote(const std::shared_ptr &vote, const std::shared_ptr &voted_block); diff --git a/libraries/core_libs/consensus/src/pbft/pbft_manager.cpp b/libraries/core_libs/consensus/src/pbft/pbft_manager.cpp index 6ca7fe5188..e5ae2d4126 100644 --- a/libraries/core_libs/consensus/src/pbft/pbft_manager.cpp +++ b/libraries/core_libs/consensus/src/pbft/pbft_manager.cpp @@ -807,6 +807,17 @@ bool PbftManager::genAndPlaceProposeVote(const std::shared_ptr &propo } void PbftManager::gossipNewVote(const std::shared_ptr &vote, const std::shared_ptr 
&voted_block) { + gossipVote(vote, voted_block); + + auto found_voted_block_it = current_round_broadcasted_votes_.find(vote->getBlockHash()); + if (found_voted_block_it == current_round_broadcasted_votes_.end()) { + found_voted_block_it = current_round_broadcasted_votes_.insert({vote->getBlockHash(), {}}).first; + } + + found_voted_block_it->second.emplace_back(vote->getStep()); +} + +void PbftManager::gossipVote(const std::shared_ptr &vote, const std::shared_ptr &voted_block) { assert(!voted_block || vote->getBlockHash() == voted_block->getBlockHash()); auto net = network_.lock(); @@ -817,13 +828,6 @@ void PbftManager::gossipNewVote(const std::shared_ptr &vote, const std } net->gossipVote(vote, voted_block); - - auto found_voted_block_it = current_round_broadcasted_votes_.find(vote->getBlockHash()); - if (found_voted_block_it == current_round_broadcasted_votes_.end()) { - found_voted_block_it = current_round_broadcasted_votes_.insert({vote->getBlockHash(), {}}).first; - } - - found_voted_block_it->second.emplace_back(vote->getStep()); } void PbftManager::proposeBlock_() { diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/vote_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/vote_packet_handler.cpp index 67a284c4fd..341ef6ae0e 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/vote_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/vote_packet_handler.cpp @@ -81,7 +81,8 @@ void VotePacketHandler::process(const threadpool::PacketData &packet_data, const // Do not mark it before, as peers have small caches of known votes. 
Only mark gossiping votes peer->markPbftVoteAsKnown(vote_hash); - onNewPbftVote(vote, pbft_block); + + pbft_mgr_->gossipVote(vote, pbft_block); } void VotePacketHandler::onNewPbftVote(const std::shared_ptr &vote, const std::shared_ptr &block, From 80120beb798494755d6b618a517c2cf76862e131 Mon Sep 17 00:00:00 2001 From: kstdl Date: Tue, 27 Aug 2024 17:56:56 +0200 Subject: [PATCH 037/105] chore: rename pbft_block prev_state_root to final_chain_hash --- .../consensus/src/pbft/pbft_manager.cpp | 16 +++++++--------- .../types/pbft_block/include/pbft/pbft_block.hpp | 10 +++++++--- libraries/types/pbft_block/src/pbft_block.cpp | 12 ++++++------ tests/pbft_manager_test.cpp | 2 +- 4 files changed, 21 insertions(+), 19 deletions(-) diff --git a/libraries/core_libs/consensus/src/pbft/pbft_manager.cpp b/libraries/core_libs/consensus/src/pbft/pbft_manager.cpp index e5ae2d4126..dc8a0af8d5 100644 --- a/libraries/core_libs/consensus/src/pbft/pbft_manager.cpp +++ b/libraries/core_libs/consensus/src/pbft/pbft_manager.cpp @@ -8,10 +8,8 @@ #include "config/version.hpp" #include "dag/dag.hpp" +#include "dag/dag_manager.hpp" #include "final_chain/final_chain.hpp" -#include "network/tarcap/packets_handlers/latest/pbft_sync_packet_handler.hpp" -#include "network/tarcap/packets_handlers/latest/vote_packet_handler.hpp" -#include "network/tarcap/packets_handlers/latest/votes_bundle_packet_handler.hpp" #include "pbft/period_data.hpp" #include "pillar_chain/pillar_chain_manager.hpp" #include "vote_manager/vote_manager.hpp" @@ -1135,17 +1133,17 @@ PbftManager::generatePbftBlock(PbftPeriod propose_period, const blk_hash_t &prev std::transform(reward_votes.begin(), reward_votes.end(), std::back_inserter(reward_votes_hashes), [](const auto &v) { return v->getHash(); }); - h256 last_state_root; + h256 final_chain_hash; if (propose_period > final_chain_->delegationDelay()) { if (const auto header = final_chain_->blockHeader(propose_period - final_chain_->delegationDelay())) { - last_state_root = 
header->state_root; + final_chain_hash = header->state_root; } else { LOG(log_wr_) << "Block for period " << propose_period << " could not be proposed as we are behind"; return {}; } } try { - auto block = std::make_shared(prev_blk_hash, anchor_hash, order_hash, last_state_root, propose_period, + auto block = std::make_shared(prev_blk_hash, anchor_hash, order_hash, final_chain_hash, propose_period, node_addr_, node_sk_, std::move(reward_votes_hashes), extra_data); return {std::make_pair(std::move(block), std::move(reward_votes))}; @@ -1405,8 +1403,8 @@ PbftStateRootValidation PbftManager::validatePbftBlockStateRoot(const std::share return PbftStateRootValidation::Missing; } } - if (pbft_block->getPrevStateRoot() != prev_state_root_hash) { - LOG(log_er_) << "Block " << pbft_block_hash << " state root " << pbft_block->getPrevStateRoot() + if (pbft_block->getFinalChainHash() != prev_state_root_hash) { + LOG(log_er_) << "Block " << pbft_block_hash << " state root " << pbft_block->getFinalChainHash() << " isn't matching actual " << prev_state_root_hash; return PbftStateRootValidation::Invalid; } @@ -1883,7 +1881,7 @@ std::optional>>> Pbf if (validation_result != PbftStateRootValidation::Missing) { if (validation_result == PbftStateRootValidation::Invalid) { LOG(log_er_) << "Failed verifying block " << pbft_block_hash - << " with invalid state root: " << period_data.pbft_blk->getPrevStateRoot() + << " with invalid state root: " << period_data.pbft_blk->getFinalChainHash() << ". 
Disconnect malicious peer " << node_id.abridged(); sync_queue_.clear(); net->handleMaliciousSyncPeer(node_id); diff --git a/libraries/types/pbft_block/include/pbft/pbft_block.hpp b/libraries/types/pbft_block/include/pbft/pbft_block.hpp index cab5c843cc..07bb41fe59 100644 --- a/libraries/types/pbft_block/include/pbft/pbft_block.hpp +++ b/libraries/types/pbft_block/include/pbft/pbft_block.hpp @@ -23,7 +23,7 @@ class PbftBlock { blk_hash_t prev_block_hash_; blk_hash_t dag_block_hash_as_pivot_; blk_hash_t order_hash_; - blk_hash_t prev_state_root_hash_; + blk_hash_t final_chain_hash_; PbftPeriod period_; // Block index, PBFT head block is period 0, first PBFT block is period 1 uint64_t timestamp_; addr_t beneficiary_; @@ -33,7 +33,7 @@ class PbftBlock { public: PbftBlock(const blk_hash_t& prev_blk_hash, const blk_hash_t& dag_blk_hash_as_pivot, const blk_hash_t& order_hash, - const blk_hash_t& prev_state_root, PbftPeriod period, const addr_t& beneficiary, const secret_t& sk, + const blk_hash_t& final_chain_hash, PbftPeriod period, const addr_t& beneficiary, const secret_t& sk, std::vector&& reward_votes, const std::optional& extra_data = {}); explicit PbftBlock(const dev::RLP& rlp); explicit PbftBlock(const bytes& RLP); @@ -103,7 +103,11 @@ class PbftBlock { */ const auto& getOrderHash() const { return order_hash_; } - const auto& getPrevStateRoot() const { return prev_state_root_hash_; } + /** + * @brief Get final chain hash to tie final chain to the PBFT chain + * @return final chain hash + */ + const auto& getFinalChainHash() const { return final_chain_hash_; } /** * @brief Get period number diff --git a/libraries/types/pbft_block/src/pbft_block.cpp b/libraries/types/pbft_block/src/pbft_block.cpp index 6fd2de9062..c4ceb17b51 100644 --- a/libraries/types/pbft_block/src/pbft_block.cpp +++ b/libraries/types/pbft_block/src/pbft_block.cpp @@ -15,11 +15,11 @@ PbftBlock::PbftBlock(dev::RLP const& rlp) { if (rlp.itemCount() == 9) { dev::bytes extra_data_bytes; 
util::rlp_tuple(util::RLPDecoderRef(rlp, true), prev_block_hash_, dag_block_hash_as_pivot_, order_hash_, - prev_state_root_hash_, period_, timestamp_, reward_votes_, extra_data_bytes, signature_); + final_chain_hash_, period_, timestamp_, reward_votes_, extra_data_bytes, signature_); extra_data_ = PbftBlockExtraData::fromBytes(extra_data_bytes); } else { util::rlp_tuple(util::RLPDecoderRef(rlp, true), prev_block_hash_, dag_block_hash_as_pivot_, order_hash_, - prev_state_root_hash_, period_, timestamp_, reward_votes_, signature_); + final_chain_hash_, period_, timestamp_, reward_votes_, signature_); } calculateHash_(); @@ -27,13 +27,13 @@ PbftBlock::PbftBlock(dev::RLP const& rlp) { } PbftBlock::PbftBlock(const blk_hash_t& prev_blk_hash, const blk_hash_t& dag_blk_hash_as_pivot, - const blk_hash_t& order_hash, const blk_hash_t& prev_state_root, PbftPeriod period, + const blk_hash_t& order_hash, const blk_hash_t& final_chain_hash, PbftPeriod period, const addr_t& beneficiary, const secret_t& sk, std::vector&& reward_votes, const std::optional& extra_data) : prev_block_hash_(prev_blk_hash), dag_block_hash_as_pivot_(dag_blk_hash_as_pivot), order_hash_(order_hash), - prev_state_root_hash_(prev_state_root), + final_chain_hash_(final_chain_hash), period_(period), beneficiary_(beneficiary), reward_votes_(reward_votes), @@ -87,7 +87,7 @@ Json::Value PbftBlock::getJson() const { json["prev_block_hash"] = prev_block_hash_.toString(); json["dag_block_hash_as_pivot"] = dag_block_hash_as_pivot_.toString(); json["order_hash"] = order_hash_.toString(); - json["prev_state_root_hash"] = prev_state_root_hash_.toString(); + json["prev_state_root_hash"] = final_chain_hash_.toString(); json["period"] = (Json::Value::UInt64)period_; json["timestamp"] = (Json::Value::UInt64)timestamp_; json["block_hash"] = block_hash_.toString(); @@ -110,7 +110,7 @@ void PbftBlock::streamRLP(dev::RLPStream& strm, bool include_sig) const { strm << prev_block_hash_; strm << dag_block_hash_as_pivot_; strm << 
order_hash_; - strm << prev_state_root_hash_; + strm << final_chain_hash_; strm << period_; strm << timestamp_; strm.appendVector(reward_votes_); diff --git a/tests/pbft_manager_test.cpp b/tests/pbft_manager_test.cpp index e722673fd4..476552ee58 100644 --- a/tests/pbft_manager_test.cpp +++ b/tests/pbft_manager_test.cpp @@ -740,7 +740,7 @@ TEST_F(PbftManagerWithDagCreation, state_root_hash) { if (period > state_root_delay) { state_root = node->getFinalChain()->blockHeader(period - state_root_delay)->state_root; } - EXPECT_EQ(pbft_block.getPrevStateRoot(), state_root); + EXPECT_EQ(pbft_block.getFinalChainHash(), state_root); pbft_block = node->getPbftChain()->getPbftBlockInChain(pbft_block.getPrevBlockHash()); } From dc7c17ba512cf7d29f8943bd4d8ac2425de24f76 Mon Sep 17 00:00:00 2001 From: kstdl Date: Fri, 6 Sep 2024 13:54:42 +0200 Subject: [PATCH 038/105] feat: use final chain block hash instead of state root after cornus hf --- for_devs/local-net | 11 ++-- .../config_jsons/default/default_genesis.json | 2 +- libraries/config/include/config/hardfork.hpp | 2 + .../consensus/include/final_chain/data.hpp | 13 ++--- .../include/final_chain/final_chain.hpp | 8 +++ .../consensus/include/pbft/pbft_manager.hpp | 4 +- .../consensus/src/final_chain/final_chain.cpp | 18 ++++++- .../consensus/src/pbft/pbft_manager.cpp | 51 ++++++++----------- libraries/types/pbft_block/src/pbft_block.cpp | 2 +- tests/pbft_manager_test.cpp | 6 +-- 10 files changed, 66 insertions(+), 51 deletions(-) diff --git a/for_devs/local-net b/for_devs/local-net index 8add8bec84..4a9087bffb 100755 --- a/for_devs/local-net +++ b/for_devs/local-net @@ -1,5 +1,4 @@ #!/usr/bin/env python3 - import click import subprocess import threading @@ -313,7 +312,7 @@ def faucet_worker(tps): time.sleep(10) web3 = Web3(Web3.HTTPProvider('http://127.0.0.1:7017')) nonce = web3.eth.get_transaction_count( - Web3.to_checksum_address(faucet_public_address)) + Web3.toChecksumAddress(faucet_public_address)) consensus_nodes = 
list(consensus_nodes_public_addresses.keys()) @@ -323,10 +322,10 @@ def faucet_worker(tps): 0, len(consensus_nodes)-1)]] tx = { 'nonce': nonce, - 'to': Web3.to_checksum_address(to), - 'value': web3.to_wei(100000000, 'gwei'), + 'to': Web3.toChecksumAddress(to), + 'value': web3.toWei(100000000, 'gwei'), 'gas': 21000, - 'gasPrice': web3.to_wei(1, 'gwei'), + 'gasPrice': web3.toWei(1, 'gwei'), 'chainId': int(chain_id) } nonce = nonce + 1 @@ -338,7 +337,7 @@ def faucet_worker(tps): try: tx_hash = web3.eth.send_raw_transaction(signed_tx.rawTransaction) log_format( - 'faucet', f'{t} Dripped to {to}, tx_hash: {web3.to_hex(tx_hash)}') + 'faucet', f'{t} Dripped to {to}, tx_hash: {web3.toHex(tx_hash)}') except Exception as e: log_format('faucet', f'{t} Failed to drip to {to}. Error: {str(e)}') pass diff --git a/libraries/cli/include/cli/config_jsons/default/default_genesis.json b/libraries/cli/include/cli/config_jsons/default/default_genesis.json index ba63885f5a..4afcf79004 100644 --- a/libraries/cli/include/cli/config_jsons/default/default_genesis.json +++ b/libraries/cli/include/cli/config_jsons/default/default_genesis.json @@ -120,6 +120,6 @@ "pillar_blocks_interval": 10, "bridge_contract_address": "0xcAF2b453FE8382a4B8110356DF0508f6d71F22BF" }, - "cornus_hf_block_num": 0 + "cornus_hf_block_num": 100 } } \ No newline at end of file diff --git a/libraries/config/include/config/hardfork.hpp b/libraries/config/include/config/hardfork.hpp index 5ffb43973e..a591ee860e 100644 --- a/libraries/config/include/config/hardfork.hpp +++ b/libraries/config/include/config/hardfork.hpp @@ -133,6 +133,8 @@ struct HardforksConfig { // Cornus hf - support multiple undelegations from the same validator at the same time uint64_t cornus_hf_block_num{0}; + bool isCornusHardfork(uint64_t block_number) const { return block_number >= cornus_hf_block_num; } + HAS_RLP_FIELDS }; diff --git a/libraries/core_libs/consensus/include/final_chain/data.hpp 
b/libraries/core_libs/consensus/include/final_chain/data.hpp index b04ce77d3d..d6abfd76d0 100644 --- a/libraries/core_libs/consensus/include/final_chain/data.hpp +++ b/libraries/core_libs/consensus/include/final_chain/data.hpp @@ -41,12 +41,6 @@ struct BlockHeader : BlockHeaderData { BlockHeader() = default; BlockHeader(std::string&& raw_header_data); BlockHeader(std::string&& raw_header_data, const PbftBlock& pbft, uint64_t gas_limit); - h256 hash; - Address author; - uint64_t gas_limit = 0; - uint64_t timestamp = 0; - EthBlockNumber number = 0; - bytes extra_data; void setFromPbft(const PbftBlock& pbft); @@ -59,6 +53,13 @@ struct BlockHeader : BlockHeaderData { static h256 const& mixHash(); dev::bytes ethereumRlp() const; + + h256 hash; + Address author; + uint64_t gas_limit = 0; + uint64_t timestamp = 0; + EthBlockNumber number = 0; + bytes extra_data; }; static constexpr auto c_bloomIndexSize = 16; diff --git a/libraries/core_libs/consensus/include/final_chain/final_chain.hpp b/libraries/core_libs/consensus/include/final_chain/final_chain.hpp index 572347288c..2344834a77 100644 --- a/libraries/core_libs/consensus/include/final_chain/final_chain.hpp +++ b/libraries/core_libs/consensus/include/final_chain/final_chain.hpp @@ -88,6 +88,14 @@ class FinalChain { */ std::optional blockHash(std::optional n = {}) const; + /** + * @brief Method to get the final chain hash by block number + * + * @param n block number + * @return std::optional final chain hash or nullopt + */ + std::optional finalChainHash(EthBlockNumber n) const; + /** * @brief Needed if we are changing params with hardfork and it affects Go part of code. 
For example DPOS contract * @param new_config state_api::Config diff --git a/libraries/core_libs/consensus/include/pbft/pbft_manager.hpp b/libraries/core_libs/consensus/include/pbft/pbft_manager.hpp index 11c49d218b..9bdbaed94e 100644 --- a/libraries/core_libs/consensus/include/pbft/pbft_manager.hpp +++ b/libraries/core_libs/consensus/include/pbft/pbft_manager.hpp @@ -466,11 +466,11 @@ class PbftManager { bool validatePbftBlock(const std::shared_ptr &pbft_block) const; /** - * @brief Validates pbft block state root. + * @brief Validates pbft block final chain hash. * @param pbft_block PBFT block * @return validation result */ - PbftStateRootValidation validatePbftBlockStateRoot(const std::shared_ptr &pbft_block) const; + PbftStateRootValidation validateFinalChainHash(const std::shared_ptr &pbft_block) const; /** * @brief Validates pbft block extra data presence: diff --git a/libraries/core_libs/consensus/src/final_chain/final_chain.cpp b/libraries/core_libs/consensus/src/final_chain/final_chain.cpp index c85e559593..52dd8932ad 100644 --- a/libraries/core_libs/consensus/src/final_chain/final_chain.cpp +++ b/libraries/core_libs/consensus/src/final_chain/final_chain.cpp @@ -433,7 +433,6 @@ bytes FinalChain::getCode(const addr_t& addr, std::optional blk_ state_api::ExecutionResult FinalChain::call(const state_api::EVMTransaction& trx, std::optional blk_n) const { - std::cout << blk_n.value_or(-1) << " " << lastIfAbsent(blk_n) << std::endl; auto const blk_header = blockHeader(lastIfAbsent(blk_n)); if (!blk_header) { throw std::runtime_error("Future block"); @@ -577,6 +576,23 @@ std::shared_ptr FinalChain::getBlockHeader(EthBlockNumber n) return {}; } +std::optional FinalChain::finalChainHash(EthBlockNumber n) const { + auto delay = delegationDelay(); + if (n <= delay) { + // first delegation delay blocks will have zero hash + return ZeroHash(); + } + auto header = blockHeader(n - delay); + if (!header) { + return {}; + } + + if 
(kConfig.genesis.state.hardforks.isCornusHardfork(n)) { + return header->hash; + } + return header->state_root; +} + std::optional FinalChain::getBlockHash(EthBlockNumber n) const { auto raw = db_->lookup(n, DbStorage::Columns::final_chain_blk_hash_by_number); if (raw.empty()) { diff --git a/libraries/core_libs/consensus/src/pbft/pbft_manager.cpp b/libraries/core_libs/consensus/src/pbft/pbft_manager.cpp index dc8a0af8d5..759d656505 100644 --- a/libraries/core_libs/consensus/src/pbft/pbft_manager.cpp +++ b/libraries/core_libs/consensus/src/pbft/pbft_manager.cpp @@ -1133,18 +1133,15 @@ PbftManager::generatePbftBlock(PbftPeriod propose_period, const blk_hash_t &prev std::transform(reward_votes.begin(), reward_votes.end(), std::back_inserter(reward_votes_hashes), [](const auto &v) { return v->getHash(); }); - h256 final_chain_hash; - if (propose_period > final_chain_->delegationDelay()) { - if (const auto header = final_chain_->blockHeader(propose_period - final_chain_->delegationDelay())) { - final_chain_hash = header->state_root; - } else { - LOG(log_wr_) << "Block for period " << propose_period << " could not be proposed as we are behind"; - return {}; - } + auto final_chain_hash = final_chain_->finalChainHash(propose_period); + if (!final_chain_hash) { + LOG(log_wr_) << "Block for period " << propose_period << " could not be proposed as we are behind"; + return {}; } try { - auto block = std::make_shared(prev_blk_hash, anchor_hash, order_hash, final_chain_hash, propose_period, - node_addr_, node_sk_, std::move(reward_votes_hashes), extra_data); + auto block = + std::make_shared(prev_blk_hash, anchor_hash, order_hash, final_chain_hash.value(), propose_period, + node_addr_, node_sk_, std::move(reward_votes_hashes), extra_data); return {std::make_pair(std::move(block), std::move(reward_votes))}; } catch (const std::exception &e) { @@ -1390,25 +1387,21 @@ std::shared_ptr PbftManager::identifyLeaderBlock_(PbftRound round, Pb return empty_leader_block; } 
-PbftStateRootValidation PbftManager::validatePbftBlockStateRoot(const std::shared_ptr &pbft_block) const { +PbftStateRootValidation PbftManager::validateFinalChainHash(const std::shared_ptr &pbft_block) const { auto period = pbft_block->getPeriod(); - auto const &pbft_block_hash = pbft_block->getBlockHash(); - { - h256 prev_state_root_hash; - if (period > final_chain_->delegationDelay()) { - if (const auto header = final_chain_->blockHeader(period - final_chain_->delegationDelay())) { - prev_state_root_hash = header->state_root; - } else { - LOG(log_wr_) << "Block " << pbft_block_hash << " could not be validated as we are behind"; - return PbftStateRootValidation::Missing; - } - } - if (pbft_block->getFinalChainHash() != prev_state_root_hash) { - LOG(log_er_) << "Block " << pbft_block_hash << " state root " << pbft_block->getFinalChainHash() - << " isn't matching actual " << prev_state_root_hash; - return PbftStateRootValidation::Invalid; - } + const auto &pbft_block_hash = pbft_block->getBlockHash(); + + auto prev_final_chain_hash = final_chain_->finalChainHash(period); + if (!prev_final_chain_hash) { + LOG(log_wr_) << "Block " << pbft_block_hash << " could not be validated as we are behind"; + return PbftStateRootValidation::Missing; + } + if (pbft_block->getFinalChainHash() != prev_final_chain_hash) { + LOG(log_er_) << "Block " << pbft_block_hash << " state root " << pbft_block->getFinalChainHash() + << " isn't matching actual " << prev_final_chain_hash.value(); + return PbftStateRootValidation::Invalid; } + return PbftStateRootValidation::Valid; } @@ -1481,7 +1474,7 @@ bool PbftManager::validatePbftBlock(const std::shared_ptr &pbft_block auto const &pbft_block_hash = pbft_block->getBlockHash(); - if (validatePbftBlockStateRoot(pbft_block) != PbftStateRootValidation::Valid) { + if (validateFinalChainHash(pbft_block) != PbftStateRootValidation::Valid) { return false; } @@ -1877,7 +1870,7 @@ std::optional>>> Pbf bool retry_logged = false; while (true) { - auto 
validation_result = validatePbftBlockStateRoot(period_data.pbft_blk); + auto validation_result = validateFinalChainHash(period_data.pbft_blk); if (validation_result != PbftStateRootValidation::Missing) { if (validation_result == PbftStateRootValidation::Invalid) { LOG(log_er_) << "Failed verifying block " << pbft_block_hash diff --git a/libraries/types/pbft_block/src/pbft_block.cpp b/libraries/types/pbft_block/src/pbft_block.cpp index c4ceb17b51..0fc8875784 100644 --- a/libraries/types/pbft_block/src/pbft_block.cpp +++ b/libraries/types/pbft_block/src/pbft_block.cpp @@ -87,7 +87,7 @@ Json::Value PbftBlock::getJson() const { json["prev_block_hash"] = prev_block_hash_.toString(); json["dag_block_hash_as_pivot"] = dag_block_hash_as_pivot_.toString(); json["order_hash"] = order_hash_.toString(); - json["prev_state_root_hash"] = final_chain_hash_.toString(); + json["final_chain_hash"] = final_chain_hash_.toString(); json["period"] = (Json::Value::UInt64)period_; json["timestamp"] = (Json::Value::UInt64)timestamp_; json["block_hash"] = block_hash_.toString(); diff --git a/tests/pbft_manager_test.cpp b/tests/pbft_manager_test.cpp index 476552ee58..acd204ab9e 100644 --- a/tests/pbft_manager_test.cpp +++ b/tests/pbft_manager_test.cpp @@ -736,11 +736,7 @@ TEST_F(PbftManagerWithDagCreation, state_root_hash) { // Check that all produced blocks have correct state_root_hashes while (pbft_block.getPeriod() != 1) { auto period = pbft_block.getPeriod(); - h256 state_root; - if (period > state_root_delay) { - state_root = node->getFinalChain()->blockHeader(period - state_root_delay)->state_root; - } - EXPECT_EQ(pbft_block.getFinalChainHash(), state_root); + EXPECT_EQ(pbft_block.getFinalChainHash(), node->getFinalChain()->finalChainHash(period)); pbft_block = node->getPbftChain()->getPbftBlockInChain(pbft_block.getPrevBlockHash()); } From 19cc948218f890a1129c301022542b5795a33eb3 Mon Sep 17 00:00:00 2001 From: kstdl Date: Tue, 10 Sep 2024 15:16:56 +0200 Subject: [PATCH 039/105] fix: 
period dags migration memory consumption --- .../migration/dag_block_period_migration.cpp | 47 +++++++++---------- 1 file changed, 21 insertions(+), 26 deletions(-) diff --git a/libraries/core_libs/storage/src/migration/dag_block_period_migration.cpp b/libraries/core_libs/storage/src/migration/dag_block_period_migration.cpp index 2cdade2db4..045ff79ebf 100644 --- a/libraries/core_libs/storage/src/migration/dag_block_period_migration.cpp +++ b/libraries/core_libs/storage/src/migration/dag_block_period_migration.cpp @@ -4,8 +4,6 @@ #include -#include "common/thread_pool.hpp" -#include "common/util.hpp" #include "pbft/period_data.hpp" namespace taraxa::storage::migration { @@ -17,15 +15,7 @@ std::string PeriodDataDagBlockMigration::id() { return "PeriodDataDagBlockMigrat uint32_t PeriodDataDagBlockMigration::dbVersion() { return 1; } void PeriodDataDagBlockMigration::migrate(logger::Logger& log) { - auto orig_col = DbStorage::Columns::period_data; - auto copied_col = db_->copyColumn(db_->handle(orig_col), orig_col.name() + "-copy"); - - if (copied_col == nullptr) { - LOG(log) << "Migration " << id() << " skipped: Unable to copy " << orig_col.name() << " column"; - return; - } - - auto it = db_->getColumnIterator(orig_col); + auto it = db_->getColumnIterator(DbStorage::Columns::period_data); it->SeekToFirst(); if (!it->Valid()) { return; @@ -40,31 +30,36 @@ void PeriodDataDagBlockMigration::migrate(logger::Logger& log) { } memcpy(&end_period, it->key().data(), sizeof(uint64_t)); const auto diff = (end_period - start_period) ? 
(end_period - start_period) : 1; + uint64_t curr_progress = 0; auto batch = db_->createWriteBatch(); const size_t max_size = 500000000; - it->SeekToFirst(); - // Get and save data in new format for all blocks - for (; it->Valid(); it->Next()) { - uint64_t period; - memcpy(&period, it->key().data(), sizeof(uint64_t)); - std::string raw = it->value().ToString(); - const auto period_data_old_rlp = dev::RLP(raw); - auto period_data = ::taraxa::PeriodData::FromOldPeriodData(period_data_old_rlp); - db_->insert(batch, copied_col.get(), period, period_data.rlp()); - - if (batch.GetDataSize() > max_size) { - db_->commitWriteBatch(batch); - } + // Get and save data in new format for all blocks + for (uint64_t period = start_period; period <= end_period; period++) { + const auto bts = db_->getPeriodDataRaw(period); + const auto db_rlp = dev::RLP(bts); auto percentage = (period - start_period) * 100 / diff; if (percentage > curr_progress) { curr_progress = percentage; LOG(log) << "Migration " << id() << " progress " << curr_progress << "%"; } + // If there are no dag blocks in the period, skip it + if (db_rlp.itemCount() > 2 && db_rlp[2].itemCount() == 0) { + continue; + } + // skip if the period data is already in the new format + try { + auto period_data = ::taraxa::PeriodData::FromOldPeriodData(db_rlp); + db_->insert(batch, DbStorage::Columns::period_data, period, period_data.rlp()); + } catch (const dev::RLPException& e) { + continue; + } + if (batch.GetDataSize() > max_size) { + db_->commitWriteBatch(batch); + } } db_->commitWriteBatch(batch); - - db_->replaceColumn(orig_col, std::move(copied_col)); + db_->compactColumn(DbStorage::Columns::period_data); } } // namespace taraxa::storage::migration \ No newline at end of file From bbe761d3096c02c70beea464e178d7ef22b2386b Mon Sep 17 00:00:00 2001 From: kstdl Date: Tue, 10 Sep 2024 15:20:26 +0200 Subject: [PATCH 040/105] chore: rename period dags migration --- ...lock_period_migration.hpp => period_dag_blocks.hpp} | 4 ++-- 
.../storage/src/migration/migration_manager.cpp | 4 ++-- ...lock_period_migration.cpp => period_dag_blocks.cpp} | 10 +++++----- 3 files changed, 9 insertions(+), 9 deletions(-) rename libraries/core_libs/storage/include/storage/migration/{dag_block_period_migration.hpp => period_dag_blocks.hpp} (68%) rename libraries/core_libs/storage/src/migration/{dag_block_period_migration.cpp => period_dag_blocks.cpp} (81%) diff --git a/libraries/core_libs/storage/include/storage/migration/dag_block_period_migration.hpp b/libraries/core_libs/storage/include/storage/migration/period_dag_blocks.hpp similarity index 68% rename from libraries/core_libs/storage/include/storage/migration/dag_block_period_migration.hpp rename to libraries/core_libs/storage/include/storage/migration/period_dag_blocks.hpp index 59173cf862..0584717414 100644 --- a/libraries/core_libs/storage/include/storage/migration/dag_block_period_migration.hpp +++ b/libraries/core_libs/storage/include/storage/migration/period_dag_blocks.hpp @@ -2,9 +2,9 @@ #include "storage/migration/migration_base.hpp" namespace taraxa::storage::migration { -class PeriodDataDagBlockMigration : public migration::Base { +class PeriodDagBlocks : public migration::Base { public: - PeriodDataDagBlockMigration(std::shared_ptr db); + PeriodDagBlocks(std::shared_ptr db); std::string id() override; uint32_t dbVersion() override; void migrate(logger::Logger& log) override; diff --git a/libraries/core_libs/storage/src/migration/migration_manager.cpp b/libraries/core_libs/storage/src/migration/migration_manager.cpp index 6eddb23c30..a226662ef6 100644 --- a/libraries/core_libs/storage/src/migration/migration_manager.cpp +++ b/libraries/core_libs/storage/src/migration/migration_manager.cpp @@ -1,12 +1,12 @@ #include "storage/migration/migration_manager.hpp" -#include "storage/migration/dag_block_period_migration.hpp" #include "storage/migration/final_chain_header.hpp" +#include "storage/migration/period_dag_blocks.hpp" #include 
"storage/migration/transaction_period.hpp" namespace taraxa::storage::migration { Manager::Manager(std::shared_ptr db, const addr_t& node_addr) : db_(db) { - registerMigration(); + registerMigration(); registerMigration(); LOG_OBJECTS_CREATE("MIGRATIONS"); } diff --git a/libraries/core_libs/storage/src/migration/dag_block_period_migration.cpp b/libraries/core_libs/storage/src/migration/period_dag_blocks.cpp similarity index 81% rename from libraries/core_libs/storage/src/migration/dag_block_period_migration.cpp rename to libraries/core_libs/storage/src/migration/period_dag_blocks.cpp index 045ff79ebf..9d3ed46281 100644 --- a/libraries/core_libs/storage/src/migration/dag_block_period_migration.cpp +++ b/libraries/core_libs/storage/src/migration/period_dag_blocks.cpp @@ -1,4 +1,4 @@ -#include "storage/migration/dag_block_period_migration.hpp" +#include "storage/migration/period_dag_blocks.hpp" #include @@ -8,13 +8,13 @@ namespace taraxa::storage::migration { -PeriodDataDagBlockMigration::PeriodDataDagBlockMigration(std::shared_ptr db) : migration::Base(db) {} +PeriodDagBlocks::PeriodDagBlocks(std::shared_ptr db) : migration::Base(db) {} -std::string PeriodDataDagBlockMigration::id() { return "PeriodDataDagBlockMigration"; } +std::string PeriodDagBlocks::id() { return "PeriodDagBlocks"; } -uint32_t PeriodDataDagBlockMigration::dbVersion() { return 1; } +uint32_t PeriodDagBlocks::dbVersion() { return 1; } -void PeriodDataDagBlockMigration::migrate(logger::Logger& log) { +void PeriodDagBlocks::migrate(logger::Logger& log) { auto it = db_->getColumnIterator(DbStorage::Columns::period_data); it->SeekToFirst(); if (!it->Valid()) { From 9491cee8b65330d9e15c4ad8f05662ab602ba892 Mon Sep 17 00:00:00 2001 From: kstdl Date: Fri, 13 Sep 2024 12:01:06 +0200 Subject: [PATCH 041/105] update taraxa-evm submodule with latest develop --- submodules/taraxa-evm | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/submodules/taraxa-evm b/submodules/taraxa-evm index 
4e476e4862..1187755d64 160000 --- a/submodules/taraxa-evm +++ b/submodules/taraxa-evm @@ -1 +1 @@ -Subproject commit 4e476e4862a29ce765ea2306e1a28a11de02fcc1 +Subproject commit 1187755d649f1dc5864a24cf26be65b03ef77fae From 64beae61d04f2a9f1ba550ca3b038d8516b58dce Mon Sep 17 00:00:00 2001 From: kstdl Date: Tue, 17 Sep 2024 23:24:01 +0200 Subject: [PATCH 042/105] chore: update to rocksdb9 --- CMakeLists.txt | 6 ++-- CMakeModules/rocksdb.cmake | 32 ------------------- conanfile.py | 3 ++ libraries/core_libs/CMakeLists.txt | 2 +- .../include/final_chain/final_chain.hpp | 1 - .../transaction/transaction_manager.hpp | 2 +- .../consensus/src/final_chain/final_chain.cpp | 2 +- .../latest/pbft_sync_packet_handler.hpp | 2 +- .../v3/pbft_sync_packet_handler.hpp | 2 +- .../latest/pbft_sync_packet_handler.cpp | 2 +- .../latest/status_packet_handler.cpp | 2 +- .../v3/pbft_sync_packet_handler.cpp | 2 +- .../network/src/tarcap/taraxa_capability.cpp | 4 +-- libraries/core_libs/storage/src/storage.cpp | 8 +++-- .../dag_block/src/dag_block_bundle_rlp.cpp | 2 +- .../include/transaction/transaction.hpp | 1 + .../transaction/src/system_transaction.cpp | 2 +- libraries/types/vote/include/vote/vote.hpp | 1 + submodules/CMakeLists.txt | 10 ++++-- submodules/taraxa-evm | 2 +- tests/CMakeLists.txt | 4 +-- tests/network_test.cpp | 1 - tests/pbft_manager_test.cpp | 1 - tests/pillar_chain_test.cpp | 10 +----- tests/rewards_stats_test.cpp | 15 +++++---- tests/test_util/gtest.hpp | 2 +- .../test_util/include/test_util/test_util.hpp | 4 +-- tests/test_util/src/test_util.cpp | 2 -- 28 files changed, 46 insertions(+), 81 deletions(-) delete mode 100644 CMakeModules/rocksdb.cmake diff --git a/CMakeLists.txt b/CMakeLists.txt index 2ce8bc7515..08bdd2e088 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -32,7 +32,8 @@ add_compile_options(-Wall -Wextra-semi -Wnull-dereference -Wno-unknown-pragmas - -Wno-overlength-strings) + -Wno-overlength-strings + -Wno-switch) # Set the position independent code 
property on all targets set(CMAKE_POSITION_INDEPENDENT_CODE ON) @@ -217,9 +218,6 @@ find_package(MPFR) set(JSONCPP_INCLUDE_DIR ${CONAN_INCLUDE_DIRS_JSONCPP}) include(ProjectJSONRPCCPP) -# rocksdb build -include(${PROJECT_SOURCE_DIR}/CMakeModules/rocksdb.cmake) - # Add sub-directories cmakes add_subdirectory(submodules) add_subdirectory(libraries) diff --git a/CMakeModules/rocksdb.cmake b/CMakeModules/rocksdb.cmake deleted file mode 100644 index 86a9c12b9a..0000000000 --- a/CMakeModules/rocksdb.cmake +++ /dev/null @@ -1,32 +0,0 @@ -# ========================================================================== # -# RocksDB key-value store # -# ========================================================================== # -include(FetchContent) - -set(CMAKE_POLICY_DEFAULT_CMP0077 NEW) - -FetchContent_Declare( - rocksdb - GIT_REPOSITORY https://github.com/facebook/rocksdb - GIT_TAG v8.5.3 - GIT_SHALLOW TRUE -) - -FetchContent_GetProperties(rocksdb) - -message(STATUS "Populating rocksdb") -set(USE_RTTI 1) -set(WITH_LZ4 ON) -set(WITH_GFLAGS OFF) -set(FAIL_ON_WARNINGS OFF) -set(PORTABLE 1 CACHE STRING "Override: Minimum CPU arch to support") # Disable -march=native -set(WITH_TESTS OFF CACHE INTERNAL "") -set(WITH_JNI OFF CACHE INTERNAL "") -set(WITH_TOOLS OFF CACHE INTERNAL "") -set(WITH_BENCHMARK_TOOLS OFF CACHE INTERNAL "") -set(WITH_CORE_TOOLS OFF CACHE INTERNAL "") -set(WITH_TRACE_TOOLS OFF CACHE INTERNAL "") -set(ROCKSDB_BUILD_SHARED ${BUILD_SHARED_LIBS} CACHE INTERNAL "") -set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -w -Wno-error=unused-const-variable -Wno-error=pedantic -Wno-error=format-pedantic -Wno-error=gnu-zero-variadic-macro-arguments -Wno-error=extra-semi -Wc++98-compat-extra-semi -Wno-error=deprecated-copy -Wno-deprecated-copy -Wno-error=pessimizing-move -Wno-pessimizing-move -Wno-pessimizing-move") - -FetchContent_MakeAvailable(rocksdb) diff --git a/conanfile.py b/conanfile.py index ef649a6957..221513fb83 100644 --- a/conanfile.py +++ b/conanfile.py @@ -19,6 +19,7 
@@ def requirements(self): self.requires("cryptopp/8.9.0") self.requires("gtest/1.14.0") self.requires("lz4/1.9.4") + self.requires("rocksdb/9.2.1") self.requires("prometheus-cpp/1.1.0") self.requires("jsoncpp/1.9.5") @@ -60,6 +61,8 @@ def configure(self): self.options["gtest"].build_gmock = False # this links cppcheck to prce library self.options["cppcheck"].have_rules = False + self.options["rocksdb"].use_rtti = True + self.options["rocksdb"].with_lz4 = True # mpir is required by cppcheck and it causing gmp confict self.options["mpir"].enable_gmpcompat = False diff --git a/libraries/core_libs/CMakeLists.txt b/libraries/core_libs/CMakeLists.txt index bd00ca099d..146a6cf48f 100644 --- a/libraries/core_libs/CMakeLists.txt +++ b/libraries/core_libs/CMakeLists.txt @@ -56,7 +56,7 @@ target_link_libraries(core_libs PUBLIC p2p metrics Jsonrpccpp - rocksdb + CONAN_PKG::rocksdb # GraphQL cppgraphqlgen::graphqlservice diff --git a/libraries/core_libs/consensus/include/final_chain/final_chain.hpp b/libraries/core_libs/consensus/include/final_chain/final_chain.hpp index 2344834a77..57d08d142d 100644 --- a/libraries/core_libs/consensus/include/final_chain/final_chain.hpp +++ b/libraries/core_libs/consensus/include/final_chain/final_chain.hpp @@ -34,7 +34,6 @@ class FinalChain { decltype(block_finalized_emitter_)::Subscriber const& block_finalized_ = block_finalized_emitter_; decltype(block_applying_emitter_)::Subscriber const& block_applying_ = block_applying_emitter_; - FinalChain() = default; ~FinalChain() = default; FinalChain(const std::shared_ptr& db, const taraxa::FullNodeConfig& config, const addr_t& node_addr); FinalChain(const FinalChain&) = delete; diff --git a/libraries/core_libs/consensus/include/transaction/transaction_manager.hpp b/libraries/core_libs/consensus/include/transaction/transaction_manager.hpp index 9909d79695..4cc87377f7 100644 --- a/libraries/core_libs/consensus/include/transaction/transaction_manager.hpp +++ 
b/libraries/core_libs/consensus/include/transaction/transaction_manager.hpp @@ -18,10 +18,10 @@ namespace taraxa { */ enum class TransactionStatus { Inserted = 0, InsertedNonProposable, Known, Overflow }; +struct FullNodeConfig; class DagBlock; class DagManager; class FullNode; -class FullNodeConfig; /** * @brief TransactionManager class verifies and inserts incoming transactions in memory pool and handles saving diff --git a/libraries/core_libs/consensus/src/final_chain/final_chain.cpp b/libraries/core_libs/consensus/src/final_chain/final_chain.cpp index 52dd8932ad..ec706648bc 100644 --- a/libraries/core_libs/consensus/src/final_chain/final_chain.cpp +++ b/libraries/core_libs/consensus/src/final_chain/final_chain.cpp @@ -322,7 +322,7 @@ std::shared_ptr FinalChain::appendBlock(Batch& batch, std::shared_p const TransactionReceipts& receipts) { dev::BytesMap trxs_trie, receipts_trie; dev::RLPStream rlp_strm; - auto trx_idx = 0; + size_t trx_idx = 0; for (; trx_idx < transactions.size(); ++trx_idx) { const auto& trx = transactions[trx_idx]; auto i_rlp = util::rlp_enc(rlp_strm, trx_idx); diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/pbft_sync_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/pbft_sync_packet_handler.hpp index 263b326ad7..9a99ce1a6c 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/pbft_sync_packet_handler.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/pbft_sync_packet_handler.hpp @@ -29,7 +29,7 @@ class PbftSyncPacketHandler : public ExtSyncingPacketHandler { virtual std::vector> decodeVotesBundle(const dev::RLP& votes_bundle_rlp) const; void pbftSyncComplete(); - void delayedPbftSync(int counter); + void delayedPbftSync(uint32_t counter); static constexpr uint32_t kDelayedPbftSyncDelayMs = 10; diff --git 
a/libraries/core_libs/network/include/network/tarcap/packets_handlers/v3/pbft_sync_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v3/pbft_sync_packet_handler.hpp index e2459081a1..3230b8c4dc 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets_handlers/v3/pbft_sync_packet_handler.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v3/pbft_sync_packet_handler.hpp @@ -29,7 +29,7 @@ class PbftSyncPacketHandler : public ExtSyncingPacketHandler { virtual std::vector> decodeVotesBundle(const dev::RLP& votes_bundle_rlp) const; void pbftSyncComplete(); - void delayedPbftSync(int counter); + void delayedPbftSync(uint32_t counter); static constexpr uint32_t kDelayedPbftSyncDelayMs = 10; diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/pbft_sync_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/pbft_sync_packet_handler.cpp index 561217b6e6..bf44485cb1 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/pbft_sync_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/pbft_sync_packet_handler.cpp @@ -258,7 +258,7 @@ void PbftSyncPacketHandler::pbftSyncComplete() { } } -void PbftSyncPacketHandler::delayedPbftSync(int counter) { +void PbftSyncPacketHandler::delayedPbftSync(uint32_t counter) { const uint32_t max_delayed_pbft_sync_count = 60000 / kDelayedPbftSyncDelayMs; auto pbft_sync_period = pbft_mgr_->pbftSyncingPeriod(); if (counter > max_delayed_pbft_sync_count) { diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/status_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/status_packet_handler.cpp index 8ab85c0b84..6a751ec456 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/status_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/status_packet_handler.cpp 
@@ -83,7 +83,7 @@ void StatusPacketHandler::process(const threadpool::PacketData& packet_data, con LOG((peers_state_->getPeersCount()) ? log_nf_ : log_er_) << "Light node " << packet_data.from_node_id_.abridged() << " would not be able to serve our syncing request. " - << "Current synced period " << pbft_synced_period << ", peer synced period " << pbft_synced_period + << "Current synced period " << pbft_synced_period << ", peer synced period " << peer_pbft_chain_size << ", peer light node history " << node_history << ". Peer will be disconnected"; disconnect(packet_data.from_node_id_, dev::p2p::UserReason); return; diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/v3/pbft_sync_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/v3/pbft_sync_packet_handler.cpp index 277fec6ad6..8af7d276a5 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/v3/pbft_sync_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/v3/pbft_sync_packet_handler.cpp @@ -258,7 +258,7 @@ void PbftSyncPacketHandler::pbftSyncComplete() { } } -void PbftSyncPacketHandler::delayedPbftSync(int counter) { +void PbftSyncPacketHandler::delayedPbftSync(uint32_t counter) { const uint32_t max_delayed_pbft_sync_count = 60000 / kDelayedPbftSyncDelayMs; auto pbft_sync_period = pbft_mgr_->pbftSyncingPeriod(); if (counter > max_delayed_pbft_sync_count) { diff --git a/libraries/core_libs/network/src/tarcap/taraxa_capability.cpp b/libraries/core_libs/network/src/tarcap/taraxa_capability.cpp index 0b61b58ba9..9fa9e5dd51 100644 --- a/libraries/core_libs/network/src/tarcap/taraxa_capability.cpp +++ b/libraries/core_libs/network/src/tarcap/taraxa_capability.cpp @@ -246,7 +246,7 @@ const TaraxaCapability::InitPacketsHandlers TaraxaCapability::kInitLatestVersion const std::shared_ptr &pbft_mgr, const std::shared_ptr &pbft_chain, const std::shared_ptr &vote_mgr, const std::shared_ptr &dag_mgr, const std::shared_ptr &trx_mgr, const 
std::shared_ptr &slashing_manager, - const std::shared_ptr &pillar_chain_mgr, TarcapVersion version, + const std::shared_ptr &pillar_chain_mgr, TarcapVersion, const addr_t &node_addr) { auto packets_handlers = std::make_shared(); // Consensus packets with high processing priority @@ -299,7 +299,7 @@ const TaraxaCapability::InitPacketsHandlers TaraxaCapability::kInitV3Handlers = const std::shared_ptr &pbft_mgr, const std::shared_ptr &pbft_chain, const std::shared_ptr &vote_mgr, const std::shared_ptr &dag_mgr, const std::shared_ptr &trx_mgr, const std::shared_ptr &slashing_manager, - const std::shared_ptr &pillar_chain_mgr, TarcapVersion version, + const std::shared_ptr &pillar_chain_mgr, TarcapVersion, const addr_t &node_addr) { auto packets_handlers = std::make_shared(); // Consensus packets with high processing priority diff --git a/libraries/core_libs/storage/src/storage.cpp b/libraries/core_libs/storage/src/storage.cpp index ec9a6edeac..ed497069e2 100644 --- a/libraries/core_libs/storage/src/storage.cpp +++ b/libraries/core_libs/storage/src/storage.cpp @@ -400,7 +400,9 @@ std::optional DbStorage::getGenesisHash() { DbStorage::~DbStorage() { for (auto cf : handles_) { - checkStatus(db_->DestroyColumnFamilyHandle(cf)); + if (cf->GetName() != "default") { + checkStatus(db_->DestroyColumnFamilyHandle(cf)); + } } checkStatus(db_->Close()); } @@ -1263,7 +1265,7 @@ std::vector> DbStorage::getFinalizedDagBlockByPeriod(P auto dag_blocks_data = dev::RLP(period_data)[DAG_BLOCKS_POS_IN_PERIOD_DATA]; auto dag_blocks = decodeDAGBlocksBundleRlp(dag_blocks_data); ret.reserve(dag_blocks.size()); - for (auto const block : dag_blocks) { + for (const auto& block : dag_blocks) { ret.emplace_back(std::make_shared(std::move(block))); } } @@ -1279,7 +1281,7 @@ DbStorage::getLastPbftBlockHashAndFinalizedDagBlockByPeriod(PbftPeriod period) { auto dag_blocks_data = period_data_rlp[DAG_BLOCKS_POS_IN_PERIOD_DATA]; auto dag_blocks = decodeDAGBlocksBundleRlp(dag_blocks_data); 
ret.reserve(dag_blocks.size()); - for (auto const block : dag_blocks) { + for (const auto& block : dag_blocks) { ret.emplace_back(std::make_shared(std::move(block))); } last_pbft_block_hash = diff --git a/libraries/types/dag_block/src/dag_block_bundle_rlp.cpp b/libraries/types/dag_block/src/dag_block_bundle_rlp.cpp index af34357631..307815d245 100644 --- a/libraries/types/dag_block/src/dag_block_bundle_rlp.cpp +++ b/libraries/types/dag_block/src/dag_block_bundle_rlp.cpp @@ -61,7 +61,7 @@ std::vector decodeDAGBlocksBundleRlp(const dev::RLP& blocks_bundle_rlp std::transform(blocks_bundle_rlp[0].begin(), blocks_bundle_rlp[0].end(), std::back_inserter(ordered_trx_hashes), [](const auto& trx_hash_rlp) { return trx_hash_rlp.template toHash(); }); - for (const auto& idx_rlp : blocks_bundle_rlp[1]) { + for (const auto idx_rlp : blocks_bundle_rlp[1]) { std::vector hashes; hashes.reserve(idx_rlp.itemCount()); std::transform(idx_rlp.begin(), idx_rlp.end(), std::back_inserter(hashes), diff --git a/libraries/types/transaction/include/transaction/transaction.hpp b/libraries/types/transaction/include/transaction/transaction.hpp index a79c6fb271..178be697a0 100644 --- a/libraries/types/transaction/include/transaction/transaction.hpp +++ b/libraries/types/transaction/include/transaction/transaction.hpp @@ -54,6 +54,7 @@ struct Transaction { const secret_t &sk, const std::optional &receiver = std::nullopt, uint64_t chain_id = 0); explicit Transaction(const dev::RLP &_rlp, bool verify_strict = false, const h256 &hash = {}); explicit Transaction(const bytes &_rlp, bool verify_strict = false, const h256 &hash = {}); + virtual ~Transaction() = default; auto isZero() const { return is_zero_; } const trx_hash_t &getHash() const; diff --git a/libraries/types/transaction/src/system_transaction.cpp b/libraries/types/transaction/src/system_transaction.cpp index 378fd324b8..5924c799cf 100644 --- a/libraries/types/transaction/src/system_transaction.cpp +++ 
b/libraries/types/transaction/src/system_transaction.cpp @@ -40,7 +40,7 @@ SystemTransaction::SystemTransaction(const dev::RLP &_rlp, bool verify_strict, c const addr_t &SystemTransaction::getSender() const { return sender_; } -void SystemTransaction::streamRLP(dev::RLPStream &s, bool for_signature) const { +void SystemTransaction::streamRLP(dev::RLPStream &s, bool) const { // always serialize as for the signature s.appendList(9); s << nonce_ << gas_price_ << gas_; diff --git a/libraries/types/vote/include/vote/vote.hpp b/libraries/types/vote/include/vote/vote.hpp index 5277c5712b..a51a07990f 100644 --- a/libraries/types/vote/include/vote/vote.hpp +++ b/libraries/types/vote/include/vote/vote.hpp @@ -16,6 +16,7 @@ class Vote { public: Vote() = default; Vote(const blk_hash_t& block_hash); + virtual ~Vote() = default; /** * @brief Sign the vote diff --git a/submodules/CMakeLists.txt b/submodules/CMakeLists.txt index 117cbcd879..4363497eb8 100644 --- a/submodules/CMakeLists.txt +++ b/submodules/CMakeLists.txt @@ -84,11 +84,15 @@ add_make_target(vrf libsodium.a "${VRF_AUTOTOOLS_CMD} && ${CMAKE_MAKE_PROGRAM} & # Add taraxa-evm set(EVM_BUILD_DIR ${BUILD_DIR_PREFIX}/taraxa-evm) ## add include of libs -set(EVM_BUILD_INCLUDE -I${rocksdb_SOURCE_DIR}/include) +set(EVM_BUILD_INCLUDE -I${CONAN_INCLUDE_DIRS_ROCKSDB}) ## set C flags set(EVM_BUILD_CGO_CFLAGS -O3 ${EVM_BUILD_INCLUDE}) ## add link of libs -set(EVM_BUILD_LD -L${CMAKE_BINARY_DIR}/lib -lrocksdb -L${CONAN_LIB_DIRS_LZ4} -llz4) +set(EVM_BUILD_LD -L${CONAN_LIB_DIRS_ROCKSDB} -lrocksdb -L${CONAN_LIB_DIRS_LZ4} -llz4) +## add path to homebew installed libs on macos +if (APPLE) + set(EVM_BUILD_LD ${EVM_BUILD_LD} -L/opt/homebrew/lib) +endif() ## if we need full static build use flag if(TARAXA_STATIC_BUILD) if (NOT APPLE) @@ -126,7 +130,7 @@ add_custom_command( COMMENT "Building taraxa-evm library") add_custom_target(taraxa_evm_build DEPENDS ${EVM_BUILD_DIR}/lib/${EVM_LIBRARY_NAME}) -add_dependencies(taraxa_evm_build rocksdb) 
+add_dependencies(taraxa_evm_build CONAN_PKG::rocksdb) add_library(taraxa-evm INTERFACE) add_dependencies(taraxa-evm taraxa_evm_build) diff --git a/submodules/taraxa-evm b/submodules/taraxa-evm index 1187755d64..e4d990edb9 160000 --- a/submodules/taraxa-evm +++ b/submodules/taraxa-evm @@ -1 +1 @@ -Subproject commit 1187755d649f1dc5864a24cf26be65b03ef77fae +Subproject commit e4d990edb9c8dbc586ae99236f3999693dc8282a diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index 78b0e2aa21..44f7e183c0 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -29,7 +29,7 @@ target_link_libraries(dag_test test_util) add_test(dag_test ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/dag_test) add_executable(final_chain_test final_chain_test.cpp) -target_link_libraries(final_chain_test test_util vote) +target_link_libraries(final_chain_test test_util) add_test(final_chain_test ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/final_chain_test) add_executable(pillar_chain_test pillar_chain_test.cpp) @@ -37,7 +37,7 @@ target_link_libraries(pillar_chain_test test_util) add_test(pillar_chain_test ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/pillar_chain_test) add_executable(full_node_test full_node_test.cpp) -target_link_libraries(full_node_test test_util vote) +target_link_libraries(full_node_test test_util) add_test(full_node_test ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/full_node_test) add_executable(network_test network_test.cpp) diff --git a/tests/network_test.cpp b/tests/network_test.cpp index fbc16bd7d6..ce5bfb7767 100644 --- a/tests/network_test.cpp +++ b/tests/network_test.cpp @@ -1188,7 +1188,6 @@ TEST_F(NetworkTest, transaction_gossip_selection) { dev::p2p::NodeID node_id3(node_key3.pub()); addr_t node_addr1(node_key1.address()); addr_t node_addr2(node_key2.address()); - addr_t node_addr3(node_key3.address()); auto peers_state = std::make_shared(std::weak_ptr(), FullNodeConfig()); peers_state->addPendingPeer(node_id1, {}); diff --git a/tests/pbft_manager_test.cpp b/tests/pbft_manager_test.cpp index 
acd204ab9e..cc7cdb2f35 100644 --- a/tests/pbft_manager_test.cpp +++ b/tests/pbft_manager_test.cpp @@ -730,7 +730,6 @@ TEST_F(PbftManagerWithDagCreation, state_root_hash) { WAIT_EXPECT_EQ(ctx, node->getDB()->getNumTransactionExecuted(), nonce - 1); }); - const auto &state_root_delay = node_cfgs.front().genesis.state.dpos.delegation_delay; const auto &head_hash = node->getPbftChain()->getLastPbftBlockHash(); auto pbft_block = node->getPbftChain()->getPbftBlockInChain(head_hash); // Check that all produced blocks have correct state_root_hashes diff --git a/tests/pillar_chain_test.cpp b/tests/pillar_chain_test.cpp index aef49243a0..5f6cc5d572 100644 --- a/tests/pillar_chain_test.cpp +++ b/tests/pillar_chain_test.cpp @@ -21,16 +21,12 @@ TEST_F(PillarChainTest, pillar_chain_db) { blk_hash_t previous_pillar_block_hash(789); std::vector votes_count_changes; - const auto vote_count_change1 = votes_count_changes.emplace_back(addr_t(1), 1); - const auto vote_count_change2 = votes_count_changes.emplace_back(addr_t(2), 2); const auto pillar_block = std::make_shared( pillar_block_period, state_root, previous_pillar_block_hash, h256{}, 0, std::move(votes_count_changes)); // Pillar block vote counts std::vector vote_counts; - const auto stake1 = votes_count_changes.emplace_back(addr_t(123), 123); - const auto stake2 = votes_count_changes.emplace_back(addr_t(456), 456); // Current pillar block data - block + vote counts pillar_chain::CurrentPillarBlockDataDb current_pillar_block_data{pillar_block, vote_counts}; @@ -400,12 +396,8 @@ TEST_F(PillarChainTest, pillar_block_solidity_rlp_encoding) { blk_hash_t previous_pillar_block_hash(789); std::vector votes_count_changes; - const auto vote_count_change1 = votes_count_changes.emplace_back(addr_t(1), 1); - const auto vote_count_change2 = votes_count_changes.emplace_back(addr_t(2), 2); - - auto vcc = votes_count_changes; const auto pillar_block = pillar_chain::PillarBlock(pillar_block_period, state_root, previous_pillar_block_hash, - 
bridge_root, epoch, std::move(vcc)); + bridge_root, epoch, std::move(votes_count_changes)); auto validateDecodedPillarBlock = [&](const pillar_chain::PillarBlock& pillar_block) { ASSERT_EQ(pillar_block.getPeriod(), pillar_block_period); diff --git a/tests/rewards_stats_test.cpp b/tests/rewards_stats_test.cpp index b7052c2c45..ea813a1c44 100644 --- a/tests/rewards_stats_test.cpp +++ b/tests/rewards_stats_test.cpp @@ -22,7 +22,8 @@ class TestableRewardsStats : public rewards::Stats { TestableRewardsStats(const HardforksConfig::RewardsDistributionMap& rdm, std::shared_ptr db) : rewards::Stats( 100, - HardforksConfig{0, {}, rdm, MagnoliaHardfork{0, 0}, 0, 0, AspenHardfork{0, 0}, FicusHardforkConfig{0, 0}}, + HardforksConfig{ + 0, {}, rdm, MagnoliaHardfork{0, 0}, 0, 0, AspenHardfork{0, 0}, FicusHardforkConfig{0, 0, {}}}, db, [](auto) { return 100; }) {} auto getStats() { return blocks_stats_; } }; @@ -240,12 +241,14 @@ TEST_F(RewardsStatsTest, dagBlockRewards) { hfc.aspen_hf.block_num_part_two = 4; // Create two reward stats to test before and after aspen hardfork part 1 - rewards::Stats pre_aspen_reward_stats(100, - HardforksConfig{0, {}, {}, MagnoliaHardfork{0, 0}, 0, 0, AspenHardfork{6, 999}}, - db, [](auto) { return 100; }); + rewards::Stats pre_aspen_reward_stats( + 100, + HardforksConfig{0, {}, {}, MagnoliaHardfork{0, 0}, 0, 0, AspenHardfork{6, 999}, FicusHardforkConfig{0, 0, {}}}, + db, [](auto) { return 100; }); rewards::Stats post_aspen_reward_stats( - 100, HardforksConfig{0, {}, {}, MagnoliaHardfork{0, 0}, 0, 0, AspenHardfork{4, 999}}, db, - [](auto) { return 100; }); + 100, + HardforksConfig{0, {}, {}, MagnoliaHardfork{0, 0}, 0, 0, AspenHardfork{4, 999}, FicusHardforkConfig{0, 0, {}}}, + db, [](auto) { return 100; }); // Create pbft block with 5 dag blocks auto dag_key1 = dev::KeyPair::create(); diff --git a/tests/test_util/gtest.hpp b/tests/test_util/gtest.hpp index ef4f01e94f..2cf0248748 100644 --- a/tests/test_util/gtest.hpp +++ b/tests/test_util/gtest.hpp 
@@ -46,7 +46,7 @@ struct WithDataDir : virtual BaseTest { std::filesystem::remove_all(data_dir); std::filesystem::create_directories(data_dir); } - virtual ~WithDataDir() { std::filesystem::remove_all(data_dir); } + virtual ~WithDataDir() = default; // { std::filesystem::remove_all(data_dir); } WithDataDir(const WithDataDir &) = delete; WithDataDir(WithDataDir &&) = delete; diff --git a/tests/test_util/include/test_util/test_util.hpp b/tests/test_util/include/test_util/test_util.hpp index a5f57b493e..a537279b47 100644 --- a/tests/test_util/include/test_util/test_util.hpp +++ b/tests/test_util/include/test_util/test_util.hpp @@ -192,8 +192,8 @@ std::shared_ptr genDummyVote(PbftVoteTypes type, PbftPeriod period, Pb std::pair clearAllVotes(const std::vector>& nodes); struct NodesTest : virtual WithDataDir { - virtual ~NodesTest() {} NodesTest(); + virtual ~NodesTest() { CleanupDirs(); } NodesTest(const NodesTest&) = delete; NodesTest(NodesTest&&) = delete; NodesTest& operator=(const NodesTest&) = delete; @@ -203,8 +203,6 @@ struct NodesTest : virtual WithDataDir { void CleanupDirs(); - void TearDown() override; - std::vector make_node_cfgs(size_t total_count, size_t validators_count = 1, uint tests_speed = 1, bool enable_rpc_http = false, bool enable_rpc_ws = false); diff --git a/tests/test_util/src/test_util.cpp b/tests/test_util/src/test_util.cpp index dbb4f5ec54..4a225a8b7b 100644 --- a/tests/test_util/src/test_util.cpp +++ b/tests/test_util/src/test_util.cpp @@ -273,8 +273,6 @@ void NodesTest::CleanupDirs() { } } -void NodesTest::TearDown() { CleanupDirs(); } - std::vector NodesTest::make_node_cfgs(size_t total_count, size_t validators_count, uint tests_speed, bool enable_rpc_http, bool enable_rpc_ws) { From 08ca49a65859f1e37e0989255fb69dbcd69df314 Mon Sep 17 00:00:00 2001 From: mfrankovi Date: Wed, 18 Sep 2024 16:29:42 +0200 Subject: [PATCH 043/105] fix: dag block bundle --- .../types/dag_block/src/dag_block_bundle_rlp.cpp | 12 ++++++------ 1 file changed, 6 
insertions(+), 6 deletions(-) diff --git a/libraries/types/dag_block/src/dag_block_bundle_rlp.cpp b/libraries/types/dag_block/src/dag_block_bundle_rlp.cpp index 307815d245..b57a196669 100644 --- a/libraries/types/dag_block/src/dag_block_bundle_rlp.cpp +++ b/libraries/types/dag_block/src/dag_block_bundle_rlp.cpp @@ -12,16 +12,16 @@ dev::bytes encodeDAGBlocksBundleRlp(const std::vector& blocks) { return {}; } - std::unordered_map trx_hash_map; // Map to store transaction hash and its index + std::unordered_map trx_hash_map; // Map to store transaction hash and its index std::vector ordered_trx_hashes; - std::vector> indexes; + std::vector> indexes; for (const auto& block : blocks) { - std::vector idx; + std::vector idx; idx.reserve(block.getTrxs().size()); for (const auto& trx : block.getTrxs()) { - if (const auto [_, ok] = trx_hash_map.try_emplace(trx, static_cast(trx_hash_map.size())); ok) { + if (const auto [_, ok] = trx_hash_map.try_emplace(trx, static_cast(trx_hash_map.size())); ok) { ordered_trx_hashes.push_back(trx); // Track the insertion order } idx.push_back(trx_hash_map[trx]); @@ -65,7 +65,7 @@ std::vector decodeDAGBlocksBundleRlp(const dev::RLP& blocks_bundle_rlp std::vector hashes; hashes.reserve(idx_rlp.itemCount()); std::transform(idx_rlp.begin(), idx_rlp.end(), std::back_inserter(hashes), - [&ordered_trx_hashes](const auto& i) { return ordered_trx_hashes[i.template toInt()]; }); + [&ordered_trx_hashes](const auto& i) { return ordered_trx_hashes[i.template toInt()]; }); dags_trx_hashes.push_back(std::move(hashes)); } @@ -98,7 +98,7 @@ std::shared_ptr decodeDAGBlockBundleRlp(uint64_t index, const dev::RLP std::vector hashes; hashes.reserve(idx_rlp.itemCount()); std::transform(idx_rlp.begin(), idx_rlp.end(), std::back_inserter(hashes), - [&ordered_trx_hashes](const auto& i) { return ordered_trx_hashes[i.template toInt()]; }); + [&ordered_trx_hashes](const auto& i) { return ordered_trx_hashes[i.template toInt()]; }); return 
std::make_shared(blocks_bundle_rlp[2][index], std::move(hashes)); } From dc7ce5cbe3f7aab2584a5e29d762108bec03b9eb Mon Sep 17 00:00:00 2001 From: mfrankovi Date: Fri, 20 Sep 2024 10:55:32 +0200 Subject: [PATCH 044/105] fix: remove undelegations v2 --- submodules/taraxa-evm | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/submodules/taraxa-evm b/submodules/taraxa-evm index e4d990edb9..6fe5665e7a 160000 --- a/submodules/taraxa-evm +++ b/submodules/taraxa-evm @@ -1 +1 @@ -Subproject commit e4d990edb9c8dbc586ae99236f3999693dc8282a +Subproject commit 6fe5665e7ae281ef1e4391fadf297f5d35303f7f From 5c33303d49c865f3d1cdc088973d23b4eaf18cd4 Mon Sep 17 00:00:00 2001 From: Matus Kysel Date: Mon, 23 Sep 2024 16:27:12 +0200 Subject: [PATCH 045/105] bugfix: split pillar vote packet to serverl ones --- CMakeLists.txt | 2 +- .../pillar_votes_bundle_packet_handler.hpp | 2 +- ...get_pillar_votes_bundle_packet_handler.cpp | 40 ++++++++++++++----- 3 files changed, 33 insertions(+), 11 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 08bdd2e088..c033e469b6 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -3,7 +3,7 @@ cmake_minimum_required(VERSION 3.20) # Set current version of the project set(TARAXA_MAJOR_VERSION 1) set(TARAXA_MINOR_VERSION 11) -set(TARAXA_PATCH_VERSION 3) +set(TARAXA_PATCH_VERSION 4) set(TARAXA_VERSION ${TARAXA_MAJOR_VERSION}.${TARAXA_MINOR_VERSION}.${TARAXA_PATCH_VERSION}) # Any time a change in the network protocol is introduced this version should be increased diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/pillar_votes_bundle_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/pillar_votes_bundle_packet_handler.hpp index 550ca3e6a8..15a9ccfff7 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/pillar_votes_bundle_packet_handler.hpp +++ 
b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/pillar_votes_bundle_packet_handler.hpp @@ -18,7 +18,7 @@ class PillarVotesBundlePacketHandler : public ExtPillarVotePacketHandler { virtual void validatePacketRlpFormat(const threadpool::PacketData& packet_data) const override; virtual void process(const threadpool::PacketData& packet_data, const std::shared_ptr& peer) override; - protected: + public: constexpr static size_t kMaxPillarVotesInBundleRlp{250}; }; diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/get_pillar_votes_bundle_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/get_pillar_votes_bundle_packet_handler.cpp index 2c01eff1ae..cc71e189e9 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/get_pillar_votes_bundle_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/get_pillar_votes_bundle_packet_handler.cpp @@ -1,5 +1,7 @@ #include "network/tarcap/packets_handlers/latest/get_pillar_votes_bundle_packet_handler.hpp" +#include "network/tarcap/packets_handlers/latest/pillar_votes_bundle_packet_handler.hpp" + namespace taraxa::network::tarcap { GetPillarVotesBundlePacketHandler::GetPillarVotesBundlePacketHandler( @@ -42,19 +44,39 @@ void GetPillarVotesBundlePacketHandler::process(const threadpool::PacketData &pa LOG(log_dg_) << "No pillar votes for period " << period << "and pillar block hash " << pillar_block_hash; return; } + // Check if the votes size exceeds the maximum limit and split into multiple packets if needed + const size_t total_votes = votes.size(); + size_t votes_sent = 0; - dev::RLPStream s(votes.size()); - for (const auto &sig : votes) { - s.appendRaw(sig->rlp()); - } + while (votes_sent < total_votes) { + // Determine the size of the current chunk + const size_t chunk_size = + std::min(PillarVotesBundlePacketHandler::kMaxPillarVotesInBundleRlp, total_votes - votes_sent); + + // Create a new 
RLPStream for the chunk + dev::RLPStream s(chunk_size); + for (size_t i = 0; i < chunk_size; ++i) { + const auto &sig = votes[votes_sent + i]; + s.appendRaw(sig->rlp()); + } + + // Seal and send the chunk to the peer + if (sealAndSend(peer->getId(), SubprotocolPacketType::PillarVotesBundlePacket, std::move(s))) { + // Mark the votes in this chunk as known + for (size_t i = 0; i < chunk_size; ++i) { + peer->markPillarVoteAsKnown(votes[votes_sent + i]->getHash()); + } - if (sealAndSend(peer->getId(), SubprotocolPacketType::PillarVotesBundlePacket, std::move(s))) { - for (const auto &vote : votes) { - peer->markPillarVoteAsKnown(vote->getHash()); + LOG(log_nf_) << "Pillar votes bundle for period " << period << ", hash " << pillar_block_hash << " sent to " + << peer->getId() << " (Chunk " + << (votes_sent / PillarVotesBundlePacketHandler::kMaxPillarVotesInBundleRlp) + 1 << "/" + << (total_votes + PillarVotesBundlePacketHandler::kMaxPillarVotesInBundleRlp - 1) / + PillarVotesBundlePacketHandler::kMaxPillarVotesInBundleRlp + << ")"; } - LOG(log_nf_) << "Pillar votes bundle for period " << period << ", hash " << pillar_block_hash << " sent to " - << peer->getId(); + // Update the votes_sent counter + votes_sent += chunk_size; } } From 2d66b0c43de91196326eaead19a2c06bf7b8f206 Mon Sep 17 00:00:00 2001 From: kstdl Date: Wed, 25 Sep 2024 12:57:15 +0200 Subject: [PATCH 046/105] chore: increase version and add cornus_hf block to testnet genesis --- CMakeLists.txt | 4 ++-- .../cli/include/cli/config_jsons/testnet/testnet_genesis.json | 2 +- submodules/taraxa-evm | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index c033e469b6..ffc5a0bc55 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -2,8 +2,8 @@ cmake_minimum_required(VERSION 3.20) # Set current version of the project set(TARAXA_MAJOR_VERSION 1) -set(TARAXA_MINOR_VERSION 11) -set(TARAXA_PATCH_VERSION 4) +set(TARAXA_MINOR_VERSION 12) +set(TARAXA_PATCH_VERSION 0) 
set(TARAXA_VERSION ${TARAXA_MAJOR_VERSION}.${TARAXA_MINOR_VERSION}.${TARAXA_PATCH_VERSION}) # Any time a change in the network protocol is introduced this version should be increased diff --git a/libraries/cli/include/cli/config_jsons/testnet/testnet_genesis.json b/libraries/cli/include/cli/config_jsons/testnet/testnet_genesis.json index 7f78edeef1..b23a324f66 100644 --- a/libraries/cli/include/cli/config_jsons/testnet/testnet_genesis.json +++ b/libraries/cli/include/cli/config_jsons/testnet/testnet_genesis.json @@ -154,6 +154,6 @@ "pillar_blocks_interval": 1000, "bridge_contract_address": "0xcAF2b453FE8382a4B8110356DF0508f6d71F22BF" }, - "cornus_hf_block_num": -1 + "cornus_hf_block_num": 1529200 } } \ No newline at end of file diff --git a/submodules/taraxa-evm b/submodules/taraxa-evm index 6fe5665e7a..9ca283df7f 160000 --- a/submodules/taraxa-evm +++ b/submodules/taraxa-evm @@ -1 +1 @@ -Subproject commit 6fe5665e7ae281ef1e4391fadf297f5d35303f7f +Subproject commit 9ca283df7f6904be58dd71c56278b9fd9c2f53c5 From f530b5068796ba597157b63d4b088ae1f3006913 Mon Sep 17 00:00:00 2001 From: kstdl Date: Thu, 26 Sep 2024 13:36:07 +0200 Subject: [PATCH 047/105] chore: update testnet cornus block number --- .../cli/include/cli/config_jsons/testnet/testnet_genesis.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/cli/include/cli/config_jsons/testnet/testnet_genesis.json b/libraries/cli/include/cli/config_jsons/testnet/testnet_genesis.json index b23a324f66..71889b5bc8 100644 --- a/libraries/cli/include/cli/config_jsons/testnet/testnet_genesis.json +++ b/libraries/cli/include/cli/config_jsons/testnet/testnet_genesis.json @@ -154,6 +154,6 @@ "pillar_blocks_interval": 1000, "bridge_contract_address": "0xcAF2b453FE8382a4B8110356DF0508f6d71F22BF" }, - "cornus_hf_block_num": 1529200 + "cornus_hf_block_num": 1622000 } } \ No newline at end of file From 42560c09879ab81ad607faf1791295a5589c0ee5 Mon Sep 17 00:00:00 2001 From: mfrankovi Date: Mon, 12 Aug 2024 
09:12:32 +0200 Subject: [PATCH 048/105] chore: dag block proposal limit --- .../cli/config_jsons/default/default_config.json | 2 +- .../cli/config_jsons/devnet/devnet_config.json | 2 +- .../cli/config_jsons/mainnet/mainnet_config.json | 2 +- .../cli/config_jsons/testnet/testnet_config.json | 2 +- libraries/common/include/common/constants.hpp | 1 + .../consensus/src/dag/dag_block_proposer.cpp | 13 ++++++++++--- .../core_libs/network/include/network/network.hpp | 7 +++++++ libraries/core_libs/network/src/network.cpp | 6 ++++++ 8 files changed, 28 insertions(+), 7 deletions(-) diff --git a/libraries/cli/include/cli/config_jsons/default/default_config.json b/libraries/cli/include/cli/config_jsons/default/default_config.json index 72e9409c3f..0f72c0bc9b 100644 --- a/libraries/cli/include/cli/config_jsons/default/default_config.json +++ b/libraries/cli/include/cli/config_jsons/default/default_config.json @@ -24,7 +24,7 @@ "packets_stats_time_period_ms": 60000, "peer_max_packets_processing_time_us": 10000000, "peer_max_packets_queue_size_limit": 100000, - "max_packets_queue_size": 200000 + "max_packets_queue_size": 100 }, "listen_ip": "0.0.0.0", "listen_port": 10002, diff --git a/libraries/cli/include/cli/config_jsons/devnet/devnet_config.json b/libraries/cli/include/cli/config_jsons/devnet/devnet_config.json index 47d0713725..2f812df6f4 100644 --- a/libraries/cli/include/cli/config_jsons/devnet/devnet_config.json +++ b/libraries/cli/include/cli/config_jsons/devnet/devnet_config.json @@ -24,7 +24,7 @@ "packets_stats_time_period_ms": 60000, "peer_max_packets_processing_time_us": 10000000, "peer_max_packets_queue_size_limit": 100000, - "max_packets_queue_size": 200000 + "max_packets_queue_size": 100 }, "listen_ip": "0.0.0.0", "listen_port": 10002, diff --git a/libraries/cli/include/cli/config_jsons/mainnet/mainnet_config.json b/libraries/cli/include/cli/config_jsons/mainnet/mainnet_config.json index 66cb1b4499..f82d06c4f2 100644 --- 
a/libraries/cli/include/cli/config_jsons/mainnet/mainnet_config.json +++ b/libraries/cli/include/cli/config_jsons/mainnet/mainnet_config.json @@ -24,7 +24,7 @@ "packets_stats_time_period_ms": 60000, "peer_max_packets_processing_time_us": 0, "peer_max_packets_queue_size_limit": 0, - "max_packets_queue_size": 200000 + "max_packets_queue_size": 100 }, "listen_ip": "0.0.0.0", "listen_port": 10002, diff --git a/libraries/cli/include/cli/config_jsons/testnet/testnet_config.json b/libraries/cli/include/cli/config_jsons/testnet/testnet_config.json index d444117b3c..676a6428c8 100644 --- a/libraries/cli/include/cli/config_jsons/testnet/testnet_config.json +++ b/libraries/cli/include/cli/config_jsons/testnet/testnet_config.json @@ -24,7 +24,7 @@ "packets_stats_time_period_ms": 60000, "peer_max_packets_processing_time_us": 0, "peer_max_packets_queue_size_limit": 0, - "max_packets_queue_size": 200000 + "max_packets_queue_size": 100 }, "listen_ip": "0.0.0.0", "listen_port": 10002, diff --git a/libraries/common/include/common/constants.hpp b/libraries/common/include/common/constants.hpp index 5d7e4540e1..9122c34c96 100644 --- a/libraries/common/include/common/constants.hpp +++ b/libraries/common/include/common/constants.hpp @@ -29,6 +29,7 @@ const uint64_t kMinTxGas{21000}; constexpr uint32_t kMinTransactionPoolSize{30000}; constexpr uint32_t kDefaultTransactionPoolSize{200000}; +constexpr uint32_t kMaxNonFinalizedTransactions{1000000}; const size_t kV2NetworkVersion = 2; diff --git a/libraries/core_libs/consensus/src/dag/dag_block_proposer.cpp b/libraries/core_libs/consensus/src/dag/dag_block_proposer.cpp index cad7733b8c..9587b11daf 100644 --- a/libraries/core_libs/consensus/src/dag/dag_block_proposer.cpp +++ b/libraries/core_libs/consensus/src/dag/dag_block_proposer.cpp @@ -51,6 +51,11 @@ bool DagBlockProposer::proposeDagBlock() { return false; } + // Do not propose dag blocks if number of non finalized transactions is over the limit + if (trx_mgr_->getNonfinalizedTrxSize() > 
kMaxNonFinalizedTransactions) { + return false; + } + auto frontier = dag_mgr_->getDagFrontier(); LOG(log_dg_) << "Get frontier with pivot: " << frontier.pivot << " tips: " << frontier.tips; assert(!frontier.pivot.isZero()); @@ -183,12 +188,14 @@ void DagBlockProposer::start() { while (!stopped_) { // Blocks are not proposed if we are behind the network and still syncing auto syncing = false; + auto packets_over_the_limit = false; if (auto net = network_.lock()) { syncing = net->pbft_syncing(); + packets_over_the_limit = net->packetQueueOverLimit(); } - // Only sleep if block was not proposed or if we are syncing, if block is proposed try to propose another block - // immediately - if (syncing || !proposeDagBlock()) { + // Only sleep if block was not proposed or if we are syncing or if packets queue is over the limit, if block is + // proposed try to propose another block immediately + if (syncing || packets_over_the_limit || !proposeDagBlock()) { thisThreadSleepForMilliSeconds(min_proposal_delay); } } diff --git a/libraries/core_libs/network/include/network/network.hpp b/libraries/core_libs/network/include/network/network.hpp index 8c654eaaa7..30fd397944 100644 --- a/libraries/core_libs/network/include/network/network.hpp +++ b/libraries/core_libs/network/include/network/network.hpp @@ -77,6 +77,13 @@ class Network { */ void requestPillarBlockVotesBundle(PbftPeriod period, const blk_hash_t &pillar_block_hash); + /** + * @brief Get packets queue status + * + * @return true if packets queue is over the limit + */ + bool packetQueueOverLimit() const; + // METHODS USED IN TESTS ONLY template std::shared_ptr getSpecificHandler() const; diff --git a/libraries/core_libs/network/src/network.cpp b/libraries/core_libs/network/src/network.cpp index 9091c68571..248f773951 100644 --- a/libraries/core_libs/network/src/network.cpp +++ b/libraries/core_libs/network/src/network.cpp @@ -132,6 +132,12 @@ void Network::start() { bool Network::isStarted() { return tp_.is_running(); } 
+bool Network::packetQueueOverLimit() const { + auto [hp_queue_size, mp_queue_size, lp_queue_size] = packets_tp_->getQueueSize(); + auto total_size = hp_queue_size + mp_queue_size + lp_queue_size; + return total_size > kConf.network.ddos_protection.max_packets_queue_size; +} + std::list Network::getAllNodes() const { return host_->getNodes(); } size_t Network::getPeerCount() { return host_->peer_count(); } From 43be8be8eb5ddd70c997d909b453c57e2ee604a2 Mon Sep 17 00:00:00 2001 From: Matus Kysel Date: Thu, 26 Sep 2024 16:02:54 +0200 Subject: [PATCH 049/105] chore: update to latest dependecies --- CMakeModules/cpp_graphql_gen.cmake | 2 +- conanfile.py | 10 +++++----- libraries/core_libs/consensus/src/dag/dag.cpp | 10 +++++----- 3 files changed, 11 insertions(+), 11 deletions(-) diff --git a/CMakeModules/cpp_graphql_gen.cmake b/CMakeModules/cpp_graphql_gen.cmake index 3816ec6256..8fff070ad2 100644 --- a/CMakeModules/cpp_graphql_gen.cmake +++ b/CMakeModules/cpp_graphql_gen.cmake @@ -6,7 +6,7 @@ set(Boost_NO_WARN_NEW_VERSIONS 1) FetchContent_Declare( cppgraphqlgen GIT_REPOSITORY https://github.com/microsoft/cppgraphqlgen.git - GIT_TAG v4.5.6 + GIT_TAG v4.5.8 GIT_SHALLOW TRUE ) set(GRAPHQL_BUILD_TESTS OFF) diff --git a/conanfile.py b/conanfile.py index 221513fb83..35a1038e44 100644 --- a/conanfile.py +++ b/conanfile.py @@ -13,14 +13,14 @@ class TaraxaConan(ConanFile): generators = "cmake" def requirements(self): - self.requires("boost/1.85.0") + self.requires("boost/1.86.0") self.requires("cppcheck/2.12") #TODO(2.14.1) - self.requires("openssl/3.2.1") + self.requires("openssl/3.3.2") self.requires("cryptopp/8.9.0") - self.requires("gtest/1.14.0") - self.requires("lz4/1.9.4") + self.requires("gtest/1.15.0") + self.requires("lz4/1.10.0") self.requires("rocksdb/9.2.1") - self.requires("prometheus-cpp/1.1.0") + self.requires("prometheus-cpp/1.2.4") self.requires("jsoncpp/1.9.5") def _configure_boost_libs(self): diff --git a/libraries/core_libs/consensus/src/dag/dag.cpp 
b/libraries/core_libs/consensus/src/dag/dag.cpp index 05adb111f3..f8a2f2ad8e 100644 --- a/libraries/core_libs/consensus/src/dag/dag.cpp +++ b/libraries/core_libs/consensus/src/dag/dag.cpp @@ -148,7 +148,7 @@ bool Dag::computeOrder(const blk_hash_t &anchor, std::vector &ordere dfs.push({cur.first, true}); std::vector> neighbors; // iterate through neighbors - for (std::tie(adj_s, adj_e) = adjacenct_vertices(cur.first, graph_); adj_s != adj_e; adj_s++) { + for (std::tie(adj_s, adj_e) = boost::adjacent_vertices(cur.first, graph_); adj_s != adj_e; adj_s++) { if (epfriend.find(index_map[*adj_s]) == epfriend.end()) { // not in this epoch continue; } @@ -183,7 +183,7 @@ bool Dag::reachable(vertex_t const &from, vertex_t const &to) const { vertex_t t = st.top(); st.pop(); vertex_adj_iter_t s, e; - for (std::tie(s, e) = adjacenct_vertices(t, graph_); s != e; ++s) { + for (std::tie(s, e) = boost::adjacent_vertices(t, graph_); s != e; ++s) { if (visited.count(*s)) continue; if (*s == target) return true; visited.insert(*s); @@ -221,7 +221,7 @@ std::vector PivotTree::getGhostPath(const blk_hash_t &vertex) const cur = st.top(); st.pop(); post_order.emplace_back(cur); - for (std::tie(s, e) = adjacenct_vertices(cur, graph_); s != e; s++) { + for (std::tie(s, e) = boost::adjacent_vertices(cur, graph_); s != e; s++) { st.emplace(*s); } } @@ -232,7 +232,7 @@ std::vector PivotTree::getGhostPath(const blk_hash_t &vertex) const for (auto const &n : post_order) { auto total_w = 0; // get childrens - for (std::tie(s, e) = adjacenct_vertices(n, graph_); s != e; s++) { + for (std::tie(s, e) = boost::adjacent_vertices(n, graph_); s != e; s++) { if (weight_map.count(*s)) { // bigger timestamp total_w += weight_map[*s]; } @@ -248,7 +248,7 @@ std::vector PivotTree::getGhostPath(const blk_hash_t &vertex) const size_t heavist = 0; vertex_t next = root; - for (std::tie(s, e) = adjacenct_vertices(root, graph_); s != e; s++) { + for (std::tie(s, e) = boost::adjacent_vertices(root, graph_); s != e; 
s++) { if (!weight_map.count(*s)) continue; // bigger timestamp size_t w = weight_map[*s]; assert(w > 0); From 7667f49c83461bebd82793214a9d5822e8b8dd00 Mon Sep 17 00:00:00 2001 From: Matus Kysel Date: Fri, 27 Sep 2024 11:19:57 +0200 Subject: [PATCH 050/105] update cpp-check --- CMakeModules/cppcheck.cmake | 1 + conanfile.py | 2 +- libraries/aleth/libdevcore/Common.h | 33 ------------------- .../config/include/config/config_utils.hpp | 2 +- libraries/config/src/config_utils.cpp | 2 +- libraries/core_libs/consensus/src/dag/dag.cpp | 1 - .../consensus/src/dag/dag_manager.cpp | 3 +- 7 files changed, 6 insertions(+), 38 deletions(-) diff --git a/CMakeModules/cppcheck.cmake b/CMakeModules/cppcheck.cmake index 1472b400d8..f8d8ceacf4 100644 --- a/CMakeModules/cppcheck.cmake +++ b/CMakeModules/cppcheck.cmake @@ -10,6 +10,7 @@ else () COMMAND ${CPP_CHECK_EXE} --error-exitcode=1 --enable=all + --check-level=exhaustive --suppress=missingInclude --suppress=missingIncludeSystem # find_if - useless here diff --git a/conanfile.py b/conanfile.py index 35a1038e44..9d1f5beccf 100644 --- a/conanfile.py +++ b/conanfile.py @@ -14,7 +14,7 @@ class TaraxaConan(ConanFile): def requirements(self): self.requires("boost/1.86.0") - self.requires("cppcheck/2.12") #TODO(2.14.1) + self.requires("cppcheck/2.15.0") self.requires("openssl/3.3.2") self.requires("cryptopp/8.9.0") self.requires("gtest/1.15.0") diff --git a/libraries/aleth/libdevcore/Common.h b/libraries/aleth/libdevcore/Common.h index 6c6b1b550b..6f89f48476 100644 --- a/libraries/aleth/libdevcore/Common.h +++ b/libraries/aleth/libdevcore/Common.h @@ -127,33 +127,6 @@ using strings = std::vector; // Null/Invalid values for convenience. extern bytes const NullBytes; -/// Interprets @a _u as a two's complement signed number and returns the -/// resulting s256. 
-inline s256 u2s(u256 _u) { - static const bigint c_end = bigint(1) << 256; - if (boost::multiprecision::bit_test(_u, 255)) - return s256(-(c_end - _u)); - else - return s256(_u); -} - -/// @returns the two's complement signed representation of the signed number _u. -inline u256 s2u(s256 _u) { - static const bigint c_end = bigint(1) << 256; - if (_u >= 0) - return u256(_u); - else - return u256(c_end + _u); -} - -/// @returns the smallest n >= 0 such that (1 << n) >= _x -inline unsigned int toLog2(u256 _x) { - unsigned ret; - for (ret = 0; _x >>= 1; ++ret) { - } - return ret; -} - template inline u256 exp10() { return exp10() * u256(10); @@ -164,12 +137,6 @@ inline u256 exp10<0>() { return u256(1); } -/// @returns the absolute distance between _a and _b. -template -inline N diff(N const& _a, N const& _b) { - return std::max(_a, _b) - std::min(_a, _b); -} - /// RAII utility class whose destructor calls a given function. class ScopeGuard { public: diff --git a/libraries/config/include/config/config_utils.hpp b/libraries/config/include/config/config_utils.hpp index 2586973d00..333c21bbdd 100644 --- a/libraries/config/include/config/config_utils.hpp +++ b/libraries/config/include/config/config_utils.hpp @@ -14,7 +14,7 @@ std::string getConfigErr(const std::vector &path); Json::Value getConfigData(Json::Value root, const std::vector &path, bool optional = false); std::string getConfigDataAsString(const Json::Value &root, const std::vector &path, bool optional = false, - std::string value = {}); + const std::string &value = {}); uint32_t getConfigDataAsUInt(const Json::Value &root, const std::vector &path, bool optional = false, uint32_t value = 0); diff --git a/libraries/config/src/config_utils.cpp b/libraries/config/src/config_utils.cpp index 2daab50d43..f7dc9dc475 100644 --- a/libraries/config/src/config_utils.cpp +++ b/libraries/config/src/config_utils.cpp @@ -28,7 +28,7 @@ Json::Value getConfigData(Json::Value root, const std::vector &path } std::string 
getConfigDataAsString(const Json::Value &root, const std::vector &path, bool optional, - std::string value) { + const std::string &value) { try { Json::Value ret = getConfigData(root, path, optional); if (ret.isNull()) { diff --git a/libraries/core_libs/consensus/src/dag/dag.cpp b/libraries/core_libs/consensus/src/dag/dag.cpp index f8a2f2ad8e..d6c45716fb 100644 --- a/libraries/core_libs/consensus/src/dag/dag.cpp +++ b/libraries/core_libs/consensus/src/dag/dag.cpp @@ -257,7 +257,6 @@ std::vector PivotTree::getGhostPath(const blk_hash_t &vertex) const next = *s; } else if (w == heavist) { if (index_map[*s] < index_map[next]) { - heavist = w; next = *s; } } diff --git a/libraries/core_libs/consensus/src/dag/dag_manager.cpp b/libraries/core_libs/consensus/src/dag/dag_manager.cpp index 27d178fe08..b08db434e4 100644 --- a/libraries/core_libs/consensus/src/dag/dag_manager.cpp +++ b/libraries/core_libs/consensus/src/dag/dag_manager.cpp @@ -375,7 +375,8 @@ uint DagManager::setDagBlockOrder(blk_hash_t const &new_anchor, PbftPeriod perio } else { db_->removeDagBlock(blk_hash); seen_blocks_.erase(blk_hash); - for (const auto &trx : dag_block->getTrxs()) expired_dag_blocks_transactions.emplace_back(trx); + const auto dag_trxs = dag_block->getTrxs(); + std::copy(dag_trxs.begin(), dag_trxs.end(), std::back_inserter(expired_dag_blocks_transactions)); } } } From b81ba77b8cace6f6306fea2af9689b6a4cbe36b7 Mon Sep 17 00:00:00 2001 From: Matus Kysel Date: Fri, 27 Sep 2024 13:37:59 +0200 Subject: [PATCH 051/105] chore: add period to error output --- libraries/core_libs/consensus/src/pbft/pbft_manager.cpp | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/libraries/core_libs/consensus/src/pbft/pbft_manager.cpp b/libraries/core_libs/consensus/src/pbft/pbft_manager.cpp index 759d656505..13a2fca473 100644 --- a/libraries/core_libs/consensus/src/pbft/pbft_manager.cpp +++ b/libraries/core_libs/consensus/src/pbft/pbft_manager.cpp @@ -1388,7 +1388,7 @@ std::shared_ptr 
PbftManager::identifyLeaderBlock_(PbftRound round, Pb } PbftStateRootValidation PbftManager::validateFinalChainHash(const std::shared_ptr &pbft_block) const { - auto period = pbft_block->getPeriod(); + const auto period = pbft_block->getPeriod(); const auto &pbft_block_hash = pbft_block->getBlockHash(); auto prev_final_chain_hash = final_chain_->finalChainHash(period); @@ -1397,8 +1397,8 @@ PbftStateRootValidation PbftManager::validateFinalChainHash(const std::shared_pt return PbftStateRootValidation::Missing; } if (pbft_block->getFinalChainHash() != prev_final_chain_hash) { - LOG(log_er_) << "Block " << pbft_block_hash << " state root " << pbft_block->getFinalChainHash() - << " isn't matching actual " << prev_final_chain_hash.value(); + LOG(log_er_) << "Block " << period << " hash " << pbft_block_hash << " state root " + << pbft_block->getFinalChainHash() << " isn't matching actual " << prev_final_chain_hash.value(); return PbftStateRootValidation::Invalid; } From 58ecf1507fe18e8fb93ece9ccdaefe4a1f9feca2 Mon Sep 17 00:00:00 2001 From: Matus Kysel Date: Sun, 29 Sep 2024 09:38:36 +0200 Subject: [PATCH 052/105] revert change of dpos bytecode --- submodules/taraxa-evm | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/submodules/taraxa-evm b/submodules/taraxa-evm index 9ca283df7f..3fd026e151 160000 --- a/submodules/taraxa-evm +++ b/submodules/taraxa-evm @@ -1 +1 @@ -Subproject commit 9ca283df7f6904be58dd71c56278b9fd9c2f53c5 +Subproject commit 3fd026e151bb74d1f22db07d6c875c2c9c91a82d From 3c1d6c1004ca0b568857c8dd0ec5ef5df3d42402 Mon Sep 17 00:00:00 2001 From: Matus Kysel Date: Sun, 29 Sep 2024 21:17:43 +0200 Subject: [PATCH 053/105] update evm --- submodules/taraxa-evm | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/submodules/taraxa-evm b/submodules/taraxa-evm index 3fd026e151..6364ada14d 160000 --- a/submodules/taraxa-evm +++ b/submodules/taraxa-evm @@ -1 +1 @@ -Subproject commit 3fd026e151bb74d1f22db07d6c875c2c9c91a82d +Subproject 
commit 6364ada14dd94ab5048a54427cff3f7b00a8d54f From 2646f52252c68096b23048d89c2f3ff7c983f586 Mon Sep 17 00:00:00 2001 From: Matus Kysel Date: Mon, 30 Sep 2024 08:47:44 +0200 Subject: [PATCH 054/105] fix the tests --- submodules/taraxa-evm | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/submodules/taraxa-evm b/submodules/taraxa-evm index 6364ada14d..dfcc4e7520 160000 --- a/submodules/taraxa-evm +++ b/submodules/taraxa-evm @@ -1 +1 @@ -Subproject commit 6364ada14dd94ab5048a54427cff3f7b00a8d54f +Subproject commit dfcc4e752080c229cc2dce1d53a488749a298883 From d3167b92f26ed85106b470784fbbd67a7a1e1f16 Mon Sep 17 00:00:00 2001 From: Matus Kysel Date: Mon, 30 Sep 2024 09:23:04 +0200 Subject: [PATCH 055/105] update evm --- submodules/taraxa-evm | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/submodules/taraxa-evm b/submodules/taraxa-evm index dfcc4e7520..41b7d6bb7d 160000 --- a/submodules/taraxa-evm +++ b/submodules/taraxa-evm @@ -1 +1 @@ -Subproject commit dfcc4e752080c229cc2dce1d53a488749a298883 +Subproject commit 41b7d6bb7da5cb65e5f126df7125df8371600f91 From 8fb001419d8ff153df536fa7729a70a3f4e3a1ff Mon Sep 17 00:00:00 2001 From: Matus Kysel Date: Mon, 30 Sep 2024 16:09:47 +0200 Subject: [PATCH 056/105] chore: additional check on ws --- libraries/core_libs/network/src/ws_server.cpp | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/libraries/core_libs/network/src/ws_server.cpp b/libraries/core_libs/network/src/ws_server.cpp index a2029556f9..cb991382f9 100644 --- a/libraries/core_libs/network/src/ws_server.cpp +++ b/libraries/core_libs/network/src/ws_server.cpp @@ -57,6 +57,8 @@ void WsSession::on_read(beast::error_code ec, std::size_t bytes_transferred) { } void WsSession::processAsync() { + if (closed_) return; + std::string request(static_cast(read_buffer_.data().data()), read_buffer_.size()); read_buffer_.consume(read_buffer_.size()); LOG(log_tr_) << "processAsync " << request; @@ -73,6 +75,8 @@ void WsSession::processAsync() { 
} void WsSession::writeAsync(std::string &&message) { + if (closed_) return; + LOG(log_tr_) << "WS WRITE " << message.c_str(); auto executor = ws_.get_executor(); if (!executor) { From 69ec027e58259f58575c76a34f2c7e9c537a4c35 Mon Sep 17 00:00:00 2001 From: Matus Kysel Date: Mon, 30 Sep 2024 16:26:56 +0200 Subject: [PATCH 057/105] update HF number --- .../cli/include/cli/config_jsons/testnet/testnet_genesis.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/cli/include/cli/config_jsons/testnet/testnet_genesis.json b/libraries/cli/include/cli/config_jsons/testnet/testnet_genesis.json index 71889b5bc8..658bc22702 100644 --- a/libraries/cli/include/cli/config_jsons/testnet/testnet_genesis.json +++ b/libraries/cli/include/cli/config_jsons/testnet/testnet_genesis.json @@ -154,6 +154,6 @@ "pillar_blocks_interval": 1000, "bridge_contract_address": "0xcAF2b453FE8382a4B8110356DF0508f6d71F22BF" }, - "cornus_hf_block_num": 1622000 + "cornus_hf_block_num": 1668000 } } \ No newline at end of file From 629447002fa43cb00d280645c3278a6897e39605 Mon Sep 17 00:00:00 2001 From: Matus Kysel Date: Wed, 2 Oct 2024 10:47:45 +0200 Subject: [PATCH 058/105] fix cpp check --- CMakeModules/cppcheck.cmake | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/CMakeModules/cppcheck.cmake b/CMakeModules/cppcheck.cmake index f8d8ceacf4..33bb8c8186 100644 --- a/CMakeModules/cppcheck.cmake +++ b/CMakeModules/cppcheck.cmake @@ -20,7 +20,6 @@ else () # false positive --suppress=uninitMemberVar:${PROJECT_SOURCE_DIR}/*/UPnP.cpp # This is only enabled because of test functions and false positives - --suppress=unusedFunction:${PROJECT_SOURCE_DIR}/*/Eth.cpp --suppress=unusedFunction:${PROJECT_SOURCE_DIR}/*/Common.cpp --suppress=unusedFunction:${PROJECT_SOURCE_DIR}/*/Base64.cpp --suppress=unusedFunction:${PROJECT_SOURCE_DIR}/*/util.cpp @@ -28,6 +27,13 @@ else () --suppress=unusedFunction:${PROJECT_SOURCE_DIR}/*/UPnP.cpp 
--suppress=unusedFunction:${PROJECT_SOURCE_DIR}/*/logger.cpp --suppress=unusedFunction:${PROJECT_SOURCE_DIR}/*/state_api.cpp + --suppress=unusedFunction:${PROJECT_SOURCE_DIR}/*/watches.hpp + --suppress=unusedFunction:${PROJECT_SOURCE_DIR}/*/vector_ref.h + --suppress=unusedFunction:${PROJECT_SOURCE_DIR}/*/RLP.h + --suppress=unusedFunction:${PROJECT_SOURCE_DIR}/*/FixedHash.h + --suppress=unusedFunction:${PROJECT_SOURCE_DIR}/*/CommonData.h + --suppress=unusedFunction:${PROJECT_SOURCE_DIR}/*/Common.h + --suppress=unusedFunction:${PROJECT_SOURCE_DIR}/*/Log.h # TODO remove this when we solve correct exit of programs --suppress=localMutex:${PROJECT_SOURCE_DIR}/*/main.cpp # Just style warning @@ -39,6 +45,7 @@ else () --suppress=unmatchedSuppression:${PROJECT_SOURCE_DIR}/*/Common.h --suppress=cstyleCast:${PROJECT_SOURCE_DIR}/*/vector_ref.h --suppress=cstyleCast:${PROJECT_SOURCE_DIR}/*/Common.h + --suppress=cstyleCast:${PROJECT_SOURCE_DIR}/*/transaction.cpp # not an issue here --suppress=virtualCallInConstructor:${PROJECT_SOURCE_DIR}/*/final_chain.cpp # just a warning From 05b9990f51f773772afca8b27577cff8a7cd761a Mon Sep 17 00:00:00 2001 From: mfrankovi Date: Thu, 3 Oct 2024 14:44:50 +0200 Subject: [PATCH 059/105] chore: rpc requests limit --- libraries/config/include/config/network.hpp | 2 ++ libraries/config/src/network.cpp | 5 +++ .../network/include/network/http_server.hpp | 9 +++-- .../network/include/network/ws_server.hpp | 8 ++++- .../core_libs/network/src/http_server.cpp | 35 +++++++++++++++---- libraries/core_libs/network/src/ws_server.cpp | 24 +++++++++++-- .../core_libs/node/include/node/node.hpp | 4 +-- libraries/core_libs/node/src/node.cpp | 24 ++++++------- 8 files changed, 84 insertions(+), 27 deletions(-) diff --git a/libraries/config/include/config/network.hpp b/libraries/config/include/config/network.hpp index d9f1c07c19..477a8155cd 100644 --- a/libraries/config/include/config/network.hpp +++ b/libraries/config/include/config/network.hpp @@ -22,6 +22,8 @@ 
struct ConnectionConfig { // Number of threads dedicated to the rpc calls processing, default = 5 uint16_t threads_num{5}; + uint32_t max_pending_tasks{100}; + void validate() const; }; diff --git a/libraries/config/src/network.cpp b/libraries/config/src/network.cpp index 98031b74bf..cb37200713 100644 --- a/libraries/config/src/network.cpp +++ b/libraries/config/src/network.cpp @@ -33,6 +33,11 @@ void dec_json(const Json::Value &json, ConnectionConfig &config) { config.ws_port = ws_port.asUInt(); } + // max pending tasks + if (auto max_pending_tasks = getConfigData(json, {"max_pending_tasks"}, true); !max_pending_tasks.isNull()) { + config.max_pending_tasks = max_pending_tasks.asUInt(); + } + // number of threads processing rpc calls if (auto threads_num = getConfigData(json, {"threads_num"}, true); !threads_num.isNull()) { config.threads_num = threads_num.asUInt(); diff --git a/libraries/core_libs/network/include/network/http_server.hpp b/libraries/core_libs/network/include/network/http_server.hpp index 615f0cb954..1641c1a051 100644 --- a/libraries/core_libs/network/include/network/http_server.hpp +++ b/libraries/core_libs/network/include/network/http_server.hpp @@ -4,6 +4,7 @@ #include #include +#include "common/thread_pool.hpp" #include "common/types.hpp" #include "logger/logger.hpp" @@ -22,8 +23,8 @@ class HttpHandler; class HttpServer : public std::enable_shared_from_this { public: - HttpServer(boost::asio::io_context& io, boost::asio::ip::tcp::endpoint ep, const addr_t& node_addr, - const std::shared_ptr& request_processor); + HttpServer(std::shared_ptr thread_pool, boost::asio::ip::tcp::endpoint ep, const addr_t& node_addr, + const std::shared_ptr& request_processor, uint32_t max_pending_tasks); virtual ~HttpServer() { HttpServer::stop(); } @@ -31,6 +32,8 @@ class HttpServer : public std::enable_shared_from_this { bool stop(); void accept(); + uint32_t numberOfPendingTasks() const; + bool pendingTasksOverLimit() const { return numberOfPendingTasks() > 
kMaxPendingTasks; } boost::asio::io_context& getIoContext() { return io_context_; } std::shared_ptr getShared(); std::shared_ptr createConnection(); @@ -45,6 +48,8 @@ class HttpServer : public std::enable_shared_from_this { boost::asio::io_context& io_context_; boost::asio::ip::tcp::acceptor acceptor_; boost::asio::ip::tcp::endpoint ep_; + std::weak_ptr thread_pool_; + const uint32_t kMaxPendingTasks; LOG_OBJECTS_DEFINE }; // QQ: diff --git a/libraries/core_libs/network/include/network/ws_server.hpp b/libraries/core_libs/network/include/network/ws_server.hpp index e1290c8aef..60a3f6ccbd 100644 --- a/libraries/core_libs/network/include/network/ws_server.hpp +++ b/libraries/core_libs/network/include/network/ws_server.hpp @@ -16,6 +16,7 @@ #include #include +#include "common/thread_pool.hpp" #include "config/config.hpp" #include "dag/dag_block.hpp" #include "final_chain/data.hpp" @@ -84,7 +85,8 @@ class WsSession : public std::enable_shared_from_this { // Accepts incoming connections and launches the sessions class WsServer : public std::enable_shared_from_this, public jsonrpc::AbstractServerConnector { public: - WsServer(boost::asio::io_context& ioc, tcp::endpoint endpoint, addr_t node_addr); + WsServer(std::shared_ptr thread_pool, tcp::endpoint endpoint, addr_t node_addr, + uint32_t max_pending_tasks); virtual ~WsServer(); WsServer(const WsServer&) = delete; @@ -101,6 +103,8 @@ class WsServer : public std::enable_shared_from_this, public jsonrpc:: void newPendingTransaction(const trx_hash_t& trx_hash); void newPillarBlockData(const pillar_chain::PillarBlockData& pillar_block_data); uint32_t numberOfSessions(); + uint32_t numberOfPendingTasks() const; + bool pendingTasksOverLimit() const { return numberOfPendingTasks() > kMaxPendingTasks; } virtual std::shared_ptr createSession(tcp::socket&& socket) = 0; @@ -118,6 +122,8 @@ class WsServer : public std::enable_shared_from_this, public jsonrpc:: boost::shared_mutex sessions_mtx_; protected: + std::weak_ptr 
thread_pool_; + uint32_t kMaxPendingTasks; const addr_t node_addr_; }; diff --git a/libraries/core_libs/network/src/http_server.cpp b/libraries/core_libs/network/src/http_server.cpp index 1c82a131db..e89cdc9a72 100644 --- a/libraries/core_libs/network/src/http_server.cpp +++ b/libraries/core_libs/network/src/http_server.cpp @@ -2,9 +2,15 @@ namespace taraxa::net { -HttpServer::HttpServer(boost::asio::io_context &io, boost::asio::ip::tcp::endpoint ep, const addr_t &node_addr, - const std::shared_ptr &request_processor) - : request_processor_(request_processor), io_context_(io), acceptor_(io), ep_(std::move(ep)) { +HttpServer::HttpServer(std::shared_ptr thread_pool, boost::asio::ip::tcp::endpoint ep, + const addr_t &node_addr, const std::shared_ptr &request_processor, + uint32_t max_pending_tasks) + : request_processor_(request_processor), + io_context_(thread_pool->unsafe_get_io_context()), + acceptor_(thread_pool->unsafe_get_io_context()), + ep_(std::move(ep)), + thread_pool_(thread_pool), + kMaxPendingTasks(max_pending_tasks) { LOG_OBJECTS_CREATE("HTTP"); LOG(log_si_) << "Taraxa HttpServer started at port: " << ep_.port(); } @@ -66,6 +72,14 @@ bool HttpServer::stop() { return true; } +uint32_t HttpServer::numberOfPendingTasks() const { + auto thread_pool = thread_pool_.lock(); + if (thread_pool) { + return thread_pool->num_pending_tasks(); + } + return 0; +} + std::shared_ptr HttpConnection::getShared() { try { return shared_from_this(); @@ -98,10 +112,17 @@ void HttpConnection::read() { } else { assert(server_->request_processor_); LOG(server_->log_dg_) << "Received: " << request_; - response_ = server_->request_processor_->process(request_); - boost::beast::http::async_write( - socket_, response_, - [this_sp = getShared()](auto const & /*ec*/, auto /*bytes_transferred*/) { this_sp->stop(); }); + + if (server_->pendingTasksOverLimit()) { + LOG(server_->log_er_) << "HttpConnection closed - pending tasks over the limit " + << server_->numberOfPendingTasks(); + 
stop(); + } else { + response_ = server_->request_processor_->process(request_); + boost::beast::http::async_write( + socket_, response_, + [this_sp = getShared()](auto const & /*ec*/, auto /*bytes_transferred*/) { this_sp->stop(); }); + } } }); } diff --git a/libraries/core_libs/network/src/ws_server.cpp b/libraries/core_libs/network/src/ws_server.cpp index cb991382f9..bb361550e8 100644 --- a/libraries/core_libs/network/src/ws_server.cpp +++ b/libraries/core_libs/network/src/ws_server.cpp @@ -49,8 +49,13 @@ void WsSession::on_read(beast::error_code ec, std::size_t bytes_transferred) { return close(is_normal(ec)); } - LOG(log_tr_) << "WS READ " << (static_cast(read_buffer_.data().data())); + auto ws_server = ws_server_.lock(); + if (ws_server && ws_server->pendingTasksOverLimit()) { + LOG(log_er_) << "WS closed - pending tasks over the limit " << ws_server->numberOfPendingTasks(); + return close(true); + } + LOG(log_tr_) << "WS READ " << (static_cast(read_buffer_.data().data())); processAsync(); // Do another read do_read(); @@ -202,8 +207,13 @@ bool WsSession::is_normal(const beast::error_code &ec) const { return false; } -WsServer::WsServer(boost::asio::io_context &ioc, tcp::endpoint endpoint, addr_t node_addr) - : ioc_(ioc), acceptor_(ioc), node_addr_(std::move(node_addr)) { +WsServer::WsServer(std::shared_ptr thread_pool, tcp::endpoint endpoint, addr_t node_addr, + uint32_t max_pending_tasks) + : ioc_(thread_pool->unsafe_get_io_context()), + acceptor_(thread_pool->unsafe_get_io_context()), + thread_pool_(thread_pool), + kMaxPendingTasks(max_pending_tasks), + node_addr_(std::move(node_addr)) { LOG_OBJECTS_CREATE("WS_SERVER"); beast::error_code ec; @@ -331,4 +341,12 @@ uint32_t WsServer::numberOfSessions() { return sessions.size(); } +uint32_t WsServer::numberOfPendingTasks() const { + auto thread_pool = thread_pool_.lock(); + if (thread_pool) { + return thread_pool->num_pending_tasks(); + } + return 0; +} + } // namespace taraxa::net \ No newline at end of file 
diff --git a/libraries/core_libs/node/include/node/node.hpp b/libraries/core_libs/node/include/node/node.hpp index 77573b7d78..2a658ecd2f 100644 --- a/libraries/core_libs/node/include/node/node.hpp +++ b/libraries/core_libs/node/include/node/node.hpp @@ -80,8 +80,8 @@ class FullNode : public std::enable_shared_from_this { using JsonRpcServer = ModularServer; // should be destroyed after all components, since they may depend on it through unsafe pointers - std::unique_ptr rpc_thread_pool_; - std::unique_ptr graphql_thread_pool_; + std::shared_ptr rpc_thread_pool_; + std::shared_ptr graphql_thread_pool_; // In cae we will you config for this TP, it needs to be unique_ptr !!! util::ThreadPool subscription_pool_; diff --git a/libraries/core_libs/node/src/node.cpp b/libraries/core_libs/node/src/node.cpp index 0ee48d9c94..c05a4d4e5c 100644 --- a/libraries/core_libs/node/src/node.cpp +++ b/libraries/core_libs/node/src/node.cpp @@ -173,7 +173,7 @@ void FullNode::start() { // Inits rpc related members if (conf_.network.rpc) { - rpc_thread_pool_ = std::make_unique(conf_.network.rpc->threads_num); + rpc_thread_pool_ = std::make_shared(conf_.network.rpc->threads_num); net::rpc::eth::EthParams eth_rpc_params; eth_rpc_params.address = getAddress(); eth_rpc_params.chain_id = conf_.genesis.chain_id; @@ -226,16 +226,15 @@ void FullNode::start() { if (conf_.network.rpc->http_port) { auto json_rpc_processor = std::make_shared(); jsonrpc_http_ = std::make_shared( - rpc_thread_pool_->unsafe_get_io_context(), - boost::asio::ip::tcp::endpoint{conf_.network.rpc->address, *conf_.network.rpc->http_port}, getAddress(), - json_rpc_processor); + rpc_thread_pool_, boost::asio::ip::tcp::endpoint{conf_.network.rpc->address, *conf_.network.rpc->http_port}, + getAddress(), json_rpc_processor, conf_.network.rpc->max_pending_tasks); jsonrpc_api_->addConnector(json_rpc_processor); jsonrpc_http_->start(); } if (conf_.network.rpc->ws_port) { jsonrpc_ws_ = std::make_shared( - 
rpc_thread_pool_->unsafe_get_io_context(), - boost::asio::ip::tcp::endpoint{conf_.network.rpc->address, *conf_.network.rpc->ws_port}, getAddress()); + rpc_thread_pool_, boost::asio::ip::tcp::endpoint{conf_.network.rpc->address, *conf_.network.rpc->ws_port}, + getAddress(), conf_.network.rpc->max_pending_tasks); jsonrpc_api_->addConnector(jsonrpc_ws_); jsonrpc_ws_->run(); } @@ -280,22 +279,23 @@ void FullNode::start() { *rpc_thread_pool_); } if (conf_.network.graphql) { - graphql_thread_pool_ = std::make_unique(conf_.network.graphql->threads_num); + graphql_thread_pool_ = std::make_shared(conf_.network.graphql->threads_num); if (conf_.network.graphql->ws_port) { graphql_ws_ = std::make_shared( - graphql_thread_pool_->unsafe_get_io_context(), - boost::asio::ip::tcp::endpoint{conf_.network.graphql->address, *conf_.network.graphql->ws_port}, - getAddress()); + graphql_thread_pool_, + boost::asio::ip::tcp::endpoint{conf_.network.graphql->address, *conf_.network.graphql->ws_port}, getAddress(), + conf_.network.rpc->max_pending_tasks); // graphql_ws_->run(); } if (conf_.network.graphql->http_port) { graphql_http_ = std::make_shared( - graphql_thread_pool_->unsafe_get_io_context(), + graphql_thread_pool_, boost::asio::ip::tcp::endpoint{conf_.network.graphql->address, *conf_.network.graphql->http_port}, getAddress(), std::make_shared(final_chain_, dag_mgr_, pbft_mgr_, trx_mgr_, db_, gas_pricer_, - as_weak(network_), conf_.genesis.chain_id)); + as_weak(network_), conf_.genesis.chain_id), + conf_.network.rpc->max_pending_tasks); graphql_http_->start(); } } From 94878bfefd198d84b4a42f0ed8212817e73789df Mon Sep 17 00:00:00 2001 From: kstdl Date: Fri, 11 Oct 2024 17:54:28 +0300 Subject: [PATCH 060/105] fix: tracing execution block number --- libraries/core_libs/network/rpc/Debug.cpp | 15 +++------------ submodules/taraxa-evm | 2 +- 2 files changed, 4 insertions(+), 13 deletions(-) diff --git a/libraries/core_libs/network/rpc/Debug.cpp b/libraries/core_libs/network/rpc/Debug.cpp 
index 11cf10699f..1f4c158f6e 100644 --- a/libraries/core_libs/network/rpc/Debug.cpp +++ b/libraries/core_libs/network/rpc/Debug.cpp @@ -3,12 +3,9 @@ #include #include -#include - #include "common/jsoncpp.hpp" #include "final_chain/state_api_data.hpp" #include "network/rpc/eth/data.hpp" -#include "pbft/pbft_manager.hpp" using namespace std; using namespace dev; @@ -17,10 +14,6 @@ using namespace taraxa; namespace taraxa::net { -inline EthBlockNumber get_ctx_block_num(EthBlockNumber block_number) { - return (block_number >= 1) ? block_number - 1 : 0; -} - Json::Value Debug::debug_traceTransaction(const std::string& transaction_hash) { Json::Value res; auto [trx, loc] = get_transaction_with_location(transaction_hash); @@ -28,8 +21,7 @@ Json::Value Debug::debug_traceTransaction(const std::string& transaction_hash) { throw std::runtime_error("Transaction not found"); } if (auto node = full_node_.lock()) { - return util::readJsonFromString( - node->getFinalChain()->trace({to_eth_trx(std::move(trx))}, get_ctx_block_num(loc->period))); + return util::readJsonFromString(node->getFinalChain()->trace({to_eth_trx(std::move(trx))}, loc->period)); } return res; } @@ -65,7 +57,7 @@ Json::Value Debug::trace_replayTransaction(const std::string& transaction_hash, } if (auto node = full_node_.lock()) { return util::readJsonFromString( - node->getFinalChain()->trace({to_eth_trx(std::move(trx))}, get_ctx_block_num(loc->period), std::move(params))); + node->getFinalChain()->trace({to_eth_trx(std::move(trx))}, loc->period, std::move(params))); } return res; } @@ -83,8 +75,7 @@ Json::Value Debug::trace_replayBlockTransactions(const std::string& block_num, c trxs.reserve(transactions->size()); std::transform(transactions->begin(), transactions->end(), std::back_inserter(trxs), [this](auto t) { return to_eth_trx(std::move(t)); }); - return util::readJsonFromString( - node->getFinalChain()->trace(std::move(trxs), get_ctx_block_num(block), std::move(params))); + return 
util::readJsonFromString(node->getFinalChain()->trace(std::move(trxs), block, std::move(params))); } return res; } diff --git a/submodules/taraxa-evm b/submodules/taraxa-evm index 41b7d6bb7d..77135fe103 160000 --- a/submodules/taraxa-evm +++ b/submodules/taraxa-evm @@ -1 +1 @@ -Subproject commit 41b7d6bb7da5cb65e5f126df7125df8371600f91 +Subproject commit 77135fe10352156f4266e57d9dc162e33257c48c From d8e2f09b02542ce1bd9a49508948b8c643dc34c2 Mon Sep 17 00:00:00 2001 From: Matus Kysel Date: Wed, 16 Oct 2024 11:35:47 +0200 Subject: [PATCH 061/105] chore: fix log lvl --- .../tarcap/packets_handlers/latest/status_packet_handler.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/status_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/status_packet_handler.cpp index 6a751ec456..bd30c35fc3 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/status_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/status_packet_handler.cpp @@ -68,7 +68,7 @@ void StatusPacketHandler::process(const threadpool::PacketData& packet_data, con } if (genesis_hash != kGenesisHash) { - LOG((peers_state_->getPeersCount()) ? log_nf_ : log_er_) + LOG((peers_state_->getPeersCount()) ? 
log_nf_ : log_wr_) << "Incorrect genesis hash " << genesis_hash << ", host " << packet_data.from_node_id_.abridged() << " will be disconnected"; disconnect(packet_data.from_node_id_, dev::p2p::UserReason); From fef9c9d7930d43ca03214aeedc795154cd9c2c73 Mon Sep 17 00:00:00 2001 From: Matus Kysel Date: Wed, 16 Oct 2024 14:27:36 +0200 Subject: [PATCH 062/105] feat: add function to check disk size on startup --- libraries/common/CMakeLists.txt | 2 +- libraries/common/include/common/init.hpp | 36 ++++++++++++++++++++++++ programs/taraxad/main.cpp | 10 +++++-- tests/crypto_test.cpp | 2 +- tests/dag_block_test.cpp | 2 +- tests/dag_test.cpp | 2 +- tests/full_node_test.cpp | 2 +- tests/network_test.cpp | 2 +- tests/p2p_test.cpp | 2 +- tests/pbft_chain_test.cpp | 2 +- tests/pbft_manager_test.cpp | 2 +- tests/pillar_chain_test.cpp | 2 +- tests/test_util/gtest.hpp | 2 +- tests/transaction_test.cpp | 2 +- tests/vote_test.cpp | 2 +- 15 files changed, 57 insertions(+), 15 deletions(-) create mode 100644 libraries/common/include/common/init.hpp diff --git a/libraries/common/CMakeLists.txt b/libraries/common/CMakeLists.txt index d4aef9ef0b..fef03821c7 100644 --- a/libraries/common/CMakeLists.txt +++ b/libraries/common/CMakeLists.txt @@ -1,6 +1,6 @@ set(HEADERS include/common/constants.hpp - include/common/static_init.hpp + include/common/init.hpp include/common/types.hpp include/common/config_exception.hpp include/common/vrf_wrapper.hpp diff --git a/libraries/common/include/common/init.hpp b/libraries/common/include/common/init.hpp new file mode 100644 index 0000000000..edbcc4433d --- /dev/null +++ b/libraries/common/include/common/init.hpp @@ -0,0 +1,36 @@ +#pragma once + +#include +#include + +#include "common/util.hpp" + +namespace taraxa { + +inline void static_init() { + if (sodium_init() == -1) { + throw std::runtime_error("libsodium init failure"); + } +} + +inline bool checkDiskSpace(const std::string& path, uint64_t required_space_MB) { + // Convert MB to bytes + const 
uint64_t required_space_bytes = required_space_MB * 1024 * 1024; + + struct statvfs stat; + + // Get file system statistics + if (statvfs(path.c_str(), &stat) != 0) { + // If statvfs fails, return false + std::cerr << "Error getting file system stats" << std::endl; + return false; + } + + // Calculate available space + const uint64_t available_space = stat.f_bsize * stat.f_bavail; + + // Check if available space is greater than or equal to the required space + return available_space >= required_space_bytes; +} + +} // namespace taraxa diff --git a/programs/taraxad/main.cpp b/programs/taraxad/main.cpp index f09cede04a..807b87b480 100644 --- a/programs/taraxad/main.cpp +++ b/programs/taraxad/main.cpp @@ -2,17 +2,23 @@ #include #include "cli/config.hpp" +#include "cli/tools.hpp" #include "common/config_exception.hpp" -#include "common/static_init.hpp" +#include "common/init.hpp" #include "node/node.hpp" using namespace taraxa; int main(int argc, const char* argv[]) { static_init(); + + if (!checkDiskSpace(cli::tools::getTaraxaDefaultConfigFile(), 100)) { + std::cerr << "Insufficient disk space" << std::endl; + return 1; + } + try { cli::Config cli_conf(argc, argv); - if (cli_conf.nodeConfigured()) { auto node = std::make_shared(cli_conf.getNodeConfiguration()); node->start(); diff --git a/tests/crypto_test.cpp b/tests/crypto_test.cpp index c33452f48f..b1c5888766 100644 --- a/tests/crypto_test.cpp +++ b/tests/crypto_test.cpp @@ -8,7 +8,7 @@ #include #include -#include "common/static_init.hpp" +#include "common/init.hpp" #include "common/vrf_wrapper.hpp" #include "logger/logger.hpp" #include "test_util/gtest.hpp" diff --git a/tests/dag_block_test.cpp b/tests/dag_block_test.cpp index 7e0f639028..62b5987b5d 100644 --- a/tests/dag_block_test.cpp +++ b/tests/dag_block_test.cpp @@ -5,7 +5,7 @@ #include #include -#include "common/static_init.hpp" +#include "common/init.hpp" #include "common/types.hpp" #include "common/util.hpp" #include "dag/dag.hpp" diff --git 
a/tests/dag_test.cpp b/tests/dag_test.cpp index 6885b82624..36b9abf27c 100644 --- a/tests/dag_test.cpp +++ b/tests/dag_test.cpp @@ -1,6 +1,6 @@ #include -#include "common/static_init.hpp" +#include "common/init.hpp" #include "common/types.hpp" #include "dag/dag_manager.hpp" #include "logger/logger.hpp" diff --git a/tests/full_node_test.cpp b/tests/full_node_test.cpp index b8816637df..634548d5e6 100644 --- a/tests/full_node_test.cpp +++ b/tests/full_node_test.cpp @@ -11,7 +11,7 @@ #include "cli/config.hpp" #include "cli/tools.hpp" #include "common/constants.hpp" -#include "common/static_init.hpp" +#include "common/init.hpp" #include "dag/dag_block_proposer.hpp" #include "dag/dag_manager.hpp" #include "graphql/mutation.hpp" diff --git a/tests/network_test.cpp b/tests/network_test.cpp index ce5bfb7767..1efec3887f 100644 --- a/tests/network_test.cpp +++ b/tests/network_test.cpp @@ -7,7 +7,7 @@ #include #include "common/lazy.hpp" -#include "common/static_init.hpp" +#include "common/init.hpp" #include "config/config.hpp" #include "dag/dag.hpp" #include "dag/dag_block_proposer.hpp" diff --git a/tests/p2p_test.cpp b/tests/p2p_test.cpp index 86f5aaf327..253347c7c6 100644 --- a/tests/p2p_test.cpp +++ b/tests/p2p_test.cpp @@ -8,7 +8,7 @@ #include -#include "common/static_init.hpp" +#include "common/init.hpp" #include "logger/logger.hpp" #include "network/tarcap/tarcap_version.hpp" #include "test_util/samples.hpp" diff --git a/tests/pbft_chain_test.cpp b/tests/pbft_chain_test.cpp index 5769fee427..dea4f2a9a4 100644 --- a/tests/pbft_chain_test.cpp +++ b/tests/pbft_chain_test.cpp @@ -5,7 +5,7 @@ #include #include -#include "common/static_init.hpp" +#include "common/init.hpp" #include "logger/logger.hpp" #include "network/network.hpp" #include "pbft/pbft_manager.hpp" diff --git a/tests/pbft_manager_test.cpp b/tests/pbft_manager_test.cpp index cc7cdb2f35..313c0fde60 100644 --- a/tests/pbft_manager_test.cpp +++ b/tests/pbft_manager_test.cpp @@ -1,6 +1,6 @@ #include -#include 
"common/static_init.hpp" +#include "common/init.hpp" #include "logger/logger.hpp" #include "network/tarcap/packets_handlers/latest/vote_packet_handler.hpp" #include "test_util/node_dag_creation_fixture.hpp" diff --git a/tests/pillar_chain_test.cpp b/tests/pillar_chain_test.cpp index 5f6cc5d572..0bf310821d 100644 --- a/tests/pillar_chain_test.cpp +++ b/tests/pillar_chain_test.cpp @@ -1,7 +1,7 @@ #include #include "common/encoding_solidity.hpp" -#include "common/static_init.hpp" +#include "common/init.hpp" #include "logger/logger.hpp" #include "pbft/pbft_manager.hpp" #include "pillar_chain/pillar_chain_manager.hpp" diff --git a/tests/test_util/gtest.hpp b/tests/test_util/gtest.hpp index 2cf0248748..050d943aa7 100644 --- a/tests/test_util/gtest.hpp +++ b/tests/test_util/gtest.hpp @@ -5,7 +5,7 @@ #include #include "common/lazy.hpp" -#include "common/static_init.hpp" +#include "common/init.hpp" #include "config/config.hpp" namespace fs = std::filesystem; diff --git a/tests/transaction_test.cpp b/tests/transaction_test.cpp index 73bbd45a5d..e75356386b 100644 --- a/tests/transaction_test.cpp +++ b/tests/transaction_test.cpp @@ -6,7 +6,7 @@ #include #include -#include "common/static_init.hpp" +#include "common/init.hpp" #include "config/genesis.hpp" #include "final_chain/final_chain.hpp" #include "final_chain/trie_common.hpp" diff --git a/tests/vote_test.cpp b/tests/vote_test.cpp index fa687a9e44..5356a701d3 100644 --- a/tests/vote_test.cpp +++ b/tests/vote_test.cpp @@ -1,7 +1,7 @@ #include #include -#include "common/static_init.hpp" +#include "common/init.hpp" #include "logger/logger.hpp" #include "network/network.hpp" #include "network/tarcap/packets_handlers/latest/vote_packet_handler.hpp" From db9a9cf4cac3025423a77ff78b2374d4bd8ffee0 Mon Sep 17 00:00:00 2001 From: Matus Kysel Date: Wed, 16 Oct 2024 14:39:51 +0200 Subject: [PATCH 063/105] clang format --- tests/network_test.cpp | 2 +- tests/test_util/gtest.hpp | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff 
--git a/tests/network_test.cpp b/tests/network_test.cpp index 1efec3887f..3b6952468a 100644 --- a/tests/network_test.cpp +++ b/tests/network_test.cpp @@ -6,8 +6,8 @@ #include #include -#include "common/lazy.hpp" #include "common/init.hpp" +#include "common/lazy.hpp" #include "config/config.hpp" #include "dag/dag.hpp" #include "dag/dag_block_proposer.hpp" diff --git a/tests/test_util/gtest.hpp b/tests/test_util/gtest.hpp index 050d943aa7..e8c46555f5 100644 --- a/tests/test_util/gtest.hpp +++ b/tests/test_util/gtest.hpp @@ -4,8 +4,8 @@ #include -#include "common/lazy.hpp" #include "common/init.hpp" +#include "common/lazy.hpp" #include "config/config.hpp" namespace fs = std::filesystem; From bb22fd56f8f1e98f41eb2775a782d97fe9af99b2 Mon Sep 17 00:00:00 2001 From: Matus Kysel Date: Wed, 16 Oct 2024 15:28:41 +0200 Subject: [PATCH 064/105] removed old file --- libraries/common/include/common/init.hpp | 4 ++-- libraries/common/include/common/static_init.hpp | 15 --------------- programs/taraxad/main.cpp | 2 +- 3 files changed, 3 insertions(+), 18 deletions(-) delete mode 100644 libraries/common/include/common/static_init.hpp diff --git a/libraries/common/include/common/init.hpp b/libraries/common/include/common/init.hpp index edbcc4433d..59fbeb7809 100644 --- a/libraries/common/include/common/init.hpp +++ b/libraries/common/include/common/init.hpp @@ -21,9 +21,9 @@ inline bool checkDiskSpace(const std::string& path, uint64_t required_space_MB) // Get file system statistics if (statvfs(path.c_str(), &stat) != 0) { - // If statvfs fails, return false + // If statvfs fails, return true std::cerr << "Error getting file system stats" << std::endl; - return false; + return true; } // Calculate available space diff --git a/libraries/common/include/common/static_init.hpp b/libraries/common/include/common/static_init.hpp deleted file mode 100644 index f39cf34cce..0000000000 --- a/libraries/common/include/common/static_init.hpp +++ /dev/null @@ -1,15 +0,0 @@ -#pragma once - 
-#include - -#include "common/util.hpp" - -namespace taraxa { - -inline void static_init() { - if (sodium_init() == -1) { - throw std::runtime_error("libsodium init failure"); - } -} - -} // namespace taraxa diff --git a/programs/taraxad/main.cpp b/programs/taraxad/main.cpp index 807b87b480..17aa61336c 100644 --- a/programs/taraxad/main.cpp +++ b/programs/taraxad/main.cpp @@ -12,7 +12,7 @@ using namespace taraxa; int main(int argc, const char* argv[]) { static_init(); - if (!checkDiskSpace(cli::tools::getTaraxaDefaultConfigFile(), 100)) { + if (!checkDiskSpace(cli::tools::getTaraxaDefaultConfigFile(), 10)) { std::cerr << "Insufficient disk space" << std::endl; return 1; } From e1511c365370038bfaf50e710760a14b421378c4 Mon Sep 17 00:00:00 2001 From: Matus Kysel Date: Tue, 29 Oct 2024 12:45:40 +0100 Subject: [PATCH 065/105] rework move semantics little bit --- .../include/pillar_chain/pillar_chain_manager.hpp | 2 +- .../consensus/src/pillar_chain/pillar_chain_manager.cpp | 9 ++++----- 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/libraries/core_libs/consensus/include/pillar_chain/pillar_chain_manager.hpp b/libraries/core_libs/consensus/include/pillar_chain/pillar_chain_manager.hpp index cfe0d14e76..9ddf0c3a83 100644 --- a/libraries/core_libs/consensus/include/pillar_chain/pillar_chain_manager.hpp +++ b/libraries/core_libs/consensus/include/pillar_chain/pillar_chain_manager.hpp @@ -164,7 +164,7 @@ class PillarChainManager { * @param pillar_block * @param new_vote_counts */ - void saveNewPillarBlock(std::shared_ptr pillar_block, + void saveNewPillarBlock(const std::shared_ptr& pillar_block, std::vector&& new_vote_counts); private: diff --git a/libraries/core_libs/consensus/src/pillar_chain/pillar_chain_manager.cpp b/libraries/core_libs/consensus/src/pillar_chain/pillar_chain_manager.cpp index d582f94050..bd8257b977 100644 --- a/libraries/core_libs/consensus/src/pillar_chain/pillar_chain_manager.cpp +++ 
b/libraries/core_libs/consensus/src/pillar_chain/pillar_chain_manager.cpp @@ -106,14 +106,13 @@ std::shared_ptr PillarChainManager::createPillarBlock( return pillar_block; } -void PillarChainManager::saveNewPillarBlock(std::shared_ptr pillar_block, +void PillarChainManager::saveNewPillarBlock(const std::shared_ptr& pillar_block, std::vector&& new_vote_counts) { - CurrentPillarBlockDataDb data{std::move(pillar_block), std::move(new_vote_counts)}; - db_->saveCurrentPillarBlockData(data); + db_->saveCurrentPillarBlockData({pillar_block, new_vote_counts}); std::scoped_lock lock(mutex_); - current_pillar_block_ = std::move(data.pillar_block); - current_pillar_block_vote_counts_ = std::move(data.vote_counts); + current_pillar_block_ = pillar_block; + current_pillar_block_vote_counts_ = std::move(new_vote_counts); } std::shared_ptr PillarChainManager::genAndPlacePillarVote(PbftPeriod period, From 1a8277ce6e2566c6104c8cd3c22ea5da2abae96d Mon Sep 17 00:00:00 2001 From: Matus Kysel Date: Wed, 30 Oct 2024 08:01:40 +0100 Subject: [PATCH 066/105] fix locks --- .../consensus/src/pillar_chain/pillar_chain_manager.cpp | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/libraries/core_libs/consensus/src/pillar_chain/pillar_chain_manager.cpp b/libraries/core_libs/consensus/src/pillar_chain/pillar_chain_manager.cpp index bd8257b977..932ad31a08 100644 --- a/libraries/core_libs/consensus/src/pillar_chain/pillar_chain_manager.cpp +++ b/libraries/core_libs/consensus/src/pillar_chain/pillar_chain_manager.cpp @@ -108,9 +108,8 @@ std::shared_ptr PillarChainManager::createPillarBlock( void PillarChainManager::saveNewPillarBlock(const std::shared_ptr& pillar_block, std::vector&& new_vote_counts) { - db_->saveCurrentPillarBlockData({pillar_block, new_vote_counts}); - std::scoped_lock lock(mutex_); + db_->saveCurrentPillarBlockData({pillar_block, new_vote_counts}); current_pillar_block_ = pillar_block; current_pillar_block_vote_counts_ = std::move(new_vote_counts); } @@ -332,7 
+331,6 @@ bool PillarChainManager::isValidPillarBlock(const std::shared_ptr& } const auto last_finalized_pillar_block = getLastFinalizedPillarBlock(); - std::shared_lock lock(mutex_); assert(last_finalized_pillar_block); // Check if some block was not skipped From 13fa82b522eb7c90469a767af8da8b71030386e9 Mon Sep 17 00:00:00 2001 From: mfrankovi Date: Thu, 31 Oct 2024 12:47:02 +0100 Subject: [PATCH 067/105] fix: pillar block save --- .../core_libs/consensus/src/pbft/pbft_manager.cpp | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/libraries/core_libs/consensus/src/pbft/pbft_manager.cpp b/libraries/core_libs/consensus/src/pbft/pbft_manager.cpp index 13a2fca473..f2d4235e11 100644 --- a/libraries/core_libs/consensus/src/pbft/pbft_manager.cpp +++ b/libraries/core_libs/consensus/src/pbft/pbft_manager.cpp @@ -41,6 +41,17 @@ PbftManager::PbftManager(const FullNodeConfig &conf, std::shared_ptr const auto &node_addr = node_addr_; LOG_OBJECTS_CREATE("PBFT_MGR"); + auto current_pbft_period = pbft_chain_->getPbftChainSize(); + if (kGenesisConfig.state.hardforks.ficus_hf.isPillarBlockPeriod(current_pbft_period)) { + const auto current_pillar_block = pillar_chain_mgr_->getCurrentPillarBlock(); + // There is a race condition where pbt block could have been saved and node stopped before saving pillar block + if (current_pbft_period == + current_pillar_block->getPeriod() + kGenesisConfig.state.hardforks.ficus_hf.pillar_blocks_interval) + LOG(log_er_) << "Pillar block was not processed before restart, current period: " << current_pbft_period + << ", current pillar block period: " << current_pillar_block->getPeriod(); + processPillarBlock(current_pbft_period); + } + for (auto period = final_chain_->lastBlockNumber() + 1, curr_period = pbft_chain_->getPbftChainSize(); period <= curr_period; ++period) { auto period_raw = db_->getPeriodDataRaw(period); From 4dcc45b5ee75b92b9e42afcd6d0ba65502952f95 Mon Sep 17 00:00:00 2001 From: mfrankovi Date: Fri, 25 Oct 2024 10:53:40 
+0200 Subject: [PATCH 068/105] dag proposal improvement --- libraries/common/include/common/constants.hpp | 1 + .../core_libs/consensus/include/dag/dag_manager.hpp | 3 +++ .../consensus/src/dag/dag_block_proposer.cpp | 11 +++++++++++ .../core_libs/consensus/src/dag/dag_manager.cpp | 12 ++++++++++++ 4 files changed, 27 insertions(+) diff --git a/libraries/common/include/common/constants.hpp b/libraries/common/include/common/constants.hpp index e9ba096485..aea008f1ac 100644 --- a/libraries/common/include/common/constants.hpp +++ b/libraries/common/include/common/constants.hpp @@ -29,6 +29,7 @@ const uint64_t kMinTxGas{21000}; constexpr uint32_t kMinTransactionPoolSize{30000}; constexpr uint32_t kDefaultTransactionPoolSize{200000}; constexpr uint32_t kMaxNonFinalizedTransactions{1000000}; +constexpr uint32_t kMaxNonFinalizedDagBlocks{100}; const size_t kV3NetworkVersion = 3; diff --git a/libraries/core_libs/consensus/include/dag/dag_manager.hpp b/libraries/core_libs/consensus/include/dag/dag_manager.hpp index 62232a57e9..a978a378b0 100644 --- a/libraries/core_libs/consensus/include/dag/dag_manager.hpp +++ b/libraries/core_libs/consensus/include/dag/dag_manager.hpp @@ -184,6 +184,8 @@ class DagManager : public std::enable_shared_from_this { */ std::pair getNonFinalizedBlocksSize() const; + uint32_t getNonFinalizedBlocksMinDifficulty() const; + util::Event const block_verified_{}; /** @@ -258,6 +260,7 @@ class DagManager : public std::enable_shared_from_this { blk_hash_t old_anchor_; // anchor of the second to last period PbftPeriod period_; // last period std::map> non_finalized_blks_; + uint32_t non_finalized_blks_min_difficulty_ = UINT32_MAX; DagFrontier frontier_; SortitionParamsManager sortition_params_manager_; const DagConfig &dag_config_; diff --git a/libraries/core_libs/consensus/src/dag/dag_block_proposer.cpp b/libraries/core_libs/consensus/src/dag/dag_block_proposer.cpp index cdc137af23..20f8782029 100644 --- 
a/libraries/core_libs/consensus/src/dag/dag_block_proposer.cpp +++ b/libraries/core_libs/consensus/src/dag/dag_block_proposer.cpp @@ -89,6 +89,17 @@ bool DagBlockProposer::proposeDagBlock() { vdf_sortition::VdfSortition vdf(sortition_params, vrf_sk_, VrfSortitionBase::makeVrfInput(propose_level, period_block_hash), vote_count, max_vote_count); + + auto anchor = dag_mgr_->getAnchors().second; + if (frontier.pivot != anchor) { + if (dag_mgr_->getNonFinalizedBlocksSize().second > kMaxNonFinalizedDagBlocks) { + return false; + } + if (dag_mgr_->getNonFinalizedBlocksMinDifficulty() < vdf.getDifficulty()) { + return false; + } + } + if (vdf.isStale(sortition_params)) { if (last_propose_level_ == propose_level) { if (num_tries_ < max_num_tries_) { diff --git a/libraries/core_libs/consensus/src/dag/dag_manager.cpp b/libraries/core_libs/consensus/src/dag/dag_manager.cpp index b08db434e4..129b6abff0 100644 --- a/libraries/core_libs/consensus/src/dag/dag_manager.cpp +++ b/libraries/core_libs/consensus/src/dag/dag_manager.cpp @@ -170,6 +170,9 @@ std::pair> DagManager::addDagBlock(DagBlock &&blk, max_level_ = std::max(current_max_level, blk.getLevel()); addToDag(blk_hash, pivot_hash, tips, blk.getLevel()); + if (non_finalized_blks_min_difficulty_ > blk.getDifficulty()) { + non_finalized_blks_min_difficulty_ = blk.getDifficulty(); + } updateFrontier(); } @@ -361,6 +364,7 @@ uint DagManager::setDagBlockOrder(blk_hash_t const &new_anchor, PbftPeriod perio std::unordered_map> expired_dag_blocks_to_remove; std::vector expired_dag_blocks_transactions; + non_finalized_blks_min_difficulty_ = UINT32_MAX; for (auto &v : non_finalized_blocks) { for (auto &blk_hash : v.second) { if (dag_order_set.count(blk_hash) != 0) { @@ -372,6 +376,9 @@ uint DagManager::setDagBlockOrder(blk_hash_t const &new_anchor, PbftPeriod perio if (validateBlockNotExpired(dag_block, expired_dag_blocks_to_remove)) { addToDag(blk_hash, pivot_hash, dag_block->getTips(), dag_block->getLevel(), false); + if 
(non_finalized_blks_min_difficulty_ > dag_block->getDifficulty()) { + non_finalized_blks_min_difficulty_ = dag_block->getDifficulty(); + } } else { db_->removeDagBlock(blk_hash); seen_blocks_.erase(blk_hash); @@ -572,6 +579,11 @@ DagManager::getNonFinalizedBlocksWithTransactions(const std::unordered_set DagManager::getNonFinalizedBlocksSize() const { std::shared_lock lock(mutex_); From 663766d017bae7002bb7b1a609cfe0148941f0f1 Mon Sep 17 00:00:00 2001 From: Jakub Fornadel Date: Thu, 26 Sep 2024 15:01:30 -0700 Subject: [PATCH 069/105] separate packets parsing from handlers --- .../include/network/tarcap/packet_types.hpp | 60 +- .../tarcap/packets/dag_block_packet.hpp | 47 + .../tarcap/packets/dag_sync_packet.hpp | 45 + .../tarcap/packets/get_dag_sync_packet.hpp | 29 + .../packets/get_next_votes_bundle_packet.hpp | 24 + .../tarcap/packets/get_pbft_sync_packet.hpp | 22 + .../get_pillar_votes_bundle_packet.hpp | 26 + .../tarcap/packets/pbft_sync_packet.hpp | 49 + .../tarcap/packets/pillar_vote_packet.hpp | 34 + .../packets/pillar_votes_bundle_packet.hpp | 38 + .../network/tarcap/packets/status_packet.hpp | 54 + .../tarcap/packets/transaction_packet.hpp | 55 + .../network/tarcap/packets/vote_packet.hpp | 38 + .../tarcap/packets/votes_bundle_packet.hpp | 45 + .../network/tarcap/packets_handler.hpp | 8 +- .../latest/common/base_packet_handler.hpp | 28 + .../common/ext_pillar_vote_packet_handler.hpp | 44 +- .../common/ext_syncing_packet_handler.hpp | 168 +- .../common/ext_votes_packet_handler.hpp | 222 +- .../latest/common/packet_handler.hpp | 153 +- .../latest/dag_block_packet_handler.hpp | 10 +- .../latest/dag_sync_packet_handler.hpp | 8 +- .../latest/get_dag_sync_packet_handler.hpp | 8 +- .../get_next_votes_bundle_packet_handler.hpp | 8 +- .../latest/get_pbft_sync_packet_handler.hpp | 8 +- ...get_pillar_votes_bundle_packet_handler.hpp | 8 +- .../latest/pbft_sync_packet_handler.hpp | 8 +- .../latest/pillar_vote_packet_handler.hpp | 8 +- 
.../pillar_votes_bundle_packet_handler.hpp | 8 +- .../latest/status_packet_handler.hpp | 8 +- .../latest/transaction_packet_handler.hpp | 10 +- .../latest/vote_packet_handler.hpp | 8 +- .../latest/votes_bundle_packet_handler.hpp | 8 +- .../v3/get_pbft_sync_packet_handler.hpp | 41 - .../v3/pbft_sync_packet_handler.hpp | 43 - .../network/tarcap/taraxa_capability.hpp | 3 - .../network/threadpool/packet_data.hpp | 1 + libraries/core_libs/network/src/network.cpp | 7 - .../network/src/tarcap/packets_handler.cpp | 2 +- .../common/ext_bls_sig_packet_handler.cpp | 74 +- .../common/ext_syncing_packet_handler.cpp | 336 +-- .../common/ext_votes_packet_handler.cpp | 453 ++-- .../latest/common/packet_handler.cpp | 292 +-- .../latest/dag_block_packet_handler.cpp | 42 +- .../latest/dag_sync_packet_handler.cpp | 77 +- .../latest/get_dag_sync_packet_handler.cpp | 27 +- .../get_next_votes_bundle_packet_handler.cpp | 17 +- .../latest/get_pbft_sync_packet_handler.cpp | 26 +- ...get_pillar_votes_bundle_packet_handler.cpp | 33 +- .../latest/pbft_sync_packet_handler.cpp | 114 +- .../latest/pillar_vote_packet_handler.cpp | 21 +- .../pillar_votes_bundle_packet_handler.cpp | 12 +- .../latest/status_packet_handler.cpp | 103 +- .../latest/transaction_packet_handler.cpp | 66 +- .../latest/vote_packet_handler.cpp | 55 +- .../latest/votes_bundle_packet_handler.cpp | 53 +- .../v3/get_pbft_sync_packet_handler.cpp | 117 - .../v3/pbft_sync_packet_handler.cpp | 296 --- .../src/tarcap/shared_states/peers_state.cpp | 2 +- .../network/src/tarcap/taraxa_capability.cpp | 63 +- .../network/src/threadpool/packet_data.cpp | 10 +- .../src/threadpool/packets_blocking_mask.cpp | 2 +- .../network/src/threadpool/priority_queue.cpp | 34 +- libraries/logger/include/logger/logger.hpp | 16 - tests/tarcap_threadpool_test.cpp | 1937 +++++++++-------- 65 files changed, 2931 insertions(+), 2741 deletions(-) create mode 100644 libraries/core_libs/network/include/network/tarcap/packets/dag_block_packet.hpp create mode 
100644 libraries/core_libs/network/include/network/tarcap/packets/dag_sync_packet.hpp create mode 100644 libraries/core_libs/network/include/network/tarcap/packets/get_dag_sync_packet.hpp create mode 100644 libraries/core_libs/network/include/network/tarcap/packets/get_next_votes_bundle_packet.hpp create mode 100644 libraries/core_libs/network/include/network/tarcap/packets/get_pbft_sync_packet.hpp create mode 100644 libraries/core_libs/network/include/network/tarcap/packets/get_pillar_votes_bundle_packet.hpp create mode 100644 libraries/core_libs/network/include/network/tarcap/packets/pbft_sync_packet.hpp create mode 100644 libraries/core_libs/network/include/network/tarcap/packets/pillar_vote_packet.hpp create mode 100644 libraries/core_libs/network/include/network/tarcap/packets/pillar_votes_bundle_packet.hpp create mode 100644 libraries/core_libs/network/include/network/tarcap/packets/status_packet.hpp create mode 100644 libraries/core_libs/network/include/network/tarcap/packets/transaction_packet.hpp create mode 100644 libraries/core_libs/network/include/network/tarcap/packets/vote_packet.hpp create mode 100644 libraries/core_libs/network/include/network/tarcap/packets/votes_bundle_packet.hpp create mode 100644 libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/common/base_packet_handler.hpp delete mode 100644 libraries/core_libs/network/include/network/tarcap/packets_handlers/v3/get_pbft_sync_packet_handler.hpp delete mode 100644 libraries/core_libs/network/include/network/tarcap/packets_handlers/v3/pbft_sync_packet_handler.hpp delete mode 100644 libraries/core_libs/network/src/tarcap/packets_handlers/v3/get_pbft_sync_packet_handler.cpp delete mode 100644 libraries/core_libs/network/src/tarcap/packets_handlers/v3/pbft_sync_packet_handler.cpp diff --git a/libraries/core_libs/network/include/network/tarcap/packet_types.hpp b/libraries/core_libs/network/include/network/tarcap/packet_types.hpp index 6cd9baa778..94e1a14a97 100644 --- 
a/libraries/core_libs/network/include/network/tarcap/packet_types.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packet_types.hpp @@ -11,29 +11,29 @@ namespace taraxa::network { */ enum SubprotocolPacketType : uint32_t { // Consensus packets with high processing priority - HighPriorityPackets = 0, - VotePacket, // Vote packer can contain (optional) also pbft block - GetNextVotesSyncPacket, - VotesBundlePacket, + kHighPriorityPackets = 0, + kVotePacket, // Vote packer can contain (optional) also pbft block + kGetNextVotesSyncPacket, + kVotesBundlePacket, // Standard packets with mid processing priority - MidPriorityPackets, - DagBlockPacket, + kMidPriorityPackets, + kDagBlockPacket, // DagSyncPacket has mid priority as it is also used for ad-hoc syncing in case new dag blocks miss tips/pivot - DagSyncPacket, - TransactionPacket, + kDagSyncPacket, + kTransactionPacket, // Non critical packets with low processing priority - LowPriorityPackets, - StatusPacket, - GetPbftSyncPacket, - PbftSyncPacket, - GetDagSyncPacket, - PillarVotePacket, - GetPillarVotesBundlePacket, - PillarVotesBundlePacket, + kLowPriorityPackets, + kStatusPacket, + kGetPbftSyncPacket, + kPbftSyncPacket, + kGetDagSyncPacket, + kPillarVotePacket, + kGetPillarVotesBundlePacket, + kPillarVotesBundlePacket, - PacketCount + kPacketCount }; /** @@ -42,31 +42,31 @@ enum SubprotocolPacketType : uint32_t { */ inline std::string convertPacketTypeToString(SubprotocolPacketType packet_type) { switch (packet_type) { - case StatusPacket: + case kStatusPacket: return "StatusPacket"; - case DagBlockPacket: + case kDagBlockPacket: return "DagBlockPacket"; - case GetDagSyncPacket: + case kGetDagSyncPacket: return "GetDagSyncPacket"; - case DagSyncPacket: + case kDagSyncPacket: return "DagSyncPacket"; - case TransactionPacket: + case kTransactionPacket: return "TransactionPacket"; - case VotePacket: + case kVotePacket: return "VotePacket"; - case GetNextVotesSyncPacket: + case kGetNextVotesSyncPacket: 
return "GetNextVotesSyncPacket"; - case VotesBundlePacket: + case kVotesBundlePacket: return "VotesBundlePacket"; - case GetPbftSyncPacket: + case kGetPbftSyncPacket: return "GetPbftSyncPacket"; - case PbftSyncPacket: + case kPbftSyncPacket: return "PbftSyncPacket"; - case PillarVotePacket: + case kPillarVotePacket: return "PillarVotePacket"; - case GetPillarVotesBundlePacket: + case kGetPillarVotesBundlePacket: return "GetPillarVotesBundlePacket"; - case PillarVotesBundlePacket: + case kPillarVotesBundlePacket: return "PillarVotesBundlePacket"; default: break; diff --git a/libraries/core_libs/network/include/network/tarcap/packets/dag_block_packet.hpp b/libraries/core_libs/network/include/network/tarcap/packets/dag_block_packet.hpp new file mode 100644 index 0000000000..87c88a9e57 --- /dev/null +++ b/libraries/core_libs/network/include/network/tarcap/packets/dag_block_packet.hpp @@ -0,0 +1,47 @@ +#pragma once + +#include "dag/dag_block.hpp" +#include "transaction/system_transaction.hpp" + +namespace taraxa::network { + +// TODO: create new version of this packet without manual parsing +struct DagBlockPacket { + DagBlockPacket(const dev::RLP& packet_rlp) { + constexpr size_t required_size = 2; + // Only one dag block can be received + if (packet_rlp.itemCount() != required_size) { + throw InvalidRlpItemsCountException("DagBlockPacket", packet_rlp.itemCount(), required_size); + } + + dev::RLP dag_rlp; + + // TODO: bad rlp form - we should not check itemsCount here... 
+ if (packet_rlp.itemCount() == 2) { + const auto trx_count = packet_rlp[0].itemCount(); + transactions.reserve(trx_count); + + for (const auto tx_rlp : packet_rlp[0]) { + try { + auto trx = std::make_shared(tx_rlp); + transactions.emplace(trx->getHash(), std::move(trx)); + } catch (const Transaction::InvalidTransaction& e) { + throw MaliciousPeerException("Unable to parse transaction: " + std::string(e.what())); + } + } + dag_rlp = packet_rlp[1]; + } else { + dag_rlp = packet_rlp; + } + + dag_block = DagBlock(dag_rlp); + }; + + // TODO: make this a vector for automatic encoding/decoding... + std::unordered_map> transactions; + DagBlock dag_block; + + // RLP_FIELDS_DEFINE_INPLACE(transactions, dag_block) +}; + +} // namespace taraxa::network diff --git a/libraries/core_libs/network/include/network/tarcap/packets/dag_sync_packet.hpp b/libraries/core_libs/network/include/network/tarcap/packets/dag_sync_packet.hpp new file mode 100644 index 0000000000..1f38458bfd --- /dev/null +++ b/libraries/core_libs/network/include/network/tarcap/packets/dag_sync_packet.hpp @@ -0,0 +1,45 @@ +#pragma once + +#include "dag/dag_block.hpp" +#include "transaction/system_transaction.hpp" + +namespace taraxa::network { + +// TODO: create new version of this packet without manual parsing +struct DagSyncPacket { + DagSyncPacket(const dev::RLP& packet_rlp) { + if (constexpr size_t required_size = 4; packet_rlp.itemCount() != required_size) { + throw InvalidRlpItemsCountException("DagSyncPacket", packet_rlp.itemCount(), required_size); + } + + auto it = packet_rlp.begin(); + request_period = (*it++).toInt(); + response_period = (*it++).toInt(); + + const auto trx_count = (*it).itemCount(); + transactions.reserve(trx_count); + + for (const auto tx_rlp : (*it++)) { + try { + auto trx = std::make_shared(tx_rlp); + transactions.emplace(trx->getHash(), std::move(trx)); + } catch (const Transaction::InvalidTransaction& e) { + throw MaliciousPeerException("Unable to parse transaction: " + 
std::string(e.what())); + } + } + + dag_blocks.reserve((*it).itemCount()); + for (const auto block_rlp : *it) { + dag_blocks.emplace_back(DagBlock{block_rlp}); + } + }; + + PbftPeriod request_period; + PbftPeriod response_period; + std::unordered_map> transactions; + std::vector dag_blocks; + + // RLP_FIELDS_DEFINE_INPLACE(transactions, dag_block) +}; + +} // namespace taraxa::network diff --git a/libraries/core_libs/network/include/network/tarcap/packets/get_dag_sync_packet.hpp b/libraries/core_libs/network/include/network/tarcap/packets/get_dag_sync_packet.hpp new file mode 100644 index 0000000000..cbd0674c9b --- /dev/null +++ b/libraries/core_libs/network/include/network/tarcap/packets/get_dag_sync_packet.hpp @@ -0,0 +1,29 @@ +#pragma once + +#include "dag/dag_block.hpp" +#include "transaction/system_transaction.hpp" + +namespace taraxa::network { + +// TODO: create new version of this packet without manual parsing +struct GetDagSyncPacket { + GetDagSyncPacket(const dev::RLP& packet_rlp) { + if (constexpr size_t required_size = 2; packet_rlp.itemCount() != required_size) { + throw InvalidRlpItemsCountException("GetDagSyncPacket", packet_rlp.itemCount(), required_size); + } + + auto it = packet_rlp.begin(); + peer_period = (*it++).toInt(); + + for (const auto block_hash_rlp : *it) { + blocks_hashes.emplace(block_hash_rlp.toHash()); + } + }; + + PbftPeriod peer_period; + std::unordered_set blocks_hashes; + + // RLP_FIELDS_DEFINE_INPLACE(transactions, dag_block) +}; + +} // namespace taraxa::network diff --git a/libraries/core_libs/network/include/network/tarcap/packets/get_next_votes_bundle_packet.hpp b/libraries/core_libs/network/include/network/tarcap/packets/get_next_votes_bundle_packet.hpp new file mode 100644 index 0000000000..853feef923 --- /dev/null +++ b/libraries/core_libs/network/include/network/tarcap/packets/get_next_votes_bundle_packet.hpp @@ -0,0 +1,24 @@ +#pragma once + +#include "common/encoding_rlp.hpp" + +namespace taraxa::network { + +struct 
GetNextVotesBundlePacket { + GetNextVotesBundlePacket() = default; + GetNextVotesBundlePacket(const GetNextVotesBundlePacket&) = default; + GetNextVotesBundlePacket(GetNextVotesBundlePacket&&) = default; + GetNextVotesBundlePacket& operator=(const GetNextVotesBundlePacket&) = default; + GetNextVotesBundlePacket& operator=(GetNextVotesBundlePacket&&) = default; + + GetNextVotesBundlePacket(const dev::RLP& packet_rlp) { *this = util::rlp_dec(packet_rlp); }; + + dev::bytes encode() { return util::rlp_enc(*this); } + + PbftPeriod peer_pbft_period; + PbftRound peer_pbft_round; + + RLP_FIELDS_DEFINE_INPLACE(peer_pbft_period, peer_pbft_round) +}; + +} // namespace taraxa::network diff --git a/libraries/core_libs/network/include/network/tarcap/packets/get_pbft_sync_packet.hpp b/libraries/core_libs/network/include/network/tarcap/packets/get_pbft_sync_packet.hpp new file mode 100644 index 0000000000..5372370914 --- /dev/null +++ b/libraries/core_libs/network/include/network/tarcap/packets/get_pbft_sync_packet.hpp @@ -0,0 +1,22 @@ +#pragma once + +#include "common/encoding_rlp.hpp" + +namespace taraxa::network { + +struct GetPbftSyncPacket { + GetPbftSyncPacket() = default; + GetPbftSyncPacket(const GetPbftSyncPacket&) = default; + GetPbftSyncPacket(GetPbftSyncPacket&&) = default; + GetPbftSyncPacket& operator=(const GetPbftSyncPacket&) = default; + GetPbftSyncPacket& operator=(GetPbftSyncPacket&&) = default; + + GetPbftSyncPacket(const dev::RLP& packet_rlp) { *this = util::rlp_dec(packet_rlp); } + dev::bytes encode() { return util::rlp_enc(*this); } + + size_t height_to_sync; + + RLP_FIELDS_DEFINE_INPLACE(height_to_sync) +}; + +} // namespace taraxa::network diff --git a/libraries/core_libs/network/include/network/tarcap/packets/get_pillar_votes_bundle_packet.hpp b/libraries/core_libs/network/include/network/tarcap/packets/get_pillar_votes_bundle_packet.hpp new file mode 100644 index 0000000000..85483f7d11 --- /dev/null +++ 
b/libraries/core_libs/network/include/network/tarcap/packets/get_pillar_votes_bundle_packet.hpp @@ -0,0 +1,26 @@ +#pragma once + +#include "common/encoding_rlp.hpp" + +namespace taraxa::network { + +struct GetPillarVotesBundlePacket { + GetPillarVotesBundlePacket() = default; + GetPillarVotesBundlePacket(const GetPillarVotesBundlePacket&) = default; + GetPillarVotesBundlePacket(GetPillarVotesBundlePacket&&) = default; + GetPillarVotesBundlePacket& operator=(const GetPillarVotesBundlePacket&) = default; + GetPillarVotesBundlePacket& operator=(GetPillarVotesBundlePacket&&) = default; + + GetPillarVotesBundlePacket(const dev::RLP& packet_rlp) { + *this = util::rlp_dec(packet_rlp); + } + + dev::bytes encode() { return util::rlp_enc(*this); } + + PbftPeriod period; + blk_hash_t pillar_block_hash; + + RLP_FIELDS_DEFINE_INPLACE(period, pillar_block_hash) +}; + +} // namespace taraxa::network diff --git a/libraries/core_libs/network/include/network/tarcap/packets/pbft_sync_packet.hpp b/libraries/core_libs/network/include/network/tarcap/packets/pbft_sync_packet.hpp new file mode 100644 index 0000000000..6ad908fd62 --- /dev/null +++ b/libraries/core_libs/network/include/network/tarcap/packets/pbft_sync_packet.hpp @@ -0,0 +1,49 @@ +#pragma once + +#include "pbft/period_data.hpp" +#include "vote/pbft_vote.hpp" +#include "vote/votes_bundle_rlp.hpp" + +namespace taraxa::network { + +// TODO: create new version of this packet without manual parsing +struct PbftSyncPacket { + PbftSyncPacket(const dev::RLP& packet_rlp) { + if (packet_rlp.itemCount() != kStandardPacketSize && packet_rlp.itemCount() != kChainSyncedPacketSize) { + throw InvalidRlpItemsCountException("PbftSyncPacket", packet_rlp.itemCount(), kStandardPacketSize); + } + + // PeriodData rlp parsing cannot be done through util::rlp_tuple, which automatically checks the rlp size so it is + // checked here manually + if (packet_rlp[1].itemCount() != PeriodData::kBaseRlpItemCount && + packet_rlp[1].itemCount() != 
PeriodData::kExtendedRlpItemCount) { + throw InvalidRlpItemsCountException("PbftSyncPacket:PeriodData", packet_rlp[1].itemCount(), + PeriodData::kBaseRlpItemCount); + } + + // last_block is the flag to indicate this is the last block in each syncing round, doesn't mean PBFT chain has + // synced + last_block = packet_rlp[0].toInt(); + try { + period_data = PeriodData(packet_rlp[1]); + } catch (const std::runtime_error& e) { + throw MaliciousPeerException("Unable to parse PeriodData: " + std::string(e.what())); + } + + // pbft_chain_synced is the flag to indicate own PBFT chain has synced with the peer's PBFT chain + if (packet_rlp.itemCount() == kChainSyncedPacketSize) { + current_block_cert_votes = decodePbftVotesBundleRlp(packet_rlp[2]); + } + }; + + bool last_block; + PeriodData period_data; + std::vector> current_block_cert_votes; + + const size_t kStandardPacketSize = 2; + const size_t kChainSyncedPacketSize = 3; + + // RLP_FIELDS_DEFINE_INPLACE(transactions, dag_block) +}; + +} // namespace taraxa::network diff --git a/libraries/core_libs/network/include/network/tarcap/packets/pillar_vote_packet.hpp b/libraries/core_libs/network/include/network/tarcap/packets/pillar_vote_packet.hpp new file mode 100644 index 0000000000..0abb37d38c --- /dev/null +++ b/libraries/core_libs/network/include/network/tarcap/packets/pillar_vote_packet.hpp @@ -0,0 +1,34 @@ +#pragma once + +#include "common/encoding_rlp.hpp" +#include "network/tarcap/packets_handlers/latest/common/exceptions.hpp" +#include "vote/pillar_vote.hpp" + +namespace taraxa::network { + +struct PillarVotePacket { + PillarVotePacket() = default; + PillarVotePacket(const PillarVotePacket&) = default; + PillarVotePacket(PillarVotePacket&&) = default; + PillarVotePacket& operator=(const PillarVotePacket&) = default; + PillarVotePacket& operator=(PillarVotePacket&&) = default; + + // PillarVotePacket(const dev::RLP& packet_rlp) { *this = util::rlp_dec(packet_rlp); } + // dev::bytes encode() { return 
util::rlp_enc(*this); } + + PillarVotePacket(const dev::RLP& packet_rlp) { + auto items = packet_rlp.itemCount(); + if (items != PillarVote::kStandardRlpSize) { + throw InvalidRlpItemsCountException("PillarVotePacket", items, PillarVote::kStandardRlpSize); + } + + pillar_vote = std::make_shared(packet_rlp); + } + + // TODO: will shared_ptr work ? + std::shared_ptr pillar_vote; + + // RLP_FIELDS_DEFINE_INPLACE(pillar_vote) +}; + +} // namespace taraxa::network diff --git a/libraries/core_libs/network/include/network/tarcap/packets/pillar_votes_bundle_packet.hpp b/libraries/core_libs/network/include/network/tarcap/packets/pillar_votes_bundle_packet.hpp new file mode 100644 index 0000000000..41669dbe71 --- /dev/null +++ b/libraries/core_libs/network/include/network/tarcap/packets/pillar_votes_bundle_packet.hpp @@ -0,0 +1,38 @@ +#pragma once + +#include "common/encoding_rlp.hpp" +#include "network/tarcap/packets_handlers/latest/common/exceptions.hpp" +#include "vote/pillar_vote.hpp" + +namespace taraxa::network { + +struct PillarVotesBundlePacket { + PillarVotesBundlePacket() = default; + PillarVotesBundlePacket(const PillarVotesBundlePacket&) = default; + PillarVotesBundlePacket(PillarVotesBundlePacket&&) = default; + PillarVotesBundlePacket& operator=(const PillarVotesBundlePacket&) = default; + PillarVotesBundlePacket& operator=(PillarVotesBundlePacket&&) = default; + + // PillarVotesBundlePacket(const dev::RLP& packet_rlp) { *this = util::rlp_dec(packet_rlp); + // } dev::bytes encode() { return util::rlp_enc(*this); } + + PillarVotesBundlePacket(const dev::RLP& packet_rlp) { + auto items = packet_rlp.itemCount(); + if (items == 0 || items > kMaxPillarVotesInBundleRlp) { + throw InvalidRlpItemsCountException("PillarVotesBundlePacket", items, kMaxPillarVotesInBundleRlp); + } + + for (const auto vote_rlp : packet_rlp) { + pillar_votes.emplace_back(std::make_shared(vote_rlp)); + } + } + + // TODO: will shared_ptr work ? 
+ std::vector> pillar_votes; + + constexpr static size_t kMaxPillarVotesInBundleRlp{250}; + + // RLP_FIELDS_DEFINE_INPLACE(pillar_votes) +}; + +} // namespace taraxa::network diff --git a/libraries/core_libs/network/include/network/tarcap/packets/status_packet.hpp b/libraries/core_libs/network/include/network/tarcap/packets/status_packet.hpp new file mode 100644 index 0000000000..ebc46b782c --- /dev/null +++ b/libraries/core_libs/network/include/network/tarcap/packets/status_packet.hpp @@ -0,0 +1,54 @@ +#pragma once + +namespace taraxa::network { + +// TODO: create new version of this packet without manual parsing +struct StatusPacket { + StatusPacket(const dev::RLP& packet_rlp) { + if (const auto items_count = packet_rlp.itemCount(); + items_count != kInitialStatusPacketItemsCount && items_count != kStandardStatusPacketItemsCount) { + throw InvalidRlpItemsCountException("StatusPacket", packet_rlp.itemCount(), kStandardStatusPacketItemsCount); + } + + auto it = packet_rlp.begin(); + if (packet_rlp.itemCount() == kInitialStatusPacketItemsCount) { + peer_chain_id = (*it++).toInt(); + peer_dag_level = (*it++).toInt(); + genesis_hash = (*it++).toHash(); + peer_pbft_chain_size = (*it++).toInt(); + peer_syncing = (*it++).toInt(); + peer_pbft_round = (*it++).toInt(); + node_major_version = (*it++).toInt(); + node_minor_version = (*it++).toInt(); + node_patch_version = (*it++).toInt(); + is_light_node = (*it++).toInt(); + node_history = (*it++).toInt(); + } else { + peer_dag_level = (*it++).toInt(); + peer_pbft_chain_size = (*it++).toInt(); + peer_syncing = (*it++).toInt(); + peer_pbft_round = (*it++).toInt(); + } + } + + bool isInitialStatusPacket() const { return peer_chain_id.has_value(); } + + uint64_t peer_dag_level; + PbftPeriod peer_pbft_chain_size; + bool peer_syncing; + PbftRound peer_pbft_round; + std::optional peer_chain_id; + std::optional genesis_hash; + std::optional node_major_version; + std::optional node_minor_version; + std::optional node_patch_version; + 
std::optional is_light_node; + std::optional node_history; + + const uint16_t kInitialStatusPacketItemsCount = 11; + const uint16_t kStandardStatusPacketItemsCount = 4; + + // RLP_FIELDS_DEFINE_INPLACE(transactions, dag_block) +}; + +} // namespace taraxa::network diff --git a/libraries/core_libs/network/include/network/tarcap/packets/transaction_packet.hpp b/libraries/core_libs/network/include/network/tarcap/packets/transaction_packet.hpp new file mode 100644 index 0000000000..7e94bc6a9a --- /dev/null +++ b/libraries/core_libs/network/include/network/tarcap/packets/transaction_packet.hpp @@ -0,0 +1,55 @@ +#pragma once + +#include "transaction/transaction.hpp" + +namespace taraxa::network { + +// TODO: create new version of this packet without manual parsing +struct TransactionPacket { + TransactionPacket(const dev::RLP& packet_rlp) { + auto items = packet_rlp.itemCount(); + if (items != kTransactionPacketItemCount) { + throw InvalidRlpItemsCountException("TransactionPacket", items, kTransactionPacketItemCount); + } + auto hashes_count = packet_rlp[0].itemCount(); + auto trx_count = packet_rlp[1].itemCount(); + + if (hashes_count < trx_count) { + throw InvalidRlpItemsCountException("TransactionPacket", hashes_count, trx_count); + } + if (hashes_count == 0 || hashes_count > kMaxTransactionsInPacket + kMaxHashesInPacket) { + throw InvalidRlpItemsCountException("TransactionPacket", hashes_count, + kMaxTransactionsInPacket + kMaxHashesInPacket); + } + + if (trx_count > kMaxTransactionsInPacket) { + throw InvalidRlpItemsCountException("TransactionPacket", trx_count, kMaxTransactionsInPacket); + } + + // TODO: these hashes do not make sense after separating parsing + // // First extract only transaction hashes + // for (const auto tx_hash_rlp : packet_rlp[0]) { + // auto trx_hash = tx_hash_rlp.toHash(); + // txs_hashes.emplace_back(std::move(trx_hash)); + // } + + for (const auto tx_rlp : packet_rlp[1]) { + try { + auto tx = std::make_shared(tx_rlp.data().toBytes()); + 
transactions.emplace_back(std::move(tx)); + } catch (const Transaction::InvalidTransaction& e) { + throw MaliciousPeerException("Unable to parse transaction: " + std::string(e.what())); + } + } + }; + + std::vector> transactions; + + const uint32_t kTransactionPacketItemCount = 2; + const uint32_t kMaxTransactionsInPacket{500}; + const uint32_t kMaxHashesInPacket{5000}; + + // RLP_FIELDS_DEFINE_INPLACE(transactions, dag_block) +}; + +} // namespace taraxa::network diff --git a/libraries/core_libs/network/include/network/tarcap/packets/vote_packet.hpp b/libraries/core_libs/network/include/network/tarcap/packets/vote_packet.hpp new file mode 100644 index 0000000000..ae641d5e85 --- /dev/null +++ b/libraries/core_libs/network/include/network/tarcap/packets/vote_packet.hpp @@ -0,0 +1,38 @@ +#pragma once + +#include "pbft/pbft_block.hpp" +#include "vote/pbft_vote.hpp" + +namespace taraxa::network { + +// TODO: create new version of this packet without manual parsing +struct VotePacket { + VotePacket(const dev::RLP& packet_rlp) { + auto items = packet_rlp.itemCount(); + // Vote packet can contain either just a vote or vote + block + peer_chain_size + if (items != kVotePacketSize && items != kExtendedVotePacketSize) { + throw InvalidRlpItemsCountException("VotePacket", items, kExtendedVotePacketSize); + } + + vote = std::make_shared(packet_rlp[0]); + if (const size_t item_count = packet_rlp.itemCount(); item_count == kExtendedVotePacketSize) { + try { + pbft_block = std::make_shared(packet_rlp[1]); + } catch (const std::exception& e) { + throw MaliciousPeerException(e.what()); + } + peer_chain_size = packet_rlp[2].toInt(); + } + }; + + std::shared_ptr vote; + std::shared_ptr pbft_block; + std::optional peer_chain_size; + + const size_t kVotePacketSize{1}; + const size_t kExtendedVotePacketSize{3}; + + // RLP_FIELDS_DEFINE_INPLACE(transactions, dag_block) +}; + +} // namespace taraxa::network diff --git 
a/libraries/core_libs/network/include/network/tarcap/packets/votes_bundle_packet.hpp b/libraries/core_libs/network/include/network/tarcap/packets/votes_bundle_packet.hpp new file mode 100644 index 0000000000..cc3b3b4ddc --- /dev/null +++ b/libraries/core_libs/network/include/network/tarcap/packets/votes_bundle_packet.hpp @@ -0,0 +1,45 @@ +#pragma once + +#include "vote/pbft_vote.hpp" + +namespace taraxa::network { + +// TODO: create new version of this packet without manual parsing +struct VotesBundlePacket { + VotesBundlePacket(const dev::RLP& packet_rlp) { + auto items = packet_rlp.itemCount(); + if (items != kPbftVotesBundleRlpSize) { + throw InvalidRlpItemsCountException("VotesBundlePacket", items, kPbftVotesBundleRlpSize); + } + + auto votes_count = packet_rlp[kPbftVotesBundleRlpSize - 1].itemCount(); + if (votes_count == 0 || votes_count > kMaxVotesInBundleRlp) { + throw InvalidRlpItemsCountException("VotesBundlePacket", items, kMaxVotesInBundleRlp); + } + + votes_bundle_block_hash = packet_rlp[0].toHash(); + votes_bundle_pbft_period = packet_rlp[1].toInt(); + votes_bundle_pbft_round = packet_rlp[2].toInt(); + votes_bundle_votes_step = packet_rlp[3].toInt(); + + for (const auto vote_rlp : packet_rlp[4]) { + auto vote = std::make_shared(votes_bundle_block_hash, votes_bundle_pbft_period, votes_bundle_pbft_round, + votes_bundle_votes_step, vote_rlp); + votes.emplace_back(std::move(vote)); + } + }; + + blk_hash_t votes_bundle_block_hash; + PbftPeriod votes_bundle_pbft_period; + PbftRound votes_bundle_pbft_round; + PbftStep votes_bundle_votes_step; + std::vector> votes; + + const size_t kMaxVotesInBundleRlp{1000}; + + // TODO: votes size must be <1, limit> + // RLP_FIELDS_DEFINE_INPLACE(votes_bundle_block_hash, votes_bundle_pbft_period, votes_bundle_pbft_round, + // votes_bundle_votes_step, votes) +}; + +} // namespace taraxa::network diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handler.hpp 
b/libraries/core_libs/network/include/network/tarcap/packets_handler.hpp index 2e1f01cf67..8839a7c615 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets_handler.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets_handler.hpp @@ -3,7 +3,7 @@ #include #include -#include "network/tarcap/packets_handlers/latest/common/packet_handler.hpp" +#include "network/tarcap/packets_handlers/latest/common/base_packet_handler.hpp" namespace taraxa::network::tarcap { @@ -18,9 +18,9 @@ class PacketsHandler { * control the lifetime of the object or not. * * @param packet_type - * @return reference to std::shared_ptr + * @return reference to std::shared_ptr */ - const std::shared_ptr& getSpecificHandler(SubprotocolPacketType packet_type) const; + const std::shared_ptr& getSpecificHandler(SubprotocolPacketType packet_type) const; /** * @brief templated getSpecificHandler method for getting specific packet handler based on @@ -44,7 +44,7 @@ class PacketsHandler { private: // Map of all packets handlers, factory method selects specific packet handler for processing based on packet type - std::unordered_map> packets_handlers_; + std::unordered_map> packets_handlers_; }; template diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/common/base_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/common/base_packet_handler.hpp new file mode 100644 index 0000000000..8a67bab338 --- /dev/null +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/common/base_packet_handler.hpp @@ -0,0 +1,28 @@ +#pragma once + +#include "network/threadpool/packet_data.hpp" + +namespace taraxa::network::tarcap { + +/** + * @brief Base Packet handler base class that consists processPacket function + */ +class BasePacketHandler { + public: + BasePacketHandler() = default; + virtual ~BasePacketHandler() = default; + BasePacketHandler(const BasePacketHandler&) = default; + 
BasePacketHandler(BasePacketHandler&&) = default; + BasePacketHandler& operator=(const BasePacketHandler&) = default; + BasePacketHandler& operator=(BasePacketHandler&&) = default; + + /** + * @brief Packet processing function wrapper + * + * @param packet_data + */ + // TODO: use unique_ptr for packet data for easier & quicker copying + virtual void processPacket(const threadpool::PacketData& packet_data) = 0; +}; + +} // namespace taraxa::network::tarcap diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/common/ext_pillar_vote_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/common/ext_pillar_vote_packet_handler.hpp index 91bc38fd27..ab87d68089 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/common/ext_pillar_vote_packet_handler.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/common/ext_pillar_vote_packet_handler.hpp @@ -1,32 +1,42 @@ #pragma once #include "packet_handler.hpp" - -namespace taraxa { -class KeyManager; -class PillarVote; - -namespace pillar_chain { -class PillarChainManager; -} // namespace pillar_chain - -namespace final_chain { -class FinalChain; -} - -} // namespace taraxa +#include "pillar_chain/pillar_chain_manager.hpp" namespace taraxa::network::tarcap { -class ExtPillarVotePacketHandler : public PacketHandler { +template +class ExtPillarVotePacketHandler : public PacketHandler { public: ExtPillarVotePacketHandler(const FullNodeConfig& conf, std::shared_ptr peers_state, std::shared_ptr packets_stats, std::shared_ptr pillar_chain_manager, - const addr_t& node_addr, const std::string& log_channel); + const addr_t& node_addr, const std::string& log_channel) + : PacketHandler(conf, std::move(peers_state), std::move(packets_stats), node_addr, log_channel), + pillar_chain_manager_{std::move(pillar_chain_manager)} {} protected: - bool processPillarVote(const std::shared_ptr& vote, const 
std::shared_ptr& peer); + bool processPillarVote(const std::shared_ptr& vote, const std::shared_ptr& peer) { + if (!pillar_chain_manager_->isRelevantPillarVote(vote)) { + LOG(this->log_dg_) << "Drop irrelevant pillar vote " << vote->getHash() << ", period " << vote->getPeriod() + << " from peer " << peer->getId(); + return false; + } + + if (!pillar_chain_manager_->validatePillarVote(vote)) { + // TODO: enable for mainnet + // std::ostringstream err_msg; + // err_msg << "Invalid pillar vote " << vote->getHash() << " from peer " << peer->getId(); + // throw MaliciousPeerException(err_msg.str()); + return false; + } + + pillar_chain_manager_->addVerifiedPillarVote(vote); + + // Mark pillar vote as known for peer + peer->markPillarVoteAsKnown(vote->getHash()); + return true; + } protected: std::shared_ptr pillar_chain_manager_; diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/common/ext_syncing_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/common/ext_syncing_packet_handler.hpp index fa79bffaad..06c4e61317 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/common/ext_syncing_packet_handler.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/common/ext_syncing_packet_handler.hpp @@ -1,30 +1,31 @@ #pragma once #include "dag/dag_manager.hpp" +#include "network/tarcap/shared_states/pbft_syncing_state.hpp" #include "packet_handler.hpp" - -namespace taraxa { -class PbftChain; -class PbftManager; -class DagManager; -class DbStorage; -} // namespace taraxa +#include "pbft/pbft_chain.hpp" +#include "pbft/pbft_manager.hpp" namespace taraxa::network::tarcap { -class PbftSyncingState; - /** * @brief ExtSyncingPacketHandler is extended abstract PacketHandler with added functions that are used in packet * handlers that need to interact with syncing process in some way */ -class ExtSyncingPacketHandler : public PacketHandler { 
+template +class ExtSyncingPacketHandler : public PacketHandler { public: ExtSyncingPacketHandler(const FullNodeConfig &conf, std::shared_ptr peers_state, std::shared_ptr packets_stats, std::shared_ptr pbft_syncing_state, std::shared_ptr pbft_chain, std::shared_ptr pbft_mgr, std::shared_ptr dag_mgr, - std::shared_ptr db, const addr_t &node_addr, const std::string &log_channel_name); + std::shared_ptr db, const addr_t &node_addr, const std::string &log_channel_name) + : PacketHandler(conf, std::move(peers_state), std::move(packets_stats), node_addr, log_channel_name), + pbft_syncing_state_(std::move(pbft_syncing_state)), + pbft_chain_(std::move(pbft_chain)), + pbft_mgr_(std::move(pbft_mgr)), + dag_mgr_(std::move(dag_mgr)), + db_(std::move(db)) {} virtual ~ExtSyncingPacketHandler() = default; ExtSyncingPacketHandler &operator=(const ExtSyncingPacketHandler &) = delete; @@ -34,7 +35,44 @@ class ExtSyncingPacketHandler : public PacketHandler { * @brief Start syncing pbft if needed * */ - void startSyncingPbft(); + void startSyncingPbft() { + if (pbft_syncing_state_->isPbftSyncing()) { + LOG(this->log_dg_) << "startSyncingPbft called but syncing_ already true"; + return; + } + + std::shared_ptr peer = getMaxChainPeer(); + if (!peer) { + LOG(this->log_nf_) << "Restarting syncing PBFT not possible since no connected peers"; + return; + } + + auto pbft_sync_period = pbft_mgr_->pbftSyncingPeriod(); + if (peer->pbft_chain_size_ > pbft_sync_period) { + auto peer_id = peer->getId().abridged(); + auto peer_pbft_chain_size = peer->pbft_chain_size_.load(); + if (!pbft_syncing_state_->setPbftSyncing(true, pbft_sync_period, std::move(peer))) { + LOG(this->log_dg_) << "startSyncingPbft called but syncing_ already true"; + return; + } + LOG(this->log_si_) << "Restarting syncing PBFT from peer " << peer_id << ", peer PBFT chain size " + << peer_pbft_chain_size << ", own PBFT chain synced at period " << pbft_sync_period; + + if (syncPeerPbft(pbft_sync_period + 1)) { + // Disable 
snapshots only if are syncing from scratch + if (pbft_syncing_state_->isDeepPbftSyncing()) { + db_->disableSnapshots(); + } + } else { + pbft_syncing_state_->setPbftSyncing(false); + } + } else { + LOG(this->log_nf_) << "Restarting syncing PBFT not needed since our pbft chain size: " << pbft_sync_period << "(" + << pbft_chain_->getPbftChainSize() << ")" + << " is greater or equal than max node pbft chain size:" << peer->pbft_chain_size_; + db_->enableSnapshots(); + } + } /** * @brief Send sync request to the current syncing peer with specified request_period @@ -43,14 +81,112 @@ class ExtSyncingPacketHandler : public PacketHandler { * * @return true if sync request was sent, otherwise false */ - bool syncPeerPbft(PbftPeriod request_period); + bool syncPeerPbft(PbftPeriod request_period) { + const auto syncing_peer = pbft_syncing_state_->syncingPeer(); + if (!syncing_peer) { + LOG(this->log_er_) << "Unable to send GetPbftSyncPacket. No syncing peer set."; + return false; + } + + if (request_period > syncing_peer->pbft_chain_size_) { + LOG(this->log_wr_) << "Invalid syncPeerPbft argument. 
Node " << syncing_peer->getId() << " chain size " + << syncing_peer->pbft_chain_size_ << ", requested period " << request_period; + return false; + } + + LOG(this->log_nf_) << "Send GetPbftSyncPacket with period " << request_period << " to node " + << syncing_peer->getId(); + return this->sealAndSend(syncing_peer->getId(), SubprotocolPacketType::kGetPbftSyncPacket, + std::move(dev::RLPStream(1) << request_period)); + } void requestDagBlocks(const dev::p2p::NodeID &_nodeID, const std::unordered_set &blocks, - PbftPeriod period); - void requestPendingDagBlocks(std::shared_ptr peer = nullptr); + PbftPeriod period) { + dev::RLPStream s(2); // Period + blocks list + s.append(period); + s.append(blocks); + + this->sealAndSend(_nodeID, SubprotocolPacketType::kGetDagSyncPacket, std::move(s)); + } + + void requestPendingDagBlocks(std::shared_ptr peer = nullptr) { + if (!peer) { + peer = getMaxChainPeer([](const std::shared_ptr &peer) { + if (peer->peer_dag_synced_ || !peer->dagSyncingAllowed()) { + return false; + } + return true; + }); + if (!peer) { + LOG(this->log_nf_) << "requestPendingDagBlocks not possible since no peers are matching conditions"; + return; + } + } + + if (!peer) { + LOG(this->log_nf_) << "requestPendingDagBlocks not possible since no connected peers"; + return; + } + + // This prevents ddos requesting dag blocks. We can only request this one time from one peer. 
+ if (peer->peer_dag_synced_) { + LOG(this->log_nf_) << "requestPendingDagBlocks not possible since already requested for peer"; + return; + } + + // Only request dag blocks if periods are matching + auto pbft_sync_period = pbft_mgr_->pbftSyncingPeriod(); + if (pbft_sync_period == peer->pbft_chain_size_) { + // This prevents parallel requests + if (bool b = false; !peer->peer_dag_syncing_.compare_exchange_strong(b, !b)) { + LOG(this->log_nf_) << "requestPendingDagBlocks not possible since already requesting for peer"; + return; + } + LOG(this->log_nf_) << "Request pending blocks from peer " << peer->getId(); + std::unordered_set known_non_finalized_blocks; + auto [period, blocks] = dag_mgr_->getNonFinalizedBlocks(); + for (auto &level_blocks : blocks) { + for (auto &block : level_blocks.second) { + known_non_finalized_blocks.insert(block); + } + } + + requestDagBlocks(peer->getId(), known_non_finalized_blocks, period); + } + } std::shared_ptr getMaxChainPeer(std::function &)> filter_func = - [](const std::shared_ptr &) { return true; }); + [](const std::shared_ptr &) { return true; }) { + std::shared_ptr max_pbft_chain_peer; + PbftPeriod max_pbft_chain_size = 0; + uint64_t max_node_dag_level = 0; + + // Find peer with max pbft chain and dag level + for (auto const &peer : this->peers_state_->getAllPeers()) { + // Apply the filter function + if (!filter_func(peer.second)) { + continue; + } + + if (peer.second->pbft_chain_size_ > max_pbft_chain_size) { + if (peer.second->peer_light_node && + pbft_mgr_->pbftSyncingPeriod() + peer.second->peer_light_node_history < peer.second->pbft_chain_size_) { + LOG(this->log_er_) << "Disconnecting from light node peer " << peer.first + << " History: " << peer.second->peer_light_node_history + << " chain size: " << peer.second->pbft_chain_size_; + this->disconnect(peer.first, dev::p2p::UserReason); + continue; + } + max_pbft_chain_size = peer.second->pbft_chain_size_; + max_node_dag_level = peer.second->dag_level_; + 
max_pbft_chain_peer = peer.second; + } else if (peer.second->pbft_chain_size_ == max_pbft_chain_size && peer.second->dag_level_ > max_node_dag_level) { + max_node_dag_level = peer.second->dag_level_; + max_pbft_chain_peer = peer.second; + } + } + return max_pbft_chain_peer; + } protected: std::shared_ptr pbft_syncing_state_{nullptr}; diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/common/ext_votes_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/common/ext_votes_packet_handler.hpp index 62dc18f0cc..8e9e50398e 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/common/ext_votes_packet_handler.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/common/ext_votes_packet_handler.hpp @@ -1,15 +1,11 @@ #pragma once +#include "network/tarcap/packets_handlers/latest/common/exceptions.hpp" #include "packet_handler.hpp" - -namespace taraxa { -class PbftVote; -class PbftManager; -class PbftChain; -class PbftBlock; -class VoteManager; -class SlashingManager; -} // namespace taraxa +#include "pbft/pbft_manager.hpp" +#include "vote/pbft_vote.hpp" +#include "vote/votes_bundle_rlp.hpp" +#include "vote_manager/vote_manager.hpp" namespace taraxa::network::tarcap { @@ -17,13 +13,21 @@ namespace taraxa::network::tarcap { * @brief ExtVotesPacketHandler is extended abstract PacketHandler with added functions that are used in packet * handlers that process pbft votes */ -class ExtVotesPacketHandler : public PacketHandler { +template +class ExtVotesPacketHandler : public PacketHandler { public: ExtVotesPacketHandler(const FullNodeConfig& conf, std::shared_ptr peers_state, std::shared_ptr packets_stats, std::shared_ptr pbft_mgr, std::shared_ptr pbft_chain, std::shared_ptr vote_mgr, std::shared_ptr slashing_manager, const addr_t& node_addr, - const std::string& log_channel_name); + const std::string& log_channel_name) + : 
PacketHandler(conf, std::move(peers_state), std::move(packets_stats), node_addr, log_channel_name), + last_votes_sync_request_time_(std::chrono::system_clock::now()), + last_pbft_block_sync_request_time_(std::chrono::system_clock::now()), + pbft_mgr_(std::move(pbft_mgr)), + pbft_chain_(std::move(pbft_chain)), + vote_mgr_(std::move(vote_mgr)), + slashing_manager_(std::move(slashing_manager)) {} virtual ~ExtVotesPacketHandler() = default; ExtVotesPacketHandler(const ExtVotesPacketHandler&) = delete; @@ -41,17 +45,117 @@ class ExtVotesPacketHandler : public PacketHandler { * @return if vote was successfully processed, otherwise false */ bool processVote(const std::shared_ptr& vote, const std::shared_ptr& pbft_block, - const std::shared_ptr& peer, bool validate_max_round_step); + const std::shared_ptr& peer, bool validate_max_round_step) { + if (pbft_block && !validateVoteAndBlock(vote, pbft_block)) { + throw MaliciousPeerException("Received vote's voted value != received pbft block"); + } + + if (vote_mgr_->voteInVerifiedMap(vote)) { + LOG(this->log_dg_) << "Vote " << vote->getHash() << " already inserted in verified queue"; + return false; + } + + // Validate vote's period, round and step min/max values + if (const auto vote_valid = validateVotePeriodRoundStep(vote, peer, validate_max_round_step); !vote_valid.first) { + LOG(this->log_wr_) << "Vote period/round/step " << vote->getHash() + << " validation failed. Err: " << vote_valid.second; + return false; + } + + // Check is vote is unique per period, round & step & voter -> each address can generate just 1 vote + // (for a value that isn't NBH) per period, round & step + if (auto vote_valid = vote_mgr_->isUniqueVote(vote); !vote_valid.first) { + // Create double voting proof + slashing_manager_->submitDoubleVotingProof(vote, vote_valid.second); + throw MaliciousPeerException("Received double vote", vote->getVoter()); + } + + // Validate vote's signature, vrf, etc... 
+ if (const auto vote_valid = vote_mgr_->validateVote(vote); !vote_valid.first) { + LOG(this->log_wr_) << "Vote " << vote->getHash() << " validation failed. Err: " << vote_valid.second; + return false; + } + + if (!vote_mgr_->addVerifiedVote(vote)) { + LOG(this->log_dg_) << "Vote " << vote->getHash() << " already inserted in verified queue(race condition)"; + return false; + } + + if (pbft_block) { + pbft_mgr_->processProposedBlock(pbft_block, vote); + } + + return true; + } /** * @brief Checks is vote is relevant for current pbft state in terms of period, round and type * @param vote * @return true if vote is relevant for current pbft state, otherwise false */ - bool isPbftRelevantVote(const std::shared_ptr& vote) const; + bool isPbftRelevantVote(const std::shared_ptr& vote) const { + const auto [current_pbft_round, current_pbft_period] = pbft_mgr_->getPbftRoundAndPeriod(); + + if (vote->getPeriod() >= current_pbft_period && vote->getRound() >= current_pbft_round) { + // Standard current or future vote + return true; + } else if (vote->getPeriod() == current_pbft_period && vote->getRound() == (current_pbft_round - 1) && + vote->getType() == PbftVoteTypes::next_vote) { + // Previous round next vote + return true; + } else if (vote->getPeriod() == current_pbft_period - 1 && vote->getType() == PbftVoteTypes::cert_vote) { + // Previous period cert vote - potential reward vote + return true; + } + + return false; + } virtual void sendPbftVotesBundle(const std::shared_ptr& peer, - std::vector>&& votes); + std::vector>&& votes) { + if (votes.empty()) { + return; + } + + auto sendVotes = [this, &peer](std::vector>&& votes) { + auto votes_bytes = encodePbftVotesBundleRlp(std::move(votes)); + if (votes_bytes.empty()) { + LOG(this->log_er_) << "Unable to send VotesBundle rlp"; + return; + } + + dev::RLPStream votes_rlp_stream; + votes_rlp_stream.appendRaw(votes_bytes); + + if (this->sealAndSend(peer->getId(), SubprotocolPacketType::kVotesBundlePacket, 
std::move(votes_rlp_stream))) { + LOG(this->log_dg_) << " Votes bundle with " << votes.size() << " votes sent to " << peer->getId(); + for (const auto& vote : votes) { + peer->markPbftVoteAsKnown(vote->getHash()); + } + } + }; + + if (votes.size() <= kMaxVotesInBundleRlp) { + sendVotes(std::move(votes)); + return; + } else { + // Need to split votes into multiple packets + size_t index = 0; + while (index < votes.size()) { + const size_t votes_count = std::min(kMaxVotesInBundleRlp, votes.size() - index); + + const auto begin_it = std::next(votes.begin(), index); + const auto end_it = std::next(begin_it, votes_count); + + std::vector> votes_sub_vector; + std::move(begin_it, end_it, std::back_inserter(votes_sub_vector)); + + sendVotes(std::move(votes_sub_vector)); + + index += votes_count; + } + } + } private: /** @@ -64,7 +168,86 @@ class ExtVotesPacketHandler : public PacketHandler { */ std::pair validateVotePeriodRoundStep(const std::shared_ptr& vote, const std::shared_ptr& peer, - bool validate_max_round_step); + bool validate_max_round_step) { + const auto [current_pbft_round, current_pbft_period] = pbft_mgr_->getPbftRoundAndPeriod(); + + auto genErrMsg = [period = current_pbft_period, round = current_pbft_round, + step = pbft_mgr_->getPbftStep()](const std::shared_ptr& vote) -> std::string { + std::stringstream err; + err << "Vote " << vote->getHash() << " (period, round, step) = (" << vote->getPeriod() << ", " << vote->getRound() + << ", " << vote->getStep() << "). 
Current PBFT (period, round, step) = (" << period << ", " << round << ", " + << step << ")"; + return err.str(); + }; + + // Period validation + // vote->getPeriod() == current_pbft_period - 1 && cert_vote -> potential reward vote + if (vote->getPeriod() < current_pbft_period - 1 || + (vote->getPeriod() == current_pbft_period - 1 && vote->getType() != PbftVoteTypes::cert_vote)) { + return {false, "Invalid period(too small): " + genErrMsg(vote)}; + } else if (this->kConf.network.ddos_protection.vote_accepting_periods && + vote->getPeriod() - 1 > + current_pbft_period + this->kConf.network.ddos_protection.vote_accepting_periods) { + // skip this check if kConf.network.ddos_protection.vote_accepting_periods == 0 + // vote->getPeriod() - 1 is here because votes are validated against vote_period - 1 in dpos contract + // Do not request round sync too often here + if (vote->getVoter() == peer->getId() && + std::chrono::system_clock::now() - last_pbft_block_sync_request_time_ > kSyncRequestInterval) { + // request PBFT chain sync from this node + this->sealAndSend( + peer->getId(), SubprotocolPacketType::kGetPbftSyncPacket, + std::move(dev::RLPStream(1) << std::max(vote->getPeriod() - 1, peer->pbft_chain_size_.load()))); + last_pbft_block_sync_request_time_ = std::chrono::system_clock::now(); + } + + return {false, "Invalid period(too big): " + genErrMsg(vote)}; + } + + // Round validation + auto checking_round = current_pbft_round; + // If period is not the same we assume current round is equal to 1 + // So we won't accept votes for future period with round bigger than kConf.network.vote_accepting_steps + if (current_pbft_period != vote->getPeriod()) { + checking_round = 1; + } + + // vote->getRound() == checking_round - 1 && next_vote -> previous round next vote + if (vote->getRound() < checking_round - 1 || + (vote->getRound() == checking_round - 1 && vote->getType() != PbftVoteTypes::next_vote)) { + return {false, "Invalid round(too small): " + genErrMsg(vote)}; + } 
else if (validate_max_round_step && this->kConf.network.ddos_protection.vote_accepting_rounds && + vote->getRound() >= checking_round + this->kConf.network.ddos_protection.vote_accepting_rounds) { + // skip this check if kConf.network.vote_accepting_rounds == 0 + // Trigger votes(round) syncing only if we are in sync in terms of period + if (current_pbft_period == vote->getPeriod()) { + // Do not request round sync too often here + if (vote->getVoter() == peer->getId() && + std::chrono::system_clock::now() - last_votes_sync_request_time_ > kSyncRequestInterval) { + // request round votes sync from this node + this->requestPbftNextVotesAtPeriodRound(peer->getId(), current_pbft_period, current_pbft_round); + last_votes_sync_request_time_ = std::chrono::system_clock::now(); + } + } + + return {false, "Invalid round(too big): " + genErrMsg(vote)}; + } + + // Step validation + auto checking_step = pbft_mgr_->getPbftStep(); + // If period or round is not the same we assume current step is equal to 1 + // So we won't accept votes for future rounds with step bigger than kConf.network.vote_accepting_steps + if (current_pbft_period != vote->getPeriod() || current_pbft_round != vote->getRound()) { + checking_step = 1; + } + + // skip check if kConf.network.vote_accepting_steps == 0 + if (validate_max_round_step && this->kConf.network.ddos_protection.vote_accepting_steps && + vote->getStep() >= checking_step + this->kConf.network.ddos_protection.vote_accepting_steps) { + return {false, "Invalid step(too big): " + genErrMsg(vote)}; + } + + return {true, ""}; + } /** * @brief Validates provided vote if voted value == provided block @@ -73,7 +256,14 @@ class ExtVotesPacketHandler : public PacketHandler { * @param pbft_block * @return true if validation successful, otherwise false */ - bool validateVoteAndBlock(const std::shared_ptr& vote, const std::shared_ptr& pbft_block) const; + bool validateVoteAndBlock(const std::shared_ptr& vote, const std::shared_ptr& pbft_block) const { + 
if (pbft_block->getBlockHash() != vote->getBlockHash()) { + LOG(this->log_er_) << "Vote " << vote->getHash() << " voted block " << vote->getBlockHash() << " != actual block " + << pbft_block->getBlockHash(); + return false; + } + return true; + } protected: constexpr static size_t kMaxVotesInBundleRlp{1000}; diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/common/packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/common/packet_handler.hpp index 0eb23e7d34..af6e0d3a4a 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/common/packet_handler.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/common/packet_handler.hpp @@ -8,7 +8,10 @@ #include "exceptions.hpp" #include "logger/logger.hpp" #include "network/tarcap/packet_types.hpp" +#include "network/tarcap/packets_handlers/latest/common/base_packet_handler.hpp" +#include "network/tarcap/packets_handlers/latest/common/exceptions.hpp" #include "network/tarcap/shared_states/peers_state.hpp" +#include "network/tarcap/stats/time_period_packets_stats.hpp" #include "network/tarcap/taraxa_peer.hpp" #include "network/threadpool/packet_data.hpp" @@ -17,16 +20,19 @@ namespace taraxa::network::tarcap { // Taraxa capability name constexpr char TARAXA_CAPABILITY_NAME[] = "taraxa"; -class TimePeriodPacketsStats; - /** * @brief Packet handler base class that consists of shared state and some commonly used functions */ -class PacketHandler { +template +class PacketHandler : public BasePacketHandler { public: PacketHandler(const FullNodeConfig& conf, std::shared_ptr peers_state, std::shared_ptr packets_stats, const addr_t& node_addr, - const std::string& log_channel_name); + const std::string& log_channel_name) + : kConf(conf), peers_state_(std::move(peers_state)), packets_stats_(std::move(packets_stats)) { + LOG_OBJECTS_CREATE(log_channel_name); + } + virtual ~PacketHandler() = 
default; PacketHandler(const PacketHandler&) = default; PacketHandler(PacketHandler&&) = default; @@ -38,27 +44,91 @@ class PacketHandler { * * @param packet_data */ - void processPacket(const threadpool::PacketData& packet_data); - - void requestPbftNextVotesAtPeriodRound(const dev::p2p::NodeID& peerID, PbftPeriod pbft_period, PbftRound pbft_round); + // TODO: use unique_ptr for packet data for easier & quicker copying + void processPacket(const threadpool::PacketData& packet_data) override { + try { + const auto begin = std::chrono::steady_clock::now(); + + // It can rarely happen that packet was received and pushed into the queue when peer was still in peers map, + // in the meantime the connection was lost and we started to process packet from such peer + const auto peer = peers_state_->getPacketSenderPeer(packet_data.from_node_id_, packet_data.type_); + if (!peer.first) [[unlikely]] { + LOG(log_wr_) << "Unable to process packet. Reason: " << peer.second; + disconnect(packet_data.from_node_id_, dev::p2p::UserReason); + return; + } + + // TODO: can be removed after taraxa net version is completely switched to 5 + checkPacketRlpIsList(packet_data); + + // Main processing function + process(PacketType{packet_data.rlp_}, peer.first); + + auto processing_duration = + std::chrono::duration_cast(std::chrono::steady_clock::now() - begin); + auto tp_wait_duration = std::chrono::duration_cast(std::chrono::steady_clock::now() - + packet_data.receive_time_); + + PacketStats packet_stats{1 /* count */, packet_data.rlp_.data().size(), processing_duration, tp_wait_duration}; + peer.first->addSentPacket(packet_data.type_str_, packet_stats); + + if (kConf.network.ddos_protection.log_packets_stats) { + packets_stats_->addReceivedPacket(packet_data.type_str_, packet_data.from_node_id_, packet_stats); + } + + } catch (const MaliciousPeerException& e) { + // thrown during packets processing -> malicious peer, invalid rlp items count, ... 
+ // If there is custom peer set in exception, disconnect him, not packet sender + if (const auto custom_peer = e.getPeer(); custom_peer.has_value()) { + handle_caught_exception(e.what(), packet_data, *custom_peer, e.getDisconnectReason(), + true /* set peer as malicious */); + } else { + handle_caught_exception(e.what(), packet_data, packet_data.from_node_id_, e.getDisconnectReason(), + true /* set peer as malicious */); + } + } catch (const PacketProcessingException& e) { + // thrown during packets processing... + handle_caught_exception(e.what(), packet_data, packet_data.from_node_id_, e.getDisconnectReason(), + true /* set peer as malicious */); + } catch (const dev::RLPException& e) { + // thrown during parsing inside aleth/libdevcore -> type mismatch + handle_caught_exception(e.what(), packet_data, packet_data.from_node_id_, dev::p2p::DisconnectReason::BadProtocol, + true /* set peer as malicious */); + } catch (const std::exception& e) { + handle_caught_exception(e.what(), packet_data, packet_data.from_node_id_); + } catch (...) { + handle_caught_exception("Unknown exception", packet_data, packet_data.from_node_id_); + } + } + + // TODO: probbaly should not be here but in specific packet class ??? 
+ void requestPbftNextVotesAtPeriodRound(const dev::p2p::NodeID& peerID, PbftPeriod pbft_period, PbftRound pbft_round) { + LOG(log_dg_) << "Sending GetNextVotesSyncPacket with period:" << pbft_period << ", round:" << pbft_round; + // TODO: use packet class instead of manually creating rlp + sealAndSend(peerID, SubprotocolPacketType::kGetNextVotesSyncPacket, + std::move(dev::RLPStream(2) << pbft_period << pbft_round)); + } private: void handle_caught_exception(std::string_view exception_msg, const threadpool::PacketData& packet_data, const dev::p2p::NodeID& peer, dev::p2p::DisconnectReason disconnect_reason = dev::p2p::DisconnectReason::UserReason, - bool set_peer_as_malicious = false); + bool set_peer_as_malicious = false) { + LOG(log_er_) << "Exception caught during packet processing: " << exception_msg << " ." + << "PacketData: " << jsonToUnstyledString(packet_data.getPacketDataJson()) + << ", disconnect peer: " << peer.toString(); - /** - * @brief Main packet processing function - */ - virtual void process(const threadpool::PacketData& packet_data, const std::shared_ptr& peer) = 0; + if (set_peer_as_malicious) { + peers_state_->set_peer_malicious(peer); + } + + disconnect(peer, disconnect_reason); + } /** - * @brief Validates packet rlp format - items count - * - * @throws InvalidRlpItemsCountException exception + * @brief Main packet processing function */ - virtual void validatePacketRlpFormat(const threadpool::PacketData& packet_data) const = 0; + virtual void process(PacketType&& packet, const std::shared_ptr& peer) = 0; protected: /** @@ -67,10 +137,53 @@ class PacketHandler { * @param packet_data * @throws InvalidRlpItemsCountException exception */ - void checkPacketRlpIsList(const threadpool::PacketData& packet_data) const; - - bool sealAndSend(const dev::p2p::NodeID& nodeID, SubprotocolPacketType packet_type, dev::RLPStream&& rlp); - void disconnect(const dev::p2p::NodeID& node_id, dev::p2p::DisconnectReason reason); + void checkPacketRlpIsList(const 
threadpool::PacketData& packet_data) const { + if (!packet_data.rlp_.isList()) { + throw InvalidRlpItemsCountException(packet_data.type_str_ + " RLP must be a list. ", 0, 1); + } + } + + bool sealAndSend(const dev::p2p::NodeID& node_id, SubprotocolPacketType packet_type, dev::RLPStream&& rlp) { + auto host = peers_state_->host_.lock(); + if (!host) { + LOG(log_er_) << "sealAndSend failed to obtain host"; + return false; + } + + if (const auto peer = peers_state_->getPacketSenderPeer(node_id, packet_type); !peer.first) [[unlikely]] { + LOG(log_wr_) << "Unable to send packet. Reason: " << peer.second; + host->disconnect(node_id, dev::p2p::UserReason); + return false; + } + + const auto begin = std::chrono::steady_clock::now(); + const size_t packet_size = rlp.out().size(); + + host->send(node_id, TARAXA_CAPABILITY_NAME, packet_type, rlp.invalidate(), + [begin, node_id, packet_size, packet_type, this]() { + if (!kConf.network.ddos_protection.log_packets_stats) { + return; + } + + PacketStats packet_stats{ + 1 /* count */, packet_size, + std::chrono::duration_cast(std::chrono::steady_clock::now() - begin), + std::chrono::microseconds{0}}; + + packets_stats_->addSentPacket(convertPacketTypeToString(packet_type), node_id, packet_stats); + }); + + return true; + } + + void disconnect(const dev::p2p::NodeID& node_id, dev::p2p::DisconnectReason reason) { + if (auto host = peers_state_->host_.lock(); host) { + LOG(log_nf_) << "Disconnect node " << node_id.abridged(); + host->disconnect(node_id, reason); + } else { + LOG(log_er_) << "Unable to disconnect node " << node_id.abridged() << " due to invalid host."; + } + } protected: // Node config diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/dag_block_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/dag_block_packet_handler.hpp index 4552b072af..377a83c8a3 100644 --- 
a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/dag_block_packet_handler.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/dag_block_packet_handler.hpp @@ -1,6 +1,7 @@ #pragma once #include "common/ext_syncing_packet_handler.hpp" +#include "network/tarcap/packets/dag_block_packet.hpp" namespace taraxa { class TransactionManager; @@ -8,9 +9,7 @@ class TransactionManager; namespace taraxa::network::tarcap { -class TestState; - -class DagBlockPacketHandler : public ExtSyncingPacketHandler { +class DagBlockPacketHandler : public ExtSyncingPacketHandler { public: DagBlockPacketHandler(const FullNodeConfig &conf, std::shared_ptr peers_state, std::shared_ptr packets_stats, @@ -25,11 +24,10 @@ class DagBlockPacketHandler : public ExtSyncingPacketHandler { void onNewBlockVerified(const DagBlock &block, bool proposed, const SharedTransactions &trxs); // Packet type that is processed by this handler - static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::DagBlockPacket; + static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::kDagBlockPacket; private: - virtual void validatePacketRlpFormat(const threadpool::PacketData &packet_data) const override; - virtual void process(const threadpool::PacketData &packet_data, const std::shared_ptr &peer) override; + virtual void process(DagBlockPacket &&packet, const std::shared_ptr &peer) override; protected: std::shared_ptr trx_mgr_{nullptr}; diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/dag_sync_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/dag_sync_packet_handler.hpp index 631e94fb8c..d4477986f4 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/dag_sync_packet_handler.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/dag_sync_packet_handler.hpp @@ -1,6 +1,7 @@ 
#pragma once #include "common/ext_syncing_packet_handler.hpp" +#include "network/tarcap/packets/dag_sync_packet.hpp" namespace taraxa { class TransactionManager; @@ -8,7 +9,7 @@ class TransactionManager; namespace taraxa::network::tarcap { -class DagSyncPacketHandler : public ExtSyncingPacketHandler { +class DagSyncPacketHandler : public ExtSyncingPacketHandler { public: DagSyncPacketHandler(const FullNodeConfig& conf, std::shared_ptr peers_state, std::shared_ptr packets_stats, @@ -18,11 +19,10 @@ class DagSyncPacketHandler : public ExtSyncingPacketHandler { const addr_t& node_addr, const std::string& logs_prefix = ""); // Packet type that is processed by this handler - static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::DagSyncPacket; + static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::kDagSyncPacket; private: - virtual void validatePacketRlpFormat(const threadpool::PacketData& packet_data) const override; - virtual void process(const threadpool::PacketData& packet_data, const std::shared_ptr& peer) override; + virtual void process(DagSyncPacket&& packet, const std::shared_ptr& peer) override; protected: std::shared_ptr trx_mgr_{nullptr}; diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/get_dag_sync_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/get_dag_sync_packet_handler.hpp index 5c7c01e7ea..c50315b02e 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/get_dag_sync_packet_handler.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/get_dag_sync_packet_handler.hpp @@ -1,6 +1,7 @@ #pragma once #include "common/packet_handler.hpp" +#include "network/tarcap/packets/get_dag_sync_packet.hpp" #include "transaction/transaction.hpp" namespace taraxa { @@ -11,7 +12,7 @@ class TransactionManager; namespace taraxa::network::tarcap { -class GetDagSyncPacketHandler : 
public PacketHandler { +class GetDagSyncPacketHandler : public PacketHandler { public: GetDagSyncPacketHandler(const FullNodeConfig& conf, std::shared_ptr peers_state, std::shared_ptr packets_stats, @@ -23,11 +24,10 @@ class GetDagSyncPacketHandler : public PacketHandler { SharedTransactions&& transactions, PbftPeriod request_period, PbftPeriod period); // Packet type that is processed by this handler - static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::GetDagSyncPacket; + static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::kGetDagSyncPacket; private: - virtual void validatePacketRlpFormat(const threadpool::PacketData& packet_data) const override; - virtual void process(const threadpool::PacketData& packet_data, const std::shared_ptr& peer) override; + virtual void process(GetDagSyncPacket&& packet, const std::shared_ptr& peer) override; protected: std::shared_ptr trx_mgr_; diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/get_next_votes_bundle_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/get_next_votes_bundle_packet_handler.hpp index acabecec08..ae3e4e64e9 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/get_next_votes_bundle_packet_handler.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/get_next_votes_bundle_packet_handler.hpp @@ -1,6 +1,7 @@ #pragma once #include "common/ext_votes_packet_handler.hpp" +#include "network/tarcap/packets/get_next_votes_bundle_packet.hpp" namespace taraxa { class PbftManager; @@ -9,7 +10,7 @@ class VoteManager; namespace taraxa::network::tarcap { -class GetNextVotesBundlePacketHandler : public ExtVotesPacketHandler { +class GetNextVotesBundlePacketHandler : public ExtVotesPacketHandler { public: GetNextVotesBundlePacketHandler(const FullNodeConfig& conf, std::shared_ptr peers_state, std::shared_ptr packets_stats, @@ 
-19,11 +20,10 @@ class GetNextVotesBundlePacketHandler : public ExtVotesPacketHandler { const std::string& logs_prefix = "GET_NEXT_VOTES_BUNDLE_PH"); // Packet type that is processed by this handler - static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::GetNextVotesSyncPacket; + static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::kGetNextVotesSyncPacket; private: - virtual void validatePacketRlpFormat(const threadpool::PacketData& packet_data) const override; - virtual void process(const threadpool::PacketData& packet_data, const std::shared_ptr& peer) override; + virtual void process(GetNextVotesBundlePacket&& packet, const std::shared_ptr& peer) override; }; } // namespace taraxa::network::tarcap diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/get_pbft_sync_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/get_pbft_sync_packet_handler.hpp index 763bebb372..250449cf75 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/get_pbft_sync_packet_handler.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/get_pbft_sync_packet_handler.hpp @@ -1,6 +1,7 @@ #pragma once #include "common/packet_handler.hpp" +#include "network/tarcap/packets/get_pbft_sync_packet.hpp" namespace taraxa { class PbftChain; @@ -12,7 +13,7 @@ namespace taraxa::network::tarcap { class PbftSyncingState; -class GetPbftSyncPacketHandler : public PacketHandler { +class GetPbftSyncPacketHandler : public PacketHandler { public: GetPbftSyncPacketHandler(const FullNodeConfig& conf, std::shared_ptr peers_state, std::shared_ptr packets_stats, @@ -21,11 +22,10 @@ class GetPbftSyncPacketHandler : public PacketHandler { const addr_t& node_addr, const std::string& logs_prefix = "GET_PBFT_SYNC_PH"); // Packet type that is processed by this handler - static constexpr SubprotocolPacketType kPacketType_ = 
SubprotocolPacketType::GetPbftSyncPacket; + static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::kGetPbftSyncPacket; private: - virtual void validatePacketRlpFormat(const threadpool::PacketData& packet_data) const override; - virtual void process(const threadpool::PacketData& packet_data, const std::shared_ptr& peer) override; + virtual void process(GetPbftSyncPacket&& packet, const std::shared_ptr& peer) override; virtual void sendPbftBlocks(const std::shared_ptr& peer, PbftPeriod from_period, size_t blocks_to_transfer, bool pbft_chain_synced); diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/get_pillar_votes_bundle_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/get_pillar_votes_bundle_packet_handler.hpp index 9d85989729..58f7b2481c 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/get_pillar_votes_bundle_packet_handler.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/get_pillar_votes_bundle_packet_handler.hpp @@ -1,11 +1,12 @@ #pragma once #include "common/packet_handler.hpp" +#include "network/tarcap/packets/get_pillar_votes_bundle_packet.hpp" #include "pillar_chain/pillar_chain_manager.hpp" namespace taraxa::network::tarcap { -class GetPillarVotesBundlePacketHandler : public PacketHandler { +class GetPillarVotesBundlePacketHandler : public PacketHandler { public: GetPillarVotesBundlePacketHandler(const FullNodeConfig& conf, std::shared_ptr peers_state, std::shared_ptr packets_stats, @@ -16,11 +17,10 @@ class GetPillarVotesBundlePacketHandler : public PacketHandler { const std::shared_ptr& peer); // Packet type that is processed by this handler - static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::GetPillarVotesBundlePacket; + static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::kGetPillarVotesBundlePacket; private: - virtual void 
validatePacketRlpFormat(const threadpool::PacketData& packet_data) const override; - virtual void process(const threadpool::PacketData& packet_data, const std::shared_ptr& peer) override; + virtual void process(GetPillarVotesBundlePacket&& packet, const std::shared_ptr& peer) override; protected: constexpr static size_t kGetPillarVotesBundlePacketSize{2}; diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/pbft_sync_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/pbft_sync_packet_handler.hpp index 9a99ce1a6c..621e23a262 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/pbft_sync_packet_handler.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/pbft_sync_packet_handler.hpp @@ -2,11 +2,12 @@ #include "common/ext_syncing_packet_handler.hpp" #include "common/thread_pool.hpp" +#include "network/tarcap/packets/pbft_sync_packet.hpp" #include "vote_manager/vote_manager.hpp" namespace taraxa::network::tarcap { -class PbftSyncPacketHandler : public ExtSyncingPacketHandler { +class PbftSyncPacketHandler : public ExtSyncingPacketHandler { public: PbftSyncPacketHandler(const FullNodeConfig& conf, std::shared_ptr peers_state, std::shared_ptr packets_stats, @@ -18,11 +19,10 @@ class PbftSyncPacketHandler : public ExtSyncingPacketHandler { void handleMaliciousSyncPeer(const dev::p2p::NodeID& id); // Packet type that is processed by this handler - static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::PbftSyncPacket; + static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::kPbftSyncPacket; private: - virtual void validatePacketRlpFormat(const threadpool::PacketData& packet_data) const override; - virtual void process(const threadpool::PacketData& packet_data, const std::shared_ptr& peer) override; + virtual void process(PbftSyncPacket&& packet, const std::shared_ptr& peer) override; 
protected: virtual PeriodData decodePeriodData(const dev::RLP& period_data_rlp) const; diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/pillar_vote_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/pillar_vote_packet_handler.hpp index 18f64d1767..8bdbb52531 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/pillar_vote_packet_handler.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/pillar_vote_packet_handler.hpp @@ -1,10 +1,11 @@ #pragma once +#include "network/tarcap/packets/pillar_vote_packet.hpp" #include "network/tarcap/packets_handlers/latest/common/ext_pillar_vote_packet_handler.hpp" namespace taraxa::network::tarcap { -class PillarVotePacketHandler : public ExtPillarVotePacketHandler { +class PillarVotePacketHandler : public ExtPillarVotePacketHandler { public: PillarVotePacketHandler(const FullNodeConfig& conf, std::shared_ptr peers_state, std::shared_ptr packets_stats, @@ -15,11 +16,10 @@ class PillarVotePacketHandler : public ExtPillarVotePacketHandler { void sendPillarVote(const std::shared_ptr& peer, const std::shared_ptr& vote); // Packet type that is processed by this handler - static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::PillarVotePacket; + static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::kPillarVotePacket; private: - virtual void validatePacketRlpFormat(const threadpool::PacketData& packet_data) const override; - virtual void process(const threadpool::PacketData& packet_data, const std::shared_ptr& peer) override; + virtual void process(PillarVotePacket&& packet, const std::shared_ptr& peer) override; }; } // namespace taraxa::network::tarcap diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/pillar_votes_bundle_packet_handler.hpp 
b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/pillar_votes_bundle_packet_handler.hpp index 15a9ccfff7..0f6812c663 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/pillar_votes_bundle_packet_handler.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/pillar_votes_bundle_packet_handler.hpp @@ -1,10 +1,11 @@ #pragma once +#include "network/tarcap/packets/pillar_votes_bundle_packet.hpp" #include "network/tarcap/packets_handlers/latest/common/ext_pillar_vote_packet_handler.hpp" namespace taraxa::network::tarcap { -class PillarVotesBundlePacketHandler : public ExtPillarVotePacketHandler { +class PillarVotesBundlePacketHandler : public ExtPillarVotePacketHandler { public: PillarVotesBundlePacketHandler(const FullNodeConfig& conf, std::shared_ptr peers_state, std::shared_ptr packets_stats, @@ -12,11 +13,10 @@ class PillarVotesBundlePacketHandler : public ExtPillarVotePacketHandler { const addr_t& node_addr, const std::string& logs_prefix); // Packet type that is processed by this handler - static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::PillarVotesBundlePacket; + static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::kPillarVotesBundlePacket; private: - virtual void validatePacketRlpFormat(const threadpool::PacketData& packet_data) const override; - virtual void process(const threadpool::PacketData& packet_data, const std::shared_ptr& peer) override; + virtual void process(PillarVotesBundlePacket&& packet, const std::shared_ptr& peer) override; public: constexpr static size_t kMaxPillarVotesInBundleRlp{250}; diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/status_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/status_packet_handler.hpp index 755db336be..bfb75f42a0 100644 --- 
a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/status_packet_handler.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/status_packet_handler.hpp @@ -1,10 +1,11 @@ #pragma once #include "common/ext_syncing_packet_handler.hpp" +#include "network/tarcap/packets/status_packet.hpp" namespace taraxa::network::tarcap { -class StatusPacketHandler : public ExtSyncingPacketHandler { +class StatusPacketHandler : public ExtSyncingPacketHandler { public: StatusPacketHandler(const FullNodeConfig& conf, std::shared_ptr peers_state, std::shared_ptr packets_stats, @@ -17,11 +18,10 @@ class StatusPacketHandler : public ExtSyncingPacketHandler { void sendStatusToPeers(); // Packet type that is processed by this handler - static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::StatusPacket; + static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::kStatusPacket; private: - virtual void validatePacketRlpFormat(const threadpool::PacketData& packet_data) const override; - virtual void process(const threadpool::PacketData& packet_data, const std::shared_ptr& peer) override; + virtual void process(StatusPacket&& packet, const std::shared_ptr& peer) override; protected: static constexpr uint16_t kInitialStatusPacketItemsCount = 11; diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/transaction_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/transaction_packet_handler.hpp index 208b511174..7ed3474b4b 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/transaction_packet_handler.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/transaction_packet_handler.hpp @@ -1,6 +1,7 @@ #pragma once #include "common/packet_handler.hpp" +#include "network/tarcap/packets/transaction_packet.hpp" #include "transaction/transaction.hpp" namespace taraxa 
{ @@ -10,9 +11,7 @@ enum class TransactionStatus; namespace taraxa::network::tarcap { -class TestState; - -class TransactionPacketHandler : public PacketHandler { +class TransactionPacketHandler : public PacketHandler { public: TransactionPacketHandler(const FullNodeConfig& conf, std::shared_ptr peers_state, std::shared_ptr packets_stats, @@ -38,14 +37,13 @@ class TransactionPacketHandler : public PacketHandler { void periodicSendTransactions(std::vector&& transactions); // Packet type that is processed by this handler - static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::TransactionPacket; + static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::kTransactionPacket; // 2 items: hashes and transactions static constexpr uint32_t kTransactionPacketItemCount = 2; private: - virtual void validatePacketRlpFormat(const threadpool::PacketData& packet_data) const override; - virtual void process(const threadpool::PacketData& packet_data, const std::shared_ptr& peer) override; + virtual void process(TransactionPacket&& packet, const std::shared_ptr& peer) override; protected: /** diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/vote_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/vote_packet_handler.hpp index 1c5bb1b18a..c60a2c9788 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/vote_packet_handler.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/vote_packet_handler.hpp @@ -1,10 +1,11 @@ #pragma once #include "common/ext_votes_packet_handler.hpp" +#include "network/tarcap/packets/vote_packet.hpp" namespace taraxa::network::tarcap { -class VotePacketHandler : public ExtVotesPacketHandler { +class VotePacketHandler : public ExtVotesPacketHandler { public: VotePacketHandler(const FullNodeConfig& conf, std::shared_ptr peers_state, std::shared_ptr packets_stats, 
std::shared_ptr pbft_mgr, @@ -25,11 +26,10 @@ class VotePacketHandler : public ExtVotesPacketHandler { const std::shared_ptr& block); // Packet type that is processed by this handler - static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::VotePacket; + static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::kVotePacket; private: - virtual void validatePacketRlpFormat(const threadpool::PacketData& packet_data) const override; - virtual void process(const threadpool::PacketData& packet_data, const std::shared_ptr& peer) override; + virtual void process(VotePacket&& packet, const std::shared_ptr& peer) override; protected: const size_t kVotePacketSize{1}; diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/votes_bundle_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/votes_bundle_packet_handler.hpp index 629485166d..6755333dc0 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/votes_bundle_packet_handler.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/votes_bundle_packet_handler.hpp @@ -1,10 +1,11 @@ #pragma once #include "common/ext_votes_packet_handler.hpp" +#include "network/tarcap/packets/votes_bundle_packet.hpp" namespace taraxa::network::tarcap { -class VotesBundlePacketHandler : public ExtVotesPacketHandler { +class VotesBundlePacketHandler : public ExtVotesPacketHandler { public: VotesBundlePacketHandler(const FullNodeConfig& conf, std::shared_ptr peers_state, std::shared_ptr packets_stats, std::shared_ptr pbft_mgr, @@ -23,11 +24,10 @@ class VotesBundlePacketHandler : public ExtVotesPacketHandler { const std::optional& exclude_node = {}); // Packet type that is processed by this handler - static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::VotesBundlePacket; + static constexpr SubprotocolPacketType kPacketType_ = 
SubprotocolPacketType::kVotesBundlePacket; private: - virtual void validatePacketRlpFormat(const threadpool::PacketData& packet_data) const override; - virtual void process(const threadpool::PacketData& packet_data, const std::shared_ptr& peer) override; + virtual void process(VotesBundlePacket&& packet, const std::shared_ptr& peer) override; }; } // namespace taraxa::network::tarcap diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/v3/get_pbft_sync_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v3/get_pbft_sync_packet_handler.hpp deleted file mode 100644 index d30c50c649..0000000000 --- a/libraries/core_libs/network/include/network/tarcap/packets_handlers/v3/get_pbft_sync_packet_handler.hpp +++ /dev/null @@ -1,41 +0,0 @@ -#pragma once - -#include "../latest/common/packet_handler.hpp" - -namespace taraxa { -class PbftChain; -class DbStorage; -class VoteManager; -} // namespace taraxa - -namespace taraxa::network::tarcap { -class PbftSyncingState; -} - -namespace taraxa::network::tarcap::v3 { -class GetPbftSyncPacketHandler : public PacketHandler { - public: - GetPbftSyncPacketHandler(const FullNodeConfig& conf, std::shared_ptr peers_state, - std::shared_ptr packets_stats, - std::shared_ptr pbft_syncing_state, std::shared_ptr pbft_chain, - std::shared_ptr vote_mgr, std::shared_ptr db, - const addr_t& node_addr, const std::string& logs_prefix = "GET_PBFT_SYNC_PH"); - - // Packet type that is processed by this handler - static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::GetPbftSyncPacket; - - private: - virtual void validatePacketRlpFormat(const threadpool::PacketData& packet_data) const override; - virtual void process(const threadpool::PacketData& packet_data, const std::shared_ptr& peer) override; - - virtual void sendPbftBlocks(const std::shared_ptr& peer, PbftPeriod from_period, - size_t blocks_to_transfer, bool pbft_chain_synced); - - protected: - std::shared_ptr 
pbft_syncing_state_; - std::shared_ptr pbft_chain_; - std::shared_ptr vote_mgr_; - std::shared_ptr db_; -}; - -} // namespace taraxa::network::tarcap::v3 diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/v3/pbft_sync_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v3/pbft_sync_packet_handler.hpp deleted file mode 100644 index 3230b8c4dc..0000000000 --- a/libraries/core_libs/network/include/network/tarcap/packets_handlers/v3/pbft_sync_packet_handler.hpp +++ /dev/null @@ -1,43 +0,0 @@ -#pragma once - -#include "../latest/common/ext_syncing_packet_handler.hpp" -#include "common/thread_pool.hpp" -#include "vote_manager/vote_manager.hpp" - -namespace taraxa::network::tarcap::v3 { - -class PbftSyncPacketHandler : public ExtSyncingPacketHandler { - public: - PbftSyncPacketHandler(const FullNodeConfig& conf, std::shared_ptr peers_state, - std::shared_ptr packets_stats, - std::shared_ptr pbft_syncing_state, std::shared_ptr pbft_chain, - std::shared_ptr pbft_mgr, std::shared_ptr dag_mgr, - std::shared_ptr vote_mgr, std::shared_ptr db, const addr_t& node_addr, - const std::string& logs_prefix = "PBFT_SYNC_PH"); - - void handleMaliciousSyncPeer(const dev::p2p::NodeID& id); - - // Packet type that is processed by this handler - static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::PbftSyncPacket; - - private: - virtual void validatePacketRlpFormat(const threadpool::PacketData& packet_data) const override; - virtual void process(const threadpool::PacketData& packet_data, const std::shared_ptr& peer) override; - - protected: - virtual PeriodData decodePeriodData(const dev::RLP& period_data_rlp) const; - virtual std::vector> decodeVotesBundle(const dev::RLP& votes_bundle_rlp) const; - - void pbftSyncComplete(); - void delayedPbftSync(uint32_t counter); - - static constexpr uint32_t kDelayedPbftSyncDelayMs = 10; - - std::shared_ptr vote_mgr_; - util::ThreadPool periodic_events_tp_; - - 
static constexpr size_t kStandardPacketSize = 2; - static constexpr size_t kChainSyncedPacketSize = 3; -}; - -} // namespace taraxa::network::tarcap::v3 diff --git a/libraries/core_libs/network/include/network/tarcap/taraxa_capability.hpp b/libraries/core_libs/network/include/network/tarcap/taraxa_capability.hpp index 095d8e3b2a..8a2ccff8d6 100644 --- a/libraries/core_libs/network/include/network/tarcap/taraxa_capability.hpp +++ b/libraries/core_libs/network/include/network/tarcap/taraxa_capability.hpp @@ -59,9 +59,6 @@ class TaraxaCapability final : public dev::p2p::CapabilityFace { */ static const InitPacketsHandlers kInitLatestVersionHandlers; - // TODO: remove this once we pass HF - static const InitPacketsHandlers kInitV3Handlers; - public: TaraxaCapability(TarcapVersion version, const FullNodeConfig &conf, const h256 &genesis_hash, std::weak_ptr host, const dev::KeyPair &key, diff --git a/libraries/core_libs/network/include/network/threadpool/packet_data.hpp b/libraries/core_libs/network/include/network/threadpool/packet_data.hpp index ab530a19b6..19670ec716 100644 --- a/libraries/core_libs/network/include/network/threadpool/packet_data.hpp +++ b/libraries/core_libs/network/include/network/threadpool/packet_data.hpp @@ -40,6 +40,7 @@ class PacketData { PacketId id_{0}; // Unique packet id (counter) std::chrono::steady_clock::time_point receive_time_; SubprotocolPacketType type_; + // TODO: might not need anymore ??? 
std::string type_str_; PacketPriority priority_; dev::p2p::NodeID from_node_id_; diff --git a/libraries/core_libs/network/src/network.cpp b/libraries/core_libs/network/src/network.cpp index 53f51eabec..0262050a3f 100644 --- a/libraries/core_libs/network/src/network.cpp +++ b/libraries/core_libs/network/src/network.cpp @@ -77,13 +77,6 @@ Network::Network(const FullNodeConfig &config, const h256 &genesis_hash, std::fi dev::p2p::Host::CapabilityList capabilities; - // Register old version (V2) of taraxa capability - auto v3_tarcap = std::make_shared( - kV3NetworkVersion, config, genesis_hash, host, key, packets_tp_, all_packets_stats_, pbft_syncing_state_, db, - pbft_mgr, pbft_chain, vote_mgr, dag_mgr, trx_mgr, slashing_manager, pillar_chain_mgr, - network::tarcap::TaraxaCapability::kInitV3Handlers); - capabilities.emplace_back(v3_tarcap); - // Register latest version of taraxa capability auto latest_tarcap = std::make_shared( TARAXA_NET_VERSION, config, genesis_hash, host, key, packets_tp_, all_packets_stats_, pbft_syncing_state_, db, diff --git a/libraries/core_libs/network/src/tarcap/packets_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handler.cpp index 6d7ca8ff85..0fc450ccc7 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handler.cpp @@ -2,7 +2,7 @@ namespace taraxa::network::tarcap { -const std::shared_ptr& PacketsHandler::getSpecificHandler(SubprotocolPacketType packet_type) const { +const std::shared_ptr& PacketsHandler::getSpecificHandler(SubprotocolPacketType packet_type) const { auto selected_handler = packets_handlers_.find(packet_type); if (selected_handler == packets_handlers_.end()) { diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/common/ext_bls_sig_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/common/ext_bls_sig_packet_handler.cpp index 90b3ab940e..fe4ce45241 100644 --- 
a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/common/ext_bls_sig_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/common/ext_bls_sig_packet_handler.cpp @@ -1,37 +1,37 @@ -#include "network/tarcap/packets_handlers/latest/common/ext_pillar_vote_packet_handler.hpp" -#include "pillar_chain/pillar_chain_manager.hpp" - -namespace taraxa::network::tarcap { - -ExtPillarVotePacketHandler::ExtPillarVotePacketHandler( - const FullNodeConfig &conf, std::shared_ptr peers_state, - std::shared_ptr packets_stats, - std::shared_ptr pillar_chain_manager, const addr_t &node_addr, - const std::string &log_channel) - : PacketHandler(conf, std::move(peers_state), std::move(packets_stats), node_addr, log_channel), - pillar_chain_manager_{std::move(pillar_chain_manager)} {} - -bool ExtPillarVotePacketHandler::processPillarVote(const std::shared_ptr &vote, - const std::shared_ptr &peer) { - if (!pillar_chain_manager_->isRelevantPillarVote(vote)) { - LOG(log_dg_) << "Drop irrelevant pillar vote " << vote->getHash() << ", period " << vote->getPeriod() - << " from peer " << peer->getId(); - return false; - } - - if (!pillar_chain_manager_->validatePillarVote(vote)) { - // TODO: enable for mainnet - // std::ostringstream err_msg; - // err_msg << "Invalid pillar vote " << vote->getHash() << " from peer " << peer->getId(); - // throw MaliciousPeerException(err_msg.str()); - return false; - } - - pillar_chain_manager_->addVerifiedPillarVote(vote); - - // Mark pillar vote as known for peer - peer->markPillarVoteAsKnown(vote->getHash()); - return true; -} - -} // namespace taraxa::network::tarcap +// #include "network/tarcap/packets_handlers/latest/common/ext_pillar_vote_packet_handler.hpp" +// #include "pillar_chain/pillar_chain_manager.hpp" +// +// namespace taraxa::network::tarcap { +// +// ExtPillarVotePacketHandler::ExtPillarVotePacketHandler( +// const FullNodeConfig &conf, std::shared_ptr peers_state, +// std::shared_ptr 
packets_stats, +// std::shared_ptr pillar_chain_manager, const addr_t &node_addr, +// const std::string &log_channel) +// : PacketHandler(conf, std::move(peers_state), std::move(packets_stats), node_addr, log_channel), +// pillar_chain_manager_{std::move(pillar_chain_manager)} {} +// +// bool ExtPillarVotePacketHandler::processPillarVote(const std::shared_ptr &vote, +// const std::shared_ptr &peer) { +// if (!pillar_chain_manager_->isRelevantPillarVote(vote)) { +// LOG(log_dg_) << "Drop irrelevant pillar vote " << vote->getHash() << ", period " << vote->getPeriod() +// << " from peer " << peer->getId(); +// return false; +// } +// +// if (!pillar_chain_manager_->validatePillarVote(vote)) { +// // TODO: enable for mainnet +// // std::ostringstream err_msg; +// // err_msg << "Invalid pillar vote " << vote->getHash() << " from peer " << peer->getId(); +// // throw MaliciousPeerException(err_msg.str()); +// return false; +// } +// +// pillar_chain_manager_->addVerifiedPillarVote(vote); +// +// // Mark pillar vote as known for peer +// peer->markPillarVoteAsKnown(vote->getHash()); +// return true; +// } +// +// } // namespace taraxa::network::tarcap diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/common/ext_syncing_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/common/ext_syncing_packet_handler.cpp index 4dbe10aa1c..5ea59761bc 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/common/ext_syncing_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/common/ext_syncing_packet_handler.cpp @@ -1,167 +1,169 @@ -#include "network/tarcap/packets_handlers/latest/common/ext_syncing_packet_handler.hpp" - -#include "network/tarcap/shared_states/pbft_syncing_state.hpp" -#include "pbft/pbft_chain.hpp" -#include "pbft/pbft_manager.hpp" - -namespace taraxa::network::tarcap { - -ExtSyncingPacketHandler::ExtSyncingPacketHandler(const FullNodeConfig &conf, 
std::shared_ptr peers_state, - std::shared_ptr packets_stats, - std::shared_ptr pbft_syncing_state, - std::shared_ptr pbft_chain, - std::shared_ptr pbft_mgr, - std::shared_ptr dag_mgr, std::shared_ptr db, - const addr_t &node_addr, const std::string &log_channel_name) - : PacketHandler(conf, std::move(peers_state), std::move(packets_stats), node_addr, log_channel_name), - pbft_syncing_state_(std::move(pbft_syncing_state)), - pbft_chain_(std::move(pbft_chain)), - pbft_mgr_(std::move(pbft_mgr)), - dag_mgr_(std::move(dag_mgr)), - db_(std::move(db)) {} - -void ExtSyncingPacketHandler::startSyncingPbft() { - if (pbft_syncing_state_->isPbftSyncing()) { - LOG(log_dg_) << "startSyncingPbft called but syncing_ already true"; - return; - } - - std::shared_ptr peer = getMaxChainPeer(); - if (!peer) { - LOG(log_nf_) << "Restarting syncing PBFT not possible since no connected peers"; - return; - } - - auto pbft_sync_period = pbft_mgr_->pbftSyncingPeriod(); - if (peer->pbft_chain_size_ > pbft_sync_period) { - auto peer_id = peer->getId().abridged(); - auto peer_pbft_chain_size = peer->pbft_chain_size_.load(); - if (!pbft_syncing_state_->setPbftSyncing(true, pbft_sync_period, std::move(peer))) { - LOG(log_dg_) << "startSyncingPbft called but syncing_ already true"; - return; - } - LOG(log_si_) << "Restarting syncing PBFT from peer " << peer_id << ", peer PBFT chain size " << peer_pbft_chain_size - << ", own PBFT chain synced at period " << pbft_sync_period; - - if (syncPeerPbft(pbft_sync_period + 1)) { - // Disable snapshots only if are syncing from scratch - if (pbft_syncing_state_->isDeepPbftSyncing()) { - db_->disableSnapshots(); - } - } else { - pbft_syncing_state_->setPbftSyncing(false); - } - } else { - LOG(log_nf_) << "Restarting syncing PBFT not needed since our pbft chain size: " << pbft_sync_period << "(" - << pbft_chain_->getPbftChainSize() << ")" - << " is greater or equal than max node pbft chain size:" << peer->pbft_chain_size_; - db_->enableSnapshots(); - } -} 
-bool ExtSyncingPacketHandler::syncPeerPbft(PbftPeriod request_period) { - const auto syncing_peer = pbft_syncing_state_->syncingPeer(); - if (!syncing_peer) { - LOG(log_er_) << "Unable to send GetPbftSyncPacket. No syncing peer set."; - return false; - } - - if (request_period > syncing_peer->pbft_chain_size_) { - LOG(log_wr_) << "Invalid syncPeerPbft argument. Node " << syncing_peer->getId() << " chain size " - << syncing_peer->pbft_chain_size_ << ", requested period " << request_period; - return false; - } - - LOG(log_nf_) << "Send GetPbftSyncPacket with period " << request_period << " to node " << syncing_peer->getId(); - return sealAndSend(syncing_peer->getId(), SubprotocolPacketType::GetPbftSyncPacket, - std::move(dev::RLPStream(1) << request_period)); -} - -std::shared_ptr ExtSyncingPacketHandler::getMaxChainPeer( - std::function &)> filter_func) { - std::shared_ptr max_pbft_chain_peer; - PbftPeriod max_pbft_chain_size = 0; - uint64_t max_node_dag_level = 0; - - // Find peer with max pbft chain and dag level - for (auto const &peer : peers_state_->getAllPeers()) { - // Apply the filter function - if (!filter_func(peer.second)) { - continue; - } - - if (peer.second->pbft_chain_size_ > max_pbft_chain_size) { - if (peer.second->peer_light_node && - pbft_mgr_->pbftSyncingPeriod() + peer.second->peer_light_node_history < peer.second->pbft_chain_size_) { - LOG(log_er_) << "Disconnecting from light node peer " << peer.first - << " History: " << peer.second->peer_light_node_history - << " chain size: " << peer.second->pbft_chain_size_; - disconnect(peer.first, dev::p2p::UserReason); - continue; - } - max_pbft_chain_size = peer.second->pbft_chain_size_; - max_node_dag_level = peer.second->dag_level_; - max_pbft_chain_peer = peer.second; - } else if (peer.second->pbft_chain_size_ == max_pbft_chain_size && peer.second->dag_level_ > max_node_dag_level) { - max_node_dag_level = peer.second->dag_level_; - max_pbft_chain_peer = peer.second; - } - } - return 
max_pbft_chain_peer; -} - -void ExtSyncingPacketHandler::requestPendingDagBlocks(std::shared_ptr peer) { - if (!peer) { - peer = getMaxChainPeer([](const std::shared_ptr &peer) { - if (peer->peer_dag_synced_ || !peer->dagSyncingAllowed()) { - return false; - } - return true; - }); - if (!peer) { - LOG(log_nf_) << "requestPendingDagBlocks not possible since no peers are matching conditions"; - return; - } - } - - if (!peer) { - LOG(log_nf_) << "requestPendingDagBlocks not possible since no connected peers"; - return; - } - - // This prevents ddos requesting dag blocks. We can only request this one time from one peer. - if (peer->peer_dag_synced_) { - LOG(log_nf_) << "requestPendingDagBlocks not possible since already requested for peer"; - return; - } - - // Only request dag blocks if periods are matching - auto pbft_sync_period = pbft_mgr_->pbftSyncingPeriod(); - if (pbft_sync_period == peer->pbft_chain_size_) { - // This prevents parallel requests - if (bool b = false; !peer->peer_dag_syncing_.compare_exchange_strong(b, !b)) { - LOG(log_nf_) << "requestPendingDagBlocks not possible since already requesting for peer"; - return; - } - LOG(log_nf_) << "Request pending blocks from peer " << peer->getId(); - std::unordered_set known_non_finalized_blocks; - auto [period, blocks] = dag_mgr_->getNonFinalizedBlocks(); - for (auto &level_blocks : blocks) { - for (auto &block : level_blocks.second) { - known_non_finalized_blocks.insert(block); - } - } - - requestDagBlocks(peer->getId(), known_non_finalized_blocks, period); - } -} - -void ExtSyncingPacketHandler::requestDagBlocks(const dev::p2p::NodeID &_nodeID, - const std::unordered_set &blocks, PbftPeriod period) { - dev::RLPStream s(2); // Period + blocks list - s.append(period); - s.append(blocks); - - sealAndSend(_nodeID, SubprotocolPacketType::GetDagSyncPacket, std::move(s)); -} - -} // namespace taraxa::network::tarcap +// #include "network/tarcap/packets_handlers/latest/common/ext_syncing_packet_handler.hpp" +// +// 
#include "network/tarcap/shared_states/pbft_syncing_state.hpp" +// #include "pbft/pbft_chain.hpp" +// #include "pbft/pbft_manager.hpp" +// +// namespace taraxa::network::tarcap { +// +// ExtSyncingPacketHandler::ExtSyncingPacketHandler(const FullNodeConfig &conf, std::shared_ptr peers_state, +// std::shared_ptr packets_stats, +// std::shared_ptr pbft_syncing_state, +// std::shared_ptr pbft_chain, +// std::shared_ptr pbft_mgr, +// std::shared_ptr dag_mgr, std::shared_ptr db, +// const addr_t &node_addr, const std::string &log_channel_name) +// : PacketHandler(conf, std::move(peers_state), std::move(packets_stats), node_addr, log_channel_name), +// pbft_syncing_state_(std::move(pbft_syncing_state)), +// pbft_chain_(std::move(pbft_chain)), +// pbft_mgr_(std::move(pbft_mgr)), +// dag_mgr_(std::move(dag_mgr)), +// db_(std::move(db)) {} +// +// void ExtSyncingPacketHandler::startSyncingPbft() { +// if (pbft_syncing_state_->isPbftSyncing()) { +// LOG(log_dg_) << "startSyncingPbft called but syncing_ already true"; +// return; +// } +// +// std::shared_ptr peer = getMaxChainPeer(); +// if (!peer) { +// LOG(log_nf_) << "Restarting syncing PBFT not possible since no connected peers"; +// return; +// } +// +// auto pbft_sync_period = pbft_mgr_->pbftSyncingPeriod(); +// if (peer->pbft_chain_size_ > pbft_sync_period) { +// auto peer_id = peer->getId().abridged(); +// auto peer_pbft_chain_size = peer->pbft_chain_size_.load(); +// if (!pbft_syncing_state_->setPbftSyncing(true, pbft_sync_period, std::move(peer))) { +// LOG(log_dg_) << "startSyncingPbft called but syncing_ already true"; +// return; +// } +// LOG(log_si_) << "Restarting syncing PBFT from peer " << peer_id << ", peer PBFT chain size " << +// peer_pbft_chain_size +// << ", own PBFT chain synced at period " << pbft_sync_period; +// +// if (syncPeerPbft(pbft_sync_period + 1)) { +// // Disable snapshots only if are syncing from scratch +// if (pbft_syncing_state_->isDeepPbftSyncing()) { +// db_->disableSnapshots(); +// 
} +// } else { +// pbft_syncing_state_->setPbftSyncing(false); +// } +// } else { +// LOG(log_nf_) << "Restarting syncing PBFT not needed since our pbft chain size: " << pbft_sync_period << "(" +// << pbft_chain_->getPbftChainSize() << ")" +// << " is greater or equal than max node pbft chain size:" << peer->pbft_chain_size_; +// db_->enableSnapshots(); +// } +// } +// bool ExtSyncingPacketHandler::syncPeerPbft(PbftPeriod request_period) { +// const auto syncing_peer = pbft_syncing_state_->syncingPeer(); +// if (!syncing_peer) { +// LOG(log_er_) << "Unable to send GetPbftSyncPacket. No syncing peer set."; +// return false; +// } +// +// if (request_period > syncing_peer->pbft_chain_size_) { +// LOG(log_wr_) << "Invalid syncPeerPbft argument. Node " << syncing_peer->getId() << " chain size " +// << syncing_peer->pbft_chain_size_ << ", requested period " << request_period; +// return false; +// } +// +// LOG(log_nf_) << "Send GetPbftSyncPacket with period " << request_period << " to node " << syncing_peer->getId(); +// return sealAndSend(syncing_peer->getId(), SubprotocolPacketType::kGetPbftSyncPacket, +// std::move(dev::RLPStream(1) << request_period)); +// } +// +// std::shared_ptr ExtSyncingPacketHandler::getMaxChainPeer( +// std::function &)> filter_func) { +// std::shared_ptr max_pbft_chain_peer; +// PbftPeriod max_pbft_chain_size = 0; +// uint64_t max_node_dag_level = 0; +// +// // Find peer with max pbft chain and dag level +// for (auto const &peer : peers_state_->getAllPeers()) { +// // Apply the filter function +// if (!filter_func(peer.second)) { +// continue; +// } +// +// if (peer.second->pbft_chain_size_ > max_pbft_chain_size) { +// if (peer.second->peer_light_node && +// pbft_mgr_->pbftSyncingPeriod() + peer.second->peer_light_node_history < peer.second->pbft_chain_size_) { +// LOG(log_er_) << "Disconnecting from light node peer " << peer.first +// << " History: " << peer.second->peer_light_node_history +// << " chain size: " << 
peer.second->pbft_chain_size_; +// disconnect(peer.first, dev::p2p::UserReason); +// continue; +// } +// max_pbft_chain_size = peer.second->pbft_chain_size_; +// max_node_dag_level = peer.second->dag_level_; +// max_pbft_chain_peer = peer.second; +// } else if (peer.second->pbft_chain_size_ == max_pbft_chain_size && peer.second->dag_level_ > max_node_dag_level) +// { +// max_node_dag_level = peer.second->dag_level_; +// max_pbft_chain_peer = peer.second; +// } +// } +// return max_pbft_chain_peer; +// } +// +// void ExtSyncingPacketHandler::requestPendingDagBlocks(std::shared_ptr peer) { +// if (!peer) { +// peer = getMaxChainPeer([](const std::shared_ptr &peer) { +// if (peer->peer_dag_synced_ || !peer->dagSyncingAllowed()) { +// return false; +// } +// return true; +// }); +// if (!peer) { +// LOG(log_nf_) << "requestPendingDagBlocks not possible since no peers are matching conditions"; +// return; +// } +// } +// +// if (!peer) { +// LOG(log_nf_) << "requestPendingDagBlocks not possible since no connected peers"; +// return; +// } +// +// // This prevents ddos requesting dag blocks. We can only request this one time from one peer. 
+// if (peer->peer_dag_synced_) { +// LOG(log_nf_) << "requestPendingDagBlocks not possible since already requested for peer"; +// return; +// } +// +// // Only request dag blocks if periods are matching +// auto pbft_sync_period = pbft_mgr_->pbftSyncingPeriod(); +// if (pbft_sync_period == peer->pbft_chain_size_) { +// // This prevents parallel requests +// if (bool b = false; !peer->peer_dag_syncing_.compare_exchange_strong(b, !b)) { +// LOG(log_nf_) << "requestPendingDagBlocks not possible since already requesting for peer"; +// return; +// } +// LOG(log_nf_) << "Request pending blocks from peer " << peer->getId(); +// std::unordered_set known_non_finalized_blocks; +// auto [period, blocks] = dag_mgr_->getNonFinalizedBlocks(); +// for (auto &level_blocks : blocks) { +// for (auto &block : level_blocks.second) { +// known_non_finalized_blocks.insert(block); +// } +// } +// +// requestDagBlocks(peer->getId(), known_non_finalized_blocks, period); +// } +// } +// +// void ExtSyncingPacketHandler::requestDagBlocks(const dev::p2p::NodeID &_nodeID, +// const std::unordered_set &blocks, PbftPeriod period) { +// dev::RLPStream s(2); // Period + blocks list +// s.append(period); +// s.append(blocks); +// +// sealAndSend(_nodeID, SubprotocolPacketType::kGetDagSyncPacket, std::move(s)); +// } +// +// } // namespace taraxa::network::tarcap diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/common/ext_votes_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/common/ext_votes_packet_handler.cpp index 37ed74f413..4f9ad37be4 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/common/ext_votes_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/common/ext_votes_packet_handler.cpp @@ -1,225 +1,228 @@ -#include "network/tarcap/packets_handlers/latest/common/ext_votes_packet_handler.hpp" - -#include "network/tarcap/packets_handlers/latest/common/exceptions.hpp" 
-#include "pbft/pbft_manager.hpp" -#include "vote/pbft_vote.hpp" -#include "vote/votes_bundle_rlp.hpp" -#include "vote_manager/vote_manager.hpp" - -namespace taraxa::network::tarcap { - -ExtVotesPacketHandler::ExtVotesPacketHandler(const FullNodeConfig &conf, std::shared_ptr peers_state, - std::shared_ptr packets_stats, - std::shared_ptr pbft_mgr, - std::shared_ptr pbft_chain, - std::shared_ptr vote_mgr, - std::shared_ptr slashing_manager, const addr_t &node_addr, - const std::string &log_channel_name) - : PacketHandler(conf, std::move(peers_state), std::move(packets_stats), node_addr, log_channel_name), - last_votes_sync_request_time_(std::chrono::system_clock::now()), - last_pbft_block_sync_request_time_(std::chrono::system_clock::now()), - pbft_mgr_(std::move(pbft_mgr)), - pbft_chain_(std::move(pbft_chain)), - vote_mgr_(std::move(vote_mgr)), - slashing_manager_(std::move(slashing_manager)) {} - -bool ExtVotesPacketHandler::processVote(const std::shared_ptr &vote, - const std::shared_ptr &pbft_block, - const std::shared_ptr &peer, bool validate_max_round_step) { - if (pbft_block && !validateVoteAndBlock(vote, pbft_block)) { - throw MaliciousPeerException("Received vote's voted value != received pbft block"); - } - - if (vote_mgr_->voteInVerifiedMap(vote)) { - LOG(log_dg_) << "Vote " << vote->getHash() << " already inserted in verified queue"; - return false; - } - - // Validate vote's period, round and step min/max values - if (const auto vote_valid = validateVotePeriodRoundStep(vote, peer, validate_max_round_step); !vote_valid.first) { - LOG(log_wr_) << "Vote period/round/step " << vote->getHash() << " validation failed. 
Err: " << vote_valid.second; - return false; - } - - // Check is vote is unique per period, round & step & voter -> each address can generate just 1 vote - // (for a value that isn't NBH) per period, round & step - if (auto vote_valid = vote_mgr_->isUniqueVote(vote); !vote_valid.first) { - // Create double voting proof - slashing_manager_->submitDoubleVotingProof(vote, vote_valid.second); - throw MaliciousPeerException("Received double vote", vote->getVoter()); - } - - // Validate vote's signature, vrf, etc... - if (const auto vote_valid = vote_mgr_->validateVote(vote); !vote_valid.first) { - LOG(log_wr_) << "Vote " << vote->getHash() << " validation failed. Err: " << vote_valid.second; - return false; - } - - if (!vote_mgr_->addVerifiedVote(vote)) { - LOG(log_dg_) << "Vote " << vote->getHash() << " already inserted in verified queue(race condition)"; - return false; - } - - if (pbft_block) { - pbft_mgr_->processProposedBlock(pbft_block, vote); - } - - return true; -} - -std::pair ExtVotesPacketHandler::validateVotePeriodRoundStep(const std::shared_ptr &vote, - const std::shared_ptr &peer, - bool validate_max_round_step) { - const auto [current_pbft_round, current_pbft_period] = pbft_mgr_->getPbftRoundAndPeriod(); - - auto genErrMsg = [period = current_pbft_period, round = current_pbft_round, - step = pbft_mgr_->getPbftStep()](const std::shared_ptr &vote) -> std::string { - std::stringstream err; - err << "Vote " << vote->getHash() << " (period, round, step) = (" << vote->getPeriod() << ", " << vote->getRound() - << ", " << vote->getStep() << "). 
Current PBFT (period, round, step) = (" << period << ", " << round << ", " - << step << ")"; - return err.str(); - }; - - // Period validation - // vote->getPeriod() == current_pbft_period - 1 && cert_vote -> potential reward vote - if (vote->getPeriod() < current_pbft_period - 1 || - (vote->getPeriod() == current_pbft_period - 1 && vote->getType() != PbftVoteTypes::cert_vote)) { - return {false, "Invalid period(too small): " + genErrMsg(vote)}; - } else if (kConf.network.ddos_protection.vote_accepting_periods && - vote->getPeriod() - 1 > current_pbft_period + kConf.network.ddos_protection.vote_accepting_periods) { - // skip this check if kConf.network.ddos_protection.vote_accepting_periods == 0 - // vote->getPeriod() - 1 is here because votes are validated against vote_period - 1 in dpos contract - // Do not request round sync too often here - if (vote->getVoter() == peer->getId() && - std::chrono::system_clock::now() - last_pbft_block_sync_request_time_ > kSyncRequestInterval) { - // request PBFT chain sync from this node - sealAndSend(peer->getId(), SubprotocolPacketType::GetPbftSyncPacket, - std::move(dev::RLPStream(1) << std::max(vote->getPeriod() - 1, peer->pbft_chain_size_.load()))); - last_pbft_block_sync_request_time_ = std::chrono::system_clock::now(); - } - - return {false, "Invalid period(too big): " + genErrMsg(vote)}; - } - - // Round validation - auto checking_round = current_pbft_round; - // If period is not the same we assume current round is equal to 1 - // So we won't accept votes for future period with round bigger than kConf.network.vote_accepting_steps - if (current_pbft_period != vote->getPeriod()) { - checking_round = 1; - } - - // vote->getRound() == checking_round - 1 && next_vote -> previous round next vote - if (vote->getRound() < checking_round - 1 || - (vote->getRound() == checking_round - 1 && vote->getType() != PbftVoteTypes::next_vote)) { - return {false, "Invalid round(too small): " + genErrMsg(vote)}; - } else if 
(validate_max_round_step && kConf.network.ddos_protection.vote_accepting_rounds && - vote->getRound() >= checking_round + kConf.network.ddos_protection.vote_accepting_rounds) { - // skip this check if kConf.network.vote_accepting_rounds == 0 - // Trigger votes(round) syncing only if we are in sync in terms of period - if (current_pbft_period == vote->getPeriod()) { - // Do not request round sync too often here - if (vote->getVoter() == peer->getId() && - std::chrono::system_clock::now() - last_votes_sync_request_time_ > kSyncRequestInterval) { - // request round votes sync from this node - requestPbftNextVotesAtPeriodRound(peer->getId(), current_pbft_period, current_pbft_round); - last_votes_sync_request_time_ = std::chrono::system_clock::now(); - } - } - - return {false, "Invalid round(too big): " + genErrMsg(vote)}; - } - - // Step validation - auto checking_step = pbft_mgr_->getPbftStep(); - // If period or round is not the same we assume current step is equal to 1 - // So we won't accept votes for future rounds with step bigger than kConf.network.vote_accepting_steps - if (current_pbft_period != vote->getPeriod() || current_pbft_round != vote->getRound()) { - checking_step = 1; - } - - // skip check if kConf.network.vote_accepting_steps == 0 - if (validate_max_round_step && kConf.network.ddos_protection.vote_accepting_steps && - vote->getStep() >= checking_step + kConf.network.ddos_protection.vote_accepting_steps) { - return {false, "Invalid step(too big): " + genErrMsg(vote)}; - } - - return {true, ""}; -} - -bool ExtVotesPacketHandler::validateVoteAndBlock(const std::shared_ptr &vote, - const std::shared_ptr &pbft_block) const { - if (pbft_block->getBlockHash() != vote->getBlockHash()) { - LOG(log_er_) << "Vote " << vote->getHash() << " voted block " << vote->getBlockHash() << " != actual block " - << pbft_block->getBlockHash(); - return false; - } - return true; -} - -bool ExtVotesPacketHandler::isPbftRelevantVote(const std::shared_ptr &vote) const { - const 
auto [current_pbft_round, current_pbft_period] = pbft_mgr_->getPbftRoundAndPeriod(); - - if (vote->getPeriod() >= current_pbft_period && vote->getRound() >= current_pbft_round) { - // Standard current or future vote - return true; - } else if (vote->getPeriod() == current_pbft_period && vote->getRound() == (current_pbft_round - 1) && - vote->getType() == PbftVoteTypes::next_vote) { - // Previous round next vote - return true; - } else if (vote->getPeriod() == current_pbft_period - 1 && vote->getType() == PbftVoteTypes::cert_vote) { - // Previous period cert vote - potential reward vote - return true; - } - - return false; -} - -void ExtVotesPacketHandler::sendPbftVotesBundle(const std::shared_ptr &peer, - std::vector> &&votes) { - if (votes.empty()) { - return; - } - - auto sendVotes = [this, &peer](std::vector> &&votes) { - auto votes_bytes = encodePbftVotesBundleRlp(std::move(votes)); - if (votes_bytes.empty()) { - LOG(log_er_) << "Unable to send VotesBundle rlp"; - return; - } - - dev::RLPStream votes_rlp_stream; - votes_rlp_stream.appendRaw(votes_bytes); - - if (sealAndSend(peer->getId(), SubprotocolPacketType::VotesBundlePacket, std::move(votes_rlp_stream))) { - LOG(log_dg_) << " Votes bundle with " << votes.size() << " votes sent to " << peer->getId(); - for (const auto &vote : votes) { - peer->markPbftVoteAsKnown(vote->getHash()); - } - } - }; - - if (votes.size() <= kMaxVotesInBundleRlp) { - sendVotes(std::move(votes)); - return; - } else { - // Need to split votes into multiple packets - size_t index = 0; - while (index < votes.size()) { - const size_t votes_count = std::min(kMaxVotesInBundleRlp, votes.size() - index); - - const auto begin_it = std::next(votes.begin(), index); - const auto end_it = std::next(begin_it, votes_count); - - std::vector> votes_sub_vector; - std::move(begin_it, end_it, std::back_inserter(votes_sub_vector)); - - sendVotes(std::move(votes_sub_vector)); - - index += votes_count; - } - } -} - -} // namespace taraxa::network::tarcap 
+// #include "network/tarcap/packets_handlers/latest/common/ext_votes_packet_handler.hpp" +// +// #include "network/tarcap/packets_handlers/latest/common/exceptions.hpp" +// #include "pbft/pbft_manager.hpp" +// #include "vote/pbft_vote.hpp" +// #include "vote/votes_bundle_rlp.hpp" +// #include "vote_manager/vote_manager.hpp" +// +// namespace taraxa::network::tarcap { +// +// ExtVotesPacketHandler::ExtVotesPacketHandler(const FullNodeConfig &conf, std::shared_ptr peers_state, +// std::shared_ptr packets_stats, +// std::shared_ptr pbft_mgr, +// std::shared_ptr pbft_chain, +// std::shared_ptr vote_mgr, +// std::shared_ptr slashing_manager, const addr_t +// &node_addr, const std::string &log_channel_name) +// : PacketHandler(conf, std::move(peers_state), std::move(packets_stats), node_addr, log_channel_name), +// last_votes_sync_request_time_(std::chrono::system_clock::now()), +// last_pbft_block_sync_request_time_(std::chrono::system_clock::now()), +// pbft_mgr_(std::move(pbft_mgr)), +// pbft_chain_(std::move(pbft_chain)), +// vote_mgr_(std::move(vote_mgr)), +// slashing_manager_(std::move(slashing_manager)) {} +// +// bool ExtVotesPacketHandler::processVote(const std::shared_ptr &vote, +// const std::shared_ptr &pbft_block, +// const std::shared_ptr &peer, bool validate_max_round_step) { +// if (pbft_block && !validateVoteAndBlock(vote, pbft_block)) { +// throw MaliciousPeerException("Received vote's voted value != received pbft block"); +// } +// +// if (vote_mgr_->voteInVerifiedMap(vote)) { +// LOG(log_dg_) << "Vote " << vote->getHash() << " already inserted in verified queue"; +// return false; +// } +// +// // Validate vote's period, round and step min/max values +// if (const auto vote_valid = validateVotePeriodRoundStep(vote, peer, validate_max_round_step); !vote_valid.first) { +// LOG(log_wr_) << "Vote period/round/step " << vote->getHash() << " validation failed. 
Err: " << vote_valid.second; +// return false; +// } +// +// // Check is vote is unique per period, round & step & voter -> each address can generate just 1 vote +// // (for a value that isn't NBH) per period, round & step +// if (auto vote_valid = vote_mgr_->isUniqueVote(vote); !vote_valid.first) { +// // Create double voting proof +// slashing_manager_->submitDoubleVotingProof(vote, vote_valid.second); +// throw MaliciousPeerException("Received double vote", vote->getVoter()); +// } +// +// // Validate vote's signature, vrf, etc... +// if (const auto vote_valid = vote_mgr_->validateVote(vote); !vote_valid.first) { +// LOG(log_wr_) << "Vote " << vote->getHash() << " validation failed. Err: " << vote_valid.second; +// return false; +// } +// +// if (!vote_mgr_->addVerifiedVote(vote)) { +// LOG(log_dg_) << "Vote " << vote->getHash() << " already inserted in verified queue(race condition)"; +// return false; +// } +// +// if (pbft_block) { +// pbft_mgr_->processProposedBlock(pbft_block, vote); +// } +// +// return true; +// } +// +// std::pair ExtVotesPacketHandler::validateVotePeriodRoundStep(const std::shared_ptr +// &vote, +// const std::shared_ptr +// &peer, bool validate_max_round_step) +// { +// const auto [current_pbft_round, current_pbft_period] = pbft_mgr_->getPbftRoundAndPeriod(); +// +// auto genErrMsg = [period = current_pbft_period, round = current_pbft_round, +// step = pbft_mgr_->getPbftStep()](const std::shared_ptr &vote) -> std::string { +// std::stringstream err; +// err << "Vote " << vote->getHash() << " (period, round, step) = (" << vote->getPeriod() << ", " << +// vote->getRound() +// << ", " << vote->getStep() << "). 
Current PBFT (period, round, step) = (" << period << ", " << round << ", " +// << step << ")"; +// return err.str(); +// }; +// +// // Period validation +// // vote->getPeriod() == current_pbft_period - 1 && cert_vote -> potential reward vote +// if (vote->getPeriod() < current_pbft_period - 1 || +// (vote->getPeriod() == current_pbft_period - 1 && vote->getType() != PbftVoteTypes::cert_vote)) { +// return {false, "Invalid period(too small): " + genErrMsg(vote)}; +// } else if (kConf.network.ddos_protection.vote_accepting_periods && +// vote->getPeriod() - 1 > current_pbft_period + kConf.network.ddos_protection.vote_accepting_periods) { +// // skip this check if kConf.network.ddos_protection.vote_accepting_periods == 0 +// // vote->getPeriod() - 1 is here because votes are validated against vote_period - 1 in dpos contract +// // Do not request round sync too often here +// if (vote->getVoter() == peer->getId() && +// std::chrono::system_clock::now() - last_pbft_block_sync_request_time_ > kSyncRequestInterval) { +// // request PBFT chain sync from this node +// sealAndSend(peer->getId(), SubprotocolPacketType::kGetPbftSyncPacket, +// std::move(dev::RLPStream(1) << std::max(vote->getPeriod() - 1, peer->pbft_chain_size_.load()))); +// last_pbft_block_sync_request_time_ = std::chrono::system_clock::now(); +// } +// +// return {false, "Invalid period(too big): " + genErrMsg(vote)}; +// } +// +// // Round validation +// auto checking_round = current_pbft_round; +// // If period is not the same we assume current round is equal to 1 +// // So we won't accept votes for future period with round bigger than kConf.network.vote_accepting_steps +// if (current_pbft_period != vote->getPeriod()) { +// checking_round = 1; +// } +// +// // vote->getRound() == checking_round - 1 && next_vote -> previous round next vote +// if (vote->getRound() < checking_round - 1 || +// (vote->getRound() == checking_round - 1 && vote->getType() != PbftVoteTypes::next_vote)) { +// return {false, 
"Invalid round(too small): " + genErrMsg(vote)}; +// } else if (validate_max_round_step && kConf.network.ddos_protection.vote_accepting_rounds && +// vote->getRound() >= checking_round + kConf.network.ddos_protection.vote_accepting_rounds) { +// // skip this check if kConf.network.vote_accepting_rounds == 0 +// // Trigger votes(round) syncing only if we are in sync in terms of period +// if (current_pbft_period == vote->getPeriod()) { +// // Do not request round sync too often here +// if (vote->getVoter() == peer->getId() && +// std::chrono::system_clock::now() - last_votes_sync_request_time_ > kSyncRequestInterval) { +// // request round votes sync from this node +// requestPbftNextVotesAtPeriodRound(peer->getId(), current_pbft_period, current_pbft_round); +// last_votes_sync_request_time_ = std::chrono::system_clock::now(); +// } +// } +// +// return {false, "Invalid round(too big): " + genErrMsg(vote)}; +// } +// +// // Step validation +// auto checking_step = pbft_mgr_->getPbftStep(); +// // If period or round is not the same we assume current step is equal to 1 +// // So we won't accept votes for future rounds with step bigger than kConf.network.vote_accepting_steps +// if (current_pbft_period != vote->getPeriod() || current_pbft_round != vote->getRound()) { +// checking_step = 1; +// } +// +// // skip check if kConf.network.vote_accepting_steps == 0 +// if (validate_max_round_step && kConf.network.ddos_protection.vote_accepting_steps && +// vote->getStep() >= checking_step + kConf.network.ddos_protection.vote_accepting_steps) { +// return {false, "Invalid step(too big): " + genErrMsg(vote)}; +// } +// +// return {true, ""}; +// } +// +// bool ExtVotesPacketHandler::validateVoteAndBlock(const std::shared_ptr &vote, +// const std::shared_ptr &pbft_block) const { +// if (pbft_block->getBlockHash() != vote->getBlockHash()) { +// LOG(log_er_) << "Vote " << vote->getHash() << " voted block " << vote->getBlockHash() << " != actual block " +// << 
pbft_block->getBlockHash(); +// return false; +// } +// return true; +// } +// +// bool ExtVotesPacketHandler::isPbftRelevantVote(const std::shared_ptr &vote) const { +// const auto [current_pbft_round, current_pbft_period] = pbft_mgr_->getPbftRoundAndPeriod(); +// +// if (vote->getPeriod() >= current_pbft_period && vote->getRound() >= current_pbft_round) { +// // Standard current or future vote +// return true; +// } else if (vote->getPeriod() == current_pbft_period && vote->getRound() == (current_pbft_round - 1) && +// vote->getType() == PbftVoteTypes::next_vote) { +// // Previous round next vote +// return true; +// } else if (vote->getPeriod() == current_pbft_period - 1 && vote->getType() == PbftVoteTypes::cert_vote) { +// // Previous period cert vote - potential reward vote +// return true; +// } +// +// return false; +// } +// +// void ExtVotesPacketHandler::sendPbftVotesBundle(const std::shared_ptr &peer, +// std::vector> &&votes) { +// if (votes.empty()) { +// return; +// } +// +// auto sendVotes = [this, &peer](std::vector> &&votes) { +// auto votes_bytes = encodePbftVotesBundleRlp(std::move(votes)); +// if (votes_bytes.empty()) { +// LOG(log_er_) << "Unable to send VotesBundle rlp"; +// return; +// } +// +// dev::RLPStream votes_rlp_stream; +// votes_rlp_stream.appendRaw(votes_bytes); +// +// if (sealAndSend(peer->getId(), SubprotocolPacketType::kVotesBundlePacket, std::move(votes_rlp_stream))) { +// LOG(log_dg_) << " Votes bundle with " << votes.size() << " votes sent to " << peer->getId(); +// for (const auto &vote : votes) { +// peer->markPbftVoteAsKnown(vote->getHash()); +// } +// } +// }; +// +// if (votes.size() <= kMaxVotesInBundleRlp) { +// sendVotes(std::move(votes)); +// return; +// } else { +// // Need to split votes into multiple packets +// size_t index = 0; +// while (index < votes.size()) { +// const size_t votes_count = std::min(kMaxVotesInBundleRlp, votes.size() - index); +// +// const auto begin_it = std::next(votes.begin(), index); +// 
const auto end_it = std::next(begin_it, votes_count); +// +// std::vector> votes_sub_vector; +// std::move(begin_it, end_it, std::back_inserter(votes_sub_vector)); +// +// sendVotes(std::move(votes_sub_vector)); +// +// index += votes_count; +// } +// } +// } +// +// } // namespace taraxa::network::tarcap diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/common/packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/common/packet_handler.cpp index 0113a486aa..f623f3965a 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/common/packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/common/packet_handler.cpp @@ -1,143 +1,149 @@ -#include "network/tarcap/packets_handlers/latest/common/packet_handler.hpp" - -#include "network/tarcap/packets_handlers/latest/common/exceptions.hpp" -#include "network/tarcap/stats/time_period_packets_stats.hpp" - -namespace taraxa::network::tarcap { - -PacketHandler::PacketHandler(const FullNodeConfig& conf, std::shared_ptr peers_state, - std::shared_ptr packets_stats, const addr_t& node_addr, - const std::string& log_channel_name) - : kConf(conf), peers_state_(std::move(peers_state)), packets_stats_(std::move(packets_stats)) { - LOG_OBJECTS_CREATE(log_channel_name); -} - -void PacketHandler::checkPacketRlpIsList(const threadpool::PacketData& packet_data) const { - if (!packet_data.rlp_.isList()) { - throw InvalidRlpItemsCountException(packet_data.type_str_ + " RLP must be a list. 
", 0, 1); - } -} - -void PacketHandler::processPacket(const threadpool::PacketData& packet_data) { - try { - const auto begin = std::chrono::steady_clock::now(); - - // It can rarely happen that packet was received and pushed into the queue when peer was still in peers map, - // in the meantime the connection was lost and we started to process packet from such peer - const auto peer = peers_state_->getPacketSenderPeer(packet_data.from_node_id_, packet_data.type_); - if (!peer.first) [[unlikely]] { - LOG(log_wr_) << "Unable to process packet. Reason: " << peer.second; - disconnect(packet_data.from_node_id_, dev::p2p::UserReason); - return; - } - - // Validates packet rlp format - // In case there is a type mismatch, one of the dev::RLPException's is thrown during further parsing in process - // function - checkPacketRlpIsList(packet_data); - validatePacketRlpFormat(packet_data); - - // Main processing function - process(packet_data, peer.first); - - auto processing_duration = - std::chrono::duration_cast(std::chrono::steady_clock::now() - begin); - auto tp_wait_duration = std::chrono::duration_cast(std::chrono::steady_clock::now() - - packet_data.receive_time_); - - PacketStats packet_stats{1 /* count */, packet_data.rlp_.data().size(), processing_duration, tp_wait_duration}; - peer.first->addSentPacket(packet_data.type_str_, packet_stats); - - if (kConf.network.ddos_protection.log_packets_stats) { - packets_stats_->addReceivedPacket(packet_data.type_str_, packet_data.from_node_id_, packet_stats); - } - - } catch (const MaliciousPeerException& e) { - // thrown during packets processing -> malicious peer, invalid rlp items count, ... 
- // If there is custom peer set in exception, disconnect him, not packet sender - if (const auto custom_peer = e.getPeer(); custom_peer.has_value()) { - handle_caught_exception(e.what(), packet_data, *custom_peer, e.getDisconnectReason(), - true /* set peer as malicious */); - } else { - handle_caught_exception(e.what(), packet_data, packet_data.from_node_id_, e.getDisconnectReason(), - true /* set peer as malicious */); - } - } catch (const PacketProcessingException& e) { - // thrown during packets processing... - handle_caught_exception(e.what(), packet_data, packet_data.from_node_id_, e.getDisconnectReason(), - true /* set peer as malicious */); - } catch (const dev::RLPException& e) { - // thrown during parsing inside aleth/libdevcore -> type mismatch - handle_caught_exception(e.what(), packet_data, packet_data.from_node_id_, dev::p2p::DisconnectReason::BadProtocol, - true /* set peer as malicious */); - } catch (const std::exception& e) { - handle_caught_exception(e.what(), packet_data, packet_data.from_node_id_); - } catch (...) { - handle_caught_exception("Unknown exception", packet_data, packet_data.from_node_id_); - } -} - -void PacketHandler::handle_caught_exception(std::string_view exception_msg, const threadpool::PacketData& packet_data, - const dev::p2p::NodeID& peer, dev::p2p::DisconnectReason disconnect_reason, - bool set_peer_as_malicious) { - LOG(log_er_) << "Exception caught during packet processing: " << exception_msg << " ." 
- << "PacketData: " << jsonToUnstyledString(packet_data.getPacketDataJson()) - << ", disconnect peer: " << peer.toString(); - - if (set_peer_as_malicious) { - peers_state_->set_peer_malicious(peer); - } - - disconnect(peer, disconnect_reason); -} - -bool PacketHandler::sealAndSend(const dev::p2p::NodeID& node_id, SubprotocolPacketType packet_type, - dev::RLPStream&& rlp) { - auto host = peers_state_->host_.lock(); - if (!host) { - LOG(log_er_) << "sealAndSend failed to obtain host"; - return false; - } - - if (const auto peer = peers_state_->getPacketSenderPeer(node_id, packet_type); !peer.first) [[unlikely]] { - LOG(log_wr_) << "Unable to send packet. Reason: " << peer.second; - host->disconnect(node_id, dev::p2p::UserReason); - return false; - } - - const auto begin = std::chrono::steady_clock::now(); - const size_t packet_size = rlp.out().size(); - - host->send(node_id, TARAXA_CAPABILITY_NAME, packet_type, rlp.invalidate(), - [begin, node_id, packet_size, packet_type, this]() { - if (!kConf.network.ddos_protection.log_packets_stats) { - return; - } - - PacketStats packet_stats{ - 1 /* count */, packet_size, - std::chrono::duration_cast(std::chrono::steady_clock::now() - begin), - std::chrono::microseconds{0}}; - - packets_stats_->addSentPacket(convertPacketTypeToString(packet_type), node_id, packet_stats); - }); - - return true; -} - -void PacketHandler::disconnect(const dev::p2p::NodeID& node_id, dev::p2p::DisconnectReason reason) { - if (auto host = peers_state_->host_.lock(); host) { - LOG(log_nf_) << "Disconnect node " << node_id.abridged(); - host->disconnect(node_id, reason); - } else { - LOG(log_er_) << "Unable to disconnect node " << node_id.abridged() << " due to invalid host."; - } -} - -void PacketHandler::requestPbftNextVotesAtPeriodRound(const dev::p2p::NodeID& peerID, PbftPeriod pbft_period, - PbftRound pbft_round) { - LOG(log_dg_) << "Sending GetNextVotesSyncPacket with period:" << pbft_period << ", round:" << pbft_round; - sealAndSend(peerID, 
GetNextVotesSyncPacket, std::move(dev::RLPStream(2) << pbft_period << pbft_round)); -} - -} // namespace taraxa::network::tarcap +// #include "network/tarcap/packets_handlers/latest/common/packet_handler.hpp" +// +// #include "network/tarcap/packets_handlers/latest/common/exceptions.hpp" +// #include "network/tarcap/stats/time_period_packets_stats.hpp" +// +// namespace taraxa::network::tarcap { +// +// PacketHandler::PacketHandler(const FullNodeConfig& conf, std::shared_ptr peers_state, +// std::shared_ptr packets_stats, const addr_t& node_addr, +// const std::string& log_channel_name) +// : kConf(conf), peers_state_(std::move(peers_state)), packets_stats_(std::move(packets_stats)) { +// LOG_OBJECTS_CREATE(log_channel_name); +// } +// +// void PacketHandler::checkPacketRlpIsList(const threadpool::PacketData& packet_data) const { +// if (!packet_data.rlp_.isList()) { +// throw InvalidRlpItemsCountException(packet_data.type_str_ + " RLP must be a list. ", 0, 1); +// } +// } +// +// void PacketHandler::processPacket(const PacketType& packet_data) { +// try { +// const auto begin = std::chrono::steady_clock::now(); +// +// // It can rarely happen that packet was received and pushed into the queue when peer was still in peers map, +// // in the meantime the connection was lost and we started to process packet from such peer +// const auto peer = peers_state_->getPacketSenderPeer(packet_data.from_node_id_, packet_data.type_); +// if (!peer.first) [[unlikely]] { +// LOG(log_wr_) << "Unable to process packet. 
Reason: " << peer.second; +// disconnect(packet_data.from_node_id_, dev::p2p::UserReason); +// return; +// } +// +// // Validates packet rlp format +// // In case there is a type mismatch, one of the dev::RLPException's is thrown during further parsing in process +// // function +// checkPacketRlpIsList(packet_data); +// validatePacketRlpFormat(packet_data); +// +// // TODO: call decode function and remove checkPacketRlpIsList & validatePacketRlpFormat +// PacketType packet{packet_data}; +// packet.decocode(); +// +// // Main processing function +// process(packet, peer.first); +// +// auto processing_duration = +// std::chrono::duration_cast(std::chrono::steady_clock::now() - begin); +// auto tp_wait_duration = std::chrono::duration_cast(std::chrono::steady_clock::now() - +// packet_data.receive_time_); +// +// PacketStats packet_stats{1 /* count */, packet_data.rlp_.data().size(), processing_duration, tp_wait_duration}; +// peer.first->addSentPacket(packet_data.type_str_, packet_stats); +// +// if (kConf.network.ddos_protection.log_packets_stats) { +// packets_stats_->addReceivedPacket(packet_data.type_str_, packet_data.from_node_id_, packet_stats); +// } +// +// } catch (const MaliciousPeerException& e) { +// // thrown during packets processing -> malicious peer, invalid rlp items count, ... +// // If there is custom peer set in exception, disconnect him, not packet sender +// if (const auto custom_peer = e.getPeer(); custom_peer.has_value()) { +// handle_caught_exception(e.what(), packet_data, *custom_peer, e.getDisconnectReason(), +// true /* set peer as malicious */); +// } else { +// handle_caught_exception(e.what(), packet_data, packet_data.from_node_id_, e.getDisconnectReason(), +// true /* set peer as malicious */); +// } +// } catch (const PacketProcessingException& e) { +// // thrown during packets processing... 
+// handle_caught_exception(e.what(), packet_data, packet_data.from_node_id_, e.getDisconnectReason(), +// true /* set peer as malicious */); +// } catch (const dev::RLPException& e) { +// // thrown during parsing inside aleth/libdevcore -> type mismatch +// handle_caught_exception(e.what(), packet_data, packet_data.from_node_id_, +// dev::p2p::DisconnectReason::BadProtocol, +// true /* set peer as malicious */); +// } catch (const std::exception& e) { +// handle_caught_exception(e.what(), packet_data, packet_data.from_node_id_); +// } catch (...) { +// handle_caught_exception("Unknown exception", packet_data, packet_data.from_node_id_); +// } +// } +// +// void PacketHandler::handle_caught_exception(std::string_view exception_msg, const threadpool::PacketData& +// packet_data, +// const dev::p2p::NodeID& peer, dev::p2p::DisconnectReason +// disconnect_reason, bool set_peer_as_malicious) { +// LOG(log_er_) << "Exception caught during packet processing: " << exception_msg << " ." +// << "PacketData: " << jsonToUnstyledString(packet_data.getPacketDataJson()) +// << ", disconnect peer: " << peer.toString(); +// +// if (set_peer_as_malicious) { +// peers_state_->set_peer_malicious(peer); +// } +// +// disconnect(peer, disconnect_reason); +// } +// +// bool PacketHandler::sealAndSend(const dev::p2p::NodeID& node_id, SubprotocolPacketType packet_type, +// dev::RLPStream&& rlp) { +// auto host = peers_state_->host_.lock(); +// if (!host) { +// LOG(log_er_) << "sealAndSend failed to obtain host"; +// return false; +// } +// +// if (const auto peer = peers_state_->getPacketSenderPeer(node_id, packet_type); !peer.first) [[unlikely]] { +// LOG(log_wr_) << "Unable to send packet. 
Reason: " << peer.second; +// host->disconnect(node_id, dev::p2p::UserReason); +// return false; +// } +// +// const auto begin = std::chrono::steady_clock::now(); +// const size_t packet_size = rlp.out().size(); +// +// host->send(node_id, TARAXA_CAPABILITY_NAME, packet_type, rlp.invalidate(), +// [begin, node_id, packet_size, packet_type, this]() { +// if (!kConf.network.ddos_protection.log_packets_stats) { +// return; +// } +// +// PacketStats packet_stats{ +// 1 /* count */, packet_size, +// std::chrono::duration_cast(std::chrono::steady_clock::now() - begin), +// std::chrono::microseconds{0}}; +// +// packets_stats_->addSentPacket(convertPacketTypeToString(packet_type), node_id, packet_stats); +// }); +// +// return true; +// } +// +// void PacketHandler::disconnect(const dev::p2p::NodeID& node_id, dev::p2p::DisconnectReason reason) { +// if (auto host = peers_state_->host_.lock(); host) { +// LOG(log_nf_) << "Disconnect node " << node_id.abridged(); +// host->disconnect(node_id, reason); +// } else { +// LOG(log_er_) << "Unable to disconnect node " << node_id.abridged() << " due to invalid host."; +// } +// } +// +// void PacketHandler::requestPbftNextVotesAtPeriodRound(const dev::p2p::NodeID& peerID, PbftPeriod pbft_period, +// PbftRound pbft_round) { +// LOG(log_dg_) << "Sending GetNextVotesSyncPacket with period:" << pbft_period << ", round:" << pbft_round; +// sealAndSend(peerID, GetNextVotesSyncPacket, std::move(dev::RLPStream(2) << pbft_period << pbft_round)); +// } +// +// } // namespace taraxa::network::tarcap diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/dag_block_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/dag_block_packet_handler.cpp index a0b22ea34b..253f237e59 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/dag_block_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/dag_block_packet_handler.cpp @@ -19,49 +19,25 
@@ DagBlockPacketHandler::DagBlockPacketHandler(const FullNodeConfig &conf, std::sh logs_prefix + "DAG_BLOCK_PH"), trx_mgr_(std::move(trx_mgr)) {} -void DagBlockPacketHandler::validatePacketRlpFormat(const threadpool::PacketData &packet_data) const { - constexpr size_t required_size = 2; - // Only one dag block can be received - if (packet_data.rlp_.itemCount() != required_size) { - throw InvalidRlpItemsCountException(packet_data.type_str_, packet_data.rlp_.itemCount(), required_size); - } -} +void DagBlockPacketHandler::process(DagBlockPacket &&packet, const std::shared_ptr &peer) { + blk_hash_t const hash = packet.dag_block.getHash(); -void DagBlockPacketHandler::process(const threadpool::PacketData &packet_data, - const std::shared_ptr &peer) { - std::unordered_map> transactions; - auto dag_rlp = packet_data.rlp_; - if (packet_data.rlp_.itemCount() == 2) { - const auto trx_count = packet_data.rlp_[0].itemCount(); - transactions.reserve(trx_count); - - for (const auto tx_rlp : packet_data.rlp_[0]) { - try { - auto trx = std::make_shared(tx_rlp); - peer->markTransactionAsKnown(trx->getHash()); - transactions.emplace(trx->getHash(), std::move(trx)); - } catch (const Transaction::InvalidTransaction &e) { - throw MaliciousPeerException("Unable to parse transaction: " + std::string(e.what())); - } - } - dag_rlp = packet_data.rlp_[1]; + for (const auto &tx : packet.transactions) { + peer->markTransactionAsKnown(tx.first); } - DagBlock block(dag_rlp); - blk_hash_t const hash = block.getHash(); - peer->markDagBlockAsKnown(hash); - if (block.getLevel() > peer->dag_level_) { - peer->dag_level_ = block.getLevel(); + if (packet.dag_block.getLevel() > peer->dag_level_) { + peer->dag_level_ = packet.dag_block.getLevel(); } // Do not process this block in case we already have it - if (dag_mgr_->isDagBlockKnown(block.getHash())) { + if (dag_mgr_->isDagBlockKnown(packet.dag_block.getHash())) { LOG(log_tr_) << "Received known DagBlockPacket " << hash << "from: " << peer->getId(); 
return; } - onNewBlockReceived(std::move(block), peer, transactions); + onNewBlockReceived(std::move(packet.dag_block), peer, packet.transactions); } void DagBlockPacketHandler::sendBlockWithTransactions(dev::p2p::NodeID const &peer_id, taraxa::DagBlock block, @@ -88,7 +64,7 @@ void DagBlockPacketHandler::sendBlockWithTransactions(dev::p2p::NodeID const &pe s.appendRaw(block.rlp(true)); - if (!sealAndSend(peer_id, DagBlockPacket, std::move(s))) { + if (!sealAndSend(peer_id, SubprotocolPacketType::kDagBlockPacket, std::move(s))) { LOG(log_wr_) << "Sending DagBlock " << block.getHash() << " failed to " << peer_id; return; } diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/dag_sync_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/dag_sync_packet_handler.cpp index 9ceb0edf39..8d7d599299 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/dag_sync_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/dag_sync_packet_handler.cpp @@ -20,70 +20,33 @@ DagSyncPacketHandler::DagSyncPacketHandler(const FullNodeConfig& conf, std::shar logs_prefix + "DAG_SYNC_PH"), trx_mgr_(std::move(trx_mgr)) {} -void DagSyncPacketHandler::validatePacketRlpFormat(const threadpool::PacketData& packet_data) const { - if (constexpr size_t required_size = 4; packet_data.rlp_.itemCount() != required_size) { - throw InvalidRlpItemsCountException(packet_data.type_str_, packet_data.rlp_.itemCount(), required_size); - } -} - -void DagSyncPacketHandler::process(const threadpool::PacketData& packet_data, const std::shared_ptr& peer) { - auto it = packet_data.rlp_.begin(); - const auto request_period = (*it++).toInt(); - const auto response_period = (*it++).toInt(); - +void DagSyncPacketHandler::process(DagSyncPacket&& packet, const std::shared_ptr& peer) { // If the periods did not match restart syncing - if (response_period > request_period) { - LOG(log_dg_) << "Received DagSyncPacket 
with mismatching periods: " << response_period << " " << request_period - << " from " << packet_data.from_node_id_.abridged(); - if (peer->pbft_chain_size_ < response_period) { - peer->pbft_chain_size_ = response_period; + if (packet.response_period > packet.request_period) { + LOG(log_dg_) << "Received DagSyncPacket with mismatching periods: " << packet.response_period << " " + << packet.request_period << " from " << peer->getId(); + if (peer->pbft_chain_size_ < packet.response_period) { + peer->pbft_chain_size_ = packet.response_period; } peer->peer_dag_syncing_ = false; // We might be behind, restart pbft sync if needed startSyncingPbft(); return; - } else if (response_period < request_period) { + } else if (packet.response_period < packet.request_period) { // This should not be possible for honest node std::ostringstream err_msg; - err_msg << "Received DagSyncPacket with mismatching periods: response_period(" << response_period - << ") != request_period(" << request_period << ")"; + err_msg << "Received DagSyncPacket with mismatching periods: response_period(" << packet.response_period + << ") != request_period(" << packet.request_period << ")"; throw MaliciousPeerException(err_msg.str()); } std::vector transactions_to_log; - std::unordered_map> transactions; - const auto trx_count = (*it).itemCount(); - transactions.reserve(trx_count); - transactions_to_log.reserve(trx_count); - - for (const auto tx_rlp : (*it++)) { - try { - auto trx = std::make_shared(tx_rlp); - peer->markTransactionAsKnown(trx->getHash()); - transactions.emplace(trx->getHash(), std::move(trx)); - } catch (const Transaction::InvalidTransaction& e) { - throw MaliciousPeerException("Unable to parse transaction: " + std::string(e.what())); - } - } - - std::vector dag_blocks; - std::vector dag_blocks_to_log; - dag_blocks.reserve((*it).itemCount()); - dag_blocks_to_log.reserve((*it).itemCount()); - - for (const auto block_rlp : *it) { - DagBlock block(block_rlp); - 
peer->markDagBlockAsKnown(block.getHash()); - if (dag_mgr_->isDagBlockKnown(block.getHash())) { - LOG(log_tr_) << "Received known DagBlock " << block.getHash() << "from: " << peer->getId(); - continue; - } - dag_blocks.emplace_back(std::move(block)); - } - - for (auto& trx : transactions) { + transactions_to_log.reserve(packet.transactions.size()); + for (auto& trx : packet.transactions) { + peer->markTransactionAsKnown(trx.first); transactions_to_log.push_back(trx.first); + if (trx_mgr_->isTransactionKnown(trx.first)) { continue; } @@ -96,10 +59,18 @@ void DagSyncPacketHandler::process(const threadpool::PacketData& packet_data, co } } - for (auto& block : dag_blocks) { + std::vector dag_blocks_to_log; + dag_blocks_to_log.reserve(packet.dag_blocks.size()); + for (auto& block : packet.dag_blocks) { dag_blocks_to_log.push_back(block.getHash()); + peer->markDagBlockAsKnown(block.getHash()); + + if (dag_mgr_->isDagBlockKnown(block.getHash())) { + LOG(log_tr_) << "Received known DagBlock " << block.getHash() << "from: " << peer->getId(); + continue; + } - auto verified = dag_mgr_->verifyBlock(block, transactions); + auto verified = dag_mgr_->verifyBlock(block, packet.transactions); if (verified.first != DagManager::VerifyBlockReturnType::Verified) { std::ostringstream err_msg; err_msg << "DagBlock " << block.getHash() << " failed verification with error code " @@ -126,7 +97,7 @@ void DagSyncPacketHandler::process(const threadpool::PacketData& packet_data, co peer->peer_dag_syncing_ = false; LOG(log_dg_) << "Received DagSyncPacket with blocks: " << dag_blocks_to_log - << " Transactions: " << transactions_to_log << " from " << packet_data.from_node_id_; + << " Transactions: " << transactions_to_log << " from " << peer->getId(); } } // namespace taraxa::network::tarcap diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/get_dag_sync_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/get_dag_sync_packet_handler.cpp 
index a841fbac42..4329a0db8c 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/get_dag_sync_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/get_dag_sync_packet_handler.cpp @@ -15,19 +15,13 @@ GetDagSyncPacketHandler::GetDagSyncPacketHandler(const FullNodeConfig &conf, std dag_mgr_(std::move(dag_mgr)), db_(std::move(db)) {} -void GetDagSyncPacketHandler::validatePacketRlpFormat(const threadpool::PacketData &packet_data) const { - if (constexpr size_t required_size = 2; packet_data.rlp_.itemCount() != required_size) { - throw InvalidRlpItemsCountException(packet_data.type_str_, packet_data.rlp_.itemCount(), required_size); - } -} - -void GetDagSyncPacketHandler::process(const threadpool::PacketData &packet_data, +void GetDagSyncPacketHandler::process(GetDagSyncPacket &&packet, [[maybe_unused]] const std::shared_ptr &peer) { if (!peer->requestDagSyncingAllowed()) { // This should not be possible for honest node // Each node should perform dag syncing only when allowed std::ostringstream err_msg; - err_msg << "Received multiple GetDagSyncPackets from " << packet_data.from_node_id_.abridged(); + err_msg << "Received multiple GetDagSyncPackets from " << peer->getId().abridged(); throw MaliciousPeerException(err_msg.str()); } @@ -35,21 +29,16 @@ void GetDagSyncPacketHandler::process(const threadpool::PacketData &packet_data, // This lock prevents race condition between syncing and gossiping dag blocks std::unique_lock lock(peer->mutex_for_sending_dag_blocks_); - std::unordered_set blocks_hashes; - auto it = packet_data.rlp_.begin(); - const auto peer_period = (*it++).toInt(); - std::string blocks_hashes_to_log; - for (const auto block_hash_rlp : *it) { - blk_hash_t hash = block_hash_rlp.toHash(); + blocks_hashes_to_log.reserve(packet.blocks_hashes.size()); + for (const auto &hash : packet.blocks_hashes) { blocks_hashes_to_log += hash.abridged(); - blocks_hashes.emplace(hash); } LOG(log_dg_) << "Received 
GetDagSyncPacket: " << blocks_hashes_to_log << " from " << peer->getId(); - auto [period, blocks, transactions] = dag_mgr_->getNonFinalizedBlocksWithTransactions(blocks_hashes); - if (peer_period == period) { + auto [period, blocks, transactions] = dag_mgr_->getNonFinalizedBlocksWithTransactions(packet.blocks_hashes); + if (packet.peer_period == period) { peer->syncing_ = false; peer->peer_requested_dag_syncing_ = true; peer->peer_requested_dag_syncing_time_ = @@ -59,7 +48,7 @@ void GetDagSyncPacketHandler::process(const threadpool::PacketData &packet_data, blocks.clear(); transactions.clear(); } - sendBlocks(packet_data.from_node_id_, std::move(blocks), std::move(transactions), peer_period, period); + sendBlocks(peer->getId(), std::move(blocks), std::move(transactions), packet.peer_period, period); } void GetDagSyncPacketHandler::sendBlocks(const dev::p2p::NodeID &peer_id, @@ -83,7 +72,7 @@ void GetDagSyncPacketHandler::sendBlocks(const dev::p2p::NodeID &peer_id, s.appendRaw(block->rlp(true)); } - sealAndSend(peer_id, SubprotocolPacketType::DagSyncPacket, std::move(s)); + sealAndSend(peer_id, SubprotocolPacketType::kDagSyncPacket, std::move(s)); } } // namespace taraxa::network::tarcap diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/get_next_votes_bundle_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/get_next_votes_bundle_packet_handler.cpp index b7443b6e5d..5d2e55c4e9 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/get_next_votes_bundle_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/get_next_votes_bundle_packet_handler.cpp @@ -14,25 +14,16 @@ GetNextVotesBundlePacketHandler::GetNextVotesBundlePacketHandler( std::move(pbft_chain), std::move(vote_mgr), std::move(slashing_manager), node_addr, logs_prefix + "GET_NEXT_VOTES_BUNDLE_PH") {} -void GetNextVotesBundlePacketHandler::validatePacketRlpFormat(const threadpool::PacketData 
&packet_data) const { - if (constexpr size_t required_size = 2; packet_data.rlp_.itemCount() != required_size) { - throw InvalidRlpItemsCountException(packet_data.type_str_, packet_data.rlp_.itemCount(), required_size); - } -} - -void GetNextVotesBundlePacketHandler::process(const threadpool::PacketData &packet_data, +void GetNextVotesBundlePacketHandler::process(GetNextVotesBundlePacket &&packet, const std::shared_ptr &peer) { LOG(log_dg_) << "Received GetNextVotesSyncPacket request"; - - const PbftPeriod peer_pbft_period = packet_data.rlp_[0].toInt(); - const PbftRound peer_pbft_round = packet_data.rlp_[1].toInt(); const auto [pbft_round, pbft_period] = pbft_mgr_->getPbftRoundAndPeriod(); // Send votes only for current_period == peer_period && current_period >= peer_round - if (pbft_period != peer_pbft_period || pbft_round == 1 || pbft_round < peer_pbft_round) { + if (pbft_period != packet.peer_pbft_period || pbft_round == 1 || pbft_round < packet.peer_pbft_round) { LOG(log_nf_) << "No previous round next votes sync packet will be sent. 
pbft_period " << pbft_period - << ", peer_pbft_period " << peer_pbft_period << ", pbft_round " << pbft_round << ", peer_pbft_round " - << peer_pbft_round; + << ", peer_pbft_period " << packet.peer_pbft_period << ", pbft_round " << pbft_round + << ", peer_pbft_round " << packet.peer_pbft_round; return; } diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/get_pbft_sync_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/get_pbft_sync_packet_handler.cpp index e8af16d529..aa238f02f4 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/get_pbft_sync_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/get_pbft_sync_packet_handler.cpp @@ -22,46 +22,38 @@ GetPbftSyncPacketHandler::GetPbftSyncPacketHandler(const FullNodeConfig &conf, s vote_mgr_(std::move(vote_mgr)), db_(std::move(db)) {} -void GetPbftSyncPacketHandler::validatePacketRlpFormat(const threadpool::PacketData &packet_data) const { - if (constexpr size_t required_size = 1; packet_data.rlp_.itemCount() != required_size) { - throw InvalidRlpItemsCountException(packet_data.type_str_, packet_data.rlp_.itemCount(), required_size); - } -} - -void GetPbftSyncPacketHandler::process(const threadpool::PacketData &packet_data, - const std::shared_ptr &peer) { +void GetPbftSyncPacketHandler::process(GetPbftSyncPacket &&packet, const std::shared_ptr &peer) { LOG(log_tr_) << "Received GetPbftSyncPacket Block"; - const size_t height_to_sync = packet_data.rlp_[0].toInt(); // Here need PBFT chain size, not synced period since synced blocks has not verified yet. const size_t my_chain_size = pbft_chain_->getPbftChainSize(); - if (height_to_sync > my_chain_size) { + if (packet.height_to_sync > my_chain_size) { // Node update peers PBFT chain size in status packet. 
Should not request syncing period bigger than pbft chain size std::ostringstream err_msg; - err_msg << "Peer " << packet_data.from_node_id_ << " request syncing period start at " << height_to_sync + err_msg << "Peer " << peer->getId() << " request syncing period start at " << packet.height_to_sync << ". That's bigger than own PBFT chain size " << my_chain_size; throw MaliciousPeerException(err_msg.str()); } - if (kConf.is_light_node && height_to_sync + kConf.light_node_history <= my_chain_size) { + if (kConf.is_light_node && packet.height_to_sync + kConf.light_node_history <= my_chain_size) { std::ostringstream err_msg; - err_msg << "Peer " << packet_data.from_node_id_ << " request syncing period start at " << height_to_sync + err_msg << "Peer " << peer->getId() << " request syncing period start at " << packet.height_to_sync << ". Light node does not have the data " << my_chain_size; throw MaliciousPeerException(err_msg.str()); } size_t blocks_to_transfer = 0; auto pbft_chain_synced = false; - const auto total_period_data_size = my_chain_size - height_to_sync + 1; + const auto total_period_data_size = my_chain_size - packet.height_to_sync + 1; if (total_period_data_size <= kConf.network.sync_level_size) { blocks_to_transfer = total_period_data_size; pbft_chain_synced = true; } else { blocks_to_transfer = kConf.network.sync_level_size; } - LOG(log_tr_) << "Will send " << blocks_to_transfer << " PBFT blocks to " << packet_data.from_node_id_; + LOG(log_tr_) << "Will send " << blocks_to_transfer << " PBFT blocks to " << peer->getId(); - sendPbftBlocks(peer, height_to_sync, blocks_to_transfer, pbft_chain_synced); + sendPbftBlocks(peer, packet.height_to_sync, blocks_to_transfer, pbft_chain_synced); } // api for pbft syncing @@ -104,7 +96,7 @@ void GetPbftSyncPacketHandler::sendPbftBlocks(const std::shared_ptr } LOG(log_dg_) << "Sending PbftSyncPacket period " << block_period << " to " << peer_id; - sealAndSend(peer_id, SubprotocolPacketType::PbftSyncPacket, 
std::move(s)); + sealAndSend(peer_id, SubprotocolPacketType::kPbftSyncPacket, std::move(s)); if (pbft_chain_synced && last_block) { peer->syncing_ = false; } diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/get_pillar_votes_bundle_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/get_pillar_votes_bundle_packet_handler.cpp index cc71e189e9..883d70dc6d 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/get_pillar_votes_bundle_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/get_pillar_votes_bundle_packet_handler.cpp @@ -13,35 +13,27 @@ GetPillarVotesBundlePacketHandler::GetPillarVotesBundlePacketHandler( logs_prefix + "GET_PILLAR_VOTES_BUNDLE_PH"), pillar_chain_manager_(std::move(pillar_chain_manager)) {} -void GetPillarVotesBundlePacketHandler::validatePacketRlpFormat(const threadpool::PacketData &packet_data) const { - auto items = packet_data.rlp_.itemCount(); - if (items != kGetPillarVotesBundlePacketSize) { - throw InvalidRlpItemsCountException(packet_data.type_str_, items, kGetPillarVotesBundlePacketSize); - } -} - -void GetPillarVotesBundlePacketHandler::process(const threadpool::PacketData &packet_data, +void GetPillarVotesBundlePacketHandler::process(GetPillarVotesBundlePacket &&packet, const std::shared_ptr &peer) { LOG(log_dg_) << "GetPillarVotesBundlePacketHandler received from peer " << peer->getId(); - const PbftPeriod period = packet_data.rlp_[0].toInt(); - const blk_hash_t pillar_block_hash = packet_data.rlp_[1].toHash(); - if (!kConf.genesis.state.hardforks.ficus_hf.isFicusHardfork(period)) { + if (!kConf.genesis.state.hardforks.ficus_hf.isFicusHardfork(packet.period)) { std::ostringstream err_msg; - err_msg << "Pillar votes bundle request for period " << period << ", ficus hardfork block num " + err_msg << "Pillar votes bundle request for period " << packet.period << ", ficus hardfork block num " << 
kConf.genesis.state.hardforks.ficus_hf.block_num; throw MaliciousPeerException(err_msg.str()); } - if (!kConf.genesis.state.hardforks.ficus_hf.isPbftWithPillarBlockPeriod(period)) { + if (!kConf.genesis.state.hardforks.ficus_hf.isPbftWithPillarBlockPeriod(packet.period)) { std::ostringstream err_msg; - err_msg << "Pillar votes bundle request for period " << period << ". Wrong requested period"; + err_msg << "Pillar votes bundle request for period " << packet.period << ". Wrong requested period"; throw MaliciousPeerException(err_msg.str()); } - const auto votes = pillar_chain_manager_->getVerifiedPillarVotes(period, pillar_block_hash); + const auto votes = pillar_chain_manager_->getVerifiedPillarVotes(packet.period, packet.pillar_block_hash); if (votes.empty()) { - LOG(log_dg_) << "No pillar votes for period " << period << "and pillar block hash " << pillar_block_hash; + LOG(log_dg_) << "No pillar votes for period " << packet.period << "and pillar block hash " + << packet.pillar_block_hash; return; } // Check if the votes size exceeds the maximum limit and split into multiple packets if needed @@ -61,14 +53,14 @@ void GetPillarVotesBundlePacketHandler::process(const threadpool::PacketData &pa } // Seal and send the chunk to the peer - if (sealAndSend(peer->getId(), SubprotocolPacketType::PillarVotesBundlePacket, std::move(s))) { + if (sealAndSend(peer->getId(), SubprotocolPacketType::kPillarVotesBundlePacket, std::move(s))) { // Mark the votes in this chunk as known for (size_t i = 0; i < chunk_size; ++i) { peer->markPillarVoteAsKnown(votes[votes_sent + i]->getHash()); } - LOG(log_nf_) << "Pillar votes bundle for period " << period << ", hash " << pillar_block_hash << " sent to " - << peer->getId() << " (Chunk " + LOG(log_nf_) << "Pillar votes bundle for period " << packet.period << ", hash " << packet.pillar_block_hash + << " sent to " << peer->getId() << " (Chunk " << (votes_sent / PillarVotesBundlePacketHandler::kMaxPillarVotesInBundleRlp) + 1 << "/" << 
(total_votes + PillarVotesBundlePacketHandler::kMaxPillarVotesInBundleRlp - 1) / PillarVotesBundlePacketHandler::kMaxPillarVotesInBundleRlp @@ -82,11 +74,12 @@ void GetPillarVotesBundlePacketHandler::process(const threadpool::PacketData &pa void GetPillarVotesBundlePacketHandler::requestPillarVotesBundle(PbftPeriod period, const blk_hash_t &pillar_block_hash, const std::shared_ptr &peer) { + // TODO: create GetPillarVotesBundlePacket object and call encode instead of manullaty creating packet here dev::RLPStream s(kGetPillarVotesBundlePacketSize); s << period; s << pillar_block_hash; - if (sealAndSend(peer->getId(), SubprotocolPacketType::GetPillarVotesBundlePacket, std::move(s))) { + if (sealAndSend(peer->getId(), SubprotocolPacketType::kGetPillarVotesBundlePacket, std::move(s))) { LOG(log_nf_) << "Requested pillar votes bundle for period " << period << " and pillar block " << pillar_block_hash << " from peer " << peer->getId(); } else { diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/pbft_sync_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/pbft_sync_packet_handler.cpp index bf44485cb1..58419901ba 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/pbft_sync_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/pbft_sync_packet_handler.cpp @@ -22,66 +22,38 @@ PbftSyncPacketHandler::PbftSyncPacketHandler(const FullNodeConfig &conf, std::sh vote_mgr_(std::move(vote_mgr)), periodic_events_tp_(1, true) {} -void PbftSyncPacketHandler::validatePacketRlpFormat(const threadpool::PacketData &packet_data) const { - if (packet_data.rlp_.itemCount() != kStandardPacketSize && packet_data.rlp_.itemCount() != kChainSyncedPacketSize) { - throw InvalidRlpItemsCountException(packet_data.type_str_, packet_data.rlp_.itemCount(), kStandardPacketSize); - } - - // PeriodData rlp parsing cannot be done through util::rlp_tuple, which automatically checks the rlp size 
so it is - // checked here manually - if (packet_data.rlp_[1].itemCount() != PeriodData::kBaseRlpItemCount && - packet_data.rlp_[1].itemCount() != PeriodData::kExtendedRlpItemCount) { - throw InvalidRlpItemsCountException(packet_data.type_str_ + ":PeriodData", packet_data.rlp_[1].itemCount(), - PeriodData::kBaseRlpItemCount); - } -} - -void PbftSyncPacketHandler::process(const threadpool::PacketData &packet_data, - const std::shared_ptr &peer) { +void PbftSyncPacketHandler::process(PbftSyncPacket &&packet, const std::shared_ptr &peer) { // Note: no need to consider possible race conditions due to concurrent processing as it is // disabled on priority_queue blocking dependencies level const auto syncing_peer = pbft_syncing_state_->syncingPeer(); if (!syncing_peer) { - LOG(log_wr_) << "PbftSyncPacket received from unexpected peer " << packet_data.from_node_id_.abridged() + LOG(log_wr_) << "PbftSyncPacket received from unexpected peer " << peer->getId().abridged() << " but there is no current syncing peer set"; return; } - if (syncing_peer->getId() != packet_data.from_node_id_) { - LOG(log_wr_) << "PbftSyncPacket received from unexpected peer " << packet_data.from_node_id_.abridged() + if (syncing_peer->getId() != peer->getId()) { + LOG(log_wr_) << "PbftSyncPacket received from unexpected peer " << peer->getId().abridged() << " current syncing peer " << syncing_peer->getId().abridged(); return; } // Process received pbft blocks // pbft_chain_synced is the flag to indicate own PBFT chain has synced with the peer's PBFT chain - const bool pbft_chain_synced = packet_data.rlp_.itemCount() == kChainSyncedPacketSize; - // last_block is the flag to indicate this is the last block in each syncing round, doesn't mean PBFT chain has synced - const bool last_block = packet_data.rlp_[0].toInt(); - PeriodData period_data; - try { - period_data = decodePeriodData(packet_data.rlp_[1]); - } catch (const std::runtime_error &e) { - throw MaliciousPeerException("Unable to parse 
PeriodData: " + std::string(e.what())); - } - - std::vector> current_block_cert_votes; - if (pbft_chain_synced) { - current_block_cert_votes = decodeVotesBundle(packet_data.rlp_[2]); - } - const auto pbft_blk_hash = period_data.pbft_blk->getBlockHash(); + const bool pbft_chain_synced = !packet.current_block_cert_votes.empty(); + const auto pbft_blk_hash = packet.period_data.pbft_blk->getBlockHash(); std::string received_dag_blocks_str; // This is just log related stuff - for (auto const &block : period_data.dag_blocks) { + for (auto const &block : packet.period_data.dag_blocks) { received_dag_blocks_str += block.getHash().toString() + " "; if (peer->dag_level_ < block.getLevel()) { peer->dag_level_ = block.getLevel(); } } - const auto pbft_block_period = period_data.pbft_blk->getPeriod(); + const auto pbft_block_period = packet.period_data.pbft_blk->getPeriod(); LOG(log_dg_) << "PbftSyncPacket received. Period: " << pbft_block_period - << ", dag Blocks: " << received_dag_blocks_str << " from " << packet_data.from_node_id_; + << ", dag Blocks: " << received_dag_blocks_str << " from " << peer->getId(); peer->markPbftBlockAsKnown(pbft_blk_hash); // Update peer's pbft period if outdated @@ -92,8 +64,8 @@ void PbftSyncPacketHandler::process(const threadpool::PacketData &packet_data, LOG(log_tr_) << "Processing pbft block: " << pbft_blk_hash; if (pbft_chain_->findPbftBlockInChain(pbft_blk_hash)) { - LOG(log_wr_) << "PBFT block " << pbft_blk_hash << ", period: " << period_data.pbft_blk->getPeriod() << " from " - << packet_data.from_node_id_ << " already present in chain"; + LOG(log_wr_) << "PBFT block " << pbft_blk_hash << ", period: " << packet.period_data.pbft_blk->getPeriod() + << " from " << peer->getId() << " already present in chain"; } else { if (pbft_block_period != pbft_mgr_->pbftSyncingPeriod() + 1) { // This can happen if we just got synced and block was cert voted @@ -109,11 +81,11 @@ void PbftSyncPacketHandler::process(const threadpool::PacketData 
&packet_data, // Check cert vote matches if final synced block if (pbft_chain_synced) { - for (auto const &vote : current_block_cert_votes) { + for (auto const &vote : packet.current_block_cert_votes) { if (vote->getBlockHash() != pbft_blk_hash) { LOG(log_er_) << "Invalid cert votes block hash " << vote->getBlockHash() << " instead of " << pbft_blk_hash - << " from peer " << packet_data.from_node_id_.abridged() << " received, stop syncing."; - handleMaliciousSyncPeer(packet_data.from_node_id_); + << " from peer " << peer->getId().abridged() << " received, stop syncing."; + handleMaliciousSyncPeer(peer->getId()); return; } } @@ -122,52 +94,50 @@ void PbftSyncPacketHandler::process(const threadpool::PacketData &packet_data, // Check votes match the hash of previous block in the queue auto last_pbft_block_hash = pbft_mgr_->lastPbftBlockHashFromQueueOrChain(); // Check cert vote matches - for (auto const &vote : period_data.previous_block_cert_votes) { + for (auto const &vote : packet.period_data.previous_block_cert_votes) { if (vote->getBlockHash() != last_pbft_block_hash) { LOG(log_er_) << "Invalid cert votes block hash " << vote->getBlockHash() << " instead of " - << last_pbft_block_hash << " from peer " << packet_data.from_node_id_.abridged() - << " received, stop syncing."; - handleMaliciousSyncPeer(packet_data.from_node_id_); + << last_pbft_block_hash << " from peer " << peer->getId().abridged() << " received, stop syncing."; + handleMaliciousSyncPeer(peer->getId()); return; } } - if (!pbft_mgr_->validatePillarDataInPeriodData(period_data)) { - handleMaliciousSyncPeer(packet_data.from_node_id_); + if (!pbft_mgr_->validatePillarDataInPeriodData(packet.period_data)) { + handleMaliciousSyncPeer(peer->getId()); return; } - auto order_hash = PbftManager::calculateOrderHash(period_data.dag_blocks); - if (order_hash != period_data.pbft_blk->getOrderHash()) { + auto order_hash = PbftManager::calculateOrderHash(packet.period_data.dag_blocks); + if (order_hash != 
packet.period_data.pbft_blk->getOrderHash()) { { // This is just log related stuff std::vector trx_order; - trx_order.reserve(period_data.transactions.size()); + trx_order.reserve(packet.period_data.transactions.size()); std::vector blk_order; - blk_order.reserve(period_data.dag_blocks.size()); - for (auto t : period_data.transactions) { + blk_order.reserve(packet.period_data.dag_blocks.size()); + for (auto t : packet.period_data.transactions) { trx_order.push_back(t->getHash()); } - for (auto b : period_data.dag_blocks) { + for (auto b : packet.period_data.dag_blocks) { blk_order.push_back(b.getHash()); } LOG(log_er_) << "Order hash incorrect in period data " << pbft_blk_hash << " expected: " << order_hash - << " received " << period_data.pbft_blk->getOrderHash() << "; Dag order: " << blk_order - << "; Trx order: " << trx_order << "; from " << packet_data.from_node_id_.abridged() - << ", stop syncing."; + << " received " << packet.period_data.pbft_blk->getOrderHash() << "; Dag order: " << blk_order + << "; Trx order: " << trx_order << "; from " << peer->getId().abridged() << ", stop syncing."; } - handleMaliciousSyncPeer(packet_data.from_node_id_); + handleMaliciousSyncPeer(peer->getId()); return; } // This is special case when queue is empty and we can not say for sure that all votes that are part of this block // have been verified before if (pbft_mgr_->periodDataQueueEmpty()) { - for (const auto &v : period_data.previous_block_cert_votes) { + for (const auto &v : packet.period_data.previous_block_cert_votes) { if (auto vote_is_valid = vote_mgr_->validateVote(v); vote_is_valid.first == false) { - LOG(log_er_) << "Invalid reward votes in block " << period_data.pbft_blk->getBlockHash() << " from peer " - << packet_data.from_node_id_.abridged() + LOG(log_er_) << "Invalid reward votes in block " << packet.period_data.pbft_blk->getBlockHash() + << " from peer " << peer->getId().abridged() << " received, stop syncing. Validation failed. 
Err: " << vote_is_valid.second; - handleMaliciousSyncPeer(packet_data.from_node_id_); + handleMaliciousSyncPeer(peer->getId()); return; } @@ -175,8 +145,8 @@ void PbftSyncPacketHandler::process(const threadpool::PacketData &packet_data, } // And now we need to replace it with verified votes - if (auto votes = vote_mgr_->checkRewardVotes(period_data.pbft_blk, true); votes.first) { - period_data.previous_block_cert_votes = std::move(votes.second); + if (auto votes = vote_mgr_->checkRewardVotes(packet.period_data.pbft_blk, true); votes.first) { + packet.period_data.previous_block_cert_votes = std::move(votes.second); } else { // checkRewardVotes could fail because we just cert voted this block and moved to next period, // in that case we are probably fully synced @@ -185,18 +155,18 @@ void PbftSyncPacketHandler::process(const threadpool::PacketData &packet_data, return; } - LOG(log_er_) << "Invalid reward votes in block " << period_data.pbft_blk->getBlockHash() << " from peer " - << packet_data.from_node_id_.abridged() << " received, stop syncing."; - handleMaliciousSyncPeer(packet_data.from_node_id_); + LOG(log_er_) << "Invalid reward votes in block " << packet.period_data.pbft_blk->getBlockHash() << " from peer " + << peer->getId().abridged() << " received, stop syncing."; + handleMaliciousSyncPeer(peer->getId()); return; } } LOG(log_tr_) << "Synced PBFT block hash " << pbft_blk_hash << " with " - << period_data.previous_block_cert_votes.size() << " cert votes"; - LOG(log_tr_) << "Synced PBFT block " << period_data; - pbft_mgr_->periodDataQueuePush(std::move(period_data), packet_data.from_node_id_, - std::move(current_block_cert_votes)); + << packet.period_data.previous_block_cert_votes.size() << " cert votes"; + LOG(log_tr_) << "Synced PBFT block " << packet.period_data; + pbft_mgr_->periodDataQueuePush(std::move(packet.period_data), peer->getId(), + std::move(packet.current_block_cert_votes)); } auto pbft_sync_period = pbft_mgr_->pbftSyncingPeriod(); @@ -209,7 
+179,7 @@ void PbftSyncPacketHandler::process(const threadpool::PacketData &packet_data, return; } - if (last_block) { + if (packet.last_block) { // If current sync period is actually bigger than the block we just received we are probably synced if (pbft_sync_period > pbft_block_period) { pbft_syncing_state_->setPbftSyncing(false); diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/pillar_vote_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/pillar_vote_packet_handler.cpp index d0ef86ee62..67d235299d 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/pillar_vote_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/pillar_vote_packet_handler.cpp @@ -11,25 +11,16 @@ PillarVotePacketHandler::PillarVotePacketHandler(const FullNodeConfig &conf, std : ExtPillarVotePacketHandler(conf, std::move(peers_state), std::move(packets_stats), std::move(pillar_chain_manager), node_addr, logs_prefix + "PILLAR_VOTE_PH") {} -void PillarVotePacketHandler::validatePacketRlpFormat(const threadpool::PacketData &packet_data) const { - auto items = packet_data.rlp_.itemCount(); - if (items != PillarVote::kStandardRlpSize) { - throw InvalidRlpItemsCountException(packet_data.type_str_, items, PillarVote::kStandardRlpSize); - } -} - -void PillarVotePacketHandler::process(const threadpool::PacketData &packet_data, - const std::shared_ptr &peer) { - const auto pillar_vote = std::make_shared(packet_data.rlp_); - if (!kConf.genesis.state.hardforks.ficus_hf.isFicusHardfork(pillar_vote->getPeriod())) { +void PillarVotePacketHandler::process(PillarVotePacket &&packet, const std::shared_ptr &peer) { + if (!kConf.genesis.state.hardforks.ficus_hf.isFicusHardfork(packet.pillar_vote->getPeriod())) { std::ostringstream err_msg; - err_msg << "Pillar vote " << pillar_vote->getHash() << ", period " << pillar_vote->getPeriod() + err_msg << "Pillar vote " << packet.pillar_vote->getHash() << ", 
period " << packet.pillar_vote->getPeriod() << " < ficus hardfork block num"; throw MaliciousPeerException(err_msg.str()); } - if (processPillarVote(pillar_vote, peer)) { - onNewPillarVote(pillar_vote); + if (processPillarVote(packet.pillar_vote, peer)) { + onNewPillarVote(packet.pillar_vote); } } @@ -54,7 +45,7 @@ void PillarVotePacketHandler::sendPillarVote(const std::shared_ptr & dev::RLPStream s; s.appendRaw(vote->rlp()); - if (sealAndSend(peer->getId(), SubprotocolPacketType::PillarVotePacket, std::move(s))) { + if (sealAndSend(peer->getId(), SubprotocolPacketType::kPillarVotePacket, std::move(s))) { peer->markPillarVoteAsKnown(vote->getHash()); LOG(log_dg_) << "Pillar vote " << vote->getHash() << ", period " << vote->getPeriod() << " sent to " << peer->getId(); diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/pillar_votes_bundle_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/pillar_votes_bundle_packet_handler.cpp index b9473c59c8..dfc4ee7089 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/pillar_votes_bundle_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/pillar_votes_bundle_packet_handler.cpp @@ -12,20 +12,12 @@ PillarVotesBundlePacketHandler::PillarVotesBundlePacketHandler( : ExtPillarVotePacketHandler(conf, std::move(peers_state), std::move(packets_stats), std::move(pillar_chain_manager), node_addr, logs_prefix + "PILLAR_VOTES_BUNDLE_PH") {} -void PillarVotesBundlePacketHandler::validatePacketRlpFormat(const threadpool::PacketData &packet_data) const { - auto items = packet_data.rlp_.itemCount(); - if (items == 0 || items > kMaxPillarVotesInBundleRlp) { - throw InvalidRlpItemsCountException(packet_data.type_str_, items, kMaxPillarVotesInBundleRlp); - } -} - -void PillarVotesBundlePacketHandler::process(const threadpool::PacketData &packet_data, +void PillarVotesBundlePacketHandler::process(PillarVotesBundlePacket &&packet, 
const std::shared_ptr &peer) { // TODO[2744]: there could be the same protection as in pbft syncing that only requested bundle packet is accepted LOG(log_dg_) << "PillarVotesBundlePacket received from peer " << peer->getId(); - for (const auto vote_rlp : packet_data.rlp_) { - const auto pillar_vote = std::make_shared(vote_rlp); + for (const auto &pillar_vote : packet.pillar_votes) { if (!kConf.genesis.state.hardforks.ficus_hf.isFicusHardfork(pillar_vote->getPeriod())) { std::ostringstream err_msg; err_msg << "Synced pillar vote " << pillar_vote->getHash() << ", period " << pillar_vote->getPeriod() diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/status_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/status_packet_handler.cpp index bd30c35fc3..766d1cb3a5 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/status_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/status_packet_handler.cpp @@ -21,105 +21,82 @@ StatusPacketHandler::StatusPacketHandler(const FullNodeConfig& conf, std::shared logs_prefix + "STATUS_PH"), kGenesisHash(genesis_hash) {} -void StatusPacketHandler::validatePacketRlpFormat(const threadpool::PacketData& packet_data) const { - if (const auto items_count = packet_data.rlp_.itemCount(); - items_count != kInitialStatusPacketItemsCount && items_count != kStandardStatusPacketItemsCount) { - throw InvalidRlpItemsCountException(packet_data.type_str_, packet_data.rlp_.itemCount(), - kStandardStatusPacketItemsCount); - } -} - -void StatusPacketHandler::process(const threadpool::PacketData& packet_data, const std::shared_ptr& peer) { +void StatusPacketHandler::process(StatusPacket&& packet, const std::shared_ptr& peer) { // Important !!! 
Use only "selected_peer" and not "peer" in this function as "peer" might be nullptr auto selected_peer = peer; const auto pbft_synced_period = pbft_mgr_->pbftSyncingPeriod(); // Initial status packet - if (packet_data.rlp_.itemCount() == kInitialStatusPacketItemsCount) { + if (packet.isInitialStatusPacket()) { if (!selected_peer) { - selected_peer = peers_state_->getPendingPeer(packet_data.from_node_id_); + selected_peer = peers_state_->getPendingPeer(peer->getId()); if (!selected_peer) { - LOG(log_wr_) << "Peer " << packet_data.from_node_id_.abridged() + LOG(log_wr_) << "Peer " << peer->getId().abridged() << " missing in both peers and pending peers map - will be disconnected."; - disconnect(packet_data.from_node_id_, dev::p2p::UserReason); + disconnect(peer->getId(), dev::p2p::UserReason); return; } } - auto it = packet_data.rlp_.begin(); - auto const peer_chain_id = (*it++).toInt(); - auto const peer_dag_level = (*it++).toInt(); - auto const genesis_hash = (*it++).toHash(); - auto const peer_pbft_chain_size = (*it++).toInt(); - auto const peer_syncing = (*it++).toInt(); - auto const peer_pbft_round = (*it++).toInt(); - auto const node_major_version = (*it++).toInt(); - auto const node_minor_version = (*it++).toInt(); - auto const node_patch_version = (*it++).toInt(); - auto const is_light_node = (*it++).toInt(); - auto const node_history = (*it++).toInt(); - - if (peer_chain_id != kConf.genesis.chain_id) { + if (*packet.peer_chain_id != kConf.genesis.chain_id) { LOG((peers_state_->getPeersCount()) ? 
log_nf_ : log_er_) - << "Incorrect network id " << peer_chain_id << ", host " << packet_data.from_node_id_.abridged() + << "Incorrect network id " << *packet.peer_chain_id << ", host " << peer->getId().abridged() << " will be disconnected"; - disconnect(packet_data.from_node_id_, dev::p2p::UserReason); + disconnect(peer->getId(), dev::p2p::UserReason); return; } - if (genesis_hash != kGenesisHash) { + if (*packet.genesis_hash != kGenesisHash) { LOG((peers_state_->getPeersCount()) ? log_nf_ : log_wr_) - << "Incorrect genesis hash " << genesis_hash << ", host " << packet_data.from_node_id_.abridged() + << "Incorrect genesis hash " << *packet.genesis_hash << ", host " << peer->getId().abridged() << " will be disconnected"; - disconnect(packet_data.from_node_id_, dev::p2p::UserReason); + disconnect(peer->getId(), dev::p2p::UserReason); return; } // If this is a light node and it cannot serve our sync request disconnect from it - if (is_light_node) { + if (*packet.is_light_node) { selected_peer->peer_light_node = true; - selected_peer->peer_light_node_history = node_history; - if (pbft_synced_period + node_history < peer_pbft_chain_size) { + selected_peer->peer_light_node_history = *packet.node_history; + if (pbft_synced_period + *packet.node_history < packet.peer_pbft_chain_size) { LOG((peers_state_->getPeersCount()) ? log_nf_ : log_er_) - << "Light node " << packet_data.from_node_id_.abridged() - << " would not be able to serve our syncing request. " - << "Current synced period " << pbft_synced_period << ", peer synced period " << peer_pbft_chain_size - << ", peer light node history " << node_history << ". Peer will be disconnected"; - disconnect(packet_data.from_node_id_, dev::p2p::UserReason); + << "Light node " << peer->getId().abridged() << " would not be able to serve our syncing request. " + << "Current synced period " << pbft_synced_period << ", peer synced period " << packet.peer_pbft_chain_size + << ", peer light node history " << *packet.node_history << ". 
Peer will be disconnected"; + disconnect(peer->getId(), dev::p2p::UserReason); return; } } - selected_peer->dag_level_ = peer_dag_level; - selected_peer->pbft_chain_size_ = peer_pbft_chain_size; - selected_peer->syncing_ = peer_syncing; - selected_peer->pbft_period_ = peer_pbft_chain_size + 1; - selected_peer->pbft_round_ = peer_pbft_round; + selected_peer->dag_level_ = packet.peer_dag_level; + selected_peer->pbft_chain_size_ = packet.peer_pbft_chain_size; + selected_peer->syncing_ = packet.peer_syncing; + selected_peer->pbft_period_ = packet.peer_pbft_chain_size + 1; + selected_peer->pbft_round_ = packet.peer_pbft_round; - peers_state_->setPeerAsReadyToSendMessages(packet_data.from_node_id_, selected_peer); + peers_state_->setPeerAsReadyToSendMessages(peer->getId(), selected_peer); - LOG(log_dg_) << "Received initial status message from " << packet_data.from_node_id_ << ", network id " - << peer_chain_id << ", peer DAG max level " << selected_peer->dag_level_ << ", genesis " - << genesis_hash << ", peer pbft chain size " << selected_peer->pbft_chain_size_ << ", peer syncing " - << std::boolalpha << selected_peer->syncing_ << ", peer pbft period " << selected_peer->pbft_period_ - << ", peer pbft round " << selected_peer->pbft_round_ << ", node major version" << node_major_version - << ", node minor version" << node_minor_version << ", node patch version" << node_patch_version; + LOG(log_dg_) << "Received initial status message from " << peer->getId() << ", network id " << *packet.peer_chain_id + << ", peer DAG max level " << selected_peer->dag_level_ << ", genesis " << *packet.genesis_hash + << ", peer pbft chain size " << selected_peer->pbft_chain_size_ << ", peer syncing " << std::boolalpha + << selected_peer->syncing_ << ", peer pbft period " << selected_peer->pbft_period_ + << ", peer pbft round " << selected_peer->pbft_round_ << ", node major version" + << *packet.node_major_version << ", node minor version" << *packet.node_minor_version + << ", node patch 
version" << *packet.node_patch_version; } else { // Standard status packet - // TODO: initial and standard status packet could be separated... if (!selected_peer) { - LOG(log_er_) << "Received standard status packet from " << packet_data.from_node_id_.abridged() + LOG(log_er_) << "Received standard status packet from " << peer->getId().abridged() << ", without previously received initial status packet. Will be disconnected"; - disconnect(packet_data.from_node_id_, dev::p2p::UserReason); + disconnect(peer->getId(), dev::p2p::UserReason); return; } - auto it = packet_data.rlp_.begin(); - selected_peer->dag_level_ = (*it++).toInt(); - selected_peer->pbft_chain_size_ = (*it++).toInt(); + selected_peer->dag_level_ = packet.peer_dag_level; + selected_peer->pbft_chain_size_ = packet.peer_pbft_chain_size; selected_peer->pbft_period_ = selected_peer->pbft_chain_size_ + 1; - selected_peer->syncing_ = (*it++).toInt(); - selected_peer->pbft_round_ = (*it++).toInt(); + selected_peer->syncing_ = packet.peer_syncing; + selected_peer->pbft_round_ = packet.peer_pbft_round; // TODO: Address malicious status if (!pbft_syncing_state_->isPbftSyncing()) { @@ -147,7 +124,7 @@ void StatusPacketHandler::process(const threadpool::PacketData& packet_data, con } selected_peer->last_status_pbft_chain_size_ = selected_peer->pbft_chain_size_.load(); - LOG(log_dg_) << "Received status message from " << packet_data.from_node_id_ << ", peer DAG max level " + LOG(log_dg_) << "Received status message from " << peer->getId() << ", peer DAG max level " << selected_peer->dag_level_ << ", peer pbft chain size " << selected_peer->pbft_chain_size_ << ", peer syncing " << std::boolalpha << selected_peer->syncing_ << ", peer pbft round " << selected_peer->pbft_round_; @@ -168,14 +145,14 @@ bool StatusPacketHandler::sendStatus(const dev::p2p::NodeID& node_id, bool initi if (initial) { success = sealAndSend( - node_id, StatusPacket, + node_id, SubprotocolPacketType::kStatusPacket, 
std::move(dev::RLPStream(kInitialStatusPacketItemsCount) << kConf.genesis.chain_id << dag_max_level << kGenesisHash << pbft_chain_size << pbft_syncing_state_->isPbftSyncing() << pbft_round << TARAXA_MAJOR_VERSION << TARAXA_MINOR_VERSION << TARAXA_PATCH_VERSION << kConf.is_light_node << kConf.light_node_history)); } else { success = sealAndSend( - node_id, StatusPacket, + node_id, SubprotocolPacketType::kStatusPacket, std::move(dev::RLPStream(kStandardStatusPacketItemsCount) << dag_max_level << pbft_chain_size << pbft_syncing_state_->isDeepPbftSyncing() << pbft_round)); } diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/transaction_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/transaction_packet_handler.cpp index adf7178eaa..d5286035f7 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/transaction_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/transaction_packet_handler.cpp @@ -14,61 +14,15 @@ TransactionPacketHandler::TransactionPacketHandler(const FullNodeConfig &conf, s : PacketHandler(conf, std::move(peers_state), std::move(packets_stats), node_addr, logs_prefix + "TRANSACTION_PH"), trx_mgr_(std::move(trx_mgr)) {} -void TransactionPacketHandler::validatePacketRlpFormat(const threadpool::PacketData &packet_data) const { - auto items = packet_data.rlp_.itemCount(); - if (items != kTransactionPacketItemCount) { - throw InvalidRlpItemsCountException(packet_data.type_str_, items, kTransactionPacketItemCount); - } - auto hashes_count = packet_data.rlp_[0].itemCount(); - auto trx_count = packet_data.rlp_[1].itemCount(); - - if (hashes_count < trx_count) { - throw InvalidRlpItemsCountException(packet_data.type_str_, hashes_count, trx_count); - } - if (hashes_count == 0 || hashes_count > kMaxTransactionsInPacket + kMaxHashesInPacket) { - throw InvalidRlpItemsCountException(packet_data.type_str_, hashes_count, - kMaxTransactionsInPacket + 
kMaxHashesInPacket); - } - - if (trx_count > kMaxTransactionsInPacket) { - throw InvalidRlpItemsCountException(packet_data.type_str_, trx_count, kMaxTransactionsInPacket); - } -} - -inline void TransactionPacketHandler::process(const threadpool::PacketData &packet_data, - const std::shared_ptr &peer) { - std::vector received_transactions; - - const auto transaction_hashes_count = packet_data.rlp_[0].itemCount(); - const auto transaction_count = packet_data.rlp_[1].itemCount(); - received_transactions.reserve(transaction_count); - - std::vector trx_hashes; - trx_hashes.reserve(transaction_hashes_count); - - // First extract only transaction hashes - for (const auto trx_hash_rlp : packet_data.rlp_[0]) { - auto trx_hash = trx_hash_rlp.toHash(); - peer->markTransactionAsKnown(trx_hash); - trx_hashes.emplace_back(std::move(trx_hash)); - } - - for (size_t tx_idx = 0; tx_idx < transaction_count; tx_idx++) { - const auto &trx_hash = trx_hashes[tx_idx]; - +inline void TransactionPacketHandler::process(TransactionPacket &&packet, const std::shared_ptr &peer) { + size_t unseen_txs_count = 0; + for (auto &transaction : packet.transactions) { // Skip any transactions that are already known to the trx mgr - if (trx_mgr_->isTransactionKnown(trx_hash)) { + if (trx_mgr_->isTransactionKnown(transaction->getHash())) { continue; } - std::shared_ptr transaction; - // Deserialization is expensive, do it only for the transactions we are about to process - try { - transaction = std::make_shared(packet_data.rlp_[1][tx_idx].data().toBytes()); - received_transactions.emplace_back(trx_hash); - } catch (const Transaction::InvalidTransaction &e) { - throw MaliciousPeerException("Unable to parse transaction: " + std::string(e.what())); - } + unseen_txs_count++; const auto [verified, reason] = trx_mgr_->verifyTransaction(transaction); if (!verified) { @@ -92,10 +46,10 @@ inline void TransactionPacketHandler::process(const threadpool::PacketData &pack } } - if (transaction_count > 0) { - 
LOG(log_tr_) << "Received TransactionPacket with " << packet_data.rlp_.itemCount() << " transactions"; - LOG(log_dg_) << "Received TransactionPacket with " << received_transactions.size() - << " unseen transactions:" << received_transactions << " from: " << peer->getId().abridged(); + if (!packet.transactions.empty()) { + LOG(log_tr_) << "Received TransactionPacket with " << packet.transactions.size() << " transactions"; + LOG(log_dg_) << "Received TransactionPacket with " << packet.transactions.size() + << " unseen transactions:" << unseen_txs_count << " from: " << peer->getId().abridged(); } } @@ -212,7 +166,7 @@ void TransactionPacketHandler::sendTransactions(std::shared_ptr peer s.appendRaw(trx->rlp()); } - if (sealAndSend(peer_id, TransactionPacket, std::move(s))) { + if (sealAndSend(peer_id, SubprotocolPacketType::kTransactionPacket, std::move(s))) { for (const auto &trx : transactions.first) { peer->markTransactionAsKnown(trx->getHash()); } diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/vote_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/vote_packet_handler.cpp index 341ef6ae0e..0d0a3e2d33 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/vote_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/vote_packet_handler.cpp @@ -15,44 +15,26 @@ VotePacketHandler::VotePacketHandler(const FullNodeConfig &conf, std::shared_ptr std::move(pbft_chain), std::move(vote_mgr), std::move(slashing_manager), node_addr, logs_prefix + "PBFT_VOTE_PH") {} -void VotePacketHandler::validatePacketRlpFormat([[maybe_unused]] const threadpool::PacketData &packet_data) const { - auto items = packet_data.rlp_.itemCount(); - // Vote packet can contain either just a vote or vote + block + peer_chain_size - if (items != kVotePacketSize && items != kExtendedVotePacketSize) { - throw InvalidRlpItemsCountException(packet_data.type_str_, items, kExtendedVotePacketSize); 
- } -} - -void VotePacketHandler::process(const threadpool::PacketData &packet_data, const std::shared_ptr &peer) { +void VotePacketHandler::process(VotePacket &&packet, const std::shared_ptr &peer) { const auto [current_pbft_round, current_pbft_period] = pbft_mgr_->getPbftRoundAndPeriod(); - // Optional packet items - std::shared_ptr pbft_block{nullptr}; - std::optional peer_chain_size{}; - - std::shared_ptr vote = std::make_shared(packet_data.rlp_[0]); - if (const size_t item_count = packet_data.rlp_.itemCount(); item_count == kExtendedVotePacketSize) { - try { - pbft_block = std::make_shared(packet_data.rlp_[1]); - } catch (const std::exception &e) { - throw MaliciousPeerException(e.what()); - } - peer_chain_size = packet_data.rlp_[2].toInt(); - LOG(log_dg_) << "Received PBFT vote " << vote->getHash() << " with PBFT block " << pbft_block->getBlockHash(); + if (packet.pbft_block) { + LOG(log_dg_) << "Received PBFT vote " << packet.vote->getHash() << " with PBFT block " + << packet.pbft_block->getBlockHash(); } else { - LOG(log_dg_) << "Received PBFT vote " << vote->getHash(); + LOG(log_dg_) << "Received PBFT vote " << packet.vote->getHash(); } // Update peer's max chain size - if (peer_chain_size.has_value() && *peer_chain_size > peer->pbft_chain_size_) { - peer->pbft_chain_size_ = *peer_chain_size; + if (packet.peer_chain_size.has_value() && *packet.peer_chain_size > peer->pbft_chain_size_) { + peer->pbft_chain_size_ = *packet.peer_chain_size; } - const auto vote_hash = vote->getHash(); + const auto vote_hash = packet.vote->getHash(); - if (!isPbftRelevantVote(vote)) { + if (!isPbftRelevantVote(packet.vote)) { LOG(log_dg_) << "Drop irrelevant vote " << vote_hash << " for current pbft state. Vote (period, round, step) = (" - << vote->getPeriod() << ", " << vote->getRound() << ", " << vote->getStep() + << packet.vote->getPeriod() << ", " << packet.vote->getRound() << ", " << packet.vote->getStep() << "). 
Current PBFT (period, round, step) = (" << current_pbft_period << ", " << current_pbft_round << ", " << pbft_mgr_->getPbftStep() << ")"; return; @@ -64,25 +46,26 @@ void VotePacketHandler::process(const threadpool::PacketData &packet_data, const return; } - if (pbft_block) { - if (pbft_block->getBlockHash() != vote->getBlockHash()) { + if (packet.pbft_block) { + if (packet.pbft_block->getBlockHash() != packet.vote->getBlockHash()) { std::ostringstream err_msg; - err_msg << "Vote " << vote->getHash().abridged() << " voted block " << vote->getBlockHash().abridged() - << " != actual block " << pbft_block->getBlockHash().abridged(); + err_msg << "Vote " << packet.vote->getHash().abridged() << " voted block " + << packet.vote->getBlockHash().abridged() << " != actual block " + << packet.pbft_block->getBlockHash().abridged(); throw MaliciousPeerException(err_msg.str()); } - peer->markPbftBlockAsKnown(pbft_block->getBlockHash()); + peer->markPbftBlockAsKnown(packet.pbft_block->getBlockHash()); } - if (!processVote(vote, pbft_block, peer, true)) { + if (!processVote(packet.vote, packet.pbft_block, peer, true)) { return; } // Do not mark it before, as peers have small caches of known votes. 
Only mark gossiping votes peer->markPbftVoteAsKnown(vote_hash); - pbft_mgr_->gossipVote(vote, pbft_block); + pbft_mgr_->gossipVote(packet.vote, packet.pbft_block); } void VotePacketHandler::onNewPbftVote(const std::shared_ptr &vote, const std::shared_ptr &block, @@ -126,7 +109,7 @@ void VotePacketHandler::sendPbftVote(const std::shared_ptr &peer, co s.appendRaw(vote->rlp(true, false)); } - if (sealAndSend(peer->getId(), SubprotocolPacketType::VotePacket, std::move(s))) { + if (sealAndSend(peer->getId(), SubprotocolPacketType::kVotePacket, std::move(s))) { peer->markPbftVoteAsKnown(vote->getHash()); if (block) { peer->markPbftBlockAsKnown(block->getBlockHash()); diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/votes_bundle_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/votes_bundle_packet_handler.cpp index 68f653fbb8..451279dfe1 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/votes_bundle_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/votes_bundle_packet_handler.cpp @@ -17,48 +17,27 @@ VotesBundlePacketHandler::VotesBundlePacketHandler(const FullNodeConfig &conf, s std::move(pbft_chain), std::move(vote_mgr), std::move(slashing_manager), node_addr, logs_prefix + "VOTES_BUNDLE_PH") {} -void VotesBundlePacketHandler::validatePacketRlpFormat( - [[maybe_unused]] const threadpool::PacketData &packet_data) const { - auto items = packet_data.rlp_.itemCount(); - if (items != kPbftVotesBundleRlpSize) { - throw InvalidRlpItemsCountException(packet_data.type_str_, items, kPbftVotesBundleRlpSize); - } - - auto votes_count = packet_data.rlp_[kPbftVotesBundleRlpSize - 1].itemCount(); - if (votes_count == 0 || votes_count > kMaxVotesInBundleRlp) { - throw InvalidRlpItemsCountException(packet_data.type_str_, items, kMaxVotesInBundleRlp); - } -} - -void VotesBundlePacketHandler::process(const threadpool::PacketData &packet_data, - const std::shared_ptr 
&peer) { +void VotesBundlePacketHandler::process(VotesBundlePacket &&packet, const std::shared_ptr &peer) { const auto [current_pbft_round, current_pbft_period] = pbft_mgr_->getPbftRoundAndPeriod(); - const auto votes_bundle_block_hash = packet_data.rlp_[0].toHash(); - const auto votes_bundle_pbft_period = packet_data.rlp_[1].toInt(); - const auto votes_bundle_pbft_round = packet_data.rlp_[2].toInt(); - const auto votes_bundle_votes_step = packet_data.rlp_[3].toInt(); - - const auto &reference_vote = - std::make_shared(votes_bundle_block_hash, votes_bundle_pbft_period, votes_bundle_pbft_round, - votes_bundle_votes_step, packet_data.rlp_[4][0]); + const auto &reference_vote = packet.votes.front(); const auto votes_bundle_votes_type = reference_vote->getType(); // Votes sync bundles are allowed to cotain only votes bundles of the same type, period, round and step so if first // vote is irrelevant, all of them are - if (!isPbftRelevantVote(reference_vote)) { + if (!isPbftRelevantVote(packet.votes[0])) { LOG(log_wr_) << "Drop votes sync bundle as it is irrelevant for current pbft state. Votes (period, round, step) = (" - << votes_bundle_pbft_period << ", " << votes_bundle_pbft_round << ", " << reference_vote->getStep() - << "). Current PBFT (period, round, step) = (" << current_pbft_period << ", " << current_pbft_round - << ", " << pbft_mgr_->getPbftStep() << ")"; + << packet.votes_bundle_pbft_period << ", " << packet.votes_bundle_pbft_round << ", " + << reference_vote->getStep() << "). 
Current PBFT (period, round, step) = (" << current_pbft_period + << ", " << current_pbft_round << ", " << pbft_mgr_->getPbftStep() << ")"; return; } // VotesBundlePacket does not support propose votes if (reference_vote->getType() == PbftVoteTypes::propose_vote) { - LOG(log_er_) << "Dropping votes bundle packet due to received \"propose\" votes from " << packet_data.from_node_id_ + LOG(log_er_) << "Dropping votes bundle packet due to received \"propose\" votes from " << peer->getId() << ". The peer may be a malicious player, will be disconnected"; - disconnect(packet_data.from_node_id_, dev::p2p::UserReason); + disconnect(peer->getId(), dev::p2p::UserReason); return; } @@ -69,10 +48,8 @@ void VotesBundlePacketHandler::process(const threadpool::PacketData &packet_data check_max_round_step = false; } - std::vector> votes; - for (const auto vote_rlp : packet_data.rlp_[4]) { - auto vote = std::make_shared(votes_bundle_block_hash, votes_bundle_pbft_period, votes_bundle_pbft_round, - votes_bundle_votes_step, vote_rlp); + size_t processed_votes_count = 0; + for (const auto &vote : packet.votes) { peer->markPbftVoteAsKnown(vote->getHash()); // Do not process vote that has already been validated @@ -87,14 +64,14 @@ void VotesBundlePacketHandler::process(const threadpool::PacketData &packet_data continue; } - votes.push_back(std::move(vote)); + processed_votes_count++; } - LOG(log_nf_) << "Received " << packet_data.rlp_[4].itemCount() << " (processed " << votes.size() - << " ) sync votes from peer " << packet_data.from_node_id_ << " node current round " - << current_pbft_round << ", peer pbft round " << votes_bundle_pbft_round; + LOG(log_nf_) << "Received " << packet.votes.size() << " (processed " << processed_votes_count + << " ) sync votes from peer " << peer->getId() << " node current round " << current_pbft_round + << ", peer pbft round " << packet.votes_bundle_pbft_round; - onNewPbftVotesBundle(votes, false, packet_data.from_node_id_); + 
onNewPbftVotesBundle(packet.votes, false, peer->getId()); } void VotesBundlePacketHandler::onNewPbftVotesBundle(const std::vector> &votes, diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/v3/get_pbft_sync_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/v3/get_pbft_sync_packet_handler.cpp deleted file mode 100644 index a9cd4ec9f6..0000000000 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/v3/get_pbft_sync_packet_handler.cpp +++ /dev/null @@ -1,117 +0,0 @@ -#include "network/tarcap/packets_handlers/v3/get_pbft_sync_packet_handler.hpp" - -#include "network/tarcap/shared_states/pbft_syncing_state.hpp" -#include "pbft/pbft_chain.hpp" -#include "pbft/period_data.hpp" -#include "storage/storage.hpp" -#include "vote/pbft_vote.hpp" -#include "vote/votes_bundle_rlp.hpp" -#include "vote_manager/vote_manager.hpp" - -namespace taraxa::network::tarcap::v3 { - -GetPbftSyncPacketHandler::GetPbftSyncPacketHandler(const FullNodeConfig &conf, std::shared_ptr peers_state, - std::shared_ptr packets_stats, - std::shared_ptr pbft_syncing_state, - std::shared_ptr pbft_chain, - std::shared_ptr vote_mgr, std::shared_ptr db, - const addr_t &node_addr, const std::string &logs_prefix) - : PacketHandler(conf, std::move(peers_state), std::move(packets_stats), node_addr, - logs_prefix + "GET_PBFT_SYNC_PH"), - pbft_syncing_state_(std::move(pbft_syncing_state)), - pbft_chain_(std::move(pbft_chain)), - vote_mgr_(std::move(vote_mgr)), - db_(std::move(db)) {} - -void GetPbftSyncPacketHandler::validatePacketRlpFormat(const threadpool::PacketData &packet_data) const { - if (constexpr size_t required_size = 1; packet_data.rlp_.itemCount() != required_size) { - throw InvalidRlpItemsCountException(packet_data.type_str_, packet_data.rlp_.itemCount(), required_size); - } -} - -void GetPbftSyncPacketHandler::process(const threadpool::PacketData &packet_data, - const std::shared_ptr &peer) { - LOG(log_tr_) << "Received GetPbftSyncPacket Block"; - - 
const size_t height_to_sync = packet_data.rlp_[0].toInt(); - // Here need PBFT chain size, not synced period since synced blocks has not verified yet. - const size_t my_chain_size = pbft_chain_->getPbftChainSize(); - if (height_to_sync > my_chain_size) { - // Node update peers PBFT chain size in status packet. Should not request syncing period bigger than pbft chain size - std::ostringstream err_msg; - err_msg << "Peer " << packet_data.from_node_id_ << " request syncing period start at " << height_to_sync - << ". That's bigger than own PBFT chain size " << my_chain_size; - throw MaliciousPeerException(err_msg.str()); - } - - if (kConf.is_light_node && height_to_sync + kConf.light_node_history <= my_chain_size) { - std::ostringstream err_msg; - err_msg << "Peer " << packet_data.from_node_id_ << " request syncing period start at " << height_to_sync - << ". Light node does not have the data " << my_chain_size; - throw MaliciousPeerException(err_msg.str()); - } - - size_t blocks_to_transfer = 0; - auto pbft_chain_synced = false; - const auto total_period_data_size = my_chain_size - height_to_sync + 1; - if (total_period_data_size <= kConf.network.sync_level_size) { - blocks_to_transfer = total_period_data_size; - pbft_chain_synced = true; - } else { - blocks_to_transfer = kConf.network.sync_level_size; - } - LOG(log_tr_) << "Will send " << blocks_to_transfer << " PBFT blocks to " << packet_data.from_node_id_; - - sendPbftBlocks(peer, height_to_sync, blocks_to_transfer, pbft_chain_synced); -} - -// api for pbft syncing -void GetPbftSyncPacketHandler::sendPbftBlocks(const std::shared_ptr &peer, PbftPeriod from_period, - size_t blocks_to_transfer, bool pbft_chain_synced) { - const auto &peer_id = peer->getId(); - LOG(log_tr_) << "sendPbftBlocks: peer want to sync from pbft chain height " << from_period << ", will send at most " - << blocks_to_transfer << " pbft blocks to " << peer_id; - - for (auto block_period = from_period; block_period < from_period + 
blocks_to_transfer; block_period++) { - bool last_block = (block_period == from_period + blocks_to_transfer - 1); - auto data = db_->getPeriodDataRaw(block_period); - - if (data.size() == 0) { - // This can happen when switching from light node to full node setting - LOG(log_er_) << "DB corrupted. Cannot find period " << block_period << " PBFT block in db"; - return; - } - - data = PeriodData::ToOldPeriodData(data); - - dev::RLPStream s; - if (pbft_chain_synced && last_block) { - // Latest finalized block cert votes are saved in db as reward votes for new blocks - const auto reward_votes = vote_mgr_->getRewardVotes(); - assert(!reward_votes.empty()); - // It is possible that the node pushed another block to the chain in the meantime - if (reward_votes[0]->getPeriod() == block_period) { - s.appendList(3); - s << last_block; - s.appendRaw(data); - s.appendRaw(encodePbftVotesBundleRlp(reward_votes)); - } else { - s.appendList(2); - s << last_block; - s.appendRaw(data); - } - } else { - s.appendList(2); - s << last_block; - s.appendRaw(data); - } - - LOG(log_dg_) << "Sending PbftSyncPacket period " << block_period << " to " << peer_id; - sealAndSend(peer_id, SubprotocolPacketType::PbftSyncPacket, std::move(s)); - if (pbft_chain_synced && last_block) { - peer->syncing_ = false; - } - } -} - -} // namespace taraxa::network::tarcap::v3 diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/v3/pbft_sync_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/v3/pbft_sync_packet_handler.cpp deleted file mode 100644 index 8af7d276a5..0000000000 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/v3/pbft_sync_packet_handler.cpp +++ /dev/null @@ -1,296 +0,0 @@ -#include "network/tarcap/packets_handlers/v3/pbft_sync_packet_handler.hpp" - -#include "network/tarcap/shared_states/pbft_syncing_state.hpp" -#include "pbft/pbft_chain.hpp" -#include "pbft/pbft_manager.hpp" -#include "transaction/transaction_manager.hpp" -#include 
"vote/pbft_vote.hpp" -#include "vote/votes_bundle_rlp.hpp" - -namespace taraxa::network::tarcap::v3 { - -PbftSyncPacketHandler::PbftSyncPacketHandler(const FullNodeConfig &conf, std::shared_ptr peers_state, - std::shared_ptr packets_stats, - std::shared_ptr pbft_syncing_state, - std::shared_ptr pbft_chain, - std::shared_ptr pbft_mgr, std::shared_ptr dag_mgr, - std::shared_ptr vote_mgr, std::shared_ptr db, - const addr_t &node_addr, const std::string &logs_prefix) - : ExtSyncingPacketHandler(conf, std::move(peers_state), std::move(packets_stats), std::move(pbft_syncing_state), - std::move(pbft_chain), std::move(pbft_mgr), std::move(dag_mgr), std::move(db), node_addr, - logs_prefix + "PBFT_SYNC_PH"), - vote_mgr_(std::move(vote_mgr)), - periodic_events_tp_(1, true) {} - -void PbftSyncPacketHandler::validatePacketRlpFormat(const threadpool::PacketData &packet_data) const { - if (packet_data.rlp_.itemCount() != kStandardPacketSize && packet_data.rlp_.itemCount() != kChainSyncedPacketSize) { - throw InvalidRlpItemsCountException(packet_data.type_str_, packet_data.rlp_.itemCount(), kStandardPacketSize); - } - - // PeriodData rlp parsing cannot be done through util::rlp_tuple, which automatically checks the rlp size so it is - // checked here manually - if (packet_data.rlp_[1].itemCount() != PeriodData::kBaseRlpItemCount && - packet_data.rlp_[1].itemCount() != PeriodData::kExtendedRlpItemCount) { - throw InvalidRlpItemsCountException(packet_data.type_str_ + ":PeriodData", packet_data.rlp_[1].itemCount(), - PeriodData::kBaseRlpItemCount); - } -} - -void PbftSyncPacketHandler::process(const threadpool::PacketData &packet_data, - const std::shared_ptr &peer) { - // Note: no need to consider possible race conditions due to concurrent processing as it is - // disabled on priority_queue blocking dependencies level - const auto syncing_peer = pbft_syncing_state_->syncingPeer(); - if (!syncing_peer) { - LOG(log_wr_) << "PbftSyncPacket received from unexpected peer " << 
packet_data.from_node_id_.abridged() - << " but there is no current syncing peer set"; - return; - } - - if (syncing_peer->getId() != packet_data.from_node_id_) { - LOG(log_wr_) << "PbftSyncPacket received from unexpected peer " << packet_data.from_node_id_.abridged() - << " current syncing peer " << syncing_peer->getId().abridged(); - return; - } - - // Process received pbft blocks - // pbft_chain_synced is the flag to indicate own PBFT chain has synced with the peer's PBFT chain - const bool pbft_chain_synced = packet_data.rlp_.itemCount() == kChainSyncedPacketSize; - // last_block is the flag to indicate this is the last block in each syncing round, doesn't mean PBFT chain has synced - const bool last_block = packet_data.rlp_[0].toInt(); - PeriodData period_data; - try { - period_data = decodePeriodData(packet_data.rlp_[1]); - } catch (const std::runtime_error &e) { - throw MaliciousPeerException("Unable to parse PeriodData: " + std::string(e.what())); - } - - std::vector> current_block_cert_votes; - if (pbft_chain_synced) { - current_block_cert_votes = decodeVotesBundle(packet_data.rlp_[2]); - } - const auto pbft_blk_hash = period_data.pbft_blk->getBlockHash(); - - std::string received_dag_blocks_str; // This is just log related stuff - for (auto const &block : period_data.dag_blocks) { - received_dag_blocks_str += block.getHash().toString() + " "; - if (peer->dag_level_ < block.getLevel()) { - peer->dag_level_ = block.getLevel(); - } - } - - const auto pbft_block_period = period_data.pbft_blk->getPeriod(); - LOG(log_dg_) << "PbftSyncPacket received. 
Period: " << pbft_block_period - << ", dag Blocks: " << received_dag_blocks_str << " from " << packet_data.from_node_id_; - - peer->markPbftBlockAsKnown(pbft_blk_hash); - // Update peer's pbft period if outdated - if (peer->pbft_chain_size_ < pbft_block_period) { - peer->pbft_chain_size_ = pbft_block_period; - } - - LOG(log_tr_) << "Processing pbft block: " << pbft_blk_hash; - - if (pbft_chain_->findPbftBlockInChain(pbft_blk_hash)) { - LOG(log_wr_) << "PBFT block " << pbft_blk_hash << ", period: " << period_data.pbft_blk->getPeriod() << " from " - << packet_data.from_node_id_ << " already present in chain"; - } else { - if (pbft_block_period != pbft_mgr_->pbftSyncingPeriod() + 1) { - // This can happen if we just got synced and block was cert voted - if (pbft_chain_synced && pbft_block_period == pbft_mgr_->pbftSyncingPeriod()) { - pbftSyncComplete(); - return; - } - - LOG(log_er_) << "Block " << pbft_blk_hash << " period unexpected: " << pbft_block_period - << ". Expected period: " << pbft_mgr_->pbftSyncingPeriod() + 1; - return; - } - - // Check cert vote matches if final synced block - if (pbft_chain_synced) { - for (auto const &vote : current_block_cert_votes) { - if (vote->getBlockHash() != pbft_blk_hash) { - LOG(log_er_) << "Invalid cert votes block hash " << vote->getBlockHash() << " instead of " << pbft_blk_hash - << " from peer " << packet_data.from_node_id_.abridged() << " received, stop syncing."; - handleMaliciousSyncPeer(packet_data.from_node_id_); - return; - } - } - } - - // Check votes match the hash of previous block in the queue - auto last_pbft_block_hash = pbft_mgr_->lastPbftBlockHashFromQueueOrChain(); - // Check cert vote matches - for (auto const &vote : period_data.previous_block_cert_votes) { - if (vote->getBlockHash() != last_pbft_block_hash) { - LOG(log_er_) << "Invalid cert votes block hash " << vote->getBlockHash() << " instead of " - << last_pbft_block_hash << " from peer " << packet_data.from_node_id_.abridged() - << " received, stop 
syncing."; - handleMaliciousSyncPeer(packet_data.from_node_id_); - return; - } - } - - if (!pbft_mgr_->validatePillarDataInPeriodData(period_data)) { - handleMaliciousSyncPeer(packet_data.from_node_id_); - return; - } - - auto order_hash = PbftManager::calculateOrderHash(period_data.dag_blocks); - if (order_hash != period_data.pbft_blk->getOrderHash()) { - { // This is just log related stuff - std::vector trx_order; - trx_order.reserve(period_data.transactions.size()); - std::vector blk_order; - blk_order.reserve(period_data.dag_blocks.size()); - for (auto t : period_data.transactions) { - trx_order.push_back(t->getHash()); - } - for (auto b : period_data.dag_blocks) { - blk_order.push_back(b.getHash()); - } - LOG(log_er_) << "Order hash incorrect in period data " << pbft_blk_hash << " expected: " << order_hash - << " received " << period_data.pbft_blk->getOrderHash() << "; Dag order: " << blk_order - << "; Trx order: " << trx_order << "; from " << packet_data.from_node_id_.abridged() - << ", stop syncing."; - } - handleMaliciousSyncPeer(packet_data.from_node_id_); - return; - } - - // This is special case when queue is empty and we can not say for sure that all votes that are part of this block - // have been verified before - if (pbft_mgr_->periodDataQueueEmpty()) { - for (const auto &v : period_data.previous_block_cert_votes) { - if (auto vote_is_valid = vote_mgr_->validateVote(v); vote_is_valid.first == false) { - LOG(log_er_) << "Invalid reward votes in block " << period_data.pbft_blk->getBlockHash() << " from peer " - << packet_data.from_node_id_.abridged() - << " received, stop syncing. Validation failed. 
Err: " << vote_is_valid.second; - handleMaliciousSyncPeer(packet_data.from_node_id_); - return; - } - - vote_mgr_->addVerifiedVote(v); - } - - // And now we need to replace it with verified votes - if (auto votes = vote_mgr_->checkRewardVotes(period_data.pbft_blk, true); votes.first) { - period_data.previous_block_cert_votes = std::move(votes.second); - } else { - // checkRewardVotes could fail because we just cert voted this block and moved to next period, - // in that case we are probably fully synced - if (pbft_block_period <= vote_mgr_->getRewardVotesPbftBlockPeriod()) { - pbft_syncing_state_->setPbftSyncing(false); - return; - } - - LOG(log_er_) << "Invalid reward votes in block " << period_data.pbft_blk->getBlockHash() << " from peer " - << packet_data.from_node_id_.abridged() << " received, stop syncing."; - handleMaliciousSyncPeer(packet_data.from_node_id_); - return; - } - } - - LOG(log_tr_) << "Synced PBFT block hash " << pbft_blk_hash << " with " - << period_data.previous_block_cert_votes.size() << " cert votes"; - LOG(log_tr_) << "Synced PBFT block " << period_data; - pbft_mgr_->periodDataQueuePush(std::move(period_data), packet_data.from_node_id_, - std::move(current_block_cert_votes)); - } - - auto pbft_sync_period = pbft_mgr_->pbftSyncingPeriod(); - - // Reset last sync packet received time - pbft_syncing_state_->setLastSyncPacketTime(); - - if (pbft_chain_synced) { - pbftSyncComplete(); - return; - } - - if (last_block) { - // If current sync period is actually bigger than the block we just received we are probably synced - if (pbft_sync_period > pbft_block_period) { - pbft_syncing_state_->setPbftSyncing(false); - return; - } - if (pbft_syncing_state_->isPbftSyncing()) { - if (pbft_sync_period > pbft_chain_->getPbftChainSize() + (10 * kConf.network.sync_level_size)) { - LOG(log_tr_) << "Syncing pbft blocks too fast than processing. 
Has synced period " << pbft_sync_period - << ", PBFT chain size " << pbft_chain_->getPbftChainSize(); - periodic_events_tp_.post(kDelayedPbftSyncDelayMs, [this] { delayedPbftSync(1); }); - } else { - if (!syncPeerPbft(pbft_sync_period + 1)) { - pbft_syncing_state_->setPbftSyncing(false); - return; - } - } - } - } -} - -PeriodData PbftSyncPacketHandler::decodePeriodData(const dev::RLP &period_data_rlp) const { - return PeriodData::FromOldPeriodData(period_data_rlp); -} - -std::vector> PbftSyncPacketHandler::decodeVotesBundle( - const dev::RLP &votes_bundle_rlp) const { - return decodePbftVotesBundleRlp(votes_bundle_rlp); -} - -void PbftSyncPacketHandler::pbftSyncComplete() { - if (pbft_mgr_->periodDataQueueSize()) { - LOG(log_tr_) << "Syncing pbft blocks faster than processing. Remaining sync size " - << pbft_mgr_->periodDataQueueSize(); - periodic_events_tp_.post(kDelayedPbftSyncDelayMs, [this] { pbftSyncComplete(); }); - } else { - LOG(log_dg_) << "Syncing PBFT is completed"; - // We are pbft synced with the node we are connected to but - // calling startSyncingPbft will check if some nodes have - // greater pbft chain size and we should continue syncing with - // them, Or sync pending DAG blocks - pbft_syncing_state_->setPbftSyncing(false); - startSyncingPbft(); - if (!pbft_syncing_state_->isPbftSyncing()) { - requestPendingDagBlocks(); - } - } -} - -void PbftSyncPacketHandler::delayedPbftSync(uint32_t counter) { - const uint32_t max_delayed_pbft_sync_count = 60000 / kDelayedPbftSyncDelayMs; - auto pbft_sync_period = pbft_mgr_->pbftSyncingPeriod(); - if (counter > max_delayed_pbft_sync_count) { - LOG(log_er_) << "Pbft blocks stuck in queue, no new block processed in 60 seconds " << pbft_sync_period << " " - << pbft_chain_->getPbftChainSize(); - pbft_syncing_state_->setPbftSyncing(false); - LOG(log_tr_) << "Syncing PBFT is stopping"; - return; - } - - if (pbft_syncing_state_->isPbftSyncing()) { - if (pbft_sync_period > pbft_chain_->getPbftChainSize() + (10 * 
kConf.network.sync_level_size)) { - LOG(log_tr_) << "Syncing pbft blocks faster than processing " << pbft_sync_period << " " - << pbft_chain_->getPbftChainSize(); - periodic_events_tp_.post(kDelayedPbftSyncDelayMs, [this, counter] { delayedPbftSync(counter + 1); }); - } else { - if (!syncPeerPbft(pbft_sync_period + 1)) { - pbft_syncing_state_->setPbftSyncing(false); - } - } - } -} - -void PbftSyncPacketHandler::handleMaliciousSyncPeer(const dev::p2p::NodeID &id) { - peers_state_->set_peer_malicious(id); - - if (auto host = peers_state_->host_.lock(); host) { - LOG(log_nf_) << "Disconnect peer " << id; - host->disconnect(id, dev::p2p::UserReason); - } else { - LOG(log_er_) << "Unable to handleMaliciousSyncPeer, host == nullptr"; - } -} - -} // namespace taraxa::network::tarcap::v3 diff --git a/libraries/core_libs/network/src/tarcap/shared_states/peers_state.cpp b/libraries/core_libs/network/src/tarcap/shared_states/peers_state.cpp index 22f3d7c08d..b109b15d12 100644 --- a/libraries/core_libs/network/src/tarcap/shared_states/peers_state.cpp +++ b/libraries/core_libs/network/src/tarcap/shared_states/peers_state.cpp @@ -40,7 +40,7 @@ std::pair, std::string> PeersState::getPacketSenderP // If peer is in pending_peers_, it means he has not yet sent us initial status packet and // we can receive/send only StatusPacket from/to him if (const auto it_peer = pending_peers_.find(node_id); it_peer != pending_peers_.end()) { - if (packet_type == SubprotocolPacketType::StatusPacket) { + if (packet_type == SubprotocolPacketType::kStatusPacket) { return {it_peer->second, ""}; } else { std::ostringstream error; diff --git a/libraries/core_libs/network/src/tarcap/taraxa_capability.cpp b/libraries/core_libs/network/src/tarcap/taraxa_capability.cpp index 9fa9e5dd51..06940294fc 100644 --- a/libraries/core_libs/network/src/tarcap/taraxa_capability.cpp +++ b/libraries/core_libs/network/src/tarcap/taraxa_capability.cpp @@ -16,8 +16,6 @@ #include 
"network/tarcap/packets_handlers/latest/transaction_packet_handler.hpp" #include "network/tarcap/packets_handlers/latest/vote_packet_handler.hpp" #include "network/tarcap/packets_handlers/latest/votes_bundle_packet_handler.hpp" -#include "network/tarcap/packets_handlers/v3/get_pbft_sync_packet_handler.hpp" -#include "network/tarcap/packets_handlers/v3/pbft_sync_packet_handler.hpp" #include "network/tarcap/shared_states/pbft_syncing_state.hpp" #include "node/node.hpp" #include "pbft/pbft_chain.hpp" @@ -62,7 +60,7 @@ std::string TaraxaCapability::name() const { return TARAXA_CAPABILITY_NAME; } TarcapVersion TaraxaCapability::version() const { return version_; } -unsigned TaraxaCapability::messageCount() const { return SubprotocolPacketType::PacketCount; } +unsigned TaraxaCapability::messageCount() const { return SubprotocolPacketType::kPacketCount; } void TaraxaCapability::onConnect(std::weak_ptr session, u256 const &) { const auto session_p = session.lock(); @@ -228,9 +226,9 @@ void TaraxaCapability::handlePacketQueueOverLimit(std::shared_ptr &peers_state, const std::shared_ptr &pbft_syncing_state, - const std::shared_ptr &packets_stats, const std::shared_ptr &db, - const std::shared_ptr &pbft_mgr, const std::shared_ptr &pbft_chain, - const std::shared_ptr &vote_mgr, const std::shared_ptr &dag_mgr, - const std::shared_ptr &trx_mgr, const std::shared_ptr &slashing_manager, - const std::shared_ptr &pillar_chain_mgr, TarcapVersion, - const addr_t &node_addr) { - auto packets_handlers = std::make_shared(); - // Consensus packets with high processing priority - packets_handlers->registerHandler(config, peers_state, packets_stats, pbft_mgr, pbft_chain, - vote_mgr, slashing_manager, node_addr, logs_prefix); - packets_handlers->registerHandler( - config, peers_state, packets_stats, pbft_mgr, pbft_chain, vote_mgr, slashing_manager, node_addr, logs_prefix); - packets_handlers->registerHandler( - config, peers_state, packets_stats, pbft_mgr, pbft_chain, vote_mgr, 
slashing_manager, node_addr, logs_prefix); - - // Standard packets with mid processing priority - packets_handlers->registerHandler(config, peers_state, packets_stats, pbft_syncing_state, - pbft_chain, pbft_mgr, dag_mgr, trx_mgr, db, node_addr, - logs_prefix); - - packets_handlers->registerHandler(config, peers_state, packets_stats, trx_mgr, - node_addr, logs_prefix); - - // Non critical packets with low processing priority - packets_handlers->registerHandler(config, peers_state, packets_stats, pbft_syncing_state, - pbft_chain, pbft_mgr, dag_mgr, db, genesis_hash, node_addr, - logs_prefix); - packets_handlers->registerHandler(config, peers_state, packets_stats, trx_mgr, dag_mgr, - db, node_addr, logs_prefix); - - packets_handlers->registerHandler(config, peers_state, packets_stats, pbft_syncing_state, - pbft_chain, pbft_mgr, dag_mgr, trx_mgr, db, node_addr, - logs_prefix); - - packets_handlers->registerHandler( - config, peers_state, packets_stats, pbft_syncing_state, pbft_chain, vote_mgr, db, node_addr, logs_prefix); - - packets_handlers->registerHandler(config, peers_state, packets_stats, - pbft_syncing_state, pbft_chain, pbft_mgr, dag_mgr, - vote_mgr, db, node_addr, logs_prefix); - packets_handlers->registerHandler(config, peers_state, packets_stats, pillar_chain_mgr, - node_addr, logs_prefix); - packets_handlers->registerHandler(config, peers_state, packets_stats, - pillar_chain_mgr, node_addr, logs_prefix); - packets_handlers->registerHandler(config, peers_state, packets_stats, - pillar_chain_mgr, node_addr, logs_prefix); - - return packets_handlers; - }; - } // namespace taraxa::network::tarcap diff --git a/libraries/core_libs/network/src/threadpool/packet_data.cpp b/libraries/core_libs/network/src/threadpool/packet_data.cpp index 5f8c187c46..84fb8ec5de 100644 --- a/libraries/core_libs/network/src/threadpool/packet_data.cpp +++ b/libraries/core_libs/network/src/threadpool/packet_data.cpp @@ -17,13 +17,13 @@ PacketData::PacketData(SubprotocolPacketType type, 
const dev::p2p::NodeID& from_ * @return PacketPriority based om packet_type */ PacketData::PacketPriority PacketData::getPacketPriority(SubprotocolPacketType packet_type) { - if (packet_type > SubprotocolPacketType::HighPriorityPackets && - packet_type < SubprotocolPacketType::MidPriorityPackets) { + if (packet_type > SubprotocolPacketType::kHighPriorityPackets && + packet_type < SubprotocolPacketType::kMidPriorityPackets) { return PacketPriority::High; - } else if (packet_type > SubprotocolPacketType::MidPriorityPackets && - packet_type < SubprotocolPacketType::LowPriorityPackets) { + } else if (packet_type > SubprotocolPacketType::kMidPriorityPackets && + packet_type < SubprotocolPacketType::kLowPriorityPackets) { return PacketPriority::Mid; - } else if (packet_type > SubprotocolPacketType::LowPriorityPackets) { + } else if (packet_type > SubprotocolPacketType::kLowPriorityPackets) { return PacketPriority::Low; } diff --git a/libraries/core_libs/network/src/threadpool/packets_blocking_mask.cpp b/libraries/core_libs/network/src/threadpool/packets_blocking_mask.cpp index 2eec31f637..9341d3885c 100644 --- a/libraries/core_libs/network/src/threadpool/packets_blocking_mask.cpp +++ b/libraries/core_libs/network/src/threadpool/packets_blocking_mask.cpp @@ -207,7 +207,7 @@ bool PacketsBlockingMask::isPacketBlocked(const PacketData& packet_data) const { // Custom blocks for specific packet types... 
// Check if DagBlockPacket is blocked by processing some dag blocks with <= dag level - if (packet_data.type_ == SubprotocolPacketType::DagBlockPacket && + if (packet_data.type_ == SubprotocolPacketType::kDagBlockPacket && (isDagBlockPacketBlockedByLevel(packet_data) || isDagBlockPacketBlockedBySameDagBlock(packet_data))) { return true; } diff --git a/libraries/core_libs/network/src/threadpool/priority_queue.cpp b/libraries/core_libs/network/src/threadpool/priority_queue.cpp index b8d006884a..14b7754004 100644 --- a/libraries/core_libs/network/src/threadpool/priority_queue.cpp +++ b/libraries/core_libs/network/src/threadpool/priority_queue.cpp @@ -146,11 +146,11 @@ void PriorityQueue::updateDependenciesFinish(const PacketData& packet, std::mute bool PriorityQueue::isNonBlockingPacket(SubprotocolPacketType packet_type) const { // Note: any packet type that is not in this switch should be processed in updateDependencies switch (packet_type) { - case SubprotocolPacketType::VotePacket: - case SubprotocolPacketType::GetNextVotesSyncPacket: - case SubprotocolPacketType::VotesBundlePacket: - case SubprotocolPacketType::StatusPacket: - case SubprotocolPacketType::PillarVotePacket: + case SubprotocolPacketType::kVotePacket: + case SubprotocolPacketType::kGetNextVotesSyncPacket: + case SubprotocolPacketType::kVotesBundlePacket: + case SubprotocolPacketType::kStatusPacket: + case SubprotocolPacketType::kPillarVotePacket: return true; } @@ -166,11 +166,11 @@ bool PriorityQueue::updateBlockingDependencies(const PacketData& packet, bool un // GetPillarVotesBundlePacket -> serve pillar votes syncing data to only 1 node at the time // PillarVotesBundlePacket -> process only 1 packet at a time. 
TODO[2744]: remove after protection mechanism is // implemented PbftSyncPacket -> process sync pbft blocks synchronously - case SubprotocolPacketType::GetDagSyncPacket: - case SubprotocolPacketType::GetPbftSyncPacket: - case SubprotocolPacketType::GetPillarVotesBundlePacket: - case SubprotocolPacketType::PillarVotesBundlePacket: // TODO[2744]: remove - case SubprotocolPacketType::PbftSyncPacket: { + case SubprotocolPacketType::kGetDagSyncPacket: + case SubprotocolPacketType::kGetPbftSyncPacket: + case SubprotocolPacketType::kGetPillarVotesBundlePacket: + case SubprotocolPacketType::kPillarVotesBundlePacket: // TODO[2744]: remove + case SubprotocolPacketType::kPbftSyncPacket: { if (!unblock_processing) { blocked_packets_mask_.markPacketAsHardBlocked(packet, packet.type_); } else { @@ -182,13 +182,13 @@ bool PriorityQueue::updateBlockingDependencies(const PacketData& packet, bool un // When syncing dag blocks, process only 1 packet at a time: // DagSyncPacket -> process sync dag blocks synchronously // DagBlockPacket -> wait with processing of new dag blocks until old blocks are synced - case SubprotocolPacketType::DagSyncPacket: { + case SubprotocolPacketType::kDagSyncPacket: { if (!unblock_processing) { blocked_packets_mask_.markPacketAsHardBlocked(packet, packet.type_); - blocked_packets_mask_.markPacketAsPeerOrderBlocked(packet, SubprotocolPacketType::DagBlockPacket); + blocked_packets_mask_.markPacketAsPeerOrderBlocked(packet, SubprotocolPacketType::kDagBlockPacket); } else { blocked_packets_mask_.markPacketAsHardUnblocked(packet, packet.type_); - blocked_packets_mask_.markPacketAsPeerOrderUnblocked(packet, SubprotocolPacketType::DagBlockPacket); + blocked_packets_mask_.markPacketAsPeerOrderUnblocked(packet, SubprotocolPacketType::kDagBlockPacket); } break; } @@ -196,16 +196,16 @@ bool PriorityQueue::updateBlockingDependencies(const PacketData& packet, bool un // When processing TransactionPacket, processing of all dag block packets that were received after that 
(from the // same peer). No need to block processing of dag blocks packets received before as it should not be possible to // send dag block before sending txs it contains... - case SubprotocolPacketType::TransactionPacket: { + case SubprotocolPacketType::kTransactionPacket: { if (!unblock_processing) { - blocked_packets_mask_.markPacketAsPeerOrderBlocked(packet, SubprotocolPacketType::DagBlockPacket); + blocked_packets_mask_.markPacketAsPeerOrderBlocked(packet, SubprotocolPacketType::kDagBlockPacket); } else { - blocked_packets_mask_.markPacketAsPeerOrderUnblocked(packet, SubprotocolPacketType::DagBlockPacket); + blocked_packets_mask_.markPacketAsPeerOrderUnblocked(packet, SubprotocolPacketType::kDagBlockPacket); } break; } - case SubprotocolPacketType::DagBlockPacket: { + case SubprotocolPacketType::kDagBlockPacket: { if (!unblock_processing) { blocked_packets_mask_.setDagBlockLevelBeingProcessed(packet); blocked_packets_mask_.setDagBlockBeingProcessed(packet); diff --git a/libraries/logger/include/logger/logger.hpp b/libraries/logger/include/logger/logger.hpp index da66005fa5..bfb2ef212e 100644 --- a/libraries/logger/include/logger/logger.hpp +++ b/libraries/logger/include/logger/logger.hpp @@ -65,14 +65,6 @@ void InitLogging(Config& logging_config, const addr_t& node_id); mutable taraxa::logger::Logger log_dg_; \ mutable taraxa::logger::Logger log_tr_; -#define LOG_OBJECTS_DEFINE_SUB(group) \ - mutable taraxa::logger::Logger log_si_##group##_; \ - mutable taraxa::logger::Logger log_er_##group##_; \ - mutable taraxa::logger::Logger log_wr_##group##_; \ - mutable taraxa::logger::Logger log_nf_##group##_; \ - mutable taraxa::logger::Logger log_dg_##group##_; \ - mutable taraxa::logger::Logger log_tr_##group##_; - #define LOG_OBJECTS_CREATE(channel) \ log_si_ = taraxa::logger::createLogger(taraxa::logger::Verbosity::Silent, channel, node_addr); \ log_er_ = taraxa::logger::createLogger(taraxa::logger::Verbosity::Error, channel, node_addr); \ @@ -80,11 +72,3 @@ void 
InitLogging(Config& logging_config, const addr_t& node_id); log_nf_ = taraxa::logger::createLogger(taraxa::logger::Verbosity::Info, channel, node_addr); \ log_tr_ = taraxa::logger::createLogger(taraxa::logger::Verbosity::Trace, channel, node_addr); \ log_dg_ = taraxa::logger::createLogger(taraxa::logger::Verbosity::Debug, channel, node_addr); - -#define LOG_OBJECTS_CREATE_SUB(channel, group) \ - log_si_##group##_ = taraxa::logger::createLogger(taraxa::logger::Verbosity::Silent, channel, node_addr); \ - log_er_##group##_ = taraxa::logger::createLogger(taraxa::logger::Verbosity::Error, channel, node_addr); \ - log_wr_##group##_ = taraxa::logger::createLogger(taraxa::logger::Verbosity::Warning, channel, node_addr); \ - log_nf_##group##_ = taraxa::logger::createLogger(taraxa::logger::Verbosity::Info, channel, node_addr); \ - log_tr_##group##_ = taraxa::logger::createLogger(taraxa::logger::Verbosity::Trace, channel, node_addr); \ - log_dg_##group##_ = taraxa::logger::createLogger(taraxa::logger::Verbosity::Debug, channel, node_addr); diff --git a/tests/tarcap_threadpool_test.cpp b/tests/tarcap_threadpool_test.cpp index e889013a47..199054ab9c 100644 --- a/tests/tarcap_threadpool_test.cpp +++ b/tests/tarcap_threadpool_test.cpp @@ -1,259 +1,265 @@ -#include + #include -#include "config/config.hpp" -#include "config/version.hpp" -#include "dag/dag_block.hpp" -#include "logger/logger.hpp" -#include "network/tarcap/packets_handler.hpp" -#include "network/threadpool/tarcap_thread_pool.hpp" -#include "test_util/test_util.hpp" + #include "config/config.hpp" + #include "config/version.hpp" + #include "dag/dag_block.hpp" + #include "logger/logger.hpp" + #include "network/tarcap/packets_handler.hpp" + #include "network/tarcap/packets_handlers/latest/common/packet_handler.hpp" + #include "network/tarcap/shared_states/peers_state.hpp" + #include "network/threadpool/tarcap_thread_pool.hpp" + #include "test_util/test_util.hpp" -namespace taraxa::core_tests { + namespace 
taraxa::core_tests { -using namespace std::literals; + using namespace std::literals; // Do not use NodesTest from "test_util/gtest.hpp" as its functionality is not needed in this test -struct NodesTest : virtual testing::Test { - testing::UnitTest* current_test = ::testing::UnitTest::GetInstance(); - testing::TestInfo const* current_test_info = current_test->current_test_info(); - - NodesTest() = default; - virtual ~NodesTest() = default; - - NodesTest(const NodesTest&) = delete; - NodesTest(NodesTest&&) = delete; - NodesTest& operator=(const NodesTest&) = delete; - NodesTest& operator=(NodesTest&&) = delete; -}; - -struct TarcapTpTest : NodesTest {}; - -using namespace taraxa::network; - -class PacketsProcessingInfo { - public: - struct PacketProcessingTimes { - std::chrono::steady_clock::time_point start_time_; - std::chrono::steady_clock::time_point finish_time_; - }; - - public: - void addPacketProcessingTimes(threadpool::PacketData::PacketId packet_id, - const PacketProcessingTimes& packet_processing_times) { - std::scoped_lock lock(mutex_); - bool res = packets_processing_times_.emplace(packet_id, packet_processing_times).second; - assert(res); - } - - PacketProcessingTimes getPacketProcessingTimes(threadpool::PacketData::PacketId packet_id) const { - std::shared_lock lock(mutex_); - - auto found_packet_info = packets_processing_times_.find(packet_id); - - // Failed to obtain processing times for packet id: packet_id. Processing did not finish yet. 
This should be - // caught in processing times comparing - if (found_packet_info == packets_processing_times_.end()) { - return {}; - } - - return found_packet_info->second; - } - - size_t getPacketProcessingTimesCount() const { - std::shared_lock lock(mutex_); - return packets_processing_times_.size(); - } - - private: - std::unordered_map packets_processing_times_; - mutable std::shared_mutex mutex_; -}; + struct NodesTest : virtual testing::Test { + testing::UnitTest* current_test = ::testing::UnitTest::GetInstance(); + testing::TestInfo const* current_test_info = current_test->current_test_info(); + + NodesTest() = default; + virtual ~NodesTest() = default; + + NodesTest(const NodesTest&) = delete; + NodesTest(NodesTest&&) = delete; + NodesTest& operator=(const NodesTest&) = delete; + NodesTest& operator=(NodesTest&&) = delete; + }; + + struct TarcapTpTest : NodesTest {}; + + using namespace taraxa::network; + + class PacketsProcessingInfo { + public: + struct PacketProcessingTimes { + std::chrono::steady_clock::time_point start_time_; + std::chrono::steady_clock::time_point finish_time_; + }; + + public: + void addPacketProcessingTimes(threadpool::PacketData::PacketId packet_id, + const PacketProcessingTimes& packet_processing_times) { + std::scoped_lock lock(mutex_); + bool res = packets_processing_times_.emplace(packet_id, packet_processing_times).second; + assert(res); + } + + PacketProcessingTimes getPacketProcessingTimes(threadpool::PacketData::PacketId packet_id) const { + std::shared_lock lock(mutex_); + + auto found_packet_info = packets_processing_times_.find(packet_id); + + // Failed to obtain processing times for packet id: packet_id. Processing did not finish yet. 
This should be + // caught in processing times comparing + if (found_packet_info == packets_processing_times_.end()) { + return {}; + } + + return found_packet_info->second; + } + + size_t getPacketProcessingTimesCount() const { + std::shared_lock lock(mutex_); + return packets_processing_times_.size(); + } + + private: + std::unordered_map packets_processing_times_; + mutable std::shared_mutex mutex_; + }; // Help functions for tests -struct HandlersInitData { - FullNodeConfig conf; - dev::p2p::NodeID sender_node_id; - addr_t own_node_addr; - - std::shared_ptr peers_state; - std::shared_ptr packets_stats; - std::shared_ptr packets_processing_info; - - dev::p2p::NodeID copySender() { return sender_node_id; } -}; - -class DummyPacketHandler : public tarcap::PacketHandler { - public: - DummyPacketHandler(const HandlersInitData& init_data, const std::string& log_channel_name, - uint32_t processing_delay_ms) - : PacketHandler(init_data.conf, init_data.peers_state, init_data.packets_stats, init_data.own_node_addr, - log_channel_name), - processing_delay_ms_(processing_delay_ms), - packets_proc_info_(init_data.packets_processing_info) {} - - virtual ~DummyPacketHandler() = default; - DummyPacketHandler(const DummyPacketHandler&) = default; - DummyPacketHandler(DummyPacketHandler&&) = default; - DummyPacketHandler& operator=(const DummyPacketHandler&) = delete; - DummyPacketHandler& operator=(DummyPacketHandler&&) = delete; - - private: - void validatePacketRlpFormat([[maybe_unused]] const threadpool::PacketData& packet_data) const override {} - - void process(const threadpool::PacketData& packet_data, - [[maybe_unused]] const std::shared_ptr& peer) override { - // Note do not use LOG() before saving start & finish time as it is internally synchronized and can - // cause delays, which result in tests fails - auto start_time = std::chrono::steady_clock::now(); - std::this_thread::sleep_for(std::chrono::milliseconds(processing_delay_ms_)); - auto finish_time = 
std::chrono::steady_clock::now(); - - LOG(log_dg_) << "Processing packet: " << packet_data.type_str_ << ", id(" << packet_data.id_ << ") finished. " - << "Start time: " << start_time.time_since_epoch().count() - << ", finish time: " << finish_time.time_since_epoch().count(); - - packets_proc_info_->addPacketProcessingTimes(packet_data.id_, {start_time, finish_time}); - } - - uint32_t processing_delay_ms_{0}; - std::shared_ptr packets_proc_info_; -}; - -class DummyTransactionPacketHandler : public DummyPacketHandler { - public: - DummyTransactionPacketHandler(const HandlersInitData& init_data, const std::string& log_channel_name, - uint32_t processing_delay_ms) - : DummyPacketHandler(init_data, log_channel_name, processing_delay_ms) {} - - // Packet type that is processed by this handler - static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::TransactionPacket; -}; - -class DummyDagBlockPacketHandler : public DummyPacketHandler { - public: - DummyDagBlockPacketHandler(const HandlersInitData& init_data, const std::string& log_channel_name, - uint32_t processing_delay_ms) - : DummyPacketHandler(init_data, log_channel_name, processing_delay_ms) {} - - // Packet type that is processed by this handler - static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::DagBlockPacket; -}; - -class DummyStatusPacketHandler : public DummyPacketHandler { - public: - DummyStatusPacketHandler(const HandlersInitData& init_data, const std::string& log_channel_name, - uint32_t processing_delay_ms) - : DummyPacketHandler(init_data, log_channel_name, processing_delay_ms) {} - - // Packet type that is processed by this handler - static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::StatusPacket; -}; - -class DummyVotePacketHandler : public DummyPacketHandler { - public: - DummyVotePacketHandler(const HandlersInitData& init_data, const std::string& log_channel_name, - uint32_t processing_delay_ms) - : 
DummyPacketHandler(init_data, log_channel_name, processing_delay_ms) {} - - // Packet type that is processed by this handler - static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::VotePacket; -}; - -class DummyGetNextVotesBundlePacketHandler : public DummyPacketHandler { - public: - DummyGetNextVotesBundlePacketHandler(const HandlersInitData& init_data, const std::string& log_channel_name, - uint32_t processing_delay_ms) - : DummyPacketHandler(init_data, log_channel_name, processing_delay_ms) {} - - // Packet type that is processed by this handler - static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::GetNextVotesSyncPacket; -}; - -class DummyVotesBundlePacketHandler : public DummyPacketHandler { - public: - DummyVotesBundlePacketHandler(const HandlersInitData& init_data, const std::string& log_channel_name, - uint32_t processing_delay_ms) - : DummyPacketHandler(init_data, log_channel_name, processing_delay_ms) {} - - // Packet type that is processed by this handler - static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::VotesBundlePacket; -}; - -class DummyGetDagSyncPacketHandler : public DummyPacketHandler { - public: - DummyGetDagSyncPacketHandler(const HandlersInitData& init_data, const std::string& log_channel_name, - uint32_t processing_delay_ms) - : DummyPacketHandler(init_data, log_channel_name, processing_delay_ms) {} - - // Packet type that is processed by this handler - static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::GetDagSyncPacket; -}; - -class DummyGetPbftSyncPacketHandler : public DummyPacketHandler { - public: - DummyGetPbftSyncPacketHandler(const HandlersInitData& init_data, const std::string& log_channel_name, + struct HandlersInitData { + FullNodeConfig conf; + dev::p2p::NodeID sender_node_id; + addr_t own_node_addr; + + std::shared_ptr peers_state; + std::shared_ptr packets_stats; + std::shared_ptr packets_processing_info; + + dev::p2p::NodeID 
copySender() { return sender_node_id; } + }; + + struct DummyPacket { + DummyPacket(const dev::RLP& packet_rlp) {} + }; + + class DummyPacketHandler : public tarcap::PacketHandler { + public: + DummyPacketHandler(const HandlersInitData& init_data, const std::string& log_channel_name, + uint32_t processing_delay_ms) + : PacketHandler(init_data.conf, init_data.peers_state, init_data.packets_stats, init_data.own_node_addr, + log_channel_name), + processing_delay_ms_(processing_delay_ms), + packets_proc_info_(init_data.packets_processing_info) {} + + virtual ~DummyPacketHandler() = default; + DummyPacketHandler(const DummyPacketHandler&) = default; + DummyPacketHandler(DummyPacketHandler&&) = default; + DummyPacketHandler& operator=(const DummyPacketHandler&) = delete; + DummyPacketHandler& operator=(DummyPacketHandler&&) = delete; + + private: + void validatePacketRlpFormat([[maybe_unused]] const threadpool::PacketData& packet_data) const override {} + + void process(DummyPacket&& packet_data, + [[maybe_unused]] const std::shared_ptr& peer) override { + // Note do not use LOG() before saving start & finish time as it is internally synchronized and can + // cause delays, which result in tests fails + auto start_time = std::chrono::steady_clock::now(); + std::this_thread::sleep_for(std::chrono::milliseconds(processing_delay_ms_)); + auto finish_time = std::chrono::steady_clock::now(); + + LOG(log_dg_) << "Processing packet: " << packet_data.type_str_ << ", id(" << packet_data.id_ << ") finished. 
" + << "Start time: " << start_time.time_since_epoch().count() + << ", finish time: " << finish_time.time_since_epoch().count(); + + packets_proc_info_->addPacketProcessingTimes(packet_data.id_, {start_time, finish_time}); + } + + uint32_t processing_delay_ms_{0}; + std::shared_ptr packets_proc_info_; + }; + + class DummyTransactionPacketHandler : public DummyPacketHandler { + public: + DummyTransactionPacketHandler(const HandlersInitData& init_data, const std::string& log_channel_name, + uint32_t processing_delay_ms) + : DummyPacketHandler(init_data, log_channel_name, processing_delay_ms) {} + + // Packet type that is processed by this handler + static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::kTransactionPacket; + }; + + class DummyDagBlockPacketHandler : public DummyPacketHandler { + public: + DummyDagBlockPacketHandler(const HandlersInitData& init_data, const std::string& log_channel_name, + uint32_t processing_delay_ms) + : DummyPacketHandler(init_data, log_channel_name, processing_delay_ms) {} + + // Packet type that is processed by this handler + static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::kDagBlockPacket; + }; + + class DummyStatusPacketHandler : public DummyPacketHandler { + public: + DummyStatusPacketHandler(const HandlersInitData& init_data, const std::string& log_channel_name, + uint32_t processing_delay_ms) + : DummyPacketHandler(init_data, log_channel_name, processing_delay_ms) {} + + // Packet type that is processed by this handler + static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::kStatusPacket; + }; + + class DummyVotePacketHandler : public DummyPacketHandler { + public: + DummyVotePacketHandler(const HandlersInitData& init_data, const std::string& log_channel_name, + uint32_t processing_delay_ms) + : DummyPacketHandler(init_data, log_channel_name, processing_delay_ms) {} + + // Packet type that is processed by this handler + static constexpr 
SubprotocolPacketType kPacketType_ = SubprotocolPacketType::kVotePacket; + }; + + class DummyGetNextVotesBundlePacketHandler : public DummyPacketHandler { + public: + DummyGetNextVotesBundlePacketHandler(const HandlersInitData& init_data, const std::string& log_channel_name, + uint32_t processing_delay_ms) + : DummyPacketHandler(init_data, log_channel_name, processing_delay_ms) {} + + // Packet type that is processed by this handler + static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::kGetNextVotesSyncPacket; + }; + + class DummyVotesBundlePacketHandler : public DummyPacketHandler { + public: + DummyVotesBundlePacketHandler(const HandlersInitData& init_data, const std::string& log_channel_name, + uint32_t processing_delay_ms) + : DummyPacketHandler(init_data, log_channel_name, processing_delay_ms) {} + + // Packet type that is processed by this handler + static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::kVotesBundlePacket; + }; + + class DummyGetDagSyncPacketHandler : public DummyPacketHandler { + public: + DummyGetDagSyncPacketHandler(const HandlersInitData& init_data, const std::string& log_channel_name, uint32_t processing_delay_ms) - : DummyPacketHandler(init_data, log_channel_name, processing_delay_ms) {} + : DummyPacketHandler(init_data, log_channel_name, processing_delay_ms) {} - // Packet type that is processed by this handler - static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::GetPbftSyncPacket; -}; + // Packet type that is processed by this handler + static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::kGetDagSyncPacket; + }; -class DummyDagSyncPacketHandler : public DummyPacketHandler { - public: - DummyDagSyncPacketHandler(const HandlersInitData& init_data, const std::string& log_channel_name, - uint32_t processing_delay_ms) - : DummyPacketHandler(init_data, log_channel_name, processing_delay_ms) {} + class DummyGetPbftSyncPacketHandler : public 
DummyPacketHandler { + public: + DummyGetPbftSyncPacketHandler(const HandlersInitData& init_data, const std::string& log_channel_name, + uint32_t processing_delay_ms) + : DummyPacketHandler(init_data, log_channel_name, processing_delay_ms) {} - // Packet type that is processed by this handler - static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::DagSyncPacket; -}; + // Packet type that is processed by this handler + static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::kGetPbftSyncPacket; + }; -class DummyPbftSyncPacketHandler : public DummyPacketHandler { - public: - DummyPbftSyncPacketHandler(const HandlersInitData& init_data, const std::string& log_channel_name, + class DummyDagSyncPacketHandler : public DummyPacketHandler { + public: + DummyDagSyncPacketHandler(const HandlersInitData& init_data, const std::string& log_channel_name, uint32_t processing_delay_ms) - : DummyPacketHandler(init_data, log_channel_name, processing_delay_ms) {} - - // Packet type that is processed by this handler - static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::PbftSyncPacket; -}; - -HandlersInitData createHandlersInitData() { - HandlersInitData ret_init_data; - - ret_init_data.sender_node_id = dev::p2p::NodeID(1); - ret_init_data.own_node_addr = addr_t(2); - ret_init_data.peers_state = std::make_shared(std::weak_ptr(), FullNodeConfig()); - ret_init_data.packets_stats = - std::make_shared(std::chrono::milliseconds(0), ret_init_data.own_node_addr); - ret_init_data.packets_processing_info = std::make_shared(); - - // Enable packets from sending peer to be processed - auto peer = ret_init_data.peers_state->addPendingPeer(ret_init_data.sender_node_id, ""); - ret_init_data.peers_state->setPeerAsReadyToSendMessages(ret_init_data.sender_node_id, peer); - - return ret_init_data; -} - -std::pair createPacket( - const dev::p2p::NodeID& sender_node_id, SubprotocolPacketType packet_type, - std::optional> packet_rlp_bytes 
= {}) { - if (packet_rlp_bytes.has_value()) { - threadpool::PacketData packet_data(packet_type, sender_node_id, std::move(packet_rlp_bytes.value())); - return {TARAXA_NET_VERSION, std::move(packet_data)}; - } - - dev::RLPStream s(0); - threadpool::PacketData packet_data(packet_type, sender_node_id, s.invalidate()); - return {TARAXA_NET_VERSION, std::move(packet_data)}; -} - -bytes createDagBlockRlp(level_t level, uint32_t sig = 777) { - // Creates dag block rlp as it is required for blocking mask to extract dag block level - DagBlock blk(blk_hash_t(10), level, {}, {}, sig_t(sig), blk_hash_t(1), addr_t(15)); - return blk.rlp(true); -} + : DummyPacketHandler(init_data, log_channel_name, processing_delay_ms) {} + + // Packet type that is processed by this handler + static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::kDagSyncPacket; + }; + + class DummyPbftSyncPacketHandler : public DummyPacketHandler { + public: + DummyPbftSyncPacketHandler(const HandlersInitData& init_data, const std::string& log_channel_name, + uint32_t processing_delay_ms) + : DummyPacketHandler(init_data, log_channel_name, processing_delay_ms) {} + + // Packet type that is processed by this handler + static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::kPbftSyncPacket; + }; + + HandlersInitData createHandlersInitData() { + HandlersInitData ret_init_data; + + ret_init_data.sender_node_id = dev::p2p::NodeID(1); + ret_init_data.own_node_addr = addr_t(2); + ret_init_data.peers_state = std::make_shared(std::weak_ptr(), + FullNodeConfig()); ret_init_data.packets_stats = + std::make_shared(std::chrono::milliseconds(0), ret_init_data.own_node_addr); + ret_init_data.packets_processing_info = std::make_shared(); + + // Enable packets from sending peer to be processed + auto peer = ret_init_data.peers_state->addPendingPeer(ret_init_data.sender_node_id, ""); + ret_init_data.peers_state->setPeerAsReadyToSendMessages(ret_init_data.sender_node_id, peer); + + 
return ret_init_data; + } + + std::pair createPacket( + const dev::p2p::NodeID& sender_node_id, SubprotocolPacketType packet_type, + std::optional> packet_rlp_bytes = {}) { + if (packet_rlp_bytes.has_value()) { + threadpool::PacketData packet_data(packet_type, sender_node_id, std::move(packet_rlp_bytes.value())); + return {TARAXA_NET_VERSION, std::move(packet_data)}; + } + + dev::RLPStream s(0); + threadpool::PacketData packet_data(packet_type, sender_node_id, s.invalidate()); + return {TARAXA_NET_VERSION, std::move(packet_data)}; + } + + bytes createDagBlockRlp(level_t level, uint32_t sig = 777) { + // Creates dag block rlp as it is required for blocking mask to extract dag block level + DagBlock blk(blk_hash_t(10), level, {}, {}, sig_t(sig), blk_hash_t(1), addr_t(15)); + return blk.rlp(true); + } /** * @brief Check all combinations(without repetition) of provided packets that they were processed concurrently: @@ -262,7 +268,7 @@ bytes createDagBlockRlp(level_t level, uint32_t sig = 777) { * * @param packets */ -void checkConcurrentProcessing( + void checkConcurrentProcessing( const std::vector>& packets) { assert(packets.size() >= 2); @@ -284,7 +290,7 @@ void checkConcurrentProcessing( * * @param packets */ -void checkSerialProcessing( + void checkSerialProcessing( const std::vector>& packets) { assert(packets.size() >= 2); @@ -298,320 +304,325 @@ void checkSerialProcessing( } } -size_t queuesSize(const threadpool::PacketsThreadPool& tp) { + size_t queuesSize(const threadpool::PacketsThreadPool& tp) { const auto [high_priority_queue_size, mid_priority_queue_size, low_priority_queue_size] = tp.getQueueSize(); return high_priority_queue_size + mid_priority_queue_size + low_priority_queue_size; } // Threshold for packets queue to be emptied -constexpr std::chrono::milliseconds QUEUE_EMPTIED_WAIT_TRESHOLD_MS = 15ms; + constexpr std::chrono::milliseconds QUEUE_EMPTIED_WAIT_TRESHOLD_MS = 15ms; // Test all packet types if they are either in non-blocking or blocking list 
of packets -TEST_F(TarcapTpTest, packets_blocking_dependencies) { - network::threadpool::PriorityQueue priority_queue(3); - - for (auto packet_type = SubprotocolPacketType{0}; packet_type != SubprotocolPacketType::PacketCount; - packet_type = static_cast(static_cast(packet_type) + 1)) { - // Skip unreal packet types - switch (packet_type) { - case SubprotocolPacketType::HighPriorityPackets: - case SubprotocolPacketType::MidPriorityPackets: - case SubprotocolPacketType::LowPriorityPackets: - case SubprotocolPacketType::PacketCount: - continue; - } - - std::vector packet_bytes; - - // Generate proper rlp for packets that need it for processing - if (packet_type == SubprotocolPacketType::DagBlockPacket) { - DagBlock blk(blk_hash_t(1), 1, {}, {trx_hash_t(1), trx_hash_t(2)}, sig_t(3), blk_hash_t(0x4), addr_t(5)); - packet_bytes = blk.rlp(true); - } - network::threadpool::PacketData packet_data{packet_type, {}, std::move(packet_bytes)}; - packet_data.id_ = static_cast(packet_type); - - bool is_non_blocking_packet = priority_queue.isNonBlockingPacket(packet_data.type_); - bool is_blocking_packet = priority_queue.updateBlockingDependencies(packet_data); - - EXPECT_TRUE(is_non_blocking_packet != is_blocking_packet); - } -} + TEST_F(TarcapTpTest, packets_blocking_dependencies) { + network::threadpool::PriorityQueue priority_queue(3); + + for (auto packet_type = SubprotocolPacketType{0}; packet_type != SubprotocolPacketType::kPacketCount; + packet_type = static_cast(static_cast(packet_type) + 1)) { + // Skip unreal packet types + switch (packet_type) { + case SubprotocolPacketType::kHighPriorityPackets: + case SubprotocolPacketType::kMidPriorityPackets: + case SubprotocolPacketType::kLowPriorityPackets: + case SubprotocolPacketType::kPacketCount: + continue; + } + + std::vector packet_bytes; + + // Generate proper rlp for packets that need it for processing + if (packet_type == SubprotocolPacketType::kDagBlockPacket) { + DagBlock blk(blk_hash_t(1), 1, {}, {trx_hash_t(1), 
trx_hash_t(2)}, sig_t(3), blk_hash_t(0x4), addr_t(5)); + packet_bytes = blk.rlp(true); + } + network::threadpool::PacketData packet_data{packet_type, {}, std::move(packet_bytes)}; + packet_data.id_ = static_cast(packet_type); + + bool is_non_blocking_packet = priority_queue.isNonBlockingPacket(packet_data.type_); + bool is_blocking_packet = priority_queue.updateBlockingDependencies(packet_data); + + EXPECT_TRUE(is_non_blocking_packet != is_blocking_packet); + } + } // Test if all "block-free" packets are processed concurrently // Note: in case someone creates new blocking dependency and does not adjust tests, this test should fail -TEST_F(TarcapTpTest, block_free_packets) { - HandlersInitData init_data = createHandlersInitData(); - - // Creates sender 2 to bypass peer order block on Transaction -> DagBlock packet. In case those packets sent - // 2 different senders those packets are "block-free" - dev::p2p::NodeID sender2(3); - auto peer = init_data.peers_state->addPendingPeer(sender2, ""); - init_data.peers_state->setPeerAsReadyToSendMessages(sender2, peer); - - auto packets_handler = std::make_shared(); - - packets_handler->registerHandler(init_data, "TX_PH", 20); - packets_handler->registerHandler(init_data, "DAG_BLOCK_PH", 20); - packets_handler->registerHandler(init_data, "STATUS_PH", 20); - packets_handler->registerHandler(init_data, "VOTE_PH", 20); - packets_handler->registerHandler(init_data, "GET_NEXT_VOTES_SYNC_PH", 20); - packets_handler->registerHandler(init_data, "VOTES_SYNC_PH", 20); - - // Creates threadpool - // Note: make num of threads >= num of packets to check if they are processed concurrently without blocks, otherwise - // some blocks would be blocked for processing due to max threads limit - threadpool::PacketsThreadPool tp(18); - tp.setPacketsHandlers(TARAXA_NET_VERSION, packets_handler); - - // Pushes packets to the tp - auto packet = createPacket(init_data.copySender(), SubprotocolPacketType::TransactionPacket, {}); - if 
(packet.second.rlp_.isList()) { - std::cout << "is list"; - } else { - std::cout << "not list"; - } - const auto packet0_tx_id = - tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::TransactionPacket, {})).value(); - const auto packet1_tx_id = - tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::TransactionPacket, {})).value(); - const auto packet2_tx_id = - tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::TransactionPacket, {})).value(); - const auto packet3_tx_id = - tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::TransactionPacket, {})).value(); - - const auto packet4_dag_block_id = - tp.push(createPacket(dev::p2p::NodeID(sender2), SubprotocolPacketType::DagBlockPacket, {createDagBlockRlp(0, 1)})) - .value(); - const auto packet5_dag_block_id = - tp.push(createPacket(dev::p2p::NodeID(sender2), SubprotocolPacketType::DagBlockPacket, {createDagBlockRlp(0, 2)})) - .value(); - - const auto packet8_status_id = - tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::StatusPacket, {})).value(); - const auto packet9_status_id = - tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::StatusPacket, {})).value(); - - const auto packet12_vote_id = - tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::VotePacket, {})).value(); - const auto packet13_vote_id = - tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::VotePacket, {})).value(); - - const auto packet14_get_pbft_next_votes_id = - tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::GetNextVotesSyncPacket, {})).value(); - const auto packet15_get_pbft_next_votes_id = - tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::GetNextVotesSyncPacket, {})).value(); - - const auto packet16_pbft_next_votes_id = - tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::VotesBundlePacket, {})).value(); - - size_t packets_count = 0; - const auto 
packet17_pbft_next_votes_id = packets_count = - tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::VotesBundlePacket, {})).value(); - - tp.startProcessing(); - - // How should packets be processed: - // Note: To understand how are different packet types processed (concurrently without any blocking dependencies or - // synchronously due to some blocking dependencies - depends on situation), check - // PriorityQueue::updateDependenciesStart - /* - ---------------------- - - packet0_transaction - - ---------------------- - ---------------------- - - packet1_transaction - - ---------------------- - ----------------------- - - packet2_transaction - - ----------------------- - - -||- - ... - - ----------------------- - - packet17_votes_sync - - ----------------------- - 0.....................20.................... time [ms] - */ - - // All packets should be already being processed after short amount of time - std::this_thread::sleep_for(QUEUE_EMPTIED_WAIT_TRESHOLD_MS); - EXPECT_EQ(queuesSize(tp), 0); - - // Wait until processing of all packets is finished - in some edge cases it might be little bit delayed due to locking - EXPECT_HAPPENS({500s, 20ms}, [&](auto& ctx) { - // Check if transactions was propagated to node0 - WAIT_EXPECT_EQ(ctx, init_data.packets_processing_info->getPacketProcessingTimesCount(), packets_count + 1) - }); - - // Check order of packets how they were processed - const auto packets_proc_info = init_data.packets_processing_info; - - const auto packet0_tx_proc_info = packets_proc_info->getPacketProcessingTimes(packet0_tx_id); - const auto packet1_tx_proc_info = packets_proc_info->getPacketProcessingTimes(packet1_tx_id); - const auto packet2_tx_proc_info = packets_proc_info->getPacketProcessingTimes(packet2_tx_id); - const auto packet3_tx_proc_info = packets_proc_info->getPacketProcessingTimes(packet3_tx_id); - - const auto packet4_dag_block_proc_info = packets_proc_info->getPacketProcessingTimes(packet4_dag_block_id); - const auto 
packet5_dag_block_proc_info = packets_proc_info->getPacketProcessingTimes(packet5_dag_block_id); - - const auto packet8_status_proc_info = packets_proc_info->getPacketProcessingTimes(packet8_status_id); - const auto packet9_status_proc_info = packets_proc_info->getPacketProcessingTimes(packet9_status_id); - - const auto packet12_vote_proc_info = packets_proc_info->getPacketProcessingTimes(packet12_vote_id); - const auto packet13_vote_proc_info = packets_proc_info->getPacketProcessingTimes(packet13_vote_id); - - const auto packet14_get_pbft_next_votes_proc_info = - packets_proc_info->getPacketProcessingTimes(packet14_get_pbft_next_votes_id); - const auto packet15_get_pbft_next_votes_proc_info = - packets_proc_info->getPacketProcessingTimes(packet15_get_pbft_next_votes_id); - - const auto packet16_pbft_next_votes_proc_info = - packets_proc_info->getPacketProcessingTimes(packet16_pbft_next_votes_id); - const auto packet17_pbft_next_votes_proc_info = - packets_proc_info->getPacketProcessingTimes(packet17_pbft_next_votes_id); - - checkConcurrentProcessing({ - {packet0_tx_proc_info, "packet0_tx"}, - {packet1_tx_proc_info, "packet1_tx"}, - {packet2_tx_proc_info, "packet2_tx"}, - {packet3_tx_proc_info, "packet3_tx"}, - {packet4_dag_block_proc_info, "packet4_dag_block"}, - {packet5_dag_block_proc_info, "packet5_dag_block"}, - {packet8_status_proc_info, "packet8_status"}, - {packet9_status_proc_info, "packet9_status"}, - {packet12_vote_proc_info, "packet12_vote"}, - {packet13_vote_proc_info, "packet13_vote"}, - {packet14_get_pbft_next_votes_proc_info, "packet14_get_pbft_next_votes"}, - {packet15_get_pbft_next_votes_proc_info, "packet15_get_pbft_next_votes"}, - {packet16_pbft_next_votes_proc_info, "packet16_pbft_next_votes"}, - {packet17_pbft_next_votes_proc_info, "packet17_pbft_next_votes"}, - }); -} + TEST_F(TarcapTpTest, block_free_packets) { + HandlersInitData init_data = createHandlersInitData(); + + // Creates sender 2 to bypass peer order block on Transaction -> 
DagBlock packet. In case those packets sent + // 2 different senders those packets are "block-free" + dev::p2p::NodeID sender2(3); + auto peer = init_data.peers_state->addPendingPeer(sender2, ""); + init_data.peers_state->setPeerAsReadyToSendMessages(sender2, peer); + + auto packets_handler = std::make_shared(); + + packets_handler->registerHandler(init_data, "TX_PH", 20); + packets_handler->registerHandler(init_data, "DAG_BLOCK_PH", 20); + packets_handler->registerHandler(init_data, "STATUS_PH", 20); + packets_handler->registerHandler(init_data, "VOTE_PH", 20); + packets_handler->registerHandler(init_data, "GET_NEXT_VOTES_SYNC_PH", 20); + packets_handler->registerHandler(init_data, "VOTES_SYNC_PH", 20); + + // Creates threadpool + // Note: make num of threads >= num of packets to check if they are processed concurrently without blocks, + otherwise + // some blocks would be blocked for processing due to max threads limit + threadpool::PacketsThreadPool tp(18); + tp.setPacketsHandlers(TARAXA_NET_VERSION, packets_handler); + + // Pushes packets to the tp + auto packet = createPacket(init_data.copySender(), SubprotocolPacketType::kTransactionPacket, {}); + if (packet.second.rlp_.isList()) { + std::cout << "is list"; + } else { + std::cout << "not list"; + } + const auto packet0_tx_id = + tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kTransactionPacket, {})).value(); + const auto packet1_tx_id = + tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kTransactionPacket, {})).value(); + const auto packet2_tx_id = + tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kTransactionPacket, {})).value(); + const auto packet3_tx_id = + tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kTransactionPacket, {})).value(); + + const auto packet4_dag_block_id = + tp.push( + createPacket(dev::p2p::NodeID(sender2), SubprotocolPacketType::kDagBlockPacket, {createDagBlockRlp(0, + 1)})) + .value(); + const auto 
packet5_dag_block_id = + tp.push( + createPacket(dev::p2p::NodeID(sender2), SubprotocolPacketType::kDagBlockPacket, {createDagBlockRlp(0, + 2)})) + .value(); + + const auto packet8_status_id = + tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kStatusPacket, {})).value(); + const auto packet9_status_id = + tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kStatusPacket, {})).value(); + + const auto packet12_vote_id = + tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kVotePacket, {})).value(); + const auto packet13_vote_id = + tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kVotePacket, {})).value(); + + const auto packet14_get_pbft_next_votes_id = + tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kGetNextVotesSyncPacket, {})).value(); + const auto packet15_get_pbft_next_votes_id = + tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kGetNextVotesSyncPacket, {})).value(); + + const auto packet16_pbft_next_votes_id = + tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kVotesBundlePacket, {})).value(); + + size_t packets_count = 0; + const auto packet17_pbft_next_votes_id = packets_count = + tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kVotesBundlePacket, {})).value(); + + tp.startProcessing(); + + // How should packets be processed: + // Note: To understand how are different packet types processed (concurrently without any blocking dependencies or + // synchronously due to some blocking dependencies - depends on situation), check + // PriorityQueue::updateDependenciesStart + /* + ---------------------- + - packet0_transaction - + ---------------------- + ---------------------- + - packet1_transaction - + ---------------------- + ----------------------- + - packet2_transaction - + ----------------------- + + -||- + ... 
+ + ----------------------- + - packet17_votes_sync - + ----------------------- + 0.....................20.................... time [ms] + */ + + // All packets should be already being processed after short amount of time + std::this_thread::sleep_for(QUEUE_EMPTIED_WAIT_TRESHOLD_MS); + EXPECT_EQ(queuesSize(tp), 0); + + // Wait until processing of all packets is finished - in some edge cases it might be little bit delayed due to + locking EXPECT_HAPPENS({500s, 20ms}, [&](auto& ctx) { + // Check if transactions was propagated to node0 + WAIT_EXPECT_EQ(ctx, init_data.packets_processing_info->getPacketProcessingTimesCount(), packets_count + 1) + }); + + // Check order of packets how they were processed + const auto packets_proc_info = init_data.packets_processing_info; + + const auto packet0_tx_proc_info = packets_proc_info->getPacketProcessingTimes(packet0_tx_id); + const auto packet1_tx_proc_info = packets_proc_info->getPacketProcessingTimes(packet1_tx_id); + const auto packet2_tx_proc_info = packets_proc_info->getPacketProcessingTimes(packet2_tx_id); + const auto packet3_tx_proc_info = packets_proc_info->getPacketProcessingTimes(packet3_tx_id); + + const auto packet4_dag_block_proc_info = packets_proc_info->getPacketProcessingTimes(packet4_dag_block_id); + const auto packet5_dag_block_proc_info = packets_proc_info->getPacketProcessingTimes(packet5_dag_block_id); + + const auto packet8_status_proc_info = packets_proc_info->getPacketProcessingTimes(packet8_status_id); + const auto packet9_status_proc_info = packets_proc_info->getPacketProcessingTimes(packet9_status_id); + + const auto packet12_vote_proc_info = packets_proc_info->getPacketProcessingTimes(packet12_vote_id); + const auto packet13_vote_proc_info = packets_proc_info->getPacketProcessingTimes(packet13_vote_id); + + const auto packet14_get_pbft_next_votes_proc_info = + packets_proc_info->getPacketProcessingTimes(packet14_get_pbft_next_votes_id); + const auto packet15_get_pbft_next_votes_proc_info = + 
packets_proc_info->getPacketProcessingTimes(packet15_get_pbft_next_votes_id); + + const auto packet16_pbft_next_votes_proc_info = + packets_proc_info->getPacketProcessingTimes(packet16_pbft_next_votes_id); + const auto packet17_pbft_next_votes_proc_info = + packets_proc_info->getPacketProcessingTimes(packet17_pbft_next_votes_id); + + checkConcurrentProcessing({ + {packet0_tx_proc_info, "packet0_tx"}, + {packet1_tx_proc_info, "packet1_tx"}, + {packet2_tx_proc_info, "packet2_tx"}, + {packet3_tx_proc_info, "packet3_tx"}, + {packet4_dag_block_proc_info, "packet4_dag_block"}, + {packet5_dag_block_proc_info, "packet5_dag_block"}, + {packet8_status_proc_info, "packet8_status"}, + {packet9_status_proc_info, "packet9_status"}, + {packet12_vote_proc_info, "packet12_vote"}, + {packet13_vote_proc_info, "packet13_vote"}, + {packet14_get_pbft_next_votes_proc_info, "packet14_get_pbft_next_votes"}, + {packet15_get_pbft_next_votes_proc_info, "packet15_get_pbft_next_votes"}, + {packet16_pbft_next_votes_proc_info, "packet16_pbft_next_votes"}, + {packet17_pbft_next_votes_proc_info, "packet17_pbft_next_votes"}, + }); + } // Test "hard blocking dependencies" related synchronous processing of certain packets: // // Packets types that are currently hard blocked for processing in another threads due to dependencies, // e.g. syncing packets must be processed synchronously one by one, etc... // Each packet type might be simultaneously blocked by multiple different packets that are being processed. 
-TEST_F(TarcapTpTest, hard_blocking_deps) { - HandlersInitData init_data = createHandlersInitData(); - - auto packets_handler = std::make_shared(); - packets_handler->registerHandler(init_data, "GET_DAG_SYNC_PH", 20); - packets_handler->registerHandler(init_data, "GET_PBFT_SYNC_PH", 20); - packets_handler->registerHandler(init_data, "DAG_SYNC_PH", 20); - packets_handler->registerHandler(init_data, "PBFT_SYNC_PH", 20); - - // Creates threadpool - threadpool::PacketsThreadPool tp(10); - tp.setPacketsHandlers(TARAXA_NET_VERSION, packets_handler); - - // Pushes packets to the tp - const auto packet0_dag_sync_id = - tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::DagSyncPacket, {})).value(); - const auto packet1_dag_sync_id = - tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::DagSyncPacket, {})).value(); - const auto packet2_get_dag_sync_id = - tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::GetDagSyncPacket, {})).value(); - const auto packet3_get_dag_sync_id = - tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::GetDagSyncPacket, {})).value(); - const auto packet4_get_pbft_sync_id = - tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::GetPbftSyncPacket, {})).value(); - const auto packet5_get_pbft_sync_id = - tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::GetPbftSyncPacket, {})).value(); - const auto packet6_pbft_sync_id = - tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::PbftSyncPacket, {})).value(); - const auto packet7_pbft_sync_id = - tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::PbftSyncPacket, {})).value(); - - size_t packets_count = 0; - const auto packet8_get_dag_sync_id = packets_count = - tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::GetDagSyncPacket, {})).value(); - - tp.startProcessing(); - - // How should packets be processed: - // Note: To understand how are different packet types 
processed (concurrently without any blocking dependencies or - // synchronously due to some blocking dependencies - depends on situation), check - // PriorityQueue::updateDependenciesStart - /* - ------------------------ - --- packet0_dag_sync --- - ------------------------ - ------------------------ - --- packet1_dag_sync --- - ------------------------ - ------------------------- - -- packet2_get_dag_sync - - ------------------------- - ------------------------- - -- packet3_get_dag_sync - - ------------------------- - ------------------------- - - packet4_get_pbft_sync - - ------------------------- - ------------------------- - - packet5_get_pbft_sync - - ------------------------- - ------------------------ - --- packet6_pbft_sync -- - ------------------------ - ------------------------ - --- packet7_pbft_sync -- - ------------------------ - ------------------------ - - packet8_get_dag_sync - - ------------------------ - 0......................20........................40........................60.......... 
time - */ - - // All packets should be already being processed after short amount of time - std::this_thread::sleep_for(60ms + QUEUE_EMPTIED_WAIT_TRESHOLD_MS); - EXPECT_EQ(queuesSize(tp), 0); - - // Wait until processing of all packets is finished - in some edge cases it might be little bit delayed due to locking - EXPECT_HAPPENS({500s, 20ms}, [&](auto& ctx) { - // Check if transactions was propagated to node0 - WAIT_EXPECT_EQ(ctx, init_data.packets_processing_info->getPacketProcessingTimesCount(), packets_count + 1) - }); - - // Check order of packets how they were processed - const auto packets_proc_info = init_data.packets_processing_info; - - const auto packet0_dag_sync_proc_info = packets_proc_info->getPacketProcessingTimes(packet0_dag_sync_id); - const auto packet1_dag_sync_proc_info = packets_proc_info->getPacketProcessingTimes(packet1_dag_sync_id); - const auto packet2_get_dag_sync_proc_info = packets_proc_info->getPacketProcessingTimes(packet2_get_dag_sync_id); - const auto packet3_get_dag_sync_proc_info = packets_proc_info->getPacketProcessingTimes(packet3_get_dag_sync_id); - const auto packet4_get_pbft_sync_proc_info = packets_proc_info->getPacketProcessingTimes(packet4_get_pbft_sync_id); - const auto packet5_get_pbft_sync_proc_info = packets_proc_info->getPacketProcessingTimes(packet5_get_pbft_sync_id); - const auto packet6_pbft_sync_proc_info = packets_proc_info->getPacketProcessingTimes(packet6_pbft_sync_id); - const auto packet7_pbft_sync_proc_info = packets_proc_info->getPacketProcessingTimes(packet7_pbft_sync_id); - const auto packet8_get_dag_sync_proc_info = packets_proc_info->getPacketProcessingTimes(packet8_get_dag_sync_id); - - checkConcurrentProcessing({ - {packet0_dag_sync_proc_info, "packet0_dag_sync"}, - {packet2_get_dag_sync_proc_info, "packet2_get_dag_sync"}, - {packet4_get_pbft_sync_proc_info, "packet4_get_pbft_sync"}, - {packet6_pbft_sync_proc_info, "packet6_pbft_sync"}, - }); - - checkConcurrentProcessing({ - 
{packet1_dag_sync_proc_info, "packet1_dag_sync"}, - {packet3_get_dag_sync_proc_info, "packet3_get_dag_sync"}, - {packet5_get_pbft_sync_proc_info, "packet5_get_pbft_sync"}, - {packet7_pbft_sync_proc_info, "packet7_pbft_sync"}, - }); - - EXPECT_GT(packet1_dag_sync_proc_info.start_time_, packet0_dag_sync_proc_info.finish_time_); - EXPECT_GT(packet3_get_dag_sync_proc_info.start_time_, packet2_get_dag_sync_proc_info.finish_time_); - EXPECT_GT(packet5_get_pbft_sync_proc_info.start_time_, packet4_get_pbft_sync_proc_info.finish_time_); - EXPECT_GT(packet7_pbft_sync_proc_info.start_time_, packet6_pbft_sync_proc_info.finish_time_); - - EXPECT_GT(packet8_get_dag_sync_proc_info.start_time_, packet3_get_dag_sync_proc_info.finish_time_); -} + TEST_F(TarcapTpTest, hard_blocking_deps) { + HandlersInitData init_data = createHandlersInitData(); + + auto packets_handler = std::make_shared(); + packets_handler->registerHandler(init_data, "GET_DAG_SYNC_PH", 20); + packets_handler->registerHandler(init_data, "GET_PBFT_SYNC_PH", 20); + packets_handler->registerHandler(init_data, "DAG_SYNC_PH", 20); + packets_handler->registerHandler(init_data, "PBFT_SYNC_PH", 20); + + // Creates threadpool + threadpool::PacketsThreadPool tp(10); + tp.setPacketsHandlers(TARAXA_NET_VERSION, packets_handler); + + // Pushes packets to the tp + const auto packet0_dag_sync_id = + tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kDagSyncPacket, {})).value(); + const auto packet1_dag_sync_id = + tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kDagSyncPacket, {})).value(); + const auto packet2_get_dag_sync_id = + tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kGetDagSyncPacket, {})).value(); + const auto packet3_get_dag_sync_id = + tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kGetDagSyncPacket, {})).value(); + const auto packet4_get_pbft_sync_id = + tp.push(createPacket(init_data.copySender(), 
SubprotocolPacketType::kGetPbftSyncPacket, {})).value(); + const auto packet5_get_pbft_sync_id = + tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kGetPbftSyncPacket, {})).value(); + const auto packet6_pbft_sync_id = + tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kPbftSyncPacket, {})).value(); + const auto packet7_pbft_sync_id = + tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kPbftSyncPacket, {})).value(); + + size_t packets_count = 0; + const auto packet8_get_dag_sync_id = packets_count = + tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kGetDagSyncPacket, {})).value(); + + tp.startProcessing(); + + // How should packets be processed: + // Note: To understand how are different packet types processed (concurrently without any blocking dependencies or + // synchronously due to some blocking dependencies - depends on situation), check + // PriorityQueue::updateDependenciesStart + /* + ------------------------ + --- packet0_dag_sync --- + ------------------------ + ------------------------ + --- packet1_dag_sync --- + ------------------------ + ------------------------- + -- packet2_get_dag_sync - + ------------------------- + ------------------------- + -- packet3_get_dag_sync - + ------------------------- + ------------------------- + - packet4_get_pbft_sync - + ------------------------- + ------------------------- + - packet5_get_pbft_sync - + ------------------------- + ------------------------ + --- packet6_pbft_sync -- + ------------------------ + ------------------------ + --- packet7_pbft_sync -- + ------------------------ + ------------------------ + - packet8_get_dag_sync - + ------------------------ + 0......................20........................40........................60.......... 
time + */ + + // All packets should be already being processed after short amount of time + std::this_thread::sleep_for(60ms + QUEUE_EMPTIED_WAIT_TRESHOLD_MS); + EXPECT_EQ(queuesSize(tp), 0); + + // Wait until processing of all packets is finished - in some edge cases it might be little bit delayed due to + locking EXPECT_HAPPENS({500s, 20ms}, [&](auto& ctx) { + // Check if transactions was propagated to node0 + WAIT_EXPECT_EQ(ctx, init_data.packets_processing_info->getPacketProcessingTimesCount(), packets_count + 1) + }); + + // Check order of packets how they were processed + const auto packets_proc_info = init_data.packets_processing_info; + + const auto packet0_dag_sync_proc_info = packets_proc_info->getPacketProcessingTimes(packet0_dag_sync_id); + const auto packet1_dag_sync_proc_info = packets_proc_info->getPacketProcessingTimes(packet1_dag_sync_id); + const auto packet2_get_dag_sync_proc_info = packets_proc_info->getPacketProcessingTimes(packet2_get_dag_sync_id); + const auto packet3_get_dag_sync_proc_info = packets_proc_info->getPacketProcessingTimes(packet3_get_dag_sync_id); + const auto packet4_get_pbft_sync_proc_info = packets_proc_info->getPacketProcessingTimes(packet4_get_pbft_sync_id); + const auto packet5_get_pbft_sync_proc_info = packets_proc_info->getPacketProcessingTimes(packet5_get_pbft_sync_id); + const auto packet6_pbft_sync_proc_info = packets_proc_info->getPacketProcessingTimes(packet6_pbft_sync_id); + const auto packet7_pbft_sync_proc_info = packets_proc_info->getPacketProcessingTimes(packet7_pbft_sync_id); + const auto packet8_get_dag_sync_proc_info = packets_proc_info->getPacketProcessingTimes(packet8_get_dag_sync_id); + + checkConcurrentProcessing({ + {packet0_dag_sync_proc_info, "packet0_dag_sync"}, + {packet2_get_dag_sync_proc_info, "packet2_get_dag_sync"}, + {packet4_get_pbft_sync_proc_info, "packet4_get_pbft_sync"}, + {packet6_pbft_sync_proc_info, "packet6_pbft_sync"}, + }); + + checkConcurrentProcessing({ + 
{packet1_dag_sync_proc_info, "packet1_dag_sync"}, + {packet3_get_dag_sync_proc_info, "packet3_get_dag_sync"}, + {packet5_get_pbft_sync_proc_info, "packet5_get_pbft_sync"}, + {packet7_pbft_sync_proc_info, "packet7_pbft_sync"}, + }); + + EXPECT_GT(packet1_dag_sync_proc_info.start_time_, packet0_dag_sync_proc_info.finish_time_); + EXPECT_GT(packet3_get_dag_sync_proc_info.start_time_, packet2_get_dag_sync_proc_info.finish_time_); + EXPECT_GT(packet5_get_pbft_sync_proc_info.start_time_, packet4_get_pbft_sync_proc_info.finish_time_); + EXPECT_GT(packet7_pbft_sync_proc_info.start_time_, packet6_pbft_sync_proc_info.finish_time_); + + EXPECT_GT(packet8_get_dag_sync_proc_info.start_time_, packet3_get_dag_sync_proc_info.finish_time_); + } // Test "peer-order blocking dependencies" related to specific (peer & order) combination: // @@ -619,249 +630,255 @@ TEST_F(TarcapTpTest, hard_blocking_deps) { // time (order), e.g.: new dag block packet processing is blocked until all transactions packets that were received // before it are processed. 
This blocking dependency is applied only for the same peer so transaction packet from one // peer does not block new dag block packet from another peer -TEST_F(TarcapTpTest, peer_order_blocking_deps) { - HandlersInitData init_data = createHandlersInitData(); - - auto packets_handler = std::make_shared(); - packets_handler->registerHandler(init_data, "TX_PH", 20); - packets_handler->registerHandler(init_data, "DAG_BLOCK_PH", 0); - packets_handler->registerHandler(init_data, "SYNC_TEST_PH", 40); - - // Creates threadpool - threadpool::PacketsThreadPool tp(10); - tp.setPacketsHandlers(TARAXA_NET_VERSION, packets_handler); - - // Pushes packets to the tp - const auto packet0_tx_id = - tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::TransactionPacket)).value(); - const auto packet1_tx_id = - tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::TransactionPacket)).value(); - const auto packet2_dag_sync_id = - tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::DagSyncPacket)).value(); - const auto packet3_tx_id = - tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::TransactionPacket)).value(); - - size_t packets_count = 0; - const auto packet4_dag_block_id = packets_count = - tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::DagBlockPacket, {createDagBlockRlp(1)})) - .value(); - - // How should packets be processed: - // Note: To understand how are different packet types processed (concurrently without any blocking dependencies or - // synchronously due to some blocking dependencies - depends on situation), check - // PriorityQueue::updateDependenciesStart - /* - -------------- - - packet0_tx - - -------------- - -------------- - - packet1_tx - - -------------- - ---------------------------- - ----- packet2_dag_sync ----- - ---------------------------- - -------------- - - packet3_tx - - -------------- - --------------------- - - packet4_dag_block - - --------------------- - 
0............20.............40....................60.................. time [ms] - */ - - tp.startProcessing(); - - // All packets should be already being processed after short amount of time - std::this_thread::sleep_for(60ms + QUEUE_EMPTIED_WAIT_TRESHOLD_MS); - EXPECT_EQ(queuesSize(tp), 0); - - // Wait until processing of all packets is finished - in some edge cases it might be little bit delayed due to locking - EXPECT_HAPPENS({500s, 20ms}, [&](auto& ctx) { - // Check if transactions was propagated to node0 - WAIT_EXPECT_EQ(ctx, init_data.packets_processing_info->getPacketProcessingTimesCount(), packets_count + 1) - }); - - // Check order of packets how they were processed - const auto packets_proc_info = init_data.packets_processing_info; - - const auto packet0_tx_proc_info = packets_proc_info->getPacketProcessingTimes(packet0_tx_id); - const auto packet1_tx_proc_info = packets_proc_info->getPacketProcessingTimes(packet1_tx_id); - const auto packet3_tx_proc_info = packets_proc_info->getPacketProcessingTimes(packet3_tx_id); - const auto packet4_dag_block_proc_info = packets_proc_info->getPacketProcessingTimes(packet4_dag_block_id); - const auto packet2_dag_sync_proc_info = packets_proc_info->getPacketProcessingTimes(packet2_dag_sync_id); - - checkConcurrentProcessing({ - {packet0_tx_proc_info, "packet0_tx"}, - {packet1_tx_proc_info, "packet1_tx"}, - {packet2_dag_sync_proc_info, "packet2_dag_sync"}, - {packet3_tx_proc_info, "packet3_tx"}, - }); - - EXPECT_GT(packet2_dag_sync_proc_info.finish_time_, packet0_tx_proc_info.finish_time_); - EXPECT_GT(packet2_dag_sync_proc_info.finish_time_, packet1_tx_proc_info.finish_time_); - EXPECT_GT(packet2_dag_sync_proc_info.finish_time_, packet3_tx_proc_info.finish_time_); - - EXPECT_GT(packet4_dag_block_proc_info.start_time_, packet2_dag_sync_proc_info.finish_time_); -} + TEST_F(TarcapTpTest, peer_order_blocking_deps) { + HandlersInitData init_data = createHandlersInitData(); + + auto packets_handler = std::make_shared(); + 
packets_handler->registerHandler(init_data, "TX_PH", 20); + packets_handler->registerHandler(init_data, "DAG_BLOCK_PH", 0); + packets_handler->registerHandler(init_data, "SYNC_TEST_PH", 40); + + // Creates threadpool + threadpool::PacketsThreadPool tp(10); + tp.setPacketsHandlers(TARAXA_NET_VERSION, packets_handler); + + // Pushes packets to the tp + const auto packet0_tx_id = + tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kTransactionPacket)).value(); + const auto packet1_tx_id = + tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kTransactionPacket)).value(); + const auto packet2_dag_sync_id = + tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kDagSyncPacket)).value(); + const auto packet3_tx_id = + tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kTransactionPacket)).value(); + + size_t packets_count = 0; + const auto packet4_dag_block_id = packets_count = + tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kDagBlockPacket, {createDagBlockRlp(1)})) + .value(); + + // How should packets be processed: + // Note: To understand how are different packet types processed (concurrently without any blocking dependencies or + // synchronously due to some blocking dependencies - depends on situation), check + // PriorityQueue::updateDependenciesStart + /* + -------------- + - packet0_tx - + -------------- + -------------- + - packet1_tx - + -------------- + ---------------------------- + ----- packet2_dag_sync ----- + ---------------------------- + -------------- + - packet3_tx - + -------------- + --------------------- + - packet4_dag_block - + --------------------- + 0............20.............40....................60.................. 
time [ms] + */ + + tp.startProcessing(); + + // All packets should be already being processed after short amount of time + std::this_thread::sleep_for(60ms + QUEUE_EMPTIED_WAIT_TRESHOLD_MS); + EXPECT_EQ(queuesSize(tp), 0); + + // Wait until processing of all packets is finished - in some edge cases it might be little bit delayed due to + locking EXPECT_HAPPENS({500s, 20ms}, [&](auto& ctx) { + // Check if transactions was propagated to node0 + WAIT_EXPECT_EQ(ctx, init_data.packets_processing_info->getPacketProcessingTimesCount(), packets_count + 1) + }); + + // Check order of packets how they were processed + const auto packets_proc_info = init_data.packets_processing_info; + + const auto packet0_tx_proc_info = packets_proc_info->getPacketProcessingTimes(packet0_tx_id); + const auto packet1_tx_proc_info = packets_proc_info->getPacketProcessingTimes(packet1_tx_id); + const auto packet3_tx_proc_info = packets_proc_info->getPacketProcessingTimes(packet3_tx_id); + const auto packet4_dag_block_proc_info = packets_proc_info->getPacketProcessingTimes(packet4_dag_block_id); + const auto packet2_dag_sync_proc_info = packets_proc_info->getPacketProcessingTimes(packet2_dag_sync_id); + + checkConcurrentProcessing({ + {packet0_tx_proc_info, "packet0_tx"}, + {packet1_tx_proc_info, "packet1_tx"}, + {packet2_dag_sync_proc_info, "packet2_dag_sync"}, + {packet3_tx_proc_info, "packet3_tx"}, + }); + + EXPECT_GT(packet2_dag_sync_proc_info.finish_time_, packet0_tx_proc_info.finish_time_); + EXPECT_GT(packet2_dag_sync_proc_info.finish_time_, packet1_tx_proc_info.finish_time_); + EXPECT_GT(packet2_dag_sync_proc_info.finish_time_, packet3_tx_proc_info.finish_time_); + + EXPECT_GT(packet4_dag_block_proc_info.start_time_, packet2_dag_sync_proc_info.finish_time_); + } // Test "dag-block blocking dependencies" related to dag blocks: // // Same dag blocks should not be processed at the same time -TEST_F(TarcapTpTest, same_dag_blks_ordering) { - HandlersInitData init_data = 
createHandlersInitData(); - - auto packets_handler = std::make_shared(); - packets_handler->registerHandler(init_data, "DAG_BLOCK_PH", 20); - - // Creates threadpool - threadpool::PacketsThreadPool tp(10); - tp.setPacketsHandlers(TARAXA_NET_VERSION, packets_handler); - - auto dag_block = createDagBlockRlp(0); - - // Pushes packets to the tp - const auto blk0_id = - tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::DagBlockPacket, {dag_block})).value(); - const auto blk1_id = - tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::DagBlockPacket, {dag_block})).value(); - const auto blk2_id = - tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::DagBlockPacket, {dag_block})).value(); - const auto blk3_id = - tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::DagBlockPacket, {dag_block})).value(); - - size_t packets_count = 0; - const auto blk4_id = packets_count = - tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::DagBlockPacket, {dag_block})).value(); - - tp.startProcessing(); - - // How should dag blocks packets be processed: - // Same dag blocks should not be processed concurrently but one after another - - // All packets should be already being processed after short amount of time - std::this_thread::sleep_for(200ms + QUEUE_EMPTIED_WAIT_TRESHOLD_MS); - EXPECT_EQ(queuesSize(tp), 0); - - // Wait until processing of all packets is finished - in some edge cases it might be little bit delayed due to locking - EXPECT_HAPPENS({500s, 20ms}, [&](auto& ctx) { - // Check if transactions was propagated to node0 - WAIT_EXPECT_EQ(ctx, init_data.packets_processing_info->getPacketProcessingTimesCount(), packets_count + 1) - }); - - // Check order of packets how they were processed - const auto packets_proc_info = init_data.packets_processing_info; - - const auto blk0_proc_info = packets_proc_info->getPacketProcessingTimes(blk0_id); - const auto blk1_proc_info = 
packets_proc_info->getPacketProcessingTimes(blk1_id); - const auto blk2_proc_info = packets_proc_info->getPacketProcessingTimes(blk2_id); - const auto blk3_proc_info = packets_proc_info->getPacketProcessingTimes(blk3_id); - const auto blk4_proc_info = packets_proc_info->getPacketProcessingTimes(blk4_id); - - checkSerialProcessing({ - {blk0_proc_info, "blk0"}, - {blk1_proc_info, "blk1"}, - {blk2_proc_info, "blk2"}, - {blk3_proc_info, "blk3"}, - {blk4_proc_info, "blk4"}, - }); -} + TEST_F(TarcapTpTest, same_dag_blks_ordering) { + HandlersInitData init_data = createHandlersInitData(); + + auto packets_handler = std::make_shared(); + packets_handler->registerHandler(init_data, "DAG_BLOCK_PH", 20); + + // Creates threadpool + threadpool::PacketsThreadPool tp(10); + tp.setPacketsHandlers(TARAXA_NET_VERSION, packets_handler); + + auto dag_block = createDagBlockRlp(0); + + // Pushes packets to the tp + const auto blk0_id = + tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kDagBlockPacket, {dag_block})).value(); + const auto blk1_id = + tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kDagBlockPacket, {dag_block})).value(); + const auto blk2_id = + tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kDagBlockPacket, {dag_block})).value(); + const auto blk3_id = + tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kDagBlockPacket, {dag_block})).value(); + + size_t packets_count = 0; + const auto blk4_id = packets_count = + tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kDagBlockPacket, {dag_block})).value(); + + tp.startProcessing(); + + // How should dag blocks packets be processed: + // Same dag blocks should not be processed concurrently but one after another + + // All packets should be already being processed after short amount of time + std::this_thread::sleep_for(200ms + QUEUE_EMPTIED_WAIT_TRESHOLD_MS); + EXPECT_EQ(queuesSize(tp), 0); + + // Wait until processing of all 
packets is finished - in some edge cases it might be little bit delayed due to + locking EXPECT_HAPPENS({500s, 20ms}, [&](auto& ctx) { + // Check if transactions was propagated to node0 + WAIT_EXPECT_EQ(ctx, init_data.packets_processing_info->getPacketProcessingTimesCount(), packets_count + 1) + }); + + // Check order of packets how they were processed + const auto packets_proc_info = init_data.packets_processing_info; + + const auto blk0_proc_info = packets_proc_info->getPacketProcessingTimes(blk0_id); + const auto blk1_proc_info = packets_proc_info->getPacketProcessingTimes(blk1_id); + const auto blk2_proc_info = packets_proc_info->getPacketProcessingTimes(blk2_id); + const auto blk3_proc_info = packets_proc_info->getPacketProcessingTimes(blk3_id); + const auto blk4_proc_info = packets_proc_info->getPacketProcessingTimes(blk4_id); + + checkSerialProcessing({ + {blk0_proc_info, "blk0"}, + {blk1_proc_info, "blk1"}, + {blk2_proc_info, "blk2"}, + {blk3_proc_info, "blk3"}, + {blk4_proc_info, "blk4"}, + }); + } // Test "dag-level blocking dependencies" related to dag blocks levels: // // Ideally only dag blocks with the same level should be processed. In reality there are situation when node receives // dag block with smaller level than the level of blocks that are already being processed. In such case these blocks -// with smaller levels can be processed concurrently with blocks that have higher level. 
All new dag blocks with higher -// level than the lowest level from all the blocks that currently being processed are blocked for processing -TEST_F(TarcapTpTest, dag_blks_lvls_ordering) { - HandlersInitData init_data = createHandlersInitData(); - - auto packets_handler = std::make_shared(); - packets_handler->registerHandler(init_data, "DAG_BLOCK_PH", 20); - - // Creates threadpool - threadpool::PacketsThreadPool tp(10); - tp.setPacketsHandlers(TARAXA_NET_VERSION, packets_handler); - - // Pushes packets to the tp - const auto blk0_lvl1_id = - tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::DagBlockPacket, {createDagBlockRlp(1, 1)})) - .value(); - const auto blk1_lvl1_id = - tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::DagBlockPacket, {createDagBlockRlp(1, 2)})) - .value(); - const auto blk2_lvl0_id = - tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::DagBlockPacket, {createDagBlockRlp(0, 3)})) - .value(); - const auto blk3_lvl1_id = - tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::DagBlockPacket, {createDagBlockRlp(1, 4)})) - .value(); - const auto blk4_lvl2_id = - tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::DagBlockPacket, {createDagBlockRlp(2, 5)})) - .value(); - - size_t packets_count = 0; - const auto blk5_lvl3_id = packets_count = - tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::DagBlockPacket, {createDagBlockRlp(3, 6)})) - .value(); - - tp.startProcessing(); - - // How should dag blocks packets be processed: - // Note: To understand how are different packet types processed (concurrently without any blocking dependencies or - // synchronously due to some blocking dependencies - depends on situation), check - // PriorityQueue::updateDependenciesStart - /* - ------------- - - blk0_lvl1 - - ------------- - ------------- - - blk1_lvl1 - - ------------- - ------------- - - blk2_lvl0 - - ------------- - ------------- - - blk3_lvl1 - - 
------------- - ------------- - - blk4_lvl2 - - ------------- - ------------- - - blk5_lvl3 - - ------------- - 0...........20............40............60.............80................. time [ms] - */ - - // All packets should be already being processed after short amount of time - std::this_thread::sleep_for(80ms + QUEUE_EMPTIED_WAIT_TRESHOLD_MS); - EXPECT_EQ(queuesSize(tp), 0); - - // Wait until processing of all packets is finished - in some edge cases it might be little bit delayed due to locking - EXPECT_HAPPENS({500s, 20ms}, [&](auto& ctx) { - // Check if transactions was propagated to node0 - WAIT_EXPECT_EQ(ctx, init_data.packets_processing_info->getPacketProcessingTimesCount(), packets_count + 1) - }); - - // Check order of packets how they were processed - const auto packets_proc_info = init_data.packets_processing_info; - - const auto blk0_lvl1_proc_info = packets_proc_info->getPacketProcessingTimes(blk0_lvl1_id); - const auto blk1_lvl1_proc_info = packets_proc_info->getPacketProcessingTimes(blk1_lvl1_id); - const auto blk2_lvl0_proc_info = packets_proc_info->getPacketProcessingTimes(blk2_lvl0_id); - const auto blk3_lvl1_proc_info = packets_proc_info->getPacketProcessingTimes(blk3_lvl1_id); - const auto blk4_lvl2_proc_info = packets_proc_info->getPacketProcessingTimes(blk4_lvl2_id); - const auto blk5_lvl3_proc_info = packets_proc_info->getPacketProcessingTimes(blk5_lvl3_id); - - checkConcurrentProcessing({ - {blk0_lvl1_proc_info, "blk0_lvl1"}, - {blk1_lvl1_proc_info, "blk1_lvl1"}, - {blk2_lvl0_proc_info, "blk2_lvl0"}, - }); - - EXPECT_GT(blk3_lvl1_proc_info.start_time_, blk2_lvl0_proc_info.finish_time_); - EXPECT_GT(blk4_lvl2_proc_info.start_time_, blk3_lvl1_proc_info.finish_time_); - EXPECT_GT(blk5_lvl3_proc_info.start_time_, blk4_lvl2_proc_info.finish_time_); -} +// with smaller levels can be processed concurrently with blocks that have higher level. 
All new dag blocks with +/ higher / level than the lowest level from all the blocks that currently being processed are blocked for processing + TEST_F(TarcapTpTest, dag_blks_lvls_ordering) { + HandlersInitData init_data = createHandlersInitData(); + + auto packets_handler = std::make_shared(); + packets_handler->registerHandler(init_data, "DAG_BLOCK_PH", 20); + + // Creates threadpool + threadpool::PacketsThreadPool tp(10); + tp.setPacketsHandlers(TARAXA_NET_VERSION, packets_handler); + + // Pushes packets to the tp + const auto blk0_lvl1_id = + tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kDagBlockPacket, {createDagBlockRlp(1, + 1)})) + .value(); + const auto blk1_lvl1_id = + tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kDagBlockPacket, {createDagBlockRlp(1, + 2)})) + .value(); + const auto blk2_lvl0_id = + tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kDagBlockPacket, {createDagBlockRlp(0, + 3)})) + .value(); + const auto blk3_lvl1_id = + tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kDagBlockPacket, {createDagBlockRlp(1, + 4)})) + .value(); + const auto blk4_lvl2_id = + tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kDagBlockPacket, {createDagBlockRlp(2, + 5)})) + .value(); + + size_t packets_count = 0; + const auto blk5_lvl3_id = packets_count = + tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kDagBlockPacket, {createDagBlockRlp(3, + 6)})) + .value(); + + tp.startProcessing(); + + // How should dag blocks packets be processed: + // Note: To understand how are different packet types processed (concurrently without any blocking dependencies or + // synchronously due to some blocking dependencies - depends on situation), check + // PriorityQueue::updateDependenciesStart + /* + ------------- + - blk0_lvl1 - + ------------- + ------------- + - blk1_lvl1 - + ------------- + ------------- + - blk2_lvl0 - + ------------- + ------------- + 
- blk3_lvl1 - + ------------- + ------------- + - blk4_lvl2 - + ------------- + ------------- + - blk5_lvl3 - + ------------- + 0...........20............40............60.............80................. time [ms] + */ + + // All packets should be already being processed after short amount of time + std::this_thread::sleep_for(80ms + QUEUE_EMPTIED_WAIT_TRESHOLD_MS); + EXPECT_EQ(queuesSize(tp), 0); + + // Wait until processing of all packets is finished - in some edge cases it might be little bit delayed due to + locking EXPECT_HAPPENS({500s, 20ms}, [&](auto& ctx) { + // Check if transactions was propagated to node0 + WAIT_EXPECT_EQ(ctx, init_data.packets_processing_info->getPacketProcessingTimesCount(), packets_count + 1) + }); + + // Check order of packets how they were processed + const auto packets_proc_info = init_data.packets_processing_info; + + const auto blk0_lvl1_proc_info = packets_proc_info->getPacketProcessingTimes(blk0_lvl1_id); + const auto blk1_lvl1_proc_info = packets_proc_info->getPacketProcessingTimes(blk1_lvl1_id); + const auto blk2_lvl0_proc_info = packets_proc_info->getPacketProcessingTimes(blk2_lvl0_id); + const auto blk3_lvl1_proc_info = packets_proc_info->getPacketProcessingTimes(blk3_lvl1_id); + const auto blk4_lvl2_proc_info = packets_proc_info->getPacketProcessingTimes(blk4_lvl2_id); + const auto blk5_lvl3_proc_info = packets_proc_info->getPacketProcessingTimes(blk5_lvl3_id); + + checkConcurrentProcessing({ + {blk0_lvl1_proc_info, "blk0_lvl1"}, + {blk1_lvl1_proc_info, "blk1_lvl1"}, + {blk2_lvl0_proc_info, "blk2_lvl0"}, + }); + + EXPECT_GT(blk3_lvl1_proc_info.start_time_, blk2_lvl0_proc_info.finish_time_); + EXPECT_GT(blk4_lvl2_proc_info.start_time_, blk3_lvl1_proc_info.finish_time_); + EXPECT_GT(blk5_lvl3_proc_info.start_time_, blk4_lvl2_proc_info.finish_time_); + } // Test threads borrowing // @@ -873,199 +890,203 @@ TEST_F(TarcapTpTest, dag_blks_lvls_ordering) { // be unused. 
In such cases priority queues max workers limits can and should be ignored. // // Always keep 1 reserved thread for each priority queue at all times -TEST_F(TarcapTpTest, threads_borrowing) { - HandlersInitData init_data = createHandlersInitData(); - - auto packets_handler = std::make_shared(); - packets_handler->registerHandler(init_data, "VOTE_PH", 100); - - // Creates threadpool - const size_t threads_num = 10; - threadpool::PacketsThreadPool tp(threads_num); - tp.setPacketsHandlers(TARAXA_NET_VERSION, packets_handler); - - // Pushes packets to the tp - std::vector pushed_packets_ids; - for (size_t i = 0; i < threads_num; i++) { - uint64_t packet_id = tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::VotePacket, {})).value(); - pushed_packets_ids.push_back(packet_id); - } - - tp.startProcessing(); - - // How should packets be processed: - // Note: To understand how are different packet types processed (concurrently without any blocking dependencies or - // synchronously due to some blocking dependencies - depends on situation), check - // PriorityQueue::updateDependenciesStart - // - // Note: each queue has 1 thread reserved at all times(even if it does not do anything) and there is 10 threads in - // total, even with borrowing only 8 threads could be used at the same time - /* - ---------------- - - packet0_vote - - ---------------- - ---------------- - - packet1_vote - - ---------------- - ---------------- - - packet2_vote - - ---------------- - - -||- - ... - - ---------------- - - packet7_vote - - ---------------- - ---------------- - - packet8_vote - - ---------------- - ---------------- - - packet9_vote - - ---------------- - 0..............100...............200........... 
time [ms] - */ - - // First 8 packets should be already processed by this time - std::this_thread::sleep_for(100ms + 50ms /* might take longer due to threads borrowing */); - EXPECT_LE(queuesSize(tp), 2); - - // Check order of packets how they were processed - const auto packets_proc_info = init_data.packets_processing_info; - - // In case some packet processing is not finished yet, getPacketProcessingTimes() returns default (empty) value - std::chrono::steady_clock::time_point default_time_point; - - // Because each queue has 1 thread reserved at all times(even if it does not do anything) and there is 10 threads in - // total, even with borrowing only 8 threads could be used at the same time, thus last 2 packets (9th & 10th) should - // not be processed after (100 + WAIT_TRESHOLD_MS) ms - EXPECT_EQ(packets_proc_info->getPacketProcessingTimes(pushed_packets_ids[8]).finish_time_, default_time_point); - EXPECT_EQ(packets_proc_info->getPacketProcessingTimes(pushed_packets_ids[9]).finish_time_, default_time_point); - - std::vector> packets_proc_info_vec; - for (size_t i = 0; i < threads_num - (threadpool::PacketData::PacketPriority::Count - 1); i++) { - packets_proc_info_vec.emplace_back(packets_proc_info->getPacketProcessingTimes(pushed_packets_ids[i]), - "packet" + std::to_string(pushed_packets_ids[i]) + "_vote"); - } - - // Check if first 8 pbft vote packets were processed concurrently -> threads from other queues had to be borrowed for - // that - checkConcurrentProcessing(packets_proc_info_vec); -} + TEST_F(TarcapTpTest, threads_borrowing) { + HandlersInitData init_data = createHandlersInitData(); + + auto packets_handler = std::make_shared(); + packets_handler->registerHandler(init_data, "VOTE_PH", 100); + + // Creates threadpool + const size_t threads_num = 10; + threadpool::PacketsThreadPool tp(threads_num); + tp.setPacketsHandlers(TARAXA_NET_VERSION, packets_handler); + + // Pushes packets to the tp + std::vector pushed_packets_ids; + for (size_t i = 0; i < 
threads_num; i++) { + uint64_t packet_id = tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kVotePacket, + {})).value(); pushed_packets_ids.push_back(packet_id); + } + + tp.startProcessing(); + + // How should packets be processed: + // Note: To understand how are different packet types processed (concurrently without any blocking dependencies or + // synchronously due to some blocking dependencies - depends on situation), check + // PriorityQueue::updateDependenciesStart + // + // Note: each queue has 1 thread reserved at all times(even if it does not do anything) and there is 10 threads in + // total, even with borrowing only 8 threads could be used at the same time + /* + ---------------- + - packet0_vote - + ---------------- + ---------------- + - packet1_vote - + ---------------- + ---------------- + - packet2_vote - + ---------------- + + -||- + ... + + ---------------- + - packet7_vote - + ---------------- + ---------------- + - packet8_vote - + ---------------- + ---------------- + - packet9_vote - + ---------------- + 0..............100...............200........... 
time [ms] + */ + + // First 8 packets should be already processed by this time + std::this_thread::sleep_for(100ms + 50ms /* might take longer due to threads borrowing */); + EXPECT_LE(queuesSize(tp), 2); + + // Check order of packets how they were processed + const auto packets_proc_info = init_data.packets_processing_info; + + // In case some packet processing is not finished yet, getPacketProcessingTimes() returns default (empty) value + std::chrono::steady_clock::time_point default_time_point; + + // Because each queue has 1 thread reserved at all times(even if it does not do anything) and there is 10 threads + in + // total, even with borrowing only 8 threads could be used at the same time, thus last 2 packets (9th & 10th) + should + // not be processed after (100 + WAIT_TRESHOLD_MS) ms + EXPECT_EQ(packets_proc_info->getPacketProcessingTimes(pushed_packets_ids[8]).finish_time_, default_time_point); + EXPECT_EQ(packets_proc_info->getPacketProcessingTimes(pushed_packets_ids[9]).finish_time_, default_time_point); + + std::vector> packets_proc_info_vec; + for (size_t i = 0; i < threads_num - (threadpool::PacketData::PacketPriority::Count - 1); i++) { + packets_proc_info_vec.emplace_back(packets_proc_info->getPacketProcessingTimes(pushed_packets_ids[i]), + "packet" + std::to_string(pushed_packets_ids[i]) + "_vote"); + } + + // Check if first 8 pbft vote packets were processed concurrently -> threads from other queues had to be borrowed + for + // that + checkConcurrentProcessing(packets_proc_info_vec); + } // Test low priority queue starvation // // It should never happen that packets from lower priority queues are waiting to be processed until all packets from // higher priority queues are processed -TEST_F(TarcapTpTest, low_priotity_queue_starvation) { - HandlersInitData init_data = createHandlersInitData(); - - auto packets_handler = std::make_shared(); - // Handler for packet from high priority queue - packets_handler->registerHandler(init_data, "VOTE_PH", 20); 
- - // Handler for packet from mid priority queue - packets_handler->registerHandler(init_data, "TX_PH", 20); - - // Handler for packet from low priority queue - packets_handler->registerHandler(init_data, "STATUS_PH", 20); - - // Creates threadpool - size_t threads_num = 10; - threadpool::PacketsThreadPool tp(threads_num); - tp.setPacketsHandlers(TARAXA_NET_VERSION, packets_handler); - - // Push 10x more packets for each prioriy queue than max tp capacity to make sure that tp wont be able to process all - // packets from each queue concurrently -> many packets will be waiting due to max threads num reached for specific - // priority queues - for (size_t i = 0; i < 2 * 10 * threads_num; i++) { - tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::VotePacket, {})).value(); - tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::TransactionPacket, {})).value(); - } + TEST_F(TarcapTpTest, low_priotity_queue_starvation) { + HandlersInitData init_data = createHandlersInitData(); + + auto packets_handler = std::make_shared(); + // Handler for packet from high priority queue + packets_handler->registerHandler(init_data, "VOTE_PH", 20); + + // Handler for packet from mid priority queue + packets_handler->registerHandler(init_data, "TX_PH", 20); + + // Handler for packet from low priority queue + packets_handler->registerHandler(init_data, "STATUS_PH", 20); + + // Creates threadpool + size_t threads_num = 10; + threadpool::PacketsThreadPool tp(threads_num); + tp.setPacketsHandlers(TARAXA_NET_VERSION, packets_handler); + + // Push 10x more packets for each prioriy queue than max tp capacity to make sure that tp wont be able to process + all + // packets from each queue concurrently -> many packets will be waiting due to max threads num reached for specific + // priority queues + for (size_t i = 0; i < 2 * 10 * threads_num; i++) { + tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kVotePacket, {})).value(); + 
tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kTransactionPacket, {})).value(); + } + + // Push a few packets low priority packets + for (size_t i = 0; i < 4; i++) { + tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kStatusPacket, {})).value(); + } + + tp.startProcessing(); + + // How should packets be processed: + // Note: To understand how are different packet types processed (concurrently without any blocking dependencies or + // synchronously due to some blocking dependencies - depends on situation), check + // PriorityQueue::updateDependenciesStart In this test are max concurrent processing limits for queues reached, so + // when we have 10 threads in thredpool: + // - 4 is limit for High priority queue - VotePacket + // - 4 is limit for Mid priority queue - TransactionPacket + // - 3 is limit for Low priority queue - StatusPacket, but because max total limit (10) is always checked first + // , low priority queue wont be able to use more than 2 threads concurrently + /* + ---------------- + - packet0_vote - + ---------------- + ---------------- + - packet1_vote - + ---------------- + ---------------- + - packet2_vote - + ---------------- + ---------------- + - packet3_vote - + ---------------- + ---------------- + -- packet4_tx -- + ---------------- + ---------------- + -- packet5_tx -- + ---------------- + ---------------- + -- packet6_tx -- + ---------------- + ---------------- + -- packet7_tx -- + ---------------- + + .... + votes and tx packets are processed concurrently 4 at a time until all of them are processed + + + ------------------ + - packet400_test - + ------------------ + ------------------ + - packet401_test - + ------------------ + ------------------ + - packet402_test - + ------------------ + ------------------ + - packet403_test - + ------------------ + 0.................20.................40................... 
time [ms] + */ - // Push a few packets low priority packets - for (size_t i = 0; i < 4; i++) { - tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::StatusPacket, {})).value(); - } + std::this_thread::sleep_for(40ms + QUEUE_EMPTIED_WAIT_TRESHOLD_MS); - tp.startProcessing(); - - // How should packets be processed: - // Note: To understand how are different packet types processed (concurrently without any blocking dependencies or - // synchronously due to some blocking dependencies - depends on situation), check - // PriorityQueue::updateDependenciesStart In this test are max concurrent processing limits for queues reached, so - // when we have 10 threads in thredpool: - // - 4 is limit for High priority queue - VotePacket - // - 4 is limit for Mid priority queue - TransactionPacket - // - 3 is limit for Low priority queue - StatusPacket, but because max total limit (10) is always checked first - // , low priority queue wont be able to use more than 2 threads concurrently - /* - ---------------- - - packet0_vote - - ---------------- - ---------------- - - packet1_vote - - ---------------- - ---------------- - - packet2_vote - - ---------------- - ---------------- - - packet3_vote - - ---------------- - ---------------- - -- packet4_tx -- - ---------------- - ---------------- - -- packet5_tx -- - ---------------- - ---------------- - -- packet6_tx -- - ---------------- - ---------------- - -- packet7_tx -- - ---------------- - - .... - votes and tx packets are processed concurrently 4 at a time until all of them are processed - - - ------------------ - - packet400_test - - ------------------ - ------------------ - - packet401_test - - ------------------ - ------------------ - - packet402_test - - ------------------ - ------------------ - - packet403_test - - ------------------ - 0.................20.................40................... 
time [ms] - */ - - std::this_thread::sleep_for(40ms + QUEUE_EMPTIED_WAIT_TRESHOLD_MS); + const auto [high_priority_queue_size, mid_priority_queue_size, low_priority_queue_size] = tp.getQueueSize(); - const auto [high_priority_queue_size, mid_priority_queue_size, low_priority_queue_size] = tp.getQueueSize(); - - EXPECT_GT(high_priority_queue_size, 0); - EXPECT_GT(mid_priority_queue_size, 0); - EXPECT_EQ(low_priority_queue_size, 0); -} + EXPECT_GT(high_priority_queue_size, 0); + EXPECT_GT(mid_priority_queue_size, 0); + EXPECT_EQ(low_priority_queue_size, 0); + } -} // namespace taraxa::core_tests + } // namespace taraxa::core_tests -int main(int argc, char** argv) { - using namespace taraxa; + int main(int argc, char** argv) { + using namespace taraxa; - auto logging = logger::createDefaultLoggingConfig(); + auto logging = logger::createDefaultLoggingConfig(); - // Set this to debug to see log msgs - logging.verbosity = logger::Verbosity::Debug; + // Set this to debug to see log msgs + logging.verbosity = logger::Verbosity::Debug; - addr_t node_addr; - logger::InitLogging(logging, node_addr); + addr_t node_addr; + logger::InitLogging(logging, node_addr); - ::testing::InitGoogleTest(&argc, argv); - return RUN_ALL_TESTS(); -} \ No newline at end of file + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); + } \ No newline at end of file From 5e4eec7c7beaecf5070e5671f6d20960335c25bd Mon Sep 17 00:00:00 2001 From: Jakub Fornadel Date: Mon, 14 Oct 2024 15:15:35 +0200 Subject: [PATCH 070/105] implement NET VERSION 5 tarcap --- libraries/common/include/common/constants.hpp | 2 +- .../packets/latest/dag_block_packet.hpp | 27 + .../tarcap/packets/latest/dag_sync_packet.hpp | 33 + .../packets/latest/get_dag_sync_packet.hpp | 24 + .../get_next_votes_bundle_packet.hpp | 7 +- .../{ => latest}/get_pbft_sync_packet.hpp | 6 +- .../get_pillar_votes_bundle_packet.hpp | 9 +- .../packets/latest/pbft_sync_packet.hpp | 33 + .../packets/latest/pillar_vote_packet.hpp | 24 + 
.../latest/pillar_votes_bundle_packet.hpp | 33 + .../tarcap/packets/latest/status_packet.hpp | 50 + .../packets/latest/transaction_packet.hpp | 32 + .../tarcap/packets/latest/vote_packet.hpp | 28 + .../packets/latest/votes_bundle_packet.hpp | 35 + .../packets/{ => v4}/dag_block_packet.hpp | 9 +- .../packets/{ => v4}/dag_sync_packet.hpp | 8 +- .../packets/{ => v4}/get_dag_sync_packet.hpp | 8 +- .../packets/{ => v4}/pbft_sync_packet.hpp | 8 +- .../packets/{ => v4}/pillar_vote_packet.hpp | 10 +- .../{ => v4}/pillar_votes_bundle_packet.hpp | 10 +- .../tarcap/packets/{ => v4}/status_packet.hpp | 14 +- .../packets/{ => v4}/transaction_packet.hpp | 14 +- .../tarcap/packets/{ => v4}/vote_packet.hpp | 12 +- .../packets/{ => v4}/votes_bundle_packet.hpp | 11 +- .../latest/common/packet_handler.hpp | 9 +- .../latest/dag_block_packet_handler.hpp | 2 +- .../latest/dag_sync_packet_handler.hpp | 2 +- .../latest/get_dag_sync_packet_handler.hpp | 2 +- .../get_next_votes_bundle_packet_handler.hpp | 2 +- .../latest/get_pbft_sync_packet_handler.hpp | 2 +- ...get_pillar_votes_bundle_packet_handler.hpp | 4 +- .../latest/pbft_sync_packet_handler.hpp | 5 +- .../latest/pillar_vote_packet_handler.hpp | 2 +- .../pillar_votes_bundle_packet_handler.hpp | 5 +- .../latest/status_packet_handler.hpp | 5 +- .../latest/transaction_packet_handler.hpp | 5 +- .../latest/vote_packet_handler.hpp | 6 +- .../latest/votes_bundle_packet_handler.hpp | 2 +- .../v4/dag_block_packet_handler.hpp | 36 + .../v4/dag_sync_packet_handler.hpp | 31 + .../v4/get_dag_sync_packet_handler.hpp | 38 + .../v4/pbft_sync_packet_handler.hpp | 40 + .../v4/pillar_vote_packet_handler.hpp | 25 + .../v4/pillar_votes_bundle_packet_handler.hpp | 22 + .../v4/status_packet_handler.hpp | 30 + .../v4/transaction_packet_handler.hpp | 76 + .../v4/vote_packet_handler.hpp | 35 + .../v4/votes_bundle_packet_handler.hpp | 33 + .../network/tarcap/taraxa_capability.hpp | 3 + libraries/core_libs/network/src/network.cpp | 9 +- 
.../latest/dag_block_packet_handler.cpp | 25 +- .../latest/dag_sync_packet_handler.cpp | 35 +- .../latest/get_dag_sync_packet_handler.cpp | 25 +- .../latest/get_pbft_sync_packet_handler.cpp | 24 +- ...get_pillar_votes_bundle_packet_handler.cpp | 29 +- .../latest/pbft_sync_packet_handler.cpp | 2 +- .../latest/pillar_vote_packet_handler.cpp | 5 +- .../latest/status_packet_handler.cpp | 15 +- .../latest/transaction_packet_handler.cpp | 23 +- .../latest/vote_packet_handler.cpp | 13 +- .../v4/dag_block_packet_handler.cpp | 228 ++ .../v4/dag_sync_packet_handler.cpp | 103 + .../v4/get_dag_sync_packet_handler.cpp | 81 + .../v4/pbft_sync_packet_handler.cpp | 266 ++ .../v4/pillar_vote_packet_handler.cpp | 55 + .../v4/pillar_votes_bundle_packet_handler.cpp | 32 + .../v4/status_packet_handler.cpp | 177 ++ .../v4/transaction_packet_handler.cpp | 176 ++ .../v4/vote_packet_handler.cpp | 126 + .../v4/votes_bundle_packet_handler.cpp | 102 + .../network/src/tarcap/taraxa_capability.cpp | 63 + .../storage/include/storage/storage.hpp | 1 + .../types/dag_block/include/dag/dag_block.hpp | 3 + libraries/types/dag_block/src/dag_block.cpp | 7 + .../pbft_block/include/pbft/pbft_block.hpp | 29 +- .../pbft_block/include/pbft/period_data.hpp | 3 + libraries/types/pbft_block/src/pbft_block.cpp | 7 + .../types/pbft_block/src/period_data.cpp | 7 + .../include/transaction/transaction.hpp | 3 + .../types/transaction/src/transaction.cpp | 7 + .../types/vote/include/vote/pbft_vote.hpp | 3 + libraries/types/vote/src/pbft_vote.cpp | 8 + tests/tarcap_threadpool_test.cpp | 2184 ++++++++--------- 83 files changed, 3415 insertions(+), 1330 deletions(-) create mode 100644 libraries/core_libs/network/include/network/tarcap/packets/latest/dag_block_packet.hpp create mode 100644 libraries/core_libs/network/include/network/tarcap/packets/latest/dag_sync_packet.hpp create mode 100644 libraries/core_libs/network/include/network/tarcap/packets/latest/get_dag_sync_packet.hpp rename 
libraries/core_libs/network/include/network/tarcap/packets/{ => latest}/get_next_votes_bundle_packet.hpp (83%) rename libraries/core_libs/network/include/network/tarcap/packets/{ => latest}/get_pbft_sync_packet.hpp (79%) rename libraries/core_libs/network/include/network/tarcap/packets/{ => latest}/get_pillar_votes_bundle_packet.hpp (71%) create mode 100644 libraries/core_libs/network/include/network/tarcap/packets/latest/pbft_sync_packet.hpp create mode 100644 libraries/core_libs/network/include/network/tarcap/packets/latest/pillar_vote_packet.hpp create mode 100644 libraries/core_libs/network/include/network/tarcap/packets/latest/pillar_votes_bundle_packet.hpp create mode 100644 libraries/core_libs/network/include/network/tarcap/packets/latest/status_packet.hpp create mode 100644 libraries/core_libs/network/include/network/tarcap/packets/latest/transaction_packet.hpp create mode 100644 libraries/core_libs/network/include/network/tarcap/packets/latest/vote_packet.hpp create mode 100644 libraries/core_libs/network/include/network/tarcap/packets/latest/votes_bundle_packet.hpp rename libraries/core_libs/network/include/network/tarcap/packets/{ => v4}/dag_block_packet.hpp (83%) rename libraries/core_libs/network/include/network/tarcap/packets/{ => v4}/dag_sync_packet.hpp (87%) rename libraries/core_libs/network/include/network/tarcap/packets/{ => v4}/get_dag_sync_packet.hpp (77%) rename libraries/core_libs/network/include/network/tarcap/packets/{ => v4}/pbft_sync_packet.hpp (90%) rename libraries/core_libs/network/include/network/tarcap/packets/{ => v4}/pillar_vote_packet.hpp (72%) rename libraries/core_libs/network/include/network/tarcap/packets/{ => v4}/pillar_votes_bundle_packet.hpp (76%) rename libraries/core_libs/network/include/network/tarcap/packets/{ => v4}/status_packet.hpp (84%) rename libraries/core_libs/network/include/network/tarcap/packets/{ => v4}/transaction_packet.hpp (83%) rename libraries/core_libs/network/include/network/tarcap/packets/{ => 
v4}/vote_packet.hpp (78%) rename libraries/core_libs/network/include/network/tarcap/packets/{ => v4}/votes_bundle_packet.hpp (81%) create mode 100644 libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/dag_block_packet_handler.hpp create mode 100644 libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/dag_sync_packet_handler.hpp create mode 100644 libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/get_dag_sync_packet_handler.hpp create mode 100644 libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/pbft_sync_packet_handler.hpp create mode 100644 libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/pillar_vote_packet_handler.hpp create mode 100644 libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/pillar_votes_bundle_packet_handler.hpp create mode 100644 libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/status_packet_handler.hpp create mode 100644 libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/transaction_packet_handler.hpp create mode 100644 libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/vote_packet_handler.hpp create mode 100644 libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/votes_bundle_packet_handler.hpp create mode 100644 libraries/core_libs/network/src/tarcap/packets_handlers/v4/dag_block_packet_handler.cpp create mode 100644 libraries/core_libs/network/src/tarcap/packets_handlers/v4/dag_sync_packet_handler.cpp create mode 100644 libraries/core_libs/network/src/tarcap/packets_handlers/v4/get_dag_sync_packet_handler.cpp create mode 100644 libraries/core_libs/network/src/tarcap/packets_handlers/v4/pbft_sync_packet_handler.cpp create mode 100644 libraries/core_libs/network/src/tarcap/packets_handlers/v4/pillar_vote_packet_handler.cpp create mode 100644 libraries/core_libs/network/src/tarcap/packets_handlers/v4/pillar_votes_bundle_packet_handler.cpp create mode 
100644 libraries/core_libs/network/src/tarcap/packets_handlers/v4/status_packet_handler.cpp create mode 100644 libraries/core_libs/network/src/tarcap/packets_handlers/v4/transaction_packet_handler.cpp create mode 100644 libraries/core_libs/network/src/tarcap/packets_handlers/v4/vote_packet_handler.cpp create mode 100644 libraries/core_libs/network/src/tarcap/packets_handlers/v4/votes_bundle_packet_handler.cpp diff --git a/libraries/common/include/common/constants.hpp b/libraries/common/include/common/constants.hpp index aea008f1ac..3a60e60d59 100644 --- a/libraries/common/include/common/constants.hpp +++ b/libraries/common/include/common/constants.hpp @@ -31,7 +31,7 @@ constexpr uint32_t kDefaultTransactionPoolSize{200000}; constexpr uint32_t kMaxNonFinalizedTransactions{1000000}; constexpr uint32_t kMaxNonFinalizedDagBlocks{100}; -const size_t kV3NetworkVersion = 3; +const size_t kV4NetworkVersion = 4; const uint32_t kRecentlyFinalizedTransactionsFactor = 2; diff --git a/libraries/core_libs/network/include/network/tarcap/packets/latest/dag_block_packet.hpp b/libraries/core_libs/network/include/network/tarcap/packets/latest/dag_block_packet.hpp new file mode 100644 index 0000000000..e7958db76d --- /dev/null +++ b/libraries/core_libs/network/include/network/tarcap/packets/latest/dag_block_packet.hpp @@ -0,0 +1,27 @@ +#pragma once + +#include "dag/dag_block.hpp" +#include "transaction/system_transaction.hpp" + +namespace taraxa::network::tarcap { + +struct DagBlockPacket { + DagBlockPacket() = default; + DagBlockPacket(const DagBlockPacket&) = default; + DagBlockPacket(DagBlockPacket&&) = default; + DagBlockPacket& operator=(const DagBlockPacket&) = default; + DagBlockPacket& operator=(DagBlockPacket&&) = default; + // TODO[2868]: optimize args + DagBlockPacket(const std::vector>& transactions, const DagBlock& dag_block) + : transactions(transactions), dag_block(dag_block) {} + + DagBlockPacket(const dev::RLP& packet_rlp) { *this = util::rlp_dec(packet_rlp); } + 
dev::bytes encodeRlp() { return util::rlp_enc(*this); } + + std::vector> transactions; + DagBlock dag_block; + + RLP_FIELDS_DEFINE_INPLACE(transactions, dag_block) +}; + +} // namespace taraxa::network::tarcap diff --git a/libraries/core_libs/network/include/network/tarcap/packets/latest/dag_sync_packet.hpp b/libraries/core_libs/network/include/network/tarcap/packets/latest/dag_sync_packet.hpp new file mode 100644 index 0000000000..fb22776e90 --- /dev/null +++ b/libraries/core_libs/network/include/network/tarcap/packets/latest/dag_sync_packet.hpp @@ -0,0 +1,33 @@ +#pragma once + +#include "dag/dag_block.hpp" +#include "transaction/system_transaction.hpp" + +namespace taraxa::network::tarcap { + +struct DagSyncPacket { + DagSyncPacket() = default; + DagSyncPacket(const DagSyncPacket&) = default; + DagSyncPacket(DagSyncPacket&&) = default; + DagSyncPacket& operator=(const DagSyncPacket&) = default; + DagSyncPacket& operator=(DagSyncPacket&&) = default; + DagSyncPacket(PbftPeriod request_period, PbftPeriod response_period, + std::vector>&& transactions, + std::vector>&& dag_blocks) + : request_period(request_period), + response_period(response_period), + transactions(std::move(transactions)), + dag_blocks(std::move(dag_blocks)) {} + DagSyncPacket(const dev::RLP& packet_rlp) { *this = util::rlp_dec(packet_rlp); } + + dev::bytes encodeRlp() { return util::rlp_enc(*this); } + + PbftPeriod request_period; + PbftPeriod response_period; + std::vector> transactions; + std::vector> dag_blocks; + + RLP_FIELDS_DEFINE_INPLACE(request_period, response_period, transactions, dag_blocks) +}; + +} // namespace taraxa::network::tarcap diff --git a/libraries/core_libs/network/include/network/tarcap/packets/latest/get_dag_sync_packet.hpp b/libraries/core_libs/network/include/network/tarcap/packets/latest/get_dag_sync_packet.hpp new file mode 100644 index 0000000000..47a072925e --- /dev/null +++ b/libraries/core_libs/network/include/network/tarcap/packets/latest/get_dag_sync_packet.hpp 
@@ -0,0 +1,24 @@ +#pragma once + +#include "dag/dag_block.hpp" +#include "transaction/system_transaction.hpp" + +namespace taraxa::network::tarcap { + +struct GetDagSyncPacket { + GetDagSyncPacket() = default; + GetDagSyncPacket(const GetDagSyncPacket&) = default; + GetDagSyncPacket(GetDagSyncPacket&&) = default; + GetDagSyncPacket& operator=(const GetDagSyncPacket&) = default; + GetDagSyncPacket& operator=(GetDagSyncPacket&&) = default; + + GetDagSyncPacket(const dev::RLP& packet_rlp) { *this = util::rlp_dec(packet_rlp); } + dev::bytes encodeRlp() { return util::rlp_enc(*this); } + + PbftPeriod peer_period; + std::vector blocks_hashes; + + RLP_FIELDS_DEFINE_INPLACE(peer_period, blocks_hashes) +}; + +} // namespace taraxa::network::tarcap diff --git a/libraries/core_libs/network/include/network/tarcap/packets/get_next_votes_bundle_packet.hpp b/libraries/core_libs/network/include/network/tarcap/packets/latest/get_next_votes_bundle_packet.hpp similarity index 83% rename from libraries/core_libs/network/include/network/tarcap/packets/get_next_votes_bundle_packet.hpp rename to libraries/core_libs/network/include/network/tarcap/packets/latest/get_next_votes_bundle_packet.hpp index 853feef923..da65e264df 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets/get_next_votes_bundle_packet.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets/latest/get_next_votes_bundle_packet.hpp @@ -2,7 +2,7 @@ #include "common/encoding_rlp.hpp" -namespace taraxa::network { +namespace taraxa::network::tarcap { struct GetNextVotesBundlePacket { GetNextVotesBundlePacket() = default; @@ -12,8 +12,7 @@ struct GetNextVotesBundlePacket { GetNextVotesBundlePacket& operator=(GetNextVotesBundlePacket&&) = default; GetNextVotesBundlePacket(const dev::RLP& packet_rlp) { *this = util::rlp_dec(packet_rlp); }; - - dev::bytes encode() { return util::rlp_enc(*this); } + dev::bytes encodeRlp() { return util::rlp_enc(*this); } PbftPeriod peer_pbft_period; PbftRound 
peer_pbft_round; @@ -21,4 +20,4 @@ struct GetNextVotesBundlePacket { RLP_FIELDS_DEFINE_INPLACE(peer_pbft_period, peer_pbft_round) }; -} // namespace taraxa::network +} // namespace taraxa::network::tarcap diff --git a/libraries/core_libs/network/include/network/tarcap/packets/get_pbft_sync_packet.hpp b/libraries/core_libs/network/include/network/tarcap/packets/latest/get_pbft_sync_packet.hpp similarity index 79% rename from libraries/core_libs/network/include/network/tarcap/packets/get_pbft_sync_packet.hpp rename to libraries/core_libs/network/include/network/tarcap/packets/latest/get_pbft_sync_packet.hpp index 5372370914..e373501ba3 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets/get_pbft_sync_packet.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets/latest/get_pbft_sync_packet.hpp @@ -2,7 +2,7 @@ #include "common/encoding_rlp.hpp" -namespace taraxa::network { +namespace taraxa::network::tarcap { struct GetPbftSyncPacket { GetPbftSyncPacket() = default; @@ -12,11 +12,11 @@ struct GetPbftSyncPacket { GetPbftSyncPacket& operator=(GetPbftSyncPacket&&) = default; GetPbftSyncPacket(const dev::RLP& packet_rlp) { *this = util::rlp_dec(packet_rlp); } - dev::bytes encode() { return util::rlp_enc(*this); } + dev::bytes encodeRlp() { return util::rlp_enc(*this); } size_t height_to_sync; RLP_FIELDS_DEFINE_INPLACE(height_to_sync) }; -} // namespace taraxa::network +} // namespace taraxa::network::tarcap diff --git a/libraries/core_libs/network/include/network/tarcap/packets/get_pillar_votes_bundle_packet.hpp b/libraries/core_libs/network/include/network/tarcap/packets/latest/get_pillar_votes_bundle_packet.hpp similarity index 71% rename from libraries/core_libs/network/include/network/tarcap/packets/get_pillar_votes_bundle_packet.hpp rename to libraries/core_libs/network/include/network/tarcap/packets/latest/get_pillar_votes_bundle_packet.hpp index 85483f7d11..6c11b03e35 100644 --- 
a/libraries/core_libs/network/include/network/tarcap/packets/get_pillar_votes_bundle_packet.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets/latest/get_pillar_votes_bundle_packet.hpp @@ -2,7 +2,7 @@ #include "common/encoding_rlp.hpp" -namespace taraxa::network { +namespace taraxa::network::tarcap { struct GetPillarVotesBundlePacket { GetPillarVotesBundlePacket() = default; @@ -10,12 +10,13 @@ struct GetPillarVotesBundlePacket { GetPillarVotesBundlePacket(GetPillarVotesBundlePacket&&) = default; GetPillarVotesBundlePacket& operator=(const GetPillarVotesBundlePacket&) = default; GetPillarVotesBundlePacket& operator=(GetPillarVotesBundlePacket&&) = default; - + GetPillarVotesBundlePacket(PbftPeriod period, blk_hash_t pillar_block_hash) + : period(period), pillar_block_hash(pillar_block_hash) {} GetPillarVotesBundlePacket(const dev::RLP& packet_rlp) { *this = util::rlp_dec(packet_rlp); } - dev::bytes encode() { return util::rlp_enc(*this); } + dev::bytes encodeRlp() { return util::rlp_enc(*this); } PbftPeriod period; blk_hash_t pillar_block_hash; @@ -23,4 +24,4 @@ struct GetPillarVotesBundlePacket { RLP_FIELDS_DEFINE_INPLACE(period, pillar_block_hash) }; -} // namespace taraxa::network +} // namespace taraxa::network::tarcap diff --git a/libraries/core_libs/network/include/network/tarcap/packets/latest/pbft_sync_packet.hpp b/libraries/core_libs/network/include/network/tarcap/packets/latest/pbft_sync_packet.hpp new file mode 100644 index 0000000000..ac2ee8bd27 --- /dev/null +++ b/libraries/core_libs/network/include/network/tarcap/packets/latest/pbft_sync_packet.hpp @@ -0,0 +1,33 @@ +#pragma once + +#include "pbft/period_data.hpp" +#include "vote/pbft_vote.hpp" +#include "vote/votes_bundle_rlp.hpp" + +namespace taraxa::network::tarcap { + +struct PbftSyncPacket { + PbftSyncPacket() = default; + PbftSyncPacket(const PbftSyncPacket&) = default; + PbftSyncPacket(PbftSyncPacket&&) = default; + PbftSyncPacket& operator=(const PbftSyncPacket&) = default; + 
PbftSyncPacket& operator=(PbftSyncPacket&&) = default; + PbftSyncPacket(bool last_block, PeriodData&& period_data, + std::vector>&& current_block_cert_votes = {}) + : last_block(last_block), + period_data(std::move(period_data)), + current_block_cert_votes(std::move(current_block_cert_votes)) {} + PbftSyncPacket(const dev::RLP& packet_rlp) { *this = util::rlp_dec(packet_rlp); }; + + dev::bytes encodeRlp() { return util::rlp_enc(*this); } + + bool last_block; + PeriodData period_data; + // TODO: should it be optional ??? + // TODO[2870]: optimize rlp size (use custom class), see encodePbftVotesBundleRlp + std::vector> current_block_cert_votes; + + RLP_FIELDS_DEFINE_INPLACE(last_block, period_data, current_block_cert_votes) +}; + +} // namespace taraxa::network::tarcap diff --git a/libraries/core_libs/network/include/network/tarcap/packets/latest/pillar_vote_packet.hpp b/libraries/core_libs/network/include/network/tarcap/packets/latest/pillar_vote_packet.hpp new file mode 100644 index 0000000000..77c441132e --- /dev/null +++ b/libraries/core_libs/network/include/network/tarcap/packets/latest/pillar_vote_packet.hpp @@ -0,0 +1,24 @@ +#pragma once + +#include "common/encoding_rlp.hpp" +#include "network/tarcap/packets_handlers/latest/common/exceptions.hpp" +#include "vote/pillar_vote.hpp" + +namespace taraxa::network::tarcap { + +struct PillarVotePacket { + PillarVotePacket() = default; + PillarVotePacket(const PillarVotePacket&) = default; + PillarVotePacket(PillarVotePacket&&) = default; + PillarVotePacket& operator=(const PillarVotePacket&) = default; + PillarVotePacket& operator=(PillarVotePacket&&) = default; + PillarVotePacket(std::shared_ptr pillar_vote) : pillar_vote(std::move(pillar_vote)) {} + PillarVotePacket(const dev::RLP& packet_rlp) { *this = util::rlp_dec(packet_rlp); } + dev::bytes encodeRlp() { return util::rlp_enc(*this); } + + std::shared_ptr pillar_vote; + + RLP_FIELDS_DEFINE_INPLACE(pillar_vote) +}; + +} // namespace taraxa::network::tarcap diff 
--git a/libraries/core_libs/network/include/network/tarcap/packets/latest/pillar_votes_bundle_packet.hpp b/libraries/core_libs/network/include/network/tarcap/packets/latest/pillar_votes_bundle_packet.hpp new file mode 100644 index 0000000000..cc7d6cc37d --- /dev/null +++ b/libraries/core_libs/network/include/network/tarcap/packets/latest/pillar_votes_bundle_packet.hpp @@ -0,0 +1,33 @@ +#pragma once + +#include "common/encoding_rlp.hpp" +#include "network/tarcap/packets_handlers/latest/common/exceptions.hpp" +#include "vote/pillar_vote.hpp" + +namespace taraxa::network::tarcap { + +struct PillarVotesBundlePacket { + PillarVotesBundlePacket() = default; + PillarVotesBundlePacket(const PillarVotesBundlePacket&) = default; + PillarVotesBundlePacket(PillarVotesBundlePacket&&) = default; + PillarVotesBundlePacket& operator=(const PillarVotesBundlePacket&) = default; + PillarVotesBundlePacket& operator=(PillarVotesBundlePacket&&) = default; + PillarVotesBundlePacket(std::vector>&& pillar_votes) + : pillar_votes(std::move(pillar_votes)) {} + PillarVotesBundlePacket(const dev::RLP& packet_rlp) { + *this = util::rlp_dec(packet_rlp); + if (pillar_votes.size() == 0 || pillar_votes.size() > kMaxPillarVotesInBundleRlp) { + throw InvalidRlpItemsCountException("PillarVotesBundlePacket", pillar_votes.size(), kMaxPillarVotesInBundleRlp); + } + } + dev::bytes encodeRlp() { return util::rlp_enc(*this); } + + // TODO[2870]: optimize rlp size (use custom class), see encodePillarVotesBundleRlp + std::vector> pillar_votes; + + constexpr static size_t kMaxPillarVotesInBundleRlp{250}; + + RLP_FIELDS_DEFINE_INPLACE(pillar_votes) +}; + +} // namespace taraxa::network::tarcap diff --git a/libraries/core_libs/network/include/network/tarcap/packets/latest/status_packet.hpp b/libraries/core_libs/network/include/network/tarcap/packets/latest/status_packet.hpp new file mode 100644 index 0000000000..e20243a8fb --- /dev/null +++ 
b/libraries/core_libs/network/include/network/tarcap/packets/latest/status_packet.hpp @@ -0,0 +1,50 @@ +#pragma once + +namespace taraxa::network::tarcap { + +// TODO: create new version of this packet without manual parsing +struct StatusPacket { + StatusPacket() = default; + StatusPacket(const StatusPacket&) = default; + StatusPacket(StatusPacket&&) = default; + StatusPacket& operator=(const StatusPacket&) = default; + StatusPacket& operator=(StatusPacket&&) = default; + StatusPacket(PbftPeriod peer_pbft_chain_size, PbftRound peer_pbft_round, uint64_t peer_dag_level, bool peer_syncing, + std::optional peer_chain_id = {}, std::optional genesis_hash = {}, + std::optional node_major_version = {}, std::optional node_minor_version = {}, + std::optional node_patch_version = {}, std::optional is_light_node = {}, + std::optional node_history = {}) + : peer_pbft_chain_size(peer_pbft_chain_size), + peer_pbft_round(peer_pbft_round), + peer_dag_level(peer_dag_level), + peer_syncing(peer_syncing), + peer_chain_id(std::move(peer_chain_id)), + genesis_hash(std::move(genesis_hash)), + node_major_version(std::move(node_major_version)), + node_minor_version(std::move(node_minor_version)), + node_patch_version(std::move(node_patch_version)), + is_light_node(std::move(is_light_node)), + node_history(std::move(node_history)) {} + StatusPacket(const dev::RLP& packet_rlp) { *this = util::rlp_dec(packet_rlp); } + dev::bytes encodeRlp() { return util::rlp_enc(*this); } + + bool isInitialStatusPacket() const { return peer_chain_id.has_value(); } + + PbftPeriod peer_pbft_chain_size; + PbftRound peer_pbft_round; + uint64_t peer_dag_level; + bool peer_syncing; + std::optional peer_chain_id; + std::optional genesis_hash; + std::optional node_major_version; + std::optional node_minor_version; + std::optional node_patch_version; + std::optional is_light_node; + std::optional node_history; + + RLP_FIELDS_DEFINE_INPLACE(peer_pbft_chain_size, peer_pbft_round, peer_dag_level, peer_syncing, 
peer_chain_id, + genesis_hash, node_major_version, node_minor_version, node_patch_version, is_light_node, + node_history) +}; + +} // namespace taraxa::network::tarcap diff --git a/libraries/core_libs/network/include/network/tarcap/packets/latest/transaction_packet.hpp b/libraries/core_libs/network/include/network/tarcap/packets/latest/transaction_packet.hpp new file mode 100644 index 0000000000..ce7ab984cb --- /dev/null +++ b/libraries/core_libs/network/include/network/tarcap/packets/latest/transaction_packet.hpp @@ -0,0 +1,32 @@ +#pragma once + +#include "transaction/transaction.hpp" + +namespace taraxa::network::tarcap { + +struct TransactionPacket { + TransactionPacket() = default; + TransactionPacket(const TransactionPacket&) = default; + TransactionPacket(TransactionPacket&&) = default; + TransactionPacket& operator=(const TransactionPacket&) = default; + TransactionPacket& operator=(TransactionPacket&&) = default; + // TODO[2868]: optimize ctor + TransactionPacket(const std::vector>& transactions) : transactions(transactions) {} + TransactionPacket(const dev::RLP& packet_rlp) { + *this = util::rlp_dec(packet_rlp); + if (transactions.size() > kMaxTransactionsInPacket) { + throw InvalidRlpItemsCountException("TransactionPacket", transactions.size(), kMaxTransactionsInPacket); + } + } + dev::bytes encodeRlp() { return util::rlp_enc(*this); } + + std::vector> transactions; + + constexpr static uint32_t kTransactionPacketItemCount = 2; + constexpr static uint32_t kMaxTransactionsInPacket{500}; + constexpr static uint32_t kMaxHashesInPacket{5000}; + + RLP_FIELDS_DEFINE_INPLACE(transactions) +}; + +} // namespace taraxa::network::tarcap diff --git a/libraries/core_libs/network/include/network/tarcap/packets/latest/vote_packet.hpp b/libraries/core_libs/network/include/network/tarcap/packets/latest/vote_packet.hpp new file mode 100644 index 0000000000..e74e61ccc8 --- /dev/null +++ b/libraries/core_libs/network/include/network/tarcap/packets/latest/vote_packet.hpp @@ 
-0,0 +1,28 @@ +#pragma once + +#include "pbft/pbft_block.hpp" +#include "vote/pbft_vote.hpp" + +namespace taraxa::network::tarcap { + +struct VotePacket { + VotePacket() = default; + VotePacket(const VotePacket&) = default; + VotePacket(VotePacket&&) = default; + VotePacket& operator=(const VotePacket&) = default; + VotePacket& operator=(VotePacket&&) = default; + VotePacket(std::shared_ptr vote, std::shared_ptr pbft_block = {}, + std::optional peer_chain_size = {}) + : vote(std::move(vote)), pbft_block(std::move(pbft_block)), peer_chain_size(std::move(peer_chain_size)) {} + VotePacket(const dev::RLP& packet_rlp) { *this = util::rlp_dec(packet_rlp); }; + dev::bytes encodeRlp() { return util::rlp_enc(*this); } + + std::shared_ptr vote; + // TODO: Should it be also optional ? + std::shared_ptr pbft_block; + std::optional peer_chain_size; + + RLP_FIELDS_DEFINE_INPLACE(vote, pbft_block, peer_chain_size) +}; + +} // namespace taraxa::network::tarcap diff --git a/libraries/core_libs/network/include/network/tarcap/packets/latest/votes_bundle_packet.hpp b/libraries/core_libs/network/include/network/tarcap/packets/latest/votes_bundle_packet.hpp new file mode 100644 index 0000000000..d92e0df85f --- /dev/null +++ b/libraries/core_libs/network/include/network/tarcap/packets/latest/votes_bundle_packet.hpp @@ -0,0 +1,35 @@ +#pragma once + +#include "vote/pbft_vote.hpp" + +namespace taraxa::network::tarcap { + +struct VotesBundlePacket { + VotesBundlePacket() = default; + VotesBundlePacket(const VotesBundlePacket&) = default; + VotesBundlePacket(VotesBundlePacket&&) = default; + VotesBundlePacket& operator=(const VotesBundlePacket&) = default; + VotesBundlePacket& operator=(VotesBundlePacket&&) = default; + + VotesBundlePacket(const dev::RLP& packet_rlp) { + *this = util::rlp_dec(packet_rlp); + if (votes.size() == 0 || votes.size() > kMaxVotesInBundleRlp) { + throw InvalidRlpItemsCountException("VotesBundlePacket", votes.size(), kMaxVotesInBundleRlp); + } + }; + dev::bytes 
encodeRlp() { return util::rlp_enc(*this); } + + blk_hash_t votes_bundle_block_hash; + PbftPeriod votes_bundle_pbft_period; + PbftRound votes_bundle_pbft_round; + PbftStep votes_bundle_votes_step; + + std::vector> votes; + + constexpr static size_t kMaxVotesInBundleRlp{1000}; + + RLP_FIELDS_DEFINE_INPLACE(votes_bundle_block_hash, votes_bundle_pbft_period, votes_bundle_pbft_round, + votes_bundle_votes_step, votes) +}; + +} // namespace taraxa::network::tarcap diff --git a/libraries/core_libs/network/include/network/tarcap/packets/dag_block_packet.hpp b/libraries/core_libs/network/include/network/tarcap/packets/v4/dag_block_packet.hpp similarity index 83% rename from libraries/core_libs/network/include/network/tarcap/packets/dag_block_packet.hpp rename to libraries/core_libs/network/include/network/tarcap/packets/v4/dag_block_packet.hpp index 87c88a9e57..eb09ad7c10 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets/dag_block_packet.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets/v4/dag_block_packet.hpp @@ -1,11 +1,11 @@ #pragma once #include "dag/dag_block.hpp" +#include "network/tarcap/packets_handlers/latest/common/exceptions.hpp" #include "transaction/system_transaction.hpp" -namespace taraxa::network { +namespace taraxa::network::tarcap::v4 { -// TODO: create new version of this packet without manual parsing struct DagBlockPacket { DagBlockPacket(const dev::RLP& packet_rlp) { constexpr size_t required_size = 2; @@ -37,11 +37,8 @@ struct DagBlockPacket { dag_block = DagBlock(dag_rlp); }; - // TODO: make this a vector for automatic encoding/decoding... 
std::unordered_map> transactions; DagBlock dag_block; - - // RLP_FIELDS_DEFINE_INPLACE(transactions, dag_block) }; -} // namespace taraxa::network +} // namespace taraxa::network::tarcap::v4 diff --git a/libraries/core_libs/network/include/network/tarcap/packets/dag_sync_packet.hpp b/libraries/core_libs/network/include/network/tarcap/packets/v4/dag_sync_packet.hpp similarity index 87% rename from libraries/core_libs/network/include/network/tarcap/packets/dag_sync_packet.hpp rename to libraries/core_libs/network/include/network/tarcap/packets/v4/dag_sync_packet.hpp index 1f38458bfd..c71515f0fb 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets/dag_sync_packet.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets/v4/dag_sync_packet.hpp @@ -1,11 +1,11 @@ #pragma once #include "dag/dag_block.hpp" +#include "network/tarcap/packets_handlers/latest/common/exceptions.hpp" #include "transaction/system_transaction.hpp" -namespace taraxa::network { +namespace taraxa::network::tarcap::v4 { -// TODO: create new version of this packet without manual parsing struct DagSyncPacket { DagSyncPacket(const dev::RLP& packet_rlp) { if (constexpr size_t required_size = 4; packet_rlp.itemCount() != required_size) { @@ -38,8 +38,6 @@ struct DagSyncPacket { PbftPeriod response_period; std::unordered_map> transactions; std::vector dag_blocks; - - // RLP_FIELDS_DEFINE_INPLACE(transactions, dag_block) }; -} // namespace taraxa::network +} // namespace taraxa::network::tarcap::v4 diff --git a/libraries/core_libs/network/include/network/tarcap/packets/get_dag_sync_packet.hpp b/libraries/core_libs/network/include/network/tarcap/packets/v4/get_dag_sync_packet.hpp similarity index 77% rename from libraries/core_libs/network/include/network/tarcap/packets/get_dag_sync_packet.hpp rename to libraries/core_libs/network/include/network/tarcap/packets/v4/get_dag_sync_packet.hpp index cbd0674c9b..25783c7008 100644 --- 
a/libraries/core_libs/network/include/network/tarcap/packets/get_dag_sync_packet.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets/v4/get_dag_sync_packet.hpp @@ -1,11 +1,11 @@ #pragma once #include "dag/dag_block.hpp" +#include "network/tarcap/packets_handlers/latest/common/exceptions.hpp" #include "transaction/system_transaction.hpp" -namespace taraxa::network { +namespace taraxa::network::tarcap::v4 { -// TODO: create new version of this packet without manual parsing struct GetDagSyncPacket { GetDagSyncPacket(const dev::RLP& packet_rlp) { if (constexpr size_t required_size = 2; packet_rlp.itemCount() != required_size) { @@ -22,8 +22,6 @@ struct GetDagSyncPacket { PbftPeriod peer_period; std::unordered_set blocks_hashes; - - // RLP_FIELDS_DEFINE_INPLACE(transactions, dag_block) }; -} // namespace taraxa::network +} // namespace taraxa::network::tarcap::v4 diff --git a/libraries/core_libs/network/include/network/tarcap/packets/pbft_sync_packet.hpp b/libraries/core_libs/network/include/network/tarcap/packets/v4/pbft_sync_packet.hpp similarity index 90% rename from libraries/core_libs/network/include/network/tarcap/packets/pbft_sync_packet.hpp rename to libraries/core_libs/network/include/network/tarcap/packets/v4/pbft_sync_packet.hpp index 6ad908fd62..6d81f72f0c 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets/pbft_sync_packet.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets/v4/pbft_sync_packet.hpp @@ -1,12 +1,12 @@ #pragma once +#include "network/tarcap/packets_handlers/latest/common/exceptions.hpp" #include "pbft/period_data.hpp" #include "vote/pbft_vote.hpp" #include "vote/votes_bundle_rlp.hpp" -namespace taraxa::network { +namespace taraxa::network::tarcap::v4 { -// TODO: create new version of this packet without manual parsing struct PbftSyncPacket { PbftSyncPacket(const dev::RLP& packet_rlp) { if (packet_rlp.itemCount() != kStandardPacketSize && packet_rlp.itemCount() != kChainSyncedPacketSize) { @@ 
-42,8 +42,6 @@ struct PbftSyncPacket { const size_t kStandardPacketSize = 2; const size_t kChainSyncedPacketSize = 3; - - // RLP_FIELDS_DEFINE_INPLACE(transactions, dag_block) }; -} // namespace taraxa::network +} // namespace taraxa::network::tarcap::v4 diff --git a/libraries/core_libs/network/include/network/tarcap/packets/pillar_vote_packet.hpp b/libraries/core_libs/network/include/network/tarcap/packets/v4/pillar_vote_packet.hpp similarity index 72% rename from libraries/core_libs/network/include/network/tarcap/packets/pillar_vote_packet.hpp rename to libraries/core_libs/network/include/network/tarcap/packets/v4/pillar_vote_packet.hpp index 0abb37d38c..6871c98892 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets/pillar_vote_packet.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets/v4/pillar_vote_packet.hpp @@ -4,7 +4,7 @@ #include "network/tarcap/packets_handlers/latest/common/exceptions.hpp" #include "vote/pillar_vote.hpp" -namespace taraxa::network { +namespace taraxa::network::tarcap::v4 { struct PillarVotePacket { PillarVotePacket() = default; @@ -13,9 +13,6 @@ struct PillarVotePacket { PillarVotePacket& operator=(const PillarVotePacket&) = default; PillarVotePacket& operator=(PillarVotePacket&&) = default; - // PillarVotePacket(const dev::RLP& packet_rlp) { *this = util::rlp_dec(packet_rlp); } - // dev::bytes encode() { return util::rlp_enc(*this); } - PillarVotePacket(const dev::RLP& packet_rlp) { auto items = packet_rlp.itemCount(); if (items != PillarVote::kStandardRlpSize) { @@ -25,10 +22,7 @@ struct PillarVotePacket { pillar_vote = std::make_shared(packet_rlp); } - // TODO: will shared_ptr work ? 
std::shared_ptr pillar_vote; - - // RLP_FIELDS_DEFINE_INPLACE(pillar_vote) }; -} // namespace taraxa::network +} // namespace taraxa::network::tarcap::v4 diff --git a/libraries/core_libs/network/include/network/tarcap/packets/pillar_votes_bundle_packet.hpp b/libraries/core_libs/network/include/network/tarcap/packets/v4/pillar_votes_bundle_packet.hpp similarity index 76% rename from libraries/core_libs/network/include/network/tarcap/packets/pillar_votes_bundle_packet.hpp rename to libraries/core_libs/network/include/network/tarcap/packets/v4/pillar_votes_bundle_packet.hpp index 41669dbe71..8ff35fc0fb 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets/pillar_votes_bundle_packet.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets/v4/pillar_votes_bundle_packet.hpp @@ -4,7 +4,7 @@ #include "network/tarcap/packets_handlers/latest/common/exceptions.hpp" #include "vote/pillar_vote.hpp" -namespace taraxa::network { +namespace taraxa::network::tarcap::v4 { struct PillarVotesBundlePacket { PillarVotesBundlePacket() = default; @@ -13,9 +13,6 @@ struct PillarVotesBundlePacket { PillarVotesBundlePacket& operator=(const PillarVotesBundlePacket&) = default; PillarVotesBundlePacket& operator=(PillarVotesBundlePacket&&) = default; - // PillarVotesBundlePacket(const dev::RLP& packet_rlp) { *this = util::rlp_dec(packet_rlp); - // } dev::bytes encode() { return util::rlp_enc(*this); } - PillarVotesBundlePacket(const dev::RLP& packet_rlp) { auto items = packet_rlp.itemCount(); if (items == 0 || items > kMaxPillarVotesInBundleRlp) { @@ -27,12 +24,9 @@ struct PillarVotesBundlePacket { } } - // TODO: will shared_ptr work ? 
std::vector> pillar_votes; constexpr static size_t kMaxPillarVotesInBundleRlp{250}; - - // RLP_FIELDS_DEFINE_INPLACE(pillar_votes) }; -} // namespace taraxa::network +} // namespace taraxa::network::tarcap::v4 diff --git a/libraries/core_libs/network/include/network/tarcap/packets/status_packet.hpp b/libraries/core_libs/network/include/network/tarcap/packets/v4/status_packet.hpp similarity index 84% rename from libraries/core_libs/network/include/network/tarcap/packets/status_packet.hpp rename to libraries/core_libs/network/include/network/tarcap/packets/v4/status_packet.hpp index ebc46b782c..01f857aa8f 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets/status_packet.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets/v4/status_packet.hpp @@ -1,8 +1,10 @@ #pragma once -namespace taraxa::network { +#include "common/types.hpp" +#include "network/tarcap/packets_handlers/latest/common/exceptions.hpp" + +namespace taraxa::network::tarcap::v4 { -// TODO: create new version of this packet without manual parsing struct StatusPacket { StatusPacket(const dev::RLP& packet_rlp) { if (const auto items_count = packet_rlp.itemCount(); @@ -45,10 +47,8 @@ struct StatusPacket { std::optional is_light_node; std::optional node_history; - const uint16_t kInitialStatusPacketItemsCount = 11; - const uint16_t kStandardStatusPacketItemsCount = 4; - - // RLP_FIELDS_DEFINE_INPLACE(transactions, dag_block) + static constexpr uint16_t kInitialStatusPacketItemsCount = 11; + static constexpr uint16_t kStandardStatusPacketItemsCount = 4; }; -} // namespace taraxa::network +} // namespace taraxa::network::tarcap::v4 diff --git a/libraries/core_libs/network/include/network/tarcap/packets/transaction_packet.hpp b/libraries/core_libs/network/include/network/tarcap/packets/v4/transaction_packet.hpp similarity index 83% rename from libraries/core_libs/network/include/network/tarcap/packets/transaction_packet.hpp rename to 
libraries/core_libs/network/include/network/tarcap/packets/v4/transaction_packet.hpp index 7e94bc6a9a..48f0f24326 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets/transaction_packet.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets/v4/transaction_packet.hpp @@ -1,10 +1,10 @@ #pragma once +#include "network/tarcap/packets_handlers/latest/common/exceptions.hpp" #include "transaction/transaction.hpp" -namespace taraxa::network { +namespace taraxa::network::tarcap::v4 { -// TODO: create new version of this packet without manual parsing struct TransactionPacket { TransactionPacket(const dev::RLP& packet_rlp) { auto items = packet_rlp.itemCount(); @@ -45,11 +45,9 @@ struct TransactionPacket { std::vector> transactions; - const uint32_t kTransactionPacketItemCount = 2; - const uint32_t kMaxTransactionsInPacket{500}; - const uint32_t kMaxHashesInPacket{5000}; - - // RLP_FIELDS_DEFINE_INPLACE(transactions, dag_block) + static constexpr uint32_t kTransactionPacketItemCount = 2; + static constexpr uint32_t kMaxTransactionsInPacket{500}; + static constexpr uint32_t kMaxHashesInPacket{5000}; }; -} // namespace taraxa::network +} // namespace taraxa::network::tarcap::v4 diff --git a/libraries/core_libs/network/include/network/tarcap/packets/vote_packet.hpp b/libraries/core_libs/network/include/network/tarcap/packets/v4/vote_packet.hpp similarity index 78% rename from libraries/core_libs/network/include/network/tarcap/packets/vote_packet.hpp rename to libraries/core_libs/network/include/network/tarcap/packets/v4/vote_packet.hpp index ae641d5e85..6f34ce7ee2 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets/vote_packet.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets/v4/vote_packet.hpp @@ -1,11 +1,11 @@ #pragma once +#include "network/tarcap/packets_handlers/latest/common/exceptions.hpp" #include "pbft/pbft_block.hpp" #include "vote/pbft_vote.hpp" -namespace taraxa::network { +namespace 
taraxa::network::tarcap::v4 { -// TODO: create new version of this packet without manual parsing struct VotePacket { VotePacket(const dev::RLP& packet_rlp) { auto items = packet_rlp.itemCount(); @@ -29,10 +29,8 @@ struct VotePacket { std::shared_ptr pbft_block; std::optional peer_chain_size; - const size_t kVotePacketSize{1}; - const size_t kExtendedVotePacketSize{3}; - - // RLP_FIELDS_DEFINE_INPLACE(transactions, dag_block) + static constexpr size_t kVotePacketSize{1}; + static constexpr size_t kExtendedVotePacketSize{3}; }; -} // namespace taraxa::network +} // namespace taraxa::network::tarcap::v4 diff --git a/libraries/core_libs/network/include/network/tarcap/packets/votes_bundle_packet.hpp b/libraries/core_libs/network/include/network/tarcap/packets/v4/votes_bundle_packet.hpp similarity index 81% rename from libraries/core_libs/network/include/network/tarcap/packets/votes_bundle_packet.hpp rename to libraries/core_libs/network/include/network/tarcap/packets/v4/votes_bundle_packet.hpp index cc3b3b4ddc..41e342e1b7 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets/votes_bundle_packet.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets/v4/votes_bundle_packet.hpp @@ -1,10 +1,11 @@ #pragma once +#include "network/tarcap/packets_handlers/latest/common/exceptions.hpp" #include "vote/pbft_vote.hpp" +#include "vote/votes_bundle_rlp.hpp" -namespace taraxa::network { +namespace taraxa::network::tarcap::v4 { -// TODO: create new version of this packet without manual parsing struct VotesBundlePacket { VotesBundlePacket(const dev::RLP& packet_rlp) { auto items = packet_rlp.itemCount(); @@ -36,10 +37,6 @@ struct VotesBundlePacket { std::vector> votes; const size_t kMaxVotesInBundleRlp{1000}; - - // TODO: votes size must be <1, limit> - // RLP_FIELDS_DEFINE_INPLACE(votes_bundle_block_hash, votes_bundle_pbft_period, votes_bundle_pbft_round, - // votes_bundle_votes_step, votes) }; -} // namespace taraxa::network +} // namespace 
taraxa::network::tarcap::v4 diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/common/packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/common/packet_handler.hpp index af6e0d3a4a..2bacec7d36 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/common/packet_handler.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/common/packet_handler.hpp @@ -143,7 +143,12 @@ class PacketHandler : public BasePacketHandler { } } + // TODO[2865]: remove bool sealAndSend(const dev::p2p::NodeID& node_id, SubprotocolPacketType packet_type, dev::RLPStream&& rlp) { + return sealAndSend(node_id, packet_type, rlp.invalidate()); + } + + bool sealAndSend(const dev::p2p::NodeID& node_id, SubprotocolPacketType packet_type, dev::bytes&& rlp_bytes) { auto host = peers_state_->host_.lock(); if (!host) { LOG(log_er_) << "sealAndSend failed to obtain host"; @@ -157,9 +162,9 @@ class PacketHandler : public BasePacketHandler { } const auto begin = std::chrono::steady_clock::now(); - const size_t packet_size = rlp.out().size(); + const size_t packet_size = rlp_bytes.size(); - host->send(node_id, TARAXA_CAPABILITY_NAME, packet_type, rlp.invalidate(), + host->send(node_id, TARAXA_CAPABILITY_NAME, packet_type, std::move(rlp_bytes), [begin, node_id, packet_size, packet_type, this]() { if (!kConf.network.ddos_protection.log_packets_stats) { return; diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/dag_block_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/dag_block_packet_handler.hpp index 377a83c8a3..4d99d08cba 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/dag_block_packet_handler.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/dag_block_packet_handler.hpp @@ -1,7 +1,7 @@ #pragma once #include 
"common/ext_syncing_packet_handler.hpp" -#include "network/tarcap/packets/dag_block_packet.hpp" +#include "network/tarcap/packets/latest/dag_block_packet.hpp" namespace taraxa { class TransactionManager; diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/dag_sync_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/dag_sync_packet_handler.hpp index d4477986f4..95009bef8c 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/dag_sync_packet_handler.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/dag_sync_packet_handler.hpp @@ -1,7 +1,7 @@ #pragma once #include "common/ext_syncing_packet_handler.hpp" -#include "network/tarcap/packets/dag_sync_packet.hpp" +#include "network/tarcap/packets/latest/dag_sync_packet.hpp" namespace taraxa { class TransactionManager; diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/get_dag_sync_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/get_dag_sync_packet_handler.hpp index c50315b02e..70bac09778 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/get_dag_sync_packet_handler.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/get_dag_sync_packet_handler.hpp @@ -1,7 +1,7 @@ #pragma once #include "common/packet_handler.hpp" -#include "network/tarcap/packets/get_dag_sync_packet.hpp" +#include "network/tarcap/packets/latest/get_dag_sync_packet.hpp" #include "transaction/transaction.hpp" namespace taraxa { diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/get_next_votes_bundle_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/get_next_votes_bundle_packet_handler.hpp index ae3e4e64e9..80780fdeba 100644 --- 
a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/get_next_votes_bundle_packet_handler.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/get_next_votes_bundle_packet_handler.hpp @@ -1,7 +1,7 @@ #pragma once #include "common/ext_votes_packet_handler.hpp" -#include "network/tarcap/packets/get_next_votes_bundle_packet.hpp" +#include "network/tarcap/packets/latest/get_next_votes_bundle_packet.hpp" namespace taraxa { class PbftManager; diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/get_pbft_sync_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/get_pbft_sync_packet_handler.hpp index 250449cf75..1d8d170fd3 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/get_pbft_sync_packet_handler.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/get_pbft_sync_packet_handler.hpp @@ -1,7 +1,7 @@ #pragma once #include "common/packet_handler.hpp" -#include "network/tarcap/packets/get_pbft_sync_packet.hpp" +#include "network/tarcap/packets/latest/get_pbft_sync_packet.hpp" namespace taraxa { class PbftChain; diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/get_pillar_votes_bundle_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/get_pillar_votes_bundle_packet_handler.hpp index 58f7b2481c..c3d5fbaa2d 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/get_pillar_votes_bundle_packet_handler.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/get_pillar_votes_bundle_packet_handler.hpp @@ -1,7 +1,7 @@ #pragma once #include "common/packet_handler.hpp" -#include "network/tarcap/packets/get_pillar_votes_bundle_packet.hpp" +#include "network/tarcap/packets/latest/get_pillar_votes_bundle_packet.hpp" #include 
"pillar_chain/pillar_chain_manager.hpp" namespace taraxa::network::tarcap { @@ -23,8 +23,6 @@ class GetPillarVotesBundlePacketHandler : public PacketHandler& peer) override; protected: - constexpr static size_t kGetPillarVotesBundlePacketSize{2}; - std::shared_ptr pillar_chain_manager_; }; diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/pbft_sync_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/pbft_sync_packet_handler.hpp index 621e23a262..bfd065bb12 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/pbft_sync_packet_handler.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/pbft_sync_packet_handler.hpp @@ -2,7 +2,7 @@ #include "common/ext_syncing_packet_handler.hpp" #include "common/thread_pool.hpp" -#include "network/tarcap/packets/pbft_sync_packet.hpp" +#include "network/tarcap/packets/latest/pbft_sync_packet.hpp" #include "vote_manager/vote_manager.hpp" namespace taraxa::network::tarcap { @@ -35,9 +35,6 @@ class PbftSyncPacketHandler : public ExtSyncingPacketHandler { std::shared_ptr vote_mgr_; util::ThreadPool periodic_events_tp_; - - static constexpr size_t kStandardPacketSize = 2; - static constexpr size_t kChainSyncedPacketSize = 3; }; } // namespace taraxa::network::tarcap diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/pillar_vote_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/pillar_vote_packet_handler.hpp index 8bdbb52531..028a472cb4 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/pillar_vote_packet_handler.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/pillar_vote_packet_handler.hpp @@ -1,6 +1,6 @@ #pragma once -#include "network/tarcap/packets/pillar_vote_packet.hpp" +#include "network/tarcap/packets/latest/pillar_vote_packet.hpp" 
#include "network/tarcap/packets_handlers/latest/common/ext_pillar_vote_packet_handler.hpp" namespace taraxa::network::tarcap { diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/pillar_votes_bundle_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/pillar_votes_bundle_packet_handler.hpp index 0f6812c663..ed827d7405 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/pillar_votes_bundle_packet_handler.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/pillar_votes_bundle_packet_handler.hpp @@ -1,6 +1,6 @@ #pragma once -#include "network/tarcap/packets/pillar_votes_bundle_packet.hpp" +#include "network/tarcap/packets/latest/pillar_votes_bundle_packet.hpp" #include "network/tarcap/packets_handlers/latest/common/ext_pillar_vote_packet_handler.hpp" namespace taraxa::network::tarcap { @@ -17,9 +17,6 @@ class PillarVotesBundlePacketHandler : public ExtPillarVotePacketHandler& peer) override; - - public: - constexpr static size_t kMaxPillarVotesInBundleRlp{250}; }; } // namespace taraxa::network::tarcap diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/status_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/status_packet_handler.hpp index bfb75f42a0..24af6fb7bd 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/status_packet_handler.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/status_packet_handler.hpp @@ -1,7 +1,7 @@ #pragma once #include "common/ext_syncing_packet_handler.hpp" -#include "network/tarcap/packets/status_packet.hpp" +#include "network/tarcap/packets/latest/status_packet.hpp" namespace taraxa::network::tarcap { @@ -24,9 +24,6 @@ class StatusPacketHandler : public ExtSyncingPacketHandler { virtual void process(StatusPacket&& packet, const 
std::shared_ptr& peer) override; protected: - static constexpr uint16_t kInitialStatusPacketItemsCount = 11; - static constexpr uint16_t kStandardStatusPacketItemsCount = 4; - const h256 kGenesisHash; }; diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/transaction_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/transaction_packet_handler.hpp index 7ed3474b4b..f5a4c45e94 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/transaction_packet_handler.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/transaction_packet_handler.hpp @@ -1,7 +1,7 @@ #pragma once #include "common/packet_handler.hpp" -#include "network/tarcap/packets/transaction_packet.hpp" +#include "network/tarcap/packets/latest/transaction_packet.hpp" #include "transaction/transaction.hpp" namespace taraxa { @@ -39,9 +39,6 @@ class TransactionPacketHandler : public PacketHandler { // Packet type that is processed by this handler static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::kTransactionPacket; - // 2 items: hashes and transactions - static constexpr uint32_t kTransactionPacketItemCount = 2; - private: virtual void process(TransactionPacket&& packet, const std::shared_ptr& peer) override; diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/vote_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/vote_packet_handler.hpp index c60a2c9788..d88c9a2f6e 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/vote_packet_handler.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/vote_packet_handler.hpp @@ -1,7 +1,7 @@ #pragma once #include "common/ext_votes_packet_handler.hpp" -#include "network/tarcap/packets/vote_packet.hpp" +#include "network/tarcap/packets/latest/vote_packet.hpp" 
namespace taraxa::network::tarcap { @@ -30,10 +30,6 @@ class VotePacketHandler : public ExtVotesPacketHandler { private: virtual void process(VotePacket&& packet, const std::shared_ptr& peer) override; - - protected: - const size_t kVotePacketSize{1}; - const size_t kExtendedVotePacketSize{3}; }; } // namespace taraxa::network::tarcap diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/votes_bundle_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/votes_bundle_packet_handler.hpp index 6755333dc0..daef9e4b9a 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/votes_bundle_packet_handler.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/votes_bundle_packet_handler.hpp @@ -1,7 +1,7 @@ #pragma once #include "common/ext_votes_packet_handler.hpp" -#include "network/tarcap/packets/votes_bundle_packet.hpp" +#include "network/tarcap/packets/latest/votes_bundle_packet.hpp" namespace taraxa::network::tarcap { diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/dag_block_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/dag_block_packet_handler.hpp new file mode 100644 index 0000000000..9b0ec69968 --- /dev/null +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/dag_block_packet_handler.hpp @@ -0,0 +1,36 @@ +#pragma once + +#include "network/tarcap/packets/v4/dag_block_packet.hpp" +#include "network/tarcap/packets_handlers/latest/common/ext_syncing_packet_handler.hpp" + +namespace taraxa { +class TransactionManager; +} // namespace taraxa + +namespace taraxa::network::tarcap::v4 { + +class DagBlockPacketHandler : public ExtSyncingPacketHandler { + public: + DagBlockPacketHandler(const FullNodeConfig &conf, std::shared_ptr peers_state, + std::shared_ptr packets_stats, + std::shared_ptr pbft_syncing_state, std::shared_ptr 
pbft_chain, + std::shared_ptr pbft_mgr, std::shared_ptr dag_mgr, + std::shared_ptr trx_mgr, std::shared_ptr db, + const addr_t &node_addr, const std::string &logs_prefix = ""); + + void sendBlockWithTransactions(dev::p2p::NodeID const &peer_id, DagBlock block, const SharedTransactions &trxs); + void onNewBlockReceived(DagBlock &&block, const std::shared_ptr &peer = nullptr, + const std::unordered_map> &trxs = {}); + void onNewBlockVerified(const DagBlock &block, bool proposed, const SharedTransactions &trxs); + + // Packet type that is processed by this handler + static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::kDagBlockPacket; + + private: + virtual void process(DagBlockPacket &&packet, const std::shared_ptr &peer) override; + + protected: + std::shared_ptr trx_mgr_{nullptr}; +}; + +} // namespace taraxa::network::tarcap::v4 diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/dag_sync_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/dag_sync_packet_handler.hpp new file mode 100644 index 0000000000..d9989c25a8 --- /dev/null +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/dag_sync_packet_handler.hpp @@ -0,0 +1,31 @@ +#pragma once + +#include "network/tarcap/packets/v4/dag_sync_packet.hpp" +#include "network/tarcap/packets_handlers/latest/common/ext_syncing_packet_handler.hpp" + +namespace taraxa { +class TransactionManager; +} // namespace taraxa + +namespace taraxa::network::tarcap::v4 { + +class DagSyncPacketHandler : public ExtSyncingPacketHandler { + public: + DagSyncPacketHandler(const FullNodeConfig& conf, std::shared_ptr peers_state, + std::shared_ptr packets_stats, + std::shared_ptr pbft_syncing_state, std::shared_ptr pbft_chain, + std::shared_ptr pbft_mgr, std::shared_ptr dag_mgr, + std::shared_ptr trx_mgr, std::shared_ptr db, + const addr_t& node_addr, const std::string& logs_prefix = ""); + + // Packet type that is processed 
by this handler + static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::kDagSyncPacket; + + private: + virtual void process(DagSyncPacket&& packet, const std::shared_ptr& peer) override; + + protected: + std::shared_ptr trx_mgr_{nullptr}; +}; + +} // namespace taraxa::network::tarcap::v4 diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/get_dag_sync_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/get_dag_sync_packet_handler.hpp new file mode 100644 index 0000000000..ff1b55097f --- /dev/null +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/get_dag_sync_packet_handler.hpp @@ -0,0 +1,38 @@ +#pragma once + +#include "network/tarcap/packets/v4/get_dag_sync_packet.hpp" +#include "network/tarcap/packets_handlers/latest/common/packet_handler.hpp" +#include "transaction/transaction.hpp" + +namespace taraxa { +class DagManager; +class DbStorage; +class TransactionManager; +} // namespace taraxa + +namespace taraxa::network::tarcap::v4 { + +class GetDagSyncPacketHandler : public PacketHandler { + public: + GetDagSyncPacketHandler(const FullNodeConfig& conf, std::shared_ptr peers_state, + std::shared_ptr packets_stats, + std::shared_ptr trx_mgr, std::shared_ptr dag_mgr, + std::shared_ptr db, const addr_t& node_addr, + const std::string& logs_prefix = "GET_DAG_SYNC_PH"); + + void sendBlocks(const dev::p2p::NodeID& peer_id, std::vector>&& blocks, + SharedTransactions&& transactions, PbftPeriod request_period, PbftPeriod period); + + // Packet type that is processed by this handler + static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::kGetDagSyncPacket; + + private: + virtual void process(GetDagSyncPacket&& packet, const std::shared_ptr& peer) override; + + protected: + std::shared_ptr trx_mgr_; + std::shared_ptr dag_mgr_; + std::shared_ptr db_; +}; + +} // namespace taraxa::network::tarcap::v4 diff --git 
a/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/pbft_sync_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/pbft_sync_packet_handler.hpp new file mode 100644 index 0000000000..75e1115a8f --- /dev/null +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/pbft_sync_packet_handler.hpp @@ -0,0 +1,40 @@ +#pragma once + +#include "common/thread_pool.hpp" +#include "network/tarcap/packets/v4/pbft_sync_packet.hpp" +#include "network/tarcap/packets_handlers/latest/common/ext_syncing_packet_handler.hpp" +#include "vote_manager/vote_manager.hpp" + +namespace taraxa::network::tarcap::v4 { + +class PbftSyncPacketHandler : public ExtSyncingPacketHandler { + public: + PbftSyncPacketHandler(const FullNodeConfig& conf, std::shared_ptr peers_state, + std::shared_ptr packets_stats, + std::shared_ptr pbft_syncing_state, std::shared_ptr pbft_chain, + std::shared_ptr pbft_mgr, std::shared_ptr dag_mgr, + std::shared_ptr vote_mgr, std::shared_ptr db, const addr_t& node_addr, + const std::string& logs_prefix = "PBFT_SYNC_PH"); + + void handleMaliciousSyncPeer(const dev::p2p::NodeID& id); + + // Packet type that is processed by this handler + static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::kPbftSyncPacket; + + private: + virtual void process(PbftSyncPacket&& packet, const std::shared_ptr& peer) override; + + protected: + virtual PeriodData decodePeriodData(const dev::RLP& period_data_rlp) const; + virtual std::vector> decodeVotesBundle(const dev::RLP& votes_bundle_rlp) const; + + void pbftSyncComplete(); + void delayedPbftSync(uint32_t counter); + + static constexpr uint32_t kDelayedPbftSyncDelayMs = 10; + + std::shared_ptr vote_mgr_; + util::ThreadPool periodic_events_tp_; +}; + +} // namespace taraxa::network::tarcap::v4 diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/pillar_vote_packet_handler.hpp 
b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/pillar_vote_packet_handler.hpp new file mode 100644 index 0000000000..8b9ad59bda --- /dev/null +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/pillar_vote_packet_handler.hpp @@ -0,0 +1,25 @@ +#pragma once + +#include "network/tarcap/packets/v4/pillar_vote_packet.hpp" +#include "network/tarcap/packets_handlers/latest/common/ext_pillar_vote_packet_handler.hpp" + +namespace taraxa::network::tarcap::v4 { + +class PillarVotePacketHandler : public ExtPillarVotePacketHandler { + public: + PillarVotePacketHandler(const FullNodeConfig& conf, std::shared_ptr peers_state, + std::shared_ptr packets_stats, + std::shared_ptr pillar_chain_manager, + const addr_t& node_addr, const std::string& logs_prefix); + + void onNewPillarVote(const std::shared_ptr& vote, bool rebroadcast = false); + void sendPillarVote(const std::shared_ptr& peer, const std::shared_ptr& vote); + + // Packet type that is processed by this handler + static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::kPillarVotePacket; + + private: + virtual void process(PillarVotePacket&& packet, const std::shared_ptr& peer) override; +}; + +} // namespace taraxa::network::tarcap::v4 diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/pillar_votes_bundle_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/pillar_votes_bundle_packet_handler.hpp new file mode 100644 index 0000000000..ac1197e9b2 --- /dev/null +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/pillar_votes_bundle_packet_handler.hpp @@ -0,0 +1,22 @@ +#pragma once + +#include "network/tarcap/packets/v4/pillar_votes_bundle_packet.hpp" +#include "network/tarcap/packets_handlers/latest/common/ext_pillar_vote_packet_handler.hpp" + +namespace taraxa::network::tarcap::v4 { + +class PillarVotesBundlePacketHandler : public ExtPillarVotePacketHandler { 
+ public: + PillarVotesBundlePacketHandler(const FullNodeConfig& conf, std::shared_ptr peers_state, + std::shared_ptr packets_stats, + std::shared_ptr pillar_chain_manager, + const addr_t& node_addr, const std::string& logs_prefix); + + // Packet type that is processed by this handler + static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::kPillarVotesBundlePacket; + + private: + virtual void process(PillarVotesBundlePacket&& packet, const std::shared_ptr& peer) override; +}; + +} // namespace taraxa::network::tarcap::v4 diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/status_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/status_packet_handler.hpp new file mode 100644 index 0000000000..601c91250d --- /dev/null +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/status_packet_handler.hpp @@ -0,0 +1,30 @@ +#pragma once + +#include "network/tarcap/packets/v4/status_packet.hpp" +#include "network/tarcap/packets_handlers/latest/common/ext_syncing_packet_handler.hpp" + +namespace taraxa::network::tarcap::v4 { + +class StatusPacketHandler : public ExtSyncingPacketHandler { + public: + StatusPacketHandler(const FullNodeConfig& conf, std::shared_ptr peers_state, + std::shared_ptr packets_stats, + std::shared_ptr pbft_syncing_state, std::shared_ptr pbft_chain, + std::shared_ptr pbft_mgr, std::shared_ptr dag_mgr, + std::shared_ptr db, h256 genesis_hash, const addr_t& node_addr, + const std::string& logs_prefix = "STATUS_PH"); + + bool sendStatus(const dev::p2p::NodeID& node_id, bool initial); + void sendStatusToPeers(); + + // Packet type that is processed by this handler + static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::kStatusPacket; + + private: + virtual void process(StatusPacket&& packet, const std::shared_ptr& peer) override; + + protected: + const h256 kGenesisHash; +}; + +} // namespace taraxa::network::tarcap::v4 
diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/transaction_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/transaction_packet_handler.hpp new file mode 100644 index 0000000000..65c7f215e0 --- /dev/null +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/transaction_packet_handler.hpp @@ -0,0 +1,76 @@ +#pragma once + +#include "network/tarcap/packets/v4/transaction_packet.hpp" +#include "network/tarcap/packets_handlers/latest/common/packet_handler.hpp" +#include "transaction/transaction.hpp" + +namespace taraxa { +class TransactionManager; +enum class TransactionStatus; +} // namespace taraxa + +namespace taraxa::network::tarcap::v4 { + +class TransactionPacketHandler : public PacketHandler { + public: + TransactionPacketHandler(const FullNodeConfig& conf, std::shared_ptr peers_state, + std::shared_ptr packets_stats, + std::shared_ptr trx_mgr, const addr_t& node_addr, + const std::string& logs_prefix = "TRANSACTION_PH"); + + /** + * @brief Send transactions + * + * @param peer peer to send transactions to + * @param transactions serialized transactions + * + */ + void sendTransactions(std::shared_ptr peer, + std::pair>&& transactions); + + /** + * @brief Sends batch of transactions to all connected peers + * @note This method is used as periodic event to broadcast transactions to the other peers in network + * + * @param transactions to be sent + */ + void periodicSendTransactions(std::vector&& transactions); + + // Packet type that is processed by this handler + static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::kTransactionPacket; + + // 2 items: hashes and transactions + static constexpr uint32_t kTransactionPacketItemCount = 2; + + private: + virtual void process(TransactionPacket&& packet, const std::shared_ptr& peer) override; + + protected: + /** + * @brief select which transactions and hashes to send to which connected peer + * 
+ * @param transactions to be sent + * @return selected transactions and hashes to be sent per peer + */ + std::vector, std::pair>>> + transactionsToSendToPeers(std::vector&& transactions); + + /** + * @brief select which transactions and hashes to send to peer + * + * @param peer + * @param transactions grouped per account to be sent + * @param account_start_index which account to start with + * @return index of the next account to continue and selected transactions and hashes to be sent per peer + */ + std::pair>> transactionsToSendToPeer( + std::shared_ptr peer, const std::vector& transactions, + uint32_t account_start_index); + + std::shared_ptr trx_mgr_; + + std::atomic received_trx_count_{0}; + std::atomic unique_received_trx_count_{0}; +}; + +} // namespace taraxa::network::tarcap::v4 diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/vote_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/vote_packet_handler.hpp new file mode 100644 index 0000000000..ab3974091d --- /dev/null +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/vote_packet_handler.hpp @@ -0,0 +1,35 @@ +#pragma once + +#include "network/tarcap/packets/v4/vote_packet.hpp" +#include "network/tarcap/packets_handlers/latest/common/ext_votes_packet_handler.hpp" + +namespace taraxa::network::tarcap::v4 { + +class VotePacketHandler : public ExtVotesPacketHandler { + public: + VotePacketHandler(const FullNodeConfig& conf, std::shared_ptr peers_state, + std::shared_ptr packets_stats, std::shared_ptr pbft_mgr, + std::shared_ptr pbft_chain, std::shared_ptr vote_mgr, + std::shared_ptr slashing_manager, const addr_t& node_addr, + const std::string& logs_prefix = ""); + + /** + * @brief Sends pbft vote to connected peers + * + * @param vote Votes to send + * @param block block to send - nullptr means no block + * @param rebroadcast - send even of vote i known for the peer + */ + void onNewPbftVote(const 
std::shared_ptr& vote, const std::shared_ptr& block, + bool rebroadcast = false); + void sendPbftVote(const std::shared_ptr& peer, const std::shared_ptr& vote, + const std::shared_ptr& block); + + // Packet type that is processed by this handler + static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::kVotePacket; + + private: + virtual void process(VotePacket&& packet, const std::shared_ptr& peer) override; +}; + +} // namespace taraxa::network::tarcap::v4 diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/votes_bundle_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/votes_bundle_packet_handler.hpp new file mode 100644 index 0000000000..b2ead3490c --- /dev/null +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/votes_bundle_packet_handler.hpp @@ -0,0 +1,33 @@ +#pragma once + +#include "network/tarcap/packets/v4/votes_bundle_packet.hpp" +#include "network/tarcap/packets_handlers/latest/common/ext_votes_packet_handler.hpp" + +namespace taraxa::network::tarcap::v4 { + +class VotesBundlePacketHandler : public ExtVotesPacketHandler { + public: + VotesBundlePacketHandler(const FullNodeConfig& conf, std::shared_ptr peers_state, + std::shared_ptr packets_stats, std::shared_ptr pbft_mgr, + std::shared_ptr pbft_chain, std::shared_ptr vote_mgr, + std::shared_ptr slashing_manager, const addr_t& node_addr, + const std::string& logs_prefix = ""); + + /** + * @brief Sends pbft votes bundle to connected peers + * + * @param votes Votes to send + * @param rebroadcast if rebroadcast is true, all votes are resent to all peers + * @param exclude_node do not send votes to excluded node + */ + void onNewPbftVotesBundle(const std::vector>& votes, bool rebroadcast = false, + const std::optional& exclude_node = {}); + + // Packet type that is processed by this handler + static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::kVotesBundlePacket; + 
+ private: + virtual void process(VotesBundlePacket&& packet, const std::shared_ptr& peer) override; +}; + +} // namespace taraxa::network::tarcap::v4 diff --git a/libraries/core_libs/network/include/network/tarcap/taraxa_capability.hpp b/libraries/core_libs/network/include/network/tarcap/taraxa_capability.hpp index 8a2ccff8d6..33222e7384 100644 --- a/libraries/core_libs/network/include/network/tarcap/taraxa_capability.hpp +++ b/libraries/core_libs/network/include/network/tarcap/taraxa_capability.hpp @@ -59,6 +59,9 @@ class TaraxaCapability final : public dev::p2p::CapabilityFace { */ static const InitPacketsHandlers kInitLatestVersionHandlers; + // TODO: remove this once we pass next hf after cornus hf + static const InitPacketsHandlers kInitV4Handlers; + public: TaraxaCapability(TarcapVersion version, const FullNodeConfig &conf, const h256 &genesis_hash, std::weak_ptr host, const dev::KeyPair &key, diff --git a/libraries/core_libs/network/src/network.cpp b/libraries/core_libs/network/src/network.cpp index 0262050a3f..d450320159 100644 --- a/libraries/core_libs/network/src/network.cpp +++ b/libraries/core_libs/network/src/network.cpp @@ -73,10 +73,17 @@ Network::Network(const FullNodeConfig &config, const h256 &genesis_hash, std::fi dev::p2p::Host::CapabilitiesFactory constructCapabilities = [&](std::weak_ptr host) { assert(!host.expired()); - assert(kV3NetworkVersion < TARAXA_NET_VERSION); + assert(kV4NetworkVersion < TARAXA_NET_VERSION); dev::p2p::Host::CapabilityList capabilities; + // Register old version (V4) of taraxa capability + auto v4_tarcap = std::make_shared( + kV4NetworkVersion, config, genesis_hash, host, key, packets_tp_, all_packets_stats_, pbft_syncing_state_, db, + pbft_mgr, pbft_chain, vote_mgr, dag_mgr, trx_mgr, slashing_manager, pillar_chain_mgr, + network::tarcap::TaraxaCapability::kInitV4Handlers); + capabilities.emplace_back(v4_tarcap); + // Register latest version of taraxa capability auto latest_tarcap = std::make_shared( 
TARAXA_NET_VERSION, config, genesis_hash, host, key, packets_tp_, all_packets_stats_, pbft_syncing_state_, db, diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/dag_block_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/dag_block_packet_handler.cpp index 253f237e59..a08659288d 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/dag_block_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/dag_block_packet_handler.cpp @@ -23,7 +23,7 @@ void DagBlockPacketHandler::process(DagBlockPacket &&packet, const std::shared_p blk_hash_t const hash = packet.dag_block.getHash(); for (const auto &tx : packet.transactions) { - peer->markTransactionAsKnown(tx.first); + peer->markTransactionAsKnown(tx->getHash()); } peer->markDagBlockAsKnown(hash); @@ -37,7 +37,13 @@ void DagBlockPacketHandler::process(DagBlockPacket &&packet, const std::shared_p return; } - onNewBlockReceived(std::move(packet.dag_block), peer, packet.transactions); + std::unordered_map> txs_map; + txs_map.reserve(packet.transactions.size()); + for (const auto &tx : packet.transactions) { + txs_map.emplace(tx->getHash(), tx); + } + + onNewBlockReceived(std::move(packet.dag_block), peer, txs_map); } void DagBlockPacketHandler::sendBlockWithTransactions(dev::p2p::NodeID const &peer_id, taraxa::DagBlock block, @@ -48,23 +54,12 @@ void DagBlockPacketHandler::sendBlockWithTransactions(dev::p2p::NodeID const &pe return; } - dev::RLPStream s(2); - // This lock prevents race condition between syncing and gossiping dag blocks std::unique_lock lock(peer->mutex_for_sending_dag_blocks_); - taraxa::bytes trx_bytes; - for (uint32_t i = 0; i < trxs.size(); i++) { - auto trx_data = trxs[i]->rlp(); - trx_bytes.insert(trx_bytes.end(), std::begin(trx_data), std::end(trx_data)); - } - - s.appendList(trxs.size()); - s.appendRaw(trx_bytes, trxs.size()); - - s.appendRaw(block.rlp(true)); + DagBlockPacket 
dag_block_packet(trxs, block); - if (!sealAndSend(peer_id, SubprotocolPacketType::kDagBlockPacket, std::move(s))) { + if (!sealAndSend(peer_id, SubprotocolPacketType::kDagBlockPacket, dag_block_packet.encodeRlp())) { LOG(log_wr_) << "Sending DagBlock " << block.getHash() << " failed to " << peer_id; return; } diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/dag_sync_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/dag_sync_packet_handler.cpp index 8d7d599299..9c8e2319fe 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/dag_sync_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/dag_sync_packet_handler.cpp @@ -42,19 +42,23 @@ void DagSyncPacketHandler::process(DagSyncPacket&& packet, const std::shared_ptr } std::vector transactions_to_log; + std::unordered_map> transactions_map; transactions_to_log.reserve(packet.transactions.size()); + transactions_map.reserve(packet.transactions.size()); for (auto& trx : packet.transactions) { - peer->markTransactionAsKnown(trx.first); - transactions_to_log.push_back(trx.first); + const auto tx_hash = trx->getHash(); + peer->markTransactionAsKnown(tx_hash); + transactions_to_log.push_back(tx_hash); + transactions_map.emplace(tx_hash, trx); - if (trx_mgr_->isTransactionKnown(trx.first)) { + if (trx_mgr_->isTransactionKnown(tx_hash)) { continue; } - auto [verified, reason] = trx_mgr_->verifyTransaction(trx.second); + auto [verified, reason] = trx_mgr_->verifyTransaction(trx); if (!verified) { std::ostringstream err_msg; - err_msg << "DagBlock transaction " << trx.first << " validation failed: " << reason; + err_msg << "DagBlock transaction " << tx_hash << " validation failed: " << reason; throw MaliciousPeerException(err_msg.str()); } } @@ -62,31 +66,32 @@ void DagSyncPacketHandler::process(DagSyncPacket&& packet, const std::shared_ptr std::vector dag_blocks_to_log; 
dag_blocks_to_log.reserve(packet.dag_blocks.size()); for (auto& block : packet.dag_blocks) { - dag_blocks_to_log.push_back(block.getHash()); - peer->markDagBlockAsKnown(block.getHash()); + dag_blocks_to_log.push_back(block->getHash()); + peer->markDagBlockAsKnown(block->getHash()); - if (dag_mgr_->isDagBlockKnown(block.getHash())) { - LOG(log_tr_) << "Received known DagBlock " << block.getHash() << "from: " << peer->getId(); + if (dag_mgr_->isDagBlockKnown(block->getHash())) { + LOG(log_tr_) << "Received known DagBlock " << block->getHash() << "from: " << peer->getId(); continue; } - auto verified = dag_mgr_->verifyBlock(block, packet.transactions); + auto verified = dag_mgr_->verifyBlock(*block, transactions_map); if (verified.first != DagManager::VerifyBlockReturnType::Verified) { std::ostringstream err_msg; - err_msg << "DagBlock " << block.getHash() << " failed verification with error code " + err_msg << "DagBlock " << block->getHash() << " failed verification with error code " << static_cast(verified.first); throw MaliciousPeerException(err_msg.str()); } - if (block.getLevel() > peer->dag_level_) peer->dag_level_ = block.getLevel(); + if (block->getLevel() > peer->dag_level_) peer->dag_level_ = block->getLevel(); - auto status = dag_mgr_->addDagBlock(std::move(block), std::move(verified.second)); + // TODO[2869]: fix dag blocks usage - shared_ptr vs object type on different places... 
+ auto status = dag_mgr_->addDagBlock(std::move(*block), std::move(verified.second)); if (!status.first) { std::ostringstream err_msg; if (status.second.size() > 0) - err_msg << "DagBlock" << block.getHash() << " has missing pivot or/and tips " << status.second; + err_msg << "DagBlock" << block->getHash() << " has missing pivot or/and tips " << status.second; else - err_msg << "DagBlock" << block.getHash() << " could not be added to DAG"; + err_msg << "DagBlock" << block->getHash() << " could not be added to DAG"; throw MaliciousPeerException(err_msg.str()); } } diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/get_dag_sync_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/get_dag_sync_packet_handler.cpp index 4329a0db8c..f71ccc78b8 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/get_dag_sync_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/get_dag_sync_packet_handler.cpp @@ -1,6 +1,7 @@ #include "network/tarcap/packets_handlers/latest/get_dag_sync_packet_handler.hpp" #include "dag/dag_manager.hpp" +#include "network/tarcap/packets/latest/dag_sync_packet.hpp" #include "transaction/transaction_manager.hpp" namespace taraxa::network::tarcap { @@ -29,15 +30,18 @@ void GetDagSyncPacketHandler::process(GetDagSyncPacket &&packet, // This lock prevents race condition between syncing and gossiping dag blocks std::unique_lock lock(peer->mutex_for_sending_dag_blocks_); + std::unordered_set blocks_hashes_set; std::string blocks_hashes_to_log; blocks_hashes_to_log.reserve(packet.blocks_hashes.size()); for (const auto &hash : packet.blocks_hashes) { - blocks_hashes_to_log += hash.abridged(); + if (blocks_hashes_set.insert(hash).second) { + blocks_hashes_to_log += hash.abridged(); + } } LOG(log_dg_) << "Received GetDagSyncPacket: " << blocks_hashes_to_log << " from " << peer->getId(); - auto [period, blocks, transactions] = 
dag_mgr_->getNonFinalizedBlocksWithTransactions(packet.blocks_hashes); + auto [period, blocks, transactions] = dag_mgr_->getNonFinalizedBlocksWithTransactions(blocks_hashes_set); if (packet.peer_period == period) { peer->syncing_ = false; peer->peer_requested_dag_syncing_ = true; @@ -58,21 +62,8 @@ void GetDagSyncPacketHandler::sendBlocks(const dev::p2p::NodeID &peer_id, auto peer = peers_state_->getPeer(peer_id); if (!peer) return; - dev::RLPStream s(4); - s.append(request_period); - s.append(period); - - s.appendList(transactions.size()); - for (const auto &tx : transactions) { - s.appendRaw(tx->rlp()); - } - - s.appendList(blocks.size()); - for (const auto &block : blocks) { - s.appendRaw(block->rlp(true)); - } - - sealAndSend(peer_id, SubprotocolPacketType::kDagSyncPacket, std::move(s)); + DagSyncPacket dag_sync_packet(request_period, period, std::move(transactions), std::move(blocks)); + sealAndSend(peer_id, SubprotocolPacketType::kDagSyncPacket, dag_sync_packet.encodeRlp()); } } // namespace taraxa::network::tarcap diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/get_pbft_sync_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/get_pbft_sync_packet_handler.cpp index aa238f02f4..f6422d3d8f 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/get_pbft_sync_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/get_pbft_sync_packet_handler.cpp @@ -1,5 +1,6 @@ #include "network/tarcap/packets_handlers/latest/get_pbft_sync_packet_handler.hpp" +#include "network/tarcap/packets/latest/pbft_sync_packet.hpp" #include "network/tarcap/shared_states/pbft_syncing_state.hpp" #include "pbft/pbft_chain.hpp" #include "storage/storage.hpp" @@ -73,30 +74,27 @@ void GetPbftSyncPacketHandler::sendPbftBlocks(const std::shared_ptr return; } - dev::RLPStream s; + PeriodData period_data(std::move(data)); + std::shared_ptr pbft_sync_packet; + if (pbft_chain_synced && 
last_block) { // Latest finalized block cert votes are saved in db as reward votes for new blocks - const auto reward_votes = vote_mgr_->getRewardVotes(); + auto reward_votes = vote_mgr_->getRewardVotes(); assert(!reward_votes.empty()); // It is possible that the node pushed another block to the chain in the meantime if (reward_votes[0]->getPeriod() == block_period) { - s.appendList(3); - s << last_block; - s.appendRaw(data); - s.appendRaw(encodePbftVotesBundleRlp(reward_votes)); + // TODO[2870]: use custom votes bundle class instead of vector + pbft_sync_packet = + std::make_shared(last_block, std::move(period_data), std::move(reward_votes)); } else { - s.appendList(2); - s << last_block; - s.appendRaw(data); + pbft_sync_packet = std::make_shared(last_block, std::move(period_data)); } } else { - s.appendList(2); - s << last_block; - s.appendRaw(data); + pbft_sync_packet = std::make_shared(last_block, std::move(period_data)); } LOG(log_dg_) << "Sending PbftSyncPacket period " << block_period << " to " << peer_id; - sealAndSend(peer_id, SubprotocolPacketType::kPbftSyncPacket, std::move(s)); + sealAndSend(peer_id, SubprotocolPacketType::kPbftSyncPacket, pbft_sync_packet->encodeRlp()); if (pbft_chain_synced && last_block) { peer->syncing_ = false; } diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/get_pillar_votes_bundle_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/get_pillar_votes_bundle_packet_handler.cpp index 883d70dc6d..63aa01c749 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/get_pillar_votes_bundle_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/get_pillar_votes_bundle_packet_handler.cpp @@ -1,5 +1,6 @@ #include "network/tarcap/packets_handlers/latest/get_pillar_votes_bundle_packet_handler.hpp" +#include "network/tarcap/packets/v4/pillar_votes_bundle_packet.hpp" #include 
"network/tarcap/packets_handlers/latest/pillar_votes_bundle_packet_handler.hpp" namespace taraxa::network::tarcap { @@ -43,17 +44,19 @@ void GetPillarVotesBundlePacketHandler::process(GetPillarVotesBundlePacket &&pac while (votes_sent < total_votes) { // Determine the size of the current chunk const size_t chunk_size = - std::min(PillarVotesBundlePacketHandler::kMaxPillarVotesInBundleRlp, total_votes - votes_sent); + std::min(v4::PillarVotesBundlePacket::kMaxPillarVotesInBundleRlp, total_votes - votes_sent); - // Create a new RLPStream for the chunk - dev::RLPStream s(chunk_size); + // Create PillarVotesBundlePacket + std::vector> pillar_votes; + pillar_votes.reserve(chunk_size); for (size_t i = 0; i < chunk_size; ++i) { - const auto &sig = votes[votes_sent + i]; - s.appendRaw(sig->rlp()); + pillar_votes.emplace_back(votes[votes_sent + i]); } + PillarVotesBundlePacket pillar_votes_bundle_packet(std::move(pillar_votes)); // Seal and send the chunk to the peer - if (sealAndSend(peer->getId(), SubprotocolPacketType::kPillarVotesBundlePacket, std::move(s))) { + if (sealAndSend(peer->getId(), SubprotocolPacketType::kPillarVotesBundlePacket, + pillar_votes_bundle_packet.encodeRlp())) { // Mark the votes in this chunk as known for (size_t i = 0; i < chunk_size; ++i) { peer->markPillarVoteAsKnown(votes[votes_sent + i]->getHash()); @@ -61,9 +64,9 @@ void GetPillarVotesBundlePacketHandler::process(GetPillarVotesBundlePacket &&pac LOG(log_nf_) << "Pillar votes bundle for period " << packet.period << ", hash " << packet.pillar_block_hash << " sent to " << peer->getId() << " (Chunk " - << (votes_sent / PillarVotesBundlePacketHandler::kMaxPillarVotesInBundleRlp) + 1 << "/" - << (total_votes + PillarVotesBundlePacketHandler::kMaxPillarVotesInBundleRlp - 1) / - PillarVotesBundlePacketHandler::kMaxPillarVotesInBundleRlp + << (votes_sent / v4::PillarVotesBundlePacket::kMaxPillarVotesInBundleRlp) + 1 << "/" + << (total_votes + v4::PillarVotesBundlePacket::kMaxPillarVotesInBundleRlp - 
1) / + v4::PillarVotesBundlePacket::kMaxPillarVotesInBundleRlp << ")"; } @@ -74,12 +77,8 @@ void GetPillarVotesBundlePacketHandler::process(GetPillarVotesBundlePacket &&pac void GetPillarVotesBundlePacketHandler::requestPillarVotesBundle(PbftPeriod period, const blk_hash_t &pillar_block_hash, const std::shared_ptr &peer) { - // TODO: create GetPillarVotesBundlePacket object and call encode instead of manullaty creating packet here - dev::RLPStream s(kGetPillarVotesBundlePacketSize); - s << period; - s << pillar_block_hash; - - if (sealAndSend(peer->getId(), SubprotocolPacketType::kGetPillarVotesBundlePacket, std::move(s))) { + if (sealAndSend(peer->getId(), SubprotocolPacketType::kGetPillarVotesBundlePacket, + GetPillarVotesBundlePacket(period, pillar_block_hash).encodeRlp())) { LOG(log_nf_) << "Requested pillar votes bundle for period " << period << " and pillar block " << pillar_block_hash << " from peer " << peer->getId(); } else { diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/pbft_sync_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/pbft_sync_packet_handler.cpp index 58419901ba..048a6f5d69 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/pbft_sync_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/pbft_sync_packet_handler.cpp @@ -40,7 +40,7 @@ void PbftSyncPacketHandler::process(PbftSyncPacket &&packet, const std::shared_p // Process received pbft blocks // pbft_chain_synced is the flag to indicate own PBFT chain has synced with the peer's PBFT chain - const bool pbft_chain_synced = !packet.current_block_cert_votes.empty(); + const bool pbft_chain_synced = packet.current_block_cert_votes.size() > 0; const auto pbft_blk_hash = packet.period_data.pbft_blk->getBlockHash(); std::string received_dag_blocks_str; // This is just log related stuff diff --git 
a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/pillar_vote_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/pillar_vote_packet_handler.cpp index 67d235299d..abb9bf17b1 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/pillar_vote_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/pillar_vote_packet_handler.cpp @@ -42,10 +42,7 @@ void PillarVotePacketHandler::onNewPillarVote(const std::shared_ptr void PillarVotePacketHandler::sendPillarVote(const std::shared_ptr &peer, const std::shared_ptr &vote) { - dev::RLPStream s; - s.appendRaw(vote->rlp()); - - if (sealAndSend(peer->getId(), SubprotocolPacketType::kPillarVotePacket, std::move(s))) { + if (sealAndSend(peer->getId(), SubprotocolPacketType::kPillarVotePacket, PillarVotePacket(vote).encodeRlp())) { peer->markPillarVoteAsKnown(vote->getHash()); LOG(log_dg_) << "Pillar vote " << vote->getHash() << ", period " << vote->getPeriod() << " sent to " << peer->getId(); diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/status_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/status_packet_handler.cpp index 766d1cb3a5..e3f0136530 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/status_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/status_packet_handler.cpp @@ -2,6 +2,7 @@ #include "config/version.hpp" #include "dag/dag.hpp" +#include "network/tarcap/packets/v4/status_packet.hpp" #include "network/tarcap/packets_handlers/latest/common/ext_syncing_packet_handler.hpp" #include "network/tarcap/shared_states/pbft_syncing_state.hpp" #include "pbft/pbft_chain.hpp" @@ -144,17 +145,15 @@ bool StatusPacketHandler::sendStatus(const dev::p2p::NodeID& node_id, bool initi const auto pbft_round = pbft_mgr_->getPbftRound(); if (initial) { - success = sealAndSend( - node_id, 
SubprotocolPacketType::kStatusPacket, - std::move(dev::RLPStream(kInitialStatusPacketItemsCount) - << kConf.genesis.chain_id << dag_max_level << kGenesisHash << pbft_chain_size - << pbft_syncing_state_->isPbftSyncing() << pbft_round << TARAXA_MAJOR_VERSION << TARAXA_MINOR_VERSION - << TARAXA_PATCH_VERSION << kConf.is_light_node << kConf.light_node_history)); + success = sealAndSend(node_id, SubprotocolPacketType::kStatusPacket, + StatusPacket(pbft_chain_size, pbft_round, dag_max_level, pbft_syncing_state_->isPbftSyncing(), + kConf.genesis.chain_id, kGenesisHash, TARAXA_MAJOR_VERSION, TARAXA_MINOR_VERSION, + TARAXA_PATCH_VERSION, kConf.is_light_node, kConf.light_node_history) + .encodeRlp()); } else { success = sealAndSend( node_id, SubprotocolPacketType::kStatusPacket, - std::move(dev::RLPStream(kStandardStatusPacketItemsCount) - << dag_max_level << pbft_chain_size << pbft_syncing_state_->isDeepPbftSyncing() << pbft_round)); + StatusPacket(pbft_chain_size, pbft_round, dag_max_level, pbft_syncing_state_->isDeepPbftSyncing()).encodeRlp()); } return success; diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/transaction_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/transaction_packet_handler.cpp index d5286035f7..283ec4282c 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/transaction_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/transaction_packet_handler.cpp @@ -2,6 +2,7 @@ #include +#include "network/tarcap/packets/v4/transaction_packet.hpp" #include "transaction/transaction.hpp" #include "transaction/transaction_manager.hpp" @@ -129,6 +130,7 @@ TransactionPacketHandler::transactionsToSendToPeers(std::vector &&transactions) { + // TODO[2871]: do not process hashes auto peers_with_transactions_to_send = transactionsToSendToPeers(std::move(transactions)); const auto peers_to_send_count = peers_with_transactions_to_send.size(); if 
(peers_to_send_count > 0) { @@ -141,32 +143,17 @@ void TransactionPacketHandler::periodicSendTransactions(std::vector peer, std::pair> &&transactions) { if (!peer) return; const auto peer_id = peer->getId(); const auto transactions_size = transactions.first.size(); - const auto hashes_size = transactions.second.size(); LOG(log_tr_) << "sendTransactions " << transactions.first.size() << " to " << peer_id; - dev::RLPStream s(kTransactionPacketItemCount); - s.appendList(transactions_size + hashes_size); - for (const auto &trx : transactions.first) { - s << trx->getHash(); - } - - for (const auto &trx_hash : transactions.second) { - s << trx_hash; - } - - s.appendList(transactions_size); - - for (const auto &trx : transactions.first) { - s.appendRaw(trx->rlp()); - } - - if (sealAndSend(peer_id, SubprotocolPacketType::kTransactionPacket, std::move(s))) { + if (sealAndSend(peer_id, SubprotocolPacketType::kTransactionPacket, + TransactionPacket(transactions.first).encodeRlp())) { for (const auto &trx : transactions.first) { peer->markTransactionAsKnown(trx->getHash()); } diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/vote_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/vote_packet_handler.cpp index 0d0a3e2d33..70c2688778 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/vote_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/vote_packet_handler.cpp @@ -1,5 +1,6 @@ #include "network/tarcap/packets_handlers/latest/vote_packet_handler.hpp" +#include "network/tarcap/packets/v4/vote_packet.hpp" #include "pbft/pbft_manager.hpp" #include "vote_manager/vote_manager.hpp" @@ -97,19 +98,15 @@ void VotePacketHandler::sendPbftVote(const std::shared_ptr &peer, co return; } - dev::RLPStream s; + VotePacket vote_packet; if (block) { - s = dev::RLPStream(kExtendedVotePacketSize); - s.appendRaw(vote->rlp(true, false)); - s.appendRaw(block->rlp(true)); - 
s.append(pbft_chain_->getPbftChainSize()); + vote_packet = VotePacket(vote, block, pbft_chain_->getPbftChainSize()); } else { - s = dev::RLPStream(kVotePacketSize); - s.appendRaw(vote->rlp(true, false)); + vote_packet = VotePacket(vote); } - if (sealAndSend(peer->getId(), SubprotocolPacketType::kVotePacket, std::move(s))) { + if (sealAndSend(peer->getId(), SubprotocolPacketType::kVotePacket, vote_packet.encodeRlp())) { peer->markPbftVoteAsKnown(vote->getHash()); if (block) { peer->markPbftBlockAsKnown(block->getBlockHash()); diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/v4/dag_block_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/v4/dag_block_packet_handler.cpp new file mode 100644 index 0000000000..7529eb86b6 --- /dev/null +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/v4/dag_block_packet_handler.cpp @@ -0,0 +1,228 @@ +#include "network/tarcap/packets_handlers/v4/dag_block_packet_handler.hpp" + +#include "dag/dag_manager.hpp" +#include "network/tarcap/packets_handlers/latest/transaction_packet_handler.hpp" +#include "network/tarcap/shared_states/pbft_syncing_state.hpp" +#include "transaction/transaction_manager.hpp" + +namespace taraxa::network::tarcap::v4 { + +DagBlockPacketHandler::DagBlockPacketHandler(const FullNodeConfig &conf, std::shared_ptr peers_state, + std::shared_ptr packets_stats, + std::shared_ptr pbft_syncing_state, + std::shared_ptr pbft_chain, + std::shared_ptr pbft_mgr, std::shared_ptr dag_mgr, + std::shared_ptr trx_mgr, std::shared_ptr db, + const addr_t &node_addr, const std::string &logs_prefix) + : ExtSyncingPacketHandler(conf, std::move(peers_state), std::move(packets_stats), std::move(pbft_syncing_state), + std::move(pbft_chain), std::move(pbft_mgr), std::move(dag_mgr), std::move(db), node_addr, + logs_prefix + "DAG_BLOCK_PH"), + trx_mgr_(std::move(trx_mgr)) {} + +void DagBlockPacketHandler::process(DagBlockPacket &&packet, const std::shared_ptr &peer) { + blk_hash_t const 
hash = packet.dag_block.getHash(); + + for (const auto &tx : packet.transactions) { + peer->markTransactionAsKnown(tx.first); + } + peer->markDagBlockAsKnown(hash); + + if (packet.dag_block.getLevel() > peer->dag_level_) { + peer->dag_level_ = packet.dag_block.getLevel(); + } + + // Do not process this block in case we already have it + if (dag_mgr_->isDagBlockKnown(packet.dag_block.getHash())) { + LOG(log_tr_) << "Received known DagBlockPacket " << hash << "from: " << peer->getId(); + return; + } + + onNewBlockReceived(std::move(packet.dag_block), peer, packet.transactions); +} + +void DagBlockPacketHandler::sendBlockWithTransactions(dev::p2p::NodeID const &peer_id, taraxa::DagBlock block, + const SharedTransactions &trxs) { + std::shared_ptr peer = peers_state_->getPeer(peer_id); + if (!peer) { + LOG(log_wr_) << "Send dag block " << block.getHash() << ". Failed to obtain peer " << peer_id; + return; + } + + dev::RLPStream s(2); + + // This lock prevents race condition between syncing and gossiping dag blocks + std::unique_lock lock(peer->mutex_for_sending_dag_blocks_); + + taraxa::bytes trx_bytes; + for (uint32_t i = 0; i < trxs.size(); i++) { + auto trx_data = trxs[i]->rlp(); + trx_bytes.insert(trx_bytes.end(), std::begin(trx_data), std::end(trx_data)); + } + + s.appendList(trxs.size()); + s.appendRaw(trx_bytes, trxs.size()); + + s.appendRaw(block.rlp(true)); + + if (!sealAndSend(peer_id, SubprotocolPacketType::kDagBlockPacket, std::move(s))) { + LOG(log_wr_) << "Sending DagBlock " << block.getHash() << " failed to " << peer_id; + return; + } + + // Mark data as known if sending was successful + peer->markDagBlockAsKnown(block.getHash()); +} + +void DagBlockPacketHandler::onNewBlockReceived( + DagBlock &&block, const std::shared_ptr &peer, + const std::unordered_map> &trxs) { + const auto block_hash = block.getHash(); + auto verified = dag_mgr_->verifyBlock(block, trxs); + switch (verified.first) { + case 
DagManager::VerifyBlockReturnType::IncorrectTransactionsEstimation: + case DagManager::VerifyBlockReturnType::BlockTooBig: + case DagManager::VerifyBlockReturnType::FailedVdfVerification: + case DagManager::VerifyBlockReturnType::NotEligible: + case DagManager::VerifyBlockReturnType::FailedTipsVerification: { + std::ostringstream err_msg; + err_msg << "DagBlock " << block_hash << " failed verification with error code " + << static_cast(verified.first); + throw MaliciousPeerException(err_msg.str()); + } + case DagManager::VerifyBlockReturnType::MissingTransaction: + if (peer->dagSyncingAllowed()) { + if (trx_mgr_->transactionsDropped()) [[unlikely]] { + LOG(log_nf_) << "NewBlock " << block_hash.toString() << " from peer " << peer->getId() + << " is missing transaction, our pool recently dropped transactions, requesting dag sync"; + } else { + LOG(log_wr_) << "NewBlock " << block_hash.toString() << " from peer " << peer->getId() + << " is missing transaction, requesting dag sync"; + } + peer->peer_dag_synced_ = false; + requestPendingDagBlocks(peer); + } else { + if (trx_mgr_->transactionsDropped()) [[unlikely]] { + // Disconnecting since anything after will also contain missing pivot/tips ... 
+ LOG(log_nf_) << "NewBlock " << block_hash.toString() << " from peer " << peer->getId() + << " is missing transaction, but our pool recently dropped transactions, disconnecting"; + disconnect(peer->getId(), dev::p2p::UserReason); + } else { + std::ostringstream err_msg; + err_msg << "DagBlock" << block_hash << " is missing a transaction while in a dag synced state"; + throw MaliciousPeerException(err_msg.str()); + } + } + break; + case DagManager::VerifyBlockReturnType::MissingTip: + if (peer->peer_dag_synced_) { + if (peer->dagSyncingAllowed()) { + LOG(log_wr_) << "NewBlock " << block_hash.toString() << " from peer " << peer->getId() + << " is missing tip, requesting dag sync"; + peer->peer_dag_synced_ = false; + requestPendingDagBlocks(peer); + } else { + std::ostringstream err_msg; + err_msg << "DagBlock has missing tip"; + throw MaliciousPeerException(err_msg.str()); + } + } else { + // peer_dag_synced_ flag ensures that this can only be performed once for a peer + requestPendingDagBlocks(peer); + } + break; + case DagManager::VerifyBlockReturnType::AheadBlock: + case DagManager::VerifyBlockReturnType::FutureBlock: + if (peer->peer_dag_synced_) { + LOG(log_er_) << "DagBlock" << block_hash << " is an ahead/future block. 
Peer " << peer->getId() + << " will be disconnected"; + disconnect(peer->getId(), dev::p2p::UserReason); + } + break; + case DagManager::VerifyBlockReturnType::Verified: { + auto status = dag_mgr_->addDagBlock(std::move(block), std::move(verified.second)); + if (!status.first) { + LOG(log_dg_) << "Received DagBlockPacket " << block_hash << "from: " << peer->getId(); + // Ignore new block packets when pbft syncing + if (pbft_syncing_state_->isPbftSyncing()) { + LOG(log_dg_) << "Ignore new dag block " << block_hash << ", pbft syncing is on"; + } else if (peer->peer_dag_syncing_) { + LOG(log_dg_) << "Ignore new dag block " << block_hash << ", dag syncing is on"; + } else { + if (peer->peer_dag_synced_) { + std::ostringstream err_msg; + if (status.second.size() > 0) + err_msg << "DagBlock" << block.getHash() << " has missing pivot or/and tips " << status.second; + else + err_msg << "DagBlock" << block.getHash() << " could not be added to DAG"; + throw MaliciousPeerException(err_msg.str()); + } else { + // peer_dag_synced_ flag ensures that this can only be performed once for a peer + requestPendingDagBlocks(peer); + } + } + } + } break; + case DagManager::VerifyBlockReturnType::ExpiredBlock: + break; + } +} + +void DagBlockPacketHandler::onNewBlockVerified(const DagBlock &block, bool proposed, const SharedTransactions &trxs) { + // If node is pbft syncing and block is not proposed by us, this is an old block that has been verified - no block + // gossip is needed + if (!proposed && pbft_syncing_state_->isDeepPbftSyncing()) { + return; + } + + const auto &block_hash = block.getHash(); + LOG(log_tr_) << "Verified NewBlock " << block_hash.toString(); + + std::vector peers_to_send; + for (auto const &peer : peers_state_->getAllPeers()) { + if (!peer.second->isDagBlockKnown(block_hash) && !peer.second->syncing_) { + peers_to_send.push_back(peer.first); + } + } + + std::string peer_and_transactions_to_log; + // Sending it in same order favours some peers over others, always 
start with a different position + const auto peers_to_send_count = peers_to_send.size(); + if (peers_to_send_count > 0) { + uint32_t start_with = rand() % peers_to_send_count; + for (uint32_t i = 0; i < peers_to_send_count; i++) { + auto peer_id = peers_to_send[(start_with + i) % peers_to_send_count]; + dev::RLPStream ts; + auto peer = peers_state_->getPeer(peer_id); + if (peer && !peer->syncing_) { + peer_and_transactions_to_log += " Peer: " + peer->getId().abridged() + " Trxs: "; + + SharedTransactions transactions_to_send; + for (const auto &trx : trxs) { + const auto &trx_hash = trx->getHash(); + if (peer->isTransactionKnown(trx_hash)) { + continue; + } + transactions_to_send.push_back(trx); + peer_and_transactions_to_log += trx_hash.abridged(); + } + + for (const auto &trx : trxs) { + assert(trx != nullptr); + const auto trx_hash = trx->getHash(); + if (peer->isTransactionKnown(trx_hash)) { + continue; + } + + transactions_to_send.push_back(trx); + peer_and_transactions_to_log += trx_hash.abridged(); + } + + sendBlockWithTransactions(peer_id, block, transactions_to_send); + peer->markDagBlockAsKnown(block_hash); + } + } + } + LOG(log_dg_) << "Send DagBlock " << block.getHash() << " to peers: " << peer_and_transactions_to_log; + if (!peers_to_send.empty()) LOG(log_tr_) << "Sent block to " << peers_to_send.size() << " peers"; +} +} // namespace taraxa::network::tarcap::v4 diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/v4/dag_sync_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/v4/dag_sync_packet_handler.cpp new file mode 100644 index 0000000000..27fcaea920 --- /dev/null +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/v4/dag_sync_packet_handler.cpp @@ -0,0 +1,103 @@ +#include "network/tarcap/packets_handlers/v4/dag_sync_packet_handler.hpp" + +#include "dag/dag.hpp" +#include "network/tarcap/packets_handlers/latest/common/ext_syncing_packet_handler.hpp" +#include 
"network/tarcap/shared_states/pbft_syncing_state.hpp" +#include "transaction/transaction.hpp" +#include "transaction/transaction_manager.hpp" + +namespace taraxa::network::tarcap::v4 { + +DagSyncPacketHandler::DagSyncPacketHandler(const FullNodeConfig& conf, std::shared_ptr peers_state, + std::shared_ptr packets_stats, + std::shared_ptr pbft_syncing_state, + std::shared_ptr pbft_chain, std::shared_ptr pbft_mgr, + std::shared_ptr dag_mgr, + std::shared_ptr trx_mgr, std::shared_ptr db, + const addr_t& node_addr, const std::string& logs_prefix) + : ExtSyncingPacketHandler(conf, std::move(peers_state), std::move(packets_stats), std::move(pbft_syncing_state), + std::move(pbft_chain), std::move(pbft_mgr), std::move(dag_mgr), std::move(db), node_addr, + logs_prefix + "DAG_SYNC_PH"), + trx_mgr_(std::move(trx_mgr)) {} + +void DagSyncPacketHandler::process(DagSyncPacket&& packet, const std::shared_ptr& peer) { + // If the periods did not match restart syncing + if (packet.response_period > packet.request_period) { + LOG(log_dg_) << "Received DagSyncPacket with mismatching periods: " << packet.response_period << " " + << packet.request_period << " from " << peer->getId(); + if (peer->pbft_chain_size_ < packet.response_period) { + peer->pbft_chain_size_ = packet.response_period; + } + peer->peer_dag_syncing_ = false; + // We might be behind, restart pbft sync if needed + startSyncingPbft(); + return; + } else if (packet.response_period < packet.request_period) { + // This should not be possible for honest node + std::ostringstream err_msg; + err_msg << "Received DagSyncPacket with mismatching periods: response_period(" << packet.response_period + << ") != request_period(" << packet.request_period << ")"; + + throw MaliciousPeerException(err_msg.str()); + } + + std::vector transactions_to_log; + transactions_to_log.reserve(packet.transactions.size()); + for (auto& trx : packet.transactions) { + peer->markTransactionAsKnown(trx.first); + transactions_to_log.push_back(trx.first); 
+ + if (trx_mgr_->isTransactionKnown(trx.first)) { + continue; + } + + auto [verified, reason] = trx_mgr_->verifyTransaction(trx.second); + if (!verified) { + std::ostringstream err_msg; + err_msg << "DagBlock transaction " << trx.first << " validation failed: " << reason; + throw MaliciousPeerException(err_msg.str()); + } + } + + std::vector dag_blocks_to_log; + dag_blocks_to_log.reserve(packet.dag_blocks.size()); + for (auto& block : packet.dag_blocks) { + dag_blocks_to_log.push_back(block.getHash()); + peer->markDagBlockAsKnown(block.getHash()); + + if (dag_mgr_->isDagBlockKnown(block.getHash())) { + LOG(log_tr_) << "Received known DagBlock " << block.getHash() << "from: " << peer->getId(); + continue; + } + + auto verified = dag_mgr_->verifyBlock(block, packet.transactions); + if (verified.first != DagManager::VerifyBlockReturnType::Verified) { + std::ostringstream err_msg; + err_msg << "DagBlock " << block.getHash() << " failed verification with error code " + << static_cast(verified.first); + throw MaliciousPeerException(err_msg.str()); + } + + if (block.getLevel() > peer->dag_level_) peer->dag_level_ = block.getLevel(); + + auto status = dag_mgr_->addDagBlock(std::move(block), std::move(verified.second)); + if (!status.first) { + std::ostringstream err_msg; + if (status.second.size() > 0) + err_msg << "DagBlock" << block.getHash() << " has missing pivot or/and tips " << status.second; + else + err_msg << "DagBlock" << block.getHash() << " could not be added to DAG"; + throw MaliciousPeerException(err_msg.str()); + } + } + + peer->peer_dag_synced_ = true; + peer->peer_dag_synced_time_ = + std::chrono::duration_cast(std::chrono::system_clock::now().time_since_epoch()).count(); + peer->peer_dag_syncing_ = false; + + LOG(log_dg_) << "Received DagSyncPacket with blocks: " << dag_blocks_to_log + << " Transactions: " << transactions_to_log << " from " << peer->getId(); +} + +} // namespace taraxa::network::tarcap::v4 diff --git 
a/libraries/core_libs/network/src/tarcap/packets_handlers/v4/get_dag_sync_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/v4/get_dag_sync_packet_handler.cpp new file mode 100644 index 0000000000..74ddabb85c --- /dev/null +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/v4/get_dag_sync_packet_handler.cpp @@ -0,0 +1,81 @@ +#include "network/tarcap/packets_handlers/v4/get_dag_sync_packet_handler.hpp" + +#include "dag/dag_manager.hpp" +#include "transaction/transaction_manager.hpp" + +namespace taraxa::network::tarcap::v4 { + +GetDagSyncPacketHandler::GetDagSyncPacketHandler(const FullNodeConfig &conf, std::shared_ptr peers_state, + std::shared_ptr packets_stats, + std::shared_ptr trx_mgr, + std::shared_ptr dag_mgr, std::shared_ptr db, + const addr_t &node_addr, const std::string &logs_prefix) + : PacketHandler(conf, std::move(peers_state), std::move(packets_stats), node_addr, logs_prefix + "GET_DAG_SYNC_PH"), + trx_mgr_(std::move(trx_mgr)), + dag_mgr_(std::move(dag_mgr)), + db_(std::move(db)) {} + +void GetDagSyncPacketHandler::process(GetDagSyncPacket &&packet, + [[maybe_unused]] const std::shared_ptr &peer) { + if (!peer->requestDagSyncingAllowed()) { + // This should not be possible for honest node + // Each node should perform dag syncing only when allowed + std::ostringstream err_msg; + err_msg << "Received multiple GetDagSyncPackets from " << peer->getId().abridged(); + + throw MaliciousPeerException(err_msg.str()); + } + + // This lock prevents race condition between syncing and gossiping dag blocks + std::unique_lock lock(peer->mutex_for_sending_dag_blocks_); + + std::unordered_set blocks_hashes_set; + std::string blocks_hashes_to_log; + blocks_hashes_to_log.reserve(packet.blocks_hashes.size()); + for (const auto &hash : packet.blocks_hashes) { + if (blocks_hashes_set.insert(hash).second) { + blocks_hashes_to_log += hash.abridged(); + } + } + + LOG(log_dg_) << "Received GetDagSyncPacket: " << blocks_hashes_to_log << " 
from " << peer->getId(); + + auto [period, blocks, transactions] = dag_mgr_->getNonFinalizedBlocksWithTransactions(blocks_hashes_set); + if (packet.peer_period == period) { + peer->syncing_ = false; + peer->peer_requested_dag_syncing_ = true; + peer->peer_requested_dag_syncing_time_ = + std::chrono::duration_cast(std::chrono::system_clock::now().time_since_epoch()).count(); + } else { + // There is no point in sending blocks if periods do not match, but an empty packet should be sent + blocks.clear(); + transactions.clear(); + } + sendBlocks(peer->getId(), std::move(blocks), std::move(transactions), packet.peer_period, period); +} + +void GetDagSyncPacketHandler::sendBlocks(const dev::p2p::NodeID &peer_id, + std::vector> &&blocks, + SharedTransactions &&transactions, PbftPeriod request_period, + PbftPeriod period) { + auto peer = peers_state_->getPeer(peer_id); + if (!peer) return; + + dev::RLPStream s(4); + s.append(request_period); + s.append(period); + + s.appendList(transactions.size()); + for (const auto &tx : transactions) { + s.appendRaw(tx->rlp()); + } + + s.appendList(blocks.size()); + for (const auto &block : blocks) { + s.appendRaw(block->rlp(true)); + } + + sealAndSend(peer_id, SubprotocolPacketType::kDagSyncPacket, std::move(s)); +} + +} // namespace taraxa::network::tarcap::v4 diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/v4/pbft_sync_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/v4/pbft_sync_packet_handler.cpp new file mode 100644 index 0000000000..087f458935 --- /dev/null +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/v4/pbft_sync_packet_handler.cpp @@ -0,0 +1,266 @@ +#include "network/tarcap/packets_handlers/v4/pbft_sync_packet_handler.hpp" + +#include "network/tarcap/shared_states/pbft_syncing_state.hpp" +#include "pbft/pbft_chain.hpp" +#include "pbft/pbft_manager.hpp" +#include "transaction/transaction_manager.hpp" +#include "vote/pbft_vote.hpp" +#include 
"vote/votes_bundle_rlp.hpp" + +namespace taraxa::network::tarcap::v4 { + +PbftSyncPacketHandler::PbftSyncPacketHandler(const FullNodeConfig &conf, std::shared_ptr peers_state, + std::shared_ptr packets_stats, + std::shared_ptr pbft_syncing_state, + std::shared_ptr pbft_chain, + std::shared_ptr pbft_mgr, std::shared_ptr dag_mgr, + std::shared_ptr vote_mgr, std::shared_ptr db, + const addr_t &node_addr, const std::string &logs_prefix) + : ExtSyncingPacketHandler(conf, std::move(peers_state), std::move(packets_stats), std::move(pbft_syncing_state), + std::move(pbft_chain), std::move(pbft_mgr), std::move(dag_mgr), std::move(db), node_addr, + logs_prefix + "PBFT_SYNC_PH"), + vote_mgr_(std::move(vote_mgr)), + periodic_events_tp_(1, true) {} + +void PbftSyncPacketHandler::process(PbftSyncPacket &&packet, const std::shared_ptr &peer) { + // Note: no need to consider possible race conditions due to concurrent processing as it is + // disabled on priority_queue blocking dependencies level + const auto syncing_peer = pbft_syncing_state_->syncingPeer(); + if (!syncing_peer) { + LOG(log_wr_) << "PbftSyncPacket received from unexpected peer " << peer->getId().abridged() + << " but there is no current syncing peer set"; + return; + } + + if (syncing_peer->getId() != peer->getId()) { + LOG(log_wr_) << "PbftSyncPacket received from unexpected peer " << peer->getId().abridged() + << " current syncing peer " << syncing_peer->getId().abridged(); + return; + } + + // Process received pbft blocks + // pbft_chain_synced is the flag to indicate own PBFT chain has synced with the peer's PBFT chain + const bool pbft_chain_synced = !packet.current_block_cert_votes.empty(); + const auto pbft_blk_hash = packet.period_data.pbft_blk->getBlockHash(); + + std::string received_dag_blocks_str; // This is just log related stuff + for (auto const &block : packet.period_data.dag_blocks) { + received_dag_blocks_str += block.getHash().toString() + " "; + if (peer->dag_level_ < block.getLevel()) { + 
peer->dag_level_ = block.getLevel(); + } + } + + const auto pbft_block_period = packet.period_data.pbft_blk->getPeriod(); + LOG(log_dg_) << "PbftSyncPacket received. Period: " << pbft_block_period + << ", dag Blocks: " << received_dag_blocks_str << " from " << peer->getId(); + + peer->markPbftBlockAsKnown(pbft_blk_hash); + // Update peer's pbft period if outdated + if (peer->pbft_chain_size_ < pbft_block_period) { + peer->pbft_chain_size_ = pbft_block_period; + } + + LOG(log_tr_) << "Processing pbft block: " << pbft_blk_hash; + + if (pbft_chain_->findPbftBlockInChain(pbft_blk_hash)) { + LOG(log_wr_) << "PBFT block " << pbft_blk_hash << ", period: " << packet.period_data.pbft_blk->getPeriod() + << " from " << peer->getId() << " already present in chain"; + } else { + if (pbft_block_period != pbft_mgr_->pbftSyncingPeriod() + 1) { + // This can happen if we just got synced and block was cert voted + if (pbft_chain_synced && pbft_block_period == pbft_mgr_->pbftSyncingPeriod()) { + pbftSyncComplete(); + return; + } + + LOG(log_er_) << "Block " << pbft_blk_hash << " period unexpected: " << pbft_block_period + << ". 
Expected period: " << pbft_mgr_->pbftSyncingPeriod() + 1; + return; + } + + // Check cert vote matches if final synced block + if (pbft_chain_synced) { + for (auto const &vote : packet.current_block_cert_votes) { + if (vote->getBlockHash() != pbft_blk_hash) { + LOG(log_er_) << "Invalid cert votes block hash " << vote->getBlockHash() << " instead of " << pbft_blk_hash + << " from peer " << peer->getId().abridged() << " received, stop syncing."; + handleMaliciousSyncPeer(peer->getId()); + return; + } + } + } + + // Check votes match the hash of previous block in the queue + auto last_pbft_block_hash = pbft_mgr_->lastPbftBlockHashFromQueueOrChain(); + // Check cert vote matches + for (auto const &vote : packet.period_data.previous_block_cert_votes) { + if (vote->getBlockHash() != last_pbft_block_hash) { + LOG(log_er_) << "Invalid cert votes block hash " << vote->getBlockHash() << " instead of " + << last_pbft_block_hash << " from peer " << peer->getId().abridged() << " received, stop syncing."; + handleMaliciousSyncPeer(peer->getId()); + return; + } + } + + if (!pbft_mgr_->validatePillarDataInPeriodData(packet.period_data)) { + handleMaliciousSyncPeer(peer->getId()); + return; + } + + auto order_hash = PbftManager::calculateOrderHash(packet.period_data.dag_blocks); + if (order_hash != packet.period_data.pbft_blk->getOrderHash()) { + { // This is just log related stuff + std::vector trx_order; + trx_order.reserve(packet.period_data.transactions.size()); + std::vector blk_order; + blk_order.reserve(packet.period_data.dag_blocks.size()); + for (auto t : packet.period_data.transactions) { + trx_order.push_back(t->getHash()); + } + for (auto b : packet.period_data.dag_blocks) { + blk_order.push_back(b.getHash()); + } + LOG(log_er_) << "Order hash incorrect in period data " << pbft_blk_hash << " expected: " << order_hash + << " received " << packet.period_data.pbft_blk->getOrderHash() << "; Dag order: " << blk_order + << "; Trx order: " << trx_order << "; from " << 
peer->getId().abridged() << ", stop syncing."; + } + handleMaliciousSyncPeer(peer->getId()); + return; + } + + // This is special case when queue is empty and we can not say for sure that all votes that are part of this block + // have been verified before + if (pbft_mgr_->periodDataQueueEmpty()) { + for (const auto &v : packet.period_data.previous_block_cert_votes) { + if (auto vote_is_valid = vote_mgr_->validateVote(v); vote_is_valid.first == false) { + LOG(log_er_) << "Invalid reward votes in block " << packet.period_data.pbft_blk->getBlockHash() + << " from peer " << peer->getId().abridged() + << " received, stop syncing. Validation failed. Err: " << vote_is_valid.second; + handleMaliciousSyncPeer(peer->getId()); + return; + } + + vote_mgr_->addVerifiedVote(v); + } + + // And now we need to replace it with verified votes + if (auto votes = vote_mgr_->checkRewardVotes(packet.period_data.pbft_blk, true); votes.first) { + packet.period_data.previous_block_cert_votes = std::move(votes.second); + } else { + // checkRewardVotes could fail because we just cert voted this block and moved to next period, + // in that case we are probably fully synced + if (pbft_block_period <= vote_mgr_->getRewardVotesPbftBlockPeriod()) { + pbft_syncing_state_->setPbftSyncing(false); + return; + } + + LOG(log_er_) << "Invalid reward votes in block " << packet.period_data.pbft_blk->getBlockHash() << " from peer " + << peer->getId().abridged() << " received, stop syncing."; + handleMaliciousSyncPeer(peer->getId()); + return; + } + } + + LOG(log_tr_) << "Synced PBFT block hash " << pbft_blk_hash << " with " + << packet.period_data.previous_block_cert_votes.size() << " cert votes"; + LOG(log_tr_) << "Synced PBFT block " << packet.period_data; + pbft_mgr_->periodDataQueuePush(std::move(packet.period_data), peer->getId(), + std::move(packet.current_block_cert_votes)); + } + + auto pbft_sync_period = pbft_mgr_->pbftSyncingPeriod(); + + // Reset last sync packet received time + 
pbft_syncing_state_->setLastSyncPacketTime(); + + if (pbft_chain_synced) { + pbftSyncComplete(); + return; + } + + if (packet.last_block) { + // If current sync period is actually bigger than the block we just received we are probably synced + if (pbft_sync_period > pbft_block_period) { + pbft_syncing_state_->setPbftSyncing(false); + return; + } + if (pbft_syncing_state_->isPbftSyncing()) { + if (pbft_sync_period > pbft_chain_->getPbftChainSize() + (10 * kConf.network.sync_level_size)) { + LOG(log_tr_) << "Syncing pbft blocks too fast than processing. Has synced period " << pbft_sync_period + << ", PBFT chain size " << pbft_chain_->getPbftChainSize(); + periodic_events_tp_.post(kDelayedPbftSyncDelayMs, [this] { delayedPbftSync(1); }); + } else { + if (!syncPeerPbft(pbft_sync_period + 1)) { + pbft_syncing_state_->setPbftSyncing(false); + return; + } + } + } + } +} + +PeriodData PbftSyncPacketHandler::decodePeriodData(const dev::RLP &period_data_rlp) const { + return PeriodData(period_data_rlp); +} + +std::vector> PbftSyncPacketHandler::decodeVotesBundle( + const dev::RLP &votes_bundle_rlp) const { + return decodePbftVotesBundleRlp(votes_bundle_rlp); +} + +void PbftSyncPacketHandler::pbftSyncComplete() { + if (pbft_mgr_->periodDataQueueSize()) { + LOG(log_tr_) << "Syncing pbft blocks faster than processing. 
Remaining sync size " + << pbft_mgr_->periodDataQueueSize(); + periodic_events_tp_.post(kDelayedPbftSyncDelayMs, [this] { pbftSyncComplete(); }); + } else { + LOG(log_dg_) << "Syncing PBFT is completed"; + // We are pbft synced with the node we are connected to but + // calling startSyncingPbft will check if some nodes have + // greater pbft chain size and we should continue syncing with + // them, Or sync pending DAG blocks + pbft_syncing_state_->setPbftSyncing(false); + startSyncingPbft(); + if (!pbft_syncing_state_->isPbftSyncing()) { + requestPendingDagBlocks(); + } + } +} + +void PbftSyncPacketHandler::delayedPbftSync(uint32_t counter) { + const uint32_t max_delayed_pbft_sync_count = 60000 / kDelayedPbftSyncDelayMs; + auto pbft_sync_period = pbft_mgr_->pbftSyncingPeriod(); + if (counter > max_delayed_pbft_sync_count) { + LOG(log_er_) << "Pbft blocks stuck in queue, no new block processed in 60 seconds " << pbft_sync_period << " " + << pbft_chain_->getPbftChainSize(); + pbft_syncing_state_->setPbftSyncing(false); + LOG(log_tr_) << "Syncing PBFT is stopping"; + return; + } + + if (pbft_syncing_state_->isPbftSyncing()) { + if (pbft_sync_period > pbft_chain_->getPbftChainSize() + (10 * kConf.network.sync_level_size)) { + LOG(log_tr_) << "Syncing pbft blocks faster than processing " << pbft_sync_period << " " + << pbft_chain_->getPbftChainSize(); + periodic_events_tp_.post(kDelayedPbftSyncDelayMs, [this, counter] { delayedPbftSync(counter + 1); }); + } else { + if (!syncPeerPbft(pbft_sync_period + 1)) { + pbft_syncing_state_->setPbftSyncing(false); + } + } + } +} + +void PbftSyncPacketHandler::handleMaliciousSyncPeer(const dev::p2p::NodeID &id) { + peers_state_->set_peer_malicious(id); + + if (auto host = peers_state_->host_.lock(); host) { + LOG(log_nf_) << "Disconnect peer " << id; + host->disconnect(id, dev::p2p::UserReason); + } else { + LOG(log_er_) << "Unable to handleMaliciousSyncPeer, host == nullptr"; + } +} + +} // namespace taraxa::network::tarcap::v4 
diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/v4/pillar_vote_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/v4/pillar_vote_packet_handler.cpp new file mode 100644 index 0000000000..5ef5f327e7 --- /dev/null +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/v4/pillar_vote_packet_handler.cpp @@ -0,0 +1,55 @@ +#include "network/tarcap/packets_handlers/v4/pillar_vote_packet_handler.hpp" + +#include "vote/pillar_vote.hpp" + +namespace taraxa::network::tarcap::v4 { + +PillarVotePacketHandler::PillarVotePacketHandler(const FullNodeConfig &conf, std::shared_ptr peers_state, + std::shared_ptr packets_stats, + std::shared_ptr pillar_chain_manager, + const addr_t &node_addr, const std::string &logs_prefix) + : ExtPillarVotePacketHandler(conf, std::move(peers_state), std::move(packets_stats), + std::move(pillar_chain_manager), node_addr, logs_prefix + "PILLAR_VOTE_PH") {} + +void PillarVotePacketHandler::process(PillarVotePacket &&packet, const std::shared_ptr &peer) { + if (!kConf.genesis.state.hardforks.ficus_hf.isFicusHardfork(packet.pillar_vote->getPeriod())) { + std::ostringstream err_msg; + err_msg << "Pillar vote " << packet.pillar_vote->getHash() << ", period " << packet.pillar_vote->getPeriod() + << " < ficus hardfork block num"; + throw MaliciousPeerException(err_msg.str()); + } + + if (processPillarVote(packet.pillar_vote, peer)) { + onNewPillarVote(packet.pillar_vote); + } +} + +void PillarVotePacketHandler::onNewPillarVote(const std::shared_ptr &vote, bool rebroadcast) { + for (const auto &peer : peers_state_->getAllPeers()) { + if (peer.second->syncing_) { + LOG(log_dg_) << "Pillar vote " << vote->getHash() << ", period " << vote->getPeriod() << " not sent to " + << peer.first << ". 
Peer syncing"; + continue; + } + + if (peer.second->isPillarVoteKnown(vote->getHash()) && !rebroadcast) { + continue; + } + + sendPillarVote(peer.second, vote); + } +} + +void PillarVotePacketHandler::sendPillarVote(const std::shared_ptr &peer, + const std::shared_ptr &vote) { + dev::RLPStream s; + s.appendRaw(vote->rlp()); + + if (sealAndSend(peer->getId(), SubprotocolPacketType::kPillarVotePacket, std::move(s))) { + peer->markPillarVoteAsKnown(vote->getHash()); + LOG(log_dg_) << "Pillar vote " << vote->getHash() << ", period " << vote->getPeriod() << " sent to " + << peer->getId(); + } +} + +} // namespace taraxa::network::tarcap::v4 diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/v4/pillar_votes_bundle_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/v4/pillar_votes_bundle_packet_handler.cpp new file mode 100644 index 0000000000..71e6224685 --- /dev/null +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/v4/pillar_votes_bundle_packet_handler.cpp @@ -0,0 +1,32 @@ +#include "network/tarcap/packets_handlers/v4/pillar_votes_bundle_packet_handler.hpp" + +#include "vote/pillar_vote.hpp" + +namespace taraxa::network::tarcap::v4 { + +PillarVotesBundlePacketHandler::PillarVotesBundlePacketHandler( + const FullNodeConfig &conf, std::shared_ptr peers_state, + std::shared_ptr packets_stats, + std::shared_ptr pillar_chain_manager, const addr_t &node_addr, + const std::string &logs_prefix) + : ExtPillarVotePacketHandler(conf, std::move(peers_state), std::move(packets_stats), + std::move(pillar_chain_manager), node_addr, logs_prefix + "PILLAR_VOTES_BUNDLE_PH") {} + +void PillarVotesBundlePacketHandler::process(PillarVotesBundlePacket &&packet, + const std::shared_ptr &peer) { + // TODO[2744]: there could be the same protection as in pbft syncing that only requested bundle packet is accepted + LOG(log_dg_) << "PillarVotesBundlePacket received from peer " << peer->getId(); + + for (const auto &pillar_vote : 
packet.pillar_votes) { + if (!kConf.genesis.state.hardforks.ficus_hf.isFicusHardfork(pillar_vote->getPeriod())) { + std::ostringstream err_msg; + err_msg << "Synced pillar vote " << pillar_vote->getHash() << ", period " << pillar_vote->getPeriod() + << " < ficus hardfork block num"; + throw MaliciousPeerException(err_msg.str()); + } + + processPillarVote(pillar_vote, peer); + } +} + +} // namespace taraxa::network::tarcap::v4 diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/v4/status_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/v4/status_packet_handler.cpp new file mode 100644 index 0000000000..74267373e2 --- /dev/null +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/v4/status_packet_handler.cpp @@ -0,0 +1,177 @@ +#include "network/tarcap/packets_handlers/v4/status_packet_handler.hpp" + +#include "config/version.hpp" +#include "dag/dag.hpp" +#include "network/tarcap/packets_handlers/latest/common/ext_syncing_packet_handler.hpp" +#include "network/tarcap/shared_states/pbft_syncing_state.hpp" +#include "pbft/pbft_chain.hpp" +#include "pbft/pbft_manager.hpp" +#include "vote_manager/vote_manager.hpp" + +namespace taraxa::network::tarcap::v4 { + +StatusPacketHandler::StatusPacketHandler(const FullNodeConfig& conf, std::shared_ptr peers_state, + std::shared_ptr packets_stats, + std::shared_ptr pbft_syncing_state, + std::shared_ptr pbft_chain, std::shared_ptr pbft_mgr, + std::shared_ptr dag_mgr, std::shared_ptr db, + h256 genesis_hash, const addr_t& node_addr, const std::string& logs_prefix) + : ExtSyncingPacketHandler(conf, std::move(peers_state), std::move(packets_stats), std::move(pbft_syncing_state), + std::move(pbft_chain), std::move(pbft_mgr), std::move(dag_mgr), std::move(db), node_addr, + logs_prefix + "STATUS_PH"), + kGenesisHash(genesis_hash) {} + +void StatusPacketHandler::process(StatusPacket&& packet, const std::shared_ptr& peer) { + // Important !!! 
Use only "selected_peer" and not "peer" in this function as "peer" might be nullptr + auto selected_peer = peer; + const auto pbft_synced_period = pbft_mgr_->pbftSyncingPeriod(); + + // Initial status packet + if (packet.isInitialStatusPacket()) { + if (!selected_peer) { + selected_peer = peers_state_->getPendingPeer(peer->getId()); + if (!selected_peer) { + LOG(log_wr_) << "Peer " << peer->getId().abridged() + << " missing in both peers and pending peers map - will be disconnected."; + disconnect(peer->getId(), dev::p2p::UserReason); + return; + } + } + + if (*packet.peer_chain_id != kConf.genesis.chain_id) { + LOG((peers_state_->getPeersCount()) ? log_nf_ : log_er_) + << "Incorrect network id " << *packet.peer_chain_id << ", host " << peer->getId().abridged() + << " will be disconnected"; + disconnect(peer->getId(), dev::p2p::UserReason); + return; + } + + if (*packet.genesis_hash != kGenesisHash) { + LOG((peers_state_->getPeersCount()) ? log_nf_ : log_er_) + << "Incorrect genesis hash " << *packet.genesis_hash << ", host " << peer->getId().abridged() + << " will be disconnected"; + disconnect(peer->getId(), dev::p2p::UserReason); + return; + } + + // If this is a light node and it cannot serve our sync request disconnect from it + if (*packet.is_light_node) { + selected_peer->peer_light_node = true; + selected_peer->peer_light_node_history = *packet.node_history; + if (pbft_synced_period + *packet.node_history < packet.peer_pbft_chain_size) { + LOG((peers_state_->getPeersCount()) ? log_nf_ : log_er_) + << "Light node " << peer->getId().abridged() << " would not be able to serve our syncing request. " + << "Current synced period " << pbft_synced_period << ", peer synced period " << packet.peer_pbft_chain_size + << ", peer light node history " << *packet.node_history << ". 
Peer will be disconnected"; + disconnect(peer->getId(), dev::p2p::UserReason); + return; + } + } + + selected_peer->dag_level_ = packet.peer_dag_level; + selected_peer->pbft_chain_size_ = packet.peer_pbft_chain_size; + selected_peer->syncing_ = packet.peer_syncing; + selected_peer->pbft_period_ = packet.peer_pbft_chain_size + 1; + selected_peer->pbft_round_ = packet.peer_pbft_round; + + peers_state_->setPeerAsReadyToSendMessages(peer->getId(), selected_peer); + + LOG(log_dg_) << "Received initial status message from " << peer->getId() << ", network id " << *packet.peer_chain_id + << ", peer DAG max level " << selected_peer->dag_level_ << ", genesis " << *packet.genesis_hash + << ", peer pbft chain size " << selected_peer->pbft_chain_size_ << ", peer syncing " << std::boolalpha + << selected_peer->syncing_ << ", peer pbft period " << selected_peer->pbft_period_ + << ", peer pbft round " << selected_peer->pbft_round_ << ", node major version" + << *packet.node_major_version << ", node minor version" << *packet.node_minor_version + << ", node patch version" << *packet.node_patch_version; + + } else { // Standard status packet + if (!selected_peer) { + LOG(log_er_) << "Received standard status packet from " << peer->getId().abridged() + << ", without previously received initial status packet. Will be disconnected"; + disconnect(peer->getId(), dev::p2p::UserReason); + return; + } + + selected_peer->dag_level_ = packet.peer_dag_level; + selected_peer->pbft_chain_size_ = packet.peer_pbft_chain_size; + selected_peer->pbft_period_ = selected_peer->pbft_chain_size_ + 1; + selected_peer->syncing_ = packet.peer_syncing; + selected_peer->pbft_round_ = packet.peer_pbft_round; + + // TODO: Address malicious status + if (!pbft_syncing_state_->isPbftSyncing()) { + if (pbft_synced_period < selected_peer->pbft_chain_size_) { + LOG(log_nf_) << "Restart PBFT chain syncing. 
Own synced PBFT at period " << pbft_synced_period + << ", peer PBFT chain size " << selected_peer->pbft_chain_size_; + if (pbft_synced_period + 1 < selected_peer->pbft_chain_size_) { + startSyncingPbft(); + } else { + // If we are behind by only one block wait for two status messages before syncing because nodes are not always + // in perfect sync + if (selected_peer->last_status_pbft_chain_size_ == selected_peer->pbft_chain_size_) { + startSyncingPbft(); + } + } + } else if (pbft_synced_period == selected_peer->pbft_chain_size_ && !selected_peer->peer_dag_synced_) { + // if not syncing and the peer period is matching our period request any pending dag blocks + requestPendingDagBlocks(selected_peer); + } + + const auto [pbft_current_round, pbft_current_period] = pbft_mgr_->getPbftRoundAndPeriod(); + if (pbft_current_period == selected_peer->pbft_period_ && pbft_current_round < selected_peer->pbft_round_) { + requestPbftNextVotesAtPeriodRound(selected_peer->getId(), pbft_current_period, pbft_current_round); + } + } + selected_peer->last_status_pbft_chain_size_ = selected_peer->pbft_chain_size_.load(); + + LOG(log_dg_) << "Received status message from " << peer->getId() << ", peer DAG max level " + << selected_peer->dag_level_ << ", peer pbft chain size " << selected_peer->pbft_chain_size_ + << ", peer syncing " << std::boolalpha << selected_peer->syncing_ << ", peer pbft round " + << selected_peer->pbft_round_; + } +} + +bool StatusPacketHandler::sendStatus(const dev::p2p::NodeID& node_id, bool initial) { + bool success = false; + std::string status_packet_type = initial ? 
"initial" : "standard"; + + LOG(log_dg_) << "Sending " << status_packet_type << " status message to " << node_id << ", protocol version " + << TARAXA_NET_VERSION << ", network id " << kConf.genesis.chain_id << ", genesis " << kGenesisHash + << ", node version " << TARAXA_VERSION; + + auto dag_max_level = dag_mgr_->getMaxLevel(); + auto pbft_chain_size = pbft_chain_->getPbftChainSize(); + const auto pbft_round = pbft_mgr_->getPbftRound(); + + if (initial) { + success = sealAndSend( + node_id, SubprotocolPacketType::kStatusPacket, + // TODO[2865]: use packet class to automatically create rlp + std::move(dev::RLPStream(v4::StatusPacket::kInitialStatusPacketItemsCount) + << kConf.genesis.chain_id << dag_max_level << kGenesisHash << pbft_chain_size + << pbft_syncing_state_->isPbftSyncing() << pbft_round << TARAXA_MAJOR_VERSION << TARAXA_MINOR_VERSION + << TARAXA_PATCH_VERSION << kConf.is_light_node << kConf.light_node_history)); + } else { + success = sealAndSend( + node_id, SubprotocolPacketType::kStatusPacket, + // TODO[2865]: use packet class to automatically create rlp + std::move(dev::RLPStream(v4::StatusPacket::kStandardStatusPacketItemsCount) + << dag_max_level << pbft_chain_size << pbft_syncing_state_->isDeepPbftSyncing() << pbft_round)); + } + + return success; +} + +void StatusPacketHandler::sendStatusToPeers() { + auto host = peers_state_->host_.lock(); + if (!host) { + LOG(log_er_) << "Unavailable host during checkLiveness"; + return; + } + + for (auto const& peer : peers_state_->getAllPeers()) { + sendStatus(peer.first, false); + } +} + +} // namespace taraxa::network::tarcap::v4 diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/v4/transaction_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/v4/transaction_packet_handler.cpp new file mode 100644 index 0000000000..a9de4e0ba6 --- /dev/null +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/v4/transaction_packet_handler.cpp @@ -0,0 +1,176 @@ +#include 
"network/tarcap/packets_handlers/v4/transaction_packet_handler.hpp" + +#include + +#include "transaction/transaction.hpp" +#include "transaction/transaction_manager.hpp" + +namespace taraxa::network::tarcap::v4 { + +TransactionPacketHandler::TransactionPacketHandler(const FullNodeConfig &conf, std::shared_ptr peers_state, + std::shared_ptr packets_stats, + std::shared_ptr trx_mgr, const addr_t &node_addr, + const std::string &logs_prefix) + : PacketHandler(conf, std::move(peers_state), std::move(packets_stats), node_addr, logs_prefix + "TRANSACTION_PH"), + trx_mgr_(std::move(trx_mgr)) {} + +inline void TransactionPacketHandler::process(TransactionPacket &&packet, const std::shared_ptr &peer) { + size_t unseen_txs_count = 0; + for (auto &transaction : packet.transactions) { + // Skip any transactions that are already known to the trx mgr + if (trx_mgr_->isTransactionKnown(transaction->getHash())) { + continue; + } + + unseen_txs_count++; + + const auto [verified, reason] = trx_mgr_->verifyTransaction(transaction); + if (!verified) { + std::ostringstream err_msg; + err_msg << "DagBlock transaction " << transaction->getHash() << " validation failed: " << reason; + throw MaliciousPeerException(err_msg.str()); + } + + received_trx_count_++; + const auto tx_hash = transaction->getHash(); + const auto status = trx_mgr_->insertValidatedTransaction(std::move(transaction)); + if (status == TransactionStatus::Inserted) { + unique_received_trx_count_++; + } + if (status == TransactionStatus::Overflow) { + // Raise exception in trx pool is over the limit and this peer already has too many suspicious packets + if (peer->reportSuspiciousPacket() && trx_mgr_->nonProposableTransactionsOverTheLimit()) { + std::ostringstream err_msg; + err_msg << "Suspicious packets over the limit on DagBlock transaction " << tx_hash << " validation: " << reason; + } + } + } + + if (!packet.transactions.empty()) { + LOG(log_tr_) << "Received TransactionPacket with " << packet.transactions.size() << " 
transactions"; + LOG(log_dg_) << "Received TransactionPacket with " << packet.transactions.size() + << " unseen transactions:" << unseen_txs_count << " from: " << peer->getId().abridged(); + } +} + +std::pair>> +TransactionPacketHandler::transactionsToSendToPeer(std::shared_ptr peer, + const std::vector &transactions, + uint32_t account_start_index) { + const auto accounts_size = transactions.size(); + bool trx_max_reached = false; + auto account_iterator = account_start_index; + std::pair> result; + // Next peer should continue after the last account of the current peer + uint32_t next_peer_account_index = (account_start_index + 1) % accounts_size; + + while (true) { + // Iterate over transactions from single account + for (auto const &trx : transactions[account_iterator]) { + auto trx_hash = trx->getHash(); + if (peer->isTransactionKnown(trx_hash)) { + continue; + } + // If max number of transactions to be sent is already reached include hashes to be sent + if (trx_max_reached) { + result.second.push_back(trx_hash); + if (result.second.size() == kMaxHashesInPacket) { + // If both transactions and hashes reached max nothing to do for this peer, return + return {next_peer_account_index, std::move(result)}; + } + } else { + result.first.push_back(trx); + if (result.first.size() == kMaxTransactionsInPacket) { + // Max number of transactions reached, save next_peer_account_index for next peer to continue to avoid + // sending same transactions to multiple peers + trx_max_reached = true; + next_peer_account_index = (account_iterator + 1) % accounts_size; + } + } + } + + account_iterator = (account_iterator + 1) % accounts_size; + if (account_iterator == account_start_index) { + // Iterated through all of the transactions, return + return {next_peer_account_index, std::move(result)}; + } + } +} + +std::vector, std::pair>>> +TransactionPacketHandler::transactionsToSendToPeers(std::vector &&transactions) { + // Main goal of the algorithm below is to send different 
transactions and hashes to different peers but still follow + // nonce ordering for single account and not send higher nonces without sending low nonces first + const auto accounts_size = transactions.size(); + if (!accounts_size) { + return {}; + } + std::vector, std::pair>>> + peers_with_transactions_to_send; + auto peers = peers_state_->getAllPeers(); + + // account_index keeps current account index so that different peers will receive + // transactions from different accounts + uint32_t account_index = 0; + for (const auto &peer : peers) { + if (peer.second->syncing_) { + continue; + } + + std::pair> peer_transactions; + std::tie(account_index, peer_transactions) = transactionsToSendToPeer(peer.second, transactions, account_index); + + if (peer_transactions.first.size() > 0) { + peers_with_transactions_to_send.push_back({peer.second, std::move(peer_transactions)}); + } + } + + return peers_with_transactions_to_send; +} + +void TransactionPacketHandler::periodicSendTransactions(std::vector &&transactions) { + auto peers_with_transactions_to_send = transactionsToSendToPeers(std::move(transactions)); + const auto peers_to_send_count = peers_with_transactions_to_send.size(); + if (peers_to_send_count > 0) { + // Sending it in same order favours some peers over others, always start with a different position + uint32_t start_with = rand() % peers_to_send_count; + for (uint32_t i = 0; i < peers_to_send_count; i++) { + auto peer_to_send = peers_with_transactions_to_send[(start_with + i) % peers_to_send_count]; + sendTransactions(peer_to_send.first, std::move(peer_to_send.second)); + } + } +} + +void TransactionPacketHandler::sendTransactions(std::shared_ptr peer, + std::pair> &&transactions) { + if (!peer) return; + const auto peer_id = peer->getId(); + const auto transactions_size = transactions.first.size(); + const auto hashes_size = transactions.second.size(); + + LOG(log_tr_) << "sendTransactions " << transactions.first.size() << " to " << peer_id; + + 
dev::RLPStream s(kTransactionPacketItemCount); + s.appendList(transactions_size + hashes_size); + for (const auto &trx : transactions.first) { + s << trx->getHash(); + } + + for (const auto &trx_hash : transactions.second) { + s << trx_hash; + } + + s.appendList(transactions_size); + + for (const auto &trx : transactions.first) { + s.appendRaw(trx->rlp()); + } + + if (sealAndSend(peer_id, SubprotocolPacketType::kTransactionPacket, std::move(s))) { + for (const auto &trx : transactions.first) { + peer->markTransactionAsKnown(trx->getHash()); + } + } +} + +} // namespace taraxa::network::tarcap::v4 diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/v4/vote_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/v4/vote_packet_handler.cpp new file mode 100644 index 0000000000..bebd419e41 --- /dev/null +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/v4/vote_packet_handler.cpp @@ -0,0 +1,126 @@ +#include "network/tarcap/packets_handlers/v4/vote_packet_handler.hpp" + +#include "pbft/pbft_manager.hpp" +#include "vote_manager/vote_manager.hpp" + +namespace taraxa::network::tarcap::v4 { + +VotePacketHandler::VotePacketHandler(const FullNodeConfig &conf, std::shared_ptr peers_state, + std::shared_ptr packets_stats, + std::shared_ptr pbft_mgr, std::shared_ptr pbft_chain, + std::shared_ptr vote_mgr, + std::shared_ptr slashing_manager, const addr_t &node_addr, + const std::string &logs_prefix) + : ExtVotesPacketHandler(conf, std::move(peers_state), std::move(packets_stats), std::move(pbft_mgr), + std::move(pbft_chain), std::move(vote_mgr), std::move(slashing_manager), node_addr, + logs_prefix + "PBFT_VOTE_PH") {} + +void VotePacketHandler::process(VotePacket &&packet, const std::shared_ptr &peer) { + const auto [current_pbft_round, current_pbft_period] = pbft_mgr_->getPbftRoundAndPeriod(); + + if (packet.pbft_block) { + LOG(log_dg_) << "Received PBFT vote " << packet.vote->getHash() << " with PBFT block " + << 
packet.pbft_block->getBlockHash(); + } else { + LOG(log_dg_) << "Received PBFT vote " << packet.vote->getHash(); + } + + // Update peer's max chain size + if (packet.peer_chain_size.has_value() && *packet.peer_chain_size > peer->pbft_chain_size_) { + peer->pbft_chain_size_ = *packet.peer_chain_size; + } + + const auto vote_hash = packet.vote->getHash(); + + if (!isPbftRelevantVote(packet.vote)) { + LOG(log_dg_) << "Drop irrelevant vote " << vote_hash << " for current pbft state. Vote (period, round, step) = (" + << packet.vote->getPeriod() << ", " << packet.vote->getRound() << ", " << packet.vote->getStep() + << "). Current PBFT (period, round, step) = (" << current_pbft_period << ", " << current_pbft_round + << ", " << pbft_mgr_->getPbftStep() << ")"; + return; + } + + // Do not process vote that has already been validated + if (vote_mgr_->voteAlreadyValidated(vote_hash)) { + LOG(log_dg_) << "Received vote " << vote_hash << " has already been validated"; + return; + } + + if (packet.pbft_block) { + if (packet.pbft_block->getBlockHash() != packet.vote->getBlockHash()) { + std::ostringstream err_msg; + err_msg << "Vote " << packet.vote->getHash().abridged() << " voted block " + << packet.vote->getBlockHash().abridged() << " != actual block " + << packet.pbft_block->getBlockHash().abridged(); + throw MaliciousPeerException(err_msg.str()); + } + + peer->markPbftBlockAsKnown(packet.pbft_block->getBlockHash()); + } + + if (!processVote(packet.vote, packet.pbft_block, peer, true)) { + return; + } + + // Do not mark it before, as peers have small caches of known votes. 
Only mark gossiping votes + peer->markPbftVoteAsKnown(vote_hash); + + pbft_mgr_->gossipVote(packet.vote, packet.pbft_block); +} + +void VotePacketHandler::onNewPbftVote(const std::shared_ptr &vote, const std::shared_ptr &block, + bool rebroadcast) { + for (const auto &peer : peers_state_->getAllPeers()) { + if (peer.second->syncing_) { + LOG(log_dg_) << " PBFT vote " << vote->getHash() << " not sent to " << peer.first << " peer syncing"; + continue; + } + + if (!rebroadcast && peer.second->isPbftVoteKnown(vote->getHash())) { + continue; + } + + // Send also block in case it is not known for the pear or rebroadcast == true + if (rebroadcast || !peer.second->isPbftBlockKnown(vote->getBlockHash())) { + sendPbftVote(peer.second, vote, block); + } else { + sendPbftVote(peer.second, vote, nullptr); + } + } +} + +void VotePacketHandler::sendPbftVote(const std::shared_ptr &peer, const std::shared_ptr &vote, + const std::shared_ptr &block) { + if (block && block->getBlockHash() != vote->getBlockHash()) { + LOG(log_er_) << "Vote " << vote->getHash().abridged() << " voted block " << vote->getBlockHash().abridged() + << " != actual block " << block->getBlockHash().abridged(); + return; + } + + dev::RLPStream s; + + if (block) { + // TODO[2865]: use packet class to automatically create rlp + s = dev::RLPStream(v4::VotePacket::kExtendedVotePacketSize); + s.appendRaw(vote->rlp(true, false)); + s.appendRaw(block->rlp(true)); + s.append(pbft_chain_->getPbftChainSize()); + } else { + // TODO[2865]: use packet class to automatically create rlp + s = dev::RLPStream(v4::VotePacket::kVotePacketSize); + s.appendRaw(vote->rlp(true, false)); + } + + if (sealAndSend(peer->getId(), SubprotocolPacketType::kVotePacket, std::move(s))) { + peer->markPbftVoteAsKnown(vote->getHash()); + if (block) { + peer->markPbftBlockAsKnown(block->getBlockHash()); + LOG(log_dg_) << " PBFT vote " << vote->getHash() << " together with block " << block->getBlockHash() + << " sent to " << peer->getId(); + } else { 
+ LOG(log_dg_) << " PBFT vote " << vote->getHash() << " sent to " << peer->getId(); + } + } +} + +} // namespace taraxa::network::tarcap::v4 diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/v4/votes_bundle_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/v4/votes_bundle_packet_handler.cpp new file mode 100644 index 0000000000..0eb5c50221 --- /dev/null +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/v4/votes_bundle_packet_handler.cpp @@ -0,0 +1,102 @@ +#include "network/tarcap/packets_handlers/v4/votes_bundle_packet_handler.hpp" + +#include "pbft/pbft_manager.hpp" +#include "vote/votes_bundle_rlp.hpp" +#include "vote_manager/vote_manager.hpp" + +namespace taraxa::network::tarcap::v4 { + +VotesBundlePacketHandler::VotesBundlePacketHandler(const FullNodeConfig &conf, std::shared_ptr peers_state, + std::shared_ptr packets_stats, + std::shared_ptr pbft_mgr, + std::shared_ptr pbft_chain, + std::shared_ptr vote_mgr, + std::shared_ptr slashing_manager, + const addr_t &node_addr, const std::string &logs_prefix) + : ExtVotesPacketHandler(conf, std::move(peers_state), std::move(packets_stats), std::move(pbft_mgr), + std::move(pbft_chain), std::move(vote_mgr), std::move(slashing_manager), node_addr, + logs_prefix + "VOTES_BUNDLE_PH") {} + +void VotesBundlePacketHandler::process(VotesBundlePacket &&packet, const std::shared_ptr &peer) { + const auto [current_pbft_round, current_pbft_period] = pbft_mgr_->getPbftRoundAndPeriod(); + + const auto &reference_vote = packet.votes.front(); + const auto votes_bundle_votes_type = reference_vote->getType(); + + // Votes sync bundles are allowed to cotain only votes bundles of the same type, period, round and step so if first + // vote is irrelevant, all of them are + if (!isPbftRelevantVote(packet.votes[0])) { + LOG(log_wr_) << "Drop votes sync bundle as it is irrelevant for current pbft state. 
Votes (period, round, step) = (" + << packet.votes_bundle_pbft_period << ", " << packet.votes_bundle_pbft_round << ", " + << reference_vote->getStep() << "). Current PBFT (period, round, step) = (" << current_pbft_period + << ", " << current_pbft_round << ", " << pbft_mgr_->getPbftStep() << ")"; + return; + } + + // VotesBundlePacket does not support propose votes + if (reference_vote->getType() == PbftVoteTypes::propose_vote) { + LOG(log_er_) << "Dropping votes bundle packet due to received \"propose\" votes from " << peer->getId() + << ". The peer may be a malicious player, will be disconnected"; + disconnect(peer->getId(), dev::p2p::UserReason); + return; + } + + // Process processStandardVote is called with false in case of next votes bundle -> does not check max boundaries + // for round and step to actually being able to sync the current round in case network is stalled + bool check_max_round_step = true; + if (votes_bundle_votes_type == PbftVoteTypes::cert_vote || votes_bundle_votes_type == PbftVoteTypes::next_vote) { + check_max_round_step = false; + } + + size_t processed_votes_count = 0; + for (const auto &vote : packet.votes) { + peer->markPbftVoteAsKnown(vote->getHash()); + + // Do not process vote that has already been validated + if (vote_mgr_->voteAlreadyValidated(vote->getHash())) { + LOG(log_dg_) << "Received vote " << vote->getHash() << " has already been validated"; + continue; + } + + LOG(log_dg_) << "Received sync vote " << vote->getHash().abridged(); + + if (!processVote(vote, nullptr, peer, check_max_round_step)) { + continue; + } + + processed_votes_count++; + } + + LOG(log_nf_) << "Received " << packet.votes.size() << " (processed " << processed_votes_count + << " ) sync votes from peer " << peer->getId() << " node current round " << current_pbft_round + << ", peer pbft round " << packet.votes_bundle_pbft_round; + + onNewPbftVotesBundle(packet.votes, false, peer->getId()); +} + +void VotesBundlePacketHandler::onNewPbftVotesBundle(const 
std::vector> &votes, + bool rebroadcast, + const std::optional &exclude_node) { + for (const auto &peer : peers_state_->getAllPeers()) { + if (peer.second->syncing_) { + continue; + } + + if (exclude_node.has_value() && *exclude_node == peer.first) { + continue; + } + + std::vector> peer_votes; + for (const auto &vote : votes) { + if (!rebroadcast && peer.second->isPbftVoteKnown(vote->getHash())) { + continue; + } + + peer_votes.push_back(vote); + } + + sendPbftVotesBundle(peer.second, std::move(peer_votes)); + } +} + +} // namespace taraxa::network::tarcap::v4 diff --git a/libraries/core_libs/network/src/tarcap/taraxa_capability.cpp b/libraries/core_libs/network/src/tarcap/taraxa_capability.cpp index 06940294fc..7a43004033 100644 --- a/libraries/core_libs/network/src/tarcap/taraxa_capability.cpp +++ b/libraries/core_libs/network/src/tarcap/taraxa_capability.cpp @@ -16,6 +16,16 @@ #include "network/tarcap/packets_handlers/latest/transaction_packet_handler.hpp" #include "network/tarcap/packets_handlers/latest/vote_packet_handler.hpp" #include "network/tarcap/packets_handlers/latest/votes_bundle_packet_handler.hpp" +#include "network/tarcap/packets_handlers/v4/dag_block_packet_handler.hpp" +#include "network/tarcap/packets_handlers/v4/dag_sync_packet_handler.hpp" +#include "network/tarcap/packets_handlers/v4/get_dag_sync_packet_handler.hpp" +#include "network/tarcap/packets_handlers/v4/pbft_sync_packet_handler.hpp" +#include "network/tarcap/packets_handlers/v4/pillar_vote_packet_handler.hpp" +#include "network/tarcap/packets_handlers/v4/pillar_votes_bundle_packet_handler.hpp" +#include "network/tarcap/packets_handlers/v4/status_packet_handler.hpp" +#include "network/tarcap/packets_handlers/v4/transaction_packet_handler.hpp" +#include "network/tarcap/packets_handlers/v4/vote_packet_handler.hpp" +#include "network/tarcap/packets_handlers/v4/votes_bundle_packet_handler.hpp" #include "network/tarcap/shared_states/pbft_syncing_state.hpp" #include "node/node.hpp" #include 
"pbft/pbft_chain.hpp" @@ -290,4 +300,57 @@ const TaraxaCapability::InitPacketsHandlers TaraxaCapability::kInitLatestVersion return packets_handlers; }; +const TaraxaCapability::InitPacketsHandlers TaraxaCapability::kInitV4Handlers = + [](const std::string &logs_prefix, const FullNodeConfig &config, const h256 &genesis_hash, + const std::shared_ptr &peers_state, const std::shared_ptr &pbft_syncing_state, + const std::shared_ptr &packets_stats, const std::shared_ptr &db, + const std::shared_ptr &pbft_mgr, const std::shared_ptr &pbft_chain, + const std::shared_ptr &vote_mgr, const std::shared_ptr &dag_mgr, + const std::shared_ptr &trx_mgr, const std::shared_ptr &slashing_manager, + const std::shared_ptr &pillar_chain_mgr, TarcapVersion, + const addr_t &node_addr) { + auto packets_handlers = std::make_shared(); + // Consensus packets with high processing priority + packets_handlers->registerHandler(config, peers_state, packets_stats, pbft_mgr, pbft_chain, + vote_mgr, slashing_manager, node_addr, logs_prefix); + packets_handlers->registerHandler( + config, peers_state, packets_stats, pbft_mgr, pbft_chain, vote_mgr, slashing_manager, node_addr, logs_prefix); + packets_handlers->registerHandler( + config, peers_state, packets_stats, pbft_mgr, pbft_chain, vote_mgr, slashing_manager, node_addr, logs_prefix); + + // Standard packets with mid processing priority + packets_handlers->registerHandler(config, peers_state, packets_stats, + pbft_syncing_state, pbft_chain, pbft_mgr, dag_mgr, + trx_mgr, db, node_addr, logs_prefix); + + packets_handlers->registerHandler(config, peers_state, packets_stats, trx_mgr, + node_addr, logs_prefix); + + // Non critical packets with low processing priority + packets_handlers->registerHandler(config, peers_state, packets_stats, pbft_syncing_state, + pbft_chain, pbft_mgr, dag_mgr, db, genesis_hash, + node_addr, logs_prefix); + packets_handlers->registerHandler(config, peers_state, packets_stats, trx_mgr, + dag_mgr, db, node_addr, logs_prefix); + 
+ packets_handlers->registerHandler(config, peers_state, packets_stats, + pbft_syncing_state, pbft_chain, pbft_mgr, dag_mgr, + trx_mgr, db, node_addr, logs_prefix); + + packets_handlers->registerHandler( + config, peers_state, packets_stats, pbft_syncing_state, pbft_chain, vote_mgr, db, node_addr, logs_prefix); + + packets_handlers->registerHandler(config, peers_state, packets_stats, + pbft_syncing_state, pbft_chain, pbft_mgr, dag_mgr, + vote_mgr, db, node_addr, logs_prefix); + packets_handlers->registerHandler(config, peers_state, packets_stats, + pillar_chain_mgr, node_addr, logs_prefix); + packets_handlers->registerHandler(config, peers_state, packets_stats, + pillar_chain_mgr, node_addr, logs_prefix); + packets_handlers->registerHandler(config, peers_state, packets_stats, + pillar_chain_mgr, node_addr, logs_prefix); + + return packets_handlers; + }; + } // namespace taraxa::network::tarcap diff --git a/libraries/core_libs/storage/include/storage/storage.hpp b/libraries/core_libs/storage/include/storage/storage.hpp index 70582c4159..51266fc07c 100644 --- a/libraries/core_libs/storage/include/storage/storage.hpp +++ b/libraries/core_libs/storage/include/storage/storage.hpp @@ -224,6 +224,7 @@ class DbStorage : public std::enable_shared_from_this { // Period data void savePeriodData(const PeriodData& period_data, Batch& write_batch); void clearPeriodDataHistory(PbftPeriod period, uint64_t dag_level_to_keep); + // TODO[2868]: return PeriodData instead of bytes dev::bytes getPeriodDataRaw(PbftPeriod period) const; std::optional getPbftBlock(PbftPeriod period) const; std::vector> getPeriodCertVotes(PbftPeriod period) const; diff --git a/libraries/types/dag_block/include/dag/dag_block.hpp b/libraries/types/dag_block/include/dag/dag_block.hpp index 376507e786..55ccd4dd7e 100644 --- a/libraries/types/dag_block/include/dag/dag_block.hpp +++ b/libraries/types/dag_block/include/dag/dag_block.hpp @@ -1,6 +1,7 @@ #pragma once #include 
"common/default_construct_copyable_movable.hpp" +#include "common/encoding_rlp.hpp" #include "vdf/sortition.hpp" namespace taraxa { @@ -113,6 +114,8 @@ class DagBlock { */ dev::RLPStream streamRLP(bool include_sig, bool include_trxs = true) const; + HAS_RLP_FIELDS + private: blk_hash_t sha3(bool include_sig) const; }; diff --git a/libraries/types/dag_block/src/dag_block.cpp b/libraries/types/dag_block/src/dag_block.cpp index 5f9c0f2e57..771551e8b4 100644 --- a/libraries/types/dag_block/src/dag_block.cpp +++ b/libraries/types/dag_block/src/dag_block.cpp @@ -199,4 +199,11 @@ bytes DagBlock::rlp(bool include_sig, bool include_trxs) const { blk_hash_t DagBlock::sha3(bool include_sig) const { return dev::sha3(rlp(include_sig)); } +void DagBlock::rlp(::taraxa::util::RLPDecoderRef encoding) { *this = DagBlock(encoding.value); } + +void DagBlock::rlp(::taraxa::util::RLPEncoderRef encoding) const { + encoding.appendList(1); + encoding.appendRaw(rlp(true)); +} + } // namespace taraxa diff --git a/libraries/types/pbft_block/include/pbft/pbft_block.hpp b/libraries/types/pbft_block/include/pbft/pbft_block.hpp index 07bb41fe59..9a7640fb58 100644 --- a/libraries/types/pbft_block/include/pbft/pbft_block.hpp +++ b/libraries/types/pbft_block/include/pbft/pbft_block.hpp @@ -5,6 +5,7 @@ #include #include +#include "common/encoding_rlp.hpp" #include "common/types.hpp" #include "pbft_block_extra_data.hpp" @@ -19,19 +20,8 @@ namespace taraxa { * hash, DAG blocks ordering hash, period number, timestamp, proposer address, and proposer signature. 
*/ class PbftBlock { - blk_hash_t block_hash_; - blk_hash_t prev_block_hash_; - blk_hash_t dag_block_hash_as_pivot_; - blk_hash_t order_hash_; - blk_hash_t final_chain_hash_; - PbftPeriod period_; // Block index, PBFT head block is period 0, first PBFT block is period 1 - uint64_t timestamp_; - addr_t beneficiary_; - sig_t signature_; - std::vector reward_votes_; // Cert votes in previous period - std::optional extra_data_; - public: + PbftBlock() = default; PbftBlock(const blk_hash_t& prev_blk_hash, const blk_hash_t& dag_blk_hash_as_pivot, const blk_hash_t& order_hash, const blk_hash_t& final_chain_hash, PbftPeriod period, const addr_t& beneficiary, const secret_t& sk, std::vector&& reward_votes, const std::optional& extra_data = {}); @@ -141,6 +131,8 @@ class PbftBlock { const auto& getRewardVotes() const { return reward_votes_; } + HAS_RLP_FIELDS + private: /** * @brief Set PBFT block hash and block proposer address @@ -152,6 +144,19 @@ class PbftBlock { * */ void checkUniqueRewardVotes(); + + private: + blk_hash_t block_hash_; + blk_hash_t prev_block_hash_; + blk_hash_t dag_block_hash_as_pivot_; + blk_hash_t order_hash_; + blk_hash_t final_chain_hash_; + PbftPeriod period_; // Block index, PBFT head block is period 0, first PBFT block is period 1 + uint64_t timestamp_; + addr_t beneficiary_; + sig_t signature_; + std::vector reward_votes_; // Cert votes in previous period + std::optional extra_data_; }; std::ostream& operator<<(std::ostream& strm, const PbftBlock& pbft_blk); diff --git a/libraries/types/pbft_block/include/pbft/period_data.hpp b/libraries/types/pbft_block/include/pbft/period_data.hpp index 432fbe5900..060a75268b 100644 --- a/libraries/types/pbft_block/include/pbft/period_data.hpp +++ b/libraries/types/pbft_block/include/pbft/period_data.hpp @@ -5,6 +5,7 @@ #include +#include "common/encoding_rlp.hpp" #include "common/types.hpp" #include "dag/dag_block.hpp" #include "transaction/transaction.hpp" @@ -59,6 +60,8 @@ class PeriodData { * @brief Clear 
PBFT block, certify votes, DAG blocks, and transactions */ void clear(); + + HAS_RLP_FIELDS }; std::ostream& operator<<(std::ostream& strm, PeriodData const& b); diff --git a/libraries/types/pbft_block/src/pbft_block.cpp b/libraries/types/pbft_block/src/pbft_block.cpp index 0fc8875784..11491f8cfa 100644 --- a/libraries/types/pbft_block/src/pbft_block.cpp +++ b/libraries/types/pbft_block/src/pbft_block.cpp @@ -129,6 +129,13 @@ bytes PbftBlock::rlp(bool include_sig) const { return strm.invalidate(); } +void PbftBlock::rlp(::taraxa::util::RLPDecoderRef encoding) { *this = PbftBlock(encoding.value); } + +void PbftBlock::rlp(::taraxa::util::RLPEncoderRef encoding) const { + encoding.appendList(1); + encoding.appendRaw(rlp(true)); +} + std::ostream& operator<<(std::ostream& strm, PbftBlock const& pbft_blk) { strm << pbft_blk.getJsonStr(); return strm; diff --git a/libraries/types/pbft_block/src/period_data.cpp b/libraries/types/pbft_block/src/period_data.cpp index 0ad9dcfabd..99af20e626 100644 --- a/libraries/types/pbft_block/src/period_data.cpp +++ b/libraries/types/pbft_block/src/period_data.cpp @@ -132,6 +132,13 @@ bytes PeriodData::ToOldPeriodData(const bytes& rlp) { return s.invalidate(); } +void PeriodData::rlp(::taraxa::util::RLPDecoderRef encoding) { *this = PeriodData(encoding.value); } + +void PeriodData::rlp(::taraxa::util::RLPEncoderRef encoding) const { + encoding.appendList(1); + encoding.appendRaw(rlp()); +} + std::ostream& operator<<(std::ostream& strm, PeriodData const& b) { strm << "[PeriodData] : " << b.pbft_blk << " , num of votes " << b.previous_block_cert_votes.size() << std::endl; return strm; diff --git a/libraries/types/transaction/include/transaction/transaction.hpp b/libraries/types/transaction/include/transaction/transaction.hpp index 178be697a0..04c8477e48 100644 --- a/libraries/types/transaction/include/transaction/transaction.hpp +++ b/libraries/types/transaction/include/transaction/transaction.hpp @@ -4,6 +4,7 @@ #include #include 
+#include "common/encoding_rlp.hpp" #include "common/types.hpp" namespace taraxa { @@ -75,6 +76,8 @@ struct Transaction { const bytes &rlp() const; Json::Value toJSON() const; + + HAS_RLP_FIELDS }; using SharedTransaction = std::shared_ptr; diff --git a/libraries/types/transaction/src/transaction.cpp b/libraries/types/transaction/src/transaction.cpp index 083b2a5de9..4d8a44f7c1 100644 --- a/libraries/types/transaction/src/transaction.cpp +++ b/libraries/types/transaction/src/transaction.cpp @@ -174,4 +174,11 @@ Json::Value Transaction::toJSON() const { return res; } +void Transaction::rlp(::taraxa::util::RLPDecoderRef encoding) { fromRLP(encoding.value, false, {}); } + +void Transaction::rlp(::taraxa::util::RLPEncoderRef encoding) const { + encoding.appendList(1); + encoding.appendRaw(rlp()); +} + } // namespace taraxa diff --git a/libraries/types/vote/include/vote/pbft_vote.hpp b/libraries/types/vote/include/vote/pbft_vote.hpp index f051213ecc..f8225eeb5d 100644 --- a/libraries/types/vote/include/vote/pbft_vote.hpp +++ b/libraries/types/vote/include/vote/pbft_vote.hpp @@ -2,6 +2,7 @@ #include +#include "common/encoding_rlp.hpp" #include "common/vrf_wrapper.hpp" #include "vote.hpp" #include "vrf_sortition.hpp" @@ -128,6 +129,8 @@ class PbftVote : public Vote { */ Json::Value toJSON() const; + HAS_RLP_FIELDS + private: /** * @brief Secure Hash Algorithm 3 diff --git a/libraries/types/vote/src/pbft_vote.cpp b/libraries/types/vote/src/pbft_vote.cpp index 2167be8e25..c89eee03ee 100644 --- a/libraries/types/vote/src/pbft_vote.cpp +++ b/libraries/types/vote/src/pbft_vote.cpp @@ -53,6 +53,7 @@ PbftVote::PbftVote(secret_t const& node_sk, VrfPbftSortition vrf_sortition, blk_ bool PbftVote::operator==(const PbftVote& other) const { return rlp() == other.rlp(); } +// TODO: rename to something else bytes PbftVote::rlp(bool inc_sig, bool inc_weight) const { dev::RLPStream s; uint32_t number_of_items = 2; @@ -124,4 +125,11 @@ PbftStep PbftVote::getStep() const { return 
vrf_sortition_.pbft_msg_.step_; } vote_hash_t PbftVote::sha3(bool inc_sig) const { return dev::sha3(rlp(inc_sig)); } +void PbftVote::rlp(::taraxa::util::RLPDecoderRef encoding) { *this = PbftVote(encoding.value); } + +void PbftVote::rlp(::taraxa::util::RLPEncoderRef encoding) const { + encoding.appendList(1); + encoding.appendRaw(rlp()); +} + } // namespace taraxa \ No newline at end of file diff --git a/tests/tarcap_threadpool_test.cpp b/tests/tarcap_threadpool_test.cpp index 199054ab9c..8da3a3a005 100644 --- a/tests/tarcap_threadpool_test.cpp +++ b/tests/tarcap_threadpool_test.cpp @@ -1,1092 +1,1092 @@ - #include - - #include "config/config.hpp" - #include "config/version.hpp" - #include "dag/dag_block.hpp" - #include "logger/logger.hpp" - #include "network/tarcap/packets_handler.hpp" - #include "network/tarcap/packets_handlers/latest/common/packet_handler.hpp" - #include "network/tarcap/shared_states/peers_state.hpp" - #include "network/threadpool/tarcap_thread_pool.hpp" - #include "test_util/test_util.hpp" - - namespace taraxa::core_tests { - - using namespace std::literals; - -// Do not use NodesTest from "test_util/gtest.hpp" as its functionality is not needed in this test - struct NodesTest : virtual testing::Test { - testing::UnitTest* current_test = ::testing::UnitTest::GetInstance(); - testing::TestInfo const* current_test_info = current_test->current_test_info(); - - NodesTest() = default; - virtual ~NodesTest() = default; - - NodesTest(const NodesTest&) = delete; - NodesTest(NodesTest&&) = delete; - NodesTest& operator=(const NodesTest&) = delete; - NodesTest& operator=(NodesTest&&) = delete; - }; - - struct TarcapTpTest : NodesTest {}; - - using namespace taraxa::network; - - class PacketsProcessingInfo { - public: - struct PacketProcessingTimes { - std::chrono::steady_clock::time_point start_time_; - std::chrono::steady_clock::time_point finish_time_; - }; - - public: - void addPacketProcessingTimes(threadpool::PacketData::PacketId packet_id, - const 
PacketProcessingTimes& packet_processing_times) { - std::scoped_lock lock(mutex_); - bool res = packets_processing_times_.emplace(packet_id, packet_processing_times).second; - assert(res); - } - - PacketProcessingTimes getPacketProcessingTimes(threadpool::PacketData::PacketId packet_id) const { - std::shared_lock lock(mutex_); - - auto found_packet_info = packets_processing_times_.find(packet_id); - - // Failed to obtain processing times for packet id: packet_id. Processing did not finish yet. This should be - // caught in processing times comparing - if (found_packet_info == packets_processing_times_.end()) { - return {}; - } - - return found_packet_info->second; - } - - size_t getPacketProcessingTimesCount() const { - std::shared_lock lock(mutex_); - return packets_processing_times_.size(); - } - - private: - std::unordered_map packets_processing_times_; - mutable std::shared_mutex mutex_; - }; - -// Help functions for tests - struct HandlersInitData { - FullNodeConfig conf; - dev::p2p::NodeID sender_node_id; - addr_t own_node_addr; - - std::shared_ptr peers_state; - std::shared_ptr packets_stats; - std::shared_ptr packets_processing_info; - - dev::p2p::NodeID copySender() { return sender_node_id; } - }; - - struct DummyPacket { - DummyPacket(const dev::RLP& packet_rlp) {} - }; - - class DummyPacketHandler : public tarcap::PacketHandler { - public: - DummyPacketHandler(const HandlersInitData& init_data, const std::string& log_channel_name, - uint32_t processing_delay_ms) - : PacketHandler(init_data.conf, init_data.peers_state, init_data.packets_stats, init_data.own_node_addr, - log_channel_name), - processing_delay_ms_(processing_delay_ms), - packets_proc_info_(init_data.packets_processing_info) {} - - virtual ~DummyPacketHandler() = default; - DummyPacketHandler(const DummyPacketHandler&) = default; - DummyPacketHandler(DummyPacketHandler&&) = default; - DummyPacketHandler& operator=(const DummyPacketHandler&) = delete; - DummyPacketHandler& 
operator=(DummyPacketHandler&&) = delete; - - private: - void validatePacketRlpFormat([[maybe_unused]] const threadpool::PacketData& packet_data) const override {} - - void process(DummyPacket&& packet_data, - [[maybe_unused]] const std::shared_ptr& peer) override { - // Note do not use LOG() before saving start & finish time as it is internally synchronized and can - // cause delays, which result in tests fails - auto start_time = std::chrono::steady_clock::now(); - std::this_thread::sleep_for(std::chrono::milliseconds(processing_delay_ms_)); - auto finish_time = std::chrono::steady_clock::now(); - - LOG(log_dg_) << "Processing packet: " << packet_data.type_str_ << ", id(" << packet_data.id_ << ") finished. " - << "Start time: " << start_time.time_since_epoch().count() - << ", finish time: " << finish_time.time_since_epoch().count(); - - packets_proc_info_->addPacketProcessingTimes(packet_data.id_, {start_time, finish_time}); - } - - uint32_t processing_delay_ms_{0}; - std::shared_ptr packets_proc_info_; - }; - - class DummyTransactionPacketHandler : public DummyPacketHandler { - public: - DummyTransactionPacketHandler(const HandlersInitData& init_data, const std::string& log_channel_name, - uint32_t processing_delay_ms) - : DummyPacketHandler(init_data, log_channel_name, processing_delay_ms) {} - - // Packet type that is processed by this handler - static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::kTransactionPacket; - }; - - class DummyDagBlockPacketHandler : public DummyPacketHandler { - public: - DummyDagBlockPacketHandler(const HandlersInitData& init_data, const std::string& log_channel_name, - uint32_t processing_delay_ms) - : DummyPacketHandler(init_data, log_channel_name, processing_delay_ms) {} - - // Packet type that is processed by this handler - static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::kDagBlockPacket; - }; - - class DummyStatusPacketHandler : public DummyPacketHandler { - public: - 
DummyStatusPacketHandler(const HandlersInitData& init_data, const std::string& log_channel_name, - uint32_t processing_delay_ms) - : DummyPacketHandler(init_data, log_channel_name, processing_delay_ms) {} - - // Packet type that is processed by this handler - static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::kStatusPacket; - }; - - class DummyVotePacketHandler : public DummyPacketHandler { - public: - DummyVotePacketHandler(const HandlersInitData& init_data, const std::string& log_channel_name, - uint32_t processing_delay_ms) - : DummyPacketHandler(init_data, log_channel_name, processing_delay_ms) {} - - // Packet type that is processed by this handler - static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::kVotePacket; - }; - - class DummyGetNextVotesBundlePacketHandler : public DummyPacketHandler { - public: - DummyGetNextVotesBundlePacketHandler(const HandlersInitData& init_data, const std::string& log_channel_name, - uint32_t processing_delay_ms) - : DummyPacketHandler(init_data, log_channel_name, processing_delay_ms) {} - - // Packet type that is processed by this handler - static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::kGetNextVotesSyncPacket; - }; - - class DummyVotesBundlePacketHandler : public DummyPacketHandler { - public: - DummyVotesBundlePacketHandler(const HandlersInitData& init_data, const std::string& log_channel_name, - uint32_t processing_delay_ms) - : DummyPacketHandler(init_data, log_channel_name, processing_delay_ms) {} - - // Packet type that is processed by this handler - static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::kVotesBundlePacket; - }; - - class DummyGetDagSyncPacketHandler : public DummyPacketHandler { - public: - DummyGetDagSyncPacketHandler(const HandlersInitData& init_data, const std::string& log_channel_name, - uint32_t processing_delay_ms) - : DummyPacketHandler(init_data, log_channel_name, processing_delay_ms) {} - - // 
Packet type that is processed by this handler - static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::kGetDagSyncPacket; - }; - - class DummyGetPbftSyncPacketHandler : public DummyPacketHandler { - public: - DummyGetPbftSyncPacketHandler(const HandlersInitData& init_data, const std::string& log_channel_name, - uint32_t processing_delay_ms) - : DummyPacketHandler(init_data, log_channel_name, processing_delay_ms) {} - - // Packet type that is processed by this handler - static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::kGetPbftSyncPacket; - }; - - class DummyDagSyncPacketHandler : public DummyPacketHandler { - public: - DummyDagSyncPacketHandler(const HandlersInitData& init_data, const std::string& log_channel_name, - uint32_t processing_delay_ms) - : DummyPacketHandler(init_data, log_channel_name, processing_delay_ms) {} - - // Packet type that is processed by this handler - static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::kDagSyncPacket; - }; - - class DummyPbftSyncPacketHandler : public DummyPacketHandler { - public: - DummyPbftSyncPacketHandler(const HandlersInitData& init_data, const std::string& log_channel_name, - uint32_t processing_delay_ms) - : DummyPacketHandler(init_data, log_channel_name, processing_delay_ms) {} - - // Packet type that is processed by this handler - static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::kPbftSyncPacket; - }; - - HandlersInitData createHandlersInitData() { - HandlersInitData ret_init_data; - - ret_init_data.sender_node_id = dev::p2p::NodeID(1); - ret_init_data.own_node_addr = addr_t(2); - ret_init_data.peers_state = std::make_shared(std::weak_ptr(), - FullNodeConfig()); ret_init_data.packets_stats = - std::make_shared(std::chrono::milliseconds(0), ret_init_data.own_node_addr); - ret_init_data.packets_processing_info = std::make_shared(); - - // Enable packets from sending peer to be processed - auto peer = 
ret_init_data.peers_state->addPendingPeer(ret_init_data.sender_node_id, ""); - ret_init_data.peers_state->setPeerAsReadyToSendMessages(ret_init_data.sender_node_id, peer); - - return ret_init_data; - } - - std::pair createPacket( - const dev::p2p::NodeID& sender_node_id, SubprotocolPacketType packet_type, - std::optional> packet_rlp_bytes = {}) { - if (packet_rlp_bytes.has_value()) { - threadpool::PacketData packet_data(packet_type, sender_node_id, std::move(packet_rlp_bytes.value())); - return {TARAXA_NET_VERSION, std::move(packet_data)}; - } - - dev::RLPStream s(0); - threadpool::PacketData packet_data(packet_type, sender_node_id, s.invalidate()); - return {TARAXA_NET_VERSION, std::move(packet_data)}; - } - - bytes createDagBlockRlp(level_t level, uint32_t sig = 777) { - // Creates dag block rlp as it is required for blocking mask to extract dag block level - DagBlock blk(blk_hash_t(10), level, {}, {}, sig_t(sig), blk_hash_t(1), addr_t(15)); - return blk.rlp(true); - } - -/** - * @brief Check all combinations(without repetition) of provided packets that they were processed concurrently: - * - packet1.start_time < packet2.finish_time - * - packet2.start_time < packet1.finish_time - * - * @param packets - */ - void checkConcurrentProcessing( - const std::vector>& packets) { - assert(packets.size() >= 2); - - for (size_t i = 0; i < packets.size(); i++) { - const auto& packet_l = packets[0]; - for (size_t j = i + 1; j < packets.size(); j++) { - const auto& packet_r = packets[j]; - EXPECT_LT(packet_l.first.start_time_, packet_r.first.finish_time_) - << packet_l.second << ".start_time < " << packet_r.second << ".finish_time"; - EXPECT_LT(packet_r.first.start_time_, packet_l.first.finish_time_) - << packet_r.second << ".start_time < " << packet_l.second << ".finish_time"; - } - } -} - -/** - * @brief Check all combinations(without repetition) of provided packets that they were processed serial: - * - packet1.finish_time < packet2.start_time - * - * @param packets - */ - 
void checkSerialProcessing( - const std::vector>& packets) { - assert(packets.size() >= 2); - - for (size_t i = 0; i < packets.size(); i++) { - const auto& packet_l = packets[0]; - for (size_t j = i + 1; j < packets.size(); j++) { - const auto& packet_r = packets[j]; - EXPECT_LT(packet_l.first.finish_time_, packet_r.first.start_time_) - << packet_l.second << ".finish_time < " << packet_r.second << ".start_time"; - } - } -} - - size_t queuesSize(const threadpool::PacketsThreadPool& tp) { - const auto [high_priority_queue_size, mid_priority_queue_size, low_priority_queue_size] = tp.getQueueSize(); - - return high_priority_queue_size + mid_priority_queue_size + low_priority_queue_size; -} - -// Threshold for packets queue to be emptied - constexpr std::chrono::milliseconds QUEUE_EMPTIED_WAIT_TRESHOLD_MS = 15ms; - -// Test all packet types if they are either in non-blocking or blocking list of packets - TEST_F(TarcapTpTest, packets_blocking_dependencies) { - network::threadpool::PriorityQueue priority_queue(3); - - for (auto packet_type = SubprotocolPacketType{0}; packet_type != SubprotocolPacketType::kPacketCount; - packet_type = static_cast(static_cast(packet_type) + 1)) { - // Skip unreal packet types - switch (packet_type) { - case SubprotocolPacketType::kHighPriorityPackets: - case SubprotocolPacketType::kMidPriorityPackets: - case SubprotocolPacketType::kLowPriorityPackets: - case SubprotocolPacketType::kPacketCount: - continue; - } - - std::vector packet_bytes; - - // Generate proper rlp for packets that need it for processing - if (packet_type == SubprotocolPacketType::kDagBlockPacket) { - DagBlock blk(blk_hash_t(1), 1, {}, {trx_hash_t(1), trx_hash_t(2)}, sig_t(3), blk_hash_t(0x4), addr_t(5)); - packet_bytes = blk.rlp(true); - } - network::threadpool::PacketData packet_data{packet_type, {}, std::move(packet_bytes)}; - packet_data.id_ = static_cast(packet_type); - - bool is_non_blocking_packet = priority_queue.isNonBlockingPacket(packet_data.type_); - bool 
is_blocking_packet = priority_queue.updateBlockingDependencies(packet_data); - - EXPECT_TRUE(is_non_blocking_packet != is_blocking_packet); - } - } - -// Test if all "block-free" packets are processed concurrently -// Note: in case someone creates new blocking dependency and does not adjust tests, this test should fail - TEST_F(TarcapTpTest, block_free_packets) { - HandlersInitData init_data = createHandlersInitData(); - - // Creates sender 2 to bypass peer order block on Transaction -> DagBlock packet. In case those packets sent - // 2 different senders those packets are "block-free" - dev::p2p::NodeID sender2(3); - auto peer = init_data.peers_state->addPendingPeer(sender2, ""); - init_data.peers_state->setPeerAsReadyToSendMessages(sender2, peer); - - auto packets_handler = std::make_shared(); - - packets_handler->registerHandler(init_data, "TX_PH", 20); - packets_handler->registerHandler(init_data, "DAG_BLOCK_PH", 20); - packets_handler->registerHandler(init_data, "STATUS_PH", 20); - packets_handler->registerHandler(init_data, "VOTE_PH", 20); - packets_handler->registerHandler(init_data, "GET_NEXT_VOTES_SYNC_PH", 20); - packets_handler->registerHandler(init_data, "VOTES_SYNC_PH", 20); - - // Creates threadpool - // Note: make num of threads >= num of packets to check if they are processed concurrently without blocks, - otherwise - // some blocks would be blocked for processing due to max threads limit - threadpool::PacketsThreadPool tp(18); - tp.setPacketsHandlers(TARAXA_NET_VERSION, packets_handler); - - // Pushes packets to the tp - auto packet = createPacket(init_data.copySender(), SubprotocolPacketType::kTransactionPacket, {}); - if (packet.second.rlp_.isList()) { - std::cout << "is list"; - } else { - std::cout << "not list"; - } - const auto packet0_tx_id = - tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kTransactionPacket, {})).value(); - const auto packet1_tx_id = - tp.push(createPacket(init_data.copySender(), 
SubprotocolPacketType::kTransactionPacket, {})).value(); - const auto packet2_tx_id = - tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kTransactionPacket, {})).value(); - const auto packet3_tx_id = - tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kTransactionPacket, {})).value(); - - const auto packet4_dag_block_id = - tp.push( - createPacket(dev::p2p::NodeID(sender2), SubprotocolPacketType::kDagBlockPacket, {createDagBlockRlp(0, - 1)})) - .value(); - const auto packet5_dag_block_id = - tp.push( - createPacket(dev::p2p::NodeID(sender2), SubprotocolPacketType::kDagBlockPacket, {createDagBlockRlp(0, - 2)})) - .value(); - - const auto packet8_status_id = - tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kStatusPacket, {})).value(); - const auto packet9_status_id = - tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kStatusPacket, {})).value(); - - const auto packet12_vote_id = - tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kVotePacket, {})).value(); - const auto packet13_vote_id = - tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kVotePacket, {})).value(); - - const auto packet14_get_pbft_next_votes_id = - tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kGetNextVotesSyncPacket, {})).value(); - const auto packet15_get_pbft_next_votes_id = - tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kGetNextVotesSyncPacket, {})).value(); - - const auto packet16_pbft_next_votes_id = - tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kVotesBundlePacket, {})).value(); - - size_t packets_count = 0; - const auto packet17_pbft_next_votes_id = packets_count = - tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kVotesBundlePacket, {})).value(); - - tp.startProcessing(); - - // How should packets be processed: - // Note: To understand how are different packet types processed (concurrently 
without any blocking dependencies or - // synchronously due to some blocking dependencies - depends on situation), check - // PriorityQueue::updateDependenciesStart - /* - ---------------------- - - packet0_transaction - - ---------------------- - ---------------------- - - packet1_transaction - - ---------------------- - ----------------------- - - packet2_transaction - - ----------------------- - - -||- - ... - - ----------------------- - - packet17_votes_sync - - ----------------------- - 0.....................20.................... time [ms] - */ - - // All packets should be already being processed after short amount of time - std::this_thread::sleep_for(QUEUE_EMPTIED_WAIT_TRESHOLD_MS); - EXPECT_EQ(queuesSize(tp), 0); - - // Wait until processing of all packets is finished - in some edge cases it might be little bit delayed due to - locking EXPECT_HAPPENS({500s, 20ms}, [&](auto& ctx) { - // Check if transactions was propagated to node0 - WAIT_EXPECT_EQ(ctx, init_data.packets_processing_info->getPacketProcessingTimesCount(), packets_count + 1) - }); - - // Check order of packets how they were processed - const auto packets_proc_info = init_data.packets_processing_info; - - const auto packet0_tx_proc_info = packets_proc_info->getPacketProcessingTimes(packet0_tx_id); - const auto packet1_tx_proc_info = packets_proc_info->getPacketProcessingTimes(packet1_tx_id); - const auto packet2_tx_proc_info = packets_proc_info->getPacketProcessingTimes(packet2_tx_id); - const auto packet3_tx_proc_info = packets_proc_info->getPacketProcessingTimes(packet3_tx_id); - - const auto packet4_dag_block_proc_info = packets_proc_info->getPacketProcessingTimes(packet4_dag_block_id); - const auto packet5_dag_block_proc_info = packets_proc_info->getPacketProcessingTimes(packet5_dag_block_id); - - const auto packet8_status_proc_info = packets_proc_info->getPacketProcessingTimes(packet8_status_id); - const auto packet9_status_proc_info = 
packets_proc_info->getPacketProcessingTimes(packet9_status_id); - - const auto packet12_vote_proc_info = packets_proc_info->getPacketProcessingTimes(packet12_vote_id); - const auto packet13_vote_proc_info = packets_proc_info->getPacketProcessingTimes(packet13_vote_id); - - const auto packet14_get_pbft_next_votes_proc_info = - packets_proc_info->getPacketProcessingTimes(packet14_get_pbft_next_votes_id); - const auto packet15_get_pbft_next_votes_proc_info = - packets_proc_info->getPacketProcessingTimes(packet15_get_pbft_next_votes_id); - - const auto packet16_pbft_next_votes_proc_info = - packets_proc_info->getPacketProcessingTimes(packet16_pbft_next_votes_id); - const auto packet17_pbft_next_votes_proc_info = - packets_proc_info->getPacketProcessingTimes(packet17_pbft_next_votes_id); - - checkConcurrentProcessing({ - {packet0_tx_proc_info, "packet0_tx"}, - {packet1_tx_proc_info, "packet1_tx"}, - {packet2_tx_proc_info, "packet2_tx"}, - {packet3_tx_proc_info, "packet3_tx"}, - {packet4_dag_block_proc_info, "packet4_dag_block"}, - {packet5_dag_block_proc_info, "packet5_dag_block"}, - {packet8_status_proc_info, "packet8_status"}, - {packet9_status_proc_info, "packet9_status"}, - {packet12_vote_proc_info, "packet12_vote"}, - {packet13_vote_proc_info, "packet13_vote"}, - {packet14_get_pbft_next_votes_proc_info, "packet14_get_pbft_next_votes"}, - {packet15_get_pbft_next_votes_proc_info, "packet15_get_pbft_next_votes"}, - {packet16_pbft_next_votes_proc_info, "packet16_pbft_next_votes"}, - {packet17_pbft_next_votes_proc_info, "packet17_pbft_next_votes"}, - }); - } - -// Test "hard blocking dependencies" related synchronous processing of certain packets: -// -// Packets types that are currently hard blocked for processing in another threads due to dependencies, -// e.g. syncing packets must be processed synchronously one by one, etc... -// Each packet type might be simultaneously blocked by multiple different packets that are being processed. 
- TEST_F(TarcapTpTest, hard_blocking_deps) { - HandlersInitData init_data = createHandlersInitData(); - - auto packets_handler = std::make_shared(); - packets_handler->registerHandler(init_data, "GET_DAG_SYNC_PH", 20); - packets_handler->registerHandler(init_data, "GET_PBFT_SYNC_PH", 20); - packets_handler->registerHandler(init_data, "DAG_SYNC_PH", 20); - packets_handler->registerHandler(init_data, "PBFT_SYNC_PH", 20); - - // Creates threadpool - threadpool::PacketsThreadPool tp(10); - tp.setPacketsHandlers(TARAXA_NET_VERSION, packets_handler); - - // Pushes packets to the tp - const auto packet0_dag_sync_id = - tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kDagSyncPacket, {})).value(); - const auto packet1_dag_sync_id = - tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kDagSyncPacket, {})).value(); - const auto packet2_get_dag_sync_id = - tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kGetDagSyncPacket, {})).value(); - const auto packet3_get_dag_sync_id = - tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kGetDagSyncPacket, {})).value(); - const auto packet4_get_pbft_sync_id = - tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kGetPbftSyncPacket, {})).value(); - const auto packet5_get_pbft_sync_id = - tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kGetPbftSyncPacket, {})).value(); - const auto packet6_pbft_sync_id = - tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kPbftSyncPacket, {})).value(); - const auto packet7_pbft_sync_id = - tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kPbftSyncPacket, {})).value(); - - size_t packets_count = 0; - const auto packet8_get_dag_sync_id = packets_count = - tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kGetDagSyncPacket, {})).value(); - - tp.startProcessing(); - - // How should packets be processed: - // Note: To understand how are different 
packet types processed (concurrently without any blocking dependencies or - // synchronously due to some blocking dependencies - depends on situation), check - // PriorityQueue::updateDependenciesStart - /* - ------------------------ - --- packet0_dag_sync --- - ------------------------ - ------------------------ - --- packet1_dag_sync --- - ------------------------ - ------------------------- - -- packet2_get_dag_sync - - ------------------------- - ------------------------- - -- packet3_get_dag_sync - - ------------------------- - ------------------------- - - packet4_get_pbft_sync - - ------------------------- - ------------------------- - - packet5_get_pbft_sync - - ------------------------- - ------------------------ - --- packet6_pbft_sync -- - ------------------------ - ------------------------ - --- packet7_pbft_sync -- - ------------------------ - ------------------------ - - packet8_get_dag_sync - - ------------------------ - 0......................20........................40........................60.......... 
time - */ - - // All packets should be already being processed after short amount of time - std::this_thread::sleep_for(60ms + QUEUE_EMPTIED_WAIT_TRESHOLD_MS); - EXPECT_EQ(queuesSize(tp), 0); - - // Wait until processing of all packets is finished - in some edge cases it might be little bit delayed due to - locking EXPECT_HAPPENS({500s, 20ms}, [&](auto& ctx) { - // Check if transactions was propagated to node0 - WAIT_EXPECT_EQ(ctx, init_data.packets_processing_info->getPacketProcessingTimesCount(), packets_count + 1) - }); - - // Check order of packets how they were processed - const auto packets_proc_info = init_data.packets_processing_info; - - const auto packet0_dag_sync_proc_info = packets_proc_info->getPacketProcessingTimes(packet0_dag_sync_id); - const auto packet1_dag_sync_proc_info = packets_proc_info->getPacketProcessingTimes(packet1_dag_sync_id); - const auto packet2_get_dag_sync_proc_info = packets_proc_info->getPacketProcessingTimes(packet2_get_dag_sync_id); - const auto packet3_get_dag_sync_proc_info = packets_proc_info->getPacketProcessingTimes(packet3_get_dag_sync_id); - const auto packet4_get_pbft_sync_proc_info = packets_proc_info->getPacketProcessingTimes(packet4_get_pbft_sync_id); - const auto packet5_get_pbft_sync_proc_info = packets_proc_info->getPacketProcessingTimes(packet5_get_pbft_sync_id); - const auto packet6_pbft_sync_proc_info = packets_proc_info->getPacketProcessingTimes(packet6_pbft_sync_id); - const auto packet7_pbft_sync_proc_info = packets_proc_info->getPacketProcessingTimes(packet7_pbft_sync_id); - const auto packet8_get_dag_sync_proc_info = packets_proc_info->getPacketProcessingTimes(packet8_get_dag_sync_id); - - checkConcurrentProcessing({ - {packet0_dag_sync_proc_info, "packet0_dag_sync"}, - {packet2_get_dag_sync_proc_info, "packet2_get_dag_sync"}, - {packet4_get_pbft_sync_proc_info, "packet4_get_pbft_sync"}, - {packet6_pbft_sync_proc_info, "packet6_pbft_sync"}, - }); - - checkConcurrentProcessing({ - 
{packet1_dag_sync_proc_info, "packet1_dag_sync"}, - {packet3_get_dag_sync_proc_info, "packet3_get_dag_sync"}, - {packet5_get_pbft_sync_proc_info, "packet5_get_pbft_sync"}, - {packet7_pbft_sync_proc_info, "packet7_pbft_sync"}, - }); - - EXPECT_GT(packet1_dag_sync_proc_info.start_time_, packet0_dag_sync_proc_info.finish_time_); - EXPECT_GT(packet3_get_dag_sync_proc_info.start_time_, packet2_get_dag_sync_proc_info.finish_time_); - EXPECT_GT(packet5_get_pbft_sync_proc_info.start_time_, packet4_get_pbft_sync_proc_info.finish_time_); - EXPECT_GT(packet7_pbft_sync_proc_info.start_time_, packet6_pbft_sync_proc_info.finish_time_); - - EXPECT_GT(packet8_get_dag_sync_proc_info.start_time_, packet3_get_dag_sync_proc_info.finish_time_); - } - -// Test "peer-order blocking dependencies" related to specific (peer & order) combination: -// -// Packets types that are blocked only for processing when received from specific peer & after specific -// time (order), e.g.: new dag block packet processing is blocked until all transactions packets that were received -// before it are processed. 
This blocking dependency is applied only for the same peer so transaction packet from one -// peer does not block new dag block packet from another peer - TEST_F(TarcapTpTest, peer_order_blocking_deps) { - HandlersInitData init_data = createHandlersInitData(); - - auto packets_handler = std::make_shared(); - packets_handler->registerHandler(init_data, "TX_PH", 20); - packets_handler->registerHandler(init_data, "DAG_BLOCK_PH", 0); - packets_handler->registerHandler(init_data, "SYNC_TEST_PH", 40); - - // Creates threadpool - threadpool::PacketsThreadPool tp(10); - tp.setPacketsHandlers(TARAXA_NET_VERSION, packets_handler); - - // Pushes packets to the tp - const auto packet0_tx_id = - tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kTransactionPacket)).value(); - const auto packet1_tx_id = - tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kTransactionPacket)).value(); - const auto packet2_dag_sync_id = - tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kDagSyncPacket)).value(); - const auto packet3_tx_id = - tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kTransactionPacket)).value(); - - size_t packets_count = 0; - const auto packet4_dag_block_id = packets_count = - tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kDagBlockPacket, {createDagBlockRlp(1)})) - .value(); - - // How should packets be processed: - // Note: To understand how are different packet types processed (concurrently without any blocking dependencies or - // synchronously due to some blocking dependencies - depends on situation), check - // PriorityQueue::updateDependenciesStart - /* - -------------- - - packet0_tx - - -------------- - -------------- - - packet1_tx - - -------------- - ---------------------------- - ----- packet2_dag_sync ----- - ---------------------------- - -------------- - - packet3_tx - - -------------- - --------------------- - - packet4_dag_block - - --------------------- - 
0............20.............40....................60.................. time [ms] - */ - - tp.startProcessing(); - - // All packets should be already being processed after short amount of time - std::this_thread::sleep_for(60ms + QUEUE_EMPTIED_WAIT_TRESHOLD_MS); - EXPECT_EQ(queuesSize(tp), 0); - - // Wait until processing of all packets is finished - in some edge cases it might be little bit delayed due to - locking EXPECT_HAPPENS({500s, 20ms}, [&](auto& ctx) { - // Check if transactions was propagated to node0 - WAIT_EXPECT_EQ(ctx, init_data.packets_processing_info->getPacketProcessingTimesCount(), packets_count + 1) - }); - - // Check order of packets how they were processed - const auto packets_proc_info = init_data.packets_processing_info; - - const auto packet0_tx_proc_info = packets_proc_info->getPacketProcessingTimes(packet0_tx_id); - const auto packet1_tx_proc_info = packets_proc_info->getPacketProcessingTimes(packet1_tx_id); - const auto packet3_tx_proc_info = packets_proc_info->getPacketProcessingTimes(packet3_tx_id); - const auto packet4_dag_block_proc_info = packets_proc_info->getPacketProcessingTimes(packet4_dag_block_id); - const auto packet2_dag_sync_proc_info = packets_proc_info->getPacketProcessingTimes(packet2_dag_sync_id); - - checkConcurrentProcessing({ - {packet0_tx_proc_info, "packet0_tx"}, - {packet1_tx_proc_info, "packet1_tx"}, - {packet2_dag_sync_proc_info, "packet2_dag_sync"}, - {packet3_tx_proc_info, "packet3_tx"}, - }); - - EXPECT_GT(packet2_dag_sync_proc_info.finish_time_, packet0_tx_proc_info.finish_time_); - EXPECT_GT(packet2_dag_sync_proc_info.finish_time_, packet1_tx_proc_info.finish_time_); - EXPECT_GT(packet2_dag_sync_proc_info.finish_time_, packet3_tx_proc_info.finish_time_); - - EXPECT_GT(packet4_dag_block_proc_info.start_time_, packet2_dag_sync_proc_info.finish_time_); - } - -// Test "dag-block blocking dependencies" related to dag blocks: -// -// Same dag blocks should not be processed at the same time - TEST_F(TarcapTpTest, 
same_dag_blks_ordering) { - HandlersInitData init_data = createHandlersInitData(); - - auto packets_handler = std::make_shared(); - packets_handler->registerHandler(init_data, "DAG_BLOCK_PH", 20); - - // Creates threadpool - threadpool::PacketsThreadPool tp(10); - tp.setPacketsHandlers(TARAXA_NET_VERSION, packets_handler); - - auto dag_block = createDagBlockRlp(0); - - // Pushes packets to the tp - const auto blk0_id = - tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kDagBlockPacket, {dag_block})).value(); - const auto blk1_id = - tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kDagBlockPacket, {dag_block})).value(); - const auto blk2_id = - tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kDagBlockPacket, {dag_block})).value(); - const auto blk3_id = - tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kDagBlockPacket, {dag_block})).value(); - - size_t packets_count = 0; - const auto blk4_id = packets_count = - tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kDagBlockPacket, {dag_block})).value(); - - tp.startProcessing(); - - // How should dag blocks packets be processed: - // Same dag blocks should not be processed concurrently but one after another - - // All packets should be already being processed after short amount of time - std::this_thread::sleep_for(200ms + QUEUE_EMPTIED_WAIT_TRESHOLD_MS); - EXPECT_EQ(queuesSize(tp), 0); - - // Wait until processing of all packets is finished - in some edge cases it might be little bit delayed due to - locking EXPECT_HAPPENS({500s, 20ms}, [&](auto& ctx) { - // Check if transactions was propagated to node0 - WAIT_EXPECT_EQ(ctx, init_data.packets_processing_info->getPacketProcessingTimesCount(), packets_count + 1) - }); - - // Check order of packets how they were processed - const auto packets_proc_info = init_data.packets_processing_info; - - const auto blk0_proc_info = packets_proc_info->getPacketProcessingTimes(blk0_id); - 
const auto blk1_proc_info = packets_proc_info->getPacketProcessingTimes(blk1_id); - const auto blk2_proc_info = packets_proc_info->getPacketProcessingTimes(blk2_id); - const auto blk3_proc_info = packets_proc_info->getPacketProcessingTimes(blk3_id); - const auto blk4_proc_info = packets_proc_info->getPacketProcessingTimes(blk4_id); - - checkSerialProcessing({ - {blk0_proc_info, "blk0"}, - {blk1_proc_info, "blk1"}, - {blk2_proc_info, "blk2"}, - {blk3_proc_info, "blk3"}, - {blk4_proc_info, "blk4"}, - }); - } - -// Test "dag-level blocking dependencies" related to dag blocks levels: -// -// Ideally only dag blocks with the same level should be processed. In reality there are situation when node receives -// dag block with smaller level than the level of blocks that are already being processed. In such case these blocks -// with smaller levels can be processed concurrently with blocks that have higher level. All new dag blocks with -/ higher / level than the lowest level from all the blocks that currently being processed are blocked for processing - TEST_F(TarcapTpTest, dag_blks_lvls_ordering) { - HandlersInitData init_data = createHandlersInitData(); - - auto packets_handler = std::make_shared(); - packets_handler->registerHandler(init_data, "DAG_BLOCK_PH", 20); - - // Creates threadpool - threadpool::PacketsThreadPool tp(10); - tp.setPacketsHandlers(TARAXA_NET_VERSION, packets_handler); - - // Pushes packets to the tp - const auto blk0_lvl1_id = - tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kDagBlockPacket, {createDagBlockRlp(1, - 1)})) - .value(); - const auto blk1_lvl1_id = - tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kDagBlockPacket, {createDagBlockRlp(1, - 2)})) - .value(); - const auto blk2_lvl0_id = - tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kDagBlockPacket, {createDagBlockRlp(0, - 3)})) - .value(); - const auto blk3_lvl1_id = - tp.push(createPacket(init_data.copySender(), 
SubprotocolPacketType::kDagBlockPacket, {createDagBlockRlp(1, - 4)})) - .value(); - const auto blk4_lvl2_id = - tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kDagBlockPacket, {createDagBlockRlp(2, - 5)})) - .value(); - - size_t packets_count = 0; - const auto blk5_lvl3_id = packets_count = - tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kDagBlockPacket, {createDagBlockRlp(3, - 6)})) - .value(); - - tp.startProcessing(); - - // How should dag blocks packets be processed: - // Note: To understand how are different packet types processed (concurrently without any blocking dependencies or - // synchronously due to some blocking dependencies - depends on situation), check - // PriorityQueue::updateDependenciesStart - /* - ------------- - - blk0_lvl1 - - ------------- - ------------- - - blk1_lvl1 - - ------------- - ------------- - - blk2_lvl0 - - ------------- - ------------- - - blk3_lvl1 - - ------------- - ------------- - - blk4_lvl2 - - ------------- - ------------- - - blk5_lvl3 - - ------------- - 0...........20............40............60.............80................. 
time [ms] - */ - - // All packets should be already being processed after short amount of time - std::this_thread::sleep_for(80ms + QUEUE_EMPTIED_WAIT_TRESHOLD_MS); - EXPECT_EQ(queuesSize(tp), 0); - - // Wait until processing of all packets is finished - in some edge cases it might be little bit delayed due to - locking EXPECT_HAPPENS({500s, 20ms}, [&](auto& ctx) { - // Check if transactions was propagated to node0 - WAIT_EXPECT_EQ(ctx, init_data.packets_processing_info->getPacketProcessingTimesCount(), packets_count + 1) - }); - - // Check order of packets how they were processed - const auto packets_proc_info = init_data.packets_processing_info; - - const auto blk0_lvl1_proc_info = packets_proc_info->getPacketProcessingTimes(blk0_lvl1_id); - const auto blk1_lvl1_proc_info = packets_proc_info->getPacketProcessingTimes(blk1_lvl1_id); - const auto blk2_lvl0_proc_info = packets_proc_info->getPacketProcessingTimes(blk2_lvl0_id); - const auto blk3_lvl1_proc_info = packets_proc_info->getPacketProcessingTimes(blk3_lvl1_id); - const auto blk4_lvl2_proc_info = packets_proc_info->getPacketProcessingTimes(blk4_lvl2_id); - const auto blk5_lvl3_proc_info = packets_proc_info->getPacketProcessingTimes(blk5_lvl3_id); - - checkConcurrentProcessing({ - {blk0_lvl1_proc_info, "blk0_lvl1"}, - {blk1_lvl1_proc_info, "blk1_lvl1"}, - {blk2_lvl0_proc_info, "blk2_lvl0"}, - }); - - EXPECT_GT(blk3_lvl1_proc_info.start_time_, blk2_lvl0_proc_info.finish_time_); - EXPECT_GT(blk4_lvl2_proc_info.start_time_, blk3_lvl1_proc_info.finish_time_); - EXPECT_GT(blk5_lvl3_proc_info.start_time_, blk4_lvl2_proc_info.finish_time_); - } - -// Test threads borrowing -// -// It can happen that no packet for processing was returned during the first iteration over priority queues as there -// are limits for max total workers per each priority queue. These limits can and should be ignored in some -// scenarios... 
For example: -// High priority queue reached it's max workers limit, other queues have inside many blocked packets that cannot be -// currently processed concurrently and MAX_TOTAL_WORKERS_COUNT is not reached yet. In such case some threads might -// be unused. In such cases priority queues max workers limits can and should be ignored. -// -// Always keep 1 reserved thread for each priority queue at all times - TEST_F(TarcapTpTest, threads_borrowing) { - HandlersInitData init_data = createHandlersInitData(); - - auto packets_handler = std::make_shared(); - packets_handler->registerHandler(init_data, "VOTE_PH", 100); - - // Creates threadpool - const size_t threads_num = 10; - threadpool::PacketsThreadPool tp(threads_num); - tp.setPacketsHandlers(TARAXA_NET_VERSION, packets_handler); - - // Pushes packets to the tp - std::vector pushed_packets_ids; - for (size_t i = 0; i < threads_num; i++) { - uint64_t packet_id = tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kVotePacket, - {})).value(); pushed_packets_ids.push_back(packet_id); - } - - tp.startProcessing(); - - // How should packets be processed: - // Note: To understand how are different packet types processed (concurrently without any blocking dependencies or - // synchronously due to some blocking dependencies - depends on situation), check - // PriorityQueue::updateDependenciesStart - // - // Note: each queue has 1 thread reserved at all times(even if it does not do anything) and there is 10 threads in - // total, even with borrowing only 8 threads could be used at the same time - /* - ---------------- - - packet0_vote - - ---------------- - ---------------- - - packet1_vote - - ---------------- - ---------------- - - packet2_vote - - ---------------- - - -||- - ... - - ---------------- - - packet7_vote - - ---------------- - ---------------- - - packet8_vote - - ---------------- - ---------------- - - packet9_vote - - ---------------- - 0..............100...............200........... 
time [ms] - */ - - // First 8 packets should be already processed by this time - std::this_thread::sleep_for(100ms + 50ms /* might take longer due to threads borrowing */); - EXPECT_LE(queuesSize(tp), 2); - - // Check order of packets how they were processed - const auto packets_proc_info = init_data.packets_processing_info; - - // In case some packet processing is not finished yet, getPacketProcessingTimes() returns default (empty) value - std::chrono::steady_clock::time_point default_time_point; - - // Because each queue has 1 thread reserved at all times(even if it does not do anything) and there is 10 threads - in - // total, even with borrowing only 8 threads could be used at the same time, thus last 2 packets (9th & 10th) - should - // not be processed after (100 + WAIT_TRESHOLD_MS) ms - EXPECT_EQ(packets_proc_info->getPacketProcessingTimes(pushed_packets_ids[8]).finish_time_, default_time_point); - EXPECT_EQ(packets_proc_info->getPacketProcessingTimes(pushed_packets_ids[9]).finish_time_, default_time_point); - - std::vector> packets_proc_info_vec; - for (size_t i = 0; i < threads_num - (threadpool::PacketData::PacketPriority::Count - 1); i++) { - packets_proc_info_vec.emplace_back(packets_proc_info->getPacketProcessingTimes(pushed_packets_ids[i]), - "packet" + std::to_string(pushed_packets_ids[i]) + "_vote"); - } - - // Check if first 8 pbft vote packets were processed concurrently -> threads from other queues had to be borrowed - for - // that - checkConcurrentProcessing(packets_proc_info_vec); - } - -// Test low priority queue starvation -// -// It should never happen that packets from lower priority queues are waiting to be processed until all packets from -// higher priority queues are processed - TEST_F(TarcapTpTest, low_priotity_queue_starvation) { - HandlersInitData init_data = createHandlersInitData(); - - auto packets_handler = std::make_shared(); - // Handler for packet from high priority queue - packets_handler->registerHandler(init_data, 
"VOTE_PH", 20); - - // Handler for packet from mid priority queue - packets_handler->registerHandler(init_data, "TX_PH", 20); - - // Handler for packet from low priority queue - packets_handler->registerHandler(init_data, "STATUS_PH", 20); - - // Creates threadpool - size_t threads_num = 10; - threadpool::PacketsThreadPool tp(threads_num); - tp.setPacketsHandlers(TARAXA_NET_VERSION, packets_handler); - - // Push 10x more packets for each prioriy queue than max tp capacity to make sure that tp wont be able to process - all - // packets from each queue concurrently -> many packets will be waiting due to max threads num reached for specific - // priority queues - for (size_t i = 0; i < 2 * 10 * threads_num; i++) { - tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kVotePacket, {})).value(); - tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kTransactionPacket, {})).value(); - } - - // Push a few packets low priority packets - for (size_t i = 0; i < 4; i++) { - tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kStatusPacket, {})).value(); - } - - tp.startProcessing(); - - // How should packets be processed: - // Note: To understand how are different packet types processed (concurrently without any blocking dependencies or - // synchronously due to some blocking dependencies - depends on situation), check - // PriorityQueue::updateDependenciesStart In this test are max concurrent processing limits for queues reached, so - // when we have 10 threads in thredpool: - // - 4 is limit for High priority queue - VotePacket - // - 4 is limit for Mid priority queue - TransactionPacket - // - 3 is limit for Low priority queue - StatusPacket, but because max total limit (10) is always checked first - // , low priority queue wont be able to use more than 2 threads concurrently - /* - ---------------- - - packet0_vote - - ---------------- - ---------------- - - packet1_vote - - ---------------- - ---------------- - - 
packet2_vote - - ---------------- - ---------------- - - packet3_vote - - ---------------- - ---------------- - -- packet4_tx -- - ---------------- - ---------------- - -- packet5_tx -- - ---------------- - ---------------- - -- packet6_tx -- - ---------------- - ---------------- - -- packet7_tx -- - ---------------- - - .... - votes and tx packets are processed concurrently 4 at a time until all of them are processed - - - ------------------ - - packet400_test - - ------------------ - ------------------ - - packet401_test - - ------------------ - ------------------ - - packet402_test - - ------------------ - ------------------ - - packet403_test - - ------------------ - 0.................20.................40................... time [ms] - */ - - std::this_thread::sleep_for(40ms + QUEUE_EMPTIED_WAIT_TRESHOLD_MS); - - const auto [high_priority_queue_size, mid_priority_queue_size, low_priority_queue_size] = tp.getQueueSize(); - - EXPECT_GT(high_priority_queue_size, 0); - EXPECT_GT(mid_priority_queue_size, 0); - EXPECT_EQ(low_priority_queue_size, 0); - } - - } // namespace taraxa::core_tests - - int main(int argc, char** argv) { - using namespace taraxa; - - auto logging = logger::createDefaultLoggingConfig(); - - // Set this to debug to see log msgs - logging.verbosity = logger::Verbosity::Debug; - - addr_t node_addr; - logger::InitLogging(logging, node_addr); - - ::testing::InitGoogleTest(&argc, argv); - return RUN_ALL_TESTS(); - } \ No newline at end of file +// #include +// +// #include "config/config.hpp" +// #include "config/version.hpp" +// #include "dag/dag_block.hpp" +// #include "logger/logger.hpp" +// #include "network/tarcap/packets_handler.hpp" +// #include "network/tarcap/packets_handlers/latest/common/packet_handler.hpp" +// #include "network/tarcap/shared_states/peers_state.hpp" +// #include "network/threadpool/tarcap_thread_pool.hpp" +// #include "test_util/test_util.hpp" +// +// namespace taraxa::core_tests { +// +// using namespace std::literals; 
+// +//// Do not use NodesTest from "test_util/gtest.hpp" as its functionality is not needed in this test +// struct NodesTest : virtual testing::Test { +// testing::UnitTest* current_test = ::testing::UnitTest::GetInstance(); +// testing::TestInfo const* current_test_info = current_test->current_test_info(); +// +// NodesTest() = default; +// virtual ~NodesTest() = default; +// +// NodesTest(const NodesTest&) = delete; +// NodesTest(NodesTest&&) = delete; +// NodesTest& operator=(const NodesTest&) = delete; +// NodesTest& operator=(NodesTest&&) = delete; +// }; +// +// struct TarcapTpTest : NodesTest {}; +// +// using namespace taraxa::network; +// +// class PacketsProcessingInfo { +// public: +// struct PacketProcessingTimes { +// std::chrono::steady_clock::time_point start_time_; +// std::chrono::steady_clock::time_point finish_time_; +// }; +// +// public: +// void addPacketProcessingTimes(threadpool::PacketData::PacketId packet_id, +// const PacketProcessingTimes& packet_processing_times) { +// std::scoped_lock lock(mutex_); +// bool res = packets_processing_times_.emplace(packet_id, packet_processing_times).second; +// assert(res); +// } +// +// PacketProcessingTimes getPacketProcessingTimes(threadpool::PacketData::PacketId packet_id) const { +// std::shared_lock lock(mutex_); +// +// auto found_packet_info = packets_processing_times_.find(packet_id); +// +// // Failed to obtain processing times for packet id: packet_id. Processing did not finish yet. 
This should be +// // caught in processing times comparing +// if (found_packet_info == packets_processing_times_.end()) { +// return {}; +// } +// +// return found_packet_info->second; +// } +// +// size_t getPacketProcessingTimesCount() const { +// std::shared_lock lock(mutex_); +// return packets_processing_times_.size(); +// } +// +// private: +// std::unordered_map packets_processing_times_; +// mutable std::shared_mutex mutex_; +// }; +// +//// Help functions for tests +// struct HandlersInitData { +// FullNodeConfig conf; +// dev::p2p::NodeID sender_node_id; +// addr_t own_node_addr; +// +// std::shared_ptr peers_state; +// std::shared_ptr packets_stats; +// std::shared_ptr packets_processing_info; +// +// dev::p2p::NodeID copySender() { return sender_node_id; } +// }; +// +// struct DummyPacket { +// DummyPacket(const dev::RLP& packet_rlp) {} +// }; +// +// class DummyPacketHandler : public tarcap::PacketHandler { +// public: +// DummyPacketHandler(const HandlersInitData& init_data, const std::string& log_channel_name, +// uint32_t processing_delay_ms) +// : PacketHandler(init_data.conf, init_data.peers_state, init_data.packets_stats, init_data.own_node_addr, +// log_channel_name), +// processing_delay_ms_(processing_delay_ms), +// packets_proc_info_(init_data.packets_processing_info) {} +// +// virtual ~DummyPacketHandler() = default; +// DummyPacketHandler(const DummyPacketHandler&) = default; +// DummyPacketHandler(DummyPacketHandler&&) = default; +// DummyPacketHandler& operator=(const DummyPacketHandler&) = delete; +// DummyPacketHandler& operator=(DummyPacketHandler&&) = delete; +// +// private: +// void validatePacketRlpFormat([[maybe_unused]] const threadpool::PacketData& packet_data) const override {} +// +// void process(DummyPacket&& packet_data, +// [[maybe_unused]] const std::shared_ptr& peer) override { +// // Note do not use LOG() before saving start & finish time as it is internally synchronized and can +// // cause delays, which result in 
tests fails +// auto start_time = std::chrono::steady_clock::now(); +// std::this_thread::sleep_for(std::chrono::milliseconds(processing_delay_ms_)); +// auto finish_time = std::chrono::steady_clock::now(); +// +// LOG(log_dg_) << "Processing packet: " << packet_data.type_str_ << ", id(" << packet_data.id_ << ") finished. " +// << "Start time: " << start_time.time_since_epoch().count() +// << ", finish time: " << finish_time.time_since_epoch().count(); +// +// packets_proc_info_->addPacketProcessingTimes(packet_data.id_, {start_time, finish_time}); +// } +// +// uint32_t processing_delay_ms_{0}; +// std::shared_ptr packets_proc_info_; +// }; +// +// class DummyTransactionPacketHandler : public DummyPacketHandler { +// public: +// DummyTransactionPacketHandler(const HandlersInitData& init_data, const std::string& log_channel_name, +// uint32_t processing_delay_ms) +// : DummyPacketHandler(init_data, log_channel_name, processing_delay_ms) {} +// +// // Packet type that is processed by this handler +// static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::kTransactionPacket; +// }; +// +// class DummyDagBlockPacketHandler : public DummyPacketHandler { +// public: +// DummyDagBlockPacketHandler(const HandlersInitData& init_data, const std::string& log_channel_name, +// uint32_t processing_delay_ms) +// : DummyPacketHandler(init_data, log_channel_name, processing_delay_ms) {} +// +// // Packet type that is processed by this handler +// static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::kDagBlockPacket; +// }; +// +// class DummyStatusPacketHandler : public DummyPacketHandler { +// public: +// DummyStatusPacketHandler(const HandlersInitData& init_data, const std::string& log_channel_name, +// uint32_t processing_delay_ms) +// : DummyPacketHandler(init_data, log_channel_name, processing_delay_ms) {} +// +// // Packet type that is processed by this handler +// static constexpr SubprotocolPacketType kPacketType_ = 
SubprotocolPacketType::kStatusPacket; +// }; +// +// class DummyVotePacketHandler : public DummyPacketHandler { +// public: +// DummyVotePacketHandler(const HandlersInitData& init_data, const std::string& log_channel_name, +// uint32_t processing_delay_ms) +// : DummyPacketHandler(init_data, log_channel_name, processing_delay_ms) {} +// +// // Packet type that is processed by this handler +// static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::kVotePacket; +// }; +// +// class DummyGetNextVotesBundlePacketHandler : public DummyPacketHandler { +// public: +// DummyGetNextVotesBundlePacketHandler(const HandlersInitData& init_data, const std::string& log_channel_name, +// uint32_t processing_delay_ms) +// : DummyPacketHandler(init_data, log_channel_name, processing_delay_ms) {} +// +// // Packet type that is processed by this handler +// static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::kGetNextVotesSyncPacket; +// }; +// +// class DummyVotesBundlePacketHandler : public DummyPacketHandler { +// public: +// DummyVotesBundlePacketHandler(const HandlersInitData& init_data, const std::string& log_channel_name, +// uint32_t processing_delay_ms) +// : DummyPacketHandler(init_data, log_channel_name, processing_delay_ms) {} +// +// // Packet type that is processed by this handler +// static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::kVotesBundlePacket; +// }; +// +// class DummyGetDagSyncPacketHandler : public DummyPacketHandler { +// public: +// DummyGetDagSyncPacketHandler(const HandlersInitData& init_data, const std::string& log_channel_name, +// uint32_t processing_delay_ms) +// : DummyPacketHandler(init_data, log_channel_name, processing_delay_ms) {} +// +// // Packet type that is processed by this handler +// static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::kGetDagSyncPacket; +// }; +// +// class DummyGetPbftSyncPacketHandler : public DummyPacketHandler { +// 
public: +// DummyGetPbftSyncPacketHandler(const HandlersInitData& init_data, const std::string& log_channel_name, +// uint32_t processing_delay_ms) +// : DummyPacketHandler(init_data, log_channel_name, processing_delay_ms) {} +// +// // Packet type that is processed by this handler +// static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::kGetPbftSyncPacket; +// }; +// +// class DummyDagSyncPacketHandler : public DummyPacketHandler { +// public: +// DummyDagSyncPacketHandler(const HandlersInitData& init_data, const std::string& log_channel_name, +// uint32_t processing_delay_ms) +// : DummyPacketHandler(init_data, log_channel_name, processing_delay_ms) {} +// +// // Packet type that is processed by this handler +// static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::kDagSyncPacket; +// }; +// +// class DummyPbftSyncPacketHandler : public DummyPacketHandler { +// public: +// DummyPbftSyncPacketHandler(const HandlersInitData& init_data, const std::string& log_channel_name, +// uint32_t processing_delay_ms) +// : DummyPacketHandler(init_data, log_channel_name, processing_delay_ms) {} +// +// // Packet type that is processed by this handler +// static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::kPbftSyncPacket; +// }; +// +// HandlersInitData createHandlersInitData() { +// HandlersInitData ret_init_data; +// +// ret_init_data.sender_node_id = dev::p2p::NodeID(1); +// ret_init_data.own_node_addr = addr_t(2); +// ret_init_data.peers_state = std::make_shared(std::weak_ptr(), +// FullNodeConfig()); ret_init_data.packets_stats = +// std::make_shared(std::chrono::milliseconds(0), ret_init_data.own_node_addr); +// ret_init_data.packets_processing_info = std::make_shared(); +// +// // Enable packets from sending peer to be processed +// auto peer = ret_init_data.peers_state->addPendingPeer(ret_init_data.sender_node_id, ""); +// 
ret_init_data.peers_state->setPeerAsReadyToSendMessages(ret_init_data.sender_node_id, peer); +// +// return ret_init_data; +// } +// +// std::pair createPacket( +// const dev::p2p::NodeID& sender_node_id, SubprotocolPacketType packet_type, +// std::optional> packet_rlp_bytes = {}) { +// if (packet_rlp_bytes.has_value()) { +// threadpool::PacketData packet_data(packet_type, sender_node_id, std::move(packet_rlp_bytes.value())); +// return {TARAXA_NET_VERSION, std::move(packet_data)}; +// } +// +// dev::RLPStream s(0); +// threadpool::PacketData packet_data(packet_type, sender_node_id, s.invalidate()); +// return {TARAXA_NET_VERSION, std::move(packet_data)}; +// } +// +// bytes createDagBlockRlp(level_t level, uint32_t sig = 777) { +// // Creates dag block rlp as it is required for blocking mask to extract dag block level +// DagBlock blk(blk_hash_t(10), level, {}, {}, sig_t(sig), blk_hash_t(1), addr_t(15)); +// return blk.rlp(true); +// } +// +///** +// * @brief Check all combinations(without repetition) of provided packets that they were processed concurrently: +// * - packet1.start_time < packet2.finish_time +// * - packet2.start_time < packet1.finish_time +// * +// * @param packets +// */ +// void checkConcurrentProcessing( +// const std::vector>& packets) { +// assert(packets.size() >= 2); +// +// for (size_t i = 0; i < packets.size(); i++) { +// const auto& packet_l = packets[0]; +// for (size_t j = i + 1; j < packets.size(); j++) { +// const auto& packet_r = packets[j]; +// EXPECT_LT(packet_l.first.start_time_, packet_r.first.finish_time_) +// << packet_l.second << ".start_time < " << packet_r.second << ".finish_time"; +// EXPECT_LT(packet_r.first.start_time_, packet_l.first.finish_time_) +// << packet_r.second << ".start_time < " << packet_l.second << ".finish_time"; +// } +// } +//} +// +///** +// * @brief Check all combinations(without repetition) of provided packets that they were processed serial: +// * - packet1.finish_time < packet2.start_time +// * +// 
* @param packets +// */ +// void checkSerialProcessing( +// const std::vector>& packets) { +// assert(packets.size() >= 2); +// +// for (size_t i = 0; i < packets.size(); i++) { +// const auto& packet_l = packets[0]; +// for (size_t j = i + 1; j < packets.size(); j++) { +// const auto& packet_r = packets[j]; +// EXPECT_LT(packet_l.first.finish_time_, packet_r.first.start_time_) +// << packet_l.second << ".finish_time < " << packet_r.second << ".start_time"; +// } +// } +//} +// +// size_t queuesSize(const threadpool::PacketsThreadPool& tp) { +// const auto [high_priority_queue_size, mid_priority_queue_size, low_priority_queue_size] = tp.getQueueSize(); +// +// return high_priority_queue_size + mid_priority_queue_size + low_priority_queue_size; +//} +// +//// Threshold for packets queue to be emptied +// constexpr std::chrono::milliseconds QUEUE_EMPTIED_WAIT_TRESHOLD_MS = 15ms; +// +//// Test all packet types if they are either in non-blocking or blocking list of packets +// TEST_F(TarcapTpTest, packets_blocking_dependencies) { +// network::threadpool::PriorityQueue priority_queue(3); +// +// for (auto packet_type = SubprotocolPacketType{0}; packet_type != SubprotocolPacketType::kPacketCount; +// packet_type = static_cast(static_cast(packet_type) + 1)) { +// // Skip unreal packet types +// switch (packet_type) { +// case SubprotocolPacketType::kHighPriorityPackets: +// case SubprotocolPacketType::kMidPriorityPackets: +// case SubprotocolPacketType::kLowPriorityPackets: +// case SubprotocolPacketType::kPacketCount: +// continue; +// } +// +// std::vector packet_bytes; +// +// // Generate proper rlp for packets that need it for processing +// if (packet_type == SubprotocolPacketType::kDagBlockPacket) { +// DagBlock blk(blk_hash_t(1), 1, {}, {trx_hash_t(1), trx_hash_t(2)}, sig_t(3), blk_hash_t(0x4), addr_t(5)); +// packet_bytes = blk.rlp(true); +// } +// network::threadpool::PacketData packet_data{packet_type, {}, std::move(packet_bytes)}; +// packet_data.id_ = 
static_cast(packet_type); +// +// bool is_non_blocking_packet = priority_queue.isNonBlockingPacket(packet_data.type_); +// bool is_blocking_packet = priority_queue.updateBlockingDependencies(packet_data); +// +// EXPECT_TRUE(is_non_blocking_packet != is_blocking_packet); +// } +// } +// +//// Test if all "block-free" packets are processed concurrently +//// Note: in case someone creates new blocking dependency and does not adjust tests, this test should fail +// TEST_F(TarcapTpTest, block_free_packets) { +// HandlersInitData init_data = createHandlersInitData(); +// +// // Creates sender 2 to bypass peer order block on Transaction -> DagBlock packet. In case those packets sent +// // 2 different senders those packets are "block-free" +// dev::p2p::NodeID sender2(3); +// auto peer = init_data.peers_state->addPendingPeer(sender2, ""); +// init_data.peers_state->setPeerAsReadyToSendMessages(sender2, peer); +// +// auto packets_handler = std::make_shared(); +// +// packets_handler->registerHandler(init_data, "TX_PH", 20); +// packets_handler->registerHandler(init_data, "DAG_BLOCK_PH", 20); +// packets_handler->registerHandler(init_data, "STATUS_PH", 20); +// packets_handler->registerHandler(init_data, "VOTE_PH", 20); +// packets_handler->registerHandler(init_data, "GET_NEXT_VOTES_SYNC_PH", 20); +// packets_handler->registerHandler(init_data, "VOTES_SYNC_PH", 20); +// +// // Creates threadpool +// // Note: make num of threads >= num of packets to check if they are processed concurrently without blocks, +// otherwise +// // some blocks would be blocked for processing due to max threads limit +// threadpool::PacketsThreadPool tp(18); +// tp.setPacketsHandlers(TARAXA_NET_VERSION, packets_handler); +// +// // Pushes packets to the tp +// auto packet = createPacket(init_data.copySender(), SubprotocolPacketType::kTransactionPacket, {}); +// if (packet.second.rlp_.isList()) { +// std::cout << "is list"; +// } else { +// std::cout << "not list"; +// } +// const auto 
packet0_tx_id = +// tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kTransactionPacket, {})).value(); +// const auto packet1_tx_id = +// tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kTransactionPacket, {})).value(); +// const auto packet2_tx_id = +// tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kTransactionPacket, {})).value(); +// const auto packet3_tx_id = +// tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kTransactionPacket, {})).value(); +// +// const auto packet4_dag_block_id = +// tp.push( +// createPacket(dev::p2p::NodeID(sender2), SubprotocolPacketType::kDagBlockPacket, {createDagBlockRlp(0, +// 1)})) +// .value(); +// const auto packet5_dag_block_id = +// tp.push( +// createPacket(dev::p2p::NodeID(sender2), SubprotocolPacketType::kDagBlockPacket, {createDagBlockRlp(0, +// 2)})) +// .value(); +// +// const auto packet8_status_id = +// tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kStatusPacket, {})).value(); +// const auto packet9_status_id = +// tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kStatusPacket, {})).value(); +// +// const auto packet12_vote_id = +// tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kVotePacket, {})).value(); +// const auto packet13_vote_id = +// tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kVotePacket, {})).value(); +// +// const auto packet14_get_pbft_next_votes_id = +// tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kGetNextVotesSyncPacket, {})).value(); +// const auto packet15_get_pbft_next_votes_id = +// tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kGetNextVotesSyncPacket, {})).value(); +// +// const auto packet16_pbft_next_votes_id = +// tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kVotesBundlePacket, {})).value(); +// +// size_t packets_count = 0; +// const auto 
packet17_pbft_next_votes_id = packets_count = +// tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kVotesBundlePacket, {})).value(); +// +// tp.startProcessing(); +// +// // How should packets be processed: +// // Note: To understand how are different packet types processed (concurrently without any blocking dependencies or +// // synchronously due to some blocking dependencies - depends on situation), check +// // PriorityQueue::updateDependenciesStart +// /* +// ---------------------- +// - packet0_transaction - +// ---------------------- +// ---------------------- +// - packet1_transaction - +// ---------------------- +// ----------------------- +// - packet2_transaction - +// ----------------------- +// +// -||- +// ... +// +// ----------------------- +// - packet17_votes_sync - +// ----------------------- +// 0.....................20.................... time [ms] +// */ +// +// // All packets should be already being processed after short amount of time +// std::this_thread::sleep_for(QUEUE_EMPTIED_WAIT_TRESHOLD_MS); +// EXPECT_EQ(queuesSize(tp), 0); +// +// // Wait until processing of all packets is finished - in some edge cases it might be little bit delayed due to +// locking EXPECT_HAPPENS({500s, 20ms}, [&](auto& ctx) { +// // Check if transactions was propagated to node0 +// WAIT_EXPECT_EQ(ctx, init_data.packets_processing_info->getPacketProcessingTimesCount(), packets_count + 1) +// }); +// +// // Check order of packets how they were processed +// const auto packets_proc_info = init_data.packets_processing_info; +// +// const auto packet0_tx_proc_info = packets_proc_info->getPacketProcessingTimes(packet0_tx_id); +// const auto packet1_tx_proc_info = packets_proc_info->getPacketProcessingTimes(packet1_tx_id); +// const auto packet2_tx_proc_info = packets_proc_info->getPacketProcessingTimes(packet2_tx_id); +// const auto packet3_tx_proc_info = packets_proc_info->getPacketProcessingTimes(packet3_tx_id); +// +// const auto 
packet4_dag_block_proc_info = packets_proc_info->getPacketProcessingTimes(packet4_dag_block_id); +// const auto packet5_dag_block_proc_info = packets_proc_info->getPacketProcessingTimes(packet5_dag_block_id); +// +// const auto packet8_status_proc_info = packets_proc_info->getPacketProcessingTimes(packet8_status_id); +// const auto packet9_status_proc_info = packets_proc_info->getPacketProcessingTimes(packet9_status_id); +// +// const auto packet12_vote_proc_info = packets_proc_info->getPacketProcessingTimes(packet12_vote_id); +// const auto packet13_vote_proc_info = packets_proc_info->getPacketProcessingTimes(packet13_vote_id); +// +// const auto packet14_get_pbft_next_votes_proc_info = +// packets_proc_info->getPacketProcessingTimes(packet14_get_pbft_next_votes_id); +// const auto packet15_get_pbft_next_votes_proc_info = +// packets_proc_info->getPacketProcessingTimes(packet15_get_pbft_next_votes_id); +// +// const auto packet16_pbft_next_votes_proc_info = +// packets_proc_info->getPacketProcessingTimes(packet16_pbft_next_votes_id); +// const auto packet17_pbft_next_votes_proc_info = +// packets_proc_info->getPacketProcessingTimes(packet17_pbft_next_votes_id); +// +// checkConcurrentProcessing({ +// {packet0_tx_proc_info, "packet0_tx"}, +// {packet1_tx_proc_info, "packet1_tx"}, +// {packet2_tx_proc_info, "packet2_tx"}, +// {packet3_tx_proc_info, "packet3_tx"}, +// {packet4_dag_block_proc_info, "packet4_dag_block"}, +// {packet5_dag_block_proc_info, "packet5_dag_block"}, +// {packet8_status_proc_info, "packet8_status"}, +// {packet9_status_proc_info, "packet9_status"}, +// {packet12_vote_proc_info, "packet12_vote"}, +// {packet13_vote_proc_info, "packet13_vote"}, +// {packet14_get_pbft_next_votes_proc_info, "packet14_get_pbft_next_votes"}, +// {packet15_get_pbft_next_votes_proc_info, "packet15_get_pbft_next_votes"}, +// {packet16_pbft_next_votes_proc_info, "packet16_pbft_next_votes"}, +// {packet17_pbft_next_votes_proc_info, "packet17_pbft_next_votes"}, +// }); 
+// } +// +//// Test "hard blocking dependencies" related synchronous processing of certain packets: +//// +//// Packets types that are currently hard blocked for processing in another threads due to dependencies, +//// e.g. syncing packets must be processed synchronously one by one, etc... +//// Each packet type might be simultaneously blocked by multiple different packets that are being processed. +// TEST_F(TarcapTpTest, hard_blocking_deps) { +// HandlersInitData init_data = createHandlersInitData(); +// +// auto packets_handler = std::make_shared(); +// packets_handler->registerHandler(init_data, "GET_DAG_SYNC_PH", 20); +// packets_handler->registerHandler(init_data, "GET_PBFT_SYNC_PH", 20); +// packets_handler->registerHandler(init_data, "DAG_SYNC_PH", 20); +// packets_handler->registerHandler(init_data, "PBFT_SYNC_PH", 20); +// +// // Creates threadpool +// threadpool::PacketsThreadPool tp(10); +// tp.setPacketsHandlers(TARAXA_NET_VERSION, packets_handler); +// +// // Pushes packets to the tp +// const auto packet0_dag_sync_id = +// tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kDagSyncPacket, {})).value(); +// const auto packet1_dag_sync_id = +// tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kDagSyncPacket, {})).value(); +// const auto packet2_get_dag_sync_id = +// tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kGetDagSyncPacket, {})).value(); +// const auto packet3_get_dag_sync_id = +// tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kGetDagSyncPacket, {})).value(); +// const auto packet4_get_pbft_sync_id = +// tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kGetPbftSyncPacket, {})).value(); +// const auto packet5_get_pbft_sync_id = +// tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kGetPbftSyncPacket, {})).value(); +// const auto packet6_pbft_sync_id = +// tp.push(createPacket(init_data.copySender(), 
SubprotocolPacketType::kPbftSyncPacket, {})).value(); +// const auto packet7_pbft_sync_id = +// tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kPbftSyncPacket, {})).value(); +// +// size_t packets_count = 0; +// const auto packet8_get_dag_sync_id = packets_count = +// tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kGetDagSyncPacket, {})).value(); +// +// tp.startProcessing(); +// +// // How should packets be processed: +// // Note: To understand how are different packet types processed (concurrently without any blocking dependencies or +// // synchronously due to some blocking dependencies - depends on situation), check +// // PriorityQueue::updateDependenciesStart +// /* +// ------------------------ +// --- packet0_dag_sync --- +// ------------------------ +// ------------------------ +// --- packet1_dag_sync --- +// ------------------------ +// ------------------------- +// -- packet2_get_dag_sync - +// ------------------------- +// ------------------------- +// -- packet3_get_dag_sync - +// ------------------------- +// ------------------------- +// - packet4_get_pbft_sync - +// ------------------------- +// ------------------------- +// - packet5_get_pbft_sync - +// ------------------------- +// ------------------------ +// --- packet6_pbft_sync -- +// ------------------------ +// ------------------------ +// --- packet7_pbft_sync -- +// ------------------------ +// ------------------------ +// - packet8_get_dag_sync - +// ------------------------ +// 0......................20........................40........................60.......... 
time +// */ +// +// // All packets should be already being processed after short amount of time +// std::this_thread::sleep_for(60ms + QUEUE_EMPTIED_WAIT_TRESHOLD_MS); +// EXPECT_EQ(queuesSize(tp), 0); +// +// // Wait until processing of all packets is finished - in some edge cases it might be little bit delayed due to +// locking EXPECT_HAPPENS({500s, 20ms}, [&](auto& ctx) { +// // Check if transactions was propagated to node0 +// WAIT_EXPECT_EQ(ctx, init_data.packets_processing_info->getPacketProcessingTimesCount(), packets_count + 1) +// }); +// +// // Check order of packets how they were processed +// const auto packets_proc_info = init_data.packets_processing_info; +// +// const auto packet0_dag_sync_proc_info = packets_proc_info->getPacketProcessingTimes(packet0_dag_sync_id); +// const auto packet1_dag_sync_proc_info = packets_proc_info->getPacketProcessingTimes(packet1_dag_sync_id); +// const auto packet2_get_dag_sync_proc_info = packets_proc_info->getPacketProcessingTimes(packet2_get_dag_sync_id); +// const auto packet3_get_dag_sync_proc_info = packets_proc_info->getPacketProcessingTimes(packet3_get_dag_sync_id); +// const auto packet4_get_pbft_sync_proc_info = packets_proc_info->getPacketProcessingTimes(packet4_get_pbft_sync_id); +// const auto packet5_get_pbft_sync_proc_info = packets_proc_info->getPacketProcessingTimes(packet5_get_pbft_sync_id); +// const auto packet6_pbft_sync_proc_info = packets_proc_info->getPacketProcessingTimes(packet6_pbft_sync_id); +// const auto packet7_pbft_sync_proc_info = packets_proc_info->getPacketProcessingTimes(packet7_pbft_sync_id); +// const auto packet8_get_dag_sync_proc_info = packets_proc_info->getPacketProcessingTimes(packet8_get_dag_sync_id); +// +// checkConcurrentProcessing({ +// {packet0_dag_sync_proc_info, "packet0_dag_sync"}, +// {packet2_get_dag_sync_proc_info, "packet2_get_dag_sync"}, +// {packet4_get_pbft_sync_proc_info, "packet4_get_pbft_sync"}, +// {packet6_pbft_sync_proc_info, "packet6_pbft_sync"}, +// 
}); +// +// checkConcurrentProcessing({ +// {packet1_dag_sync_proc_info, "packet1_dag_sync"}, +// {packet3_get_dag_sync_proc_info, "packet3_get_dag_sync"}, +// {packet5_get_pbft_sync_proc_info, "packet5_get_pbft_sync"}, +// {packet7_pbft_sync_proc_info, "packet7_pbft_sync"}, +// }); +// +// EXPECT_GT(packet1_dag_sync_proc_info.start_time_, packet0_dag_sync_proc_info.finish_time_); +// EXPECT_GT(packet3_get_dag_sync_proc_info.start_time_, packet2_get_dag_sync_proc_info.finish_time_); +// EXPECT_GT(packet5_get_pbft_sync_proc_info.start_time_, packet4_get_pbft_sync_proc_info.finish_time_); +// EXPECT_GT(packet7_pbft_sync_proc_info.start_time_, packet6_pbft_sync_proc_info.finish_time_); +// +// EXPECT_GT(packet8_get_dag_sync_proc_info.start_time_, packet3_get_dag_sync_proc_info.finish_time_); +// } +// +//// Test "peer-order blocking dependencies" related to specific (peer & order) combination: +//// +//// Packets types that are blocked only for processing when received from specific peer & after specific +//// time (order), e.g.: new dag block packet processing is blocked until all transactions packets that were received +//// before it are processed. 
This blocking dependency is applied only for the same peer so transaction packet from one +//// peer does not block new dag block packet from another peer +// TEST_F(TarcapTpTest, peer_order_blocking_deps) { +// HandlersInitData init_data = createHandlersInitData(); +// +// auto packets_handler = std::make_shared(); +// packets_handler->registerHandler(init_data, "TX_PH", 20); +// packets_handler->registerHandler(init_data, "DAG_BLOCK_PH", 0); +// packets_handler->registerHandler(init_data, "SYNC_TEST_PH", 40); +// +// // Creates threadpool +// threadpool::PacketsThreadPool tp(10); +// tp.setPacketsHandlers(TARAXA_NET_VERSION, packets_handler); +// +// // Pushes packets to the tp +// const auto packet0_tx_id = +// tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kTransactionPacket)).value(); +// const auto packet1_tx_id = +// tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kTransactionPacket)).value(); +// const auto packet2_dag_sync_id = +// tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kDagSyncPacket)).value(); +// const auto packet3_tx_id = +// tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kTransactionPacket)).value(); +// +// size_t packets_count = 0; +// const auto packet4_dag_block_id = packets_count = +// tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kDagBlockPacket, {createDagBlockRlp(1)})) +// .value(); +// +// // How should packets be processed: +// // Note: To understand how are different packet types processed (concurrently without any blocking dependencies or +// // synchronously due to some blocking dependencies - depends on situation), check +// // PriorityQueue::updateDependenciesStart +// /* +// -------------- +// - packet0_tx - +// -------------- +// -------------- +// - packet1_tx - +// -------------- +// ---------------------------- +// ----- packet2_dag_sync ----- +// ---------------------------- +// -------------- +// - packet3_tx - +// 
-------------- +// --------------------- +// - packet4_dag_block - +// --------------------- +// 0............20.............40....................60.................. time [ms] +// */ +// +// tp.startProcessing(); +// +// // All packets should be already being processed after short amount of time +// std::this_thread::sleep_for(60ms + QUEUE_EMPTIED_WAIT_TRESHOLD_MS); +// EXPECT_EQ(queuesSize(tp), 0); +// +// // Wait until processing of all packets is finished - in some edge cases it might be little bit delayed due to +// locking EXPECT_HAPPENS({500s, 20ms}, [&](auto& ctx) { +// // Check if transactions was propagated to node0 +// WAIT_EXPECT_EQ(ctx, init_data.packets_processing_info->getPacketProcessingTimesCount(), packets_count + 1) +// }); +// +// // Check order of packets how they were processed +// const auto packets_proc_info = init_data.packets_processing_info; +// +// const auto packet0_tx_proc_info = packets_proc_info->getPacketProcessingTimes(packet0_tx_id); +// const auto packet1_tx_proc_info = packets_proc_info->getPacketProcessingTimes(packet1_tx_id); +// const auto packet3_tx_proc_info = packets_proc_info->getPacketProcessingTimes(packet3_tx_id); +// const auto packet4_dag_block_proc_info = packets_proc_info->getPacketProcessingTimes(packet4_dag_block_id); +// const auto packet2_dag_sync_proc_info = packets_proc_info->getPacketProcessingTimes(packet2_dag_sync_id); +// +// checkConcurrentProcessing({ +// {packet0_tx_proc_info, "packet0_tx"}, +// {packet1_tx_proc_info, "packet1_tx"}, +// {packet2_dag_sync_proc_info, "packet2_dag_sync"}, +// {packet3_tx_proc_info, "packet3_tx"}, +// }); +// +// EXPECT_GT(packet2_dag_sync_proc_info.finish_time_, packet0_tx_proc_info.finish_time_); +// EXPECT_GT(packet2_dag_sync_proc_info.finish_time_, packet1_tx_proc_info.finish_time_); +// EXPECT_GT(packet2_dag_sync_proc_info.finish_time_, packet3_tx_proc_info.finish_time_); +// +// EXPECT_GT(packet4_dag_block_proc_info.start_time_, 
packet2_dag_sync_proc_info.finish_time_); +// } +// +//// Test "dag-block blocking dependencies" related to dag blocks: +//// +//// Same dag blocks should not be processed at the same time +// TEST_F(TarcapTpTest, same_dag_blks_ordering) { +// HandlersInitData init_data = createHandlersInitData(); +// +// auto packets_handler = std::make_shared(); +// packets_handler->registerHandler(init_data, "DAG_BLOCK_PH", 20); +// +// // Creates threadpool +// threadpool::PacketsThreadPool tp(10); +// tp.setPacketsHandlers(TARAXA_NET_VERSION, packets_handler); +// +// auto dag_block = createDagBlockRlp(0); +// +// // Pushes packets to the tp +// const auto blk0_id = +// tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kDagBlockPacket, {dag_block})).value(); +// const auto blk1_id = +// tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kDagBlockPacket, {dag_block})).value(); +// const auto blk2_id = +// tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kDagBlockPacket, {dag_block})).value(); +// const auto blk3_id = +// tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kDagBlockPacket, {dag_block})).value(); +// +// size_t packets_count = 0; +// const auto blk4_id = packets_count = +// tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kDagBlockPacket, {dag_block})).value(); +// +// tp.startProcessing(); +// +// // How should dag blocks packets be processed: +// // Same dag blocks should not be processed concurrently but one after another +// +// // All packets should be already being processed after short amount of time +// std::this_thread::sleep_for(200ms + QUEUE_EMPTIED_WAIT_TRESHOLD_MS); +// EXPECT_EQ(queuesSize(tp), 0); +// +// // Wait until processing of all packets is finished - in some edge cases it might be little bit delayed due to +// locking EXPECT_HAPPENS({500s, 20ms}, [&](auto& ctx) { +// // Check if transactions was propagated to node0 +// WAIT_EXPECT_EQ(ctx, 
init_data.packets_processing_info->getPacketProcessingTimesCount(), packets_count + 1) +// }); +// +// // Check order of packets how they were processed +// const auto packets_proc_info = init_data.packets_processing_info; +// +// const auto blk0_proc_info = packets_proc_info->getPacketProcessingTimes(blk0_id); +// const auto blk1_proc_info = packets_proc_info->getPacketProcessingTimes(blk1_id); +// const auto blk2_proc_info = packets_proc_info->getPacketProcessingTimes(blk2_id); +// const auto blk3_proc_info = packets_proc_info->getPacketProcessingTimes(blk3_id); +// const auto blk4_proc_info = packets_proc_info->getPacketProcessingTimes(blk4_id); +// +// checkSerialProcessing({ +// {blk0_proc_info, "blk0"}, +// {blk1_proc_info, "blk1"}, +// {blk2_proc_info, "blk2"}, +// {blk3_proc_info, "blk3"}, +// {blk4_proc_info, "blk4"}, +// }); +// } +// +//// Test "dag-level blocking dependencies" related to dag blocks levels: +//// +//// Ideally only dag blocks with the same level should be processed. In reality there are situation when node receives +//// dag block with smaller level than the level of blocks that are already being processed. In such case these blocks +//// with smaller levels can be processed concurrently with blocks that have higher level. 
All new dag blocks with +/// higher / level than the lowest level from all the blocks that currently being processed are blocked for processing +// TEST_F(TarcapTpTest, dag_blks_lvls_ordering) { +// HandlersInitData init_data = createHandlersInitData(); +// +// auto packets_handler = std::make_shared(); +// packets_handler->registerHandler(init_data, "DAG_BLOCK_PH", 20); +// +// // Creates threadpool +// threadpool::PacketsThreadPool tp(10); +// tp.setPacketsHandlers(TARAXA_NET_VERSION, packets_handler); +// +// // Pushes packets to the tp +// const auto blk0_lvl1_id = +// tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kDagBlockPacket, {createDagBlockRlp(1, +// 1)})) +// .value(); +// const auto blk1_lvl1_id = +// tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kDagBlockPacket, {createDagBlockRlp(1, +// 2)})) +// .value(); +// const auto blk2_lvl0_id = +// tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kDagBlockPacket, {createDagBlockRlp(0, +// 3)})) +// .value(); +// const auto blk3_lvl1_id = +// tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kDagBlockPacket, {createDagBlockRlp(1, +// 4)})) +// .value(); +// const auto blk4_lvl2_id = +// tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kDagBlockPacket, {createDagBlockRlp(2, +// 5)})) +// .value(); +// +// size_t packets_count = 0; +// const auto blk5_lvl3_id = packets_count = +// tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kDagBlockPacket, {createDagBlockRlp(3, +// 6)})) +// .value(); +// +// tp.startProcessing(); +// +// // How should dag blocks packets be processed: +// // Note: To understand how are different packet types processed (concurrently without any blocking dependencies or +// // synchronously due to some blocking dependencies - depends on situation), check +// // PriorityQueue::updateDependenciesStart +// /* +// ------------- +// - blk0_lvl1 - +// ------------- +// 
------------- +// - blk1_lvl1 - +// ------------- +// ------------- +// - blk2_lvl0 - +// ------------- +// ------------- +// - blk3_lvl1 - +// ------------- +// ------------- +// - blk4_lvl2 - +// ------------- +// ------------- +// - blk5_lvl3 - +// ------------- +// 0...........20............40............60.............80................. time [ms] +// */ +// +// // All packets should be already being processed after short amount of time +// std::this_thread::sleep_for(80ms + QUEUE_EMPTIED_WAIT_TRESHOLD_MS); +// EXPECT_EQ(queuesSize(tp), 0); +// +// // Wait until processing of all packets is finished - in some edge cases it might be little bit delayed due to +// locking EXPECT_HAPPENS({500s, 20ms}, [&](auto& ctx) { +// // Check if transactions was propagated to node0 +// WAIT_EXPECT_EQ(ctx, init_data.packets_processing_info->getPacketProcessingTimesCount(), packets_count + 1) +// }); +// +// // Check order of packets how they were processed +// const auto packets_proc_info = init_data.packets_processing_info; +// +// const auto blk0_lvl1_proc_info = packets_proc_info->getPacketProcessingTimes(blk0_lvl1_id); +// const auto blk1_lvl1_proc_info = packets_proc_info->getPacketProcessingTimes(blk1_lvl1_id); +// const auto blk2_lvl0_proc_info = packets_proc_info->getPacketProcessingTimes(blk2_lvl0_id); +// const auto blk3_lvl1_proc_info = packets_proc_info->getPacketProcessingTimes(blk3_lvl1_id); +// const auto blk4_lvl2_proc_info = packets_proc_info->getPacketProcessingTimes(blk4_lvl2_id); +// const auto blk5_lvl3_proc_info = packets_proc_info->getPacketProcessingTimes(blk5_lvl3_id); +// +// checkConcurrentProcessing({ +// {blk0_lvl1_proc_info, "blk0_lvl1"}, +// {blk1_lvl1_proc_info, "blk1_lvl1"}, +// {blk2_lvl0_proc_info, "blk2_lvl0"}, +// }); +// +// EXPECT_GT(blk3_lvl1_proc_info.start_time_, blk2_lvl0_proc_info.finish_time_); +// EXPECT_GT(blk4_lvl2_proc_info.start_time_, blk3_lvl1_proc_info.finish_time_); +// EXPECT_GT(blk5_lvl3_proc_info.start_time_, 
blk4_lvl2_proc_info.finish_time_); +// } +// +//// Test threads borrowing +//// +//// It can happen that no packet for processing was returned during the first iteration over priority queues as there +//// are limits for max total workers per each priority queue. These limits can and should be ignored in some +//// scenarios... For example: +//// High priority queue reached it's max workers limit, other queues have inside many blocked packets that cannot be +//// currently processed concurrently and MAX_TOTAL_WORKERS_COUNT is not reached yet. In such case some threads might +//// be unused. In such cases priority queues max workers limits can and should be ignored. +//// +//// Always keep 1 reserved thread for each priority queue at all times +// TEST_F(TarcapTpTest, threads_borrowing) { +// HandlersInitData init_data = createHandlersInitData(); +// +// auto packets_handler = std::make_shared(); +// packets_handler->registerHandler(init_data, "VOTE_PH", 100); +// +// // Creates threadpool +// const size_t threads_num = 10; +// threadpool::PacketsThreadPool tp(threads_num); +// tp.setPacketsHandlers(TARAXA_NET_VERSION, packets_handler); +// +// // Pushes packets to the tp +// std::vector pushed_packets_ids; +// for (size_t i = 0; i < threads_num; i++) { +// uint64_t packet_id = tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kVotePacket, +// {})).value(); pushed_packets_ids.push_back(packet_id); +// } +// +// tp.startProcessing(); +// +// // How should packets be processed: +// // Note: To understand how are different packet types processed (concurrently without any blocking dependencies or +// // synchronously due to some blocking dependencies - depends on situation), check +// // PriorityQueue::updateDependenciesStart +// // +// // Note: each queue has 1 thread reserved at all times(even if it does not do anything) and there is 10 threads in +// // total, even with borrowing only 8 threads could be used at the same time +// /* +// 
---------------- +// - packet0_vote - +// ---------------- +// ---------------- +// - packet1_vote - +// ---------------- +// ---------------- +// - packet2_vote - +// ---------------- +// +// -||- +// ... +// +// ---------------- +// - packet7_vote - +// ---------------- +// ---------------- +// - packet8_vote - +// ---------------- +// ---------------- +// - packet9_vote - +// ---------------- +// 0..............100...............200........... time [ms] +// */ +// +// // First 8 packets should be already processed by this time +// std::this_thread::sleep_for(100ms + 50ms /* might take longer due to threads borrowing */); +// EXPECT_LE(queuesSize(tp), 2); +// +// // Check order of packets how they were processed +// const auto packets_proc_info = init_data.packets_processing_info; +// +// // In case some packet processing is not finished yet, getPacketProcessingTimes() returns default (empty) value +// std::chrono::steady_clock::time_point default_time_point; +// +// // Because each queue has 1 thread reserved at all times(even if it does not do anything) and there is 10 threads +// in +// // total, even with borrowing only 8 threads could be used at the same time, thus last 2 packets (9th & 10th) +// should +// // not be processed after (100 + WAIT_TRESHOLD_MS) ms +// EXPECT_EQ(packets_proc_info->getPacketProcessingTimes(pushed_packets_ids[8]).finish_time_, default_time_point); +// EXPECT_EQ(packets_proc_info->getPacketProcessingTimes(pushed_packets_ids[9]).finish_time_, default_time_point); +// +// std::vector> packets_proc_info_vec; +// for (size_t i = 0; i < threads_num - (threadpool::PacketData::PacketPriority::Count - 1); i++) { +// packets_proc_info_vec.emplace_back(packets_proc_info->getPacketProcessingTimes(pushed_packets_ids[i]), +// "packet" + std::to_string(pushed_packets_ids[i]) + "_vote"); +// } +// +// // Check if first 8 pbft vote packets were processed concurrently -> threads from other queues had to be borrowed +// for +// // that +// 
checkConcurrentProcessing(packets_proc_info_vec); +// } +// +//// Test low priority queue starvation +//// +//// It should never happen that packets from lower priority queues are waiting to be processed until all packets from +//// higher priority queues are processed +// TEST_F(TarcapTpTest, low_priotity_queue_starvation) { +// HandlersInitData init_data = createHandlersInitData(); +// +// auto packets_handler = std::make_shared(); +// // Handler for packet from high priority queue +// packets_handler->registerHandler(init_data, "VOTE_PH", 20); +// +// // Handler for packet from mid priority queue +// packets_handler->registerHandler(init_data, "TX_PH", 20); +// +// // Handler for packet from low priority queue +// packets_handler->registerHandler(init_data, "STATUS_PH", 20); +// +// // Creates threadpool +// size_t threads_num = 10; +// threadpool::PacketsThreadPool tp(threads_num); +// tp.setPacketsHandlers(TARAXA_NET_VERSION, packets_handler); +// +// // Push 10x more packets for each prioriy queue than max tp capacity to make sure that tp wont be able to process +// all +// // packets from each queue concurrently -> many packets will be waiting due to max threads num reached for specific +// // priority queues +// for (size_t i = 0; i < 2 * 10 * threads_num; i++) { +// tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kVotePacket, {})).value(); +// tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kTransactionPacket, {})).value(); +// } +// +// // Push a few packets low priority packets +// for (size_t i = 0; i < 4; i++) { +// tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kStatusPacket, {})).value(); +// } +// +// tp.startProcessing(); +// +// // How should packets be processed: +// // Note: To understand how are different packet types processed (concurrently without any blocking dependencies or +// // synchronously due to some blocking dependencies - depends on situation), check +// // 
PriorityQueue::updateDependenciesStart In this test are max concurrent processing limits for queues reached, so +// // when we have 10 threads in thredpool: +// // - 4 is limit for High priority queue - VotePacket +// // - 4 is limit for Mid priority queue - TransactionPacket +// // - 3 is limit for Low priority queue - StatusPacket, but because max total limit (10) is always checked first +// // , low priority queue wont be able to use more than 2 threads concurrently +// /* +// ---------------- +// - packet0_vote - +// ---------------- +// ---------------- +// - packet1_vote - +// ---------------- +// ---------------- +// - packet2_vote - +// ---------------- +// ---------------- +// - packet3_vote - +// ---------------- +// ---------------- +// -- packet4_tx -- +// ---------------- +// ---------------- +// -- packet5_tx -- +// ---------------- +// ---------------- +// -- packet6_tx -- +// ---------------- +// ---------------- +// -- packet7_tx -- +// ---------------- +// +// .... +// votes and tx packets are processed concurrently 4 at a time until all of them are processed +// +// +// ------------------ +// - packet400_test - +// ------------------ +// ------------------ +// - packet401_test - +// ------------------ +// ------------------ +// - packet402_test - +// ------------------ +// ------------------ +// - packet403_test - +// ------------------ +// 0.................20.................40................... 
time [ms] +// */ +// +// std::this_thread::sleep_for(40ms + QUEUE_EMPTIED_WAIT_TRESHOLD_MS); +// +// const auto [high_priority_queue_size, mid_priority_queue_size, low_priority_queue_size] = tp.getQueueSize(); +// +// EXPECT_GT(high_priority_queue_size, 0); +// EXPECT_GT(mid_priority_queue_size, 0); +// EXPECT_EQ(low_priority_queue_size, 0); +// } +// +// } // namespace taraxa::core_tests +// +// int main(int argc, char** argv) { +// using namespace taraxa; +// +// auto logging = logger::createDefaultLoggingConfig(); +// +// // Set this to debug to see log msgs +// logging.verbosity = logger::Verbosity::Debug; +// +// addr_t node_addr; +// logger::InitLogging(logging, node_addr); +// +// ::testing::InitGoogleTest(&argc, argv); +// return RUN_ALL_TESTS(); +// } \ No newline at end of file From dd79d794e795658ccd4cd60d95c669ef79aaeaa3 Mon Sep 17 00:00:00 2001 From: Jakub Fornadel Date: Tue, 15 Oct 2024 11:00:32 +0200 Subject: [PATCH 071/105] fix statusPacket rlp parsing --- .../tarcap/packets/latest/status_packet.hpp | 40 +++++++---------- .../packets/latest/transaction_packet.hpp | 2 - .../tarcap/packets/latest/vote_packet.hpp | 1 - .../latest/common/packet_handler.hpp | 2 +- .../latest/status_packet_handler.cpp | 45 ++++++++++--------- .../latest/transaction_packet_handler.cpp | 1 - 6 files changed, 42 insertions(+), 49 deletions(-) diff --git a/libraries/core_libs/network/include/network/tarcap/packets/latest/status_packet.hpp b/libraries/core_libs/network/include/network/tarcap/packets/latest/status_packet.hpp index e20243a8fb..a4c1703ee1 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets/latest/status_packet.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets/latest/status_packet.hpp @@ -4,47 +4,41 @@ namespace taraxa::network::tarcap { // TODO: create new version of this packet without manual parsing struct StatusPacket { + struct InitialData { + uint64_t peer_chain_id; + blk_hash_t genesis_hash; + unsigned 
node_major_version; + unsigned node_minor_version; + unsigned node_patch_version; + bool is_light_node; + PbftPeriod node_history; + + RLP_FIELDS_DEFINE_INPLACE(peer_chain_id, genesis_hash, node_major_version, node_minor_version, node_patch_version, + is_light_node, node_history) + }; + StatusPacket() = default; StatusPacket(const StatusPacket&) = default; StatusPacket(StatusPacket&&) = default; StatusPacket& operator=(const StatusPacket&) = default; StatusPacket& operator=(StatusPacket&&) = default; StatusPacket(PbftPeriod peer_pbft_chain_size, PbftRound peer_pbft_round, uint64_t peer_dag_level, bool peer_syncing, - std::optional peer_chain_id = {}, std::optional genesis_hash = {}, - std::optional node_major_version = {}, std::optional node_minor_version = {}, - std::optional node_patch_version = {}, std::optional is_light_node = {}, - std::optional node_history = {}) + std::optional initial_data = {}) : peer_pbft_chain_size(peer_pbft_chain_size), peer_pbft_round(peer_pbft_round), peer_dag_level(peer_dag_level), peer_syncing(peer_syncing), - peer_chain_id(std::move(peer_chain_id)), - genesis_hash(std::move(genesis_hash)), - node_major_version(std::move(node_major_version)), - node_minor_version(std::move(node_minor_version)), - node_patch_version(std::move(node_patch_version)), - is_light_node(std::move(is_light_node)), - node_history(std::move(node_history)) {} + initial_data(std::move(initial_data)) {} StatusPacket(const dev::RLP& packet_rlp) { *this = util::rlp_dec(packet_rlp); } dev::bytes encodeRlp() { return util::rlp_enc(*this); } - bool isInitialStatusPacket() const { return peer_chain_id.has_value(); } - PbftPeriod peer_pbft_chain_size; PbftRound peer_pbft_round; uint64_t peer_dag_level; bool peer_syncing; - std::optional peer_chain_id; - std::optional genesis_hash; - std::optional node_major_version; - std::optional node_minor_version; - std::optional node_patch_version; - std::optional is_light_node; - std::optional node_history; + std::optional 
initial_data; - RLP_FIELDS_DEFINE_INPLACE(peer_pbft_chain_size, peer_pbft_round, peer_dag_level, peer_syncing, peer_chain_id, - genesis_hash, node_major_version, node_minor_version, node_patch_version, is_light_node, - node_history) + RLP_FIELDS_DEFINE_INPLACE(peer_pbft_chain_size, peer_pbft_round, peer_dag_level, peer_syncing, initial_data) }; } // namespace taraxa::network::tarcap diff --git a/libraries/core_libs/network/include/network/tarcap/packets/latest/transaction_packet.hpp b/libraries/core_libs/network/include/network/tarcap/packets/latest/transaction_packet.hpp index ce7ab984cb..3f393ce93f 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets/latest/transaction_packet.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets/latest/transaction_packet.hpp @@ -22,9 +22,7 @@ struct TransactionPacket { std::vector> transactions; - constexpr static uint32_t kTransactionPacketItemCount = 2; constexpr static uint32_t kMaxTransactionsInPacket{500}; - constexpr static uint32_t kMaxHashesInPacket{5000}; RLP_FIELDS_DEFINE_INPLACE(transactions) }; diff --git a/libraries/core_libs/network/include/network/tarcap/packets/latest/vote_packet.hpp b/libraries/core_libs/network/include/network/tarcap/packets/latest/vote_packet.hpp index e74e61ccc8..7b4008151c 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets/latest/vote_packet.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets/latest/vote_packet.hpp @@ -18,7 +18,6 @@ struct VotePacket { dev::bytes encodeRlp() { return util::rlp_enc(*this); } std::shared_ptr vote; - // TODO: Should it be also optional ? 
std::shared_ptr pbft_block; std::optional peer_chain_size; diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/common/packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/common/packet_handler.hpp index 2bacec7d36..01e000545d 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/common/packet_handler.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/common/packet_handler.hpp @@ -58,7 +58,7 @@ class PacketHandler : public BasePacketHandler { return; } - // TODO: can be removed after taraxa net version is completely switched to 5 + // TODO[2865]: can be removed after taraxa net version is completely switched to 5 checkPacketRlpIsList(packet_data); // Main processing function diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/status_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/status_packet_handler.cpp index e3f0136530..a42da45ab4 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/status_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/status_packet_handler.cpp @@ -28,7 +28,7 @@ void StatusPacketHandler::process(StatusPacket&& packet, const std::shared_ptrpbftSyncingPeriod(); // Initial status packet - if (packet.isInitialStatusPacket()) { + if (packet.initial_data.has_value()) { if (!selected_peer) { selected_peer = peers_state_->getPendingPeer(peer->getId()); if (!selected_peer) { @@ -39,31 +39,31 @@ void StatusPacketHandler::process(StatusPacket&& packet, const std::shared_ptrpeer_chain_id != kConf.genesis.chain_id) { LOG((peers_state_->getPeersCount()) ? 
log_nf_ : log_er_) - << "Incorrect network id " << *packet.peer_chain_id << ", host " << peer->getId().abridged() + << "Incorrect network id " << packet.initial_data->peer_chain_id << ", host " << peer->getId().abridged() << " will be disconnected"; disconnect(peer->getId(), dev::p2p::UserReason); return; } - if (*packet.genesis_hash != kGenesisHash) { + if (packet.initial_data->genesis_hash != kGenesisHash) { LOG((peers_state_->getPeersCount()) ? log_nf_ : log_wr_) - << "Incorrect genesis hash " << *packet.genesis_hash << ", host " << peer->getId().abridged() + << "Incorrect genesis hash " << packet.initial_data->genesis_hash << ", host " << peer->getId().abridged() << " will be disconnected"; disconnect(peer->getId(), dev::p2p::UserReason); return; } // If this is a light node and it cannot serve our sync request disconnect from it - if (*packet.is_light_node) { + if (packet.initial_data->is_light_node) { selected_peer->peer_light_node = true; - selected_peer->peer_light_node_history = *packet.node_history; - if (pbft_synced_period + *packet.node_history < packet.peer_pbft_chain_size) { + selected_peer->peer_light_node_history = packet.initial_data->node_history; + if (pbft_synced_period + packet.initial_data->node_history < packet.peer_pbft_chain_size) { LOG((peers_state_->getPeersCount()) ? log_nf_ : log_er_) << "Light node " << peer->getId().abridged() << " would not be able to serve our syncing request. " << "Current synced period " << pbft_synced_period << ", peer synced period " << packet.peer_pbft_chain_size - << ", peer light node history " << *packet.node_history << ". Peer will be disconnected"; + << ", peer light node history " << packet.initial_data->node_history << ". 
Peer will be disconnected"; disconnect(peer->getId(), dev::p2p::UserReason); return; } @@ -77,13 +77,14 @@ void StatusPacketHandler::process(StatusPacket&& packet, const std::shared_ptrsetPeerAsReadyToSendMessages(peer->getId(), selected_peer); - LOG(log_dg_) << "Received initial status message from " << peer->getId() << ", network id " << *packet.peer_chain_id - << ", peer DAG max level " << selected_peer->dag_level_ << ", genesis " << *packet.genesis_hash - << ", peer pbft chain size " << selected_peer->pbft_chain_size_ << ", peer syncing " << std::boolalpha - << selected_peer->syncing_ << ", peer pbft period " << selected_peer->pbft_period_ - << ", peer pbft round " << selected_peer->pbft_round_ << ", node major version" - << *packet.node_major_version << ", node minor version" << *packet.node_minor_version - << ", node patch version" << *packet.node_patch_version; + LOG(log_dg_) << "Received initial status message from " << peer->getId() << ", network id " + << packet.initial_data->peer_chain_id << ", peer DAG max level " << selected_peer->dag_level_ + << ", genesis " << packet.initial_data->genesis_hash << ", peer pbft chain size " + << selected_peer->pbft_chain_size_ << ", peer syncing " << std::boolalpha << selected_peer->syncing_ + << ", peer pbft period " << selected_peer->pbft_period_ << ", peer pbft round " + << selected_peer->pbft_round_ << ", node major version" << packet.initial_data->node_major_version + << ", node minor version" << packet.initial_data->node_minor_version << ", node patch version" + << packet.initial_data->node_patch_version; } else { // Standard status packet if (!selected_peer) { @@ -145,11 +146,13 @@ bool StatusPacketHandler::sendStatus(const dev::p2p::NodeID& node_id, bool initi const auto pbft_round = pbft_mgr_->getPbftRound(); if (initial) { - success = sealAndSend(node_id, SubprotocolPacketType::kStatusPacket, - StatusPacket(pbft_chain_size, pbft_round, dag_max_level, pbft_syncing_state_->isPbftSyncing(), - 
kConf.genesis.chain_id, kGenesisHash, TARAXA_MAJOR_VERSION, TARAXA_MINOR_VERSION, - TARAXA_PATCH_VERSION, kConf.is_light_node, kConf.light_node_history) - .encodeRlp()); + success = + sealAndSend(node_id, SubprotocolPacketType::kStatusPacket, + StatusPacket(pbft_chain_size, pbft_round, dag_max_level, pbft_syncing_state_->isPbftSyncing(), + StatusPacket::InitialData{kConf.genesis.chain_id, kGenesisHash, TARAXA_MAJOR_VERSION, + TARAXA_MINOR_VERSION, TARAXA_PATCH_VERSION, + kConf.is_light_node, kConf.light_node_history}) + .encodeRlp()); } else { success = sealAndSend( node_id, SubprotocolPacketType::kStatusPacket, diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/transaction_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/transaction_packet_handler.cpp index 283ec4282c..bd0727a498 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/transaction_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/transaction_packet_handler.cpp @@ -148,7 +148,6 @@ void TransactionPacketHandler::sendTransactions(std::shared_ptr peer std::pair> &&transactions) { if (!peer) return; const auto peer_id = peer->getId(); - const auto transactions_size = transactions.first.size(); LOG(log_tr_) << "sendTransactions " << transactions.first.size() << " to " << peer_id; From a09eef691a3eeb41ec2fbd70289f98e302614718 Mon Sep 17 00:00:00 2001 From: Jakub Fornadel Date: Wed, 16 Oct 2024 09:52:18 +0200 Subject: [PATCH 072/105] fix packets rlp encoding/decoding --- CMakeLists.txt | 2 +- .../packets/latest/get_dag_sync_packet.hpp | 3 +- .../packets/latest/get_pbft_sync_packet.hpp | 2 +- .../tarcap/packets/latest/vote_packet.hpp | 17 +- .../packets/latest/votes_bundle_packet.hpp | 17 +- .../common/ext_syncing_packet_handler.hpp | 20 +- .../common/ext_votes_packet_handler.hpp | 18 +- .../v4/common/ext_syncing_packet_handler.hpp | 200 +++++++++++++ 
.../v4/common/ext_votes_packet_handler.hpp | 281 ++++++++++++++++++ .../v4/dag_block_packet_handler.hpp | 4 +- .../v4/dag_sync_packet_handler.hpp | 4 +- .../v4/pbft_sync_packet_handler.hpp | 4 +- .../v4/status_packet_handler.hpp | 4 +- .../v4/vote_packet_handler.hpp | 4 +- .../v4/votes_bundle_packet_handler.hpp | 4 +- .../latest/vote_packet_handler.cpp | 38 +-- .../latest/votes_bundle_packet_handler.cpp | 4 +- libraries/types/dag_block/src/dag_block.cpp | 5 +- libraries/types/pbft_block/src/pbft_block.cpp | 5 +- .../types/pbft_block/src/period_data.cpp | 5 +- .../types/transaction/src/transaction.cpp | 5 +- libraries/types/vote/src/pbft_vote.cpp | 5 +- 22 files changed, 560 insertions(+), 91 deletions(-) create mode 100644 libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/common/ext_syncing_packet_handler.hpp create mode 100644 libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/common/ext_votes_packet_handler.hpp diff --git a/CMakeLists.txt b/CMakeLists.txt index ffc5a0bc55..a22fb579b0 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -7,7 +7,7 @@ set(TARAXA_PATCH_VERSION 0) set(TARAXA_VERSION ${TARAXA_MAJOR_VERSION}.${TARAXA_MINOR_VERSION}.${TARAXA_PATCH_VERSION}) # Any time a change in the network protocol is introduced this version should be increased -set(TARAXA_NET_VERSION 4) +set(TARAXA_NET_VERSION 5) # Major version is modified when DAG blocks, pbft blocks and any basic building blocks of our blockchain is modified # in the db set(TARAXA_DB_MAJOR_VERSION 1) diff --git a/libraries/core_libs/network/include/network/tarcap/packets/latest/get_dag_sync_packet.hpp b/libraries/core_libs/network/include/network/tarcap/packets/latest/get_dag_sync_packet.hpp index 47a072925e..ccb5c5a539 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets/latest/get_dag_sync_packet.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets/latest/get_dag_sync_packet.hpp @@ -11,7 +11,8 @@ struct GetDagSyncPacket { 
GetDagSyncPacket(GetDagSyncPacket&&) = default; GetDagSyncPacket& operator=(const GetDagSyncPacket&) = default; GetDagSyncPacket& operator=(GetDagSyncPacket&&) = default; - + GetDagSyncPacket(PbftPeriod peer_period, std::vector&& blocks_hashes) + : peer_period(peer_period), blocks_hashes(std::move(blocks_hashes)) {} GetDagSyncPacket(const dev::RLP& packet_rlp) { *this = util::rlp_dec(packet_rlp); } dev::bytes encodeRlp() { return util::rlp_enc(*this); } diff --git a/libraries/core_libs/network/include/network/tarcap/packets/latest/get_pbft_sync_packet.hpp b/libraries/core_libs/network/include/network/tarcap/packets/latest/get_pbft_sync_packet.hpp index e373501ba3..9f4ef983cf 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets/latest/get_pbft_sync_packet.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets/latest/get_pbft_sync_packet.hpp @@ -10,7 +10,7 @@ struct GetPbftSyncPacket { GetPbftSyncPacket(GetPbftSyncPacket&&) = default; GetPbftSyncPacket& operator=(const GetPbftSyncPacket&) = default; GetPbftSyncPacket& operator=(GetPbftSyncPacket&&) = default; - + GetPbftSyncPacket(size_t height_to_sync) : height_to_sync(height_to_sync) {} GetPbftSyncPacket(const dev::RLP& packet_rlp) { *this = util::rlp_dec(packet_rlp); } dev::bytes encodeRlp() { return util::rlp_enc(*this); } diff --git a/libraries/core_libs/network/include/network/tarcap/packets/latest/vote_packet.hpp b/libraries/core_libs/network/include/network/tarcap/packets/latest/vote_packet.hpp index 7b4008151c..d33968d7ce 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets/latest/vote_packet.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets/latest/vote_packet.hpp @@ -6,22 +6,27 @@ namespace taraxa::network::tarcap { struct VotePacket { + struct OptionalData { + std::shared_ptr pbft_block; + uint64_t peer_chain_size; + + RLP_FIELDS_DEFINE_INPLACE(pbft_block, peer_chain_size) + }; + VotePacket() = default; VotePacket(const VotePacket&) = 
default; VotePacket(VotePacket&&) = default; VotePacket& operator=(const VotePacket&) = default; VotePacket& operator=(VotePacket&&) = default; - VotePacket(std::shared_ptr vote, std::shared_ptr pbft_block = {}, - std::optional peer_chain_size = {}) - : vote(std::move(vote)), pbft_block(std::move(pbft_block)), peer_chain_size(std::move(peer_chain_size)) {} + VotePacket(std::shared_ptr vote, std::optional optional_data = {}) + : vote(std::move(vote)), optional_data(std::move(optional_data)) {} VotePacket(const dev::RLP& packet_rlp) { *this = util::rlp_dec(packet_rlp); }; dev::bytes encodeRlp() { return util::rlp_enc(*this); } std::shared_ptr vote; - std::shared_ptr pbft_block; - std::optional peer_chain_size; + std::optional optional_data; - RLP_FIELDS_DEFINE_INPLACE(vote, pbft_block, peer_chain_size) + RLP_FIELDS_DEFINE_INPLACE(vote, optional_data) }; } // namespace taraxa::network::tarcap diff --git a/libraries/core_libs/network/include/network/tarcap/packets/latest/votes_bundle_packet.hpp b/libraries/core_libs/network/include/network/tarcap/packets/latest/votes_bundle_packet.hpp index d92e0df85f..6cfeb1e510 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets/latest/votes_bundle_packet.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets/latest/votes_bundle_packet.hpp @@ -1,5 +1,6 @@ #pragma once +#include "network/tarcap/packets_handlers/latest/common/exceptions.hpp" #include "vote/pbft_vote.hpp" namespace taraxa::network::tarcap { @@ -10,7 +11,7 @@ struct VotesBundlePacket { VotesBundlePacket(VotesBundlePacket&&) = default; VotesBundlePacket& operator=(const VotesBundlePacket&) = default; VotesBundlePacket& operator=(VotesBundlePacket&&) = default; - + VotesBundlePacket(std::vector>&& votes) : votes(std::move(votes)) {} VotesBundlePacket(const dev::RLP& packet_rlp) { *this = util::rlp_dec(packet_rlp); if (votes.size() == 0 || votes.size() > kMaxVotesInBundleRlp) { @@ -19,17 +20,19 @@ struct VotesBundlePacket { }; dev::bytes 
encodeRlp() { return util::rlp_enc(*this); } - blk_hash_t votes_bundle_block_hash; - PbftPeriod votes_bundle_pbft_period; - PbftRound votes_bundle_pbft_round; - PbftStep votes_bundle_votes_step; + // TODO[2870]: Create votes bundles class + // blk_hash_t votes_bundle_block_hash; + // PbftPeriod votes_bundle_pbft_period; + // PbftRound votes_bundle_pbft_round; + // PbftStep votes_bundle_votes_step; std::vector> votes; constexpr static size_t kMaxVotesInBundleRlp{1000}; - RLP_FIELDS_DEFINE_INPLACE(votes_bundle_block_hash, votes_bundle_pbft_period, votes_bundle_pbft_round, - votes_bundle_votes_step, votes) + // RLP_FIELDS_DEFINE_INPLACE(votes_bundle_block_hash, votes_bundle_pbft_period, votes_bundle_pbft_round, + // votes_bundle_votes_step, votes) + RLP_FIELDS_DEFINE_INPLACE(votes) }; } // namespace taraxa::network::tarcap diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/common/ext_syncing_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/common/ext_syncing_packet_handler.hpp index 06c4e61317..ad41ca6b89 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/common/ext_syncing_packet_handler.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/common/ext_syncing_packet_handler.hpp @@ -1,6 +1,8 @@ #pragma once #include "dag/dag_manager.hpp" +#include "network/tarcap/packets/latest/get_dag_sync_packet.hpp" +#include "network/tarcap/packets/latest/get_pbft_sync_packet.hpp" #include "network/tarcap/shared_states/pbft_syncing_state.hpp" #include "packet_handler.hpp" #include "pbft/pbft_chain.hpp" @@ -97,16 +99,12 @@ class ExtSyncingPacketHandler : public PacketHandler { LOG(this->log_nf_) << "Send GetPbftSyncPacket with period " << request_period << " to node " << syncing_peer->getId(); return this->sealAndSend(syncing_peer->getId(), SubprotocolPacketType::kGetPbftSyncPacket, - std::move(dev::RLPStream(1) << request_period)); 
+ GetPbftSyncPacket{request_period}.encodeRlp()); } - void requestDagBlocks(const dev::p2p::NodeID &_nodeID, const std::unordered_set &blocks, - PbftPeriod period) { - dev::RLPStream s(2); // Period + blocks list - s.append(period); - s.append(blocks); - - this->sealAndSend(_nodeID, SubprotocolPacketType::kGetDagSyncPacket, std::move(s)); + void requestDagBlocks(const dev::p2p::NodeID &_nodeID, std::vector &&blocks, PbftPeriod period) { + this->sealAndSend(_nodeID, SubprotocolPacketType::kGetDagSyncPacket, + GetDagSyncPacket{period, std::move(blocks)}.encodeRlp()); } void requestPendingDagBlocks(std::shared_ptr peer = nullptr) { @@ -143,15 +141,15 @@ class ExtSyncingPacketHandler : public PacketHandler { return; } LOG(this->log_nf_) << "Request pending blocks from peer " << peer->getId(); - std::unordered_set known_non_finalized_blocks; + std::vector known_non_finalized_blocks; auto [period, blocks] = dag_mgr_->getNonFinalizedBlocks(); for (auto &level_blocks : blocks) { for (auto &block : level_blocks.second) { - known_non_finalized_blocks.insert(block); + known_non_finalized_blocks.emplace_back(block); } } - requestDagBlocks(peer->getId(), known_non_finalized_blocks, period); + requestDagBlocks(peer->getId(), std::move(known_non_finalized_blocks), period); } } diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/common/ext_votes_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/common/ext_votes_packet_handler.hpp index 8e9e50398e..1bb39a9a01 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/common/ext_votes_packet_handler.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/common/ext_votes_packet_handler.hpp @@ -1,5 +1,7 @@ #pragma once +#include "network/tarcap/packets/latest/get_pbft_sync_packet.hpp" +#include "network/tarcap/packets/latest/votes_bundle_packet.hpp" #include 
"network/tarcap/packets_handlers/latest/common/exceptions.hpp" #include "packet_handler.hpp" #include "pbft/pbft_manager.hpp" @@ -118,16 +120,10 @@ class ExtVotesPacketHandler : public PacketHandler { } auto sendVotes = [this, &peer](std::vector>&& votes) { - auto votes_bytes = encodePbftVotesBundleRlp(std::move(votes)); - if (votes_bytes.empty()) { - LOG(this->log_er_) << "Unable to send VotesBundle rlp"; - return; - } - - dev::RLPStream votes_rlp_stream; - votes_rlp_stream.appendRaw(votes_bytes); - - if (this->sealAndSend(peer->getId(), SubprotocolPacketType::kVotesBundlePacket, std::move(votes_rlp_stream))) { + // TODO[2868]: optimize this + auto votes_copy = votes; + if (this->sealAndSend(peer->getId(), SubprotocolPacketType::kVotesBundlePacket, + VotesBundlePacket{std::move(votes_copy)}.encodeRlp())) { LOG(this->log_dg_) << " Votes bundle with " << votes.size() << " votes sent to " << peer->getId(); for (const auto& vote : votes) { peer->markPbftVoteAsKnown(vote->getHash()); @@ -196,7 +192,7 @@ class ExtVotesPacketHandler : public PacketHandler { // request PBFT chain sync from this node this->sealAndSend( peer->getId(), SubprotocolPacketType::kGetPbftSyncPacket, - std::move(dev::RLPStream(1) << std::max(vote->getPeriod() - 1, peer->pbft_chain_size_.load()))); + GetPbftSyncPacket{std::max(vote->getPeriod() - 1, peer->pbft_chain_size_.load())}.encodeRlp()); last_pbft_block_sync_request_time_ = std::chrono::system_clock::now(); } diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/common/ext_syncing_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/common/ext_syncing_packet_handler.hpp new file mode 100644 index 0000000000..72cbc57a74 --- /dev/null +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/common/ext_syncing_packet_handler.hpp @@ -0,0 +1,200 @@ +#pragma once + +#include "dag/dag_manager.hpp" +#include "network/tarcap/shared_states/pbft_syncing_state.hpp" 
+#include "network/tarcap/packets_handlers/latest/common/packet_handler.hpp" +#include "pbft/pbft_chain.hpp" +#include "pbft/pbft_manager.hpp" + +namespace taraxa::network::tarcap::v4 { + +/** + * @brief ExtSyncingPacketHandler is extended abstract PacketHandler with added functions that are used in packet + * handlers that need to interact with syncing process in some way + */ +template +class ExtSyncingPacketHandler : public PacketHandler { + public: + ExtSyncingPacketHandler(const FullNodeConfig &conf, std::shared_ptr peers_state, + std::shared_ptr packets_stats, + std::shared_ptr pbft_syncing_state, std::shared_ptr pbft_chain, + std::shared_ptr pbft_mgr, std::shared_ptr dag_mgr, + std::shared_ptr db, const addr_t &node_addr, const std::string &log_channel_name) + : PacketHandler(conf, std::move(peers_state), std::move(packets_stats), node_addr, log_channel_name), + pbft_syncing_state_(std::move(pbft_syncing_state)), + pbft_chain_(std::move(pbft_chain)), + pbft_mgr_(std::move(pbft_mgr)), + dag_mgr_(std::move(dag_mgr)), + db_(std::move(db)) {} + + virtual ~ExtSyncingPacketHandler() = default; + ExtSyncingPacketHandler &operator=(const ExtSyncingPacketHandler &) = delete; + ExtSyncingPacketHandler &operator=(ExtSyncingPacketHandler &&) = delete; + + /** + * @brief Start syncing pbft if needed + * + */ + void startSyncingPbft() { + if (pbft_syncing_state_->isPbftSyncing()) { + LOG(this->log_dg_) << "startSyncingPbft called but syncing_ already true"; + return; + } + + std::shared_ptr peer = getMaxChainPeer(); + if (!peer) { + LOG(this->log_nf_) << "Restarting syncing PBFT not possible since no connected peers"; + return; + } + + auto pbft_sync_period = pbft_mgr_->pbftSyncingPeriod(); + if (peer->pbft_chain_size_ > pbft_sync_period) { + auto peer_id = peer->getId().abridged(); + auto peer_pbft_chain_size = peer->pbft_chain_size_.load(); + if (!pbft_syncing_state_->setPbftSyncing(true, pbft_sync_period, std::move(peer))) { + LOG(this->log_dg_) << "startSyncingPbft 
called but syncing_ already true"; + return; + } + LOG(this->log_si_) << "Restarting syncing PBFT from peer " << peer_id << ", peer PBFT chain size " + << peer_pbft_chain_size << ", own PBFT chain synced at period " << pbft_sync_period; + + if (syncPeerPbft(pbft_sync_period + 1)) { + // Disable snapshots only if are syncing from scratch + if (pbft_syncing_state_->isDeepPbftSyncing()) { + db_->disableSnapshots(); + } + } else { + pbft_syncing_state_->setPbftSyncing(false); + } + } else { + LOG(this->log_nf_) << "Restarting syncing PBFT not needed since our pbft chain size: " << pbft_sync_period << "(" + << pbft_chain_->getPbftChainSize() << ")" + << " is greater or equal than max node pbft chain size:" << peer->pbft_chain_size_; + db_->enableSnapshots(); + } + } + + /** + * @brief Send sync request to the current syncing peer with specified request_period + * + * @param request_period + * + * @return true if sync request was sent, otherwise false + */ + bool syncPeerPbft(PbftPeriod request_period) { + const auto syncing_peer = pbft_syncing_state_->syncingPeer(); + if (!syncing_peer) { + LOG(this->log_er_) << "Unable to send GetPbftSyncPacket. No syncing peer set."; + return false; + } + + if (request_period > syncing_peer->pbft_chain_size_) { + LOG(this->log_wr_) << "Invalid syncPeerPbft argument. 
Node " << syncing_peer->getId() << " chain size " + << syncing_peer->pbft_chain_size_ << ", requested period " << request_period; + return false; + } + + LOG(this->log_nf_) << "Send GetPbftSyncPacket with period " << request_period << " to node " + << syncing_peer->getId(); + return this->sealAndSend(syncing_peer->getId(), SubprotocolPacketType::kGetPbftSyncPacket, + std::move(dev::RLPStream(1) << request_period)); + } + + void requestDagBlocks(const dev::p2p::NodeID &_nodeID, const std::unordered_set &blocks, + PbftPeriod period) { + dev::RLPStream s(2); // Period + blocks list + s.append(period); + s.append(blocks); + + this->sealAndSend(_nodeID, SubprotocolPacketType::kGetDagSyncPacket, std::move(s)); + } + + void requestPendingDagBlocks(std::shared_ptr peer = nullptr) { + if (!peer) { + peer = getMaxChainPeer([](const std::shared_ptr &peer) { + if (peer->peer_dag_synced_ || !peer->dagSyncingAllowed()) { + return false; + } + return true; + }); + if (!peer) { + LOG(this->log_nf_) << "requestPendingDagBlocks not possible since no peers are matching conditions"; + return; + } + } + + if (!peer) { + LOG(this->log_nf_) << "requestPendingDagBlocks not possible since no connected peers"; + return; + } + + // This prevents ddos requesting dag blocks. We can only request this one time from one peer. 
+ if (peer->peer_dag_synced_) { + LOG(this->log_nf_) << "requestPendingDagBlocks not possible since already requested for peer"; + return; + } + + // Only request dag blocks if periods are matching + auto pbft_sync_period = pbft_mgr_->pbftSyncingPeriod(); + if (pbft_sync_period == peer->pbft_chain_size_) { + // This prevents parallel requests + if (bool b = false; !peer->peer_dag_syncing_.compare_exchange_strong(b, !b)) { + LOG(this->log_nf_) << "requestPendingDagBlocks not possible since already requesting for peer"; + return; + } + LOG(this->log_nf_) << "Request pending blocks from peer " << peer->getId(); + std::unordered_set known_non_finalized_blocks; + auto [period, blocks] = dag_mgr_->getNonFinalizedBlocks(); + for (auto &level_blocks : blocks) { + for (auto &block : level_blocks.second) { + known_non_finalized_blocks.insert(block); + } + } + + requestDagBlocks(peer->getId(), known_non_finalized_blocks, period); + } + } + + std::shared_ptr getMaxChainPeer(std::function &)> filter_func = + [](const std::shared_ptr &) { return true; }) { + std::shared_ptr max_pbft_chain_peer; + PbftPeriod max_pbft_chain_size = 0; + uint64_t max_node_dag_level = 0; + + // Find peer with max pbft chain and dag level + for (auto const &peer : this->peers_state_->getAllPeers()) { + // Apply the filter function + if (!filter_func(peer.second)) { + continue; + } + + if (peer.second->pbft_chain_size_ > max_pbft_chain_size) { + if (peer.second->peer_light_node && + pbft_mgr_->pbftSyncingPeriod() + peer.second->peer_light_node_history < peer.second->pbft_chain_size_) { + LOG(this->log_er_) << "Disconnecting from light node peer " << peer.first + << " History: " << peer.second->peer_light_node_history + << " chain size: " << peer.second->pbft_chain_size_; + this->disconnect(peer.first, dev::p2p::UserReason); + continue; + } + max_pbft_chain_size = peer.second->pbft_chain_size_; + max_node_dag_level = peer.second->dag_level_; + max_pbft_chain_peer = peer.second; + } else if 
(peer.second->pbft_chain_size_ == max_pbft_chain_size && peer.second->dag_level_ > max_node_dag_level) { + max_node_dag_level = peer.second->dag_level_; + max_pbft_chain_peer = peer.second; + } + } + return max_pbft_chain_peer; + } + + protected: + std::shared_ptr pbft_syncing_state_{nullptr}; + + std::shared_ptr pbft_chain_{nullptr}; + std::shared_ptr pbft_mgr_{nullptr}; + std::shared_ptr dag_mgr_{nullptr}; + std::shared_ptr db_{nullptr}; +}; + +} // namespace taraxa::network::tarcap diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/common/ext_votes_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/common/ext_votes_packet_handler.hpp new file mode 100644 index 0000000000..77809f7c87 --- /dev/null +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/common/ext_votes_packet_handler.hpp @@ -0,0 +1,281 @@ +#pragma once + +#include "network/tarcap/packets_handlers/latest/common/exceptions.hpp" +#include "network/tarcap/packets_handlers/latest/common/packet_handler.hpp" +#include "pbft/pbft_manager.hpp" +#include "vote/pbft_vote.hpp" +#include "vote/votes_bundle_rlp.hpp" +#include "vote_manager/vote_manager.hpp" + +namespace taraxa::network::tarcap::v4 { + +/** + * @brief ExtVotesPacketHandler is extended abstract PacketHandler with added functions that are used in packet + * handlers that process pbft votes + */ +template +class ExtVotesPacketHandler : public PacketHandler { + public: + ExtVotesPacketHandler(const FullNodeConfig& conf, std::shared_ptr peers_state, + std::shared_ptr packets_stats, std::shared_ptr pbft_mgr, + std::shared_ptr pbft_chain, std::shared_ptr vote_mgr, + std::shared_ptr slashing_manager, const addr_t& node_addr, + const std::string& log_channel_name) + : PacketHandler(conf, std::move(peers_state), std::move(packets_stats), node_addr, log_channel_name), + last_votes_sync_request_time_(std::chrono::system_clock::now()), + 
last_pbft_block_sync_request_time_(std::chrono::system_clock::now()), + pbft_mgr_(std::move(pbft_mgr)), + pbft_chain_(std::move(pbft_chain)), + vote_mgr_(std::move(vote_mgr)), + slashing_manager_(std::move(slashing_manager)) {} + + virtual ~ExtVotesPacketHandler() = default; + ExtVotesPacketHandler(const ExtVotesPacketHandler&) = delete; + ExtVotesPacketHandler(ExtVotesPacketHandler&&) = delete; + ExtVotesPacketHandler& operator=(const ExtVotesPacketHandler&) = delete; + ExtVotesPacketHandler& operator=(ExtVotesPacketHandler&&) = delete; + + /** + * @brief Process vote + * + * @param vote + * @param pbft_block + * @param peer + * @param validate_max_round_step + * @return if vote was successfully processed, otherwise false + */ + bool processVote(const std::shared_ptr& vote, const std::shared_ptr& pbft_block, + const std::shared_ptr& peer, bool validate_max_round_step) { + if (pbft_block && !validateVoteAndBlock(vote, pbft_block)) { + throw MaliciousPeerException("Received vote's voted value != received pbft block"); + } + + if (vote_mgr_->voteInVerifiedMap(vote)) { + LOG(this->log_dg_) << "Vote " << vote->getHash() << " already inserted in verified queue"; + return false; + } + + // Validate vote's period, round and step min/max values + if (const auto vote_valid = validateVotePeriodRoundStep(vote, peer, validate_max_round_step); !vote_valid.first) { + LOG(this->log_wr_) << "Vote period/round/step " << vote->getHash() + << " validation failed. Err: " << vote_valid.second; + return false; + } + + // Check is vote is unique per period, round & step & voter -> each address can generate just 1 vote + // (for a value that isn't NBH) per period, round & step + if (auto vote_valid = vote_mgr_->isUniqueVote(vote); !vote_valid.first) { + // Create double voting proof + slashing_manager_->submitDoubleVotingProof(vote, vote_valid.second); + throw MaliciousPeerException("Received double vote", vote->getVoter()); + } + + // Validate vote's signature, vrf, etc... 
+ if (const auto vote_valid = vote_mgr_->validateVote(vote); !vote_valid.first) { + LOG(this->log_wr_) << "Vote " << vote->getHash() << " validation failed. Err: " << vote_valid.second; + return false; + } + + if (!vote_mgr_->addVerifiedVote(vote)) { + LOG(this->log_dg_) << "Vote " << vote->getHash() << " already inserted in verified queue(race condition)"; + return false; + } + + if (pbft_block) { + pbft_mgr_->processProposedBlock(pbft_block, vote); + } + + return true; + } + + /** + * @brief Checks is vote is relevant for current pbft state in terms of period, round and type + * @param vote + * @return true if vote is relevant for current pbft state, otherwise false + */ + bool isPbftRelevantVote(const std::shared_ptr& vote) const { + const auto [current_pbft_round, current_pbft_period] = pbft_mgr_->getPbftRoundAndPeriod(); + + if (vote->getPeriod() >= current_pbft_period && vote->getRound() >= current_pbft_round) { + // Standard current or future vote + return true; + } else if (vote->getPeriod() == current_pbft_period && vote->getRound() == (current_pbft_round - 1) && + vote->getType() == PbftVoteTypes::next_vote) { + // Previous round next vote + return true; + } else if (vote->getPeriod() == current_pbft_period - 1 && vote->getType() == PbftVoteTypes::cert_vote) { + // Previous period cert vote - potential reward vote + return true; + } + + return false; + } + + virtual void sendPbftVotesBundle(const std::shared_ptr& peer, + std::vector>&& votes) { + if (votes.empty()) { + return; + } + + auto sendVotes = [this, &peer](std::vector>&& votes) { + auto votes_bytes = encodePbftVotesBundleRlp(std::move(votes)); + if (votes_bytes.empty()) { + LOG(this->log_er_) << "Unable to send VotesBundle rlp"; + return; + } + + dev::RLPStream votes_rlp_stream; + votes_rlp_stream.appendRaw(votes_bytes); + + if (this->sealAndSend(peer->getId(), SubprotocolPacketType::kVotesBundlePacket, std::move(votes_rlp_stream))) { + LOG(this->log_dg_) << " Votes bundle with " << votes.size() 
<< " votes sent to " << peer->getId(); + for (const auto& vote : votes) { + peer->markPbftVoteAsKnown(vote->getHash()); + } + } + }; + + if (votes.size() <= kMaxVotesInBundleRlp) { + sendVotes(std::move(votes)); + return; + } else { + // Need to split votes into multiple packets + size_t index = 0; + while (index < votes.size()) { + const size_t votes_count = std::min(kMaxVotesInBundleRlp, votes.size() - index); + + const auto begin_it = std::next(votes.begin(), index); + const auto end_it = std::next(begin_it, votes_count); + + std::vector> votes_sub_vector; + std::move(begin_it, end_it, std::back_inserter(votes_sub_vector)); + + sendVotes(std::move(votes_sub_vector)); + + index += votes_count; + } + } + } + + private: + /** + * @brief Validates vote period, round and step against max values from config + * + * @param vote to be validated + * @param peer + * @param validate_max_round_step validate also max round and step + * @return vote validation passed, otherwise + */ + std::pair validateVotePeriodRoundStep(const std::shared_ptr& vote, + const std::shared_ptr& peer, + bool validate_max_round_step) { + const auto [current_pbft_round, current_pbft_period] = pbft_mgr_->getPbftRoundAndPeriod(); + + auto genErrMsg = [period = current_pbft_period, round = current_pbft_round, + step = pbft_mgr_->getPbftStep()](const std::shared_ptr& vote) -> std::string { + std::stringstream err; + err << "Vote " << vote->getHash() << " (period, round, step) = (" << vote->getPeriod() << ", " << vote->getRound() + << ", " << vote->getStep() << "). 
Current PBFT (period, round, step) = (" << period << ", " << round << ", " + << step << ")"; + return err.str(); + }; + + // Period validation + // vote->getPeriod() == current_pbft_period - 1 && cert_vote -> potential reward vote + if (vote->getPeriod() < current_pbft_period - 1 || + (vote->getPeriod() == current_pbft_period - 1 && vote->getType() != PbftVoteTypes::cert_vote)) { + return {false, "Invalid period(too small): " + genErrMsg(vote)}; + } else if (this->kConf.network.ddos_protection.vote_accepting_periods && + vote->getPeriod() - 1 > + current_pbft_period + this->kConf.network.ddos_protection.vote_accepting_periods) { + // skip this check if kConf.network.ddos_protection.vote_accepting_periods == 0 + // vote->getPeriod() - 1 is here because votes are validated against vote_period - 1 in dpos contract + // Do not request round sync too often here + if (vote->getVoter() == peer->getId() && + std::chrono::system_clock::now() - last_pbft_block_sync_request_time_ > kSyncRequestInterval) { + // request PBFT chain sync from this node + this->sealAndSend( + peer->getId(), SubprotocolPacketType::kGetPbftSyncPacket, + std::move(dev::RLPStream(1) << std::max(vote->getPeriod() - 1, peer->pbft_chain_size_.load()))); + last_pbft_block_sync_request_time_ = std::chrono::system_clock::now(); + } + + return {false, "Invalid period(too big): " + genErrMsg(vote)}; + } + + // Round validation + auto checking_round = current_pbft_round; + // If period is not the same we assume current round is equal to 1 + // So we won't accept votes for future period with round bigger than kConf.network.vote_accepting_steps + if (current_pbft_period != vote->getPeriod()) { + checking_round = 1; + } + + // vote->getRound() == checking_round - 1 && next_vote -> previous round next vote + if (vote->getRound() < checking_round - 1 || + (vote->getRound() == checking_round - 1 && vote->getType() != PbftVoteTypes::next_vote)) { + return {false, "Invalid round(too small): " + genErrMsg(vote)}; + } 
else if (validate_max_round_step && this->kConf.network.ddos_protection.vote_accepting_rounds && + vote->getRound() >= checking_round + this->kConf.network.ddos_protection.vote_accepting_rounds) { + // skip this check if kConf.network.vote_accepting_rounds == 0 + // Trigger votes(round) syncing only if we are in sync in terms of period + if (current_pbft_period == vote->getPeriod()) { + // Do not request round sync too often here + if (vote->getVoter() == peer->getId() && + std::chrono::system_clock::now() - last_votes_sync_request_time_ > kSyncRequestInterval) { + // request round votes sync from this node + this->requestPbftNextVotesAtPeriodRound(peer->getId(), current_pbft_period, current_pbft_round); + last_votes_sync_request_time_ = std::chrono::system_clock::now(); + } + } + + return {false, "Invalid round(too big): " + genErrMsg(vote)}; + } + + // Step validation + auto checking_step = pbft_mgr_->getPbftStep(); + // If period or round is not the same we assume current step is equal to 1 + // So we won't accept votes for future rounds with step bigger than kConf.network.vote_accepting_steps + if (current_pbft_period != vote->getPeriod() || current_pbft_round != vote->getRound()) { + checking_step = 1; + } + + // skip check if kConf.network.vote_accepting_steps == 0 + if (validate_max_round_step && this->kConf.network.ddos_protection.vote_accepting_steps && + vote->getStep() >= checking_step + this->kConf.network.ddos_protection.vote_accepting_steps) { + return {false, "Invalid step(too big): " + genErrMsg(vote)}; + } + + return {true, ""}; + } + + /** + * @brief Validates provided vote if voted value == provided block + * + * @param vote + * @param pbft_block + * @return true if validation successful, otherwise false + */ + bool validateVoteAndBlock(const std::shared_ptr& vote, const std::shared_ptr& pbft_block) const { + if (pbft_block->getBlockHash() != vote->getBlockHash()) { + LOG(this->log_er_) << "Vote " << vote->getHash() << " voted block " << 
vote->getBlockHash() << " != actual block " + << pbft_block->getBlockHash(); + return false; + } + return true; + } + + protected: + constexpr static size_t kMaxVotesInBundleRlp{1000}; + constexpr static std::chrono::seconds kSyncRequestInterval = std::chrono::seconds(10); + + mutable std::chrono::system_clock::time_point last_votes_sync_request_time_; + mutable std::chrono::system_clock::time_point last_pbft_block_sync_request_time_; + + std::shared_ptr pbft_mgr_; + std::shared_ptr pbft_chain_; + std::shared_ptr vote_mgr_; + std::shared_ptr slashing_manager_; +}; + +} // namespace taraxa::network::tarcap diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/dag_block_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/dag_block_packet_handler.hpp index 9b0ec69968..57910851be 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/dag_block_packet_handler.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/dag_block_packet_handler.hpp @@ -1,7 +1,7 @@ #pragma once #include "network/tarcap/packets/v4/dag_block_packet.hpp" -#include "network/tarcap/packets_handlers/latest/common/ext_syncing_packet_handler.hpp" +#include "network/tarcap/packets_handlers/v4/common/ext_syncing_packet_handler.hpp" namespace taraxa { class TransactionManager; @@ -9,7 +9,7 @@ class TransactionManager; namespace taraxa::network::tarcap::v4 { -class DagBlockPacketHandler : public ExtSyncingPacketHandler { +class DagBlockPacketHandler : public v4::ExtSyncingPacketHandler { public: DagBlockPacketHandler(const FullNodeConfig &conf, std::shared_ptr peers_state, std::shared_ptr packets_stats, diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/dag_sync_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/dag_sync_packet_handler.hpp index d9989c25a8..9178d04f62 100644 --- 
a/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/dag_sync_packet_handler.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/dag_sync_packet_handler.hpp @@ -1,7 +1,7 @@ #pragma once #include "network/tarcap/packets/v4/dag_sync_packet.hpp" -#include "network/tarcap/packets_handlers/latest/common/ext_syncing_packet_handler.hpp" +#include "network/tarcap/packets_handlers/v4/common/ext_syncing_packet_handler.hpp" namespace taraxa { class TransactionManager; @@ -9,7 +9,7 @@ class TransactionManager; namespace taraxa::network::tarcap::v4 { -class DagSyncPacketHandler : public ExtSyncingPacketHandler { +class DagSyncPacketHandler : public v4::ExtSyncingPacketHandler { public: DagSyncPacketHandler(const FullNodeConfig& conf, std::shared_ptr peers_state, std::shared_ptr packets_stats, diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/pbft_sync_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/pbft_sync_packet_handler.hpp index 75e1115a8f..32b7eb88a6 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/pbft_sync_packet_handler.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/pbft_sync_packet_handler.hpp @@ -2,12 +2,12 @@ #include "common/thread_pool.hpp" #include "network/tarcap/packets/v4/pbft_sync_packet.hpp" -#include "network/tarcap/packets_handlers/latest/common/ext_syncing_packet_handler.hpp" +#include "network/tarcap/packets_handlers/v4/common/ext_syncing_packet_handler.hpp" #include "vote_manager/vote_manager.hpp" namespace taraxa::network::tarcap::v4 { -class PbftSyncPacketHandler : public ExtSyncingPacketHandler { +class PbftSyncPacketHandler : public v4::ExtSyncingPacketHandler { public: PbftSyncPacketHandler(const FullNodeConfig& conf, std::shared_ptr peers_state, std::shared_ptr packets_stats, diff --git 
a/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/status_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/status_packet_handler.hpp index 601c91250d..df62476e36 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/status_packet_handler.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/status_packet_handler.hpp @@ -1,11 +1,11 @@ #pragma once #include "network/tarcap/packets/v4/status_packet.hpp" -#include "network/tarcap/packets_handlers/latest/common/ext_syncing_packet_handler.hpp" +#include "network/tarcap/packets_handlers/v4/common/ext_syncing_packet_handler.hpp" namespace taraxa::network::tarcap::v4 { -class StatusPacketHandler : public ExtSyncingPacketHandler { +class StatusPacketHandler : public v4::ExtSyncingPacketHandler { public: StatusPacketHandler(const FullNodeConfig& conf, std::shared_ptr peers_state, std::shared_ptr packets_stats, diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/vote_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/vote_packet_handler.hpp index ab3974091d..9c174408ab 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/vote_packet_handler.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/vote_packet_handler.hpp @@ -1,11 +1,11 @@ #pragma once #include "network/tarcap/packets/v4/vote_packet.hpp" -#include "network/tarcap/packets_handlers/latest/common/ext_votes_packet_handler.hpp" +#include "network/tarcap/packets_handlers/v4/common/ext_votes_packet_handler.hpp" namespace taraxa::network::tarcap::v4 { -class VotePacketHandler : public ExtVotesPacketHandler { +class VotePacketHandler : public v4::ExtVotesPacketHandler { public: VotePacketHandler(const FullNodeConfig& conf, std::shared_ptr peers_state, std::shared_ptr packets_stats, std::shared_ptr pbft_mgr, diff --git 
a/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/votes_bundle_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/votes_bundle_packet_handler.hpp index b2ead3490c..848784b3ec 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/votes_bundle_packet_handler.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/votes_bundle_packet_handler.hpp @@ -1,11 +1,11 @@ #pragma once #include "network/tarcap/packets/v4/votes_bundle_packet.hpp" -#include "network/tarcap/packets_handlers/latest/common/ext_votes_packet_handler.hpp" +#include "network/tarcap/packets_handlers/v4/common/ext_votes_packet_handler.hpp" namespace taraxa::network::tarcap::v4 { -class VotesBundlePacketHandler : public ExtVotesPacketHandler { +class VotesBundlePacketHandler : public v4::ExtVotesPacketHandler { public: VotesBundlePacketHandler(const FullNodeConfig& conf, std::shared_ptr peers_state, std::shared_ptr packets_stats, std::shared_ptr pbft_mgr, diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/vote_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/vote_packet_handler.cpp index 70c2688778..1a1970ab8a 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/vote_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/vote_packet_handler.cpp @@ -19,18 +19,18 @@ VotePacketHandler::VotePacketHandler(const FullNodeConfig &conf, std::shared_ptr void VotePacketHandler::process(VotePacket &&packet, const std::shared_ptr &peer) { const auto [current_pbft_round, current_pbft_period] = pbft_mgr_->getPbftRoundAndPeriod(); - if (packet.pbft_block) { + if (packet.optional_data.has_value()) { LOG(log_dg_) << "Received PBFT vote " << packet.vote->getHash() << " with PBFT block " - << packet.pbft_block->getBlockHash(); + << packet.optional_data->pbft_block->getBlockHash(); + + // Update 
peer's max chain size + if (packet.optional_data->peer_chain_size > peer->pbft_chain_size_) { + peer->pbft_chain_size_ = packet.optional_data->peer_chain_size; + } } else { LOG(log_dg_) << "Received PBFT vote " << packet.vote->getHash(); } - // Update peer's max chain size - if (packet.peer_chain_size.has_value() && *packet.peer_chain_size > peer->pbft_chain_size_) { - peer->pbft_chain_size_ = *packet.peer_chain_size; - } - const auto vote_hash = packet.vote->getHash(); if (!isPbftRelevantVote(packet.vote)) { @@ -47,26 +47,28 @@ void VotePacketHandler::process(VotePacket &&packet, const std::shared_ptrgetBlockHash() != packet.vote->getBlockHash()) { + std::shared_ptr pbft_block; + if (packet.optional_data.has_value()) { + if (packet.optional_data->pbft_block->getBlockHash() != packet.vote->getBlockHash()) { std::ostringstream err_msg; err_msg << "Vote " << packet.vote->getHash().abridged() << " voted block " << packet.vote->getBlockHash().abridged() << " != actual block " - << packet.pbft_block->getBlockHash().abridged(); + << packet.optional_data->pbft_block->getBlockHash().abridged(); throw MaliciousPeerException(err_msg.str()); } - peer->markPbftBlockAsKnown(packet.pbft_block->getBlockHash()); + peer->markPbftBlockAsKnown(packet.optional_data->pbft_block->getBlockHash()); + pbft_block = packet.optional_data->pbft_block; } - if (!processVote(packet.vote, packet.pbft_block, peer, true)) { + if (!processVote(packet.vote, pbft_block, peer, true)) { return; } // Do not mark it before, as peers have small caches of known votes. 
Only mark gossiping votes peer->markPbftVoteAsKnown(vote_hash); - pbft_mgr_->gossipVote(packet.vote, packet.pbft_block); + pbft_mgr_->gossipVote(packet.vote, pbft_block); } void VotePacketHandler::onNewPbftVote(const std::shared_ptr &vote, const std::shared_ptr &block, @@ -98,15 +100,13 @@ void VotePacketHandler::sendPbftVote(const std::shared_ptr &peer, co return; } - VotePacket vote_packet; - + std::optional optional_packet_data; if (block) { - vote_packet = VotePacket(vote, block, pbft_chain_->getPbftChainSize()); - } else { - vote_packet = VotePacket(vote); + optional_packet_data = VotePacket::OptionalData{block, pbft_chain_->getPbftChainSize()}; } - if (sealAndSend(peer->getId(), SubprotocolPacketType::kVotePacket, vote_packet.encodeRlp())) { + if (sealAndSend(peer->getId(), SubprotocolPacketType::kVotePacket, + VotePacket(vote, std::move(optional_packet_data)).encodeRlp())) { peer->markPbftVoteAsKnown(vote->getHash()); if (block) { peer->markPbftBlockAsKnown(block->getBlockHash()); diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/votes_bundle_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/votes_bundle_packet_handler.cpp index 451279dfe1..32c9909b01 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/votes_bundle_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/votes_bundle_packet_handler.cpp @@ -27,7 +27,7 @@ void VotesBundlePacketHandler::process(VotesBundlePacket &&packet, const std::sh // vote is irrelevant, all of them are if (!isPbftRelevantVote(packet.votes[0])) { LOG(log_wr_) << "Drop votes sync bundle as it is irrelevant for current pbft state. Votes (period, round, step) = (" - << packet.votes_bundle_pbft_period << ", " << packet.votes_bundle_pbft_round << ", " + << reference_vote->getPeriod() << ", " << reference_vote->getRound() << ", " << reference_vote->getStep() << "). 
Current PBFT (period, round, step) = (" << current_pbft_period << ", " << current_pbft_round << ", " << pbft_mgr_->getPbftStep() << ")"; return; @@ -69,7 +69,7 @@ void VotesBundlePacketHandler::process(VotesBundlePacket &&packet, const std::sh LOG(log_nf_) << "Received " << packet.votes.size() << " (processed " << processed_votes_count << " ) sync votes from peer " << peer->getId() << " node current round " << current_pbft_round - << ", peer pbft round " << packet.votes_bundle_pbft_round; + << ", peer pbft round " << reference_vote->getRound(); onNewPbftVotesBundle(packet.votes, false, peer->getId()); } diff --git a/libraries/types/dag_block/src/dag_block.cpp b/libraries/types/dag_block/src/dag_block.cpp index 771551e8b4..c9620aa3b4 100644 --- a/libraries/types/dag_block/src/dag_block.cpp +++ b/libraries/types/dag_block/src/dag_block.cpp @@ -201,9 +201,6 @@ blk_hash_t DagBlock::sha3(bool include_sig) const { return dev::sha3(rlp(include void DagBlock::rlp(::taraxa::util::RLPDecoderRef encoding) { *this = DagBlock(encoding.value); } -void DagBlock::rlp(::taraxa::util::RLPEncoderRef encoding) const { - encoding.appendList(1); - encoding.appendRaw(rlp(true)); -} +void DagBlock::rlp(::taraxa::util::RLPEncoderRef encoding) const { encoding.appendRaw(rlp(true)); } } // namespace taraxa diff --git a/libraries/types/pbft_block/src/pbft_block.cpp b/libraries/types/pbft_block/src/pbft_block.cpp index 11491f8cfa..48e4f91461 100644 --- a/libraries/types/pbft_block/src/pbft_block.cpp +++ b/libraries/types/pbft_block/src/pbft_block.cpp @@ -131,10 +131,7 @@ bytes PbftBlock::rlp(bool include_sig) const { void PbftBlock::rlp(::taraxa::util::RLPDecoderRef encoding) { *this = PbftBlock(encoding.value); } -void PbftBlock::rlp(::taraxa::util::RLPEncoderRef encoding) const { - encoding.appendList(1); - encoding.appendRaw(rlp(true)); -} +void PbftBlock::rlp(::taraxa::util::RLPEncoderRef encoding) const { encoding.appendRaw(rlp(true)); } std::ostream& operator<<(std::ostream& strm, 
PbftBlock const& pbft_blk) { strm << pbft_blk.getJsonStr(); diff --git a/libraries/types/pbft_block/src/period_data.cpp b/libraries/types/pbft_block/src/period_data.cpp index 99af20e626..8948ca683b 100644 --- a/libraries/types/pbft_block/src/period_data.cpp +++ b/libraries/types/pbft_block/src/period_data.cpp @@ -134,10 +134,7 @@ bytes PeriodData::ToOldPeriodData(const bytes& rlp) { void PeriodData::rlp(::taraxa::util::RLPDecoderRef encoding) { *this = PeriodData(encoding.value); } -void PeriodData::rlp(::taraxa::util::RLPEncoderRef encoding) const { - encoding.appendList(1); - encoding.appendRaw(rlp()); -} +void PeriodData::rlp(::taraxa::util::RLPEncoderRef encoding) const { encoding.appendRaw(rlp()); } std::ostream& operator<<(std::ostream& strm, PeriodData const& b) { strm << "[PeriodData] : " << b.pbft_blk << " , num of votes " << b.previous_block_cert_votes.size() << std::endl; diff --git a/libraries/types/transaction/src/transaction.cpp b/libraries/types/transaction/src/transaction.cpp index 4d8a44f7c1..df928fbf3d 100644 --- a/libraries/types/transaction/src/transaction.cpp +++ b/libraries/types/transaction/src/transaction.cpp @@ -176,9 +176,6 @@ Json::Value Transaction::toJSON() const { void Transaction::rlp(::taraxa::util::RLPDecoderRef encoding) { fromRLP(encoding.value, false, {}); } -void Transaction::rlp(::taraxa::util::RLPEncoderRef encoding) const { - encoding.appendList(1); - encoding.appendRaw(rlp()); -} +void Transaction::rlp(::taraxa::util::RLPEncoderRef encoding) const { encoding.appendRaw(rlp()); } } // namespace taraxa diff --git a/libraries/types/vote/src/pbft_vote.cpp b/libraries/types/vote/src/pbft_vote.cpp index c89eee03ee..e3ce1c9c76 100644 --- a/libraries/types/vote/src/pbft_vote.cpp +++ b/libraries/types/vote/src/pbft_vote.cpp @@ -127,9 +127,6 @@ vote_hash_t PbftVote::sha3(bool inc_sig) const { return dev::sha3(rlp(inc_sig)); void PbftVote::rlp(::taraxa::util::RLPDecoderRef encoding) { *this = PbftVote(encoding.value); } -void 
PbftVote::rlp(::taraxa::util::RLPEncoderRef encoding) const { - encoding.appendList(1); - encoding.appendRaw(rlp()); -} +void PbftVote::rlp(::taraxa::util::RLPEncoderRef encoding) const { encoding.appendRaw(rlp()); } } // namespace taraxa \ No newline at end of file From 31696f9c5c5dbf10014c4f7bd90e39057cbaa795 Mon Sep 17 00:00:00 2001 From: Jakub Fornadel Date: Fri, 18 Oct 2024 11:01:06 +0200 Subject: [PATCH 073/105] simplify new packets handlers code --- .../packets/latest/dag_block_packet.hpp | 12 - .../tarcap/packets/latest/dag_sync_packet.hpp | 16 -- .../packets/latest/get_dag_sync_packet.hpp | 10 - .../latest/get_next_votes_bundle_packet.hpp | 9 - .../packets/latest/get_pbft_sync_packet.hpp | 9 - .../latest/get_pillar_votes_bundle_packet.hpp | 13 - .../packets/latest/pbft_sync_packet.hpp | 14 -- .../packets/latest/pillar_vote_packet.hpp | 9 - .../latest/pillar_votes_bundle_packet.hpp | 17 -- .../tarcap/packets/latest/status_packet.hpp | 16 -- .../packets/latest/transaction_packet.hpp | 19 +- .../tarcap/packets/latest/vote_packet.hpp | 10 - .../packets/latest/votes_bundle_packet.hpp | 16 -- .../tarcap/packets/v4/dag_block_packet.hpp | 44 ---- .../tarcap/packets/v4/dag_sync_packet.hpp | 43 ---- .../tarcap/packets/v4/get_dag_sync_packet.hpp | 27 --- .../tarcap/packets/v4/pbft_sync_packet.hpp | 47 ---- .../tarcap/packets/v4/pillar_vote_packet.hpp | 28 --- .../packets/v4/pillar_votes_bundle_packet.hpp | 32 --- .../tarcap/packets/v4/status_packet.hpp | 54 ----- .../tarcap/packets/v4/transaction_packet.hpp | 53 ---- .../network/tarcap/packets/v4/vote_packet.hpp | 36 --- .../tarcap/packets/v4/votes_bundle_packet.hpp | 42 ---- .../latest/common/base_packet_handler.hpp | 3 + .../common/ext_syncing_packet_handler.hpp | 4 +- .../common/ext_votes_packet_handler.hpp | 4 +- .../latest/common/packet_handler.hpp | 40 ++- .../pillar_votes_bundle_packet_handler.hpp | 2 + .../common/ext_pillar_vote_packet_handler.hpp | 35 +++ .../v4/common/ext_syncing_packet_handler.hpp | 174 
++----------- .../v4/common/ext_votes_packet_handler.hpp | 226 ++--------------- .../v4/common/packet_handler.hpp | 86 +++++++ .../v4/dag_block_packet_handler.hpp | 8 +- .../v4/dag_sync_packet_handler.hpp | 8 +- .../v4/get_dag_sync_packet_handler.hpp | 8 +- .../get_next_votes_bundle_packet_handler.hpp | 29 +++ .../v4/get_pbft_sync_packet_handler.hpp | 42 ++++ ...get_pillar_votes_bundle_packet_handler.hpp | 31 +++ .../v4/pbft_sync_packet_handler.hpp | 11 +- .../v4/pillar_vote_packet_handler.hpp | 8 +- .../v4/pillar_votes_bundle_packet_handler.hpp | 11 +- .../v4/status_packet_handler.hpp | 11 +- .../v4/transaction_packet_handler.hpp | 10 +- .../v4/vote_packet_handler.hpp | 12 +- .../v4/votes_bundle_packet_handler.hpp | 8 +- .../common/ext_bls_sig_packet_handler.cpp | 37 --- .../common/ext_syncing_packet_handler.cpp | 169 ------------- .../common/ext_votes_packet_handler.cpp | 228 ------------------ .../latest/common/packet_handler.cpp | 149 ------------ .../latest/dag_block_packet_handler.cpp | 5 +- .../latest/get_dag_sync_packet_handler.cpp | 2 +- .../latest/get_pbft_sync_packet_handler.cpp | 2 +- ...get_pillar_votes_bundle_packet_handler.cpp | 14 +- .../latest/pillar_vote_packet_handler.cpp | 2 +- .../pillar_votes_bundle_packet_handler.cpp | 5 + .../latest/status_packet_handler.cpp | 19 +- .../latest/transaction_packet_handler.cpp | 8 +- .../latest/vote_packet_handler.cpp | 4 +- .../latest/votes_bundle_packet_handler.cpp | 4 + .../v4/common/ext_bls_sig_packet_handler.cpp | 37 +++ .../v4/common/ext_syncing_packet_handler.cpp | 167 +++++++++++++ .../v4/common/ext_votes_packet_handler.cpp | 225 +++++++++++++++++ .../v4/common/packet_handler.cpp | 144 +++++++++++ .../v4/dag_block_packet_handler.cpp | 42 +++- .../v4/dag_sync_packet_handler.cpp | 79 ++++-- .../v4/get_dag_sync_packet_handler.cpp | 30 ++- .../get_next_votes_bundle_packet_handler.cpp | 82 +++++++ .../v4/get_pbft_sync_packet_handler.cpp | 114 +++++++++ ...get_pillar_votes_bundle_packet_handler.cpp | 98 
++++++++ .../v4/pbft_sync_packet_handler.cpp | 114 +++++---- .../v4/pillar_vote_packet_handler.cpp | 19 +- .../v4/pillar_votes_bundle_packet_handler.cpp | 12 +- .../src/tarcap/packets_handlers/v4/readme.md | 6 + .../v4/status_packet_handler.cpp | 107 ++++---- .../v4/transaction_packet_handler.cpp | 64 ++++- .../v4/vote_packet_handler.cpp | 59 +++-- .../v4/votes_bundle_packet_handler.cpp | 53 ++-- .../network/src/tarcap/taraxa_capability.cpp | 12 +- 78 files changed, 1658 insertions(+), 1801 deletions(-) delete mode 100644 libraries/core_libs/network/include/network/tarcap/packets/v4/dag_block_packet.hpp delete mode 100644 libraries/core_libs/network/include/network/tarcap/packets/v4/dag_sync_packet.hpp delete mode 100644 libraries/core_libs/network/include/network/tarcap/packets/v4/get_dag_sync_packet.hpp delete mode 100644 libraries/core_libs/network/include/network/tarcap/packets/v4/pbft_sync_packet.hpp delete mode 100644 libraries/core_libs/network/include/network/tarcap/packets/v4/pillar_vote_packet.hpp delete mode 100644 libraries/core_libs/network/include/network/tarcap/packets/v4/pillar_votes_bundle_packet.hpp delete mode 100644 libraries/core_libs/network/include/network/tarcap/packets/v4/status_packet.hpp delete mode 100644 libraries/core_libs/network/include/network/tarcap/packets/v4/transaction_packet.hpp delete mode 100644 libraries/core_libs/network/include/network/tarcap/packets/v4/vote_packet.hpp delete mode 100644 libraries/core_libs/network/include/network/tarcap/packets/v4/votes_bundle_packet.hpp create mode 100644 libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/common/ext_pillar_vote_packet_handler.hpp create mode 100644 libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/common/packet_handler.hpp create mode 100644 libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/get_next_votes_bundle_packet_handler.hpp create mode 100644 
libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/get_pbft_sync_packet_handler.hpp create mode 100644 libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/get_pillar_votes_bundle_packet_handler.hpp delete mode 100644 libraries/core_libs/network/src/tarcap/packets_handlers/latest/common/ext_bls_sig_packet_handler.cpp delete mode 100644 libraries/core_libs/network/src/tarcap/packets_handlers/latest/common/ext_syncing_packet_handler.cpp delete mode 100644 libraries/core_libs/network/src/tarcap/packets_handlers/latest/common/ext_votes_packet_handler.cpp delete mode 100644 libraries/core_libs/network/src/tarcap/packets_handlers/latest/common/packet_handler.cpp create mode 100644 libraries/core_libs/network/src/tarcap/packets_handlers/v4/common/ext_bls_sig_packet_handler.cpp create mode 100644 libraries/core_libs/network/src/tarcap/packets_handlers/v4/common/ext_syncing_packet_handler.cpp create mode 100644 libraries/core_libs/network/src/tarcap/packets_handlers/v4/common/ext_votes_packet_handler.cpp create mode 100644 libraries/core_libs/network/src/tarcap/packets_handlers/v4/common/packet_handler.cpp create mode 100644 libraries/core_libs/network/src/tarcap/packets_handlers/v4/get_next_votes_bundle_packet_handler.cpp create mode 100644 libraries/core_libs/network/src/tarcap/packets_handlers/v4/get_pbft_sync_packet_handler.cpp create mode 100644 libraries/core_libs/network/src/tarcap/packets_handlers/v4/get_pillar_votes_bundle_packet_handler.cpp create mode 100644 libraries/core_libs/network/src/tarcap/packets_handlers/v4/readme.md diff --git a/libraries/core_libs/network/include/network/tarcap/packets/latest/dag_block_packet.hpp b/libraries/core_libs/network/include/network/tarcap/packets/latest/dag_block_packet.hpp index e7958db76d..f624996840 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets/latest/dag_block_packet.hpp +++ 
b/libraries/core_libs/network/include/network/tarcap/packets/latest/dag_block_packet.hpp @@ -6,18 +6,6 @@ namespace taraxa::network::tarcap { struct DagBlockPacket { - DagBlockPacket() = default; - DagBlockPacket(const DagBlockPacket&) = default; - DagBlockPacket(DagBlockPacket&&) = default; - DagBlockPacket& operator=(const DagBlockPacket&) = default; - DagBlockPacket& operator=(DagBlockPacket&&) = default; - // TODO[2868]: optimize args - DagBlockPacket(const std::vector>& transactions, const DagBlock& dag_block) - : transactions(transactions), dag_block(dag_block) {} - - DagBlockPacket(const dev::RLP& packet_rlp) { *this = util::rlp_dec(packet_rlp); } - dev::bytes encodeRlp() { return util::rlp_enc(*this); } - std::vector> transactions; DagBlock dag_block; diff --git a/libraries/core_libs/network/include/network/tarcap/packets/latest/dag_sync_packet.hpp b/libraries/core_libs/network/include/network/tarcap/packets/latest/dag_sync_packet.hpp index fb22776e90..0e5c352add 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets/latest/dag_sync_packet.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets/latest/dag_sync_packet.hpp @@ -6,22 +6,6 @@ namespace taraxa::network::tarcap { struct DagSyncPacket { - DagSyncPacket() = default; - DagSyncPacket(const DagSyncPacket&) = default; - DagSyncPacket(DagSyncPacket&&) = default; - DagSyncPacket& operator=(const DagSyncPacket&) = default; - DagSyncPacket& operator=(DagSyncPacket&&) = default; - DagSyncPacket(PbftPeriod request_period, PbftPeriod response_period, - std::vector>&& transactions, - std::vector>&& dag_blocks) - : request_period(request_period), - response_period(response_period), - transactions(std::move(transactions)), - dag_blocks(std::move(dag_blocks)) {} - DagSyncPacket(const dev::RLP& packet_rlp) { *this = util::rlp_dec(packet_rlp); } - - dev::bytes encodeRlp() { return util::rlp_enc(*this); } - PbftPeriod request_period; PbftPeriod response_period; std::vector> transactions; 
diff --git a/libraries/core_libs/network/include/network/tarcap/packets/latest/get_dag_sync_packet.hpp b/libraries/core_libs/network/include/network/tarcap/packets/latest/get_dag_sync_packet.hpp index ccb5c5a539..5c96debe2d 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets/latest/get_dag_sync_packet.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets/latest/get_dag_sync_packet.hpp @@ -6,16 +6,6 @@ namespace taraxa::network::tarcap { struct GetDagSyncPacket { - GetDagSyncPacket() = default; - GetDagSyncPacket(const GetDagSyncPacket&) = default; - GetDagSyncPacket(GetDagSyncPacket&&) = default; - GetDagSyncPacket& operator=(const GetDagSyncPacket&) = default; - GetDagSyncPacket& operator=(GetDagSyncPacket&&) = default; - GetDagSyncPacket(PbftPeriod peer_period, std::vector&& blocks_hashes) - : peer_period(peer_period), blocks_hashes(std::move(blocks_hashes)) {} - GetDagSyncPacket(const dev::RLP& packet_rlp) { *this = util::rlp_dec(packet_rlp); } - dev::bytes encodeRlp() { return util::rlp_enc(*this); } - PbftPeriod peer_period; std::vector blocks_hashes; diff --git a/libraries/core_libs/network/include/network/tarcap/packets/latest/get_next_votes_bundle_packet.hpp b/libraries/core_libs/network/include/network/tarcap/packets/latest/get_next_votes_bundle_packet.hpp index da65e264df..08c584b249 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets/latest/get_next_votes_bundle_packet.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets/latest/get_next_votes_bundle_packet.hpp @@ -5,15 +5,6 @@ namespace taraxa::network::tarcap { struct GetNextVotesBundlePacket { - GetNextVotesBundlePacket() = default; - GetNextVotesBundlePacket(const GetNextVotesBundlePacket&) = default; - GetNextVotesBundlePacket(GetNextVotesBundlePacket&&) = default; - GetNextVotesBundlePacket& operator=(const GetNextVotesBundlePacket&) = default; - GetNextVotesBundlePacket& operator=(GetNextVotesBundlePacket&&) = default; - - 
GetNextVotesBundlePacket(const dev::RLP& packet_rlp) { *this = util::rlp_dec(packet_rlp); }; - dev::bytes encodeRlp() { return util::rlp_enc(*this); } - PbftPeriod peer_pbft_period; PbftRound peer_pbft_round; diff --git a/libraries/core_libs/network/include/network/tarcap/packets/latest/get_pbft_sync_packet.hpp b/libraries/core_libs/network/include/network/tarcap/packets/latest/get_pbft_sync_packet.hpp index 9f4ef983cf..b0429f0882 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets/latest/get_pbft_sync_packet.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets/latest/get_pbft_sync_packet.hpp @@ -5,15 +5,6 @@ namespace taraxa::network::tarcap { struct GetPbftSyncPacket { - GetPbftSyncPacket() = default; - GetPbftSyncPacket(const GetPbftSyncPacket&) = default; - GetPbftSyncPacket(GetPbftSyncPacket&&) = default; - GetPbftSyncPacket& operator=(const GetPbftSyncPacket&) = default; - GetPbftSyncPacket& operator=(GetPbftSyncPacket&&) = default; - GetPbftSyncPacket(size_t height_to_sync) : height_to_sync(height_to_sync) {} - GetPbftSyncPacket(const dev::RLP& packet_rlp) { *this = util::rlp_dec(packet_rlp); } - dev::bytes encodeRlp() { return util::rlp_enc(*this); } - size_t height_to_sync; RLP_FIELDS_DEFINE_INPLACE(height_to_sync) diff --git a/libraries/core_libs/network/include/network/tarcap/packets/latest/get_pillar_votes_bundle_packet.hpp b/libraries/core_libs/network/include/network/tarcap/packets/latest/get_pillar_votes_bundle_packet.hpp index 6c11b03e35..fe25c5469b 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets/latest/get_pillar_votes_bundle_packet.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets/latest/get_pillar_votes_bundle_packet.hpp @@ -5,19 +5,6 @@ namespace taraxa::network::tarcap { struct GetPillarVotesBundlePacket { - GetPillarVotesBundlePacket() = default; - GetPillarVotesBundlePacket(const GetPillarVotesBundlePacket&) = default; - 
GetPillarVotesBundlePacket(GetPillarVotesBundlePacket&&) = default; - GetPillarVotesBundlePacket& operator=(const GetPillarVotesBundlePacket&) = default; - GetPillarVotesBundlePacket& operator=(GetPillarVotesBundlePacket&&) = default; - GetPillarVotesBundlePacket(PbftPeriod period, blk_hash_t pillar_block_hash) - : period(period), pillar_block_hash(pillar_block_hash) {} - GetPillarVotesBundlePacket(const dev::RLP& packet_rlp) { - *this = util::rlp_dec(packet_rlp); - } - - dev::bytes encodeRlp() { return util::rlp_enc(*this); } - PbftPeriod period; blk_hash_t pillar_block_hash; diff --git a/libraries/core_libs/network/include/network/tarcap/packets/latest/pbft_sync_packet.hpp b/libraries/core_libs/network/include/network/tarcap/packets/latest/pbft_sync_packet.hpp index ac2ee8bd27..206a08bb4a 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets/latest/pbft_sync_packet.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets/latest/pbft_sync_packet.hpp @@ -7,20 +7,6 @@ namespace taraxa::network::tarcap { struct PbftSyncPacket { - PbftSyncPacket() = default; - PbftSyncPacket(const PbftSyncPacket&) = default; - PbftSyncPacket(PbftSyncPacket&&) = default; - PbftSyncPacket& operator=(const PbftSyncPacket&) = default; - PbftSyncPacket& operator=(PbftSyncPacket&&) = default; - PbftSyncPacket(bool last_block, PeriodData&& period_data, - std::vector>&& current_block_cert_votes = {}) - : last_block(last_block), - period_data(std::move(period_data)), - current_block_cert_votes(std::move(current_block_cert_votes)) {} - PbftSyncPacket(const dev::RLP& packet_rlp) { *this = util::rlp_dec(packet_rlp); }; - - dev::bytes encodeRlp() { return util::rlp_enc(*this); } - bool last_block; PeriodData period_data; // TODO: should it be optional ??? 
diff --git a/libraries/core_libs/network/include/network/tarcap/packets/latest/pillar_vote_packet.hpp b/libraries/core_libs/network/include/network/tarcap/packets/latest/pillar_vote_packet.hpp index 77c441132e..e5bac3c4d9 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets/latest/pillar_vote_packet.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets/latest/pillar_vote_packet.hpp @@ -7,15 +7,6 @@ namespace taraxa::network::tarcap { struct PillarVotePacket { - PillarVotePacket() = default; - PillarVotePacket(const PillarVotePacket&) = default; - PillarVotePacket(PillarVotePacket&&) = default; - PillarVotePacket& operator=(const PillarVotePacket&) = default; - PillarVotePacket& operator=(PillarVotePacket&&) = default; - PillarVotePacket(std::shared_ptr pillar_vote) : pillar_vote(std::move(pillar_vote)) {} - PillarVotePacket(const dev::RLP& packet_rlp) { *this = util::rlp_dec(packet_rlp); } - dev::bytes encodeRlp() { return util::rlp_enc(*this); } - std::shared_ptr pillar_vote; RLP_FIELDS_DEFINE_INPLACE(pillar_vote) diff --git a/libraries/core_libs/network/include/network/tarcap/packets/latest/pillar_votes_bundle_packet.hpp b/libraries/core_libs/network/include/network/tarcap/packets/latest/pillar_votes_bundle_packet.hpp index cc7d6cc37d..af5aa0a6f2 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets/latest/pillar_votes_bundle_packet.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets/latest/pillar_votes_bundle_packet.hpp @@ -7,26 +7,9 @@ namespace taraxa::network::tarcap { struct PillarVotesBundlePacket { - PillarVotesBundlePacket() = default; - PillarVotesBundlePacket(const PillarVotesBundlePacket&) = default; - PillarVotesBundlePacket(PillarVotesBundlePacket&&) = default; - PillarVotesBundlePacket& operator=(const PillarVotesBundlePacket&) = default; - PillarVotesBundlePacket& operator=(PillarVotesBundlePacket&&) = default; - PillarVotesBundlePacket(std::vector>&& pillar_votes) - : 
pillar_votes(std::move(pillar_votes)) {} - PillarVotesBundlePacket(const dev::RLP& packet_rlp) { - *this = util::rlp_dec(packet_rlp); - if (pillar_votes.size() == 0 || pillar_votes.size() > kMaxPillarVotesInBundleRlp) { - throw InvalidRlpItemsCountException("PillarVotesBundlePacket", pillar_votes.size(), kMaxPillarVotesInBundleRlp); - } - } - dev::bytes encodeRlp() { return util::rlp_enc(*this); } - // TODO[2870]: optimize rlp size (use custom class), see encodePillarVotesBundleRlp std::vector> pillar_votes; - constexpr static size_t kMaxPillarVotesInBundleRlp{250}; - RLP_FIELDS_DEFINE_INPLACE(pillar_votes) }; diff --git a/libraries/core_libs/network/include/network/tarcap/packets/latest/status_packet.hpp b/libraries/core_libs/network/include/network/tarcap/packets/latest/status_packet.hpp index a4c1703ee1..314becb769 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets/latest/status_packet.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets/latest/status_packet.hpp @@ -2,7 +2,6 @@ namespace taraxa::network::tarcap { -// TODO: create new version of this packet without manual parsing struct StatusPacket { struct InitialData { uint64_t peer_chain_id; @@ -17,21 +16,6 @@ struct StatusPacket { is_light_node, node_history) }; - StatusPacket() = default; - StatusPacket(const StatusPacket&) = default; - StatusPacket(StatusPacket&&) = default; - StatusPacket& operator=(const StatusPacket&) = default; - StatusPacket& operator=(StatusPacket&&) = default; - StatusPacket(PbftPeriod peer_pbft_chain_size, PbftRound peer_pbft_round, uint64_t peer_dag_level, bool peer_syncing, - std::optional initial_data = {}) - : peer_pbft_chain_size(peer_pbft_chain_size), - peer_pbft_round(peer_pbft_round), - peer_dag_level(peer_dag_level), - peer_syncing(peer_syncing), - initial_data(std::move(initial_data)) {} - StatusPacket(const dev::RLP& packet_rlp) { *this = util::rlp_dec(packet_rlp); } - dev::bytes encodeRlp() { return util::rlp_enc(*this); } - 
PbftPeriod peer_pbft_chain_size; PbftRound peer_pbft_round; uint64_t peer_dag_level; diff --git a/libraries/core_libs/network/include/network/tarcap/packets/latest/transaction_packet.hpp b/libraries/core_libs/network/include/network/tarcap/packets/latest/transaction_packet.hpp index 3f393ce93f..4a46325f94 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets/latest/transaction_packet.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets/latest/transaction_packet.hpp @@ -5,26 +5,11 @@ namespace taraxa::network::tarcap { struct TransactionPacket { - TransactionPacket() = default; - TransactionPacket(const TransactionPacket&) = default; - TransactionPacket(TransactionPacket&&) = default; - TransactionPacket& operator=(const TransactionPacket&) = default; - TransactionPacket& operator=(TransactionPacket&&) = default; - // TODO[2868]: optimize ctor - TransactionPacket(const std::vector>& transactions) : transactions(transactions) {} - TransactionPacket(const dev::RLP& packet_rlp) { - *this = util::rlp_dec(packet_rlp); - if (transactions.size() > kMaxTransactionsInPacket) { - throw InvalidRlpItemsCountException("TransactionPacket", transactions.size(), kMaxTransactionsInPacket); - } - } - dev::bytes encodeRlp() { return util::rlp_enc(*this); } - std::vector> transactions; - constexpr static uint32_t kMaxTransactionsInPacket{500}; - RLP_FIELDS_DEFINE_INPLACE(transactions) + + constexpr static uint32_t kMaxTransactionsInPacket{500}; }; } // namespace taraxa::network::tarcap diff --git a/libraries/core_libs/network/include/network/tarcap/packets/latest/vote_packet.hpp b/libraries/core_libs/network/include/network/tarcap/packets/latest/vote_packet.hpp index d33968d7ce..d8a7c900a0 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets/latest/vote_packet.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets/latest/vote_packet.hpp @@ -13,16 +13,6 @@ struct VotePacket { RLP_FIELDS_DEFINE_INPLACE(pbft_block, 
peer_chain_size) }; - VotePacket() = default; - VotePacket(const VotePacket&) = default; - VotePacket(VotePacket&&) = default; - VotePacket& operator=(const VotePacket&) = default; - VotePacket& operator=(VotePacket&&) = default; - VotePacket(std::shared_ptr vote, std::optional optional_data = {}) - : vote(std::move(vote)), optional_data(std::move(optional_data)) {} - VotePacket(const dev::RLP& packet_rlp) { *this = util::rlp_dec(packet_rlp); }; - dev::bytes encodeRlp() { return util::rlp_enc(*this); } - std::shared_ptr vote; std::optional optional_data; diff --git a/libraries/core_libs/network/include/network/tarcap/packets/latest/votes_bundle_packet.hpp b/libraries/core_libs/network/include/network/tarcap/packets/latest/votes_bundle_packet.hpp index 6cfeb1e510..261b5a52a6 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets/latest/votes_bundle_packet.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets/latest/votes_bundle_packet.hpp @@ -6,20 +6,6 @@ namespace taraxa::network::tarcap { struct VotesBundlePacket { - VotesBundlePacket() = default; - VotesBundlePacket(const VotesBundlePacket&) = default; - VotesBundlePacket(VotesBundlePacket&&) = default; - VotesBundlePacket& operator=(const VotesBundlePacket&) = default; - VotesBundlePacket& operator=(VotesBundlePacket&&) = default; - VotesBundlePacket(std::vector>&& votes) : votes(std::move(votes)) {} - VotesBundlePacket(const dev::RLP& packet_rlp) { - *this = util::rlp_dec(packet_rlp); - if (votes.size() == 0 || votes.size() > kMaxVotesInBundleRlp) { - throw InvalidRlpItemsCountException("VotesBundlePacket", votes.size(), kMaxVotesInBundleRlp); - } - }; - dev::bytes encodeRlp() { return util::rlp_enc(*this); } - // TODO[2870]: Create votes bundles class // blk_hash_t votes_bundle_block_hash; // PbftPeriod votes_bundle_pbft_period; @@ -28,8 +14,6 @@ struct VotesBundlePacket { std::vector> votes; - constexpr static size_t kMaxVotesInBundleRlp{1000}; - // 
RLP_FIELDS_DEFINE_INPLACE(votes_bundle_block_hash, votes_bundle_pbft_period, votes_bundle_pbft_round, // votes_bundle_votes_step, votes) RLP_FIELDS_DEFINE_INPLACE(votes) diff --git a/libraries/core_libs/network/include/network/tarcap/packets/v4/dag_block_packet.hpp b/libraries/core_libs/network/include/network/tarcap/packets/v4/dag_block_packet.hpp deleted file mode 100644 index eb09ad7c10..0000000000 --- a/libraries/core_libs/network/include/network/tarcap/packets/v4/dag_block_packet.hpp +++ /dev/null @@ -1,44 +0,0 @@ -#pragma once - -#include "dag/dag_block.hpp" -#include "network/tarcap/packets_handlers/latest/common/exceptions.hpp" -#include "transaction/system_transaction.hpp" - -namespace taraxa::network::tarcap::v4 { - -struct DagBlockPacket { - DagBlockPacket(const dev::RLP& packet_rlp) { - constexpr size_t required_size = 2; - // Only one dag block can be received - if (packet_rlp.itemCount() != required_size) { - throw InvalidRlpItemsCountException("DagBlockPacket", packet_rlp.itemCount(), required_size); - } - - dev::RLP dag_rlp; - - // TODO: bad rlp form - we should not check itemsCount here... 
- if (packet_rlp.itemCount() == 2) { - const auto trx_count = packet_rlp[0].itemCount(); - transactions.reserve(trx_count); - - for (const auto tx_rlp : packet_rlp[0]) { - try { - auto trx = std::make_shared(tx_rlp); - transactions.emplace(trx->getHash(), std::move(trx)); - } catch (const Transaction::InvalidTransaction& e) { - throw MaliciousPeerException("Unable to parse transaction: " + std::string(e.what())); - } - } - dag_rlp = packet_rlp[1]; - } else { - dag_rlp = packet_rlp; - } - - dag_block = DagBlock(dag_rlp); - }; - - std::unordered_map> transactions; - DagBlock dag_block; -}; - -} // namespace taraxa::network::tarcap::v4 diff --git a/libraries/core_libs/network/include/network/tarcap/packets/v4/dag_sync_packet.hpp b/libraries/core_libs/network/include/network/tarcap/packets/v4/dag_sync_packet.hpp deleted file mode 100644 index c71515f0fb..0000000000 --- a/libraries/core_libs/network/include/network/tarcap/packets/v4/dag_sync_packet.hpp +++ /dev/null @@ -1,43 +0,0 @@ -#pragma once - -#include "dag/dag_block.hpp" -#include "network/tarcap/packets_handlers/latest/common/exceptions.hpp" -#include "transaction/system_transaction.hpp" - -namespace taraxa::network::tarcap::v4 { - -struct DagSyncPacket { - DagSyncPacket(const dev::RLP& packet_rlp) { - if (constexpr size_t required_size = 4; packet_rlp.itemCount() != required_size) { - throw InvalidRlpItemsCountException("DagSyncPacket", packet_rlp.itemCount(), required_size); - } - - auto it = packet_rlp.begin(); - request_period = (*it++).toInt(); - response_period = (*it++).toInt(); - - const auto trx_count = (*it).itemCount(); - transactions.reserve(trx_count); - - for (const auto tx_rlp : (*it++)) { - try { - auto trx = std::make_shared(tx_rlp); - transactions.emplace(trx->getHash(), std::move(trx)); - } catch (const Transaction::InvalidTransaction& e) { - throw MaliciousPeerException("Unable to parse transaction: " + std::string(e.what())); - } - } - - dag_blocks.reserve((*it).itemCount()); - for (const 
auto block_rlp : *it) { - dag_blocks.emplace_back(DagBlock{block_rlp}); - } - }; - - PbftPeriod request_period; - PbftPeriod response_period; - std::unordered_map> transactions; - std::vector dag_blocks; -}; - -} // namespace taraxa::network::tarcap::v4 diff --git a/libraries/core_libs/network/include/network/tarcap/packets/v4/get_dag_sync_packet.hpp b/libraries/core_libs/network/include/network/tarcap/packets/v4/get_dag_sync_packet.hpp deleted file mode 100644 index 25783c7008..0000000000 --- a/libraries/core_libs/network/include/network/tarcap/packets/v4/get_dag_sync_packet.hpp +++ /dev/null @@ -1,27 +0,0 @@ -#pragma once - -#include "dag/dag_block.hpp" -#include "network/tarcap/packets_handlers/latest/common/exceptions.hpp" -#include "transaction/system_transaction.hpp" - -namespace taraxa::network::tarcap::v4 { - -struct GetDagSyncPacket { - GetDagSyncPacket(const dev::RLP& packet_rlp) { - if (constexpr size_t required_size = 2; packet_rlp.itemCount() != required_size) { - throw InvalidRlpItemsCountException("GetDagSyncPacket", packet_rlp.itemCount(), required_size); - } - - auto it = packet_rlp.begin(); - peer_period = (*it++).toInt(); - - for (const auto block_hash_rlp : *it) { - blocks_hashes.emplace(block_hash_rlp.toHash()); - } - }; - - PbftPeriod peer_period; - std::unordered_set blocks_hashes; -}; - -} // namespace taraxa::network::tarcap::v4 diff --git a/libraries/core_libs/network/include/network/tarcap/packets/v4/pbft_sync_packet.hpp b/libraries/core_libs/network/include/network/tarcap/packets/v4/pbft_sync_packet.hpp deleted file mode 100644 index 6d81f72f0c..0000000000 --- a/libraries/core_libs/network/include/network/tarcap/packets/v4/pbft_sync_packet.hpp +++ /dev/null @@ -1,47 +0,0 @@ -#pragma once - -#include "network/tarcap/packets_handlers/latest/common/exceptions.hpp" -#include "pbft/period_data.hpp" -#include "vote/pbft_vote.hpp" -#include "vote/votes_bundle_rlp.hpp" - -namespace taraxa::network::tarcap::v4 { - -struct PbftSyncPacket { - 
PbftSyncPacket(const dev::RLP& packet_rlp) { - if (packet_rlp.itemCount() != kStandardPacketSize && packet_rlp.itemCount() != kChainSyncedPacketSize) { - throw InvalidRlpItemsCountException("PbftSyncPacket", packet_rlp.itemCount(), kStandardPacketSize); - } - - // PeriodData rlp parsing cannot be done through util::rlp_tuple, which automatically checks the rlp size so it is - // checked here manually - if (packet_rlp[1].itemCount() != PeriodData::kBaseRlpItemCount && - packet_rlp[1].itemCount() != PeriodData::kExtendedRlpItemCount) { - throw InvalidRlpItemsCountException("PbftSyncPacket:PeriodData", packet_rlp[1].itemCount(), - PeriodData::kBaseRlpItemCount); - } - - // last_block is the flag to indicate this is the last block in each syncing round, doesn't mean PBFT chain has - // synced - last_block = packet_rlp[0].toInt(); - try { - period_data = PeriodData(packet_rlp[1]); - } catch (const std::runtime_error& e) { - throw MaliciousPeerException("Unable to parse PeriodData: " + std::string(e.what())); - } - - // pbft_chain_synced is the flag to indicate own PBFT chain has synced with the peer's PBFT chain - if (packet_rlp.itemCount() == kChainSyncedPacketSize) { - current_block_cert_votes = decodePbftVotesBundleRlp(packet_rlp[2]); - } - }; - - bool last_block; - PeriodData period_data; - std::vector> current_block_cert_votes; - - const size_t kStandardPacketSize = 2; - const size_t kChainSyncedPacketSize = 3; -}; - -} // namespace taraxa::network::tarcap::v4 diff --git a/libraries/core_libs/network/include/network/tarcap/packets/v4/pillar_vote_packet.hpp b/libraries/core_libs/network/include/network/tarcap/packets/v4/pillar_vote_packet.hpp deleted file mode 100644 index 6871c98892..0000000000 --- a/libraries/core_libs/network/include/network/tarcap/packets/v4/pillar_vote_packet.hpp +++ /dev/null @@ -1,28 +0,0 @@ -#pragma once - -#include "common/encoding_rlp.hpp" -#include "network/tarcap/packets_handlers/latest/common/exceptions.hpp" -#include 
"vote/pillar_vote.hpp" - -namespace taraxa::network::tarcap::v4 { - -struct PillarVotePacket { - PillarVotePacket() = default; - PillarVotePacket(const PillarVotePacket&) = default; - PillarVotePacket(PillarVotePacket&&) = default; - PillarVotePacket& operator=(const PillarVotePacket&) = default; - PillarVotePacket& operator=(PillarVotePacket&&) = default; - - PillarVotePacket(const dev::RLP& packet_rlp) { - auto items = packet_rlp.itemCount(); - if (items != PillarVote::kStandardRlpSize) { - throw InvalidRlpItemsCountException("PillarVotePacket", items, PillarVote::kStandardRlpSize); - } - - pillar_vote = std::make_shared(packet_rlp); - } - - std::shared_ptr pillar_vote; -}; - -} // namespace taraxa::network::tarcap::v4 diff --git a/libraries/core_libs/network/include/network/tarcap/packets/v4/pillar_votes_bundle_packet.hpp b/libraries/core_libs/network/include/network/tarcap/packets/v4/pillar_votes_bundle_packet.hpp deleted file mode 100644 index 8ff35fc0fb..0000000000 --- a/libraries/core_libs/network/include/network/tarcap/packets/v4/pillar_votes_bundle_packet.hpp +++ /dev/null @@ -1,32 +0,0 @@ -#pragma once - -#include "common/encoding_rlp.hpp" -#include "network/tarcap/packets_handlers/latest/common/exceptions.hpp" -#include "vote/pillar_vote.hpp" - -namespace taraxa::network::tarcap::v4 { - -struct PillarVotesBundlePacket { - PillarVotesBundlePacket() = default; - PillarVotesBundlePacket(const PillarVotesBundlePacket&) = default; - PillarVotesBundlePacket(PillarVotesBundlePacket&&) = default; - PillarVotesBundlePacket& operator=(const PillarVotesBundlePacket&) = default; - PillarVotesBundlePacket& operator=(PillarVotesBundlePacket&&) = default; - - PillarVotesBundlePacket(const dev::RLP& packet_rlp) { - auto items = packet_rlp.itemCount(); - if (items == 0 || items > kMaxPillarVotesInBundleRlp) { - throw InvalidRlpItemsCountException("PillarVotesBundlePacket", items, kMaxPillarVotesInBundleRlp); - } - - for (const auto vote_rlp : packet_rlp) { - 
pillar_votes.emplace_back(std::make_shared(vote_rlp)); - } - } - - std::vector> pillar_votes; - - constexpr static size_t kMaxPillarVotesInBundleRlp{250}; -}; - -} // namespace taraxa::network::tarcap::v4 diff --git a/libraries/core_libs/network/include/network/tarcap/packets/v4/status_packet.hpp b/libraries/core_libs/network/include/network/tarcap/packets/v4/status_packet.hpp deleted file mode 100644 index 01f857aa8f..0000000000 --- a/libraries/core_libs/network/include/network/tarcap/packets/v4/status_packet.hpp +++ /dev/null @@ -1,54 +0,0 @@ -#pragma once - -#include "common/types.hpp" -#include "network/tarcap/packets_handlers/latest/common/exceptions.hpp" - -namespace taraxa::network::tarcap::v4 { - -struct StatusPacket { - StatusPacket(const dev::RLP& packet_rlp) { - if (const auto items_count = packet_rlp.itemCount(); - items_count != kInitialStatusPacketItemsCount && items_count != kStandardStatusPacketItemsCount) { - throw InvalidRlpItemsCountException("StatusPacket", packet_rlp.itemCount(), kStandardStatusPacketItemsCount); - } - - auto it = packet_rlp.begin(); - if (packet_rlp.itemCount() == kInitialStatusPacketItemsCount) { - peer_chain_id = (*it++).toInt(); - peer_dag_level = (*it++).toInt(); - genesis_hash = (*it++).toHash(); - peer_pbft_chain_size = (*it++).toInt(); - peer_syncing = (*it++).toInt(); - peer_pbft_round = (*it++).toInt(); - node_major_version = (*it++).toInt(); - node_minor_version = (*it++).toInt(); - node_patch_version = (*it++).toInt(); - is_light_node = (*it++).toInt(); - node_history = (*it++).toInt(); - } else { - peer_dag_level = (*it++).toInt(); - peer_pbft_chain_size = (*it++).toInt(); - peer_syncing = (*it++).toInt(); - peer_pbft_round = (*it++).toInt(); - } - } - - bool isInitialStatusPacket() const { return peer_chain_id.has_value(); } - - uint64_t peer_dag_level; - PbftPeriod peer_pbft_chain_size; - bool peer_syncing; - PbftRound peer_pbft_round; - std::optional peer_chain_id; - std::optional genesis_hash; - std::optional 
node_major_version; - std::optional node_minor_version; - std::optional node_patch_version; - std::optional is_light_node; - std::optional node_history; - - static constexpr uint16_t kInitialStatusPacketItemsCount = 11; - static constexpr uint16_t kStandardStatusPacketItemsCount = 4; -}; - -} // namespace taraxa::network::tarcap::v4 diff --git a/libraries/core_libs/network/include/network/tarcap/packets/v4/transaction_packet.hpp b/libraries/core_libs/network/include/network/tarcap/packets/v4/transaction_packet.hpp deleted file mode 100644 index 48f0f24326..0000000000 --- a/libraries/core_libs/network/include/network/tarcap/packets/v4/transaction_packet.hpp +++ /dev/null @@ -1,53 +0,0 @@ -#pragma once - -#include "network/tarcap/packets_handlers/latest/common/exceptions.hpp" -#include "transaction/transaction.hpp" - -namespace taraxa::network::tarcap::v4 { - -struct TransactionPacket { - TransactionPacket(const dev::RLP& packet_rlp) { - auto items = packet_rlp.itemCount(); - if (items != kTransactionPacketItemCount) { - throw InvalidRlpItemsCountException("TransactionPacket", items, kTransactionPacketItemCount); - } - auto hashes_count = packet_rlp[0].itemCount(); - auto trx_count = packet_rlp[1].itemCount(); - - if (hashes_count < trx_count) { - throw InvalidRlpItemsCountException("TransactionPacket", hashes_count, trx_count); - } - if (hashes_count == 0 || hashes_count > kMaxTransactionsInPacket + kMaxHashesInPacket) { - throw InvalidRlpItemsCountException("TransactionPacket", hashes_count, - kMaxTransactionsInPacket + kMaxHashesInPacket); - } - - if (trx_count > kMaxTransactionsInPacket) { - throw InvalidRlpItemsCountException("TransactionPacket", trx_count, kMaxTransactionsInPacket); - } - - // TODO: these hashes do not make sense after separating parsing - // // First extract only transaction hashes - // for (const auto tx_hash_rlp : packet_rlp[0]) { - // auto trx_hash = tx_hash_rlp.toHash(); - // txs_hashes.emplace_back(std::move(trx_hash)); - // } - - for 
(const auto tx_rlp : packet_rlp[1]) { - try { - auto tx = std::make_shared(tx_rlp.data().toBytes()); - transactions.emplace_back(std::move(tx)); - } catch (const Transaction::InvalidTransaction& e) { - throw MaliciousPeerException("Unable to parse transaction: " + std::string(e.what())); - } - } - }; - - std::vector> transactions; - - static constexpr uint32_t kTransactionPacketItemCount = 2; - static constexpr uint32_t kMaxTransactionsInPacket{500}; - static constexpr uint32_t kMaxHashesInPacket{5000}; -}; - -} // namespace taraxa::network::tarcap::v4 diff --git a/libraries/core_libs/network/include/network/tarcap/packets/v4/vote_packet.hpp b/libraries/core_libs/network/include/network/tarcap/packets/v4/vote_packet.hpp deleted file mode 100644 index 6f34ce7ee2..0000000000 --- a/libraries/core_libs/network/include/network/tarcap/packets/v4/vote_packet.hpp +++ /dev/null @@ -1,36 +0,0 @@ -#pragma once - -#include "network/tarcap/packets_handlers/latest/common/exceptions.hpp" -#include "pbft/pbft_block.hpp" -#include "vote/pbft_vote.hpp" - -namespace taraxa::network::tarcap::v4 { - -struct VotePacket { - VotePacket(const dev::RLP& packet_rlp) { - auto items = packet_rlp.itemCount(); - // Vote packet can contain either just a vote or vote + block + peer_chain_size - if (items != kVotePacketSize && items != kExtendedVotePacketSize) { - throw InvalidRlpItemsCountException("VotePacket", items, kExtendedVotePacketSize); - } - - vote = std::make_shared(packet_rlp[0]); - if (const size_t item_count = packet_rlp.itemCount(); item_count == kExtendedVotePacketSize) { - try { - pbft_block = std::make_shared(packet_rlp[1]); - } catch (const std::exception& e) { - throw MaliciousPeerException(e.what()); - } - peer_chain_size = packet_rlp[2].toInt(); - } - }; - - std::shared_ptr vote; - std::shared_ptr pbft_block; - std::optional peer_chain_size; - - static constexpr size_t kVotePacketSize{1}; - static constexpr size_t kExtendedVotePacketSize{3}; -}; - -} // namespace 
taraxa::network::tarcap::v4 diff --git a/libraries/core_libs/network/include/network/tarcap/packets/v4/votes_bundle_packet.hpp b/libraries/core_libs/network/include/network/tarcap/packets/v4/votes_bundle_packet.hpp deleted file mode 100644 index 41e342e1b7..0000000000 --- a/libraries/core_libs/network/include/network/tarcap/packets/v4/votes_bundle_packet.hpp +++ /dev/null @@ -1,42 +0,0 @@ -#pragma once - -#include "network/tarcap/packets_handlers/latest/common/exceptions.hpp" -#include "vote/pbft_vote.hpp" -#include "vote/votes_bundle_rlp.hpp" - -namespace taraxa::network::tarcap::v4 { - -struct VotesBundlePacket { - VotesBundlePacket(const dev::RLP& packet_rlp) { - auto items = packet_rlp.itemCount(); - if (items != kPbftVotesBundleRlpSize) { - throw InvalidRlpItemsCountException("VotesBundlePacket", items, kPbftVotesBundleRlpSize); - } - - auto votes_count = packet_rlp[kPbftVotesBundleRlpSize - 1].itemCount(); - if (votes_count == 0 || votes_count > kMaxVotesInBundleRlp) { - throw InvalidRlpItemsCountException("VotesBundlePacket", items, kMaxVotesInBundleRlp); - } - - votes_bundle_block_hash = packet_rlp[0].toHash(); - votes_bundle_pbft_period = packet_rlp[1].toInt(); - votes_bundle_pbft_round = packet_rlp[2].toInt(); - votes_bundle_votes_step = packet_rlp[3].toInt(); - - for (const auto vote_rlp : packet_rlp[4]) { - auto vote = std::make_shared(votes_bundle_block_hash, votes_bundle_pbft_period, votes_bundle_pbft_round, - votes_bundle_votes_step, vote_rlp); - votes.emplace_back(std::move(vote)); - } - }; - - blk_hash_t votes_bundle_block_hash; - PbftPeriod votes_bundle_pbft_period; - PbftRound votes_bundle_pbft_round; - PbftStep votes_bundle_votes_step; - std::vector> votes; - - const size_t kMaxVotesInBundleRlp{1000}; -}; - -} // namespace taraxa::network::tarcap::v4 diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/common/base_packet_handler.hpp 
b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/common/base_packet_handler.hpp index 8a67bab338..37d438345d 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/common/base_packet_handler.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/common/base_packet_handler.hpp @@ -4,6 +4,9 @@ namespace taraxa::network::tarcap { +// Taraxa capability name +constexpr char TARAXA_CAPABILITY_NAME[] = "taraxa"; + /** * @brief Base Packet handler base class that consists processPacket function */ diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/common/ext_syncing_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/common/ext_syncing_packet_handler.hpp index ad41ca6b89..ee53a26841 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/common/ext_syncing_packet_handler.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/common/ext_syncing_packet_handler.hpp @@ -99,12 +99,12 @@ class ExtSyncingPacketHandler : public PacketHandler { LOG(this->log_nf_) << "Send GetPbftSyncPacket with period " << request_period << " to node " << syncing_peer->getId(); return this->sealAndSend(syncing_peer->getId(), SubprotocolPacketType::kGetPbftSyncPacket, - GetPbftSyncPacket{request_period}.encodeRlp()); + encodePacketRlp(GetPbftSyncPacket{request_period})); } void requestDagBlocks(const dev::p2p::NodeID &_nodeID, std::vector &&blocks, PbftPeriod period) { this->sealAndSend(_nodeID, SubprotocolPacketType::kGetDagSyncPacket, - GetDagSyncPacket{period, std::move(blocks)}.encodeRlp()); + encodePacketRlp(GetDagSyncPacket{period, std::move(blocks)})); } void requestPendingDagBlocks(std::shared_ptr peer = nullptr) { diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/common/ext_votes_packet_handler.hpp 
b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/common/ext_votes_packet_handler.hpp index 1bb39a9a01..9171c7adba 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/common/ext_votes_packet_handler.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/common/ext_votes_packet_handler.hpp @@ -123,7 +123,7 @@ class ExtVotesPacketHandler : public PacketHandler { // TODO[2868]: optimize this auto votes_copy = votes; if (this->sealAndSend(peer->getId(), SubprotocolPacketType::kVotesBundlePacket, - VotesBundlePacket{std::move(votes_copy)}.encodeRlp())) { + encodePacketRlp(VotesBundlePacket{std::move(votes_copy)}))) { LOG(this->log_dg_) << " Votes bundle with " << votes.size() << " votes sent to " << peer->getId(); for (const auto& vote : votes) { peer->markPbftVoteAsKnown(vote->getHash()); @@ -192,7 +192,7 @@ class ExtVotesPacketHandler : public PacketHandler { // request PBFT chain sync from this node this->sealAndSend( peer->getId(), SubprotocolPacketType::kGetPbftSyncPacket, - GetPbftSyncPacket{std::max(vote->getPeriod() - 1, peer->pbft_chain_size_.load())}.encodeRlp()); + encodePacketRlp(GetPbftSyncPacket{std::max(vote->getPeriod() - 1, peer->pbft_chain_size_.load())})); last_pbft_block_sync_request_time_ = std::chrono::system_clock::now(); } diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/common/packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/common/packet_handler.hpp index 01e000545d..ed19f219c9 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/common/packet_handler.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/common/packet_handler.hpp @@ -8,6 +8,7 @@ #include "exceptions.hpp" #include "logger/logger.hpp" #include "network/tarcap/packet_types.hpp" +#include 
"network/tarcap/packets/latest/get_next_votes_bundle_packet.hpp" #include "network/tarcap/packets_handlers/latest/common/base_packet_handler.hpp" #include "network/tarcap/packets_handlers/latest/common/exceptions.hpp" #include "network/tarcap/shared_states/peers_state.hpp" @@ -17,8 +18,15 @@ namespace taraxa::network::tarcap { -// Taraxa capability name -constexpr char TARAXA_CAPABILITY_NAME[] = "taraxa"; +template +PacketType decodePacketRlp(const dev::RLP& packet_rlp) { + return util::rlp_dec(packet_rlp); +} + +template +dev::bytes encodePacketRlp(PacketType packet) { + return util::rlp_enc(packet); +} /** * @brief Packet handler base class that consists of shared state and some commonly used functions @@ -58,11 +66,11 @@ class PacketHandler : public BasePacketHandler { return; } - // TODO[2865]: can be removed after taraxa net version is completely switched to 5 - checkPacketRlpIsList(packet_data); + // Decode packet rlp into packet object + auto packet = decodePacketRlp(packet_data.rlp_); // Main processing function - process(PacketType{packet_data.rlp_}, peer.first); + process(std::move(packet), peer.first); auto processing_duration = std::chrono::duration_cast(std::chrono::steady_clock::now() - begin); @@ -104,9 +112,8 @@ class PacketHandler : public BasePacketHandler { // TODO: probbaly should not be here but in specific packet class ??? 
void requestPbftNextVotesAtPeriodRound(const dev::p2p::NodeID& peerID, PbftPeriod pbft_period, PbftRound pbft_round) { LOG(log_dg_) << "Sending GetNextVotesSyncPacket with period:" << pbft_period << ", round:" << pbft_round; - // TODO: use packet class instead of manually creating rlp - sealAndSend(peerID, SubprotocolPacketType::kGetNextVotesSyncPacket, - std::move(dev::RLPStream(2) << pbft_period << pbft_round)); + const auto packet = GetNextVotesBundlePacket{.peer_pbft_period = pbft_period, .peer_pbft_round = pbft_round}; + sealAndSend(peerID, SubprotocolPacketType::kGetNextVotesSyncPacket, encodePacketRlp(packet)); } private: @@ -131,23 +138,6 @@ class PacketHandler : public BasePacketHandler { virtual void process(PacketType&& packet, const std::shared_ptr& peer) = 0; protected: - /** - * @brief Checks if packet rlp is a list, if not it throws InvalidRlpItemsCountException - * - * @param packet_data - * @throws InvalidRlpItemsCountException exception - */ - void checkPacketRlpIsList(const threadpool::PacketData& packet_data) const { - if (!packet_data.rlp_.isList()) { - throw InvalidRlpItemsCountException(packet_data.type_str_ + " RLP must be a list. 
", 0, 1); - } - } - - // TODO[2865]: remove - bool sealAndSend(const dev::p2p::NodeID& node_id, SubprotocolPacketType packet_type, dev::RLPStream&& rlp) { - return sealAndSend(node_id, packet_type, rlp.invalidate()); - } - bool sealAndSend(const dev::p2p::NodeID& node_id, SubprotocolPacketType packet_type, dev::bytes&& rlp_bytes) { auto host = peers_state_->host_.lock(); if (!host) { diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/pillar_votes_bundle_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/pillar_votes_bundle_packet_handler.hpp index ed827d7405..4a7b521ebf 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/pillar_votes_bundle_packet_handler.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/pillar_votes_bundle_packet_handler.hpp @@ -15,6 +15,8 @@ class PillarVotesBundlePacketHandler : public ExtPillarVotePacketHandler& peer) override; }; diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/common/ext_pillar_vote_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/common/ext_pillar_vote_packet_handler.hpp new file mode 100644 index 0000000000..8dfea03969 --- /dev/null +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/common/ext_pillar_vote_packet_handler.hpp @@ -0,0 +1,35 @@ +#pragma once + +#include "packet_handler.hpp" + +namespace taraxa { +class KeyManager; +class PillarVote; + +namespace pillar_chain { +class PillarChainManager; +} // namespace pillar_chain + +namespace final_chain { +class FinalChain; +} + +} // namespace taraxa + +namespace taraxa::network::tarcap::v4 { + +class ExtPillarVotePacketHandler : public PacketHandler { + public: + ExtPillarVotePacketHandler(const FullNodeConfig& conf, std::shared_ptr peers_state, + std::shared_ptr packets_stats, + std::shared_ptr pillar_chain_manager, + 
const addr_t& node_addr, const std::string& log_channel); + + protected: + bool processPillarVote(const std::shared_ptr& vote, const std::shared_ptr& peer); + + protected: + std::shared_ptr pillar_chain_manager_; +}; + +} // namespace taraxa::network::tarcap::v4 diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/common/ext_syncing_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/common/ext_syncing_packet_handler.hpp index 72cbc57a74..5019670fde 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/common/ext_syncing_packet_handler.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/common/ext_syncing_packet_handler.hpp @@ -1,10 +1,18 @@ #pragma once #include "dag/dag_manager.hpp" -#include "network/tarcap/shared_states/pbft_syncing_state.hpp" -#include "network/tarcap/packets_handlers/latest/common/packet_handler.hpp" -#include "pbft/pbft_chain.hpp" -#include "pbft/pbft_manager.hpp" +#include "packet_handler.hpp" + +namespace taraxa { +class PbftChain; +class PbftManager; +class DagManager; +class DbStorage; +} // namespace taraxa + +namespace taraxa::network::tarcap { +class PbftSyncingState; +} namespace taraxa::network::tarcap::v4 { @@ -12,20 +20,13 @@ namespace taraxa::network::tarcap::v4 { * @brief ExtSyncingPacketHandler is extended abstract PacketHandler with added functions that are used in packet * handlers that need to interact with syncing process in some way */ -template -class ExtSyncingPacketHandler : public PacketHandler { +class ExtSyncingPacketHandler : public PacketHandler { public: ExtSyncingPacketHandler(const FullNodeConfig &conf, std::shared_ptr peers_state, std::shared_ptr packets_stats, std::shared_ptr pbft_syncing_state, std::shared_ptr pbft_chain, std::shared_ptr pbft_mgr, std::shared_ptr dag_mgr, - std::shared_ptr db, const addr_t &node_addr, const std::string &log_channel_name) - : PacketHandler(conf, 
std::move(peers_state), std::move(packets_stats), node_addr, log_channel_name), - pbft_syncing_state_(std::move(pbft_syncing_state)), - pbft_chain_(std::move(pbft_chain)), - pbft_mgr_(std::move(pbft_mgr)), - dag_mgr_(std::move(dag_mgr)), - db_(std::move(db)) {} + std::shared_ptr db, const addr_t &node_addr, const std::string &log_channel_name); virtual ~ExtSyncingPacketHandler() = default; ExtSyncingPacketHandler &operator=(const ExtSyncingPacketHandler &) = delete; @@ -35,44 +36,7 @@ class ExtSyncingPacketHandler : public PacketHandler { * @brief Start syncing pbft if needed * */ - void startSyncingPbft() { - if (pbft_syncing_state_->isPbftSyncing()) { - LOG(this->log_dg_) << "startSyncingPbft called but syncing_ already true"; - return; - } - - std::shared_ptr peer = getMaxChainPeer(); - if (!peer) { - LOG(this->log_nf_) << "Restarting syncing PBFT not possible since no connected peers"; - return; - } - - auto pbft_sync_period = pbft_mgr_->pbftSyncingPeriod(); - if (peer->pbft_chain_size_ > pbft_sync_period) { - auto peer_id = peer->getId().abridged(); - auto peer_pbft_chain_size = peer->pbft_chain_size_.load(); - if (!pbft_syncing_state_->setPbftSyncing(true, pbft_sync_period, std::move(peer))) { - LOG(this->log_dg_) << "startSyncingPbft called but syncing_ already true"; - return; - } - LOG(this->log_si_) << "Restarting syncing PBFT from peer " << peer_id << ", peer PBFT chain size " - << peer_pbft_chain_size << ", own PBFT chain synced at period " << pbft_sync_period; - - if (syncPeerPbft(pbft_sync_period + 1)) { - // Disable snapshots only if are syncing from scratch - if (pbft_syncing_state_->isDeepPbftSyncing()) { - db_->disableSnapshots(); - } - } else { - pbft_syncing_state_->setPbftSyncing(false); - } - } else { - LOG(this->log_nf_) << "Restarting syncing PBFT not needed since our pbft chain size: " << pbft_sync_period << "(" - << pbft_chain_->getPbftChainSize() << ")" - << " is greater or equal than max node pbft chain size:" << peer->pbft_chain_size_; 
- db_->enableSnapshots(); - } - } + void startSyncingPbft(); /** * @brief Send sync request to the current syncing peer with specified request_period @@ -81,112 +45,14 @@ class ExtSyncingPacketHandler : public PacketHandler { * * @return true if sync request was sent, otherwise false */ - bool syncPeerPbft(PbftPeriod request_period) { - const auto syncing_peer = pbft_syncing_state_->syncingPeer(); - if (!syncing_peer) { - LOG(this->log_er_) << "Unable to send GetPbftSyncPacket. No syncing peer set."; - return false; - } - - if (request_period > syncing_peer->pbft_chain_size_) { - LOG(this->log_wr_) << "Invalid syncPeerPbft argument. Node " << syncing_peer->getId() << " chain size " - << syncing_peer->pbft_chain_size_ << ", requested period " << request_period; - return false; - } - - LOG(this->log_nf_) << "Send GetPbftSyncPacket with period " << request_period << " to node " - << syncing_peer->getId(); - return this->sealAndSend(syncing_peer->getId(), SubprotocolPacketType::kGetPbftSyncPacket, - std::move(dev::RLPStream(1) << request_period)); - } + bool syncPeerPbft(PbftPeriod request_period); void requestDagBlocks(const dev::p2p::NodeID &_nodeID, const std::unordered_set &blocks, - PbftPeriod period) { - dev::RLPStream s(2); // Period + blocks list - s.append(period); - s.append(blocks); - - this->sealAndSend(_nodeID, SubprotocolPacketType::kGetDagSyncPacket, std::move(s)); - } - - void requestPendingDagBlocks(std::shared_ptr peer = nullptr) { - if (!peer) { - peer = getMaxChainPeer([](const std::shared_ptr &peer) { - if (peer->peer_dag_synced_ || !peer->dagSyncingAllowed()) { - return false; - } - return true; - }); - if (!peer) { - LOG(this->log_nf_) << "requestPendingDagBlocks not possible since no peers are matching conditions"; - return; - } - } - - if (!peer) { - LOG(this->log_nf_) << "requestPendingDagBlocks not possible since no connected peers"; - return; - } - - // This prevents ddos requesting dag blocks. 
We can only request this one time from one peer. - if (peer->peer_dag_synced_) { - LOG(this->log_nf_) << "requestPendingDagBlocks not possible since already requested for peer"; - return; - } - - // Only request dag blocks if periods are matching - auto pbft_sync_period = pbft_mgr_->pbftSyncingPeriod(); - if (pbft_sync_period == peer->pbft_chain_size_) { - // This prevents parallel requests - if (bool b = false; !peer->peer_dag_syncing_.compare_exchange_strong(b, !b)) { - LOG(this->log_nf_) << "requestPendingDagBlocks not possible since already requesting for peer"; - return; - } - LOG(this->log_nf_) << "Request pending blocks from peer " << peer->getId(); - std::unordered_set known_non_finalized_blocks; - auto [period, blocks] = dag_mgr_->getNonFinalizedBlocks(); - for (auto &level_blocks : blocks) { - for (auto &block : level_blocks.second) { - known_non_finalized_blocks.insert(block); - } - } - - requestDagBlocks(peer->getId(), known_non_finalized_blocks, period); - } - } + PbftPeriod period); + void requestPendingDagBlocks(std::shared_ptr peer = nullptr); std::shared_ptr getMaxChainPeer(std::function &)> filter_func = - [](const std::shared_ptr &) { return true; }) { - std::shared_ptr max_pbft_chain_peer; - PbftPeriod max_pbft_chain_size = 0; - uint64_t max_node_dag_level = 0; - - // Find peer with max pbft chain and dag level - for (auto const &peer : this->peers_state_->getAllPeers()) { - // Apply the filter function - if (!filter_func(peer.second)) { - continue; - } - - if (peer.second->pbft_chain_size_ > max_pbft_chain_size) { - if (peer.second->peer_light_node && - pbft_mgr_->pbftSyncingPeriod() + peer.second->peer_light_node_history < peer.second->pbft_chain_size_) { - LOG(this->log_er_) << "Disconnecting from light node peer " << peer.first - << " History: " << peer.second->peer_light_node_history - << " chain size: " << peer.second->pbft_chain_size_; - this->disconnect(peer.first, dev::p2p::UserReason); - continue; - } - max_pbft_chain_size = 
peer.second->pbft_chain_size_; - max_node_dag_level = peer.second->dag_level_; - max_pbft_chain_peer = peer.second; - } else if (peer.second->pbft_chain_size_ == max_pbft_chain_size && peer.second->dag_level_ > max_node_dag_level) { - max_node_dag_level = peer.second->dag_level_; - max_pbft_chain_peer = peer.second; - } - } - return max_pbft_chain_peer; - } + [](const std::shared_ptr &) { return true; }); protected: std::shared_ptr pbft_syncing_state_{nullptr}; @@ -197,4 +63,4 @@ class ExtSyncingPacketHandler : public PacketHandler { std::shared_ptr db_{nullptr}; }; -} // namespace taraxa::network::tarcap +} // namespace taraxa::network::tarcap::v4 diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/common/ext_votes_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/common/ext_votes_packet_handler.hpp index 77809f7c87..40ca24fb90 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/common/ext_votes_packet_handler.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/common/ext_votes_packet_handler.hpp @@ -1,11 +1,15 @@ #pragma once -#include "network/tarcap/packets_handlers/latest/common/exceptions.hpp" -#include "network/tarcap/packets_handlers/latest/common/packet_handler.hpp" -#include "pbft/pbft_manager.hpp" -#include "vote/pbft_vote.hpp" -#include "vote/votes_bundle_rlp.hpp" -#include "vote_manager/vote_manager.hpp" +#include "packet_handler.hpp" + +namespace taraxa { +class PbftVote; +class PbftManager; +class PbftChain; +class PbftBlock; +class VoteManager; +class SlashingManager; +} // namespace taraxa namespace taraxa::network::tarcap::v4 { @@ -13,21 +17,13 @@ namespace taraxa::network::tarcap::v4 { * @brief ExtVotesPacketHandler is extended abstract PacketHandler with added functions that are used in packet * handlers that process pbft votes */ -template -class ExtVotesPacketHandler : public PacketHandler { +class 
ExtVotesPacketHandler : public PacketHandler { public: ExtVotesPacketHandler(const FullNodeConfig& conf, std::shared_ptr peers_state, std::shared_ptr packets_stats, std::shared_ptr pbft_mgr, std::shared_ptr pbft_chain, std::shared_ptr vote_mgr, std::shared_ptr slashing_manager, const addr_t& node_addr, - const std::string& log_channel_name) - : PacketHandler(conf, std::move(peers_state), std::move(packets_stats), node_addr, log_channel_name), - last_votes_sync_request_time_(std::chrono::system_clock::now()), - last_pbft_block_sync_request_time_(std::chrono::system_clock::now()), - pbft_mgr_(std::move(pbft_mgr)), - pbft_chain_(std::move(pbft_chain)), - vote_mgr_(std::move(vote_mgr)), - slashing_manager_(std::move(slashing_manager)) {} + const std::string& log_channel_name); virtual ~ExtVotesPacketHandler() = default; ExtVotesPacketHandler(const ExtVotesPacketHandler&) = delete; @@ -45,117 +41,17 @@ class ExtVotesPacketHandler : public PacketHandler { * @return if vote was successfully processed, otherwise false */ bool processVote(const std::shared_ptr& vote, const std::shared_ptr& pbft_block, - const std::shared_ptr& peer, bool validate_max_round_step) { - if (pbft_block && !validateVoteAndBlock(vote, pbft_block)) { - throw MaliciousPeerException("Received vote's voted value != received pbft block"); - } - - if (vote_mgr_->voteInVerifiedMap(vote)) { - LOG(this->log_dg_) << "Vote " << vote->getHash() << " already inserted in verified queue"; - return false; - } - - // Validate vote's period, round and step min/max values - if (const auto vote_valid = validateVotePeriodRoundStep(vote, peer, validate_max_round_step); !vote_valid.first) { - LOG(this->log_wr_) << "Vote period/round/step " << vote->getHash() - << " validation failed. 
Err: " << vote_valid.second; - return false; - } - - // Check is vote is unique per period, round & step & voter -> each address can generate just 1 vote - // (for a value that isn't NBH) per period, round & step - if (auto vote_valid = vote_mgr_->isUniqueVote(vote); !vote_valid.first) { - // Create double voting proof - slashing_manager_->submitDoubleVotingProof(vote, vote_valid.second); - throw MaliciousPeerException("Received double vote", vote->getVoter()); - } - - // Validate vote's signature, vrf, etc... - if (const auto vote_valid = vote_mgr_->validateVote(vote); !vote_valid.first) { - LOG(this->log_wr_) << "Vote " << vote->getHash() << " validation failed. Err: " << vote_valid.second; - return false; - } - - if (!vote_mgr_->addVerifiedVote(vote)) { - LOG(this->log_dg_) << "Vote " << vote->getHash() << " already inserted in verified queue(race condition)"; - return false; - } - - if (pbft_block) { - pbft_mgr_->processProposedBlock(pbft_block, vote); - } - - return true; - } + const std::shared_ptr& peer, bool validate_max_round_step); /** * @brief Checks is vote is relevant for current pbft state in terms of period, round and type * @param vote * @return true if vote is relevant for current pbft state, otherwise false */ - bool isPbftRelevantVote(const std::shared_ptr& vote) const { - const auto [current_pbft_round, current_pbft_period] = pbft_mgr_->getPbftRoundAndPeriod(); - - if (vote->getPeriod() >= current_pbft_period && vote->getRound() >= current_pbft_round) { - // Standard current or future vote - return true; - } else if (vote->getPeriod() == current_pbft_period && vote->getRound() == (current_pbft_round - 1) && - vote->getType() == PbftVoteTypes::next_vote) { - // Previous round next vote - return true; - } else if (vote->getPeriod() == current_pbft_period - 1 && vote->getType() == PbftVoteTypes::cert_vote) { - // Previous period cert vote - potential reward vote - return true; - } - - return false; - } + bool isPbftRelevantVote(const 
std::shared_ptr& vote) const; virtual void sendPbftVotesBundle(const std::shared_ptr& peer, - std::vector>&& votes) { - if (votes.empty()) { - return; - } - - auto sendVotes = [this, &peer](std::vector>&& votes) { - auto votes_bytes = encodePbftVotesBundleRlp(std::move(votes)); - if (votes_bytes.empty()) { - LOG(this->log_er_) << "Unable to send VotesBundle rlp"; - return; - } - - dev::RLPStream votes_rlp_stream; - votes_rlp_stream.appendRaw(votes_bytes); - - if (this->sealAndSend(peer->getId(), SubprotocolPacketType::kVotesBundlePacket, std::move(votes_rlp_stream))) { - LOG(this->log_dg_) << " Votes bundle with " << votes.size() << " votes sent to " << peer->getId(); - for (const auto& vote : votes) { - peer->markPbftVoteAsKnown(vote->getHash()); - } - } - }; - - if (votes.size() <= kMaxVotesInBundleRlp) { - sendVotes(std::move(votes)); - return; - } else { - // Need to split votes into multiple packets - size_t index = 0; - while (index < votes.size()) { - const size_t votes_count = std::min(kMaxVotesInBundleRlp, votes.size() - index); - - const auto begin_it = std::next(votes.begin(), index); - const auto end_it = std::next(begin_it, votes_count); - - std::vector> votes_sub_vector; - std::move(begin_it, end_it, std::back_inserter(votes_sub_vector)); - - sendVotes(std::move(votes_sub_vector)); - - index += votes_count; - } - } - } + std::vector>&& votes); private: /** @@ -168,86 +64,7 @@ class ExtVotesPacketHandler : public PacketHandler { */ std::pair validateVotePeriodRoundStep(const std::shared_ptr& vote, const std::shared_ptr& peer, - bool validate_max_round_step) { - const auto [current_pbft_round, current_pbft_period] = pbft_mgr_->getPbftRoundAndPeriod(); - - auto genErrMsg = [period = current_pbft_period, round = current_pbft_round, - step = pbft_mgr_->getPbftStep()](const std::shared_ptr& vote) -> std::string { - std::stringstream err; - err << "Vote " << vote->getHash() << " (period, round, step) = (" << vote->getPeriod() << ", " << vote->getRound() - << 
", " << vote->getStep() << "). Current PBFT (period, round, step) = (" << period << ", " << round << ", " - << step << ")"; - return err.str(); - }; - - // Period validation - // vote->getPeriod() == current_pbft_period - 1 && cert_vote -> potential reward vote - if (vote->getPeriod() < current_pbft_period - 1 || - (vote->getPeriod() == current_pbft_period - 1 && vote->getType() != PbftVoteTypes::cert_vote)) { - return {false, "Invalid period(too small): " + genErrMsg(vote)}; - } else if (this->kConf.network.ddos_protection.vote_accepting_periods && - vote->getPeriod() - 1 > - current_pbft_period + this->kConf.network.ddos_protection.vote_accepting_periods) { - // skip this check if kConf.network.ddos_protection.vote_accepting_periods == 0 - // vote->getPeriod() - 1 is here because votes are validated against vote_period - 1 in dpos contract - // Do not request round sync too often here - if (vote->getVoter() == peer->getId() && - std::chrono::system_clock::now() - last_pbft_block_sync_request_time_ > kSyncRequestInterval) { - // request PBFT chain sync from this node - this->sealAndSend( - peer->getId(), SubprotocolPacketType::kGetPbftSyncPacket, - std::move(dev::RLPStream(1) << std::max(vote->getPeriod() - 1, peer->pbft_chain_size_.load()))); - last_pbft_block_sync_request_time_ = std::chrono::system_clock::now(); - } - - return {false, "Invalid period(too big): " + genErrMsg(vote)}; - } - - // Round validation - auto checking_round = current_pbft_round; - // If period is not the same we assume current round is equal to 1 - // So we won't accept votes for future period with round bigger than kConf.network.vote_accepting_steps - if (current_pbft_period != vote->getPeriod()) { - checking_round = 1; - } - - // vote->getRound() == checking_round - 1 && next_vote -> previous round next vote - if (vote->getRound() < checking_round - 1 || - (vote->getRound() == checking_round - 1 && vote->getType() != PbftVoteTypes::next_vote)) { - return {false, "Invalid round(too 
small): " + genErrMsg(vote)}; - } else if (validate_max_round_step && this->kConf.network.ddos_protection.vote_accepting_rounds && - vote->getRound() >= checking_round + this->kConf.network.ddos_protection.vote_accepting_rounds) { - // skip this check if kConf.network.vote_accepting_rounds == 0 - // Trigger votes(round) syncing only if we are in sync in terms of period - if (current_pbft_period == vote->getPeriod()) { - // Do not request round sync too often here - if (vote->getVoter() == peer->getId() && - std::chrono::system_clock::now() - last_votes_sync_request_time_ > kSyncRequestInterval) { - // request round votes sync from this node - this->requestPbftNextVotesAtPeriodRound(peer->getId(), current_pbft_period, current_pbft_round); - last_votes_sync_request_time_ = std::chrono::system_clock::now(); - } - } - - return {false, "Invalid round(too big): " + genErrMsg(vote)}; - } - - // Step validation - auto checking_step = pbft_mgr_->getPbftStep(); - // If period or round is not the same we assume current step is equal to 1 - // So we won't accept votes for future rounds with step bigger than kConf.network.vote_accepting_steps - if (current_pbft_period != vote->getPeriod() || current_pbft_round != vote->getRound()) { - checking_step = 1; - } - - // skip check if kConf.network.vote_accepting_steps == 0 - if (validate_max_round_step && this->kConf.network.ddos_protection.vote_accepting_steps && - vote->getStep() >= checking_step + this->kConf.network.ddos_protection.vote_accepting_steps) { - return {false, "Invalid step(too big): " + genErrMsg(vote)}; - } - - return {true, ""}; - } + bool validate_max_round_step); /** * @brief Validates provided vote if voted value == provided block @@ -256,14 +73,7 @@ class ExtVotesPacketHandler : public PacketHandler { * @param pbft_block * @return true if validation successful, otherwise false */ - bool validateVoteAndBlock(const std::shared_ptr& vote, const std::shared_ptr& pbft_block) const { - if (pbft_block->getBlockHash() 
!= vote->getBlockHash()) { - LOG(this->log_er_) << "Vote " << vote->getHash() << " voted block " << vote->getBlockHash() << " != actual block " - << pbft_block->getBlockHash(); - return false; - } - return true; - } + bool validateVoteAndBlock(const std::shared_ptr& vote, const std::shared_ptr& pbft_block) const; protected: constexpr static size_t kMaxVotesInBundleRlp{1000}; @@ -278,4 +88,4 @@ class ExtVotesPacketHandler : public PacketHandler { std::shared_ptr slashing_manager_; }; -} // namespace taraxa::network::tarcap +} // namespace taraxa::network::tarcap::v4 diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/common/packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/common/packet_handler.hpp new file mode 100644 index 0000000000..cf33bb23b2 --- /dev/null +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/common/packet_handler.hpp @@ -0,0 +1,86 @@ +#pragma once + +#include + +#include +#include + +#include "logger/logger.hpp" +#include "network/tarcap/packet_types.hpp" +#include "network/tarcap/packets_handlers/latest/common/base_packet_handler.hpp" +#include "network/tarcap/packets_handlers/latest/common/exceptions.hpp" +#include "network/tarcap/shared_states/peers_state.hpp" +#include "network/tarcap/taraxa_peer.hpp" +#include "network/threadpool/packet_data.hpp" + +namespace taraxa::network::tarcap::v4 { + +// class TimePeriodPacketsStats; + +/** + * @brief Packet handler base class that consists of shared state and some commonly used functions + */ +class PacketHandler : public BasePacketHandler { + public: + PacketHandler(const FullNodeConfig& conf, std::shared_ptr peers_state, + std::shared_ptr packets_stats, const addr_t& node_addr, + const std::string& log_channel_name); + virtual ~PacketHandler() = default; + PacketHandler(const PacketHandler&) = default; + PacketHandler(PacketHandler&&) = default; + PacketHandler& operator=(const PacketHandler&) = 
delete; + PacketHandler& operator=(PacketHandler&&) = delete; + + /** + * @brief Packet processing function wrapper that logs packet stats and calls process function + * + * @param packet_data + */ + void processPacket(const threadpool::PacketData& packet_data); + + void requestPbftNextVotesAtPeriodRound(const dev::p2p::NodeID& peerID, PbftPeriod pbft_period, PbftRound pbft_round); + + private: + void handle_caught_exception(std::string_view exception_msg, const threadpool::PacketData& packet_data, + const dev::p2p::NodeID& peer, + dev::p2p::DisconnectReason disconnect_reason = dev::p2p::DisconnectReason::UserReason, + bool set_peer_as_malicious = false); + + /** + * @brief Main packet processing function + */ + virtual void process(const threadpool::PacketData& packet_data, const std::shared_ptr& peer) = 0; + + /** + * @brief Validates packet rlp format - items count + * + * @throws InvalidRlpItemsCountException exception + */ + virtual void validatePacketRlpFormat(const threadpool::PacketData& packet_data) const = 0; + + protected: + /** + * @brief Checks if packet rlp is a list, if not it throws InvalidRlpItemsCountException + * + * @param packet_data + * @throws InvalidRlpItemsCountException exception + */ + void checkPacketRlpIsList(const threadpool::PacketData& packet_data) const; + + bool sealAndSend(const dev::p2p::NodeID& nodeID, SubprotocolPacketType packet_type, dev::RLPStream&& rlp); + void disconnect(const dev::p2p::NodeID& node_id, dev::p2p::DisconnectReason reason); + + protected: + // Node config + const FullNodeConfig& kConf; + + std::shared_ptr peers_state_{nullptr}; + + // Shared packet stats + std::shared_ptr packets_stats_; + + // Declare logger instances + LOG_OBJECTS_DEFINE +}; + +} // namespace taraxa::network::tarcap::v4 diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/dag_block_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/dag_block_packet_handler.hpp index 
57910851be..dc9a172a25 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/dag_block_packet_handler.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/dag_block_packet_handler.hpp @@ -1,7 +1,6 @@ #pragma once -#include "network/tarcap/packets/v4/dag_block_packet.hpp" -#include "network/tarcap/packets_handlers/v4/common/ext_syncing_packet_handler.hpp" +#include "common/ext_syncing_packet_handler.hpp" namespace taraxa { class TransactionManager; @@ -9,7 +8,7 @@ class TransactionManager; namespace taraxa::network::tarcap::v4 { -class DagBlockPacketHandler : public v4::ExtSyncingPacketHandler { +class DagBlockPacketHandler : public ExtSyncingPacketHandler { public: DagBlockPacketHandler(const FullNodeConfig &conf, std::shared_ptr peers_state, std::shared_ptr packets_stats, @@ -27,7 +26,8 @@ class DagBlockPacketHandler : public v4::ExtSyncingPacketHandler &peer) override; + virtual void validatePacketRlpFormat(const threadpool::PacketData &packet_data) const override; + virtual void process(const threadpool::PacketData &packet_data, const std::shared_ptr &peer) override; protected: std::shared_ptr trx_mgr_{nullptr}; diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/dag_sync_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/dag_sync_packet_handler.hpp index 9178d04f62..f3702a7cb0 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/dag_sync_packet_handler.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/dag_sync_packet_handler.hpp @@ -1,7 +1,6 @@ #pragma once -#include "network/tarcap/packets/v4/dag_sync_packet.hpp" -#include "network/tarcap/packets_handlers/v4/common/ext_syncing_packet_handler.hpp" +#include "common/ext_syncing_packet_handler.hpp" namespace taraxa { class TransactionManager; @@ -9,7 +8,7 @@ class TransactionManager; namespace taraxa::network::tarcap::v4 { 
-class DagSyncPacketHandler : public v4::ExtSyncingPacketHandler { +class DagSyncPacketHandler : public ExtSyncingPacketHandler { public: DagSyncPacketHandler(const FullNodeConfig& conf, std::shared_ptr peers_state, std::shared_ptr packets_stats, @@ -22,7 +21,8 @@ class DagSyncPacketHandler : public v4::ExtSyncingPacketHandler& peer) override; + virtual void validatePacketRlpFormat(const threadpool::PacketData& packet_data) const override; + virtual void process(const threadpool::PacketData& packet_data, const std::shared_ptr& peer) override; protected: std::shared_ptr trx_mgr_{nullptr}; diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/get_dag_sync_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/get_dag_sync_packet_handler.hpp index ff1b55097f..a6e7b0b121 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/get_dag_sync_packet_handler.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/get_dag_sync_packet_handler.hpp @@ -1,7 +1,6 @@ #pragma once -#include "network/tarcap/packets/v4/get_dag_sync_packet.hpp" -#include "network/tarcap/packets_handlers/latest/common/packet_handler.hpp" +#include "common/packet_handler.hpp" #include "transaction/transaction.hpp" namespace taraxa { @@ -12,7 +11,7 @@ class TransactionManager; namespace taraxa::network::tarcap::v4 { -class GetDagSyncPacketHandler : public PacketHandler { +class GetDagSyncPacketHandler : public PacketHandler { public: GetDagSyncPacketHandler(const FullNodeConfig& conf, std::shared_ptr peers_state, std::shared_ptr packets_stats, @@ -27,7 +26,8 @@ class GetDagSyncPacketHandler : public PacketHandler { static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::kGetDagSyncPacket; private: - virtual void process(GetDagSyncPacket&& packet, const std::shared_ptr& peer) override; + virtual void validatePacketRlpFormat(const threadpool::PacketData& packet_data) 
const override; + virtual void process(const threadpool::PacketData& packet_data, const std::shared_ptr& peer) override; protected: std::shared_ptr trx_mgr_; diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/get_next_votes_bundle_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/get_next_votes_bundle_packet_handler.hpp new file mode 100644 index 0000000000..e647cab96d --- /dev/null +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/get_next_votes_bundle_packet_handler.hpp @@ -0,0 +1,29 @@ +#pragma once + +#include "common/ext_votes_packet_handler.hpp" + +namespace taraxa { +class PbftManager; +class VoteManager; +} // namespace taraxa + +namespace taraxa::network::tarcap::v4 { + +class GetNextVotesBundlePacketHandler : public ExtVotesPacketHandler { + public: + GetNextVotesBundlePacketHandler(const FullNodeConfig& conf, std::shared_ptr peers_state, + std::shared_ptr packets_stats, + std::shared_ptr pbft_mgr, std::shared_ptr pbft_chain, + std::shared_ptr vote_mgr, + std::shared_ptr slashing_manager, const addr_t& node_addr, + const std::string& logs_prefix = "GET_NEXT_VOTES_BUNDLE_PH"); + + // Packet type that is processed by this handler + static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::kGetNextVotesSyncPacket; + + private: + virtual void validatePacketRlpFormat(const threadpool::PacketData& packet_data) const override; + virtual void process(const threadpool::PacketData& packet_data, const std::shared_ptr& peer) override; +}; + +} // namespace taraxa::network::tarcap::v4 diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/get_pbft_sync_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/get_pbft_sync_packet_handler.hpp new file mode 100644 index 0000000000..a736b34c5c --- /dev/null +++ 
b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/get_pbft_sync_packet_handler.hpp @@ -0,0 +1,42 @@ +#pragma once + +#include "common/packet_handler.hpp" + +namespace taraxa { +class PbftChain; +class DbStorage; +class VoteManager; +} // namespace taraxa + +namespace taraxa::network::tarcap { +class PbftSyncingState; +} + +namespace taraxa::network::tarcap::v4 { + +class GetPbftSyncPacketHandler : public PacketHandler { + public: + GetPbftSyncPacketHandler(const FullNodeConfig& conf, std::shared_ptr peers_state, + std::shared_ptr packets_stats, + std::shared_ptr pbft_syncing_state, std::shared_ptr pbft_chain, + std::shared_ptr vote_mgr, std::shared_ptr db, + const addr_t& node_addr, const std::string& logs_prefix = "GET_PBFT_SYNC_PH"); + + // Packet type that is processed by this handler + static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::kGetPbftSyncPacket; + + private: + virtual void validatePacketRlpFormat(const threadpool::PacketData& packet_data) const override; + virtual void process(const threadpool::PacketData& packet_data, const std::shared_ptr& peer) override; + + virtual void sendPbftBlocks(const std::shared_ptr& peer, PbftPeriod from_period, + size_t blocks_to_transfer, bool pbft_chain_synced); + + protected: + std::shared_ptr pbft_syncing_state_; + std::shared_ptr pbft_chain_; + std::shared_ptr vote_mgr_; + std::shared_ptr db_; +}; + +} // namespace taraxa::network::tarcap::v4 diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/get_pillar_votes_bundle_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/get_pillar_votes_bundle_packet_handler.hpp new file mode 100644 index 0000000000..0d56947afa --- /dev/null +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/get_pillar_votes_bundle_packet_handler.hpp @@ -0,0 +1,31 @@ +#pragma once + +#include "common/packet_handler.hpp" +#include 
"pillar_chain/pillar_chain_manager.hpp" + +namespace taraxa::network::tarcap::v4 { + +class GetPillarVotesBundlePacketHandler : public PacketHandler { + public: + GetPillarVotesBundlePacketHandler(const FullNodeConfig& conf, std::shared_ptr peers_state, + std::shared_ptr packets_stats, + std::shared_ptr pillar_chain_manager, + const addr_t& node_addr, const std::string& logs_prefix); + + void requestPillarVotesBundle(PbftPeriod period, const blk_hash_t& pillar_block_hash, + const std::shared_ptr& peer); + + // Packet type that is processed by this handler + static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::kGetPillarVotesBundlePacket; + + private: + virtual void validatePacketRlpFormat(const threadpool::PacketData& packet_data) const override; + virtual void process(const threadpool::PacketData& packet_data, const std::shared_ptr& peer) override; + + protected: + constexpr static size_t kGetPillarVotesBundlePacketSize{2}; + + std::shared_ptr pillar_chain_manager_; +}; + +} // namespace taraxa::network::tarcap::v4 diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/pbft_sync_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/pbft_sync_packet_handler.hpp index 32b7eb88a6..f00d044f93 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/pbft_sync_packet_handler.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/pbft_sync_packet_handler.hpp @@ -1,13 +1,12 @@ #pragma once +#include "common/ext_syncing_packet_handler.hpp" #include "common/thread_pool.hpp" -#include "network/tarcap/packets/v4/pbft_sync_packet.hpp" -#include "network/tarcap/packets_handlers/v4/common/ext_syncing_packet_handler.hpp" #include "vote_manager/vote_manager.hpp" namespace taraxa::network::tarcap::v4 { -class PbftSyncPacketHandler : public v4::ExtSyncingPacketHandler { +class PbftSyncPacketHandler : public ExtSyncingPacketHandler { public: 
PbftSyncPacketHandler(const FullNodeConfig& conf, std::shared_ptr peers_state, std::shared_ptr packets_stats, @@ -22,7 +21,8 @@ class PbftSyncPacketHandler : public v4::ExtSyncingPacketHandler& peer) override; + virtual void validatePacketRlpFormat(const threadpool::PacketData& packet_data) const override; + virtual void process(const threadpool::PacketData& packet_data, const std::shared_ptr& peer) override; protected: virtual PeriodData decodePeriodData(const dev::RLP& period_data_rlp) const; @@ -35,6 +35,9 @@ class PbftSyncPacketHandler : public v4::ExtSyncingPacketHandler vote_mgr_; util::ThreadPool periodic_events_tp_; + + static constexpr size_t kStandardPacketSize = 2; + static constexpr size_t kChainSyncedPacketSize = 3; }; } // namespace taraxa::network::tarcap::v4 diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/pillar_vote_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/pillar_vote_packet_handler.hpp index 8b9ad59bda..a94781ca19 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/pillar_vote_packet_handler.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/pillar_vote_packet_handler.hpp @@ -1,11 +1,10 @@ #pragma once -#include "network/tarcap/packets/v4/pillar_vote_packet.hpp" -#include "network/tarcap/packets_handlers/latest/common/ext_pillar_vote_packet_handler.hpp" +#include "network/tarcap/packets_handlers/v4/common/ext_pillar_vote_packet_handler.hpp" namespace taraxa::network::tarcap::v4 { -class PillarVotePacketHandler : public ExtPillarVotePacketHandler { +class PillarVotePacketHandler : public ExtPillarVotePacketHandler { public: PillarVotePacketHandler(const FullNodeConfig& conf, std::shared_ptr peers_state, std::shared_ptr packets_stats, @@ -19,7 +18,8 @@ class PillarVotePacketHandler : public ExtPillarVotePacketHandler& peer) override; + virtual void validatePacketRlpFormat(const threadpool::PacketData& 
packet_data) const override; + virtual void process(const threadpool::PacketData& packet_data, const std::shared_ptr& peer) override; }; } // namespace taraxa::network::tarcap::v4 diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/pillar_votes_bundle_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/pillar_votes_bundle_packet_handler.hpp index ac1197e9b2..40cc624119 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/pillar_votes_bundle_packet_handler.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/pillar_votes_bundle_packet_handler.hpp @@ -1,11 +1,10 @@ #pragma once -#include "network/tarcap/packets/v4/pillar_votes_bundle_packet.hpp" -#include "network/tarcap/packets_handlers/latest/common/ext_pillar_vote_packet_handler.hpp" +#include "network/tarcap/packets_handlers/v4/common/ext_pillar_vote_packet_handler.hpp" namespace taraxa::network::tarcap::v4 { -class PillarVotesBundlePacketHandler : public ExtPillarVotePacketHandler { +class PillarVotesBundlePacketHandler : public ExtPillarVotePacketHandler { public: PillarVotesBundlePacketHandler(const FullNodeConfig& conf, std::shared_ptr peers_state, std::shared_ptr packets_stats, @@ -16,7 +15,11 @@ class PillarVotesBundlePacketHandler : public ExtPillarVotePacketHandler& peer) override; + virtual void validatePacketRlpFormat(const threadpool::PacketData& packet_data) const override; + virtual void process(const threadpool::PacketData& packet_data, const std::shared_ptr& peer) override; + + public: + constexpr static size_t kMaxPillarVotesInBundleRlp{250}; }; } // namespace taraxa::network::tarcap::v4 diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/status_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/status_packet_handler.hpp index df62476e36..7e6978c91e 100644 --- 
a/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/status_packet_handler.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/status_packet_handler.hpp @@ -1,11 +1,10 @@ #pragma once -#include "network/tarcap/packets/v4/status_packet.hpp" -#include "network/tarcap/packets_handlers/v4/common/ext_syncing_packet_handler.hpp" +#include "common/ext_syncing_packet_handler.hpp" namespace taraxa::network::tarcap::v4 { -class StatusPacketHandler : public v4::ExtSyncingPacketHandler { +class StatusPacketHandler : public ExtSyncingPacketHandler { public: StatusPacketHandler(const FullNodeConfig& conf, std::shared_ptr peers_state, std::shared_ptr packets_stats, @@ -21,9 +20,13 @@ class StatusPacketHandler : public v4::ExtSyncingPacketHandler static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::kStatusPacket; private: - virtual void process(StatusPacket&& packet, const std::shared_ptr& peer) override; + virtual void validatePacketRlpFormat(const threadpool::PacketData& packet_data) const override; + virtual void process(const threadpool::PacketData& packet_data, const std::shared_ptr& peer) override; protected: + static constexpr uint16_t kInitialStatusPacketItemsCount = 11; + static constexpr uint16_t kStandardStatusPacketItemsCount = 4; + const h256 kGenesisHash; }; diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/transaction_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/transaction_packet_handler.hpp index 65c7f215e0..f40167e567 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/transaction_packet_handler.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/transaction_packet_handler.hpp @@ -1,7 +1,6 @@ #pragma once -#include "network/tarcap/packets/v4/transaction_packet.hpp" -#include "network/tarcap/packets_handlers/latest/common/packet_handler.hpp" +#include 
"common/packet_handler.hpp" #include "transaction/transaction.hpp" namespace taraxa { @@ -11,7 +10,9 @@ enum class TransactionStatus; namespace taraxa::network::tarcap::v4 { -class TransactionPacketHandler : public PacketHandler { +class TestState; + +class TransactionPacketHandler : public PacketHandler { public: TransactionPacketHandler(const FullNodeConfig& conf, std::shared_ptr peers_state, std::shared_ptr packets_stats, @@ -43,7 +44,8 @@ class TransactionPacketHandler : public PacketHandler { static constexpr uint32_t kTransactionPacketItemCount = 2; private: - virtual void process(TransactionPacket&& packet, const std::shared_ptr& peer) override; + virtual void validatePacketRlpFormat(const threadpool::PacketData& packet_data) const override; + virtual void process(const threadpool::PacketData& packet_data, const std::shared_ptr& peer) override; protected: /** diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/vote_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/vote_packet_handler.hpp index 9c174408ab..aa24da7787 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/vote_packet_handler.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/vote_packet_handler.hpp @@ -1,11 +1,10 @@ #pragma once -#include "network/tarcap/packets/v4/vote_packet.hpp" -#include "network/tarcap/packets_handlers/v4/common/ext_votes_packet_handler.hpp" +#include "common/ext_votes_packet_handler.hpp" namespace taraxa::network::tarcap::v4 { -class VotePacketHandler : public v4::ExtVotesPacketHandler { +class VotePacketHandler : public ExtVotesPacketHandler { public: VotePacketHandler(const FullNodeConfig& conf, std::shared_ptr peers_state, std::shared_ptr packets_stats, std::shared_ptr pbft_mgr, @@ -29,7 +28,12 @@ class VotePacketHandler : public v4::ExtVotesPacketHandler { static constexpr SubprotocolPacketType kPacketType_ = 
SubprotocolPacketType::kVotePacket; private: - virtual void process(VotePacket&& packet, const std::shared_ptr& peer) override; + virtual void validatePacketRlpFormat(const threadpool::PacketData& packet_data) const override; + virtual void process(const threadpool::PacketData& packet_data, const std::shared_ptr& peer) override; + + protected: + const size_t kVotePacketSize{1}; + const size_t kExtendedVotePacketSize{3}; }; } // namespace taraxa::network::tarcap::v4 diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/votes_bundle_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/votes_bundle_packet_handler.hpp index 848784b3ec..e42e33c2d2 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/votes_bundle_packet_handler.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/votes_bundle_packet_handler.hpp @@ -1,11 +1,10 @@ #pragma once -#include "network/tarcap/packets/v4/votes_bundle_packet.hpp" -#include "network/tarcap/packets_handlers/v4/common/ext_votes_packet_handler.hpp" +#include "common/ext_votes_packet_handler.hpp" namespace taraxa::network::tarcap::v4 { -class VotesBundlePacketHandler : public v4::ExtVotesPacketHandler { +class VotesBundlePacketHandler : public ExtVotesPacketHandler { public: VotesBundlePacketHandler(const FullNodeConfig& conf, std::shared_ptr peers_state, std::shared_ptr packets_stats, std::shared_ptr pbft_mgr, @@ -27,7 +26,8 @@ class VotesBundlePacketHandler : public v4::ExtVotesPacketHandler& peer) override; + virtual void validatePacketRlpFormat(const threadpool::PacketData& packet_data) const override; + virtual void process(const threadpool::PacketData& packet_data, const std::shared_ptr& peer) override; }; } // namespace taraxa::network::tarcap::v4 diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/common/ext_bls_sig_packet_handler.cpp 
b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/common/ext_bls_sig_packet_handler.cpp deleted file mode 100644 index fe4ce45241..0000000000 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/common/ext_bls_sig_packet_handler.cpp +++ /dev/null @@ -1,37 +0,0 @@ -// #include "network/tarcap/packets_handlers/latest/common/ext_pillar_vote_packet_handler.hpp" -// #include "pillar_chain/pillar_chain_manager.hpp" -// -// namespace taraxa::network::tarcap { -// -// ExtPillarVotePacketHandler::ExtPillarVotePacketHandler( -// const FullNodeConfig &conf, std::shared_ptr peers_state, -// std::shared_ptr packets_stats, -// std::shared_ptr pillar_chain_manager, const addr_t &node_addr, -// const std::string &log_channel) -// : PacketHandler(conf, std::move(peers_state), std::move(packets_stats), node_addr, log_channel), -// pillar_chain_manager_{std::move(pillar_chain_manager)} {} -// -// bool ExtPillarVotePacketHandler::processPillarVote(const std::shared_ptr &vote, -// const std::shared_ptr &peer) { -// if (!pillar_chain_manager_->isRelevantPillarVote(vote)) { -// LOG(log_dg_) << "Drop irrelevant pillar vote " << vote->getHash() << ", period " << vote->getPeriod() -// << " from peer " << peer->getId(); -// return false; -// } -// -// if (!pillar_chain_manager_->validatePillarVote(vote)) { -// // TODO: enable for mainnet -// // std::ostringstream err_msg; -// // err_msg << "Invalid pillar vote " << vote->getHash() << " from peer " << peer->getId(); -// // throw MaliciousPeerException(err_msg.str()); -// return false; -// } -// -// pillar_chain_manager_->addVerifiedPillarVote(vote); -// -// // Mark pillar vote as known for peer -// peer->markPillarVoteAsKnown(vote->getHash()); -// return true; -// } -// -// } // namespace taraxa::network::tarcap diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/common/ext_syncing_packet_handler.cpp 
b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/common/ext_syncing_packet_handler.cpp deleted file mode 100644 index 5ea59761bc..0000000000 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/common/ext_syncing_packet_handler.cpp +++ /dev/null @@ -1,169 +0,0 @@ -// #include "network/tarcap/packets_handlers/latest/common/ext_syncing_packet_handler.hpp" -// -// #include "network/tarcap/shared_states/pbft_syncing_state.hpp" -// #include "pbft/pbft_chain.hpp" -// #include "pbft/pbft_manager.hpp" -// -// namespace taraxa::network::tarcap { -// -// ExtSyncingPacketHandler::ExtSyncingPacketHandler(const FullNodeConfig &conf, std::shared_ptr peers_state, -// std::shared_ptr packets_stats, -// std::shared_ptr pbft_syncing_state, -// std::shared_ptr pbft_chain, -// std::shared_ptr pbft_mgr, -// std::shared_ptr dag_mgr, std::shared_ptr db, -// const addr_t &node_addr, const std::string &log_channel_name) -// : PacketHandler(conf, std::move(peers_state), std::move(packets_stats), node_addr, log_channel_name), -// pbft_syncing_state_(std::move(pbft_syncing_state)), -// pbft_chain_(std::move(pbft_chain)), -// pbft_mgr_(std::move(pbft_mgr)), -// dag_mgr_(std::move(dag_mgr)), -// db_(std::move(db)) {} -// -// void ExtSyncingPacketHandler::startSyncingPbft() { -// if (pbft_syncing_state_->isPbftSyncing()) { -// LOG(log_dg_) << "startSyncingPbft called but syncing_ already true"; -// return; -// } -// -// std::shared_ptr peer = getMaxChainPeer(); -// if (!peer) { -// LOG(log_nf_) << "Restarting syncing PBFT not possible since no connected peers"; -// return; -// } -// -// auto pbft_sync_period = pbft_mgr_->pbftSyncingPeriod(); -// if (peer->pbft_chain_size_ > pbft_sync_period) { -// auto peer_id = peer->getId().abridged(); -// auto peer_pbft_chain_size = peer->pbft_chain_size_.load(); -// if (!pbft_syncing_state_->setPbftSyncing(true, pbft_sync_period, std::move(peer))) { -// LOG(log_dg_) << "startSyncingPbft called but syncing_ already true"; -// 
return; -// } -// LOG(log_si_) << "Restarting syncing PBFT from peer " << peer_id << ", peer PBFT chain size " << -// peer_pbft_chain_size -// << ", own PBFT chain synced at period " << pbft_sync_period; -// -// if (syncPeerPbft(pbft_sync_period + 1)) { -// // Disable snapshots only if are syncing from scratch -// if (pbft_syncing_state_->isDeepPbftSyncing()) { -// db_->disableSnapshots(); -// } -// } else { -// pbft_syncing_state_->setPbftSyncing(false); -// } -// } else { -// LOG(log_nf_) << "Restarting syncing PBFT not needed since our pbft chain size: " << pbft_sync_period << "(" -// << pbft_chain_->getPbftChainSize() << ")" -// << " is greater or equal than max node pbft chain size:" << peer->pbft_chain_size_; -// db_->enableSnapshots(); -// } -// } -// bool ExtSyncingPacketHandler::syncPeerPbft(PbftPeriod request_period) { -// const auto syncing_peer = pbft_syncing_state_->syncingPeer(); -// if (!syncing_peer) { -// LOG(log_er_) << "Unable to send GetPbftSyncPacket. No syncing peer set."; -// return false; -// } -// -// if (request_period > syncing_peer->pbft_chain_size_) { -// LOG(log_wr_) << "Invalid syncPeerPbft argument. 
Node " << syncing_peer->getId() << " chain size " -// << syncing_peer->pbft_chain_size_ << ", requested period " << request_period; -// return false; -// } -// -// LOG(log_nf_) << "Send GetPbftSyncPacket with period " << request_period << " to node " << syncing_peer->getId(); -// return sealAndSend(syncing_peer->getId(), SubprotocolPacketType::kGetPbftSyncPacket, -// std::move(dev::RLPStream(1) << request_period)); -// } -// -// std::shared_ptr ExtSyncingPacketHandler::getMaxChainPeer( -// std::function &)> filter_func) { -// std::shared_ptr max_pbft_chain_peer; -// PbftPeriod max_pbft_chain_size = 0; -// uint64_t max_node_dag_level = 0; -// -// // Find peer with max pbft chain and dag level -// for (auto const &peer : peers_state_->getAllPeers()) { -// // Apply the filter function -// if (!filter_func(peer.second)) { -// continue; -// } -// -// if (peer.second->pbft_chain_size_ > max_pbft_chain_size) { -// if (peer.second->peer_light_node && -// pbft_mgr_->pbftSyncingPeriod() + peer.second->peer_light_node_history < peer.second->pbft_chain_size_) { -// LOG(log_er_) << "Disconnecting from light node peer " << peer.first -// << " History: " << peer.second->peer_light_node_history -// << " chain size: " << peer.second->pbft_chain_size_; -// disconnect(peer.first, dev::p2p::UserReason); -// continue; -// } -// max_pbft_chain_size = peer.second->pbft_chain_size_; -// max_node_dag_level = peer.second->dag_level_; -// max_pbft_chain_peer = peer.second; -// } else if (peer.second->pbft_chain_size_ == max_pbft_chain_size && peer.second->dag_level_ > max_node_dag_level) -// { -// max_node_dag_level = peer.second->dag_level_; -// max_pbft_chain_peer = peer.second; -// } -// } -// return max_pbft_chain_peer; -// } -// -// void ExtSyncingPacketHandler::requestPendingDagBlocks(std::shared_ptr peer) { -// if (!peer) { -// peer = getMaxChainPeer([](const std::shared_ptr &peer) { -// if (peer->peer_dag_synced_ || !peer->dagSyncingAllowed()) { -// return false; -// } -// return 
true; -// }); -// if (!peer) { -// LOG(log_nf_) << "requestPendingDagBlocks not possible since no peers are matching conditions"; -// return; -// } -// } -// -// if (!peer) { -// LOG(log_nf_) << "requestPendingDagBlocks not possible since no connected peers"; -// return; -// } -// -// // This prevents ddos requesting dag blocks. We can only request this one time from one peer. -// if (peer->peer_dag_synced_) { -// LOG(log_nf_) << "requestPendingDagBlocks not possible since already requested for peer"; -// return; -// } -// -// // Only request dag blocks if periods are matching -// auto pbft_sync_period = pbft_mgr_->pbftSyncingPeriod(); -// if (pbft_sync_period == peer->pbft_chain_size_) { -// // This prevents parallel requests -// if (bool b = false; !peer->peer_dag_syncing_.compare_exchange_strong(b, !b)) { -// LOG(log_nf_) << "requestPendingDagBlocks not possible since already requesting for peer"; -// return; -// } -// LOG(log_nf_) << "Request pending blocks from peer " << peer->getId(); -// std::unordered_set known_non_finalized_blocks; -// auto [period, blocks] = dag_mgr_->getNonFinalizedBlocks(); -// for (auto &level_blocks : blocks) { -// for (auto &block : level_blocks.second) { -// known_non_finalized_blocks.insert(block); -// } -// } -// -// requestDagBlocks(peer->getId(), known_non_finalized_blocks, period); -// } -// } -// -// void ExtSyncingPacketHandler::requestDagBlocks(const dev::p2p::NodeID &_nodeID, -// const std::unordered_set &blocks, PbftPeriod period) { -// dev::RLPStream s(2); // Period + blocks list -// s.append(period); -// s.append(blocks); -// -// sealAndSend(_nodeID, SubprotocolPacketType::kGetDagSyncPacket, std::move(s)); -// } -// -// } // namespace taraxa::network::tarcap diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/common/ext_votes_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/common/ext_votes_packet_handler.cpp deleted file mode 100644 index 4f9ad37be4..0000000000 
--- a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/common/ext_votes_packet_handler.cpp +++ /dev/null @@ -1,228 +0,0 @@ -// #include "network/tarcap/packets_handlers/latest/common/ext_votes_packet_handler.hpp" -// -// #include "network/tarcap/packets_handlers/latest/common/exceptions.hpp" -// #include "pbft/pbft_manager.hpp" -// #include "vote/pbft_vote.hpp" -// #include "vote/votes_bundle_rlp.hpp" -// #include "vote_manager/vote_manager.hpp" -// -// namespace taraxa::network::tarcap { -// -// ExtVotesPacketHandler::ExtVotesPacketHandler(const FullNodeConfig &conf, std::shared_ptr peers_state, -// std::shared_ptr packets_stats, -// std::shared_ptr pbft_mgr, -// std::shared_ptr pbft_chain, -// std::shared_ptr vote_mgr, -// std::shared_ptr slashing_manager, const addr_t -// &node_addr, const std::string &log_channel_name) -// : PacketHandler(conf, std::move(peers_state), std::move(packets_stats), node_addr, log_channel_name), -// last_votes_sync_request_time_(std::chrono::system_clock::now()), -// last_pbft_block_sync_request_time_(std::chrono::system_clock::now()), -// pbft_mgr_(std::move(pbft_mgr)), -// pbft_chain_(std::move(pbft_chain)), -// vote_mgr_(std::move(vote_mgr)), -// slashing_manager_(std::move(slashing_manager)) {} -// -// bool ExtVotesPacketHandler::processVote(const std::shared_ptr &vote, -// const std::shared_ptr &pbft_block, -// const std::shared_ptr &peer, bool validate_max_round_step) { -// if (pbft_block && !validateVoteAndBlock(vote, pbft_block)) { -// throw MaliciousPeerException("Received vote's voted value != received pbft block"); -// } -// -// if (vote_mgr_->voteInVerifiedMap(vote)) { -// LOG(log_dg_) << "Vote " << vote->getHash() << " already inserted in verified queue"; -// return false; -// } -// -// // Validate vote's period, round and step min/max values -// if (const auto vote_valid = validateVotePeriodRoundStep(vote, peer, validate_max_round_step); !vote_valid.first) { -// LOG(log_wr_) << "Vote period/round/step " << 
vote->getHash() << " validation failed. Err: " << vote_valid.second; -// return false; -// } -// -// // Check is vote is unique per period, round & step & voter -> each address can generate just 1 vote -// // (for a value that isn't NBH) per period, round & step -// if (auto vote_valid = vote_mgr_->isUniqueVote(vote); !vote_valid.first) { -// // Create double voting proof -// slashing_manager_->submitDoubleVotingProof(vote, vote_valid.second); -// throw MaliciousPeerException("Received double vote", vote->getVoter()); -// } -// -// // Validate vote's signature, vrf, etc... -// if (const auto vote_valid = vote_mgr_->validateVote(vote); !vote_valid.first) { -// LOG(log_wr_) << "Vote " << vote->getHash() << " validation failed. Err: " << vote_valid.second; -// return false; -// } -// -// if (!vote_mgr_->addVerifiedVote(vote)) { -// LOG(log_dg_) << "Vote " << vote->getHash() << " already inserted in verified queue(race condition)"; -// return false; -// } -// -// if (pbft_block) { -// pbft_mgr_->processProposedBlock(pbft_block, vote); -// } -// -// return true; -// } -// -// std::pair ExtVotesPacketHandler::validateVotePeriodRoundStep(const std::shared_ptr -// &vote, -// const std::shared_ptr -// &peer, bool validate_max_round_step) -// { -// const auto [current_pbft_round, current_pbft_period] = pbft_mgr_->getPbftRoundAndPeriod(); -// -// auto genErrMsg = [period = current_pbft_period, round = current_pbft_round, -// step = pbft_mgr_->getPbftStep()](const std::shared_ptr &vote) -> std::string { -// std::stringstream err; -// err << "Vote " << vote->getHash() << " (period, round, step) = (" << vote->getPeriod() << ", " << -// vote->getRound() -// << ", " << vote->getStep() << "). 
Current PBFT (period, round, step) = (" << period << ", " << round << ", " -// << step << ")"; -// return err.str(); -// }; -// -// // Period validation -// // vote->getPeriod() == current_pbft_period - 1 && cert_vote -> potential reward vote -// if (vote->getPeriod() < current_pbft_period - 1 || -// (vote->getPeriod() == current_pbft_period - 1 && vote->getType() != PbftVoteTypes::cert_vote)) { -// return {false, "Invalid period(too small): " + genErrMsg(vote)}; -// } else if (kConf.network.ddos_protection.vote_accepting_periods && -// vote->getPeriod() - 1 > current_pbft_period + kConf.network.ddos_protection.vote_accepting_periods) { -// // skip this check if kConf.network.ddos_protection.vote_accepting_periods == 0 -// // vote->getPeriod() - 1 is here because votes are validated against vote_period - 1 in dpos contract -// // Do not request round sync too often here -// if (vote->getVoter() == peer->getId() && -// std::chrono::system_clock::now() - last_pbft_block_sync_request_time_ > kSyncRequestInterval) { -// // request PBFT chain sync from this node -// sealAndSend(peer->getId(), SubprotocolPacketType::kGetPbftSyncPacket, -// std::move(dev::RLPStream(1) << std::max(vote->getPeriod() - 1, peer->pbft_chain_size_.load()))); -// last_pbft_block_sync_request_time_ = std::chrono::system_clock::now(); -// } -// -// return {false, "Invalid period(too big): " + genErrMsg(vote)}; -// } -// -// // Round validation -// auto checking_round = current_pbft_round; -// // If period is not the same we assume current round is equal to 1 -// // So we won't accept votes for future period with round bigger than kConf.network.vote_accepting_steps -// if (current_pbft_period != vote->getPeriod()) { -// checking_round = 1; -// } -// -// // vote->getRound() == checking_round - 1 && next_vote -> previous round next vote -// if (vote->getRound() < checking_round - 1 || -// (vote->getRound() == checking_round - 1 && vote->getType() != PbftVoteTypes::next_vote)) { -// return {false, 
"Invalid round(too small): " + genErrMsg(vote)}; -// } else if (validate_max_round_step && kConf.network.ddos_protection.vote_accepting_rounds && -// vote->getRound() >= checking_round + kConf.network.ddos_protection.vote_accepting_rounds) { -// // skip this check if kConf.network.vote_accepting_rounds == 0 -// // Trigger votes(round) syncing only if we are in sync in terms of period -// if (current_pbft_period == vote->getPeriod()) { -// // Do not request round sync too often here -// if (vote->getVoter() == peer->getId() && -// std::chrono::system_clock::now() - last_votes_sync_request_time_ > kSyncRequestInterval) { -// // request round votes sync from this node -// requestPbftNextVotesAtPeriodRound(peer->getId(), current_pbft_period, current_pbft_round); -// last_votes_sync_request_time_ = std::chrono::system_clock::now(); -// } -// } -// -// return {false, "Invalid round(too big): " + genErrMsg(vote)}; -// } -// -// // Step validation -// auto checking_step = pbft_mgr_->getPbftStep(); -// // If period or round is not the same we assume current step is equal to 1 -// // So we won't accept votes for future rounds with step bigger than kConf.network.vote_accepting_steps -// if (current_pbft_period != vote->getPeriod() || current_pbft_round != vote->getRound()) { -// checking_step = 1; -// } -// -// // skip check if kConf.network.vote_accepting_steps == 0 -// if (validate_max_round_step && kConf.network.ddos_protection.vote_accepting_steps && -// vote->getStep() >= checking_step + kConf.network.ddos_protection.vote_accepting_steps) { -// return {false, "Invalid step(too big): " + genErrMsg(vote)}; -// } -// -// return {true, ""}; -// } -// -// bool ExtVotesPacketHandler::validateVoteAndBlock(const std::shared_ptr &vote, -// const std::shared_ptr &pbft_block) const { -// if (pbft_block->getBlockHash() != vote->getBlockHash()) { -// LOG(log_er_) << "Vote " << vote->getHash() << " voted block " << vote->getBlockHash() << " != actual block " -// << 
pbft_block->getBlockHash(); -// return false; -// } -// return true; -// } -// -// bool ExtVotesPacketHandler::isPbftRelevantVote(const std::shared_ptr &vote) const { -// const auto [current_pbft_round, current_pbft_period] = pbft_mgr_->getPbftRoundAndPeriod(); -// -// if (vote->getPeriod() >= current_pbft_period && vote->getRound() >= current_pbft_round) { -// // Standard current or future vote -// return true; -// } else if (vote->getPeriod() == current_pbft_period && vote->getRound() == (current_pbft_round - 1) && -// vote->getType() == PbftVoteTypes::next_vote) { -// // Previous round next vote -// return true; -// } else if (vote->getPeriod() == current_pbft_period - 1 && vote->getType() == PbftVoteTypes::cert_vote) { -// // Previous period cert vote - potential reward vote -// return true; -// } -// -// return false; -// } -// -// void ExtVotesPacketHandler::sendPbftVotesBundle(const std::shared_ptr &peer, -// std::vector> &&votes) { -// if (votes.empty()) { -// return; -// } -// -// auto sendVotes = [this, &peer](std::vector> &&votes) { -// auto votes_bytes = encodePbftVotesBundleRlp(std::move(votes)); -// if (votes_bytes.empty()) { -// LOG(log_er_) << "Unable to send VotesBundle rlp"; -// return; -// } -// -// dev::RLPStream votes_rlp_stream; -// votes_rlp_stream.appendRaw(votes_bytes); -// -// if (sealAndSend(peer->getId(), SubprotocolPacketType::kVotesBundlePacket, std::move(votes_rlp_stream))) { -// LOG(log_dg_) << " Votes bundle with " << votes.size() << " votes sent to " << peer->getId(); -// for (const auto &vote : votes) { -// peer->markPbftVoteAsKnown(vote->getHash()); -// } -// } -// }; -// -// if (votes.size() <= kMaxVotesInBundleRlp) { -// sendVotes(std::move(votes)); -// return; -// } else { -// // Need to split votes into multiple packets -// size_t index = 0; -// while (index < votes.size()) { -// const size_t votes_count = std::min(kMaxVotesInBundleRlp, votes.size() - index); -// -// const auto begin_it = std::next(votes.begin(), index); -// 
const auto end_it = std::next(begin_it, votes_count); -// -// std::vector> votes_sub_vector; -// std::move(begin_it, end_it, std::back_inserter(votes_sub_vector)); -// -// sendVotes(std::move(votes_sub_vector)); -// -// index += votes_count; -// } -// } -// } -// -// } // namespace taraxa::network::tarcap diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/common/packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/common/packet_handler.cpp deleted file mode 100644 index f623f3965a..0000000000 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/common/packet_handler.cpp +++ /dev/null @@ -1,149 +0,0 @@ -// #include "network/tarcap/packets_handlers/latest/common/packet_handler.hpp" -// -// #include "network/tarcap/packets_handlers/latest/common/exceptions.hpp" -// #include "network/tarcap/stats/time_period_packets_stats.hpp" -// -// namespace taraxa::network::tarcap { -// -// PacketHandler::PacketHandler(const FullNodeConfig& conf, std::shared_ptr peers_state, -// std::shared_ptr packets_stats, const addr_t& node_addr, -// const std::string& log_channel_name) -// : kConf(conf), peers_state_(std::move(peers_state)), packets_stats_(std::move(packets_stats)) { -// LOG_OBJECTS_CREATE(log_channel_name); -// } -// -// void PacketHandler::checkPacketRlpIsList(const threadpool::PacketData& packet_data) const { -// if (!packet_data.rlp_.isList()) { -// throw InvalidRlpItemsCountException(packet_data.type_str_ + " RLP must be a list. 
", 0, 1); -// } -// } -// -// void PacketHandler::processPacket(const PacketType& packet_data) { -// try { -// const auto begin = std::chrono::steady_clock::now(); -// -// // It can rarely happen that packet was received and pushed into the queue when peer was still in peers map, -// // in the meantime the connection was lost and we started to process packet from such peer -// const auto peer = peers_state_->getPacketSenderPeer(packet_data.from_node_id_, packet_data.type_); -// if (!peer.first) [[unlikely]] { -// LOG(log_wr_) << "Unable to process packet. Reason: " << peer.second; -// disconnect(packet_data.from_node_id_, dev::p2p::UserReason); -// return; -// } -// -// // Validates packet rlp format -// // In case there is a type mismatch, one of the dev::RLPException's is thrown during further parsing in process -// // function -// checkPacketRlpIsList(packet_data); -// validatePacketRlpFormat(packet_data); -// -// // TODO: call decode function and remove checkPacketRlpIsList & validatePacketRlpFormat -// PacketType packet{packet_data}; -// packet.decocode(); -// -// // Main processing function -// process(packet, peer.first); -// -// auto processing_duration = -// std::chrono::duration_cast(std::chrono::steady_clock::now() - begin); -// auto tp_wait_duration = std::chrono::duration_cast(std::chrono::steady_clock::now() - -// packet_data.receive_time_); -// -// PacketStats packet_stats{1 /* count */, packet_data.rlp_.data().size(), processing_duration, tp_wait_duration}; -// peer.first->addSentPacket(packet_data.type_str_, packet_stats); -// -// if (kConf.network.ddos_protection.log_packets_stats) { -// packets_stats_->addReceivedPacket(packet_data.type_str_, packet_data.from_node_id_, packet_stats); -// } -// -// } catch (const MaliciousPeerException& e) { -// // thrown during packets processing -> malicious peer, invalid rlp items count, ... 
-// // If there is custom peer set in exception, disconnect him, not packet sender -// if (const auto custom_peer = e.getPeer(); custom_peer.has_value()) { -// handle_caught_exception(e.what(), packet_data, *custom_peer, e.getDisconnectReason(), -// true /* set peer as malicious */); -// } else { -// handle_caught_exception(e.what(), packet_data, packet_data.from_node_id_, e.getDisconnectReason(), -// true /* set peer as malicious */); -// } -// } catch (const PacketProcessingException& e) { -// // thrown during packets processing... -// handle_caught_exception(e.what(), packet_data, packet_data.from_node_id_, e.getDisconnectReason(), -// true /* set peer as malicious */); -// } catch (const dev::RLPException& e) { -// // thrown during parsing inside aleth/libdevcore -> type mismatch -// handle_caught_exception(e.what(), packet_data, packet_data.from_node_id_, -// dev::p2p::DisconnectReason::BadProtocol, -// true /* set peer as malicious */); -// } catch (const std::exception& e) { -// handle_caught_exception(e.what(), packet_data, packet_data.from_node_id_); -// } catch (...) { -// handle_caught_exception("Unknown exception", packet_data, packet_data.from_node_id_); -// } -// } -// -// void PacketHandler::handle_caught_exception(std::string_view exception_msg, const threadpool::PacketData& -// packet_data, -// const dev::p2p::NodeID& peer, dev::p2p::DisconnectReason -// disconnect_reason, bool set_peer_as_malicious) { -// LOG(log_er_) << "Exception caught during packet processing: " << exception_msg << " ." 
-// << "PacketData: " << jsonToUnstyledString(packet_data.getPacketDataJson()) -// << ", disconnect peer: " << peer.toString(); -// -// if (set_peer_as_malicious) { -// peers_state_->set_peer_malicious(peer); -// } -// -// disconnect(peer, disconnect_reason); -// } -// -// bool PacketHandler::sealAndSend(const dev::p2p::NodeID& node_id, SubprotocolPacketType packet_type, -// dev::RLPStream&& rlp) { -// auto host = peers_state_->host_.lock(); -// if (!host) { -// LOG(log_er_) << "sealAndSend failed to obtain host"; -// return false; -// } -// -// if (const auto peer = peers_state_->getPacketSenderPeer(node_id, packet_type); !peer.first) [[unlikely]] { -// LOG(log_wr_) << "Unable to send packet. Reason: " << peer.second; -// host->disconnect(node_id, dev::p2p::UserReason); -// return false; -// } -// -// const auto begin = std::chrono::steady_clock::now(); -// const size_t packet_size = rlp.out().size(); -// -// host->send(node_id, TARAXA_CAPABILITY_NAME, packet_type, rlp.invalidate(), -// [begin, node_id, packet_size, packet_type, this]() { -// if (!kConf.network.ddos_protection.log_packets_stats) { -// return; -// } -// -// PacketStats packet_stats{ -// 1 /* count */, packet_size, -// std::chrono::duration_cast(std::chrono::steady_clock::now() - begin), -// std::chrono::microseconds{0}}; -// -// packets_stats_->addSentPacket(convertPacketTypeToString(packet_type), node_id, packet_stats); -// }); -// -// return true; -// } -// -// void PacketHandler::disconnect(const dev::p2p::NodeID& node_id, dev::p2p::DisconnectReason reason) { -// if (auto host = peers_state_->host_.lock(); host) { -// LOG(log_nf_) << "Disconnect node " << node_id.abridged(); -// host->disconnect(node_id, reason); -// } else { -// LOG(log_er_) << "Unable to disconnect node " << node_id.abridged() << " due to invalid host."; -// } -// } -// -// void PacketHandler::requestPbftNextVotesAtPeriodRound(const dev::p2p::NodeID& peerID, PbftPeriod pbft_period, -// PbftRound pbft_round) { -// LOG(log_dg_) 
<< "Sending GetNextVotesSyncPacket with period:" << pbft_period << ", round:" << pbft_round; -// sealAndSend(peerID, GetNextVotesSyncPacket, std::move(dev::RLPStream(2) << pbft_period << pbft_round)); -// } -// -// } // namespace taraxa::network::tarcap diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/dag_block_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/dag_block_packet_handler.cpp index a08659288d..d6da181ddb 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/dag_block_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/dag_block_packet_handler.cpp @@ -57,9 +57,10 @@ void DagBlockPacketHandler::sendBlockWithTransactions(dev::p2p::NodeID const &pe // This lock prevents race condition between syncing and gossiping dag blocks std::unique_lock lock(peer->mutex_for_sending_dag_blocks_); - DagBlockPacket dag_block_packet(trxs, block); + // TODO[2868]: optimize args, use move semantics + DagBlockPacket dag_block_packet{.transactions = trxs, .dag_block = block}; - if (!sealAndSend(peer_id, SubprotocolPacketType::kDagBlockPacket, dag_block_packet.encodeRlp())) { + if (!sealAndSend(peer_id, SubprotocolPacketType::kDagBlockPacket, encodePacketRlp(dag_block_packet))) { LOG(log_wr_) << "Sending DagBlock " << block.getHash() << " failed to " << peer_id; return; } diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/get_dag_sync_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/get_dag_sync_packet_handler.cpp index f71ccc78b8..bf1a60e231 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/get_dag_sync_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/get_dag_sync_packet_handler.cpp @@ -63,7 +63,7 @@ void GetDagSyncPacketHandler::sendBlocks(const dev::p2p::NodeID &peer_id, if (!peer) return; DagSyncPacket dag_sync_packet(request_period, 
period, std::move(transactions), std::move(blocks)); - sealAndSend(peer_id, SubprotocolPacketType::kDagSyncPacket, dag_sync_packet.encodeRlp()); + sealAndSend(peer_id, SubprotocolPacketType::kDagSyncPacket, encodePacketRlp(dag_sync_packet)); } } // namespace taraxa::network::tarcap diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/get_pbft_sync_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/get_pbft_sync_packet_handler.cpp index f6422d3d8f..2521ab511a 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/get_pbft_sync_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/get_pbft_sync_packet_handler.cpp @@ -94,7 +94,7 @@ void GetPbftSyncPacketHandler::sendPbftBlocks(const std::shared_ptr } LOG(log_dg_) << "Sending PbftSyncPacket period " << block_period << " to " << peer_id; - sealAndSend(peer_id, SubprotocolPacketType::kPbftSyncPacket, pbft_sync_packet->encodeRlp()); + sealAndSend(peer_id, SubprotocolPacketType::kPbftSyncPacket, encodePacketRlp(pbft_sync_packet)); if (pbft_chain_synced && last_block) { peer->syncing_ = false; } diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/get_pillar_votes_bundle_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/get_pillar_votes_bundle_packet_handler.cpp index 63aa01c749..9885cb638c 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/get_pillar_votes_bundle_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/get_pillar_votes_bundle_packet_handler.cpp @@ -1,6 +1,6 @@ #include "network/tarcap/packets_handlers/latest/get_pillar_votes_bundle_packet_handler.hpp" -#include "network/tarcap/packets/v4/pillar_votes_bundle_packet.hpp" +#include "network/tarcap/packets/latest/pillar_votes_bundle_packet.hpp" #include "network/tarcap/packets_handlers/latest/pillar_votes_bundle_packet_handler.hpp" 
namespace taraxa::network::tarcap { @@ -44,7 +44,7 @@ void GetPillarVotesBundlePacketHandler::process(GetPillarVotesBundlePacket &&pac while (votes_sent < total_votes) { // Determine the size of the current chunk const size_t chunk_size = - std::min(v4::PillarVotesBundlePacket::kMaxPillarVotesInBundleRlp, total_votes - votes_sent); + std::min(PillarVotesBundlePacketHandler::kMaxPillarVotesInBundleRlp, total_votes - votes_sent); // Create PillarVotesBundlePacket std::vector> pillar_votes; @@ -56,7 +56,7 @@ void GetPillarVotesBundlePacketHandler::process(GetPillarVotesBundlePacket &&pac // Seal and send the chunk to the peer if (sealAndSend(peer->getId(), SubprotocolPacketType::kPillarVotesBundlePacket, - pillar_votes_bundle_packet.encodeRlp())) { + encodePacketRlp(pillar_votes_bundle_packet))) { // Mark the votes in this chunk as known for (size_t i = 0; i < chunk_size; ++i) { peer->markPillarVoteAsKnown(votes[votes_sent + i]->getHash()); @@ -64,9 +64,9 @@ void GetPillarVotesBundlePacketHandler::process(GetPillarVotesBundlePacket &&pac LOG(log_nf_) << "Pillar votes bundle for period " << packet.period << ", hash " << packet.pillar_block_hash << " sent to " << peer->getId() << " (Chunk " - << (votes_sent / v4::PillarVotesBundlePacket::kMaxPillarVotesInBundleRlp) + 1 << "/" - << (total_votes + v4::PillarVotesBundlePacket::kMaxPillarVotesInBundleRlp - 1) / - v4::PillarVotesBundlePacket::kMaxPillarVotesInBundleRlp + << (votes_sent / PillarVotesBundlePacketHandler::kMaxPillarVotesInBundleRlp) + 1 << "/" + << (total_votes + PillarVotesBundlePacketHandler::kMaxPillarVotesInBundleRlp - 1) / + PillarVotesBundlePacketHandler::kMaxPillarVotesInBundleRlp << ")"; } @@ -78,7 +78,7 @@ void GetPillarVotesBundlePacketHandler::process(GetPillarVotesBundlePacket &&pac void GetPillarVotesBundlePacketHandler::requestPillarVotesBundle(PbftPeriod period, const blk_hash_t &pillar_block_hash, const std::shared_ptr &peer) { if (sealAndSend(peer->getId(), 
SubprotocolPacketType::kGetPillarVotesBundlePacket, - GetPillarVotesBundlePacket(period, pillar_block_hash).encodeRlp())) { + encodePacketRlp(GetPillarVotesBundlePacket(period, pillar_block_hash)))) { LOG(log_nf_) << "Requested pillar votes bundle for period " << period << " and pillar block " << pillar_block_hash << " from peer " << peer->getId(); } else { diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/pillar_vote_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/pillar_vote_packet_handler.cpp index abb9bf17b1..a4ddfdd8e5 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/pillar_vote_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/pillar_vote_packet_handler.cpp @@ -42,7 +42,7 @@ void PillarVotePacketHandler::onNewPillarVote(const std::shared_ptr void PillarVotePacketHandler::sendPillarVote(const std::shared_ptr &peer, const std::shared_ptr &vote) { - if (sealAndSend(peer->getId(), SubprotocolPacketType::kPillarVotePacket, PillarVotePacket(vote).encodeRlp())) { + if (sealAndSend(peer->getId(), SubprotocolPacketType::kPillarVotePacket, encodePacketRlp(PillarVotePacket(vote)))) { peer->markPillarVoteAsKnown(vote->getHash()); LOG(log_dg_) << "Pillar vote " << vote->getHash() << ", period " << vote->getPeriod() << " sent to " << peer->getId(); diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/pillar_votes_bundle_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/pillar_votes_bundle_packet_handler.cpp index dfc4ee7089..dce719e0e5 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/pillar_votes_bundle_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/pillar_votes_bundle_packet_handler.cpp @@ -14,6 +14,11 @@ PillarVotesBundlePacketHandler::PillarVotesBundlePacketHandler( void 
PillarVotesBundlePacketHandler::process(PillarVotesBundlePacket &&packet, const std::shared_ptr &peer) { + if (packet.pillar_votes.size() == 0 || packet.pillar_votes.size() > kMaxPillarVotesInBundleRlp) { + throw InvalidRlpItemsCountException("PillarVotesBundlePacket", packet.pillar_votes.size(), + kMaxPillarVotesInBundleRlp); + } + // TODO[2744]: there could be the same protection as in pbft syncing that only requested bundle packet is accepted LOG(log_dg_) << "PillarVotesBundlePacket received from peer " << peer->getId(); diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/status_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/status_packet_handler.cpp index a42da45ab4..4f318db6e6 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/status_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/status_packet_handler.cpp @@ -2,7 +2,7 @@ #include "config/version.hpp" #include "dag/dag.hpp" -#include "network/tarcap/packets/v4/status_packet.hpp" +#include "network/tarcap/packets/latest/status_packet.hpp" #include "network/tarcap/packets_handlers/latest/common/ext_syncing_packet_handler.hpp" #include "network/tarcap/shared_states/pbft_syncing_state.hpp" #include "pbft/pbft_chain.hpp" @@ -146,17 +146,16 @@ bool StatusPacketHandler::sendStatus(const dev::p2p::NodeID& node_id, bool initi const auto pbft_round = pbft_mgr_->getPbftRound(); if (initial) { - success = - sealAndSend(node_id, SubprotocolPacketType::kStatusPacket, - StatusPacket(pbft_chain_size, pbft_round, dag_max_level, pbft_syncing_state_->isPbftSyncing(), - StatusPacket::InitialData{kConf.genesis.chain_id, kGenesisHash, TARAXA_MAJOR_VERSION, - TARAXA_MINOR_VERSION, TARAXA_PATCH_VERSION, - kConf.is_light_node, kConf.light_node_history}) - .encodeRlp()); - } else { success = sealAndSend( node_id, SubprotocolPacketType::kStatusPacket, - StatusPacket(pbft_chain_size, pbft_round, dag_max_level, 
pbft_syncing_state_->isDeepPbftSyncing()).encodeRlp()); + encodePacketRlp(StatusPacket( + pbft_chain_size, pbft_round, dag_max_level, pbft_syncing_state_->isPbftSyncing(), + StatusPacket::InitialData{kConf.genesis.chain_id, kGenesisHash, TARAXA_MAJOR_VERSION, TARAXA_MINOR_VERSION, + TARAXA_PATCH_VERSION, kConf.is_light_node, kConf.light_node_history}))); + } else { + success = sealAndSend(node_id, SubprotocolPacketType::kStatusPacket, + encodePacketRlp(StatusPacket(pbft_chain_size, pbft_round, dag_max_level, + pbft_syncing_state_->isDeepPbftSyncing()))); } return success; diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/transaction_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/transaction_packet_handler.cpp index bd0727a498..c450488840 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/transaction_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/transaction_packet_handler.cpp @@ -2,7 +2,7 @@ #include -#include "network/tarcap/packets/v4/transaction_packet.hpp" +#include "network/tarcap/packets/latest/transaction_packet.hpp" #include "transaction/transaction.hpp" #include "transaction/transaction_manager.hpp" @@ -16,6 +16,10 @@ TransactionPacketHandler::TransactionPacketHandler(const FullNodeConfig &conf, s trx_mgr_(std::move(trx_mgr)) {} inline void TransactionPacketHandler::process(TransactionPacket &&packet, const std::shared_ptr &peer) { + if (packet.transactions.size() > kMaxTransactionsInPacket) { + throw InvalidRlpItemsCountException("TransactionPacket", packet.transactions.size(), kMaxTransactionsInPacket); + } + size_t unseen_txs_count = 0; for (auto &transaction : packet.transactions) { // Skip any transactions that are already known to the trx mgr @@ -152,7 +156,7 @@ void TransactionPacketHandler::sendTransactions(std::shared_ptr peer LOG(log_tr_) << "sendTransactions " << transactions.first.size() << " to " << peer_id; if 
(sealAndSend(peer_id, SubprotocolPacketType::kTransactionPacket, - TransactionPacket(transactions.first).encodeRlp())) { + encodePacketRlp(TransactionPacket(transactions.first)))) { for (const auto &trx : transactions.first) { peer->markTransactionAsKnown(trx->getHash()); } diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/vote_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/vote_packet_handler.cpp index 1a1970ab8a..780d3a95a5 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/vote_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/vote_packet_handler.cpp @@ -1,6 +1,6 @@ #include "network/tarcap/packets_handlers/latest/vote_packet_handler.hpp" -#include "network/tarcap/packets/v4/vote_packet.hpp" +#include "network/tarcap/packets/latest/vote_packet.hpp" #include "pbft/pbft_manager.hpp" #include "vote_manager/vote_manager.hpp" @@ -106,7 +106,7 @@ void VotePacketHandler::sendPbftVote(const std::shared_ptr &peer, co } if (sealAndSend(peer->getId(), SubprotocolPacketType::kVotePacket, - VotePacket(vote, std::move(optional_packet_data)).encodeRlp())) { + encodePacketRlp(VotePacket(vote, std::move(optional_packet_data))))) { peer->markPbftVoteAsKnown(vote->getHash()); if (block) { peer->markPbftBlockAsKnown(block->getBlockHash()); diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/votes_bundle_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/votes_bundle_packet_handler.cpp index 32c9909b01..184d53e5fe 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/votes_bundle_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/votes_bundle_packet_handler.cpp @@ -18,6 +18,10 @@ VotesBundlePacketHandler::VotesBundlePacketHandler(const FullNodeConfig &conf, s logs_prefix + "VOTES_BUNDLE_PH") {} void 
VotesBundlePacketHandler::process(VotesBundlePacket &&packet, const std::shared_ptr &peer) { + if (packet.votes.size() == 0 || packet.votes.size() > kMaxVotesInBundleRlp) { + throw InvalidRlpItemsCountException("VotesBundlePacket", packet.votes.size(), kMaxVotesInBundleRlp); + } + const auto [current_pbft_round, current_pbft_period] = pbft_mgr_->getPbftRoundAndPeriod(); const auto &reference_vote = packet.votes.front(); diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/v4/common/ext_bls_sig_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/v4/common/ext_bls_sig_packet_handler.cpp new file mode 100644 index 0000000000..ff85908beb --- /dev/null +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/v4/common/ext_bls_sig_packet_handler.cpp @@ -0,0 +1,37 @@ +#include "network/tarcap/packets_handlers/v4/common/ext_pillar_vote_packet_handler.hpp" +#include "pillar_chain/pillar_chain_manager.hpp" + +namespace taraxa::network::tarcap::v4 { + +ExtPillarVotePacketHandler::ExtPillarVotePacketHandler( + const FullNodeConfig &conf, std::shared_ptr peers_state, + std::shared_ptr packets_stats, + std::shared_ptr pillar_chain_manager, const addr_t &node_addr, + const std::string &log_channel) + : PacketHandler(conf, std::move(peers_state), std::move(packets_stats), node_addr, log_channel), + pillar_chain_manager_{std::move(pillar_chain_manager)} {} + +bool ExtPillarVotePacketHandler::processPillarVote(const std::shared_ptr &vote, + const std::shared_ptr &peer) { + if (!pillar_chain_manager_->isRelevantPillarVote(vote)) { + LOG(log_dg_) << "Drop irrelevant pillar vote " << vote->getHash() << ", period " << vote->getPeriod() + << " from peer " << peer->getId(); + return false; + } + + if (!pillar_chain_manager_->validatePillarVote(vote)) { + // TODO: enable for mainnet + // std::ostringstream err_msg; + // err_msg << "Invalid pillar vote " << vote->getHash() << " from peer " << peer->getId(); + // throw 
MaliciousPeerException(err_msg.str()); + return false; + } + + pillar_chain_manager_->addVerifiedPillarVote(vote); + + // Mark pillar vote as known for peer + peer->markPillarVoteAsKnown(vote->getHash()); + return true; +} + +} // namespace taraxa::network::tarcap::v4 diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/v4/common/ext_syncing_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/v4/common/ext_syncing_packet_handler.cpp new file mode 100644 index 0000000000..cf0de7a665 --- /dev/null +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/v4/common/ext_syncing_packet_handler.cpp @@ -0,0 +1,167 @@ +#include "network/tarcap/packets_handlers/v4/common/ext_syncing_packet_handler.hpp" + +#include "network/tarcap/shared_states/pbft_syncing_state.hpp" +#include "pbft/pbft_chain.hpp" +#include "pbft/pbft_manager.hpp" + +namespace taraxa::network::tarcap::v4 { + +ExtSyncingPacketHandler::ExtSyncingPacketHandler(const FullNodeConfig &conf, std::shared_ptr peers_state, + std::shared_ptr packets_stats, + std::shared_ptr pbft_syncing_state, + std::shared_ptr pbft_chain, + std::shared_ptr pbft_mgr, + std::shared_ptr dag_mgr, std::shared_ptr db, + const addr_t &node_addr, const std::string &log_channel_name) + : PacketHandler(conf, std::move(peers_state), std::move(packets_stats), node_addr, log_channel_name), + pbft_syncing_state_(std::move(pbft_syncing_state)), + pbft_chain_(std::move(pbft_chain)), + pbft_mgr_(std::move(pbft_mgr)), + dag_mgr_(std::move(dag_mgr)), + db_(std::move(db)) {} + +void ExtSyncingPacketHandler::startSyncingPbft() { + if (pbft_syncing_state_->isPbftSyncing()) { + LOG(log_dg_) << "startSyncingPbft called but syncing_ already true"; + return; + } + + std::shared_ptr peer = getMaxChainPeer(); + if (!peer) { + LOG(log_nf_) << "Restarting syncing PBFT not possible since no connected peers"; + return; + } + + auto pbft_sync_period = pbft_mgr_->pbftSyncingPeriod(); + if (peer->pbft_chain_size_ > 
pbft_sync_period) { + auto peer_id = peer->getId().abridged(); + auto peer_pbft_chain_size = peer->pbft_chain_size_.load(); + if (!pbft_syncing_state_->setPbftSyncing(true, pbft_sync_period, std::move(peer))) { + LOG(log_dg_) << "startSyncingPbft called but syncing_ already true"; + return; + } + LOG(log_si_) << "Restarting syncing PBFT from peer " << peer_id << ", peer PBFT chain size " << peer_pbft_chain_size + << ", own PBFT chain synced at period " << pbft_sync_period; + + if (syncPeerPbft(pbft_sync_period + 1)) { + // Disable snapshots only if are syncing from scratch + if (pbft_syncing_state_->isDeepPbftSyncing()) { + db_->disableSnapshots(); + } + } else { + pbft_syncing_state_->setPbftSyncing(false); + } + } else { + LOG(log_nf_) << "Restarting syncing PBFT not needed since our pbft chain size: " << pbft_sync_period << "(" + << pbft_chain_->getPbftChainSize() << ")" + << " is greater or equal than max node pbft chain size:" << peer->pbft_chain_size_; + db_->enableSnapshots(); + } +} +bool ExtSyncingPacketHandler::syncPeerPbft(PbftPeriod request_period) { + const auto syncing_peer = pbft_syncing_state_->syncingPeer(); + if (!syncing_peer) { + LOG(log_er_) << "Unable to send GetPbftSyncPacket. No syncing peer set."; + return false; + } + + if (request_period > syncing_peer->pbft_chain_size_) { + LOG(log_wr_) << "Invalid syncPeerPbft argument. 
Node " << syncing_peer->getId() << " chain size " + << syncing_peer->pbft_chain_size_ << ", requested period " << request_period; + return false; + } + + LOG(log_nf_) << "Send GetPbftSyncPacket with period " << request_period << " to node " << syncing_peer->getId(); + return sealAndSend(syncing_peer->getId(), SubprotocolPacketType::kGetPbftSyncPacket, + std::move(dev::RLPStream(1) << request_period)); +} + +std::shared_ptr ExtSyncingPacketHandler::getMaxChainPeer( + std::function &)> filter_func) { + std::shared_ptr max_pbft_chain_peer; + PbftPeriod max_pbft_chain_size = 0; + uint64_t max_node_dag_level = 0; + + // Find peer with max pbft chain and dag level + for (auto const &peer : peers_state_->getAllPeers()) { + // Apply the filter function + if (!filter_func(peer.second)) { + continue; + } + + if (peer.second->pbft_chain_size_ > max_pbft_chain_size) { + if (peer.second->peer_light_node && + pbft_mgr_->pbftSyncingPeriod() + peer.second->peer_light_node_history < peer.second->pbft_chain_size_) { + LOG(log_er_) << "Disconnecting from light node peer " << peer.first + << " History: " << peer.second->peer_light_node_history + << " chain size: " << peer.second->pbft_chain_size_; + disconnect(peer.first, dev::p2p::UserReason); + continue; + } + max_pbft_chain_size = peer.second->pbft_chain_size_; + max_node_dag_level = peer.second->dag_level_; + max_pbft_chain_peer = peer.second; + } else if (peer.second->pbft_chain_size_ == max_pbft_chain_size && peer.second->dag_level_ > max_node_dag_level) { + max_node_dag_level = peer.second->dag_level_; + max_pbft_chain_peer = peer.second; + } + } + return max_pbft_chain_peer; +} + +void ExtSyncingPacketHandler::requestPendingDagBlocks(std::shared_ptr peer) { + if (!peer) { + peer = getMaxChainPeer([](const std::shared_ptr &peer) { + if (peer->peer_dag_synced_ || !peer->dagSyncingAllowed()) { + return false; + } + return true; + }); + if (!peer) { + LOG(log_nf_) << "requestPendingDagBlocks not possible since no peers are 
matching conditions"; + return; + } + } + + if (!peer) { + LOG(log_nf_) << "requestPendingDagBlocks not possible since no connected peers"; + return; + } + + // This prevents ddos requesting dag blocks. We can only request this one time from one peer. + if (peer->peer_dag_synced_) { + LOG(log_nf_) << "requestPendingDagBlocks not possible since already requested for peer"; + return; + } + + // Only request dag blocks if periods are matching + auto pbft_sync_period = pbft_mgr_->pbftSyncingPeriod(); + if (pbft_sync_period == peer->pbft_chain_size_) { + // This prevents parallel requests + if (bool b = false; !peer->peer_dag_syncing_.compare_exchange_strong(b, !b)) { + LOG(log_nf_) << "requestPendingDagBlocks not possible since already requesting for peer"; + return; + } + LOG(log_nf_) << "Request pending blocks from peer " << peer->getId(); + std::unordered_set known_non_finalized_blocks; + auto [period, blocks] = dag_mgr_->getNonFinalizedBlocks(); + for (auto &level_blocks : blocks) { + for (auto &block : level_blocks.second) { + known_non_finalized_blocks.insert(block); + } + } + + requestDagBlocks(peer->getId(), known_non_finalized_blocks, period); + } +} + +void ExtSyncingPacketHandler::requestDagBlocks(const dev::p2p::NodeID &_nodeID, + const std::unordered_set &blocks, PbftPeriod period) { + dev::RLPStream s(2); // Period + blocks list + s.append(period); + s.append(blocks); + + sealAndSend(_nodeID, SubprotocolPacketType::kGetDagSyncPacket, std::move(s)); +} + +} // namespace taraxa::network::tarcap::v4 diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/v4/common/ext_votes_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/v4/common/ext_votes_packet_handler.cpp new file mode 100644 index 0000000000..60bf78ca0a --- /dev/null +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/v4/common/ext_votes_packet_handler.cpp @@ -0,0 +1,225 @@ +#include 
"network/tarcap/packets_handlers/v4/common/ext_votes_packet_handler.hpp" + +#include "network/tarcap/packets_handlers/latest/common/exceptions.hpp" +#include "pbft/pbft_manager.hpp" +#include "vote/pbft_vote.hpp" +#include "vote/votes_bundle_rlp.hpp" +#include "vote_manager/vote_manager.hpp" + +namespace taraxa::network::tarcap::v4 { + +ExtVotesPacketHandler::ExtVotesPacketHandler(const FullNodeConfig &conf, std::shared_ptr peers_state, + std::shared_ptr packets_stats, + std::shared_ptr pbft_mgr, + std::shared_ptr pbft_chain, + std::shared_ptr vote_mgr, + std::shared_ptr slashing_manager, const addr_t &node_addr, + const std::string &log_channel_name) + : PacketHandler(conf, std::move(peers_state), std::move(packets_stats), node_addr, log_channel_name), + last_votes_sync_request_time_(std::chrono::system_clock::now()), + last_pbft_block_sync_request_time_(std::chrono::system_clock::now()), + pbft_mgr_(std::move(pbft_mgr)), + pbft_chain_(std::move(pbft_chain)), + vote_mgr_(std::move(vote_mgr)), + slashing_manager_(std::move(slashing_manager)) {} + +bool ExtVotesPacketHandler::processVote(const std::shared_ptr &vote, + const std::shared_ptr &pbft_block, + const std::shared_ptr &peer, bool validate_max_round_step) { + if (pbft_block && !validateVoteAndBlock(vote, pbft_block)) { + throw MaliciousPeerException("Received vote's voted value != received pbft block"); + } + + if (vote_mgr_->voteInVerifiedMap(vote)) { + LOG(log_dg_) << "Vote " << vote->getHash() << " already inserted in verified queue"; + return false; + } + + // Validate vote's period, round and step min/max values + if (const auto vote_valid = validateVotePeriodRoundStep(vote, peer, validate_max_round_step); !vote_valid.first) { + LOG(log_wr_) << "Vote period/round/step " << vote->getHash() << " validation failed. 
Err: " << vote_valid.second; + return false; + } + + // Check is vote is unique per period, round & step & voter -> each address can generate just 1 vote + // (for a value that isn't NBH) per period, round & step + if (auto vote_valid = vote_mgr_->isUniqueVote(vote); !vote_valid.first) { + // Create double voting proof + slashing_manager_->submitDoubleVotingProof(vote, vote_valid.second); + throw MaliciousPeerException("Received double vote", vote->getVoter()); + } + + // Validate vote's signature, vrf, etc... + if (const auto vote_valid = vote_mgr_->validateVote(vote); !vote_valid.first) { + LOG(log_wr_) << "Vote " << vote->getHash() << " validation failed. Err: " << vote_valid.second; + return false; + } + + if (!vote_mgr_->addVerifiedVote(vote)) { + LOG(log_dg_) << "Vote " << vote->getHash() << " already inserted in verified queue(race condition)"; + return false; + } + + if (pbft_block) { + pbft_mgr_->processProposedBlock(pbft_block, vote); + } + + return true; +} + +std::pair ExtVotesPacketHandler::validateVotePeriodRoundStep(const std::shared_ptr &vote, + const std::shared_ptr &peer, + bool validate_max_round_step) { + const auto [current_pbft_round, current_pbft_period] = pbft_mgr_->getPbftRoundAndPeriod(); + + auto genErrMsg = [period = current_pbft_period, round = current_pbft_round, + step = pbft_mgr_->getPbftStep()](const std::shared_ptr &vote) -> std::string { + std::stringstream err; + err << "Vote " << vote->getHash() << " (period, round, step) = (" << vote->getPeriod() << ", " << vote->getRound() + << ", " << vote->getStep() << "). 
Current PBFT (period, round, step) = (" << period << ", " << round << ", " + << step << ")"; + return err.str(); + }; + + // Period validation + // vote->getPeriod() == current_pbft_period - 1 && cert_vote -> potential reward vote + if (vote->getPeriod() < current_pbft_period - 1 || + (vote->getPeriod() == current_pbft_period - 1 && vote->getType() != PbftVoteTypes::cert_vote)) { + return {false, "Invalid period(too small): " + genErrMsg(vote)}; + } else if (kConf.network.ddos_protection.vote_accepting_periods && + vote->getPeriod() - 1 > current_pbft_period + kConf.network.ddos_protection.vote_accepting_periods) { + // skip this check if kConf.network.ddos_protection.vote_accepting_periods == 0 + // vote->getPeriod() - 1 is here because votes are validated against vote_period - 1 in dpos contract + // Do not request round sync too often here + if (vote->getVoter() == peer->getId() && + std::chrono::system_clock::now() - last_pbft_block_sync_request_time_ > kSyncRequestInterval) { + // request PBFT chain sync from this node + sealAndSend(peer->getId(), SubprotocolPacketType::kGetPbftSyncPacket, + std::move(dev::RLPStream(1) << std::max(vote->getPeriod() - 1, peer->pbft_chain_size_.load()))); + last_pbft_block_sync_request_time_ = std::chrono::system_clock::now(); + } + + return {false, "Invalid period(too big): " + genErrMsg(vote)}; + } + + // Round validation + auto checking_round = current_pbft_round; + // If period is not the same we assume current round is equal to 1 + // So we won't accept votes for future period with round bigger than kConf.network.vote_accepting_steps + if (current_pbft_period != vote->getPeriod()) { + checking_round = 1; + } + + // vote->getRound() == checking_round - 1 && next_vote -> previous round next vote + if (vote->getRound() < checking_round - 1 || + (vote->getRound() == checking_round - 1 && vote->getType() != PbftVoteTypes::next_vote)) { + return {false, "Invalid round(too small): " + genErrMsg(vote)}; + } else if 
(validate_max_round_step && kConf.network.ddos_protection.vote_accepting_rounds && + vote->getRound() >= checking_round + kConf.network.ddos_protection.vote_accepting_rounds) { + // skip this check if kConf.network.vote_accepting_rounds == 0 + // Trigger votes(round) syncing only if we are in sync in terms of period + if (current_pbft_period == vote->getPeriod()) { + // Do not request round sync too often here + if (vote->getVoter() == peer->getId() && + std::chrono::system_clock::now() - last_votes_sync_request_time_ > kSyncRequestInterval) { + // request round votes sync from this node + requestPbftNextVotesAtPeriodRound(peer->getId(), current_pbft_period, current_pbft_round); + last_votes_sync_request_time_ = std::chrono::system_clock::now(); + } + } + + return {false, "Invalid round(too big): " + genErrMsg(vote)}; + } + + // Step validation + auto checking_step = pbft_mgr_->getPbftStep(); + // If period or round is not the same we assume current step is equal to 1 + // So we won't accept votes for future rounds with step bigger than kConf.network.vote_accepting_steps + if (current_pbft_period != vote->getPeriod() || current_pbft_round != vote->getRound()) { + checking_step = 1; + } + + // skip check if kConf.network.vote_accepting_steps == 0 + if (validate_max_round_step && kConf.network.ddos_protection.vote_accepting_steps && + vote->getStep() >= checking_step + kConf.network.ddos_protection.vote_accepting_steps) { + return {false, "Invalid step(too big): " + genErrMsg(vote)}; + } + + return {true, ""}; +} + +bool ExtVotesPacketHandler::validateVoteAndBlock(const std::shared_ptr &vote, + const std::shared_ptr &pbft_block) const { + if (pbft_block->getBlockHash() != vote->getBlockHash()) { + LOG(log_er_) << "Vote " << vote->getHash() << " voted block " << vote->getBlockHash() << " != actual block " + << pbft_block->getBlockHash(); + return false; + } + return true; +} + +bool ExtVotesPacketHandler::isPbftRelevantVote(const std::shared_ptr &vote) const { + const 
auto [current_pbft_round, current_pbft_period] = pbft_mgr_->getPbftRoundAndPeriod(); + + if (vote->getPeriod() >= current_pbft_period && vote->getRound() >= current_pbft_round) { + // Standard current or future vote + return true; + } else if (vote->getPeriod() == current_pbft_period && vote->getRound() == (current_pbft_round - 1) && + vote->getType() == PbftVoteTypes::next_vote) { + // Previous round next vote + return true; + } else if (vote->getPeriod() == current_pbft_period - 1 && vote->getType() == PbftVoteTypes::cert_vote) { + // Previous period cert vote - potential reward vote + return true; + } + + return false; +} + +void ExtVotesPacketHandler::sendPbftVotesBundle(const std::shared_ptr &peer, + std::vector> &&votes) { + if (votes.empty()) { + return; + } + + auto sendVotes = [this, &peer](std::vector> &&votes) { + auto votes_bytes = encodePbftVotesBundleRlp(std::move(votes)); + if (votes_bytes.empty()) { + LOG(log_er_) << "Unable to send VotesBundle rlp"; + return; + } + + dev::RLPStream votes_rlp_stream; + votes_rlp_stream.appendRaw(votes_bytes); + + if (sealAndSend(peer->getId(), SubprotocolPacketType::kVotesBundlePacket, std::move(votes_rlp_stream))) { + LOG(log_dg_) << " Votes bundle with " << votes.size() << " votes sent to " << peer->getId(); + for (const auto &vote : votes) { + peer->markPbftVoteAsKnown(vote->getHash()); + } + } + }; + + if (votes.size() <= kMaxVotesInBundleRlp) { + sendVotes(std::move(votes)); + return; + } else { + // Need to split votes into multiple packets + size_t index = 0; + while (index < votes.size()) { + const size_t votes_count = std::min(kMaxVotesInBundleRlp, votes.size() - index); + + const auto begin_it = std::next(votes.begin(), index); + const auto end_it = std::next(begin_it, votes_count); + + std::vector> votes_sub_vector; + std::move(begin_it, end_it, std::back_inserter(votes_sub_vector)); + + sendVotes(std::move(votes_sub_vector)); + + index += votes_count; + } + } +} + +} // namespace 
taraxa::network::tarcap::v4 diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/v4/common/packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/v4/common/packet_handler.cpp new file mode 100644 index 0000000000..ed3009a103 --- /dev/null +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/v4/common/packet_handler.cpp @@ -0,0 +1,144 @@ +#include "network/tarcap/packets_handlers/v4/common/packet_handler.hpp" + +#include "network/tarcap/packets_handlers/latest/common/exceptions.hpp" +#include "network/tarcap/stats/time_period_packets_stats.hpp" + +namespace taraxa::network::tarcap::v4 { + +PacketHandler::PacketHandler(const FullNodeConfig& conf, std::shared_ptr peers_state, + std::shared_ptr packets_stats, const addr_t& node_addr, + const std::string& log_channel_name) + : kConf(conf), peers_state_(std::move(peers_state)), packets_stats_(std::move(packets_stats)) { + LOG_OBJECTS_CREATE(log_channel_name); +} + +void PacketHandler::checkPacketRlpIsList(const threadpool::PacketData& packet_data) const { + if (!packet_data.rlp_.isList()) { + throw InvalidRlpItemsCountException(packet_data.type_str_ + " RLP must be a list. ", 0, 1); + } +} + +void PacketHandler::processPacket(const threadpool::PacketData& packet_data) { + try { + const auto begin = std::chrono::steady_clock::now(); + + // It can rarely happen that packet was received and pushed into the queue when peer was still in peers map, + // in the meantime the connection was lost and we started to process packet from such peer + const auto peer = peers_state_->getPacketSenderPeer(packet_data.from_node_id_, packet_data.type_); + if (!peer.first) [[unlikely]] { + LOG(log_wr_) << "Unable to process packet. 
Reason: " << peer.second; + disconnect(packet_data.from_node_id_, dev::p2p::UserReason); + return; + } + + // Validates packet rlp format + // In case there is a type mismatch, one of the dev::RLPException's is thrown during further parsing in process + // function + checkPacketRlpIsList(packet_data); + validatePacketRlpFormat(packet_data); + + // Main processing function + process(packet_data, peer.first); + + auto processing_duration = + std::chrono::duration_cast(std::chrono::steady_clock::now() - begin); + auto tp_wait_duration = std::chrono::duration_cast(std::chrono::steady_clock::now() - + packet_data.receive_time_); + + PacketStats packet_stats{1 /* count */, packet_data.rlp_.data().size(), processing_duration, tp_wait_duration}; + peer.first->addSentPacket(packet_data.type_str_, packet_stats); + + if (kConf.network.ddos_protection.log_packets_stats) { + packets_stats_->addReceivedPacket(packet_data.type_str_, packet_data.from_node_id_, packet_stats); + } + + } catch (const MaliciousPeerException& e) { + // thrown during packets processing -> malicious peer, invalid rlp items count, ... + // If there is custom peer set in exception, disconnect him, not packet sender + if (const auto custom_peer = e.getPeer(); custom_peer.has_value()) { + handle_caught_exception(e.what(), packet_data, *custom_peer, e.getDisconnectReason(), + true /* set peer as malicious */); + } else { + handle_caught_exception(e.what(), packet_data, packet_data.from_node_id_, e.getDisconnectReason(), + true /* set peer as malicious */); + } + } catch (const PacketProcessingException& e) { + // thrown during packets processing... 
+ handle_caught_exception(e.what(), packet_data, packet_data.from_node_id_, e.getDisconnectReason(), + true /* set peer as malicious */); + } catch (const dev::RLPException& e) { + // thrown during parsing inside aleth/libdevcore -> type mismatch + handle_caught_exception(e.what(), packet_data, packet_data.from_node_id_, dev::p2p::DisconnectReason::BadProtocol, + true /* set peer as malicious */); + } catch (const std::exception& e) { + handle_caught_exception(e.what(), packet_data, packet_data.from_node_id_); + } catch (...) { + handle_caught_exception("Unknown exception", packet_data, packet_data.from_node_id_); + } +} + +void PacketHandler::handle_caught_exception(std::string_view exception_msg, const threadpool::PacketData& packet_data, + const dev::p2p::NodeID& peer, dev::p2p::DisconnectReason disconnect_reason, + bool set_peer_as_malicious) { + LOG(log_er_) << "Exception caught during packet processing: " << exception_msg << " ." + << "PacketData: " << jsonToUnstyledString(packet_data.getPacketDataJson()) + << ", disconnect peer: " << peer.toString(); + + if (set_peer_as_malicious) { + peers_state_->set_peer_malicious(peer); + } + + disconnect(peer, disconnect_reason); +} + +bool PacketHandler::sealAndSend(const dev::p2p::NodeID& node_id, SubprotocolPacketType packet_type, + dev::RLPStream&& rlp) { + auto host = peers_state_->host_.lock(); + if (!host) { + LOG(log_er_) << "sealAndSend failed to obtain host"; + return false; + } + + if (const auto peer = peers_state_->getPacketSenderPeer(node_id, packet_type); !peer.first) [[unlikely]] { + LOG(log_wr_) << "Unable to send packet. 
Reason: " << peer.second; + host->disconnect(node_id, dev::p2p::UserReason); + return false; + } + + const auto begin = std::chrono::steady_clock::now(); + const size_t packet_size = rlp.out().size(); + + host->send(node_id, TARAXA_CAPABILITY_NAME, packet_type, rlp.invalidate(), + [begin, node_id, packet_size, packet_type, this]() { + if (!kConf.network.ddos_protection.log_packets_stats) { + return; + } + + PacketStats packet_stats{ + 1 /* count */, packet_size, + std::chrono::duration_cast(std::chrono::steady_clock::now() - begin), + std::chrono::microseconds{0}}; + + packets_stats_->addSentPacket(convertPacketTypeToString(packet_type), node_id, packet_stats); + }); + + return true; +} + +void PacketHandler::disconnect(const dev::p2p::NodeID& node_id, dev::p2p::DisconnectReason reason) { + if (auto host = peers_state_->host_.lock(); host) { + LOG(log_nf_) << "Disconnect node " << node_id.abridged(); + host->disconnect(node_id, reason); + } else { + LOG(log_er_) << "Unable to disconnect node " << node_id.abridged() << " due to invalid host."; + } +} + +void PacketHandler::requestPbftNextVotesAtPeriodRound(const dev::p2p::NodeID& peerID, PbftPeriod pbft_period, + PbftRound pbft_round) { + LOG(log_dg_) << "Sending GetNextVotesSyncPacket with period:" << pbft_period << ", round:" << pbft_round; + sealAndSend(peerID, SubprotocolPacketType::kGetNextVotesSyncPacket, + std::move(dev::RLPStream(2) << pbft_period << pbft_round)); +} + +} // namespace taraxa::network::tarcap::v4 diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/v4/dag_block_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/v4/dag_block_packet_handler.cpp index 7529eb86b6..d3a82cd7b5 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/v4/dag_block_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/v4/dag_block_packet_handler.cpp @@ -1,7 +1,7 @@ #include "network/tarcap/packets_handlers/v4/dag_block_packet_handler.hpp" 
#include "dag/dag_manager.hpp" -#include "network/tarcap/packets_handlers/latest/transaction_packet_handler.hpp" +#include "network/tarcap/packets_handlers/v4/transaction_packet_handler.hpp" #include "network/tarcap/shared_states/pbft_syncing_state.hpp" #include "transaction/transaction_manager.hpp" @@ -19,25 +19,49 @@ DagBlockPacketHandler::DagBlockPacketHandler(const FullNodeConfig &conf, std::sh logs_prefix + "DAG_BLOCK_PH"), trx_mgr_(std::move(trx_mgr)) {} -void DagBlockPacketHandler::process(DagBlockPacket &&packet, const std::shared_ptr &peer) { - blk_hash_t const hash = packet.dag_block.getHash(); +void DagBlockPacketHandler::validatePacketRlpFormat(const threadpool::PacketData &packet_data) const { + constexpr size_t required_size = 2; + // Only one dag block can be received + if (packet_data.rlp_.itemCount() != required_size) { + throw InvalidRlpItemsCountException(packet_data.type_str_, packet_data.rlp_.itemCount(), required_size); + } +} - for (const auto &tx : packet.transactions) { - peer->markTransactionAsKnown(tx.first); +void DagBlockPacketHandler::process(const threadpool::PacketData &packet_data, + const std::shared_ptr &peer) { + std::unordered_map> transactions; + auto dag_rlp = packet_data.rlp_; + if (packet_data.rlp_.itemCount() == 2) { + const auto trx_count = packet_data.rlp_[0].itemCount(); + transactions.reserve(trx_count); + + for (const auto tx_rlp : packet_data.rlp_[0]) { + try { + auto trx = std::make_shared(tx_rlp); + peer->markTransactionAsKnown(trx->getHash()); + transactions.emplace(trx->getHash(), std::move(trx)); + } catch (const Transaction::InvalidTransaction &e) { + throw MaliciousPeerException("Unable to parse transaction: " + std::string(e.what())); + } + } + dag_rlp = packet_data.rlp_[1]; } + DagBlock block(dag_rlp); + blk_hash_t const hash = block.getHash(); + peer->markDagBlockAsKnown(hash); - if (packet.dag_block.getLevel() > peer->dag_level_) { - peer->dag_level_ = packet.dag_block.getLevel(); + if (block.getLevel() > 
peer->dag_level_) { + peer->dag_level_ = block.getLevel(); } // Do not process this block in case we already have it - if (dag_mgr_->isDagBlockKnown(packet.dag_block.getHash())) { + if (dag_mgr_->isDagBlockKnown(block.getHash())) { LOG(log_tr_) << "Received known DagBlockPacket " << hash << "from: " << peer->getId(); return; } - onNewBlockReceived(std::move(packet.dag_block), peer, packet.transactions); + onNewBlockReceived(std::move(block), peer, transactions); } void DagBlockPacketHandler::sendBlockWithTransactions(dev::p2p::NodeID const &peer_id, taraxa::DagBlock block, diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/v4/dag_sync_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/v4/dag_sync_packet_handler.cpp index 27fcaea920..64400f909d 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/v4/dag_sync_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/v4/dag_sync_packet_handler.cpp @@ -1,7 +1,7 @@ #include "network/tarcap/packets_handlers/v4/dag_sync_packet_handler.hpp" #include "dag/dag.hpp" -#include "network/tarcap/packets_handlers/latest/common/ext_syncing_packet_handler.hpp" +#include "network/tarcap/packets_handlers/v4/common/ext_syncing_packet_handler.hpp" #include "network/tarcap/shared_states/pbft_syncing_state.hpp" #include "transaction/transaction.hpp" #include "transaction/transaction_manager.hpp" @@ -20,33 +20,70 @@ DagSyncPacketHandler::DagSyncPacketHandler(const FullNodeConfig& conf, std::shar logs_prefix + "DAG_SYNC_PH"), trx_mgr_(std::move(trx_mgr)) {} -void DagSyncPacketHandler::process(DagSyncPacket&& packet, const std::shared_ptr& peer) { +void DagSyncPacketHandler::validatePacketRlpFormat(const threadpool::PacketData& packet_data) const { + if (constexpr size_t required_size = 4; packet_data.rlp_.itemCount() != required_size) { + throw InvalidRlpItemsCountException(packet_data.type_str_, packet_data.rlp_.itemCount(), required_size); + } +} + +void 
DagSyncPacketHandler::process(const threadpool::PacketData& packet_data, const std::shared_ptr& peer) { + auto it = packet_data.rlp_.begin(); + const auto request_period = (*it++).toInt(); + const auto response_period = (*it++).toInt(); + // If the periods did not match restart syncing - if (packet.response_period > packet.request_period) { - LOG(log_dg_) << "Received DagSyncPacket with mismatching periods: " << packet.response_period << " " - << packet.request_period << " from " << peer->getId(); - if (peer->pbft_chain_size_ < packet.response_period) { - peer->pbft_chain_size_ = packet.response_period; + if (response_period > request_period) { + LOG(log_dg_) << "Received DagSyncPacket with mismatching periods: " << response_period << " " << request_period + << " from " << packet_data.from_node_id_.abridged(); + if (peer->pbft_chain_size_ < response_period) { + peer->pbft_chain_size_ = response_period; } peer->peer_dag_syncing_ = false; // We might be behind, restart pbft sync if needed startSyncingPbft(); return; - } else if (packet.response_period < packet.request_period) { + } else if (response_period < request_period) { // This should not be possible for honest node std::ostringstream err_msg; - err_msg << "Received DagSyncPacket with mismatching periods: response_period(" << packet.response_period - << ") != request_period(" << packet.request_period << ")"; + err_msg << "Received DagSyncPacket with mismatching periods: response_period(" << response_period + << ") != request_period(" << request_period << ")"; throw MaliciousPeerException(err_msg.str()); } std::vector transactions_to_log; - transactions_to_log.reserve(packet.transactions.size()); - for (auto& trx : packet.transactions) { - peer->markTransactionAsKnown(trx.first); - transactions_to_log.push_back(trx.first); + std::unordered_map> transactions; + const auto trx_count = (*it).itemCount(); + transactions.reserve(trx_count); + transactions_to_log.reserve(trx_count); + + for (const auto tx_rlp : 
(*it++)) { + try { + auto trx = std::make_shared(tx_rlp); + peer->markTransactionAsKnown(trx->getHash()); + transactions.emplace(trx->getHash(), std::move(trx)); + } catch (const Transaction::InvalidTransaction& e) { + throw MaliciousPeerException("Unable to parse transaction: " + std::string(e.what())); + } + } + + std::vector dag_blocks; + std::vector dag_blocks_to_log; + dag_blocks.reserve((*it).itemCount()); + dag_blocks_to_log.reserve((*it).itemCount()); + + for (const auto block_rlp : *it) { + DagBlock block(block_rlp); + peer->markDagBlockAsKnown(block.getHash()); + if (dag_mgr_->isDagBlockKnown(block.getHash())) { + LOG(log_tr_) << "Received known DagBlock " << block.getHash() << "from: " << peer->getId(); + continue; + } + dag_blocks.emplace_back(std::move(block)); + } + for (auto& trx : transactions) { + transactions_to_log.push_back(trx.first); if (trx_mgr_->isTransactionKnown(trx.first)) { continue; } @@ -59,18 +96,10 @@ void DagSyncPacketHandler::process(DagSyncPacket&& packet, const std::shared_ptr } } - std::vector dag_blocks_to_log; - dag_blocks_to_log.reserve(packet.dag_blocks.size()); - for (auto& block : packet.dag_blocks) { + for (auto& block : dag_blocks) { dag_blocks_to_log.push_back(block.getHash()); - peer->markDagBlockAsKnown(block.getHash()); - - if (dag_mgr_->isDagBlockKnown(block.getHash())) { - LOG(log_tr_) << "Received known DagBlock " << block.getHash() << "from: " << peer->getId(); - continue; - } - auto verified = dag_mgr_->verifyBlock(block, packet.transactions); + auto verified = dag_mgr_->verifyBlock(block, transactions); if (verified.first != DagManager::VerifyBlockReturnType::Verified) { std::ostringstream err_msg; err_msg << "DagBlock " << block.getHash() << " failed verification with error code " @@ -97,7 +126,7 @@ void DagSyncPacketHandler::process(DagSyncPacket&& packet, const std::shared_ptr peer->peer_dag_syncing_ = false; LOG(log_dg_) << "Received DagSyncPacket with blocks: " << dag_blocks_to_log - << " Transactions: " 
<< transactions_to_log << " from " << peer->getId(); + << " Transactions: " << transactions_to_log << " from " << packet_data.from_node_id_; } } // namespace taraxa::network::tarcap::v4 diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/v4/get_dag_sync_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/v4/get_dag_sync_packet_handler.cpp index 74ddabb85c..bcdcb938bd 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/v4/get_dag_sync_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/v4/get_dag_sync_packet_handler.cpp @@ -15,13 +15,19 @@ GetDagSyncPacketHandler::GetDagSyncPacketHandler(const FullNodeConfig &conf, std dag_mgr_(std::move(dag_mgr)), db_(std::move(db)) {} -void GetDagSyncPacketHandler::process(GetDagSyncPacket &&packet, +void GetDagSyncPacketHandler::validatePacketRlpFormat(const threadpool::PacketData &packet_data) const { + if (constexpr size_t required_size = 2; packet_data.rlp_.itemCount() != required_size) { + throw InvalidRlpItemsCountException(packet_data.type_str_, packet_data.rlp_.itemCount(), required_size); + } +} + +void GetDagSyncPacketHandler::process(const threadpool::PacketData &packet_data, [[maybe_unused]] const std::shared_ptr &peer) { if (!peer->requestDagSyncingAllowed()) { // This should not be possible for honest node // Each node should perform dag syncing only when allowed std::ostringstream err_msg; - err_msg << "Received multiple GetDagSyncPackets from " << peer->getId().abridged(); + err_msg << "Received multiple GetDagSyncPackets from " << packet_data.from_node_id_.abridged(); throw MaliciousPeerException(err_msg.str()); } @@ -29,19 +35,21 @@ void GetDagSyncPacketHandler::process(GetDagSyncPacket &&packet, // This lock prevents race condition between syncing and gossiping dag blocks std::unique_lock lock(peer->mutex_for_sending_dag_blocks_); - std::unordered_set blocks_hashes_set; + std::unordered_set blocks_hashes; + auto it = 
packet_data.rlp_.begin(); + const auto peer_period = (*it++).toInt(); + std::string blocks_hashes_to_log; - blocks_hashes_to_log.reserve(packet.blocks_hashes.size()); - for (const auto &hash : packet.blocks_hashes) { - if (blocks_hashes_set.insert(hash).second) { - blocks_hashes_to_log += hash.abridged(); - } + for (const auto block_hash_rlp : *it) { + blk_hash_t hash = block_hash_rlp.toHash(); + blocks_hashes_to_log += hash.abridged(); + blocks_hashes.emplace(hash); } LOG(log_dg_) << "Received GetDagSyncPacket: " << blocks_hashes_to_log << " from " << peer->getId(); - auto [period, blocks, transactions] = dag_mgr_->getNonFinalizedBlocksWithTransactions(blocks_hashes_set); - if (packet.peer_period == period) { + auto [period, blocks, transactions] = dag_mgr_->getNonFinalizedBlocksWithTransactions(blocks_hashes); + if (peer_period == period) { peer->syncing_ = false; peer->peer_requested_dag_syncing_ = true; peer->peer_requested_dag_syncing_time_ = @@ -51,7 +59,7 @@ void GetDagSyncPacketHandler::process(GetDagSyncPacket &&packet, blocks.clear(); transactions.clear(); } - sendBlocks(peer->getId(), std::move(blocks), std::move(transactions), packet.peer_period, period); + sendBlocks(packet_data.from_node_id_, std::move(blocks), std::move(transactions), peer_period, period); } void GetDagSyncPacketHandler::sendBlocks(const dev::p2p::NodeID &peer_id, diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/v4/get_next_votes_bundle_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/v4/get_next_votes_bundle_packet_handler.cpp new file mode 100644 index 0000000000..8ca116992b --- /dev/null +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/v4/get_next_votes_bundle_packet_handler.cpp @@ -0,0 +1,82 @@ +#include "network/tarcap/packets_handlers/v4/get_next_votes_bundle_packet_handler.hpp" + +#include "pbft/pbft_manager.hpp" +#include "vote_manager/vote_manager.hpp" + +namespace taraxa::network::tarcap::v4 { + 
+GetNextVotesBundlePacketHandler::GetNextVotesBundlePacketHandler( + const FullNodeConfig &conf, std::shared_ptr peers_state, + std::shared_ptr packets_stats, std::shared_ptr pbft_mgr, + std::shared_ptr pbft_chain, std::shared_ptr vote_mgr, + std::shared_ptr slashing_manager, const addr_t &node_addr, const std::string &logs_prefix) + : ExtVotesPacketHandler(conf, std::move(peers_state), std::move(packets_stats), std::move(pbft_mgr), + std::move(pbft_chain), std::move(vote_mgr), std::move(slashing_manager), node_addr, + logs_prefix + "GET_NEXT_VOTES_BUNDLE_PH") {} + +void GetNextVotesBundlePacketHandler::validatePacketRlpFormat(const threadpool::PacketData &packet_data) const { + if (constexpr size_t required_size = 2; packet_data.rlp_.itemCount() != required_size) { + throw InvalidRlpItemsCountException(packet_data.type_str_, packet_data.rlp_.itemCount(), required_size); + } +} + +void GetNextVotesBundlePacketHandler::process(const threadpool::PacketData &packet_data, + const std::shared_ptr &peer) { + LOG(log_dg_) << "Received GetNextVotesSyncPacket request"; + + const PbftPeriod peer_pbft_period = packet_data.rlp_[0].toInt(); + const PbftRound peer_pbft_round = packet_data.rlp_[1].toInt(); + const auto [pbft_round, pbft_period] = pbft_mgr_->getPbftRoundAndPeriod(); + + // Send votes only for current_period == peer_period && current_period >= peer_round + if (pbft_period != peer_pbft_period || pbft_round == 1 || pbft_round < peer_pbft_round) { + LOG(log_nf_) << "No previous round next votes sync packet will be sent. 
pbft_period " << pbft_period + << ", peer_pbft_period " << peer_pbft_period << ", pbft_round " << pbft_round << ", peer_pbft_round " + << peer_pbft_round; + return; + } + + auto next_votes = + vote_mgr_->getTwoTPlusOneVotedBlockVotes(pbft_period, pbft_round - 1, TwoTPlusOneVotedBlockType::NextVotedBlock); + auto next_null_votes = vote_mgr_->getTwoTPlusOneVotedBlockVotes(pbft_period, pbft_round - 1, + TwoTPlusOneVotedBlockType::NextVotedNullBlock); + + // In edge case this could theoretically happen due to race condition when we moved to the next period or round + // right before calling getAllTwoTPlusOneNextVotes with specific period & round + if (next_votes.empty() && next_null_votes.empty()) { + // Try to get period & round values again + const auto [tmp_pbft_round, tmp_pbft_period] = pbft_mgr_->getPbftRoundAndPeriod(); + // No changes in period & round or new round == 1 + if (pbft_period == tmp_pbft_period && pbft_round == tmp_pbft_round) { + LOG(log_er_) << "No next votes returned for period " << tmp_pbft_period << ", round " << tmp_pbft_round - 1; + return; + } + + if (tmp_pbft_round == 1) { + LOG(log_wr_) << "No next votes returned for period " << tmp_pbft_period << ", round " << tmp_pbft_round - 1 + << " due to race condition - pbft already moved to the next period & round == 1"; + return; + } + + next_votes = vote_mgr_->getTwoTPlusOneVotedBlockVotes(pbft_period, pbft_round - 1, + TwoTPlusOneVotedBlockType::NextVotedBlock); + next_null_votes = vote_mgr_->getTwoTPlusOneVotedBlockVotes(pbft_period, pbft_round - 1, + TwoTPlusOneVotedBlockType::NextVotedNullBlock); + if (next_votes.empty() && next_null_votes.empty()) { + LOG(log_er_) << "No next votes returned for period " << tmp_pbft_period << ", round " << tmp_pbft_round - 1; + return; + } + } + + if (!next_votes.empty()) { + LOG(log_nf_) << "Send next votes bundle with " << next_votes.size() << " votes to " << peer->getId(); + sendPbftVotesBundle(peer, std::move(next_votes)); + } + + if 
(!next_null_votes.empty()) { + LOG(log_nf_) << "Send next null votes bundle with " << next_null_votes.size() << " votes to " << peer->getId(); + sendPbftVotesBundle(peer, std::move(next_null_votes)); + } +} + +} // namespace taraxa::network::tarcap::v4 diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/v4/get_pbft_sync_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/v4/get_pbft_sync_packet_handler.cpp new file mode 100644 index 0000000000..d76698ec95 --- /dev/null +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/v4/get_pbft_sync_packet_handler.cpp @@ -0,0 +1,114 @@ +#include "network/tarcap/packets_handlers/v4/get_pbft_sync_packet_handler.hpp" + +#include "network/tarcap/shared_states/pbft_syncing_state.hpp" +#include "pbft/pbft_chain.hpp" +#include "storage/storage.hpp" +#include "vote/pbft_vote.hpp" +#include "vote/votes_bundle_rlp.hpp" +#include "vote_manager/vote_manager.hpp" + +namespace taraxa::network::tarcap::v4 { + +GetPbftSyncPacketHandler::GetPbftSyncPacketHandler(const FullNodeConfig &conf, std::shared_ptr peers_state, + std::shared_ptr packets_stats, + std::shared_ptr pbft_syncing_state, + std::shared_ptr pbft_chain, + std::shared_ptr vote_mgr, std::shared_ptr db, + const addr_t &node_addr, const std::string &logs_prefix) + : PacketHandler(conf, std::move(peers_state), std::move(packets_stats), node_addr, + logs_prefix + "GET_PBFT_SYNC_PH"), + pbft_syncing_state_(std::move(pbft_syncing_state)), + pbft_chain_(std::move(pbft_chain)), + vote_mgr_(std::move(vote_mgr)), + db_(std::move(db)) {} + +void GetPbftSyncPacketHandler::validatePacketRlpFormat(const threadpool::PacketData &packet_data) const { + if (constexpr size_t required_size = 1; packet_data.rlp_.itemCount() != required_size) { + throw InvalidRlpItemsCountException(packet_data.type_str_, packet_data.rlp_.itemCount(), required_size); + } +} + +void GetPbftSyncPacketHandler::process(const threadpool::PacketData &packet_data, + const 
std::shared_ptr &peer) { + LOG(log_tr_) << "Received GetPbftSyncPacket Block"; + + const size_t height_to_sync = packet_data.rlp_[0].toInt(); + // Here need PBFT chain size, not synced period since synced blocks has not verified yet. + const size_t my_chain_size = pbft_chain_->getPbftChainSize(); + if (height_to_sync > my_chain_size) { + // Node update peers PBFT chain size in status packet. Should not request syncing period bigger than pbft chain size + std::ostringstream err_msg; + err_msg << "Peer " << packet_data.from_node_id_ << " request syncing period start at " << height_to_sync + << ". That's bigger than own PBFT chain size " << my_chain_size; + throw MaliciousPeerException(err_msg.str()); + } + + if (kConf.is_light_node && height_to_sync + kConf.light_node_history <= my_chain_size) { + std::ostringstream err_msg; + err_msg << "Peer " << packet_data.from_node_id_ << " request syncing period start at " << height_to_sync + << ". Light node does not have the data " << my_chain_size; + throw MaliciousPeerException(err_msg.str()); + } + + size_t blocks_to_transfer = 0; + auto pbft_chain_synced = false; + const auto total_period_data_size = my_chain_size - height_to_sync + 1; + if (total_period_data_size <= kConf.network.sync_level_size) { + blocks_to_transfer = total_period_data_size; + pbft_chain_synced = true; + } else { + blocks_to_transfer = kConf.network.sync_level_size; + } + LOG(log_tr_) << "Will send " << blocks_to_transfer << " PBFT blocks to " << packet_data.from_node_id_; + + sendPbftBlocks(peer, height_to_sync, blocks_to_transfer, pbft_chain_synced); +} + +// api for pbft syncing +void GetPbftSyncPacketHandler::sendPbftBlocks(const std::shared_ptr &peer, PbftPeriod from_period, + size_t blocks_to_transfer, bool pbft_chain_synced) { + const auto &peer_id = peer->getId(); + LOG(log_tr_) << "sendPbftBlocks: peer want to sync from pbft chain height " << from_period << ", will send at most " + << blocks_to_transfer << " pbft blocks to " << peer_id; + + 
for (auto block_period = from_period; block_period < from_period + blocks_to_transfer; block_period++) { + bool last_block = (block_period == from_period + blocks_to_transfer - 1); + auto data = db_->getPeriodDataRaw(block_period); + + if (data.size() == 0) { + // This can happen when switching from light node to full node setting + LOG(log_er_) << "DB corrupted. Cannot find period " << block_period << " PBFT block in db"; + return; + } + + dev::RLPStream s; + if (pbft_chain_synced && last_block) { + // Latest finalized block cert votes are saved in db as reward votes for new blocks + const auto reward_votes = vote_mgr_->getRewardVotes(); + assert(!reward_votes.empty()); + // It is possible that the node pushed another block to the chain in the meantime + if (reward_votes[0]->getPeriod() == block_period) { + s.appendList(3); + s << last_block; + s.appendRaw(data); + s.appendRaw(encodePbftVotesBundleRlp(reward_votes)); + } else { + s.appendList(2); + s << last_block; + s.appendRaw(data); + } + } else { + s.appendList(2); + s << last_block; + s.appendRaw(data); + } + + LOG(log_dg_) << "Sending PbftSyncPacket period " << block_period << " to " << peer_id; + sealAndSend(peer_id, SubprotocolPacketType::kPbftSyncPacket, std::move(s)); + if (pbft_chain_synced && last_block) { + peer->syncing_ = false; + } + } +} + +} // namespace taraxa::network::tarcap::v4 diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/v4/get_pillar_votes_bundle_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/v4/get_pillar_votes_bundle_packet_handler.cpp new file mode 100644 index 0000000000..e3d2086391 --- /dev/null +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/v4/get_pillar_votes_bundle_packet_handler.cpp @@ -0,0 +1,98 @@ +#include "network/tarcap/packets_handlers/v4/get_pillar_votes_bundle_packet_handler.hpp" + +#include "network/tarcap/packets_handlers/v4/pillar_votes_bundle_packet_handler.hpp" + +namespace taraxa::network::tarcap::v4 
{ + +GetPillarVotesBundlePacketHandler::GetPillarVotesBundlePacketHandler( + const FullNodeConfig &conf, std::shared_ptr peers_state, + std::shared_ptr packets_stats, + std::shared_ptr pillar_chain_manager, const addr_t &node_addr, + const std::string &logs_prefix) + : PacketHandler(conf, std::move(peers_state), std::move(packets_stats), node_addr, + logs_prefix + "GET_PILLAR_VOTES_BUNDLE_PH"), + pillar_chain_manager_(std::move(pillar_chain_manager)) {} + +void GetPillarVotesBundlePacketHandler::validatePacketRlpFormat(const threadpool::PacketData &packet_data) const { + auto items = packet_data.rlp_.itemCount(); + if (items != kGetPillarVotesBundlePacketSize) { + throw InvalidRlpItemsCountException(packet_data.type_str_, items, kGetPillarVotesBundlePacketSize); + } +} + +void GetPillarVotesBundlePacketHandler::process(const threadpool::PacketData &packet_data, + const std::shared_ptr &peer) { + LOG(log_dg_) << "GetPillarVotesBundlePacketHandler received from peer " << peer->getId(); + const PbftPeriod period = packet_data.rlp_[0].toInt(); + const blk_hash_t pillar_block_hash = packet_data.rlp_[1].toHash(); + + if (!kConf.genesis.state.hardforks.ficus_hf.isFicusHardfork(period)) { + std::ostringstream err_msg; + err_msg << "Pillar votes bundle request for period " << period << ", ficus hardfork block num " + << kConf.genesis.state.hardforks.ficus_hf.block_num; + throw MaliciousPeerException(err_msg.str()); + } + + if (!kConf.genesis.state.hardforks.ficus_hf.isPbftWithPillarBlockPeriod(period)) { + std::ostringstream err_msg; + err_msg << "Pillar votes bundle request for period " << period << ". 
Wrong requested period"; + throw MaliciousPeerException(err_msg.str()); + } + + const auto votes = pillar_chain_manager_->getVerifiedPillarVotes(period, pillar_block_hash); + if (votes.empty()) { + LOG(log_dg_) << "No pillar votes for period " << period << "and pillar block hash " << pillar_block_hash; + return; + } + // Check if the votes size exceeds the maximum limit and split into multiple packets if needed + const size_t total_votes = votes.size(); + size_t votes_sent = 0; + + while (votes_sent < total_votes) { + // Determine the size of the current chunk + const size_t chunk_size = + std::min(PillarVotesBundlePacketHandler::kMaxPillarVotesInBundleRlp, total_votes - votes_sent); + + // Create a new RLPStream for the chunk + dev::RLPStream s(chunk_size); + for (size_t i = 0; i < chunk_size; ++i) { + const auto &sig = votes[votes_sent + i]; + s.appendRaw(sig->rlp()); + } + + // Seal and send the chunk to the peer + if (sealAndSend(peer->getId(), SubprotocolPacketType::kPillarVotesBundlePacket, std::move(s))) { + // Mark the votes in this chunk as known + for (size_t i = 0; i < chunk_size; ++i) { + peer->markPillarVoteAsKnown(votes[votes_sent + i]->getHash()); + } + + LOG(log_nf_) << "Pillar votes bundle for period " << period << ", hash " << pillar_block_hash << " sent to " + << peer->getId() << " (Chunk " + << (votes_sent / PillarVotesBundlePacketHandler::kMaxPillarVotesInBundleRlp) + 1 << "/" + << (total_votes + PillarVotesBundlePacketHandler::kMaxPillarVotesInBundleRlp - 1) / + PillarVotesBundlePacketHandler::kMaxPillarVotesInBundleRlp + << ")"; + } + + // Update the votes_sent counter + votes_sent += chunk_size; + } +} + +void GetPillarVotesBundlePacketHandler::requestPillarVotesBundle(PbftPeriod period, const blk_hash_t &pillar_block_hash, + const std::shared_ptr &peer) { + dev::RLPStream s(kGetPillarVotesBundlePacketSize); + s << period; + s << pillar_block_hash; + + if (sealAndSend(peer->getId(), SubprotocolPacketType::kGetPillarVotesBundlePacket, 
std::move(s))) { + LOG(log_nf_) << "Requested pillar votes bundle for period " << period << " and pillar block " << pillar_block_hash + << " from peer " << peer->getId(); + } else { + LOG(log_er_) << "Unable to send pillar votes bundle request for period " << period << " and pillar block " + << pillar_block_hash << " to peer " << peer->getId(); + } +} + +} // namespace taraxa::network::tarcap::v4 diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/v4/pbft_sync_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/v4/pbft_sync_packet_handler.cpp index 087f458935..0710cac3de 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/v4/pbft_sync_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/v4/pbft_sync_packet_handler.cpp @@ -22,38 +22,66 @@ PbftSyncPacketHandler::PbftSyncPacketHandler(const FullNodeConfig &conf, std::sh vote_mgr_(std::move(vote_mgr)), periodic_events_tp_(1, true) {} -void PbftSyncPacketHandler::process(PbftSyncPacket &&packet, const std::shared_ptr &peer) { +void PbftSyncPacketHandler::validatePacketRlpFormat(const threadpool::PacketData &packet_data) const { + if (packet_data.rlp_.itemCount() != kStandardPacketSize && packet_data.rlp_.itemCount() != kChainSyncedPacketSize) { + throw InvalidRlpItemsCountException(packet_data.type_str_, packet_data.rlp_.itemCount(), kStandardPacketSize); + } + + // PeriodData rlp parsing cannot be done through util::rlp_tuple, which automatically checks the rlp size so it is + // checked here manually + if (packet_data.rlp_[1].itemCount() != PeriodData::kBaseRlpItemCount && + packet_data.rlp_[1].itemCount() != PeriodData::kExtendedRlpItemCount) { + throw InvalidRlpItemsCountException(packet_data.type_str_ + ":PeriodData", packet_data.rlp_[1].itemCount(), + PeriodData::kBaseRlpItemCount); + } +} + +void PbftSyncPacketHandler::process(const threadpool::PacketData &packet_data, + const std::shared_ptr &peer) { // Note: no need to 
consider possible race conditions due to concurrent processing as it is // disabled on priority_queue blocking dependencies level const auto syncing_peer = pbft_syncing_state_->syncingPeer(); if (!syncing_peer) { - LOG(log_wr_) << "PbftSyncPacket received from unexpected peer " << peer->getId().abridged() + LOG(log_wr_) << "PbftSyncPacket received from unexpected peer " << packet_data.from_node_id_.abridged() << " but there is no current syncing peer set"; return; } - if (syncing_peer->getId() != peer->getId()) { - LOG(log_wr_) << "PbftSyncPacket received from unexpected peer " << peer->getId().abridged() + if (syncing_peer->getId() != packet_data.from_node_id_) { + LOG(log_wr_) << "PbftSyncPacket received from unexpected peer " << packet_data.from_node_id_.abridged() << " current syncing peer " << syncing_peer->getId().abridged(); return; } // Process received pbft blocks // pbft_chain_synced is the flag to indicate own PBFT chain has synced with the peer's PBFT chain - const bool pbft_chain_synced = !packet.current_block_cert_votes.empty(); - const auto pbft_blk_hash = packet.period_data.pbft_blk->getBlockHash(); + const bool pbft_chain_synced = packet_data.rlp_.itemCount() == kChainSyncedPacketSize; + // last_block is the flag to indicate this is the last block in each syncing round, doesn't mean PBFT chain has synced + const bool last_block = packet_data.rlp_[0].toInt(); + PeriodData period_data; + try { + period_data = decodePeriodData(packet_data.rlp_[1]); + } catch (const std::runtime_error &e) { + throw MaliciousPeerException("Unable to parse PeriodData: " + std::string(e.what())); + } + + std::vector> current_block_cert_votes; + if (pbft_chain_synced) { + current_block_cert_votes = decodeVotesBundle(packet_data.rlp_[2]); + } + const auto pbft_blk_hash = period_data.pbft_blk->getBlockHash(); std::string received_dag_blocks_str; // This is just log related stuff - for (auto const &block : packet.period_data.dag_blocks) { + for (auto const &block : 
period_data.dag_blocks) { received_dag_blocks_str += block.getHash().toString() + " "; if (peer->dag_level_ < block.getLevel()) { peer->dag_level_ = block.getLevel(); } } - const auto pbft_block_period = packet.period_data.pbft_blk->getPeriod(); + const auto pbft_block_period = period_data.pbft_blk->getPeriod(); LOG(log_dg_) << "PbftSyncPacket received. Period: " << pbft_block_period - << ", dag Blocks: " << received_dag_blocks_str << " from " << peer->getId(); + << ", dag Blocks: " << received_dag_blocks_str << " from " << packet_data.from_node_id_; peer->markPbftBlockAsKnown(pbft_blk_hash); // Update peer's pbft period if outdated @@ -64,8 +92,8 @@ void PbftSyncPacketHandler::process(PbftSyncPacket &&packet, const std::shared_p LOG(log_tr_) << "Processing pbft block: " << pbft_blk_hash; if (pbft_chain_->findPbftBlockInChain(pbft_blk_hash)) { - LOG(log_wr_) << "PBFT block " << pbft_blk_hash << ", period: " << packet.period_data.pbft_blk->getPeriod() - << " from " << peer->getId() << " already present in chain"; + LOG(log_wr_) << "PBFT block " << pbft_blk_hash << ", period: " << period_data.pbft_blk->getPeriod() << " from " + << packet_data.from_node_id_ << " already present in chain"; } else { if (pbft_block_period != pbft_mgr_->pbftSyncingPeriod() + 1) { // This can happen if we just got synced and block was cert voted @@ -81,11 +109,11 @@ void PbftSyncPacketHandler::process(PbftSyncPacket &&packet, const std::shared_p // Check cert vote matches if final synced block if (pbft_chain_synced) { - for (auto const &vote : packet.current_block_cert_votes) { + for (auto const &vote : current_block_cert_votes) { if (vote->getBlockHash() != pbft_blk_hash) { LOG(log_er_) << "Invalid cert votes block hash " << vote->getBlockHash() << " instead of " << pbft_blk_hash - << " from peer " << peer->getId().abridged() << " received, stop syncing."; - handleMaliciousSyncPeer(peer->getId()); + << " from peer " << packet_data.from_node_id_.abridged() << " received, stop syncing."; + 
handleMaliciousSyncPeer(packet_data.from_node_id_); return; } } @@ -94,50 +122,52 @@ void PbftSyncPacketHandler::process(PbftSyncPacket &&packet, const std::shared_p // Check votes match the hash of previous block in the queue auto last_pbft_block_hash = pbft_mgr_->lastPbftBlockHashFromQueueOrChain(); // Check cert vote matches - for (auto const &vote : packet.period_data.previous_block_cert_votes) { + for (auto const &vote : period_data.previous_block_cert_votes) { if (vote->getBlockHash() != last_pbft_block_hash) { LOG(log_er_) << "Invalid cert votes block hash " << vote->getBlockHash() << " instead of " - << last_pbft_block_hash << " from peer " << peer->getId().abridged() << " received, stop syncing."; - handleMaliciousSyncPeer(peer->getId()); + << last_pbft_block_hash << " from peer " << packet_data.from_node_id_.abridged() + << " received, stop syncing."; + handleMaliciousSyncPeer(packet_data.from_node_id_); return; } } - if (!pbft_mgr_->validatePillarDataInPeriodData(packet.period_data)) { - handleMaliciousSyncPeer(peer->getId()); + if (!pbft_mgr_->validatePillarDataInPeriodData(period_data)) { + handleMaliciousSyncPeer(packet_data.from_node_id_); return; } - auto order_hash = PbftManager::calculateOrderHash(packet.period_data.dag_blocks); - if (order_hash != packet.period_data.pbft_blk->getOrderHash()) { + auto order_hash = PbftManager::calculateOrderHash(period_data.dag_blocks); + if (order_hash != period_data.pbft_blk->getOrderHash()) { { // This is just log related stuff std::vector trx_order; - trx_order.reserve(packet.period_data.transactions.size()); + trx_order.reserve(period_data.transactions.size()); std::vector blk_order; - blk_order.reserve(packet.period_data.dag_blocks.size()); - for (auto t : packet.period_data.transactions) { + blk_order.reserve(period_data.dag_blocks.size()); + for (auto t : period_data.transactions) { trx_order.push_back(t->getHash()); } - for (auto b : packet.period_data.dag_blocks) { + for (auto b : period_data.dag_blocks) 
{ blk_order.push_back(b.getHash()); } LOG(log_er_) << "Order hash incorrect in period data " << pbft_blk_hash << " expected: " << order_hash - << " received " << packet.period_data.pbft_blk->getOrderHash() << "; Dag order: " << blk_order - << "; Trx order: " << trx_order << "; from " << peer->getId().abridged() << ", stop syncing."; + << " received " << period_data.pbft_blk->getOrderHash() << "; Dag order: " << blk_order + << "; Trx order: " << trx_order << "; from " << packet_data.from_node_id_.abridged() + << ", stop syncing."; } - handleMaliciousSyncPeer(peer->getId()); + handleMaliciousSyncPeer(packet_data.from_node_id_); return; } // This is special case when queue is empty and we can not say for sure that all votes that are part of this block // have been verified before if (pbft_mgr_->periodDataQueueEmpty()) { - for (const auto &v : packet.period_data.previous_block_cert_votes) { + for (const auto &v : period_data.previous_block_cert_votes) { if (auto vote_is_valid = vote_mgr_->validateVote(v); vote_is_valid.first == false) { - LOG(log_er_) << "Invalid reward votes in block " << packet.period_data.pbft_blk->getBlockHash() - << " from peer " << peer->getId().abridged() + LOG(log_er_) << "Invalid reward votes in block " << period_data.pbft_blk->getBlockHash() << " from peer " + << packet_data.from_node_id_.abridged() << " received, stop syncing. Validation failed. 
Err: " << vote_is_valid.second; - handleMaliciousSyncPeer(peer->getId()); + handleMaliciousSyncPeer(packet_data.from_node_id_); return; } @@ -145,8 +175,8 @@ void PbftSyncPacketHandler::process(PbftSyncPacket &&packet, const std::shared_p } // And now we need to replace it with verified votes - if (auto votes = vote_mgr_->checkRewardVotes(packet.period_data.pbft_blk, true); votes.first) { - packet.period_data.previous_block_cert_votes = std::move(votes.second); + if (auto votes = vote_mgr_->checkRewardVotes(period_data.pbft_blk, true); votes.first) { + period_data.previous_block_cert_votes = std::move(votes.second); } else { // checkRewardVotes could fail because we just cert voted this block and moved to next period, // in that case we are probably fully synced @@ -155,18 +185,18 @@ void PbftSyncPacketHandler::process(PbftSyncPacket &&packet, const std::shared_p return; } - LOG(log_er_) << "Invalid reward votes in block " << packet.period_data.pbft_blk->getBlockHash() << " from peer " - << peer->getId().abridged() << " received, stop syncing."; - handleMaliciousSyncPeer(peer->getId()); + LOG(log_er_) << "Invalid reward votes in block " << period_data.pbft_blk->getBlockHash() << " from peer " + << packet_data.from_node_id_.abridged() << " received, stop syncing."; + handleMaliciousSyncPeer(packet_data.from_node_id_); return; } } LOG(log_tr_) << "Synced PBFT block hash " << pbft_blk_hash << " with " - << packet.period_data.previous_block_cert_votes.size() << " cert votes"; - LOG(log_tr_) << "Synced PBFT block " << packet.period_data; - pbft_mgr_->periodDataQueuePush(std::move(packet.period_data), peer->getId(), - std::move(packet.current_block_cert_votes)); + << period_data.previous_block_cert_votes.size() << " cert votes"; + LOG(log_tr_) << "Synced PBFT block " << period_data; + pbft_mgr_->periodDataQueuePush(std::move(period_data), packet_data.from_node_id_, + std::move(current_block_cert_votes)); } auto pbft_sync_period = pbft_mgr_->pbftSyncingPeriod(); @@ -179,7 
+209,7 @@ void PbftSyncPacketHandler::process(PbftSyncPacket &&packet, const std::shared_p return; } - if (packet.last_block) { + if (last_block) { // If current sync period is actually bigger than the block we just received we are probably synced if (pbft_sync_period > pbft_block_period) { pbft_syncing_state_->setPbftSyncing(false); diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/v4/pillar_vote_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/v4/pillar_vote_packet_handler.cpp index 5ef5f327e7..9676fc736a 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/v4/pillar_vote_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/v4/pillar_vote_packet_handler.cpp @@ -11,16 +11,25 @@ PillarVotePacketHandler::PillarVotePacketHandler(const FullNodeConfig &conf, std : ExtPillarVotePacketHandler(conf, std::move(peers_state), std::move(packets_stats), std::move(pillar_chain_manager), node_addr, logs_prefix + "PILLAR_VOTE_PH") {} -void PillarVotePacketHandler::process(PillarVotePacket &&packet, const std::shared_ptr &peer) { - if (!kConf.genesis.state.hardforks.ficus_hf.isFicusHardfork(packet.pillar_vote->getPeriod())) { +void PillarVotePacketHandler::validatePacketRlpFormat(const threadpool::PacketData &packet_data) const { + auto items = packet_data.rlp_.itemCount(); + if (items != PillarVote::kStandardRlpSize) { + throw InvalidRlpItemsCountException(packet_data.type_str_, items, PillarVote::kStandardRlpSize); + } +} + +void PillarVotePacketHandler::process(const threadpool::PacketData &packet_data, + const std::shared_ptr &peer) { + const auto pillar_vote = std::make_shared(packet_data.rlp_); + if (!kConf.genesis.state.hardforks.ficus_hf.isFicusHardfork(pillar_vote->getPeriod())) { std::ostringstream err_msg; - err_msg << "Pillar vote " << packet.pillar_vote->getHash() << ", period " << packet.pillar_vote->getPeriod() + err_msg << "Pillar vote " << pillar_vote->getHash() << ", period 
" << pillar_vote->getPeriod() << " < ficus hardfork block num"; throw MaliciousPeerException(err_msg.str()); } - if (processPillarVote(packet.pillar_vote, peer)) { - onNewPillarVote(packet.pillar_vote); + if (processPillarVote(pillar_vote, peer)) { + onNewPillarVote(pillar_vote); } } diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/v4/pillar_votes_bundle_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/v4/pillar_votes_bundle_packet_handler.cpp index 71e6224685..032239389d 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/v4/pillar_votes_bundle_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/v4/pillar_votes_bundle_packet_handler.cpp @@ -12,12 +12,20 @@ PillarVotesBundlePacketHandler::PillarVotesBundlePacketHandler( : ExtPillarVotePacketHandler(conf, std::move(peers_state), std::move(packets_stats), std::move(pillar_chain_manager), node_addr, logs_prefix + "PILLAR_VOTES_BUNDLE_PH") {} -void PillarVotesBundlePacketHandler::process(PillarVotesBundlePacket &&packet, +void PillarVotesBundlePacketHandler::validatePacketRlpFormat(const threadpool::PacketData &packet_data) const { + auto items = packet_data.rlp_.itemCount(); + if (items == 0 || items > kMaxPillarVotesInBundleRlp) { + throw InvalidRlpItemsCountException(packet_data.type_str_, items, kMaxPillarVotesInBundleRlp); + } +} + +void PillarVotesBundlePacketHandler::process(const threadpool::PacketData &packet_data, const std::shared_ptr &peer) { // TODO[2744]: there could be the same protection as in pbft syncing that only requested bundle packet is accepted LOG(log_dg_) << "PillarVotesBundlePacket received from peer " << peer->getId(); - for (const auto &pillar_vote : packet.pillar_votes) { + for (const auto vote_rlp : packet_data.rlp_) { + const auto pillar_vote = std::make_shared(vote_rlp); if (!kConf.genesis.state.hardforks.ficus_hf.isFicusHardfork(pillar_vote->getPeriod())) { std::ostringstream err_msg; err_msg 
<< "Synced pillar vote " << pillar_vote->getHash() << ", period " << pillar_vote->getPeriod() diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/v4/readme.md b/libraries/core_libs/network/src/tarcap/packets_handlers/v4/readme.md new file mode 100644 index 0000000000..3872e21f26 --- /dev/null +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/v4/readme.md @@ -0,0 +1,6 @@ +### Multiple taraxa capabilities support +- Derive new packet handlers with different logic than the original ones. +- +`!!! Important:` These handlers must be +directly on indirectly derived from the latest packets handlers, which are inside +`network/tarcap/packets_handlers/latest/` folder, otherwise network class would not work properly diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/v4/status_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/v4/status_packet_handler.cpp index 74267373e2..8847e0d4aa 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/v4/status_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/v4/status_packet_handler.cpp @@ -2,7 +2,7 @@ #include "config/version.hpp" #include "dag/dag.hpp" -#include "network/tarcap/packets_handlers/latest/common/ext_syncing_packet_handler.hpp" +#include "network/tarcap/packets_handlers/v4/common/ext_syncing_packet_handler.hpp" #include "network/tarcap/shared_states/pbft_syncing_state.hpp" #include "pbft/pbft_chain.hpp" #include "pbft/pbft_manager.hpp" @@ -21,82 +21,105 @@ StatusPacketHandler::StatusPacketHandler(const FullNodeConfig& conf, std::shared logs_prefix + "STATUS_PH"), kGenesisHash(genesis_hash) {} -void StatusPacketHandler::process(StatusPacket&& packet, const std::shared_ptr& peer) { +void StatusPacketHandler::validatePacketRlpFormat(const threadpool::PacketData& packet_data) const { + if (const auto items_count = packet_data.rlp_.itemCount(); + items_count != kInitialStatusPacketItemsCount && items_count != 
kStandardStatusPacketItemsCount) { + throw InvalidRlpItemsCountException(packet_data.type_str_, packet_data.rlp_.itemCount(), + kStandardStatusPacketItemsCount); + } +} + +void StatusPacketHandler::process(const threadpool::PacketData& packet_data, const std::shared_ptr& peer) { // Important !!! Use only "selected_peer" and not "peer" in this function as "peer" might be nullptr auto selected_peer = peer; const auto pbft_synced_period = pbft_mgr_->pbftSyncingPeriod(); // Initial status packet - if (packet.isInitialStatusPacket()) { + if (packet_data.rlp_.itemCount() == kInitialStatusPacketItemsCount) { if (!selected_peer) { - selected_peer = peers_state_->getPendingPeer(peer->getId()); + selected_peer = peers_state_->getPendingPeer(packet_data.from_node_id_); if (!selected_peer) { - LOG(log_wr_) << "Peer " << peer->getId().abridged() + LOG(log_wr_) << "Peer " << packet_data.from_node_id_.abridged() << " missing in both peers and pending peers map - will be disconnected."; - disconnect(peer->getId(), dev::p2p::UserReason); + disconnect(packet_data.from_node_id_, dev::p2p::UserReason); return; } } - if (*packet.peer_chain_id != kConf.genesis.chain_id) { + auto it = packet_data.rlp_.begin(); + auto const peer_chain_id = (*it++).toInt(); + auto const peer_dag_level = (*it++).toInt(); + auto const genesis_hash = (*it++).toHash(); + auto const peer_pbft_chain_size = (*it++).toInt(); + auto const peer_syncing = (*it++).toInt(); + auto const peer_pbft_round = (*it++).toInt(); + auto const node_major_version = (*it++).toInt(); + auto const node_minor_version = (*it++).toInt(); + auto const node_patch_version = (*it++).toInt(); + auto const is_light_node = (*it++).toInt(); + auto const node_history = (*it++).toInt(); + + if (peer_chain_id != kConf.genesis.chain_id) { LOG((peers_state_->getPeersCount()) ? 
log_nf_ : log_er_) - << "Incorrect network id " << *packet.peer_chain_id << ", host " << peer->getId().abridged() + << "Incorrect network id " << peer_chain_id << ", host " << packet_data.from_node_id_.abridged() << " will be disconnected"; - disconnect(peer->getId(), dev::p2p::UserReason); + disconnect(packet_data.from_node_id_, dev::p2p::UserReason); return; } - if (*packet.genesis_hash != kGenesisHash) { + if (genesis_hash != kGenesisHash) { LOG((peers_state_->getPeersCount()) ? log_nf_ : log_er_) - << "Incorrect genesis hash " << *packet.genesis_hash << ", host " << peer->getId().abridged() + << "Incorrect genesis hash " << genesis_hash << ", host " << packet_data.from_node_id_.abridged() << " will be disconnected"; - disconnect(peer->getId(), dev::p2p::UserReason); + disconnect(packet_data.from_node_id_, dev::p2p::UserReason); return; } // If this is a light node and it cannot serve our sync request disconnect from it - if (*packet.is_light_node) { + if (is_light_node) { selected_peer->peer_light_node = true; - selected_peer->peer_light_node_history = *packet.node_history; - if (pbft_synced_period + *packet.node_history < packet.peer_pbft_chain_size) { + selected_peer->peer_light_node_history = node_history; + if (pbft_synced_period + node_history < peer_pbft_chain_size) { LOG((peers_state_->getPeersCount()) ? log_nf_ : log_er_) - << "Light node " << peer->getId().abridged() << " would not be able to serve our syncing request. " - << "Current synced period " << pbft_synced_period << ", peer synced period " << packet.peer_pbft_chain_size - << ", peer light node history " << *packet.node_history << ". Peer will be disconnected"; - disconnect(peer->getId(), dev::p2p::UserReason); + << "Light node " << packet_data.from_node_id_.abridged() + << " would not be able to serve our syncing request. " + << "Current synced period " << pbft_synced_period << ", peer synced period " << peer_pbft_chain_size + << ", peer light node history " << node_history << ". 
Peer will be disconnected"; + disconnect(packet_data.from_node_id_, dev::p2p::UserReason); return; } } - selected_peer->dag_level_ = packet.peer_dag_level; - selected_peer->pbft_chain_size_ = packet.peer_pbft_chain_size; - selected_peer->syncing_ = packet.peer_syncing; - selected_peer->pbft_period_ = packet.peer_pbft_chain_size + 1; - selected_peer->pbft_round_ = packet.peer_pbft_round; + selected_peer->dag_level_ = peer_dag_level; + selected_peer->pbft_chain_size_ = peer_pbft_chain_size; + selected_peer->syncing_ = peer_syncing; + selected_peer->pbft_period_ = peer_pbft_chain_size + 1; + selected_peer->pbft_round_ = peer_pbft_round; - peers_state_->setPeerAsReadyToSendMessages(peer->getId(), selected_peer); + peers_state_->setPeerAsReadyToSendMessages(packet_data.from_node_id_, selected_peer); - LOG(log_dg_) << "Received initial status message from " << peer->getId() << ", network id " << *packet.peer_chain_id - << ", peer DAG max level " << selected_peer->dag_level_ << ", genesis " << *packet.genesis_hash - << ", peer pbft chain size " << selected_peer->pbft_chain_size_ << ", peer syncing " << std::boolalpha - << selected_peer->syncing_ << ", peer pbft period " << selected_peer->pbft_period_ - << ", peer pbft round " << selected_peer->pbft_round_ << ", node major version" - << *packet.node_major_version << ", node minor version" << *packet.node_minor_version - << ", node patch version" << *packet.node_patch_version; + LOG(log_dg_) << "Received initial status message from " << packet_data.from_node_id_ << ", network id " + << peer_chain_id << ", peer DAG max level " << selected_peer->dag_level_ << ", genesis " + << genesis_hash << ", peer pbft chain size " << selected_peer->pbft_chain_size_ << ", peer syncing " + << std::boolalpha << selected_peer->syncing_ << ", peer pbft period " << selected_peer->pbft_period_ + << ", peer pbft round " << selected_peer->pbft_round_ << ", node major version" << node_major_version + << ", node minor version" << node_minor_version 
<< ", node patch version" << node_patch_version; } else { // Standard status packet + // TODO: initial and standard status packet could be separated... if (!selected_peer) { - LOG(log_er_) << "Received standard status packet from " << peer->getId().abridged() + LOG(log_er_) << "Received standard status packet from " << packet_data.from_node_id_.abridged() << ", without previously received initial status packet. Will be disconnected"; - disconnect(peer->getId(), dev::p2p::UserReason); + disconnect(packet_data.from_node_id_, dev::p2p::UserReason); return; } - selected_peer->dag_level_ = packet.peer_dag_level; - selected_peer->pbft_chain_size_ = packet.peer_pbft_chain_size; + auto it = packet_data.rlp_.begin(); + selected_peer->dag_level_ = (*it++).toInt(); + selected_peer->pbft_chain_size_ = (*it++).toInt(); selected_peer->pbft_period_ = selected_peer->pbft_chain_size_ + 1; - selected_peer->syncing_ = packet.peer_syncing; - selected_peer->pbft_round_ = packet.peer_pbft_round; + selected_peer->syncing_ = (*it++).toInt(); + selected_peer->pbft_round_ = (*it++).toInt(); // TODO: Address malicious status if (!pbft_syncing_state_->isPbftSyncing()) { @@ -124,7 +147,7 @@ void StatusPacketHandler::process(StatusPacket&& packet, const std::shared_ptrlast_status_pbft_chain_size_ = selected_peer->pbft_chain_size_.load(); - LOG(log_dg_) << "Received status message from " << peer->getId() << ", peer DAG max level " + LOG(log_dg_) << "Received status message from " << packet_data.from_node_id_ << ", peer DAG max level " << selected_peer->dag_level_ << ", peer pbft chain size " << selected_peer->pbft_chain_size_ << ", peer syncing " << std::boolalpha << selected_peer->syncing_ << ", peer pbft round " << selected_peer->pbft_round_; @@ -146,16 +169,14 @@ bool StatusPacketHandler::sendStatus(const dev::p2p::NodeID& node_id, bool initi if (initial) { success = sealAndSend( node_id, SubprotocolPacketType::kStatusPacket, - // TODO[2865]: use packet class to automatically create rlp - 
std::move(dev::RLPStream(v4::StatusPacket::kInitialStatusPacketItemsCount) + std::move(dev::RLPStream(kInitialStatusPacketItemsCount) << kConf.genesis.chain_id << dag_max_level << kGenesisHash << pbft_chain_size << pbft_syncing_state_->isPbftSyncing() << pbft_round << TARAXA_MAJOR_VERSION << TARAXA_MINOR_VERSION << TARAXA_PATCH_VERSION << kConf.is_light_node << kConf.light_node_history)); } else { success = sealAndSend( node_id, SubprotocolPacketType::kStatusPacket, - // TODO[2865]: use packet class to automatically create rlp - std::move(dev::RLPStream(v4::StatusPacket::kStandardStatusPacketItemsCount) + std::move(dev::RLPStream(kStandardStatusPacketItemsCount) << dag_max_level << pbft_chain_size << pbft_syncing_state_->isDeepPbftSyncing() << pbft_round)); } diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/v4/transaction_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/v4/transaction_packet_handler.cpp index a9de4e0ba6..8c6d742a26 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/v4/transaction_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/v4/transaction_packet_handler.cpp @@ -14,15 +14,61 @@ TransactionPacketHandler::TransactionPacketHandler(const FullNodeConfig &conf, s : PacketHandler(conf, std::move(peers_state), std::move(packets_stats), node_addr, logs_prefix + "TRANSACTION_PH"), trx_mgr_(std::move(trx_mgr)) {} -inline void TransactionPacketHandler::process(TransactionPacket &&packet, const std::shared_ptr &peer) { - size_t unseen_txs_count = 0; - for (auto &transaction : packet.transactions) { +void TransactionPacketHandler::validatePacketRlpFormat(const threadpool::PacketData &packet_data) const { + auto items = packet_data.rlp_.itemCount(); + if (items != kTransactionPacketItemCount) { + throw InvalidRlpItemsCountException(packet_data.type_str_, items, kTransactionPacketItemCount); + } + auto hashes_count = packet_data.rlp_[0].itemCount(); + auto 
trx_count = packet_data.rlp_[1].itemCount(); + + if (hashes_count < trx_count) { + throw InvalidRlpItemsCountException(packet_data.type_str_, hashes_count, trx_count); + } + if (hashes_count == 0 || hashes_count > kMaxTransactionsInPacket + kMaxHashesInPacket) { + throw InvalidRlpItemsCountException(packet_data.type_str_, hashes_count, + kMaxTransactionsInPacket + kMaxHashesInPacket); + } + + if (trx_count > kMaxTransactionsInPacket) { + throw InvalidRlpItemsCountException(packet_data.type_str_, trx_count, kMaxTransactionsInPacket); + } +} + +inline void TransactionPacketHandler::process(const threadpool::PacketData &packet_data, + const std::shared_ptr &peer) { + std::vector received_transactions; + + const auto transaction_hashes_count = packet_data.rlp_[0].itemCount(); + const auto transaction_count = packet_data.rlp_[1].itemCount(); + received_transactions.reserve(transaction_count); + + std::vector trx_hashes; + trx_hashes.reserve(transaction_hashes_count); + + // First extract only transaction hashes + for (const auto trx_hash_rlp : packet_data.rlp_[0]) { + auto trx_hash = trx_hash_rlp.toHash(); + peer->markTransactionAsKnown(trx_hash); + trx_hashes.emplace_back(std::move(trx_hash)); + } + + for (size_t tx_idx = 0; tx_idx < transaction_count; tx_idx++) { + const auto &trx_hash = trx_hashes[tx_idx]; + // Skip any transactions that are already known to the trx mgr - if (trx_mgr_->isTransactionKnown(transaction->getHash())) { + if (trx_mgr_->isTransactionKnown(trx_hash)) { continue; } - unseen_txs_count++; + std::shared_ptr transaction; + // Deserialization is expensive, do it only for the transactions we are about to process + try { + transaction = std::make_shared(packet_data.rlp_[1][tx_idx].data().toBytes()); + received_transactions.emplace_back(trx_hash); + } catch (const Transaction::InvalidTransaction &e) { + throw MaliciousPeerException("Unable to parse transaction: " + std::string(e.what())); + } const auto [verified, reason] = 
trx_mgr_->verifyTransaction(transaction); if (!verified) { @@ -46,10 +92,10 @@ inline void TransactionPacketHandler::process(TransactionPacket &&packet, const } } - if (!packet.transactions.empty()) { - LOG(log_tr_) << "Received TransactionPacket with " << packet.transactions.size() << " transactions"; - LOG(log_dg_) << "Received TransactionPacket with " << packet.transactions.size() - << " unseen transactions:" << unseen_txs_count << " from: " << peer->getId().abridged(); + if (transaction_count > 0) { + LOG(log_tr_) << "Received TransactionPacket with " << packet_data.rlp_.itemCount() << " transactions"; + LOG(log_dg_) << "Received TransactionPacket with " << received_transactions.size() + << " unseen transactions:" << received_transactions << " from: " << peer->getId().abridged(); } } diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/v4/vote_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/v4/vote_packet_handler.cpp index bebd419e41..8a3771a1bc 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/v4/vote_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/v4/vote_packet_handler.cpp @@ -15,26 +15,44 @@ VotePacketHandler::VotePacketHandler(const FullNodeConfig &conf, std::shared_ptr std::move(pbft_chain), std::move(vote_mgr), std::move(slashing_manager), node_addr, logs_prefix + "PBFT_VOTE_PH") {} -void VotePacketHandler::process(VotePacket &&packet, const std::shared_ptr &peer) { +void VotePacketHandler::validatePacketRlpFormat([[maybe_unused]] const threadpool::PacketData &packet_data) const { + auto items = packet_data.rlp_.itemCount(); + // Vote packet can contain either just a vote or vote + block + peer_chain_size + if (items != kVotePacketSize && items != kExtendedVotePacketSize) { + throw InvalidRlpItemsCountException(packet_data.type_str_, items, kExtendedVotePacketSize); + } +} + +void VotePacketHandler::process(const threadpool::PacketData &packet_data, const 
std::shared_ptr &peer) { const auto [current_pbft_round, current_pbft_period] = pbft_mgr_->getPbftRoundAndPeriod(); - if (packet.pbft_block) { - LOG(log_dg_) << "Received PBFT vote " << packet.vote->getHash() << " with PBFT block " - << packet.pbft_block->getBlockHash(); + // Optional packet items + std::shared_ptr pbft_block{nullptr}; + std::optional peer_chain_size{}; + + std::shared_ptr vote = std::make_shared(packet_data.rlp_[0]); + if (const size_t item_count = packet_data.rlp_.itemCount(); item_count == kExtendedVotePacketSize) { + try { + pbft_block = std::make_shared(packet_data.rlp_[1]); + } catch (const std::exception &e) { + throw MaliciousPeerException(e.what()); + } + peer_chain_size = packet_data.rlp_[2].toInt(); + LOG(log_dg_) << "Received PBFT vote " << vote->getHash() << " with PBFT block " << pbft_block->getBlockHash(); } else { - LOG(log_dg_) << "Received PBFT vote " << packet.vote->getHash(); + LOG(log_dg_) << "Received PBFT vote " << vote->getHash(); } // Update peer's max chain size - if (packet.peer_chain_size.has_value() && *packet.peer_chain_size > peer->pbft_chain_size_) { - peer->pbft_chain_size_ = *packet.peer_chain_size; + if (peer_chain_size.has_value() && *peer_chain_size > peer->pbft_chain_size_) { + peer->pbft_chain_size_ = *peer_chain_size; } - const auto vote_hash = packet.vote->getHash(); + const auto vote_hash = vote->getHash(); - if (!isPbftRelevantVote(packet.vote)) { + if (!isPbftRelevantVote(vote)) { LOG(log_dg_) << "Drop irrelevant vote " << vote_hash << " for current pbft state. Vote (period, round, step) = (" - << packet.vote->getPeriod() << ", " << packet.vote->getRound() << ", " << packet.vote->getStep() + << vote->getPeriod() << ", " << vote->getRound() << ", " << vote->getStep() << "). 
Current PBFT (period, round, step) = (" << current_pbft_period << ", " << current_pbft_round << ", " << pbft_mgr_->getPbftStep() << ")"; return; @@ -46,26 +64,25 @@ void VotePacketHandler::process(VotePacket &&packet, const std::shared_ptrgetBlockHash() != packet.vote->getBlockHash()) { + if (pbft_block) { + if (pbft_block->getBlockHash() != vote->getBlockHash()) { std::ostringstream err_msg; - err_msg << "Vote " << packet.vote->getHash().abridged() << " voted block " - << packet.vote->getBlockHash().abridged() << " != actual block " - << packet.pbft_block->getBlockHash().abridged(); + err_msg << "Vote " << vote->getHash().abridged() << " voted block " << vote->getBlockHash().abridged() + << " != actual block " << pbft_block->getBlockHash().abridged(); throw MaliciousPeerException(err_msg.str()); } - peer->markPbftBlockAsKnown(packet.pbft_block->getBlockHash()); + peer->markPbftBlockAsKnown(pbft_block->getBlockHash()); } - if (!processVote(packet.vote, packet.pbft_block, peer, true)) { + if (!processVote(vote, pbft_block, peer, true)) { return; } // Do not mark it before, as peers have small caches of known votes. 
Only mark gossiping votes peer->markPbftVoteAsKnown(vote_hash); - pbft_mgr_->gossipVote(packet.vote, packet.pbft_block); + pbft_mgr_->gossipVote(vote, pbft_block); } void VotePacketHandler::onNewPbftVote(const std::shared_ptr &vote, const std::shared_ptr &block, @@ -100,14 +117,12 @@ void VotePacketHandler::sendPbftVote(const std::shared_ptr &peer, co dev::RLPStream s; if (block) { - // TODO[2865]: use packet class to automatically create rlp - s = dev::RLPStream(v4::VotePacket::kExtendedVotePacketSize); + s = dev::RLPStream(kExtendedVotePacketSize); s.appendRaw(vote->rlp(true, false)); s.appendRaw(block->rlp(true)); s.append(pbft_chain_->getPbftChainSize()); } else { - // TODO[2865]: use packet class to automatically create rlp - s = dev::RLPStream(v4::VotePacket::kVotePacketSize); + s = dev::RLPStream(kVotePacketSize); s.appendRaw(vote->rlp(true, false)); } diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/v4/votes_bundle_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/v4/votes_bundle_packet_handler.cpp index 0eb5c50221..86c8241a12 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/v4/votes_bundle_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/v4/votes_bundle_packet_handler.cpp @@ -17,27 +17,48 @@ VotesBundlePacketHandler::VotesBundlePacketHandler(const FullNodeConfig &conf, s std::move(pbft_chain), std::move(vote_mgr), std::move(slashing_manager), node_addr, logs_prefix + "VOTES_BUNDLE_PH") {} -void VotesBundlePacketHandler::process(VotesBundlePacket &&packet, const std::shared_ptr &peer) { +void VotesBundlePacketHandler::validatePacketRlpFormat( + [[maybe_unused]] const threadpool::PacketData &packet_data) const { + auto items = packet_data.rlp_.itemCount(); + if (items != kPbftVotesBundleRlpSize) { + throw InvalidRlpItemsCountException(packet_data.type_str_, items, kPbftVotesBundleRlpSize); + } + + auto votes_count = packet_data.rlp_[kPbftVotesBundleRlpSize - 
1].itemCount(); + if (votes_count == 0 || votes_count > kMaxVotesInBundleRlp) { + throw InvalidRlpItemsCountException(packet_data.type_str_, items, kMaxVotesInBundleRlp); + } +} + +void VotesBundlePacketHandler::process(const threadpool::PacketData &packet_data, + const std::shared_ptr &peer) { const auto [current_pbft_round, current_pbft_period] = pbft_mgr_->getPbftRoundAndPeriod(); - const auto &reference_vote = packet.votes.front(); + const auto votes_bundle_block_hash = packet_data.rlp_[0].toHash(); + const auto votes_bundle_pbft_period = packet_data.rlp_[1].toInt(); + const auto votes_bundle_pbft_round = packet_data.rlp_[2].toInt(); + const auto votes_bundle_votes_step = packet_data.rlp_[3].toInt(); + + const auto &reference_vote = + std::make_shared(votes_bundle_block_hash, votes_bundle_pbft_period, votes_bundle_pbft_round, + votes_bundle_votes_step, packet_data.rlp_[4][0]); const auto votes_bundle_votes_type = reference_vote->getType(); // Votes sync bundles are allowed to cotain only votes bundles of the same type, period, round and step so if first // vote is irrelevant, all of them are - if (!isPbftRelevantVote(packet.votes[0])) { + if (!isPbftRelevantVote(reference_vote)) { LOG(log_wr_) << "Drop votes sync bundle as it is irrelevant for current pbft state. Votes (period, round, step) = (" - << packet.votes_bundle_pbft_period << ", " << packet.votes_bundle_pbft_round << ", " - << reference_vote->getStep() << "). Current PBFT (period, round, step) = (" << current_pbft_period - << ", " << current_pbft_round << ", " << pbft_mgr_->getPbftStep() << ")"; + << votes_bundle_pbft_period << ", " << votes_bundle_pbft_round << ", " << reference_vote->getStep() + << "). 
Current PBFT (period, round, step) = (" << current_pbft_period << ", " << current_pbft_round + << ", " << pbft_mgr_->getPbftStep() << ")"; return; } // VotesBundlePacket does not support propose votes if (reference_vote->getType() == PbftVoteTypes::propose_vote) { - LOG(log_er_) << "Dropping votes bundle packet due to received \"propose\" votes from " << peer->getId() + LOG(log_er_) << "Dropping votes bundle packet due to received \"propose\" votes from " << packet_data.from_node_id_ << ". The peer may be a malicious player, will be disconnected"; - disconnect(peer->getId(), dev::p2p::UserReason); + disconnect(packet_data.from_node_id_, dev::p2p::UserReason); return; } @@ -48,8 +69,10 @@ void VotesBundlePacketHandler::process(VotesBundlePacket &&packet, const std::sh check_max_round_step = false; } - size_t processed_votes_count = 0; - for (const auto &vote : packet.votes) { + std::vector> votes; + for (const auto vote_rlp : packet_data.rlp_[4]) { + auto vote = std::make_shared(votes_bundle_block_hash, votes_bundle_pbft_period, votes_bundle_pbft_round, + votes_bundle_votes_step, vote_rlp); peer->markPbftVoteAsKnown(vote->getHash()); // Do not process vote that has already been validated @@ -64,14 +87,14 @@ void VotesBundlePacketHandler::process(VotesBundlePacket &&packet, const std::sh continue; } - processed_votes_count++; + votes.push_back(std::move(vote)); } - LOG(log_nf_) << "Received " << packet.votes.size() << " (processed " << processed_votes_count - << " ) sync votes from peer " << peer->getId() << " node current round " << current_pbft_round - << ", peer pbft round " << packet.votes_bundle_pbft_round; + LOG(log_nf_) << "Received " << packet_data.rlp_[4].itemCount() << " (processed " << votes.size() + << " ) sync votes from peer " << packet_data.from_node_id_ << " node current round " + << current_pbft_round << ", peer pbft round " << votes_bundle_pbft_round; - onNewPbftVotesBundle(packet.votes, false, peer->getId()); + onNewPbftVotesBundle(votes, false, 
packet_data.from_node_id_); } void VotesBundlePacketHandler::onNewPbftVotesBundle(const std::vector> &votes, diff --git a/libraries/core_libs/network/src/tarcap/taraxa_capability.cpp b/libraries/core_libs/network/src/tarcap/taraxa_capability.cpp index 7a43004033..bee9e177f9 100644 --- a/libraries/core_libs/network/src/tarcap/taraxa_capability.cpp +++ b/libraries/core_libs/network/src/tarcap/taraxa_capability.cpp @@ -19,6 +19,9 @@ #include "network/tarcap/packets_handlers/v4/dag_block_packet_handler.hpp" #include "network/tarcap/packets_handlers/v4/dag_sync_packet_handler.hpp" #include "network/tarcap/packets_handlers/v4/get_dag_sync_packet_handler.hpp" +#include "network/tarcap/packets_handlers/v4/get_next_votes_bundle_packet_handler.hpp" +#include "network/tarcap/packets_handlers/v4/get_pbft_sync_packet_handler.hpp" +#include "network/tarcap/packets_handlers/v4/get_pillar_votes_bundle_packet_handler.hpp" #include "network/tarcap/packets_handlers/v4/pbft_sync_packet_handler.hpp" #include "network/tarcap/packets_handlers/v4/pillar_vote_packet_handler.hpp" #include "network/tarcap/packets_handlers/v4/pillar_votes_bundle_packet_handler.hpp" @@ -313,7 +316,8 @@ const TaraxaCapability::InitPacketsHandlers TaraxaCapability::kInitV4Handlers = // Consensus packets with high processing priority packets_handlers->registerHandler(config, peers_state, packets_stats, pbft_mgr, pbft_chain, vote_mgr, slashing_manager, node_addr, logs_prefix); - packets_handlers->registerHandler( + packets_handlers->registerHandler( + config, peers_state, packets_stats, pbft_mgr, pbft_chain, vote_mgr, slashing_manager, node_addr, logs_prefix); packets_handlers->registerHandler( config, peers_state, packets_stats, pbft_mgr, pbft_chain, vote_mgr, slashing_manager, node_addr, logs_prefix); @@ -337,7 +341,7 @@ const TaraxaCapability::InitPacketsHandlers TaraxaCapability::kInitV4Handlers = pbft_syncing_state, pbft_chain, pbft_mgr, dag_mgr, trx_mgr, db, node_addr, logs_prefix); - 
packets_handlers->registerHandler( + packets_handlers->registerHandler( config, peers_state, packets_stats, pbft_syncing_state, pbft_chain, vote_mgr, db, node_addr, logs_prefix); packets_handlers->registerHandler(config, peers_state, packets_stats, @@ -345,8 +349,8 @@ const TaraxaCapability::InitPacketsHandlers TaraxaCapability::kInitV4Handlers = vote_mgr, db, node_addr, logs_prefix); packets_handlers->registerHandler(config, peers_state, packets_stats, pillar_chain_mgr, node_addr, logs_prefix); - packets_handlers->registerHandler(config, peers_state, packets_stats, - pillar_chain_mgr, node_addr, logs_prefix); + packets_handlers->registerHandler( + config, peers_state, packets_stats, pillar_chain_mgr, node_addr, logs_prefix); packets_handlers->registerHandler(config, peers_state, packets_stats, pillar_chain_mgr, node_addr, logs_prefix); From 6558e338d7517648355c1a4964e2d50c6b8157dd Mon Sep 17 00:00:00 2001 From: Jakub Fornadel Date: Mon, 21 Oct 2024 12:30:36 +0200 Subject: [PATCH 074/105] fix tarcap threadpool test --- tests/tarcap_threadpool_test.cpp | 2148 +++++++++++++++--------------- 1 file changed, 1056 insertions(+), 1092 deletions(-) diff --git a/tests/tarcap_threadpool_test.cpp b/tests/tarcap_threadpool_test.cpp index 8da3a3a005..1801631288 100644 --- a/tests/tarcap_threadpool_test.cpp +++ b/tests/tarcap_threadpool_test.cpp @@ -1,1092 +1,1056 @@ -// #include -// -// #include "config/config.hpp" -// #include "config/version.hpp" -// #include "dag/dag_block.hpp" -// #include "logger/logger.hpp" -// #include "network/tarcap/packets_handler.hpp" -// #include "network/tarcap/packets_handlers/latest/common/packet_handler.hpp" -// #include "network/tarcap/shared_states/peers_state.hpp" -// #include "network/threadpool/tarcap_thread_pool.hpp" -// #include "test_util/test_util.hpp" -// -// namespace taraxa::core_tests { -// -// using namespace std::literals; -// -//// Do not use NodesTest from "test_util/gtest.hpp" as its functionality is not needed in this test 
-// struct NodesTest : virtual testing::Test { -// testing::UnitTest* current_test = ::testing::UnitTest::GetInstance(); -// testing::TestInfo const* current_test_info = current_test->current_test_info(); -// -// NodesTest() = default; -// virtual ~NodesTest() = default; -// -// NodesTest(const NodesTest&) = delete; -// NodesTest(NodesTest&&) = delete; -// NodesTest& operator=(const NodesTest&) = delete; -// NodesTest& operator=(NodesTest&&) = delete; -// }; -// -// struct TarcapTpTest : NodesTest {}; -// -// using namespace taraxa::network; -// -// class PacketsProcessingInfo { -// public: -// struct PacketProcessingTimes { -// std::chrono::steady_clock::time_point start_time_; -// std::chrono::steady_clock::time_point finish_time_; -// }; -// -// public: -// void addPacketProcessingTimes(threadpool::PacketData::PacketId packet_id, -// const PacketProcessingTimes& packet_processing_times) { -// std::scoped_lock lock(mutex_); -// bool res = packets_processing_times_.emplace(packet_id, packet_processing_times).second; -// assert(res); -// } -// -// PacketProcessingTimes getPacketProcessingTimes(threadpool::PacketData::PacketId packet_id) const { -// std::shared_lock lock(mutex_); -// -// auto found_packet_info = packets_processing_times_.find(packet_id); -// -// // Failed to obtain processing times for packet id: packet_id. Processing did not finish yet. 
This should be -// // caught in processing times comparing -// if (found_packet_info == packets_processing_times_.end()) { -// return {}; -// } -// -// return found_packet_info->second; -// } -// -// size_t getPacketProcessingTimesCount() const { -// std::shared_lock lock(mutex_); -// return packets_processing_times_.size(); -// } -// -// private: -// std::unordered_map packets_processing_times_; -// mutable std::shared_mutex mutex_; -// }; -// -//// Help functions for tests -// struct HandlersInitData { -// FullNodeConfig conf; -// dev::p2p::NodeID sender_node_id; -// addr_t own_node_addr; -// -// std::shared_ptr peers_state; -// std::shared_ptr packets_stats; -// std::shared_ptr packets_processing_info; -// -// dev::p2p::NodeID copySender() { return sender_node_id; } -// }; -// -// struct DummyPacket { -// DummyPacket(const dev::RLP& packet_rlp) {} -// }; -// -// class DummyPacketHandler : public tarcap::PacketHandler { -// public: -// DummyPacketHandler(const HandlersInitData& init_data, const std::string& log_channel_name, -// uint32_t processing_delay_ms) -// : PacketHandler(init_data.conf, init_data.peers_state, init_data.packets_stats, init_data.own_node_addr, -// log_channel_name), -// processing_delay_ms_(processing_delay_ms), -// packets_proc_info_(init_data.packets_processing_info) {} -// -// virtual ~DummyPacketHandler() = default; -// DummyPacketHandler(const DummyPacketHandler&) = default; -// DummyPacketHandler(DummyPacketHandler&&) = default; -// DummyPacketHandler& operator=(const DummyPacketHandler&) = delete; -// DummyPacketHandler& operator=(DummyPacketHandler&&) = delete; -// -// private: -// void validatePacketRlpFormat([[maybe_unused]] const threadpool::PacketData& packet_data) const override {} -// -// void process(DummyPacket&& packet_data, -// [[maybe_unused]] const std::shared_ptr& peer) override { -// // Note do not use LOG() before saving start & finish time as it is internally synchronized and can -// // cause delays, which result in 
tests fails -// auto start_time = std::chrono::steady_clock::now(); -// std::this_thread::sleep_for(std::chrono::milliseconds(processing_delay_ms_)); -// auto finish_time = std::chrono::steady_clock::now(); -// -// LOG(log_dg_) << "Processing packet: " << packet_data.type_str_ << ", id(" << packet_data.id_ << ") finished. " -// << "Start time: " << start_time.time_since_epoch().count() -// << ", finish time: " << finish_time.time_since_epoch().count(); -// -// packets_proc_info_->addPacketProcessingTimes(packet_data.id_, {start_time, finish_time}); -// } -// -// uint32_t processing_delay_ms_{0}; -// std::shared_ptr packets_proc_info_; -// }; -// -// class DummyTransactionPacketHandler : public DummyPacketHandler { -// public: -// DummyTransactionPacketHandler(const HandlersInitData& init_data, const std::string& log_channel_name, -// uint32_t processing_delay_ms) -// : DummyPacketHandler(init_data, log_channel_name, processing_delay_ms) {} -// -// // Packet type that is processed by this handler -// static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::kTransactionPacket; -// }; -// -// class DummyDagBlockPacketHandler : public DummyPacketHandler { -// public: -// DummyDagBlockPacketHandler(const HandlersInitData& init_data, const std::string& log_channel_name, -// uint32_t processing_delay_ms) -// : DummyPacketHandler(init_data, log_channel_name, processing_delay_ms) {} -// -// // Packet type that is processed by this handler -// static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::kDagBlockPacket; -// }; -// -// class DummyStatusPacketHandler : public DummyPacketHandler { -// public: -// DummyStatusPacketHandler(const HandlersInitData& init_data, const std::string& log_channel_name, -// uint32_t processing_delay_ms) -// : DummyPacketHandler(init_data, log_channel_name, processing_delay_ms) {} -// -// // Packet type that is processed by this handler -// static constexpr SubprotocolPacketType kPacketType_ = 
SubprotocolPacketType::kStatusPacket; -// }; -// -// class DummyVotePacketHandler : public DummyPacketHandler { -// public: -// DummyVotePacketHandler(const HandlersInitData& init_data, const std::string& log_channel_name, -// uint32_t processing_delay_ms) -// : DummyPacketHandler(init_data, log_channel_name, processing_delay_ms) {} -// -// // Packet type that is processed by this handler -// static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::kVotePacket; -// }; -// -// class DummyGetNextVotesBundlePacketHandler : public DummyPacketHandler { -// public: -// DummyGetNextVotesBundlePacketHandler(const HandlersInitData& init_data, const std::string& log_channel_name, -// uint32_t processing_delay_ms) -// : DummyPacketHandler(init_data, log_channel_name, processing_delay_ms) {} -// -// // Packet type that is processed by this handler -// static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::kGetNextVotesSyncPacket; -// }; -// -// class DummyVotesBundlePacketHandler : public DummyPacketHandler { -// public: -// DummyVotesBundlePacketHandler(const HandlersInitData& init_data, const std::string& log_channel_name, -// uint32_t processing_delay_ms) -// : DummyPacketHandler(init_data, log_channel_name, processing_delay_ms) {} -// -// // Packet type that is processed by this handler -// static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::kVotesBundlePacket; -// }; -// -// class DummyGetDagSyncPacketHandler : public DummyPacketHandler { -// public: -// DummyGetDagSyncPacketHandler(const HandlersInitData& init_data, const std::string& log_channel_name, -// uint32_t processing_delay_ms) -// : DummyPacketHandler(init_data, log_channel_name, processing_delay_ms) {} -// -// // Packet type that is processed by this handler -// static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::kGetDagSyncPacket; -// }; -// -// class DummyGetPbftSyncPacketHandler : public DummyPacketHandler { -// 
public: -// DummyGetPbftSyncPacketHandler(const HandlersInitData& init_data, const std::string& log_channel_name, -// uint32_t processing_delay_ms) -// : DummyPacketHandler(init_data, log_channel_name, processing_delay_ms) {} -// -// // Packet type that is processed by this handler -// static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::kGetPbftSyncPacket; -// }; -// -// class DummyDagSyncPacketHandler : public DummyPacketHandler { -// public: -// DummyDagSyncPacketHandler(const HandlersInitData& init_data, const std::string& log_channel_name, -// uint32_t processing_delay_ms) -// : DummyPacketHandler(init_data, log_channel_name, processing_delay_ms) {} -// -// // Packet type that is processed by this handler -// static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::kDagSyncPacket; -// }; -// -// class DummyPbftSyncPacketHandler : public DummyPacketHandler { -// public: -// DummyPbftSyncPacketHandler(const HandlersInitData& init_data, const std::string& log_channel_name, -// uint32_t processing_delay_ms) -// : DummyPacketHandler(init_data, log_channel_name, processing_delay_ms) {} -// -// // Packet type that is processed by this handler -// static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::kPbftSyncPacket; -// }; -// -// HandlersInitData createHandlersInitData() { -// HandlersInitData ret_init_data; -// -// ret_init_data.sender_node_id = dev::p2p::NodeID(1); -// ret_init_data.own_node_addr = addr_t(2); -// ret_init_data.peers_state = std::make_shared(std::weak_ptr(), -// FullNodeConfig()); ret_init_data.packets_stats = -// std::make_shared(std::chrono::milliseconds(0), ret_init_data.own_node_addr); -// ret_init_data.packets_processing_info = std::make_shared(); -// -// // Enable packets from sending peer to be processed -// auto peer = ret_init_data.peers_state->addPendingPeer(ret_init_data.sender_node_id, ""); -// 
ret_init_data.peers_state->setPeerAsReadyToSendMessages(ret_init_data.sender_node_id, peer); -// -// return ret_init_data; -// } -// -// std::pair createPacket( -// const dev::p2p::NodeID& sender_node_id, SubprotocolPacketType packet_type, -// std::optional> packet_rlp_bytes = {}) { -// if (packet_rlp_bytes.has_value()) { -// threadpool::PacketData packet_data(packet_type, sender_node_id, std::move(packet_rlp_bytes.value())); -// return {TARAXA_NET_VERSION, std::move(packet_data)}; -// } -// -// dev::RLPStream s(0); -// threadpool::PacketData packet_data(packet_type, sender_node_id, s.invalidate()); -// return {TARAXA_NET_VERSION, std::move(packet_data)}; -// } -// -// bytes createDagBlockRlp(level_t level, uint32_t sig = 777) { -// // Creates dag block rlp as it is required for blocking mask to extract dag block level -// DagBlock blk(blk_hash_t(10), level, {}, {}, sig_t(sig), blk_hash_t(1), addr_t(15)); -// return blk.rlp(true); -// } -// -///** -// * @brief Check all combinations(without repetition) of provided packets that they were processed concurrently: -// * - packet1.start_time < packet2.finish_time -// * - packet2.start_time < packet1.finish_time -// * -// * @param packets -// */ -// void checkConcurrentProcessing( -// const std::vector>& packets) { -// assert(packets.size() >= 2); -// -// for (size_t i = 0; i < packets.size(); i++) { -// const auto& packet_l = packets[0]; -// for (size_t j = i + 1; j < packets.size(); j++) { -// const auto& packet_r = packets[j]; -// EXPECT_LT(packet_l.first.start_time_, packet_r.first.finish_time_) -// << packet_l.second << ".start_time < " << packet_r.second << ".finish_time"; -// EXPECT_LT(packet_r.first.start_time_, packet_l.first.finish_time_) -// << packet_r.second << ".start_time < " << packet_l.second << ".finish_time"; -// } -// } -//} -// -///** -// * @brief Check all combinations(without repetition) of provided packets that they were processed serial: -// * - packet1.finish_time < packet2.start_time -// * -// 
* @param packets -// */ -// void checkSerialProcessing( -// const std::vector>& packets) { -// assert(packets.size() >= 2); -// -// for (size_t i = 0; i < packets.size(); i++) { -// const auto& packet_l = packets[0]; -// for (size_t j = i + 1; j < packets.size(); j++) { -// const auto& packet_r = packets[j]; -// EXPECT_LT(packet_l.first.finish_time_, packet_r.first.start_time_) -// << packet_l.second << ".finish_time < " << packet_r.second << ".start_time"; -// } -// } -//} -// -// size_t queuesSize(const threadpool::PacketsThreadPool& tp) { -// const auto [high_priority_queue_size, mid_priority_queue_size, low_priority_queue_size] = tp.getQueueSize(); -// -// return high_priority_queue_size + mid_priority_queue_size + low_priority_queue_size; -//} -// -//// Threshold for packets queue to be emptied -// constexpr std::chrono::milliseconds QUEUE_EMPTIED_WAIT_TRESHOLD_MS = 15ms; -// -//// Test all packet types if they are either in non-blocking or blocking list of packets -// TEST_F(TarcapTpTest, packets_blocking_dependencies) { -// network::threadpool::PriorityQueue priority_queue(3); -// -// for (auto packet_type = SubprotocolPacketType{0}; packet_type != SubprotocolPacketType::kPacketCount; -// packet_type = static_cast(static_cast(packet_type) + 1)) { -// // Skip unreal packet types -// switch (packet_type) { -// case SubprotocolPacketType::kHighPriorityPackets: -// case SubprotocolPacketType::kMidPriorityPackets: -// case SubprotocolPacketType::kLowPriorityPackets: -// case SubprotocolPacketType::kPacketCount: -// continue; -// } -// -// std::vector packet_bytes; -// -// // Generate proper rlp for packets that need it for processing -// if (packet_type == SubprotocolPacketType::kDagBlockPacket) { -// DagBlock blk(blk_hash_t(1), 1, {}, {trx_hash_t(1), trx_hash_t(2)}, sig_t(3), blk_hash_t(0x4), addr_t(5)); -// packet_bytes = blk.rlp(true); -// } -// network::threadpool::PacketData packet_data{packet_type, {}, std::move(packet_bytes)}; -// packet_data.id_ = 
static_cast(packet_type); -// -// bool is_non_blocking_packet = priority_queue.isNonBlockingPacket(packet_data.type_); -// bool is_blocking_packet = priority_queue.updateBlockingDependencies(packet_data); -// -// EXPECT_TRUE(is_non_blocking_packet != is_blocking_packet); -// } -// } -// -//// Test if all "block-free" packets are processed concurrently -//// Note: in case someone creates new blocking dependency and does not adjust tests, this test should fail -// TEST_F(TarcapTpTest, block_free_packets) { -// HandlersInitData init_data = createHandlersInitData(); -// -// // Creates sender 2 to bypass peer order block on Transaction -> DagBlock packet. In case those packets sent -// // 2 different senders those packets are "block-free" -// dev::p2p::NodeID sender2(3); -// auto peer = init_data.peers_state->addPendingPeer(sender2, ""); -// init_data.peers_state->setPeerAsReadyToSendMessages(sender2, peer); -// -// auto packets_handler = std::make_shared(); -// -// packets_handler->registerHandler(init_data, "TX_PH", 20); -// packets_handler->registerHandler(init_data, "DAG_BLOCK_PH", 20); -// packets_handler->registerHandler(init_data, "STATUS_PH", 20); -// packets_handler->registerHandler(init_data, "VOTE_PH", 20); -// packets_handler->registerHandler(init_data, "GET_NEXT_VOTES_SYNC_PH", 20); -// packets_handler->registerHandler(init_data, "VOTES_SYNC_PH", 20); -// -// // Creates threadpool -// // Note: make num of threads >= num of packets to check if they are processed concurrently without blocks, -// otherwise -// // some blocks would be blocked for processing due to max threads limit -// threadpool::PacketsThreadPool tp(18); -// tp.setPacketsHandlers(TARAXA_NET_VERSION, packets_handler); -// -// // Pushes packets to the tp -// auto packet = createPacket(init_data.copySender(), SubprotocolPacketType::kTransactionPacket, {}); -// if (packet.second.rlp_.isList()) { -// std::cout << "is list"; -// } else { -// std::cout << "not list"; -// } -// const auto 
packet0_tx_id = -// tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kTransactionPacket, {})).value(); -// const auto packet1_tx_id = -// tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kTransactionPacket, {})).value(); -// const auto packet2_tx_id = -// tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kTransactionPacket, {})).value(); -// const auto packet3_tx_id = -// tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kTransactionPacket, {})).value(); -// -// const auto packet4_dag_block_id = -// tp.push( -// createPacket(dev::p2p::NodeID(sender2), SubprotocolPacketType::kDagBlockPacket, {createDagBlockRlp(0, -// 1)})) -// .value(); -// const auto packet5_dag_block_id = -// tp.push( -// createPacket(dev::p2p::NodeID(sender2), SubprotocolPacketType::kDagBlockPacket, {createDagBlockRlp(0, -// 2)})) -// .value(); -// -// const auto packet8_status_id = -// tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kStatusPacket, {})).value(); -// const auto packet9_status_id = -// tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kStatusPacket, {})).value(); -// -// const auto packet12_vote_id = -// tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kVotePacket, {})).value(); -// const auto packet13_vote_id = -// tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kVotePacket, {})).value(); -// -// const auto packet14_get_pbft_next_votes_id = -// tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kGetNextVotesSyncPacket, {})).value(); -// const auto packet15_get_pbft_next_votes_id = -// tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kGetNextVotesSyncPacket, {})).value(); -// -// const auto packet16_pbft_next_votes_id = -// tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kVotesBundlePacket, {})).value(); -// -// size_t packets_count = 0; -// const auto 
packet17_pbft_next_votes_id = packets_count = -// tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kVotesBundlePacket, {})).value(); -// -// tp.startProcessing(); -// -// // How should packets be processed: -// // Note: To understand how are different packet types processed (concurrently without any blocking dependencies or -// // synchronously due to some blocking dependencies - depends on situation), check -// // PriorityQueue::updateDependenciesStart -// /* -// ---------------------- -// - packet0_transaction - -// ---------------------- -// ---------------------- -// - packet1_transaction - -// ---------------------- -// ----------------------- -// - packet2_transaction - -// ----------------------- -// -// -||- -// ... -// -// ----------------------- -// - packet17_votes_sync - -// ----------------------- -// 0.....................20.................... time [ms] -// */ -// -// // All packets should be already being processed after short amount of time -// std::this_thread::sleep_for(QUEUE_EMPTIED_WAIT_TRESHOLD_MS); -// EXPECT_EQ(queuesSize(tp), 0); -// -// // Wait until processing of all packets is finished - in some edge cases it might be little bit delayed due to -// locking EXPECT_HAPPENS({500s, 20ms}, [&](auto& ctx) { -// // Check if transactions was propagated to node0 -// WAIT_EXPECT_EQ(ctx, init_data.packets_processing_info->getPacketProcessingTimesCount(), packets_count + 1) -// }); -// -// // Check order of packets how they were processed -// const auto packets_proc_info = init_data.packets_processing_info; -// -// const auto packet0_tx_proc_info = packets_proc_info->getPacketProcessingTimes(packet0_tx_id); -// const auto packet1_tx_proc_info = packets_proc_info->getPacketProcessingTimes(packet1_tx_id); -// const auto packet2_tx_proc_info = packets_proc_info->getPacketProcessingTimes(packet2_tx_id); -// const auto packet3_tx_proc_info = packets_proc_info->getPacketProcessingTimes(packet3_tx_id); -// -// const auto 
packet4_dag_block_proc_info = packets_proc_info->getPacketProcessingTimes(packet4_dag_block_id); -// const auto packet5_dag_block_proc_info = packets_proc_info->getPacketProcessingTimes(packet5_dag_block_id); -// -// const auto packet8_status_proc_info = packets_proc_info->getPacketProcessingTimes(packet8_status_id); -// const auto packet9_status_proc_info = packets_proc_info->getPacketProcessingTimes(packet9_status_id); -// -// const auto packet12_vote_proc_info = packets_proc_info->getPacketProcessingTimes(packet12_vote_id); -// const auto packet13_vote_proc_info = packets_proc_info->getPacketProcessingTimes(packet13_vote_id); -// -// const auto packet14_get_pbft_next_votes_proc_info = -// packets_proc_info->getPacketProcessingTimes(packet14_get_pbft_next_votes_id); -// const auto packet15_get_pbft_next_votes_proc_info = -// packets_proc_info->getPacketProcessingTimes(packet15_get_pbft_next_votes_id); -// -// const auto packet16_pbft_next_votes_proc_info = -// packets_proc_info->getPacketProcessingTimes(packet16_pbft_next_votes_id); -// const auto packet17_pbft_next_votes_proc_info = -// packets_proc_info->getPacketProcessingTimes(packet17_pbft_next_votes_id); -// -// checkConcurrentProcessing({ -// {packet0_tx_proc_info, "packet0_tx"}, -// {packet1_tx_proc_info, "packet1_tx"}, -// {packet2_tx_proc_info, "packet2_tx"}, -// {packet3_tx_proc_info, "packet3_tx"}, -// {packet4_dag_block_proc_info, "packet4_dag_block"}, -// {packet5_dag_block_proc_info, "packet5_dag_block"}, -// {packet8_status_proc_info, "packet8_status"}, -// {packet9_status_proc_info, "packet9_status"}, -// {packet12_vote_proc_info, "packet12_vote"}, -// {packet13_vote_proc_info, "packet13_vote"}, -// {packet14_get_pbft_next_votes_proc_info, "packet14_get_pbft_next_votes"}, -// {packet15_get_pbft_next_votes_proc_info, "packet15_get_pbft_next_votes"}, -// {packet16_pbft_next_votes_proc_info, "packet16_pbft_next_votes"}, -// {packet17_pbft_next_votes_proc_info, "packet17_pbft_next_votes"}, -// }); 
-// } -// -//// Test "hard blocking dependencies" related synchronous processing of certain packets: -//// -//// Packets types that are currently hard blocked for processing in another threads due to dependencies, -//// e.g. syncing packets must be processed synchronously one by one, etc... -//// Each packet type might be simultaneously blocked by multiple different packets that are being processed. -// TEST_F(TarcapTpTest, hard_blocking_deps) { -// HandlersInitData init_data = createHandlersInitData(); -// -// auto packets_handler = std::make_shared(); -// packets_handler->registerHandler(init_data, "GET_DAG_SYNC_PH", 20); -// packets_handler->registerHandler(init_data, "GET_PBFT_SYNC_PH", 20); -// packets_handler->registerHandler(init_data, "DAG_SYNC_PH", 20); -// packets_handler->registerHandler(init_data, "PBFT_SYNC_PH", 20); -// -// // Creates threadpool -// threadpool::PacketsThreadPool tp(10); -// tp.setPacketsHandlers(TARAXA_NET_VERSION, packets_handler); -// -// // Pushes packets to the tp -// const auto packet0_dag_sync_id = -// tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kDagSyncPacket, {})).value(); -// const auto packet1_dag_sync_id = -// tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kDagSyncPacket, {})).value(); -// const auto packet2_get_dag_sync_id = -// tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kGetDagSyncPacket, {})).value(); -// const auto packet3_get_dag_sync_id = -// tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kGetDagSyncPacket, {})).value(); -// const auto packet4_get_pbft_sync_id = -// tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kGetPbftSyncPacket, {})).value(); -// const auto packet5_get_pbft_sync_id = -// tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kGetPbftSyncPacket, {})).value(); -// const auto packet6_pbft_sync_id = -// tp.push(createPacket(init_data.copySender(), 
SubprotocolPacketType::kPbftSyncPacket, {})).value(); -// const auto packet7_pbft_sync_id = -// tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kPbftSyncPacket, {})).value(); -// -// size_t packets_count = 0; -// const auto packet8_get_dag_sync_id = packets_count = -// tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kGetDagSyncPacket, {})).value(); -// -// tp.startProcessing(); -// -// // How should packets be processed: -// // Note: To understand how are different packet types processed (concurrently without any blocking dependencies or -// // synchronously due to some blocking dependencies - depends on situation), check -// // PriorityQueue::updateDependenciesStart -// /* -// ------------------------ -// --- packet0_dag_sync --- -// ------------------------ -// ------------------------ -// --- packet1_dag_sync --- -// ------------------------ -// ------------------------- -// -- packet2_get_dag_sync - -// ------------------------- -// ------------------------- -// -- packet3_get_dag_sync - -// ------------------------- -// ------------------------- -// - packet4_get_pbft_sync - -// ------------------------- -// ------------------------- -// - packet5_get_pbft_sync - -// ------------------------- -// ------------------------ -// --- packet6_pbft_sync -- -// ------------------------ -// ------------------------ -// --- packet7_pbft_sync -- -// ------------------------ -// ------------------------ -// - packet8_get_dag_sync - -// ------------------------ -// 0......................20........................40........................60.......... 
time -// */ -// -// // All packets should be already being processed after short amount of time -// std::this_thread::sleep_for(60ms + QUEUE_EMPTIED_WAIT_TRESHOLD_MS); -// EXPECT_EQ(queuesSize(tp), 0); -// -// // Wait until processing of all packets is finished - in some edge cases it might be little bit delayed due to -// locking EXPECT_HAPPENS({500s, 20ms}, [&](auto& ctx) { -// // Check if transactions was propagated to node0 -// WAIT_EXPECT_EQ(ctx, init_data.packets_processing_info->getPacketProcessingTimesCount(), packets_count + 1) -// }); -// -// // Check order of packets how they were processed -// const auto packets_proc_info = init_data.packets_processing_info; -// -// const auto packet0_dag_sync_proc_info = packets_proc_info->getPacketProcessingTimes(packet0_dag_sync_id); -// const auto packet1_dag_sync_proc_info = packets_proc_info->getPacketProcessingTimes(packet1_dag_sync_id); -// const auto packet2_get_dag_sync_proc_info = packets_proc_info->getPacketProcessingTimes(packet2_get_dag_sync_id); -// const auto packet3_get_dag_sync_proc_info = packets_proc_info->getPacketProcessingTimes(packet3_get_dag_sync_id); -// const auto packet4_get_pbft_sync_proc_info = packets_proc_info->getPacketProcessingTimes(packet4_get_pbft_sync_id); -// const auto packet5_get_pbft_sync_proc_info = packets_proc_info->getPacketProcessingTimes(packet5_get_pbft_sync_id); -// const auto packet6_pbft_sync_proc_info = packets_proc_info->getPacketProcessingTimes(packet6_pbft_sync_id); -// const auto packet7_pbft_sync_proc_info = packets_proc_info->getPacketProcessingTimes(packet7_pbft_sync_id); -// const auto packet8_get_dag_sync_proc_info = packets_proc_info->getPacketProcessingTimes(packet8_get_dag_sync_id); -// -// checkConcurrentProcessing({ -// {packet0_dag_sync_proc_info, "packet0_dag_sync"}, -// {packet2_get_dag_sync_proc_info, "packet2_get_dag_sync"}, -// {packet4_get_pbft_sync_proc_info, "packet4_get_pbft_sync"}, -// {packet6_pbft_sync_proc_info, "packet6_pbft_sync"}, -// 
}); -// -// checkConcurrentProcessing({ -// {packet1_dag_sync_proc_info, "packet1_dag_sync"}, -// {packet3_get_dag_sync_proc_info, "packet3_get_dag_sync"}, -// {packet5_get_pbft_sync_proc_info, "packet5_get_pbft_sync"}, -// {packet7_pbft_sync_proc_info, "packet7_pbft_sync"}, -// }); -// -// EXPECT_GT(packet1_dag_sync_proc_info.start_time_, packet0_dag_sync_proc_info.finish_time_); -// EXPECT_GT(packet3_get_dag_sync_proc_info.start_time_, packet2_get_dag_sync_proc_info.finish_time_); -// EXPECT_GT(packet5_get_pbft_sync_proc_info.start_time_, packet4_get_pbft_sync_proc_info.finish_time_); -// EXPECT_GT(packet7_pbft_sync_proc_info.start_time_, packet6_pbft_sync_proc_info.finish_time_); -// -// EXPECT_GT(packet8_get_dag_sync_proc_info.start_time_, packet3_get_dag_sync_proc_info.finish_time_); -// } -// -//// Test "peer-order blocking dependencies" related to specific (peer & order) combination: -//// -//// Packets types that are blocked only for processing when received from specific peer & after specific -//// time (order), e.g.: new dag block packet processing is blocked until all transactions packets that were received -//// before it are processed. 
This blocking dependency is applied only for the same peer so transaction packet from one -//// peer does not block new dag block packet from another peer -// TEST_F(TarcapTpTest, peer_order_blocking_deps) { -// HandlersInitData init_data = createHandlersInitData(); -// -// auto packets_handler = std::make_shared(); -// packets_handler->registerHandler(init_data, "TX_PH", 20); -// packets_handler->registerHandler(init_data, "DAG_BLOCK_PH", 0); -// packets_handler->registerHandler(init_data, "SYNC_TEST_PH", 40); -// -// // Creates threadpool -// threadpool::PacketsThreadPool tp(10); -// tp.setPacketsHandlers(TARAXA_NET_VERSION, packets_handler); -// -// // Pushes packets to the tp -// const auto packet0_tx_id = -// tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kTransactionPacket)).value(); -// const auto packet1_tx_id = -// tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kTransactionPacket)).value(); -// const auto packet2_dag_sync_id = -// tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kDagSyncPacket)).value(); -// const auto packet3_tx_id = -// tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kTransactionPacket)).value(); -// -// size_t packets_count = 0; -// const auto packet4_dag_block_id = packets_count = -// tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kDagBlockPacket, {createDagBlockRlp(1)})) -// .value(); -// -// // How should packets be processed: -// // Note: To understand how are different packet types processed (concurrently without any blocking dependencies or -// // synchronously due to some blocking dependencies - depends on situation), check -// // PriorityQueue::updateDependenciesStart -// /* -// -------------- -// - packet0_tx - -// -------------- -// -------------- -// - packet1_tx - -// -------------- -// ---------------------------- -// ----- packet2_dag_sync ----- -// ---------------------------- -// -------------- -// - packet3_tx - -// 
-------------- -// --------------------- -// - packet4_dag_block - -// --------------------- -// 0............20.............40....................60.................. time [ms] -// */ -// -// tp.startProcessing(); -// -// // All packets should be already being processed after short amount of time -// std::this_thread::sleep_for(60ms + QUEUE_EMPTIED_WAIT_TRESHOLD_MS); -// EXPECT_EQ(queuesSize(tp), 0); -// -// // Wait until processing of all packets is finished - in some edge cases it might be little bit delayed due to -// locking EXPECT_HAPPENS({500s, 20ms}, [&](auto& ctx) { -// // Check if transactions was propagated to node0 -// WAIT_EXPECT_EQ(ctx, init_data.packets_processing_info->getPacketProcessingTimesCount(), packets_count + 1) -// }); -// -// // Check order of packets how they were processed -// const auto packets_proc_info = init_data.packets_processing_info; -// -// const auto packet0_tx_proc_info = packets_proc_info->getPacketProcessingTimes(packet0_tx_id); -// const auto packet1_tx_proc_info = packets_proc_info->getPacketProcessingTimes(packet1_tx_id); -// const auto packet3_tx_proc_info = packets_proc_info->getPacketProcessingTimes(packet3_tx_id); -// const auto packet4_dag_block_proc_info = packets_proc_info->getPacketProcessingTimes(packet4_dag_block_id); -// const auto packet2_dag_sync_proc_info = packets_proc_info->getPacketProcessingTimes(packet2_dag_sync_id); -// -// checkConcurrentProcessing({ -// {packet0_tx_proc_info, "packet0_tx"}, -// {packet1_tx_proc_info, "packet1_tx"}, -// {packet2_dag_sync_proc_info, "packet2_dag_sync"}, -// {packet3_tx_proc_info, "packet3_tx"}, -// }); -// -// EXPECT_GT(packet2_dag_sync_proc_info.finish_time_, packet0_tx_proc_info.finish_time_); -// EXPECT_GT(packet2_dag_sync_proc_info.finish_time_, packet1_tx_proc_info.finish_time_); -// EXPECT_GT(packet2_dag_sync_proc_info.finish_time_, packet3_tx_proc_info.finish_time_); -// -// EXPECT_GT(packet4_dag_block_proc_info.start_time_, 
packet2_dag_sync_proc_info.finish_time_); -// } -// -//// Test "dag-block blocking dependencies" related to dag blocks: -//// -//// Same dag blocks should not be processed at the same time -// TEST_F(TarcapTpTest, same_dag_blks_ordering) { -// HandlersInitData init_data = createHandlersInitData(); -// -// auto packets_handler = std::make_shared(); -// packets_handler->registerHandler(init_data, "DAG_BLOCK_PH", 20); -// -// // Creates threadpool -// threadpool::PacketsThreadPool tp(10); -// tp.setPacketsHandlers(TARAXA_NET_VERSION, packets_handler); -// -// auto dag_block = createDagBlockRlp(0); -// -// // Pushes packets to the tp -// const auto blk0_id = -// tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kDagBlockPacket, {dag_block})).value(); -// const auto blk1_id = -// tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kDagBlockPacket, {dag_block})).value(); -// const auto blk2_id = -// tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kDagBlockPacket, {dag_block})).value(); -// const auto blk3_id = -// tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kDagBlockPacket, {dag_block})).value(); -// -// size_t packets_count = 0; -// const auto blk4_id = packets_count = -// tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kDagBlockPacket, {dag_block})).value(); -// -// tp.startProcessing(); -// -// // How should dag blocks packets be processed: -// // Same dag blocks should not be processed concurrently but one after another -// -// // All packets should be already being processed after short amount of time -// std::this_thread::sleep_for(200ms + QUEUE_EMPTIED_WAIT_TRESHOLD_MS); -// EXPECT_EQ(queuesSize(tp), 0); -// -// // Wait until processing of all packets is finished - in some edge cases it might be little bit delayed due to -// locking EXPECT_HAPPENS({500s, 20ms}, [&](auto& ctx) { -// // Check if transactions was propagated to node0 -// WAIT_EXPECT_EQ(ctx, 
init_data.packets_processing_info->getPacketProcessingTimesCount(), packets_count + 1) -// }); -// -// // Check order of packets how they were processed -// const auto packets_proc_info = init_data.packets_processing_info; -// -// const auto blk0_proc_info = packets_proc_info->getPacketProcessingTimes(blk0_id); -// const auto blk1_proc_info = packets_proc_info->getPacketProcessingTimes(blk1_id); -// const auto blk2_proc_info = packets_proc_info->getPacketProcessingTimes(blk2_id); -// const auto blk3_proc_info = packets_proc_info->getPacketProcessingTimes(blk3_id); -// const auto blk4_proc_info = packets_proc_info->getPacketProcessingTimes(blk4_id); -// -// checkSerialProcessing({ -// {blk0_proc_info, "blk0"}, -// {blk1_proc_info, "blk1"}, -// {blk2_proc_info, "blk2"}, -// {blk3_proc_info, "blk3"}, -// {blk4_proc_info, "blk4"}, -// }); -// } -// -//// Test "dag-level blocking dependencies" related to dag blocks levels: -//// -//// Ideally only dag blocks with the same level should be processed. In reality there are situation when node receives -//// dag block with smaller level than the level of blocks that are already being processed. In such case these blocks -//// with smaller levels can be processed concurrently with blocks that have higher level. 
All new dag blocks with -/// higher / level than the lowest level from all the blocks that currently being processed are blocked for processing -// TEST_F(TarcapTpTest, dag_blks_lvls_ordering) { -// HandlersInitData init_data = createHandlersInitData(); -// -// auto packets_handler = std::make_shared(); -// packets_handler->registerHandler(init_data, "DAG_BLOCK_PH", 20); -// -// // Creates threadpool -// threadpool::PacketsThreadPool tp(10); -// tp.setPacketsHandlers(TARAXA_NET_VERSION, packets_handler); -// -// // Pushes packets to the tp -// const auto blk0_lvl1_id = -// tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kDagBlockPacket, {createDagBlockRlp(1, -// 1)})) -// .value(); -// const auto blk1_lvl1_id = -// tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kDagBlockPacket, {createDagBlockRlp(1, -// 2)})) -// .value(); -// const auto blk2_lvl0_id = -// tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kDagBlockPacket, {createDagBlockRlp(0, -// 3)})) -// .value(); -// const auto blk3_lvl1_id = -// tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kDagBlockPacket, {createDagBlockRlp(1, -// 4)})) -// .value(); -// const auto blk4_lvl2_id = -// tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kDagBlockPacket, {createDagBlockRlp(2, -// 5)})) -// .value(); -// -// size_t packets_count = 0; -// const auto blk5_lvl3_id = packets_count = -// tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kDagBlockPacket, {createDagBlockRlp(3, -// 6)})) -// .value(); -// -// tp.startProcessing(); -// -// // How should dag blocks packets be processed: -// // Note: To understand how are different packet types processed (concurrently without any blocking dependencies or -// // synchronously due to some blocking dependencies - depends on situation), check -// // PriorityQueue::updateDependenciesStart -// /* -// ------------- -// - blk0_lvl1 - -// ------------- -// 
------------- -// - blk1_lvl1 - -// ------------- -// ------------- -// - blk2_lvl0 - -// ------------- -// ------------- -// - blk3_lvl1 - -// ------------- -// ------------- -// - blk4_lvl2 - -// ------------- -// ------------- -// - blk5_lvl3 - -// ------------- -// 0...........20............40............60.............80................. time [ms] -// */ -// -// // All packets should be already being processed after short amount of time -// std::this_thread::sleep_for(80ms + QUEUE_EMPTIED_WAIT_TRESHOLD_MS); -// EXPECT_EQ(queuesSize(tp), 0); -// -// // Wait until processing of all packets is finished - in some edge cases it might be little bit delayed due to -// locking EXPECT_HAPPENS({500s, 20ms}, [&](auto& ctx) { -// // Check if transactions was propagated to node0 -// WAIT_EXPECT_EQ(ctx, init_data.packets_processing_info->getPacketProcessingTimesCount(), packets_count + 1) -// }); -// -// // Check order of packets how they were processed -// const auto packets_proc_info = init_data.packets_processing_info; -// -// const auto blk0_lvl1_proc_info = packets_proc_info->getPacketProcessingTimes(blk0_lvl1_id); -// const auto blk1_lvl1_proc_info = packets_proc_info->getPacketProcessingTimes(blk1_lvl1_id); -// const auto blk2_lvl0_proc_info = packets_proc_info->getPacketProcessingTimes(blk2_lvl0_id); -// const auto blk3_lvl1_proc_info = packets_proc_info->getPacketProcessingTimes(blk3_lvl1_id); -// const auto blk4_lvl2_proc_info = packets_proc_info->getPacketProcessingTimes(blk4_lvl2_id); -// const auto blk5_lvl3_proc_info = packets_proc_info->getPacketProcessingTimes(blk5_lvl3_id); -// -// checkConcurrentProcessing({ -// {blk0_lvl1_proc_info, "blk0_lvl1"}, -// {blk1_lvl1_proc_info, "blk1_lvl1"}, -// {blk2_lvl0_proc_info, "blk2_lvl0"}, -// }); -// -// EXPECT_GT(blk3_lvl1_proc_info.start_time_, blk2_lvl0_proc_info.finish_time_); -// EXPECT_GT(blk4_lvl2_proc_info.start_time_, blk3_lvl1_proc_info.finish_time_); -// EXPECT_GT(blk5_lvl3_proc_info.start_time_, 
blk4_lvl2_proc_info.finish_time_); -// } -// -//// Test threads borrowing -//// -//// It can happen that no packet for processing was returned during the first iteration over priority queues as there -//// are limits for max total workers per each priority queue. These limits can and should be ignored in some -//// scenarios... For example: -//// High priority queue reached it's max workers limit, other queues have inside many blocked packets that cannot be -//// currently processed concurrently and MAX_TOTAL_WORKERS_COUNT is not reached yet. In such case some threads might -//// be unused. In such cases priority queues max workers limits can and should be ignored. -//// -//// Always keep 1 reserved thread for each priority queue at all times -// TEST_F(TarcapTpTest, threads_borrowing) { -// HandlersInitData init_data = createHandlersInitData(); -// -// auto packets_handler = std::make_shared(); -// packets_handler->registerHandler(init_data, "VOTE_PH", 100); -// -// // Creates threadpool -// const size_t threads_num = 10; -// threadpool::PacketsThreadPool tp(threads_num); -// tp.setPacketsHandlers(TARAXA_NET_VERSION, packets_handler); -// -// // Pushes packets to the tp -// std::vector pushed_packets_ids; -// for (size_t i = 0; i < threads_num; i++) { -// uint64_t packet_id = tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kVotePacket, -// {})).value(); pushed_packets_ids.push_back(packet_id); -// } -// -// tp.startProcessing(); -// -// // How should packets be processed: -// // Note: To understand how are different packet types processed (concurrently without any blocking dependencies or -// // synchronously due to some blocking dependencies - depends on situation), check -// // PriorityQueue::updateDependenciesStart -// // -// // Note: each queue has 1 thread reserved at all times(even if it does not do anything) and there is 10 threads in -// // total, even with borrowing only 8 threads could be used at the same time -// /* -// 
---------------- -// - packet0_vote - -// ---------------- -// ---------------- -// - packet1_vote - -// ---------------- -// ---------------- -// - packet2_vote - -// ---------------- -// -// -||- -// ... -// -// ---------------- -// - packet7_vote - -// ---------------- -// ---------------- -// - packet8_vote - -// ---------------- -// ---------------- -// - packet9_vote - -// ---------------- -// 0..............100...............200........... time [ms] -// */ -// -// // First 8 packets should be already processed by this time -// std::this_thread::sleep_for(100ms + 50ms /* might take longer due to threads borrowing */); -// EXPECT_LE(queuesSize(tp), 2); -// -// // Check order of packets how they were processed -// const auto packets_proc_info = init_data.packets_processing_info; -// -// // In case some packet processing is not finished yet, getPacketProcessingTimes() returns default (empty) value -// std::chrono::steady_clock::time_point default_time_point; -// -// // Because each queue has 1 thread reserved at all times(even if it does not do anything) and there is 10 threads -// in -// // total, even with borrowing only 8 threads could be used at the same time, thus last 2 packets (9th & 10th) -// should -// // not be processed after (100 + WAIT_TRESHOLD_MS) ms -// EXPECT_EQ(packets_proc_info->getPacketProcessingTimes(pushed_packets_ids[8]).finish_time_, default_time_point); -// EXPECT_EQ(packets_proc_info->getPacketProcessingTimes(pushed_packets_ids[9]).finish_time_, default_time_point); -// -// std::vector> packets_proc_info_vec; -// for (size_t i = 0; i < threads_num - (threadpool::PacketData::PacketPriority::Count - 1); i++) { -// packets_proc_info_vec.emplace_back(packets_proc_info->getPacketProcessingTimes(pushed_packets_ids[i]), -// "packet" + std::to_string(pushed_packets_ids[i]) + "_vote"); -// } -// -// // Check if first 8 pbft vote packets were processed concurrently -> threads from other queues had to be borrowed -// for -// // that -// 
checkConcurrentProcessing(packets_proc_info_vec); -// } -// -//// Test low priority queue starvation -//// -//// It should never happen that packets from lower priority queues are waiting to be processed until all packets from -//// higher priority queues are processed -// TEST_F(TarcapTpTest, low_priotity_queue_starvation) { -// HandlersInitData init_data = createHandlersInitData(); -// -// auto packets_handler = std::make_shared(); -// // Handler for packet from high priority queue -// packets_handler->registerHandler(init_data, "VOTE_PH", 20); -// -// // Handler for packet from mid priority queue -// packets_handler->registerHandler(init_data, "TX_PH", 20); -// -// // Handler for packet from low priority queue -// packets_handler->registerHandler(init_data, "STATUS_PH", 20); -// -// // Creates threadpool -// size_t threads_num = 10; -// threadpool::PacketsThreadPool tp(threads_num); -// tp.setPacketsHandlers(TARAXA_NET_VERSION, packets_handler); -// -// // Push 10x more packets for each prioriy queue than max tp capacity to make sure that tp wont be able to process -// all -// // packets from each queue concurrently -> many packets will be waiting due to max threads num reached for specific -// // priority queues -// for (size_t i = 0; i < 2 * 10 * threads_num; i++) { -// tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kVotePacket, {})).value(); -// tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kTransactionPacket, {})).value(); -// } -// -// // Push a few packets low priority packets -// for (size_t i = 0; i < 4; i++) { -// tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kStatusPacket, {})).value(); -// } -// -// tp.startProcessing(); -// -// // How should packets be processed: -// // Note: To understand how are different packet types processed (concurrently without any blocking dependencies or -// // synchronously due to some blocking dependencies - depends on situation), check -// // 
PriorityQueue::updateDependenciesStart In this test are max concurrent processing limits for queues reached, so -// // when we have 10 threads in thredpool: -// // - 4 is limit for High priority queue - VotePacket -// // - 4 is limit for Mid priority queue - TransactionPacket -// // - 3 is limit for Low priority queue - StatusPacket, but because max total limit (10) is always checked first -// // , low priority queue wont be able to use more than 2 threads concurrently -// /* -// ---------------- -// - packet0_vote - -// ---------------- -// ---------------- -// - packet1_vote - -// ---------------- -// ---------------- -// - packet2_vote - -// ---------------- -// ---------------- -// - packet3_vote - -// ---------------- -// ---------------- -// -- packet4_tx -- -// ---------------- -// ---------------- -// -- packet5_tx -- -// ---------------- -// ---------------- -// -- packet6_tx -- -// ---------------- -// ---------------- -// -- packet7_tx -- -// ---------------- -// -// .... -// votes and tx packets are processed concurrently 4 at a time until all of them are processed -// -// -// ------------------ -// - packet400_test - -// ------------------ -// ------------------ -// - packet401_test - -// ------------------ -// ------------------ -// - packet402_test - -// ------------------ -// ------------------ -// - packet403_test - -// ------------------ -// 0.................20.................40................... 
time [ms] -// */ -// -// std::this_thread::sleep_for(40ms + QUEUE_EMPTIED_WAIT_TRESHOLD_MS); -// -// const auto [high_priority_queue_size, mid_priority_queue_size, low_priority_queue_size] = tp.getQueueSize(); -// -// EXPECT_GT(high_priority_queue_size, 0); -// EXPECT_GT(mid_priority_queue_size, 0); -// EXPECT_EQ(low_priority_queue_size, 0); -// } -// -// } // namespace taraxa::core_tests -// -// int main(int argc, char** argv) { -// using namespace taraxa; -// -// auto logging = logger::createDefaultLoggingConfig(); -// -// // Set this to debug to see log msgs -// logging.verbosity = logger::Verbosity::Debug; -// -// addr_t node_addr; -// logger::InitLogging(logging, node_addr); -// -// ::testing::InitGoogleTest(&argc, argv); -// return RUN_ALL_TESTS(); -// } \ No newline at end of file +#include + +#include "config/config.hpp" +#include "config/version.hpp" +#include "dag/dag_block.hpp" +#include "logger/logger.hpp" +#include "network/tarcap/packets_handler.hpp" +#include "network/tarcap/packets_handlers/latest/common/base_packet_handler.hpp" +#include "network/tarcap/shared_states/peers_state.hpp" +#include "network/threadpool/tarcap_thread_pool.hpp" +#include "test_util/test_util.hpp" + +namespace taraxa::core_tests { + +using namespace std::literals; + +// Do not use NodesTest from "test_util/gtest.hpp" as its functionality is not needed in this test +struct NodesTest : virtual testing::Test { + testing::UnitTest* current_test = ::testing::UnitTest::GetInstance(); + testing::TestInfo const* current_test_info = current_test->current_test_info(); + + NodesTest() = default; + virtual ~NodesTest() = default; + + NodesTest(const NodesTest&) = delete; + NodesTest(NodesTest&&) = delete; + NodesTest& operator=(const NodesTest&) = delete; + NodesTest& operator=(NodesTest&&) = delete; +}; + +struct TarcapTpTest : NodesTest {}; + +using namespace taraxa::network; + +class PacketsProcessingInfo { + public: + struct PacketProcessingTimes { + 
std::chrono::steady_clock::time_point start_time_; + std::chrono::steady_clock::time_point finish_time_; + }; + + public: + void addPacketProcessingTimes(threadpool::PacketData::PacketId packet_id, + const PacketProcessingTimes& packet_processing_times) { + std::scoped_lock lock(mutex_); + bool res = packets_processing_times_.emplace(packet_id, packet_processing_times).second; + assert(res); + } + + PacketProcessingTimes getPacketProcessingTimes(threadpool::PacketData::PacketId packet_id) const { + std::shared_lock lock(mutex_); + + auto found_packet_info = packets_processing_times_.find(packet_id); + + // Failed to obtain processing times for packet id: packet_id. Processing did not finish yet. This should be + // caught in processing times comparing + if (found_packet_info == packets_processing_times_.end()) { + return {}; + } + + return found_packet_info->second; + } + + size_t getPacketProcessingTimesCount() const { + std::shared_lock lock(mutex_); + return packets_processing_times_.size(); + } + + private: + std::unordered_map packets_processing_times_; + mutable std::shared_mutex mutex_; +}; + +// Help functions for tests +struct HandlersInitData { + FullNodeConfig conf; + dev::p2p::NodeID sender_node_id; + addr_t own_node_addr; + + std::shared_ptr peers_state; + std::shared_ptr packets_stats; + std::shared_ptr packets_processing_info; + + dev::p2p::NodeID copySender() { return sender_node_id; } +}; + +struct DummyPacket { + std::string type_str; + threadpool::PacketData::PacketId packet_id; +}; + +class DummyPacketHandler : public network::tarcap::BasePacketHandler { + public: + DummyPacketHandler(const HandlersInitData& init_data, const std::string& log_channel_name, + uint32_t processing_delay_ms) + : processing_delay_ms_(processing_delay_ms), packets_proc_info_(init_data.packets_processing_info) { + const auto node_addr = init_data.own_node_addr; + LOG_OBJECTS_CREATE(log_channel_name); + } + + virtual ~DummyPacketHandler() = default; + 
DummyPacketHandler(const DummyPacketHandler&) = default; + DummyPacketHandler(DummyPacketHandler&&) = default; + DummyPacketHandler& operator=(const DummyPacketHandler&) = delete; + DummyPacketHandler& operator=(DummyPacketHandler&&) = delete; + + void processPacket(const threadpool::PacketData& packet_data) override { + // Decode packet rlp into packet object + DummyPacket packet{packet_data.type_str_, packet_data.id_}; + + // Main processing function + process(std::move(packet), {}); + } + + private: + void process(DummyPacket&& packet, [[maybe_unused]] const std::shared_ptr& peer) { + // Note do not use LOG() before saving start & finish time as it is internally synchronized and can + // cause delays, which result in tests fails + auto start_time = std::chrono::steady_clock::now(); + std::this_thread::sleep_for(std::chrono::milliseconds(processing_delay_ms_)); + auto finish_time = std::chrono::steady_clock::now(); + + LOG(log_dg_) << "Processing packet: " << packet.type_str << ", id(" << packet.packet_id << ") finished. 
" + << "Start time: " << start_time.time_since_epoch().count() + << ", finish time: " << finish_time.time_since_epoch().count(); + + packets_proc_info_->addPacketProcessingTimes(packet.packet_id, {start_time, finish_time}); + } + + // Declare logger instances + LOG_OBJECTS_DEFINE + + uint32_t processing_delay_ms_{0}; + std::shared_ptr packets_proc_info_; +}; + +class DummyTransactionPacketHandler : public DummyPacketHandler { + public: + DummyTransactionPacketHandler(const HandlersInitData& init_data, const std::string& log_channel_name, + uint32_t processing_delay_ms) + : DummyPacketHandler(init_data, log_channel_name, processing_delay_ms) {} + + // Packet type that is processed by this handler + static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::kTransactionPacket; +}; + +class DummyDagBlockPacketHandler : public DummyPacketHandler { + public: + DummyDagBlockPacketHandler(const HandlersInitData& init_data, const std::string& log_channel_name, + uint32_t processing_delay_ms) + : DummyPacketHandler(init_data, log_channel_name, processing_delay_ms) {} + + // Packet type that is processed by this handler + static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::kDagBlockPacket; +}; + +class DummyStatusPacketHandler : public DummyPacketHandler { + public: + DummyStatusPacketHandler(const HandlersInitData& init_data, const std::string& log_channel_name, + uint32_t processing_delay_ms) + : DummyPacketHandler(init_data, log_channel_name, processing_delay_ms) {} + + // Packet type that is processed by this handler + static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::kStatusPacket; +}; + +class DummyVotePacketHandler : public DummyPacketHandler { + public: + DummyVotePacketHandler(const HandlersInitData& init_data, const std::string& log_channel_name, + uint32_t processing_delay_ms) + : DummyPacketHandler(init_data, log_channel_name, processing_delay_ms) {} + + // Packet type that is processed by this 
handler + static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::kVotePacket; +}; + +class DummyGetNextVotesBundlePacketHandler : public DummyPacketHandler { + public: + DummyGetNextVotesBundlePacketHandler(const HandlersInitData& init_data, const std::string& log_channel_name, + uint32_t processing_delay_ms) + : DummyPacketHandler(init_data, log_channel_name, processing_delay_ms) {} + + // Packet type that is processed by this handler + static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::kGetNextVotesSyncPacket; +}; + +class DummyVotesBundlePacketHandler : public DummyPacketHandler { + public: + DummyVotesBundlePacketHandler(const HandlersInitData& init_data, const std::string& log_channel_name, + uint32_t processing_delay_ms) + : DummyPacketHandler(init_data, log_channel_name, processing_delay_ms) {} + + // Packet type that is processed by this handler + static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::kVotesBundlePacket; +}; + +class DummyGetDagSyncPacketHandler : public DummyPacketHandler { + public: + DummyGetDagSyncPacketHandler(const HandlersInitData& init_data, const std::string& log_channel_name, + uint32_t processing_delay_ms) + : DummyPacketHandler(init_data, log_channel_name, processing_delay_ms) {} + + // Packet type that is processed by this handler + static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::kGetDagSyncPacket; +}; + +class DummyGetPbftSyncPacketHandler : public DummyPacketHandler { + public: + DummyGetPbftSyncPacketHandler(const HandlersInitData& init_data, const std::string& log_channel_name, + uint32_t processing_delay_ms) + : DummyPacketHandler(init_data, log_channel_name, processing_delay_ms) {} + + // Packet type that is processed by this handler + static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::kGetPbftSyncPacket; +}; + +class DummyDagSyncPacketHandler : public DummyPacketHandler { + public: + 
DummyDagSyncPacketHandler(const HandlersInitData& init_data, const std::string& log_channel_name, + uint32_t processing_delay_ms) + : DummyPacketHandler(init_data, log_channel_name, processing_delay_ms) {} + + // Packet type that is processed by this handler + static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::kDagSyncPacket; +}; + +class DummyPbftSyncPacketHandler : public DummyPacketHandler { + public: + DummyPbftSyncPacketHandler(const HandlersInitData& init_data, const std::string& log_channel_name, + uint32_t processing_delay_ms) + : DummyPacketHandler(init_data, log_channel_name, processing_delay_ms) {} + + // Packet type that is processed by this handler + static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::kPbftSyncPacket; +}; + +HandlersInitData createHandlersInitData() { + HandlersInitData ret_init_data; + + ret_init_data.sender_node_id = dev::p2p::NodeID(1); + ret_init_data.own_node_addr = addr_t(2); + ret_init_data.peers_state = std::make_shared(std::weak_ptr(), FullNodeConfig()); + ret_init_data.packets_stats = + std::make_shared(std::chrono::milliseconds(0), ret_init_data.own_node_addr); + ret_init_data.packets_processing_info = std::make_shared(); + + // Enable packets from sending peer to be processed + auto peer = ret_init_data.peers_state->addPendingPeer(ret_init_data.sender_node_id, ""); + ret_init_data.peers_state->setPeerAsReadyToSendMessages(ret_init_data.sender_node_id, peer); + + return ret_init_data; +} + +std::pair createPacket( + const dev::p2p::NodeID& sender_node_id, SubprotocolPacketType packet_type, + std::optional> packet_rlp_bytes = {}) { + if (packet_rlp_bytes.has_value()) { + threadpool::PacketData packet_data(packet_type, sender_node_id, std::move(packet_rlp_bytes.value())); + return {TARAXA_NET_VERSION, std::move(packet_data)}; + } + + dev::RLPStream s(0); + threadpool::PacketData packet_data(packet_type, sender_node_id, s.invalidate()); + return {TARAXA_NET_VERSION, 
std::move(packet_data)}; +} + +bytes createDagBlockRlp(level_t level, uint32_t sig = 777) { + // Creates dag block rlp as it is required for blocking mask to extract dag block level + DagBlock blk(blk_hash_t(10), level, {}, {}, sig_t(sig), blk_hash_t(1), addr_t(15)); + return blk.rlp(true); +} + +/** + * @brief Check all combinations(without repetition) of provided packets that they were processed concurrently: + * - packet1.start_time < packet2.finish_time + * - packet2.start_time < packet1.finish_time + * + * @param packets + */ +void checkConcurrentProcessing( + const std::vector>& packets) { + assert(packets.size() >= 2); + + for (size_t i = 0; i < packets.size(); i++) { + const auto& packet_l = packets[0]; + for (size_t j = i + 1; j < packets.size(); j++) { + const auto& packet_r = packets[j]; + EXPECT_LT(packet_l.first.start_time_, packet_r.first.finish_time_) + << packet_l.second << ".start_time < " << packet_r.second << ".finish_time"; + EXPECT_LT(packet_r.first.start_time_, packet_l.first.finish_time_) + << packet_r.second << ".start_time < " << packet_l.second << ".finish_time"; + } + } +} + +/** + * @brief Check all combinations(without repetition) of provided packets that they were processed serial: + * - packet1.finish_time < packet2.start_time + * + * @param packets + */ +void checkSerialProcessing( + const std::vector>& packets) { + assert(packets.size() >= 2); + + for (size_t i = 0; i < packets.size(); i++) { + const auto& packet_l = packets[0]; + for (size_t j = i + 1; j < packets.size(); j++) { + const auto& packet_r = packets[j]; + EXPECT_LT(packet_l.first.finish_time_, packet_r.first.start_time_) + << packet_l.second << ".finish_time < " << packet_r.second << ".start_time"; + } + } +} + +size_t queuesSize(const threadpool::PacketsThreadPool& tp) { + const auto [high_priority_queue_size, mid_priority_queue_size, low_priority_queue_size] = tp.getQueueSize(); + + return high_priority_queue_size + mid_priority_queue_size + low_priority_queue_size; 
+} + +// Threshold for packets queue to be emptied +constexpr std::chrono::milliseconds QUEUE_EMPTIED_WAIT_TRESHOLD_MS = 15ms; + +// Test if all "block-free" packets are processed concurrently +// Note: in case someone creates new blocking dependency and does not adjust tests, this test should fail +TEST_F(TarcapTpTest, block_free_packets) { + HandlersInitData init_data = createHandlersInitData(); + + // Creates sender 2 to bypass peer order block on Transaction -> DagBlock packet. In case those packets sent + // 2 different senders those packets are "block-free" + dev::p2p::NodeID sender2(3); + auto peer = init_data.peers_state->addPendingPeer(sender2, ""); + init_data.peers_state->setPeerAsReadyToSendMessages(sender2, peer); + + auto packets_handler = std::make_shared(); + + packets_handler->registerHandler(init_data, "TX_PH", 20); + packets_handler->registerHandler(init_data, "DAG_BLOCK_PH", 20); + packets_handler->registerHandler(init_data, "STATUS_PH", 20); + packets_handler->registerHandler(init_data, "VOTE_PH", 20); + packets_handler->registerHandler(init_data, "GET_NEXT_VOTES_SYNC_PH", 20); + packets_handler->registerHandler(init_data, "VOTES_SYNC_PH", 20); + + // Creates threadpool + // Note: make num of threads >= num of packets to check if they are processed concurrently without blocks, otherwise + // some blocks would be blocked for processing due to max threads limit + threadpool::PacketsThreadPool tp(18); + tp.setPacketsHandlers(TARAXA_NET_VERSION, packets_handler); + + // Pushes packets to the tp + auto packet = createPacket(init_data.copySender(), SubprotocolPacketType::kTransactionPacket, {}); + if (packet.second.rlp_.isList()) { + std::cout << "is list"; + } else { + std::cout << "not list"; + } + const auto packet0_tx_id = + tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kTransactionPacket, {})).value(); + const auto packet1_tx_id = + tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kTransactionPacket, 
{})).value(); + const auto packet2_tx_id = + tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kTransactionPacket, {})).value(); + const auto packet3_tx_id = + tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kTransactionPacket, {})).value(); + + const auto packet4_dag_block_id = + tp.push( + createPacket(dev::p2p::NodeID(sender2), SubprotocolPacketType::kDagBlockPacket, {createDagBlockRlp(0, 1)})) + .value(); + const auto packet5_dag_block_id = + tp.push( + createPacket(dev::p2p::NodeID(sender2), SubprotocolPacketType::kDagBlockPacket, {createDagBlockRlp(0, 2)})) + .value(); + + const auto packet8_status_id = + tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kStatusPacket, {})).value(); + const auto packet9_status_id = + tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kStatusPacket, {})).value(); + + const auto packet12_vote_id = + tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kVotePacket, {})).value(); + const auto packet13_vote_id = + tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kVotePacket, {})).value(); + + const auto packet14_get_pbft_next_votes_id = + tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kGetNextVotesSyncPacket, {})).value(); + const auto packet15_get_pbft_next_votes_id = + tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kGetNextVotesSyncPacket, {})).value(); + + const auto packet16_pbft_next_votes_id = + tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kVotesBundlePacket, {})).value(); + + size_t packets_count = 0; + const auto packet17_pbft_next_votes_id = packets_count = + tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kVotesBundlePacket, {})).value(); + + tp.startProcessing(); + + // How should packets be processed: + // Note: To understand how are different packet types processed (concurrently without any blocking dependencies or + // 
synchronously due to some blocking dependencies - depends on situation), check + // PriorityQueue::updateDependenciesStart + /* + ---------------------- + - packet0_transaction - + ---------------------- + ---------------------- + - packet1_transaction - + ---------------------- + ----------------------- + - packet2_transaction - + ----------------------- + + -||- + ... + + ----------------------- + - packet17_votes_sync - + ----------------------- + 0.....................20.................... time [ms] + */ + + // All packets should be already being processed after short amount of time + std::this_thread::sleep_for(QUEUE_EMPTIED_WAIT_TRESHOLD_MS); + EXPECT_EQ(queuesSize(tp), 0); + + // Wait until processing of all packets is finished - in some edge cases it might be little bit delayed due to locking + EXPECT_HAPPENS({500s, 20ms}, [&](auto& ctx) { + // Check if transactions was propagated to node0 + WAIT_EXPECT_EQ(ctx, init_data.packets_processing_info->getPacketProcessingTimesCount(), packets_count + 1) + }); + + // Check order of packets how they were processed + const auto packets_proc_info = init_data.packets_processing_info; + + const auto packet0_tx_proc_info = packets_proc_info->getPacketProcessingTimes(packet0_tx_id); + const auto packet1_tx_proc_info = packets_proc_info->getPacketProcessingTimes(packet1_tx_id); + const auto packet2_tx_proc_info = packets_proc_info->getPacketProcessingTimes(packet2_tx_id); + const auto packet3_tx_proc_info = packets_proc_info->getPacketProcessingTimes(packet3_tx_id); + + const auto packet4_dag_block_proc_info = packets_proc_info->getPacketProcessingTimes(packet4_dag_block_id); + const auto packet5_dag_block_proc_info = packets_proc_info->getPacketProcessingTimes(packet5_dag_block_id); + + const auto packet8_status_proc_info = packets_proc_info->getPacketProcessingTimes(packet8_status_id); + const auto packet9_status_proc_info = packets_proc_info->getPacketProcessingTimes(packet9_status_id); + + const auto 
packet12_vote_proc_info = packets_proc_info->getPacketProcessingTimes(packet12_vote_id); + const auto packet13_vote_proc_info = packets_proc_info->getPacketProcessingTimes(packet13_vote_id); + + const auto packet14_get_pbft_next_votes_proc_info = + packets_proc_info->getPacketProcessingTimes(packet14_get_pbft_next_votes_id); + const auto packet15_get_pbft_next_votes_proc_info = + packets_proc_info->getPacketProcessingTimes(packet15_get_pbft_next_votes_id); + + const auto packet16_pbft_next_votes_proc_info = + packets_proc_info->getPacketProcessingTimes(packet16_pbft_next_votes_id); + const auto packet17_pbft_next_votes_proc_info = + packets_proc_info->getPacketProcessingTimes(packet17_pbft_next_votes_id); + + checkConcurrentProcessing({ + {packet0_tx_proc_info, "packet0_tx"}, + {packet1_tx_proc_info, "packet1_tx"}, + {packet2_tx_proc_info, "packet2_tx"}, + {packet3_tx_proc_info, "packet3_tx"}, + {packet4_dag_block_proc_info, "packet4_dag_block"}, + {packet5_dag_block_proc_info, "packet5_dag_block"}, + {packet8_status_proc_info, "packet8_status"}, + {packet9_status_proc_info, "packet9_status"}, + {packet12_vote_proc_info, "packet12_vote"}, + {packet13_vote_proc_info, "packet13_vote"}, + {packet14_get_pbft_next_votes_proc_info, "packet14_get_pbft_next_votes"}, + {packet15_get_pbft_next_votes_proc_info, "packet15_get_pbft_next_votes"}, + {packet16_pbft_next_votes_proc_info, "packet16_pbft_next_votes"}, + {packet17_pbft_next_votes_proc_info, "packet17_pbft_next_votes"}, + }); +} + +// Test "hard blocking dependencies" related synchronous processing of certain packets: +// +// Packets types that are currently hard blocked for processing in another threads due to dependencies, +// e.g. syncing packets must be processed synchronously one by one, etc... +// Each packet type might be simultaneously blocked by multiple different packets that are being processed. 
+TEST_F(TarcapTpTest, hard_blocking_deps) { + HandlersInitData init_data = createHandlersInitData(); + + auto packets_handler = std::make_shared(); + packets_handler->registerHandler(init_data, "GET_DAG_SYNC_PH", 20); + packets_handler->registerHandler(init_data, "GET_PBFT_SYNC_PH", 20); + packets_handler->registerHandler(init_data, "DAG_SYNC_PH", 20); + packets_handler->registerHandler(init_data, "PBFT_SYNC_PH", 20); + + // Creates threadpool + threadpool::PacketsThreadPool tp(10); + tp.setPacketsHandlers(TARAXA_NET_VERSION, packets_handler); + + // Pushes packets to the tp + const auto packet0_dag_sync_id = + tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kDagSyncPacket, {})).value(); + const auto packet1_dag_sync_id = + tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kDagSyncPacket, {})).value(); + const auto packet2_get_dag_sync_id = + tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kGetDagSyncPacket, {})).value(); + const auto packet3_get_dag_sync_id = + tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kGetDagSyncPacket, {})).value(); + const auto packet4_get_pbft_sync_id = + tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kGetPbftSyncPacket, {})).value(); + const auto packet5_get_pbft_sync_id = + tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kGetPbftSyncPacket, {})).value(); + const auto packet6_pbft_sync_id = + tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kPbftSyncPacket, {})).value(); + const auto packet7_pbft_sync_id = + tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kPbftSyncPacket, {})).value(); + + size_t packets_count = 0; + const auto packet8_get_dag_sync_id = packets_count = + tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kGetDagSyncPacket, {})).value(); + + tp.startProcessing(); + + // How should packets be processed: + // Note: To understand how are different 
packet types processed (concurrently without any blocking dependencies or + // synchronously due to some blocking dependencies - depends on situation), check + // PriorityQueue::updateDependenciesStart + /* + ------------------------ + --- packet0_dag_sync --- + ------------------------ + ------------------------ + --- packet1_dag_sync --- + ------------------------ + ------------------------- + -- packet2_get_dag_sync - + ------------------------- + ------------------------- + -- packet3_get_dag_sync - + ------------------------- + ------------------------- + - packet4_get_pbft_sync - + ------------------------- + ------------------------- + - packet5_get_pbft_sync - + ------------------------- + ------------------------ + --- packet6_pbft_sync -- + ------------------------ + ------------------------ + --- packet7_pbft_sync -- + ------------------------ + ------------------------ + - packet8_get_dag_sync - + ------------------------ + 0......................20........................40........................60.......... 
time + */ + + // All packets should be already being processed after short amount of time + std::this_thread::sleep_for(60ms + QUEUE_EMPTIED_WAIT_TRESHOLD_MS); + EXPECT_EQ(queuesSize(tp), 0); + + // Wait until processing of all packets is finished - in some edge cases it might be little bit delayed due to locking + EXPECT_HAPPENS({500s, 20ms}, [&](auto& ctx) { + // Check if transactions was propagated to node0 + WAIT_EXPECT_EQ(ctx, init_data.packets_processing_info->getPacketProcessingTimesCount(), packets_count + 1) + }); + + // Check order of packets how they were processed + const auto packets_proc_info = init_data.packets_processing_info; + + const auto packet0_dag_sync_proc_info = packets_proc_info->getPacketProcessingTimes(packet0_dag_sync_id); + const auto packet1_dag_sync_proc_info = packets_proc_info->getPacketProcessingTimes(packet1_dag_sync_id); + const auto packet2_get_dag_sync_proc_info = packets_proc_info->getPacketProcessingTimes(packet2_get_dag_sync_id); + const auto packet3_get_dag_sync_proc_info = packets_proc_info->getPacketProcessingTimes(packet3_get_dag_sync_id); + const auto packet4_get_pbft_sync_proc_info = packets_proc_info->getPacketProcessingTimes(packet4_get_pbft_sync_id); + const auto packet5_get_pbft_sync_proc_info = packets_proc_info->getPacketProcessingTimes(packet5_get_pbft_sync_id); + const auto packet6_pbft_sync_proc_info = packets_proc_info->getPacketProcessingTimes(packet6_pbft_sync_id); + const auto packet7_pbft_sync_proc_info = packets_proc_info->getPacketProcessingTimes(packet7_pbft_sync_id); + const auto packet8_get_dag_sync_proc_info = packets_proc_info->getPacketProcessingTimes(packet8_get_dag_sync_id); + + checkConcurrentProcessing({ + {packet0_dag_sync_proc_info, "packet0_dag_sync"}, + {packet2_get_dag_sync_proc_info, "packet2_get_dag_sync"}, + {packet4_get_pbft_sync_proc_info, "packet4_get_pbft_sync"}, + {packet6_pbft_sync_proc_info, "packet6_pbft_sync"}, + }); + + checkConcurrentProcessing({ + 
{packet1_dag_sync_proc_info, "packet1_dag_sync"}, + {packet3_get_dag_sync_proc_info, "packet3_get_dag_sync"}, + {packet5_get_pbft_sync_proc_info, "packet5_get_pbft_sync"}, + {packet7_pbft_sync_proc_info, "packet7_pbft_sync"}, + }); + + EXPECT_GT(packet1_dag_sync_proc_info.start_time_, packet0_dag_sync_proc_info.finish_time_); + EXPECT_GT(packet3_get_dag_sync_proc_info.start_time_, packet2_get_dag_sync_proc_info.finish_time_); + EXPECT_GT(packet5_get_pbft_sync_proc_info.start_time_, packet4_get_pbft_sync_proc_info.finish_time_); + EXPECT_GT(packet7_pbft_sync_proc_info.start_time_, packet6_pbft_sync_proc_info.finish_time_); + + EXPECT_GT(packet8_get_dag_sync_proc_info.start_time_, packet3_get_dag_sync_proc_info.finish_time_); +} + +// Test "peer-order blocking dependencies" related to specific (peer & order) combination: +// +// Packets types that are blocked only for processing when received from specific peer & after specific +// time (order), e.g.: new dag block packet processing is blocked until all transactions packets that were received +// before it are processed. 
This blocking dependency is applied only for the same peer so transaction packet from one +// peer does not block new dag block packet from another peer +TEST_F(TarcapTpTest, peer_order_blocking_deps) { + HandlersInitData init_data = createHandlersInitData(); + + auto packets_handler = std::make_shared(); + packets_handler->registerHandler(init_data, "TX_PH", 20); + packets_handler->registerHandler(init_data, "DAG_BLOCK_PH", 0); + packets_handler->registerHandler(init_data, "SYNC_TEST_PH", 40); + + // Creates threadpool + threadpool::PacketsThreadPool tp(10); + tp.setPacketsHandlers(TARAXA_NET_VERSION, packets_handler); + + // Pushes packets to the tp + const auto packet0_tx_id = + tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kTransactionPacket)).value(); + const auto packet1_tx_id = + tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kTransactionPacket)).value(); + const auto packet2_dag_sync_id = + tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kDagSyncPacket)).value(); + const auto packet3_tx_id = + tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kTransactionPacket)).value(); + + size_t packets_count = 0; + const auto packet4_dag_block_id = packets_count = + tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kDagBlockPacket, {createDagBlockRlp(1)})) + .value(); + + // How should packets be processed: + // Note: To understand how are different packet types processed (concurrently without any blocking dependencies or + // synchronously due to some blocking dependencies - depends on situation), check + // PriorityQueue::updateDependenciesStart + /* + -------------- + - packet0_tx - + -------------- + -------------- + - packet1_tx - + -------------- + ---------------------------- + ----- packet2_dag_sync ----- + ---------------------------- + -------------- + - packet3_tx - + -------------- + --------------------- + - packet4_dag_block - + --------------------- + 
0............20.............40....................60.................. time [ms] + */ + + tp.startProcessing(); + + // All packets should be already being processed after short amount of time + std::this_thread::sleep_for(60ms + QUEUE_EMPTIED_WAIT_TRESHOLD_MS); + EXPECT_EQ(queuesSize(tp), 0); + + // Wait until processing of all packets is finished - in some edge cases it might be little bit delayed due to locking + EXPECT_HAPPENS({500s, 20ms}, [&](auto& ctx) { + // Check if transactions was propagated to node0 + WAIT_EXPECT_EQ(ctx, init_data.packets_processing_info->getPacketProcessingTimesCount(), packets_count + 1) + }); + + // Check order of packets how they were processed + const auto packets_proc_info = init_data.packets_processing_info; + + const auto packet0_tx_proc_info = packets_proc_info->getPacketProcessingTimes(packet0_tx_id); + const auto packet1_tx_proc_info = packets_proc_info->getPacketProcessingTimes(packet1_tx_id); + const auto packet3_tx_proc_info = packets_proc_info->getPacketProcessingTimes(packet3_tx_id); + const auto packet4_dag_block_proc_info = packets_proc_info->getPacketProcessingTimes(packet4_dag_block_id); + const auto packet2_dag_sync_proc_info = packets_proc_info->getPacketProcessingTimes(packet2_dag_sync_id); + + checkConcurrentProcessing({ + {packet0_tx_proc_info, "packet0_tx"}, + {packet1_tx_proc_info, "packet1_tx"}, + {packet2_dag_sync_proc_info, "packet2_dag_sync"}, + {packet3_tx_proc_info, "packet3_tx"}, + }); + + EXPECT_GT(packet2_dag_sync_proc_info.finish_time_, packet0_tx_proc_info.finish_time_); + EXPECT_GT(packet2_dag_sync_proc_info.finish_time_, packet1_tx_proc_info.finish_time_); + EXPECT_GT(packet2_dag_sync_proc_info.finish_time_, packet3_tx_proc_info.finish_time_); + + EXPECT_GT(packet4_dag_block_proc_info.start_time_, packet2_dag_sync_proc_info.finish_time_); +} + +// Test "dag-block blocking dependencies" related to dag blocks: +// +// Same dag blocks should not be processed at the same time +TEST_F(TarcapTpTest, 
same_dag_blks_ordering) { + HandlersInitData init_data = createHandlersInitData(); + + auto packets_handler = std::make_shared(); + packets_handler->registerHandler(init_data, "DAG_BLOCK_PH", 20); + + // Creates threadpool + threadpool::PacketsThreadPool tp(10); + tp.setPacketsHandlers(TARAXA_NET_VERSION, packets_handler); + + auto dag_block = createDagBlockRlp(0); + + // Pushes packets to the tp + const auto blk0_id = + tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kDagBlockPacket, {dag_block})).value(); + const auto blk1_id = + tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kDagBlockPacket, {dag_block})).value(); + const auto blk2_id = + tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kDagBlockPacket, {dag_block})).value(); + const auto blk3_id = + tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kDagBlockPacket, {dag_block})).value(); + + size_t packets_count = 0; + const auto blk4_id = packets_count = + tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kDagBlockPacket, {dag_block})).value(); + + tp.startProcessing(); + + // How should dag blocks packets be processed: + // Same dag blocks should not be processed concurrently but one after another + + // All packets should be already being processed after short amount of time + std::this_thread::sleep_for(200ms + QUEUE_EMPTIED_WAIT_TRESHOLD_MS); + EXPECT_EQ(queuesSize(tp), 0); + + // Wait until processing of all packets is finished - in some edge cases it might be little bit delayed due to locking + EXPECT_HAPPENS({500s, 20ms}, [&](auto& ctx) { + // Check if transactions was propagated to node0 + WAIT_EXPECT_EQ(ctx, init_data.packets_processing_info->getPacketProcessingTimesCount(), packets_count + 1) + }); + + // Check order of packets how they were processed + const auto packets_proc_info = init_data.packets_processing_info; + + const auto blk0_proc_info = packets_proc_info->getPacketProcessingTimes(blk0_id); + 
const auto blk1_proc_info = packets_proc_info->getPacketProcessingTimes(blk1_id); + const auto blk2_proc_info = packets_proc_info->getPacketProcessingTimes(blk2_id); + const auto blk3_proc_info = packets_proc_info->getPacketProcessingTimes(blk3_id); + const auto blk4_proc_info = packets_proc_info->getPacketProcessingTimes(blk4_id); + + checkSerialProcessing({ + {blk0_proc_info, "blk0"}, + {blk1_proc_info, "blk1"}, + {blk2_proc_info, "blk2"}, + {blk3_proc_info, "blk3"}, + {blk4_proc_info, "blk4"}, + }); +} + +// Test "dag-level blocking dependencies" related to dag blocks levels: +// +// Ideally only dag blocks with the same level should be processed. In reality there are situation when node receives +// dag block with smaller level than the level of blocks that are already being processed. In such case these blocks +// with smaller levels can be processed concurrently with blocks that have higher level. All new dag blocks with higher +// level than the lowest level from all the blocks that currently being processed are blocked for processing +TEST_F(TarcapTpTest, dag_blks_lvls_ordering) { + HandlersInitData init_data = createHandlersInitData(); + + auto packets_handler = std::make_shared(); + packets_handler->registerHandler(init_data, "DAG_BLOCK_PH", 20); + + // Creates threadpool + threadpool::PacketsThreadPool tp(10); + tp.setPacketsHandlers(TARAXA_NET_VERSION, packets_handler); + + // Pushes packets to the tp + const auto blk0_lvl1_id = + tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kDagBlockPacket, {createDagBlockRlp(1, 1)})) + .value(); + const auto blk1_lvl1_id = + tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kDagBlockPacket, {createDagBlockRlp(1, 2)})) + .value(); + const auto blk2_lvl0_id = + tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kDagBlockPacket, {createDagBlockRlp(0, 3)})) + .value(); + const auto blk3_lvl1_id = + tp.push(createPacket(init_data.copySender(), 
SubprotocolPacketType::kDagBlockPacket, {createDagBlockRlp(1, 4)})) + .value(); + const auto blk4_lvl2_id = + tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kDagBlockPacket, {createDagBlockRlp(2, 5)})) + .value(); + + size_t packets_count = 0; + const auto blk5_lvl3_id = packets_count = + tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kDagBlockPacket, {createDagBlockRlp(3, 6)})) + .value(); + + tp.startProcessing(); + + // How should dag blocks packets be processed: + // Note: To understand how are different packet types processed (concurrently without any blocking dependencies or + // synchronously due to some blocking dependencies - depends on situation), check + // PriorityQueue::updateDependenciesStart + /* + ------------- + - blk0_lvl1 - + ------------- + ------------- + - blk1_lvl1 - + ------------- + ------------- + - blk2_lvl0 - + ------------- + ------------- + - blk3_lvl1 - + ------------- + ------------- + - blk4_lvl2 - + ------------- + ------------- + - blk5_lvl3 - + ------------- + 0...........20............40............60.............80................. 
time [ms] + */ + + // All packets should be already being processed after short amount of time + std::this_thread::sleep_for(80ms + QUEUE_EMPTIED_WAIT_TRESHOLD_MS); + EXPECT_EQ(queuesSize(tp), 0); + + // Wait until processing of all packets is finished - in some edge cases it might be little bit delayed due to locking + EXPECT_HAPPENS({500s, 20ms}, [&](auto& ctx) { + // Check if transactions was propagated to node0 + WAIT_EXPECT_EQ(ctx, init_data.packets_processing_info->getPacketProcessingTimesCount(), packets_count + 1) + }); + + // Check order of packets how they were processed + const auto packets_proc_info = init_data.packets_processing_info; + + const auto blk0_lvl1_proc_info = packets_proc_info->getPacketProcessingTimes(blk0_lvl1_id); + const auto blk1_lvl1_proc_info = packets_proc_info->getPacketProcessingTimes(blk1_lvl1_id); + const auto blk2_lvl0_proc_info = packets_proc_info->getPacketProcessingTimes(blk2_lvl0_id); + const auto blk3_lvl1_proc_info = packets_proc_info->getPacketProcessingTimes(blk3_lvl1_id); + const auto blk4_lvl2_proc_info = packets_proc_info->getPacketProcessingTimes(blk4_lvl2_id); + const auto blk5_lvl3_proc_info = packets_proc_info->getPacketProcessingTimes(blk5_lvl3_id); + + checkConcurrentProcessing({ + {blk0_lvl1_proc_info, "blk0_lvl1"}, + {blk1_lvl1_proc_info, "blk1_lvl1"}, + {blk2_lvl0_proc_info, "blk2_lvl0"}, + }); + + EXPECT_GT(blk3_lvl1_proc_info.start_time_, blk2_lvl0_proc_info.finish_time_); + EXPECT_GT(blk4_lvl2_proc_info.start_time_, blk3_lvl1_proc_info.finish_time_); + EXPECT_GT(blk5_lvl3_proc_info.start_time_, blk4_lvl2_proc_info.finish_time_); +} + +// Test threads borrowing +// +// It can happen that no packet for processing was returned during the first iteration over priority queues as there +// are limits for max total workers per each priority queue. These limits can and should be ignored in some +// scenarios... 
For example: +// High priority queue reached it's max workers limit, other queues have inside many blocked packets that cannot be +// currently processed concurrently and MAX_TOTAL_WORKERS_COUNT is not reached yet. In such case some threads might +// be unused. In such cases priority queues max workers limits can and should be ignored. +// +// Always keep 1 reserved thread for each priority queue at all times +TEST_F(TarcapTpTest, threads_borrowing) { + HandlersInitData init_data = createHandlersInitData(); + + auto packets_handler = std::make_shared(); + packets_handler->registerHandler(init_data, "VOTE_PH", 100); + + // Creates threadpool + const size_t threads_num = 10; + threadpool::PacketsThreadPool tp(threads_num); + tp.setPacketsHandlers(TARAXA_NET_VERSION, packets_handler); + + // Pushes packets to the tp + std::vector pushed_packets_ids; + for (size_t i = 0; i < threads_num; i++) { + uint64_t packet_id = tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kVotePacket, {})).value(); + pushed_packets_ids.push_back(packet_id); + } + + tp.startProcessing(); + + // How should packets be processed: + // Note: To understand how are different packet types processed (concurrently without any blocking dependencies or + // synchronously due to some blocking dependencies - depends on situation), check + // PriorityQueue::updateDependenciesStart + // + // Note: each queue has 1 thread reserved at all times(even if it does not do anything) and there is 10 threads in + // total, even with borrowing only 8 threads could be used at the same time + /* + ---------------- + - packet0_vote - + ---------------- + ---------------- + - packet1_vote - + ---------------- + ---------------- + - packet2_vote - + ---------------- + + -||- + ... + + ---------------- + - packet7_vote - + ---------------- + ---------------- + - packet8_vote - + ---------------- + ---------------- + - packet9_vote - + ---------------- + 0..............100...............200........... 
time [ms] + */ + + // First 8 packets should be already processed by this time + std::this_thread::sleep_for(100ms + 50ms /* might take longer due to threads borrowing */); + EXPECT_LE(queuesSize(tp), 2); + + // Check order of packets how they were processed + const auto packets_proc_info = init_data.packets_processing_info; + + // In case some packet processing is not finished yet, getPacketProcessingTimes() returns default (empty) value + std::chrono::steady_clock::time_point default_time_point; + + // Because each queue has 1 thread reserved at all times(even if it does not do anything) and there is 10 threads in + // total, even with borrowing only 8 threads could be used at the same time, thus last 2 packets (9th & 10th) should + // not be processed after (100 + WAIT_TRESHOLD_MS) ms + EXPECT_EQ(packets_proc_info->getPacketProcessingTimes(pushed_packets_ids[8]).finish_time_, default_time_point); + EXPECT_EQ(packets_proc_info->getPacketProcessingTimes(pushed_packets_ids[9]).finish_time_, default_time_point); + + std::vector> packets_proc_info_vec; + for (size_t i = 0; i < threads_num - (threadpool::PacketData::PacketPriority::Count - 1); i++) { + packets_proc_info_vec.emplace_back(packets_proc_info->getPacketProcessingTimes(pushed_packets_ids[i]), + "packet" + std::to_string(pushed_packets_ids[i]) + "_vote"); + } + + // Check if first 8 pbft vote packets were processed concurrently -> threads from other queues had to be borrowed for + // that + checkConcurrentProcessing(packets_proc_info_vec); +} + +// Test low priority queue starvation +// +// It should never happen that packets from lower priority queues are waiting to be processed until all packets from +// higher priority queues are processed +TEST_F(TarcapTpTest, low_priotity_queue_starvation) { + HandlersInitData init_data = createHandlersInitData(); + + auto packets_handler = std::make_shared(); + // Handler for packet from high priority queue + packets_handler->registerHandler(init_data, "VOTE_PH", 20); 
+ + // Handler for packet from mid priority queue + packets_handler->registerHandler(init_data, "TX_PH", 20); + + // Handler for packet from low priority queue + packets_handler->registerHandler(init_data, "STATUS_PH", 20); + + // Creates threadpool + size_t threads_num = 10; + threadpool::PacketsThreadPool tp(threads_num); + tp.setPacketsHandlers(TARAXA_NET_VERSION, packets_handler); + + // Push 10x more packets for each prioriy queue than max tp capacity to make sure that tp wont be able to process all + // packets from each queue concurrently -> many packets will be waiting due to max threads num reached for specific + // priority queues + for (size_t i = 0; i < 2 * 10 * threads_num; i++) { + tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kVotePacket, {})).value(); + tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kTransactionPacket, {})).value(); + } + + // Push a few packets low priority packets + for (size_t i = 0; i < 4; i++) { + tp.push(createPacket(init_data.copySender(), SubprotocolPacketType::kStatusPacket, {})).value(); + } + + tp.startProcessing(); + + // How should packets be processed: + // Note: To understand how are different packet types processed (concurrently without any blocking dependencies or + // synchronously due to some blocking dependencies - depends on situation), check + // PriorityQueue::updateDependenciesStart In this test are max concurrent processing limits for queues reached, so + // when we have 10 threads in thredpool: + // - 4 is limit for High priority queue - VotePacket + // - 4 is limit for Mid priority queue - TransactionPacket + // - 3 is limit for Low priority queue - StatusPacket, but because max total limit (10) is always checked first + // , low priority queue wont be able to use more than 2 threads concurrently + /* + ---------------- + - packet0_vote - + ---------------- + ---------------- + - packet1_vote - + ---------------- + ---------------- + - packet2_vote - + 
---------------- + ---------------- + - packet3_vote - + ---------------- + ---------------- + -- packet4_tx -- + ---------------- + ---------------- + -- packet5_tx -- + ---------------- + ---------------- + -- packet6_tx -- + ---------------- + ---------------- + -- packet7_tx -- + ---------------- + + .... + votes and tx packets are processed concurrently 4 at a time until all of them are processed + + + ------------------ + - packet400_test - + ------------------ + ------------------ + - packet401_test - + ------------------ + ------------------ + - packet402_test - + ------------------ + ------------------ + - packet403_test - + ------------------ + 0.................20.................40................... time [ms] + */ + + std::this_thread::sleep_for(40ms + QUEUE_EMPTIED_WAIT_TRESHOLD_MS); + + const auto [high_priority_queue_size, mid_priority_queue_size, low_priority_queue_size] = tp.getQueueSize(); + + EXPECT_GT(high_priority_queue_size, 0); + EXPECT_GT(mid_priority_queue_size, 0); + EXPECT_EQ(low_priority_queue_size, 0); +} + +} // namespace taraxa::core_tests + +int main(int argc, char** argv) { + using namespace taraxa; + + auto logging = logger::createDefaultLoggingConfig(); + + // Set this to debug to see log msgs + logging.verbosity = logger::Verbosity::Debug; + + addr_t node_addr; + logger::InitLogging(logging, node_addr); + + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} \ No newline at end of file From b8b3acc6a56f2fb81294a1891342aad808ace720 Mon Sep 17 00:00:00 2001 From: Jakub Fornadel Date: Tue, 29 Oct 2024 14:44:31 +0100 Subject: [PATCH 075/105] use optimized rlp format for votes bundles --- .../packets/latest/pillar_votes_bundle_packet.hpp | 7 +++++-- .../tarcap/packets/latest/votes_bundle_packet.hpp | 12 +++--------- 2 files changed, 8 insertions(+), 11 deletions(-) diff --git a/libraries/core_libs/network/include/network/tarcap/packets/latest/pillar_votes_bundle_packet.hpp 
b/libraries/core_libs/network/include/network/tarcap/packets/latest/pillar_votes_bundle_packet.hpp index af5aa0a6f2..bf2bf1f632 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets/latest/pillar_votes_bundle_packet.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets/latest/pillar_votes_bundle_packet.hpp @@ -3,14 +3,17 @@ #include "common/encoding_rlp.hpp" #include "network/tarcap/packets_handlers/latest/common/exceptions.hpp" #include "vote/pillar_vote.hpp" +#include "vote/votes_bundle_rlp.hpp" namespace taraxa::network::tarcap { struct PillarVotesBundlePacket { - // TODO[2870]: optimize rlp size (use custom class), see encodePillarVotesBundleRlp std::vector> pillar_votes; - RLP_FIELDS_DEFINE_INPLACE(pillar_votes) + void rlp(::taraxa::util::RLPDecoderRef encoding) { pillar_votes = decodePillarVotesBundleRlp(encoding.value); } + void rlp(::taraxa::util::RLPEncoderRef encoding) const { + encoding.appendRaw(encodePillarVotesBundleRlp(pillar_votes)); + } }; } // namespace taraxa::network::tarcap diff --git a/libraries/core_libs/network/include/network/tarcap/packets/latest/votes_bundle_packet.hpp b/libraries/core_libs/network/include/network/tarcap/packets/latest/votes_bundle_packet.hpp index 261b5a52a6..a05f519e88 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets/latest/votes_bundle_packet.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets/latest/votes_bundle_packet.hpp @@ -2,21 +2,15 @@ #include "network/tarcap/packets_handlers/latest/common/exceptions.hpp" #include "vote/pbft_vote.hpp" +#include "vote/votes_bundle_rlp.hpp" namespace taraxa::network::tarcap { struct VotesBundlePacket { - // TODO[2870]: Create votes bundles class - // blk_hash_t votes_bundle_block_hash; - // PbftPeriod votes_bundle_pbft_period; - // PbftRound votes_bundle_pbft_round; - // PbftStep votes_bundle_votes_step; - std::vector> votes; - // RLP_FIELDS_DEFINE_INPLACE(votes_bundle_block_hash, votes_bundle_pbft_period, 
votes_bundle_pbft_round, - // votes_bundle_votes_step, votes) - RLP_FIELDS_DEFINE_INPLACE(votes) + void rlp(::taraxa::util::RLPDecoderRef encoding) { votes = decodePbftVotesBundleRlp(encoding.value); } + void rlp(::taraxa::util::RLPEncoderRef encoding) const { encoding.appendRaw(encodePbftVotesBundleRlp(votes)); } }; } // namespace taraxa::network::tarcap From c289cfb2fc803c7f835d9c396330c65ba6952005 Mon Sep 17 00:00:00 2001 From: Jakub Fornadel Date: Wed, 30 Oct 2024 14:54:27 +0100 Subject: [PATCH 076/105] refactor periodData getter + fix votes_bundle processing --- .../src/dag/sortition_params_manager.cpp | 12 +++++----- .../consensus/src/final_chain/final_chain.cpp | 11 +++++----- .../consensus/src/pbft/pbft_manager.cpp | 22 +++++++++---------- .../packets/latest/pbft_sync_packet.hpp | 6 ++--- .../packets/latest/votes_bundle_packet.hpp | 5 ++--- .../common/ext_votes_packet_handler.hpp | 5 +++-- .../latest/get_pbft_sync_packet_handler.cpp | 14 +++++------- .../latest/pbft_sync_packet_handler.cpp | 11 ++++++---- .../latest/votes_bundle_packet_handler.cpp | 14 ++++++------ .../storage/include/storage/storage.hpp | 2 +- libraries/core_libs/storage/src/storage.cpp | 9 ++++++++ .../vote/include/vote/votes_bundle_rlp.hpp | 14 ++++++++++++ libraries/types/vote/src/votes_bundle_rlp.cpp | 14 ++++++++++++ tests/pbft_manager_test.cpp | 7 +++--- 14 files changed, 90 insertions(+), 56 deletions(-) diff --git a/libraries/core_libs/consensus/src/dag/sortition_params_manager.cpp b/libraries/core_libs/consensus/src/dag/sortition_params_manager.cpp index aa37a3a6b0..69d1158486 100644 --- a/libraries/core_libs/consensus/src/dag/sortition_params_manager.cpp +++ b/libraries/core_libs/consensus/src/dag/sortition_params_manager.cpp @@ -46,14 +46,16 @@ SortitionParamsManager::SortitionParamsManager(const addr_t& node_addr, Sortitio auto period = params_changes_.back().period + 1; ignored_efficiency_counter_ = 0; while (true) { - auto data = db_->getPeriodDataRaw(period); - if 
(data.size() == 0) break; + auto period_data = db_->getPeriodData(period); + if (!period_data.has_value()) { + break; + } + period++; - PeriodData period_data(data); - if (period_data.pbft_blk->getPivotDagBlockHash() != kNullBlockHash) { + if (period_data->pbft_blk->getPivotDagBlockHash() != kNullBlockHash) { if (static_cast(ignored_efficiency_counter_) >= config_.changing_interval - config_.computation_interval) { - dag_efficiencies_.push_back(calculateDagEfficiency(period_data)); + dag_efficiencies_.push_back(calculateDagEfficiency(*period_data)); } else { ignored_efficiency_counter_++; } diff --git a/libraries/core_libs/consensus/src/final_chain/final_chain.cpp b/libraries/core_libs/consensus/src/final_chain/final_chain.cpp index ec706648bc..19f5ea47a9 100644 --- a/libraries/core_libs/consensus/src/final_chain/final_chain.cpp +++ b/libraries/core_libs/consensus/src/final_chain/final_chain.cpp @@ -57,13 +57,12 @@ FinalChain::FinalChain(const std::shared_ptr& db, const taraxa::FullN if (*last_blk_num != state_db_descriptor.blk_num) [[unlikely]] { auto batch = db_->createWriteBatch(); for (auto block_n = *last_blk_num; block_n != state_db_descriptor.blk_num; --block_n) { - auto raw_period_data = db_->getPeriodDataRaw(block_n); - assert(raw_period_data.size() > 0); + auto period_data = db_->getPeriodData(block_n); + assert(period_data.has_value()); - const PeriodData period_data(std::move(raw_period_data)); - if (period_data.transactions.size()) { - num_executed_dag_blk_ -= period_data.dag_blocks.size(); - num_executed_trx_ -= period_data.transactions.size(); + if (period_data->transactions.size()) { + num_executed_dag_blk_ -= period_data->dag_blocks.size(); + num_executed_trx_ -= period_data->transactions.size(); } auto period_system_transactions = db_->getPeriodSystemTransactionsHashes(block_n); num_executed_trx_ -= period_system_transactions.size(); diff --git a/libraries/core_libs/consensus/src/pbft/pbft_manager.cpp 
b/libraries/core_libs/consensus/src/pbft/pbft_manager.cpp index f2d4235e11..d814c2ee27 100644 --- a/libraries/core_libs/consensus/src/pbft/pbft_manager.cpp +++ b/libraries/core_libs/consensus/src/pbft/pbft_manager.cpp @@ -54,26 +54,25 @@ PbftManager::PbftManager(const FullNodeConfig &conf, std::shared_ptr for (auto period = final_chain_->lastBlockNumber() + 1, curr_period = pbft_chain_->getPbftChainSize(); period <= curr_period; ++period) { - auto period_raw = db_->getPeriodDataRaw(period); - if (period_raw.size() == 0) { + auto period_data = db_->getPeriodData(period); + if (!period_data.has_value()) { LOG(log_er_) << "DB corrupted - Cannot find PBFT block in period " << period << " in PBFT chain DB pbft_blocks."; assert(false); } - PeriodData period_data(period_raw); - if (period_data.pbft_blk->getPeriod() != period) { - LOG(log_er_) << "DB corrupted - PBFT block hash " << period_data.pbft_blk->getBlockHash() - << " has different period " << period_data.pbft_blk->getPeriod() + if (period_data->pbft_blk->getPeriod() != period) { + LOG(log_er_) << "DB corrupted - PBFT block hash " << period_data->pbft_blk->getBlockHash() + << " has different period " << period_data->pbft_blk->getPeriod() << " in block data than in block order db: " << period; assert(false); } // We need this section because votes need to be verified for reward distribution - for (const auto &v : period_data.previous_block_cert_votes) { + for (const auto &v : period_data->previous_block_cert_votes) { vote_mgr_->validateVote(v); } - finalize_(std::move(period_data), db_->getFinalizedDagBlockHashesByPeriod(period), period == curr_period); + finalize_(std::move(*period_data), db_->getFinalizedDagBlockHashesByPeriod(period), period == curr_period); } PbftPeriod start_period = 1; @@ -83,13 +82,12 @@ PbftManager::PbftManager(const FullNodeConfig &conf, std::shared_ptr start_period = pbft_chain_->getPbftChainSize() - recently_finalized_transactions_periods; } for (PbftPeriod period = start_period; period 
<= pbft_chain_->getPbftChainSize(); period++) { - auto period_raw = db_->getPeriodDataRaw(period); - if (period_raw.size() == 0) { + auto period_data = db_->getPeriodData(period); + if (!period_data.has_value()) { LOG(log_er_) << "DB corrupted - Cannot find PBFT block in period " << period << " in PBFT chain DB pbft_blocks."; assert(false); } - PeriodData period_data(period_raw); - trx_mgr_->initializeRecentlyFinalizedTransactions(period_data); + trx_mgr_->initializeRecentlyFinalizedTransactions(*period_data); } // Initialize PBFT status diff --git a/libraries/core_libs/network/include/network/tarcap/packets/latest/pbft_sync_packet.hpp b/libraries/core_libs/network/include/network/tarcap/packets/latest/pbft_sync_packet.hpp index 206a08bb4a..b3f5fd4a12 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets/latest/pbft_sync_packet.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets/latest/pbft_sync_packet.hpp @@ -9,11 +9,9 @@ namespace taraxa::network::tarcap { struct PbftSyncPacket { bool last_block; PeriodData period_data; - // TODO: should it be optional ??? 
- // TODO[2870]: optimize rlp size (use custom class), see encodePbftVotesBundleRlp - std::vector> current_block_cert_votes; + std::optional current_block_cert_votes_bundle; - RLP_FIELDS_DEFINE_INPLACE(last_block, period_data, current_block_cert_votes) + RLP_FIELDS_DEFINE_INPLACE(last_block, period_data, current_block_cert_votes_bundle) }; } // namespace taraxa::network::tarcap diff --git a/libraries/core_libs/network/include/network/tarcap/packets/latest/votes_bundle_packet.hpp b/libraries/core_libs/network/include/network/tarcap/packets/latest/votes_bundle_packet.hpp index a05f519e88..064b84634d 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets/latest/votes_bundle_packet.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets/latest/votes_bundle_packet.hpp @@ -7,10 +7,9 @@ namespace taraxa::network::tarcap { struct VotesBundlePacket { - std::vector> votes; + OptimizedPbftVotesBundle votes_bundle; - void rlp(::taraxa::util::RLPDecoderRef encoding) { votes = decodePbftVotesBundleRlp(encoding.value); } - void rlp(::taraxa::util::RLPEncoderRef encoding) const { encoding.appendRaw(encodePbftVotesBundleRlp(votes)); } + RLP_FIELDS_DEFINE_INPLACE(votes_bundle) }; } // namespace taraxa::network::tarcap diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/common/ext_votes_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/common/ext_votes_packet_handler.hpp index 9171c7adba..4c832c97b1 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/common/ext_votes_packet_handler.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/common/ext_votes_packet_handler.hpp @@ -122,8 +122,9 @@ class ExtVotesPacketHandler : public PacketHandler { auto sendVotes = [this, &peer](std::vector>&& votes) { // TODO[2868]: optimize this auto votes_copy = votes; - if (this->sealAndSend(peer->getId(), 
SubprotocolPacketType::kVotesBundlePacket, - encodePacketRlp(VotesBundlePacket{std::move(votes_copy)}))) { + if (this->sealAndSend( + peer->getId(), SubprotocolPacketType::kVotesBundlePacket, + encodePacketRlp(VotesBundlePacket{OptimizedPbftVotesBundle{.votes = std::move(votes_copy)}}))) { LOG(this->log_dg_) << " Votes bundle with " << votes.size() << " votes sent to " << peer->getId(); for (const auto& vote : votes) { peer->markPbftVoteAsKnown(vote->getHash()); diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/get_pbft_sync_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/get_pbft_sync_packet_handler.cpp index 2521ab511a..817a872d8a 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/get_pbft_sync_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/get_pbft_sync_packet_handler.cpp @@ -66,15 +66,13 @@ void GetPbftSyncPacketHandler::sendPbftBlocks(const std::shared_ptr for (auto block_period = from_period; block_period < from_period + blocks_to_transfer; block_period++) { bool last_block = (block_period == from_period + blocks_to_transfer - 1); - auto data = db_->getPeriodDataRaw(block_period); - - if (data.size() == 0) { + auto period_data = db_->getPeriodData(block_period); + if (!period_data.has_value()) { // This can happen when switching from light node to full node setting LOG(log_er_) << "DB corrupted. 
Cannot find period " << block_period << " PBFT block in db"; return; } - PeriodData period_data(std::move(data)); std::shared_ptr pbft_sync_packet; if (pbft_chain_synced && last_block) { @@ -84,13 +82,13 @@ void GetPbftSyncPacketHandler::sendPbftBlocks(const std::shared_ptr // It is possible that the node pushed another block to the chain in the meantime if (reward_votes[0]->getPeriod() == block_period) { // TODO[2870]: use custom votes bundle class instead of vector - pbft_sync_packet = - std::make_shared(last_block, std::move(period_data), std::move(reward_votes)); + pbft_sync_packet = std::make_shared(last_block, std::move(*period_data), + OptimizedPbftVotesBundle{std::move(reward_votes)}); } else { - pbft_sync_packet = std::make_shared(last_block, std::move(period_data)); + pbft_sync_packet = std::make_shared(last_block, std::move(*period_data)); } } else { - pbft_sync_packet = std::make_shared(last_block, std::move(period_data)); + pbft_sync_packet = std::make_shared(last_block, std::move(*period_data)); } LOG(log_dg_) << "Sending PbftSyncPacket period " << block_period << " to " << peer_id; diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/pbft_sync_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/pbft_sync_packet_handler.cpp index 048a6f5d69..6a83b26787 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/pbft_sync_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/pbft_sync_packet_handler.cpp @@ -40,7 +40,7 @@ void PbftSyncPacketHandler::process(PbftSyncPacket &&packet, const std::shared_p // Process received pbft blocks // pbft_chain_synced is the flag to indicate own PBFT chain has synced with the peer's PBFT chain - const bool pbft_chain_synced = packet.current_block_cert_votes.size() > 0; + const bool pbft_chain_synced = packet.current_block_cert_votes_bundle.has_value(); const auto pbft_blk_hash = 
packet.period_data.pbft_blk->getBlockHash(); std::string received_dag_blocks_str; // This is just log related stuff @@ -81,7 +81,7 @@ void PbftSyncPacketHandler::process(PbftSyncPacket &&packet, const std::shared_p // Check cert vote matches if final synced block if (pbft_chain_synced) { - for (auto const &vote : packet.current_block_cert_votes) { + for (auto const &vote : packet.current_block_cert_votes_bundle->votes) { if (vote->getBlockHash() != pbft_blk_hash) { LOG(log_er_) << "Invalid cert votes block hash " << vote->getBlockHash() << " instead of " << pbft_blk_hash << " from peer " << peer->getId().abridged() << " received, stop syncing."; @@ -165,8 +165,11 @@ void PbftSyncPacketHandler::process(PbftSyncPacket &&packet, const std::shared_p LOG(log_tr_) << "Synced PBFT block hash " << pbft_blk_hash << " with " << packet.period_data.previous_block_cert_votes.size() << " cert votes"; LOG(log_tr_) << "Synced PBFT block " << packet.period_data; - pbft_mgr_->periodDataQueuePush(std::move(packet.period_data), peer->getId(), - std::move(packet.current_block_cert_votes)); + std::vector> current_block_cert_votes; + if (pbft_chain_synced) { + current_block_cert_votes = std::move(packet.current_block_cert_votes_bundle->votes); + } + pbft_mgr_->periodDataQueuePush(std::move(packet.period_data), peer->getId(), std::move(current_block_cert_votes)); } auto pbft_sync_period = pbft_mgr_->pbftSyncingPeriod(); diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/votes_bundle_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/votes_bundle_packet_handler.cpp index 184d53e5fe..9d5de0f714 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/votes_bundle_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/votes_bundle_packet_handler.cpp @@ -18,18 +18,18 @@ VotesBundlePacketHandler::VotesBundlePacketHandler(const FullNodeConfig &conf, s logs_prefix + "VOTES_BUNDLE_PH") {} 
void VotesBundlePacketHandler::process(VotesBundlePacket &&packet, const std::shared_ptr &peer) { - if (packet.votes.size() == 0 || packet.votes.size() > kMaxVotesInBundleRlp) { - throw InvalidRlpItemsCountException("VotesBundlePacket", packet.votes.size(), kMaxVotesInBundleRlp); + if (packet.votes_bundle.votes.size() == 0 || packet.votes_bundle.votes.size() > kMaxVotesInBundleRlp) { + throw InvalidRlpItemsCountException("VotesBundlePacket", packet.votes_bundle.votes.size(), kMaxVotesInBundleRlp); } const auto [current_pbft_round, current_pbft_period] = pbft_mgr_->getPbftRoundAndPeriod(); - const auto &reference_vote = packet.votes.front(); + const auto &reference_vote = packet.votes_bundle.votes.front(); const auto votes_bundle_votes_type = reference_vote->getType(); // Votes sync bundles are allowed to cotain only votes bundles of the same type, period, round and step so if first // vote is irrelevant, all of them are - if (!isPbftRelevantVote(packet.votes[0])) { + if (!isPbftRelevantVote(packet.votes_bundle.votes[0])) { LOG(log_wr_) << "Drop votes sync bundle as it is irrelevant for current pbft state. Votes (period, round, step) = (" << reference_vote->getPeriod() << ", " << reference_vote->getRound() << ", " << reference_vote->getStep() << "). 
Current PBFT (period, round, step) = (" << current_pbft_period @@ -53,7 +53,7 @@ void VotesBundlePacketHandler::process(VotesBundlePacket &&packet, const std::sh } size_t processed_votes_count = 0; - for (const auto &vote : packet.votes) { + for (const auto &vote : packet.votes_bundle.votes) { peer->markPbftVoteAsKnown(vote->getHash()); // Do not process vote that has already been validated @@ -71,11 +71,11 @@ void VotesBundlePacketHandler::process(VotesBundlePacket &&packet, const std::sh processed_votes_count++; } - LOG(log_nf_) << "Received " << packet.votes.size() << " (processed " << processed_votes_count + LOG(log_nf_) << "Received " << packet.votes_bundle.votes.size() << " (processed " << processed_votes_count << " ) sync votes from peer " << peer->getId() << " node current round " << current_pbft_round << ", peer pbft round " << reference_vote->getRound(); - onNewPbftVotesBundle(packet.votes, false, peer->getId()); + onNewPbftVotesBundle(packet.votes_bundle.votes, false, peer->getId()); } void VotesBundlePacketHandler::onNewPbftVotesBundle(const std::vector> &votes, diff --git a/libraries/core_libs/storage/include/storage/storage.hpp b/libraries/core_libs/storage/include/storage/storage.hpp index 51266fc07c..31e3498c4b 100644 --- a/libraries/core_libs/storage/include/storage/storage.hpp +++ b/libraries/core_libs/storage/include/storage/storage.hpp @@ -224,8 +224,8 @@ class DbStorage : public std::enable_shared_from_this { // Period data void savePeriodData(const PeriodData& period_data, Batch& write_batch); void clearPeriodDataHistory(PbftPeriod period, uint64_t dag_level_to_keep); - // TODO[2868]: return PeriodData instead of bytes dev::bytes getPeriodDataRaw(PbftPeriod period) const; + std::optional getPeriodData(PbftPeriod period) const; std::optional getPbftBlock(PbftPeriod period) const; std::vector> getPeriodCertVotes(PbftPeriod period) const; blk_hash_t getPeriodBlockHash(PbftPeriod period) const; diff --git 
a/libraries/core_libs/storage/src/storage.cpp b/libraries/core_libs/storage/src/storage.cpp index ed497069e2..eddc4a177e 100644 --- a/libraries/core_libs/storage/src/storage.cpp +++ b/libraries/core_libs/storage/src/storage.cpp @@ -706,6 +706,15 @@ dev::bytes DbStorage::getPeriodDataRaw(PbftPeriod period) const { return asBytes(lookup(toSlice(period), Columns::period_data)); } +std::optional DbStorage::getPeriodData(PbftPeriod period) const { + auto period_data_bytes = getPeriodDataRaw(period); + if (period_data_bytes.empty()) { + return {}; + } + + return PeriodData{std::move(period_data_bytes)}; +} + void DbStorage::savePillarBlock(const std::shared_ptr& pillar_block) { insert(Columns::pillar_block, pillar_block->getPeriod(), pillar_block->getRlp()); } diff --git a/libraries/types/vote/include/vote/votes_bundle_rlp.hpp b/libraries/types/vote/include/vote/votes_bundle_rlp.hpp index 8a1a1072cc..a7a794f572 100644 --- a/libraries/types/vote/include/vote/votes_bundle_rlp.hpp +++ b/libraries/types/vote/include/vote/votes_bundle_rlp.hpp @@ -5,6 +5,8 @@ #include +#include "common/encoding_rlp.hpp" + namespace taraxa { class PbftVote; @@ -32,6 +34,12 @@ dev::bytes encodePbftVotesBundleRlp(const std::vector> */ std::vector> decodePbftVotesBundleRlp(const dev::RLP& votes_bundle_rlp); +struct OptimizedPbftVotesBundle { + std::vector> votes; + + HAS_RLP_FIELDS +}; + constexpr static size_t kPillarVotesBundleRlpSize{3}; /** @@ -50,6 +58,12 @@ dev::bytes encodePillarVotesBundleRlp(const std::vector> decodePillarVotesBundleRlp(const dev::RLP& votes_bundle_rlp); +struct OptimizedPillarVotesBundle { + std::vector> pillar_votes; + + HAS_RLP_FIELDS +}; + /** @}*/ } // namespace taraxa diff --git a/libraries/types/vote/src/votes_bundle_rlp.cpp b/libraries/types/vote/src/votes_bundle_rlp.cpp index d557dace94..5350459636 100644 --- a/libraries/types/vote/src/votes_bundle_rlp.cpp +++ b/libraries/types/vote/src/votes_bundle_rlp.cpp @@ -50,6 +50,13 @@ std::vector> 
decodePbftVotesBundleRlp(const dev::RLP& return votes; } +void OptimizedPbftVotesBundle::rlp(::taraxa::util::RLPDecoderRef encoding) { + votes = decodePbftVotesBundleRlp(encoding.value); +} +void OptimizedPbftVotesBundle::rlp(::taraxa::util::RLPEncoderRef encoding) const { + encoding.appendRaw(encodePbftVotesBundleRlp(votes)); +} + dev::bytes encodePillarVotesBundleRlp(const std::vector>& votes) { if (votes.empty()) { assert(false); @@ -89,4 +96,11 @@ std::vector> decodePillarVotesBundleRlp(const dev::R return votes; } +void OptimizedPillarVotesBundle::rlp(::taraxa::util::RLPDecoderRef encoding) { + pillar_votes = decodePillarVotesBundleRlp(encoding.value); +} +void OptimizedPillarVotesBundle::rlp(::taraxa::util::RLPEncoderRef encoding) const { + encoding.appendRaw(encodePillarVotesBundleRlp(pillar_votes)); +} + } // namespace taraxa \ No newline at end of file diff --git a/tests/pbft_manager_test.cpp b/tests/pbft_manager_test.cpp index 313c0fde60..942cdf241a 100644 --- a/tests/pbft_manager_test.cpp +++ b/tests/pbft_manager_test.cpp @@ -663,10 +663,9 @@ TEST_F(PbftManagerWithDagCreation, produce_overweighted_block) { // verify that last block is overweighted, but it is in chain const auto period = node->getFinalChain()->lastBlockNumber(); - auto period_raw = node->getDB()->getPeriodDataRaw(period); - ASSERT_FALSE(period_raw.empty()); - PeriodData period_data(period_raw); - EXPECT_FALSE(node->getPbftManager()->checkBlockWeight(period_data.dag_blocks)); + auto period_data = node->getDB()->getPeriodData(period); + ASSERT_TRUE(period_data.has_value()); + EXPECT_FALSE(node->getPbftManager()->checkBlockWeight(period_data->dag_blocks)); } TEST_F(PbftManagerWithDagCreation, proposed_blocks) { From a186423321a793f13204bbbfb1304fab5364ae0d Mon Sep 17 00:00:00 2001 From: Jakub Fornadel Date: Sat, 2 Nov 2024 15:20:43 +0100 Subject: [PATCH 077/105] adjust pillar votes bundle packet --- .../tarcap/packets/latest/pillar_votes_bundle_packet.hpp | 7 ++----- 
.../latest/get_pillar_votes_bundle_packet_handler.cpp | 2 +- .../latest/pillar_votes_bundle_packet_handler.cpp | 7 ++++--- libraries/types/vote/include/vote/votes_bundle_rlp.hpp | 1 + 4 files changed, 8 insertions(+), 9 deletions(-) diff --git a/libraries/core_libs/network/include/network/tarcap/packets/latest/pillar_votes_bundle_packet.hpp b/libraries/core_libs/network/include/network/tarcap/packets/latest/pillar_votes_bundle_packet.hpp index bf2bf1f632..eb9e4061bf 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets/latest/pillar_votes_bundle_packet.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets/latest/pillar_votes_bundle_packet.hpp @@ -8,12 +8,9 @@ namespace taraxa::network::tarcap { struct PillarVotesBundlePacket { - std::vector> pillar_votes; + OptimizedPillarVotesBundle pillar_votes_bundle; - void rlp(::taraxa::util::RLPDecoderRef encoding) { pillar_votes = decodePillarVotesBundleRlp(encoding.value); } - void rlp(::taraxa::util::RLPEncoderRef encoding) const { - encoding.appendRaw(encodePillarVotesBundleRlp(pillar_votes)); - } + RLP_FIELDS_DEFINE_INPLACE(pillar_votes_bundle) }; } // namespace taraxa::network::tarcap diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/get_pillar_votes_bundle_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/get_pillar_votes_bundle_packet_handler.cpp index 9885cb638c..9c36e9d304 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/get_pillar_votes_bundle_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/get_pillar_votes_bundle_packet_handler.cpp @@ -52,7 +52,7 @@ void GetPillarVotesBundlePacketHandler::process(GetPillarVotesBundlePacket &&pac for (size_t i = 0; i < chunk_size; ++i) { pillar_votes.emplace_back(votes[votes_sent + i]); } - PillarVotesBundlePacket pillar_votes_bundle_packet(std::move(pillar_votes)); + PillarVotesBundlePacket 
pillar_votes_bundle_packet(OptimizedPillarVotesBundle{std::move(pillar_votes)}); // Seal and send the chunk to the peer if (sealAndSend(peer->getId(), SubprotocolPacketType::kPillarVotesBundlePacket, diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/pillar_votes_bundle_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/pillar_votes_bundle_packet_handler.cpp index dce719e0e5..cbbf4e3177 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/pillar_votes_bundle_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/pillar_votes_bundle_packet_handler.cpp @@ -14,15 +14,16 @@ PillarVotesBundlePacketHandler::PillarVotesBundlePacketHandler( void PillarVotesBundlePacketHandler::process(PillarVotesBundlePacket &&packet, const std::shared_ptr &peer) { - if (packet.pillar_votes.size() == 0 || packet.pillar_votes.size() > kMaxPillarVotesInBundleRlp) { - throw InvalidRlpItemsCountException("PillarVotesBundlePacket", packet.pillar_votes.size(), + if (packet.pillar_votes_bundle.pillar_votes.size() == 0 || + packet.pillar_votes_bundle.pillar_votes.size() > kMaxPillarVotesInBundleRlp) { + throw InvalidRlpItemsCountException("PillarVotesBundlePacket", packet.pillar_votes_bundle.pillar_votes.size(), kMaxPillarVotesInBundleRlp); } // TODO[2744]: there could be the same protection as in pbft syncing that only requested bundle packet is accepted LOG(log_dg_) << "PillarVotesBundlePacket received from peer " << peer->getId(); - for (const auto &pillar_vote : packet.pillar_votes) { + for (const auto &pillar_vote : packet.pillar_votes_bundle.pillar_votes) { if (!kConf.genesis.state.hardforks.ficus_hf.isFicusHardfork(pillar_vote->getPeriod())) { std::ostringstream err_msg; err_msg << "Synced pillar vote " << pillar_vote->getHash() << ", period " << pillar_vote->getPeriod() diff --git a/libraries/types/vote/include/vote/votes_bundle_rlp.hpp 
b/libraries/types/vote/include/vote/votes_bundle_rlp.hpp index a7a794f572..2975dc455f 100644 --- a/libraries/types/vote/include/vote/votes_bundle_rlp.hpp +++ b/libraries/types/vote/include/vote/votes_bundle_rlp.hpp @@ -16,6 +16,7 @@ class PillarVote; * @{ */ +// TOOD[2865]: move to cpp file constexpr static size_t kPbftVotesBundleRlpSize{5}; /** From 1f6ce1491cfd61aa25f238da2019c2aa97fa5013 Mon Sep 17 00:00:00 2001 From: Jakub Fornadel Date: Sat, 2 Nov 2024 19:13:17 +0100 Subject: [PATCH 078/105] refactor dag blocks processing to work with shared pointers to avoid objects copying --- .../include/dag/dag_block_proposer.hpp | 4 +- .../consensus/include/dag/dag_manager.hpp | 13 +- .../consensus/include/pbft/pbft_manager.hpp | 6 +- .../consensus/src/dag/dag_block_proposer.cpp | 20 +- .../consensus/src/dag/dag_manager.cpp | 128 ++--- .../src/dag/sortition_params_manager.cpp | 2 +- .../consensus/src/pbft/pbft_manager.cpp | 16 +- .../consensus/src/rewards/block_stats.cpp | 14 +- .../network/include/network/network.hpp | 2 +- .../packets/latest/dag_block_packet.hpp | 2 +- .../latest/dag_block_packet_handler.hpp | 7 +- .../v4/dag_block_packet_handler.hpp | 7 +- .../network/include/network/ws_server.hpp | 4 +- libraries/core_libs/network/src/network.cpp | 2 +- .../latest/dag_block_packet_handler.cpp | 34 +- .../latest/dag_sync_packet_handler.cpp | 5 +- .../latest/pbft_sync_packet_handler.cpp | 8 +- .../v4/dag_block_packet_handler.cpp | 36 +- .../v4/dag_sync_packet_handler.cpp | 20 +- .../v4/pbft_sync_packet_handler.cpp | 8 +- libraries/core_libs/network/src/ws_server.cpp | 6 +- libraries/core_libs/node/src/node.cpp | 2 +- .../storage/include/storage/storage.hpp | 6 +- libraries/core_libs/storage/src/storage.cpp | 72 ++- .../include/dag/dag_block_bundle_rlp.hpp | 4 +- .../dag_block/src/dag_block_bundle_rlp.cpp | 14 +- .../pbft_block/include/pbft/period_data.hpp | 2 +- .../types/pbft_block/src/period_data.cpp | 4 +- tests/dag_block_test.cpp | 53 +- tests/dag_test.cpp | 180 
++++--- tests/final_chain_test.cpp | 5 +- tests/full_node_test.cpp | 25 +- tests/network_test.cpp | 244 +++++----- tests/pbft_chain_test.cpp | 4 +- tests/rewards_stats_test.cpp | 28 +- tests/sortition_test.cpp | 4 +- .../test_util/node_dag_creation_fixture.hpp | 2 +- tests/test_util/include/test_util/samples.hpp | 7 +- .../src/node_dag_creation_fixture.cpp | 13 +- tests/test_util/src/samples.cpp | 457 ++++++++---------- tests/transaction_test.cpp | 5 +- 41 files changed, 764 insertions(+), 711 deletions(-) diff --git a/libraries/core_libs/consensus/include/dag/dag_block_proposer.hpp b/libraries/core_libs/consensus/include/dag/dag_block_proposer.hpp index 5ceb738401..9e1fea8fc6 100644 --- a/libraries/core_libs/consensus/include/dag/dag_block_proposer.hpp +++ b/libraries/core_libs/consensus/include/dag/dag_block_proposer.hpp @@ -90,8 +90,8 @@ class DagBlockProposer { * @param estimations transactions gas estimation * @param vdf vdf with correct difficulty calculation */ - DagBlock createDagBlock(DagFrontier&& frontier, level_t level, const SharedTransactions& trxs, - std::vector&& estimations, VdfSortition&& vdf) const; + std::shared_ptr createDagBlock(DagFrontier&& frontier, level_t level, const SharedTransactions& trxs, + std::vector&& estimations, VdfSortition&& vdf) const; /** * @brief Gets transactions to include in the block - sharding not supported yet diff --git a/libraries/core_libs/consensus/include/dag/dag_manager.hpp b/libraries/core_libs/consensus/include/dag/dag_manager.hpp index a978a378b0..a2e077576a 100644 --- a/libraries/core_libs/consensus/include/dag/dag_manager.hpp +++ b/libraries/core_libs/consensus/include/dag/dag_manager.hpp @@ -79,14 +79,15 @@ class DagManager : public std::enable_shared_from_this { * @return verification result and all the transactions which are part of the block */ std::pair verifyBlock( - const DagBlock &blk, const std::unordered_map> &trxs = {}); + const std::shared_ptr &blk, + const std::unordered_map> &trxs = {}); /** * 
@brief Checks if block pivot and tips are in DAG * @param blk Block to check * @return true if all pivot and tips are in the DAG, false if some is missing with the hash of missing tips/pivot */ - std::pair> pivotAndTipsAvailable(DagBlock const &blk); + std::pair> pivotAndTipsAvailable(const std::shared_ptr &blk); /** * @brief adds verified DAG block in the DAG @@ -95,8 +96,8 @@ class DagManager : public std::enable_shared_from_this { * @param save if true save block and transactions to database * @return true if block added successfully, false with the hash of missing tips/pivot */ - std::pair> addDagBlock(DagBlock &&blk, SharedTransactions &&trxs = {}, - bool proposed = false, + std::pair> addDagBlock(const std::shared_ptr &blk, + SharedTransactions &&trxs = {}, bool proposed = false, bool save = true); // insert to buffer if fail /** @@ -186,7 +187,7 @@ class DagManager : public std::enable_shared_from_this { uint32_t getNonFinalizedBlocksMinDifficulty() const; - util::Event const block_verified_{}; + util::Event> const block_verified_{}; /** * @brief Retrieves Dag Manager mutex, only to be used when finalizing pbft block @@ -276,7 +277,7 @@ class DagManager : public std::enable_shared_from_this { const uint32_t cache_max_size_ = 10000; const uint32_t cache_delete_step_ = 100; - ExpirationCacheMap seen_blocks_; + ExpirationCacheMap> seen_blocks_; std::shared_ptr final_chain_; const uint64_t kPbftGasLimit; const HardforksConfig kHardforks; diff --git a/libraries/core_libs/consensus/include/pbft/pbft_manager.hpp b/libraries/core_libs/consensus/include/pbft/pbft_manager.hpp index 9bdbaed94e..6e8d46fb6e 100644 --- a/libraries/core_libs/consensus/include/pbft/pbft_manager.hpp +++ b/libraries/core_libs/consensus/include/pbft/pbft_manager.hpp @@ -212,7 +212,7 @@ class PbftManager { * @param dag_blocks DAG blocks * @return DAG blocks ordering hash */ - static blk_hash_t calculateOrderHash(const std::vector &dag_blocks); + static blk_hash_t calculateOrderHash(const 
std::vector> &dag_blocks); /** * @brief Reorder transactions data if DAG reordering caused transactions with same sender to have nonce in incorrect @@ -226,7 +226,7 @@ class PbftManager { * @param dag_blocks dag blocks * @return true if total weight of gas estimation is less or equal to gas limit. Otherwise return false */ - bool checkBlockWeight(const std::vector &dag_blocks) const; + bool checkBlockWeight(const std::vector> &dag_blocks) const; blk_hash_t getLastPbftBlockHash(); @@ -564,7 +564,7 @@ class PbftManager { // Multiple proposed pbft blocks could have same dag block anchor at same period so this cache improves retrieval of // dag block order for specific anchor - mutable std::unordered_map> anchor_dag_block_order_cache_; + mutable std::unordered_map>> anchor_dag_block_order_cache_; std::unique_ptr daemon_; std::shared_ptr db_; diff --git a/libraries/core_libs/consensus/src/dag/dag_block_proposer.cpp b/libraries/core_libs/consensus/src/dag/dag_block_proposer.cpp index 20f8782029..510778868a 100644 --- a/libraries/core_libs/consensus/src/dag/dag_block_proposer.cpp +++ b/libraries/core_libs/consensus/src/dag/dag_block_proposer.cpp @@ -168,12 +168,12 @@ bool DagBlockProposer::proposeDagBlock() { auto dag_block = createDagBlock(std::move(frontier), propose_level, transactions, std::move(estimations), std::move(vdf)); - if (dag_mgr_->addDagBlock(std::move(dag_block), std::move(transactions), true).first) { - LOG(log_nf_) << "Proposed new DAG block " << dag_block.getHash() << ", pivot " << dag_block.getPivot() - << " , txs num " << dag_block.getTrxs().size(); + if (dag_mgr_->addDagBlock(dag_block, std::move(transactions), true).first) { + LOG(log_nf_) << "Proposed new DAG block " << dag_block->getHash() << ", pivot " << dag_block->getPivot() + << " , txs num " << dag_block->getTrxs().size(); proposed_blocks_count_ += 1; } else { - LOG(log_er_) << "Failed to add newly proposed dag block " << dag_block.getHash() << " into dag"; + LOG(log_er_) << "Failed to add 
newly proposed dag block " << dag_block->getHash() << " into dag"; } last_propose_level_ = propose_level; @@ -331,8 +331,10 @@ vec_blk_t DagBlockProposer::selectDagBlockTips(const vec_blk_t& frontier_tips, u return tips; } -DagBlock DagBlockProposer::createDagBlock(DagFrontier&& frontier, level_t level, const SharedTransactions& trxs, - std::vector&& estimations, VdfSortition&& vdf) const { +std::shared_ptr DagBlockProposer::createDagBlock(DagFrontier&& frontier, level_t level, + const SharedTransactions& trxs, + std::vector&& estimations, + VdfSortition&& vdf) const { // When we propose block we know it is valid, no need for block verification with queue, // simply add the block to the DAG vec_trx_t trx_hashes; @@ -347,10 +349,8 @@ DagBlock DagBlockProposer::createDagBlock(DagFrontier&& frontier, level_t level, frontier.tips = selectDagBlockTips(frontier.tips, kPbftGasLimit - block_estimation); } - DagBlock block(frontier.pivot, std::move(level), std::move(frontier.tips), std::move(trx_hashes), block_estimation, - std::move(vdf), node_sk_); - - return block; + return std::make_shared(frontier.pivot, std::move(level), std::move(frontier.tips), std::move(trx_hashes), + block_estimation, std::move(vdf), node_sk_); } bool DagBlockProposer::isValidDposProposer(PbftPeriod propose_period) const { diff --git a/libraries/core_libs/consensus/src/dag/dag_manager.cpp b/libraries/core_libs/consensus/src/dag/dag_manager.cpp index 129b6abff0..fd4e89e9ac 100644 --- a/libraries/core_libs/consensus/src/dag/dag_manager.cpp +++ b/libraries/core_libs/consensus/src/dag/dag_manager.cpp @@ -88,9 +88,9 @@ void DagManager::drawPivotGraph(std::string const &str) const { pivot_tree_->drawGraph(str); } -std::pair> DagManager::pivotAndTipsAvailable(DagBlock const &blk) { - auto dag_blk_hash = blk.getHash(); - const auto pivot_hash = blk.getPivot(); +std::pair> DagManager::pivotAndTipsAvailable(const std::shared_ptr &blk) { + auto dag_blk_hash = blk->getHash(); + const auto pivot_hash = 
blk->getPivot(); const auto dag_blk_pivot = getDagBlock(pivot_hash); std::vector missing_tips_or_pivot; @@ -102,7 +102,7 @@ std::pair> DagManager::pivotAndTipsAvailable(DagBl missing_tips_or_pivot.push_back(pivot_hash); } - for (auto const &tip : blk.getTips()) { + for (auto const &tip : blk->getTips()) { auto tip_block = getDagBlock(tip); if (tip_block) { expected_level = std::max(expected_level, tip_block->getLevel() + 1); @@ -116,8 +116,8 @@ std::pair> DagManager::pivotAndTipsAvailable(DagBl return {false, missing_tips_or_pivot}; } - if (expected_level != blk.getLevel()) { - LOG(log_er_) << "DAG Block " << dag_blk_hash << " level " << blk.getLevel() + if (expected_level != blk->getLevel()) { + LOG(log_er_) << "DAG Block " << dag_blk_hash << " level " << blk->getLevel() << ", expected level: " << expected_level; return {false, missing_tips_or_pivot}; } @@ -130,9 +130,9 @@ DagFrontier DagManager::getDagFrontier() { return frontier_; } -std::pair> DagManager::addDagBlock(DagBlock &&blk, SharedTransactions &&trxs, - bool proposed, bool save) { - auto blk_hash = blk.getHash(); +std::pair> DagManager::addDagBlock(const std::shared_ptr &blk, + SharedTransactions &&trxs, bool proposed, bool save) { + auto blk_hash = blk->getHash(); { // One mutex protects the DagManager internal state, the other mutex ensures that dag blocks are gossiped in @@ -142,14 +142,14 @@ std::pair> DagManager::addDagBlock(DagBlock &&blk, { std::scoped_lock lock(mutex_); if (save) { - if (db_->dagBlockInDb(blk.getHash())) { + if (db_->dagBlockInDb(blk->getHash())) { // It is a valid scenario that two threads can receive same block from two peers and process at same time return {true, {}}; } - if (blk.getLevel() < dag_expiry_level_) { + if (blk->getLevel() < dag_expiry_level_) { LOG(log_nf_) << "Dropping old block: " << blk_hash << ". Expiry level: " << dag_expiry_level_ - << ". Block level: " << blk.getLevel(); + << ". 
Block level: " << blk->getLevel(); return {false, {}}; } @@ -162,14 +162,14 @@ std::pair> DagManager::addDagBlock(DagBlock &&blk, // Save the dag block db_->saveDagBlock(blk); } - seen_blocks_.insert(blk.getHash(), blk); - auto pivot_hash = blk.getPivot(); + seen_blocks_.insert(blk->getHash(), blk); + auto pivot_hash = blk->getPivot(); - std::vector tips = blk.getTips(); + std::vector tips = blk->getTips(); level_t current_max_level = max_level_; - max_level_ = std::max(current_max_level, blk.getLevel()); + max_level_ = std::max(current_max_level, blk->getLevel()); - addToDag(blk_hash, pivot_hash, tips, blk.getLevel()); + addToDag(blk_hash, pivot_hash, tips, blk->getLevel()); if (non_finalized_blks_min_difficulty_ > blk.getDifficulty()) { non_finalized_blks_min_difficulty_ = blk.getDifficulty(); } @@ -335,11 +335,11 @@ uint DagManager::setDagBlockOrder(blk_hash_t const &new_anchor, PbftPeriod perio } // Only update counter for blocks that are in the dag_order and not in memory DAG, this is only possible when pbft // syncing and processing period data - std::vector dag_blocks_to_update_counters; + std::vector> dag_blocks_to_update_counters; for (auto const &blk : dag_order) { if (non_finalized_blocks_set.count(blk) == 0) { auto dag_block = getDagBlock(blk); - dag_blocks_to_update_counters.push_back(*dag_block); + dag_blocks_to_update_counters.push_back(dag_block); } } @@ -490,38 +490,38 @@ void DagManager::recoverDag() { for (auto &blk : lvl.second) { // These are some sanity checks that difficulty is correct and block is truly non-finalized. 
// This is only done on startup - auto period = db_->getDagBlockPeriod(blk.getHash()); + auto period = db_->getDagBlockPeriod(blk->getHash()); if (period != nullptr) { LOG(log_er_) << "Nonfinalized Dag Block actually finalized in period " << period->first; break; } else { - auto propose_period = db_->getProposalPeriodForDagLevel(blk.getLevel()); + auto propose_period = db_->getProposalPeriodForDagLevel(blk->getLevel()); if (!propose_period.has_value()) { - LOG(log_er_) << "No propose period for dag level " << blk.getLevel() << " found"; + LOG(log_er_) << "No propose period for dag level " << blk->getLevel() << " found"; assert(false); break; } - const auto pk = key_manager_->getVrfKey(*propose_period, blk.getSender()); + const auto pk = key_manager_->getVrfKey(*propose_period, blk->getSender()); if (!pk) { - LOG(log_er_) << "DAG block " << blk.getHash() << " with " << blk.getLevel() - << " level is missing VRF key for sender " << blk.getSender(); + LOG(log_er_) << "DAG block " << blk->getHash() << " with " << blk->getLevel() + << " level is missing VRF key for sender " << blk->getSender(); break; } // Verify VDF solution try { uint64_t max_vote_count = 0; - const auto vote_count = final_chain_->dposEligibleVoteCount(*propose_period, blk.getSender()); + const auto vote_count = final_chain_->dposEligibleVoteCount(*propose_period, blk->getSender()); if (*propose_period < kHardforks.magnolia_hf.block_num) { max_vote_count = final_chain_->dposEligibleTotalVoteCount(*propose_period); } else { max_vote_count = kValidatorMaxVote; } - blk.verifyVdf(sortition_params_manager_.getSortitionParams(*propose_period), - db_->getPeriodBlockHash(*propose_period), *pk, vote_count, max_vote_count); + blk->verifyVdf(sortition_params_manager_.getSortitionParams(*propose_period), + db_->getPeriodBlockHash(*propose_period), *pk, vote_count, max_vote_count); } catch (vdf_sortition::VdfSortition::InvalidVdfSortition const &e) { - LOG(log_er_) << "DAG block " << blk.getHash() << " with " << 
blk.getLevel() - << " level failed on VDF verification with pivot hash " << blk.getPivot() << " reason " + LOG(log_er_) << "DAG block " << blk->getHash() << " with " << blk->getLevel() + << " level failed on VDF verification with pivot hash " << blk->getPivot() << " reason " << e.what(); break; } @@ -530,14 +530,14 @@ void DagManager::recoverDag() { // In case an invalid block somehow ended in DAG db, remove it auto res = pivotAndTipsAvailable(blk); if (res.first) { - if (!addDagBlock(std::move(blk), {}, false, false).first) { - LOG(log_er_) << "DAG block " << blk.getHash() << " could not be added to DAG on startup, removing from db"; - db_->removeDagBlock(blk.getHash()); + if (!addDagBlock(blk, {}, false, false).first) { + LOG(log_er_) << "DAG block " << blk->getHash() << " could not be added to DAG on startup, removing from db"; + db_->removeDagBlock(blk->getHash()); } } else { - LOG(log_er_) << "DAG block " << blk.getHash() + LOG(log_er_) << "DAG block " << blk->getHash() << " could not be added to DAG on startup since it has missing tip/pivot"; - db_->removeDagBlock(blk.getHash()); + db_->removeDagBlock(blk->getHash()); } } } @@ -596,34 +596,34 @@ std::pair DagManager::getNonFinalizedBlocksSize() const { } std::pair DagManager::verifyBlock( - const DagBlock &blk, const std::unordered_map> &trxs) { - const auto &block_hash = blk.getHash(); - vec_trx_t const &all_block_trx_hashes = blk.getTrxs(); + const std::shared_ptr &blk, const std::unordered_map> &trxs) { + const auto &block_hash = blk->getHash(); + vec_trx_t const &all_block_trx_hashes = blk->getTrxs(); vec_trx_t trx_hashes_to_query; SharedTransactions all_block_trxs; // Verify tips/pivot count and uniqueness std::unordered_set unique_tips_pivot; - unique_tips_pivot.insert(blk.getPivot()); - if (blk.getTips().size() > kDagBlockMaxTips) { - LOG(log_er_) << "DAG Block " << block_hash << " tips count " << blk.getTips().size() << " over the limit"; + unique_tips_pivot.insert(blk->getPivot()); + if 
(blk->getTips().size() > kDagBlockMaxTips) { + LOG(log_er_) << "DAG Block " << block_hash << " tips count " << blk->getTips().size() << " over the limit"; return {VerifyBlockReturnType::FailedTipsVerification, {}}; } - for (auto const &tip : blk.getTips()) { + for (auto const &tip : blk->getTips()) { if (!unique_tips_pivot.insert(tip).second) { LOG(log_er_) << "DAG Block " << block_hash << " tip " << tip << " duplicate"; return {VerifyBlockReturnType::FailedTipsVerification, {}}; } } - auto propose_period = db_->getProposalPeriodForDagLevel(blk.getLevel()); + auto propose_period = db_->getProposalPeriodForDagLevel(blk->getLevel()); // Verify DPOS if (!propose_period.has_value()) { // Cannot find the proposal period in DB yet. The slow node gets an ahead block, remove from seen_blocks - LOG(log_nf_) << "Cannot find proposal period in DB for DAG block " << blk.getHash(); + LOG(log_nf_) << "Cannot find proposal period in DB for DAG block " << blk->getHash(); seen_blocks_.erase(block_hash); return {VerifyBlockReturnType::AheadBlock, {}}; } @@ -655,39 +655,39 @@ std::pair DagManager::ver all_block_trxs.emplace_back(std::move(t)); } - if (blk.getLevel() < dag_expiry_level_) { - LOG(log_nf_) << "Dropping old block: " << blk.getHash() << ". Expiry level: " << dag_expiry_level_ - << ". Block level: " << blk.getLevel(); + if (blk->getLevel() < dag_expiry_level_) { + LOG(log_nf_) << "Dropping old block: " << blk->getHash() << ". Expiry level: " << dag_expiry_level_ + << ". 
Block level: " << blk->getLevel(); return {VerifyBlockReturnType::ExpiredBlock, {}}; } // Verify VDF solution - const auto pk = key_manager_->getVrfKey(*propose_period, blk.getSender()); + const auto pk = key_manager_->getVrfKey(*propose_period, blk->getSender()); if (!pk) { - LOG(log_er_) << "DAG block " << blk.getHash() << " with " << blk.getLevel() - << " level is missing VRF key for sender " << blk.getSender(); + LOG(log_er_) << "DAG block " << blk->getHash() << " with " << blk->getLevel() + << " level is missing VRF key for sender " << blk->getSender(); return {VerifyBlockReturnType::FailedVdfVerification, {}}; } try { const auto proposal_period_hash = db_->getPeriodBlockHash(*propose_period); uint64_t max_vote_count = 0; - const auto vote_count = final_chain_->dposEligibleVoteCount(*propose_period, blk.getSender()); + const auto vote_count = final_chain_->dposEligibleVoteCount(*propose_period, blk->getSender()); if (*propose_period < kHardforks.magnolia_hf.block_num) { max_vote_count = final_chain_->dposEligibleTotalVoteCount(*propose_period); } else { max_vote_count = kValidatorMaxVote; } - blk.verifyVdf(sortition_params_manager_.getSortitionParams(*propose_period), proposal_period_hash, *pk, vote_count, - max_vote_count); + blk->verifyVdf(sortition_params_manager_.getSortitionParams(*propose_period), proposal_period_hash, *pk, vote_count, + max_vote_count); } catch (vdf_sortition::VdfSortition::InvalidVdfSortition const &e) { - LOG(log_er_) << "DAG block " << block_hash << " with " << blk.getLevel() - << " level failed on VDF verification with pivot hash " << blk.getPivot() << " reason " << e.what(); + LOG(log_er_) << "DAG block " << block_hash << " with " << blk->getLevel() + << " level failed on VDF verification with pivot hash " << blk->getPivot() << " reason " << e.what(); LOG(log_er_) << "period from map: " << *propose_period << " current: " << pbft_chain_->getPbftChainSize(); return {VerifyBlockReturnType::FailedVdfVerification, {}}; } - auto 
dag_block_sender = blk.getSender(); + auto dag_block_sender = blk->getSender(); bool dpos_qualified; try { dpos_qualified = final_chain_->dposIsEligible(*propose_period, dag_block_sender); @@ -703,27 +703,27 @@ std::pair DagManager::ver } { u256 total_block_weight = 0; - auto block_gas_estimation = blk.getGasEstimation(); + auto block_gas_estimation = blk->getGasEstimation(); for (const auto &trx : all_block_trxs) { total_block_weight += trx_mgr_->estimateTransactionGas(trx, propose_period); } if (total_block_weight != block_gas_estimation) { - LOG(log_er_) << "Invalid block_gas_estimation. DAG block " << blk.getHash() + LOG(log_er_) << "Invalid block_gas_estimation. DAG block " << blk->getHash() << " block_gas_estimation: " << block_gas_estimation << " total_block_weight " << total_block_weight << " current period " << final_chain_->lastBlockNumber(); return {VerifyBlockReturnType::IncorrectTransactionsEstimation, {}}; } if (total_block_weight > getDagConfig().gas_limit) { - LOG(log_er_) << "BlockTooBig. DAG block " << blk.getHash() << " gas_limit: " << getDagConfig().gas_limit + LOG(log_er_) << "BlockTooBig. DAG block " << blk->getHash() << " gas_limit: " << getDagConfig().gas_limit << " total_block_weight " << total_block_weight << " current period " << final_chain_->lastBlockNumber(); return {VerifyBlockReturnType::BlockTooBig, {}}; } - if ((blk.getTips().size() + 1) > kPbftGasLimit / getDagConfig().gas_limit) { - for (const auto &t : blk.getTips()) { + if ((blk->getTips().size() + 1) > kPbftGasLimit / getDagConfig().gas_limit) { + for (const auto &t : blk->getTips()) { const auto tip_blk = getDagBlock(t); if (tip_blk == nullptr) { LOG(log_er_) << "DAG Block " << block_hash << " tip " << t << " not present"; @@ -732,7 +732,7 @@ std::pair DagManager::ver block_gas_estimation += tip_blk->getGasEstimation(); } if (block_gas_estimation > kPbftGasLimit) { - LOG(log_er_) << "BlockTooBig. 
DAG block " << blk.getHash() << " with tips has limit: " << kPbftGasLimit + LOG(log_er_) << "BlockTooBig. DAG block " << blk->getHash() << " with tips has limit: " << kPbftGasLimit << " block_gas_estimation " << block_gas_estimation << " current period " << final_chain_->lastBlockNumber(); return {VerifyBlockReturnType::BlockTooBig, {}}; @@ -740,7 +740,7 @@ std::pair DagManager::ver } } - LOG(log_dg_) << "Verified DAG block " << blk.getHash(); + LOG(log_dg_) << "Verified DAG block " << blk->getHash(); return {VerifyBlockReturnType::Verified, std::move(all_block_trxs)}; } @@ -756,7 +756,7 @@ bool DagManager::isDagBlockKnown(const blk_hash_t &hash) const { std::shared_ptr DagManager::getDagBlock(const blk_hash_t &hash) const { auto blk = seen_blocks_.get(hash); if (blk.second) { - return std::make_shared(blk.first); + return blk.first; } if (hash == genesis_block_->getHash()) { return genesis_block_; diff --git a/libraries/core_libs/consensus/src/dag/sortition_params_manager.cpp b/libraries/core_libs/consensus/src/dag/sortition_params_manager.cpp index 69d1158486..ab8894fda7 100644 --- a/libraries/core_libs/consensus/src/dag/sortition_params_manager.cpp +++ b/libraries/core_libs/consensus/src/dag/sortition_params_manager.cpp @@ -79,7 +79,7 @@ SortitionParams SortitionParamsManager::getSortitionParams(std::optionalgetTrxs(); total_transactions_count += trxs.size(); } diff --git a/libraries/core_libs/consensus/src/pbft/pbft_manager.cpp b/libraries/core_libs/consensus/src/pbft/pbft_manager.cpp index d814c2ee27..ee8f338f23 100644 --- a/libraries/core_libs/consensus/src/pbft/pbft_manager.cpp +++ b/libraries/core_libs/consensus/src/pbft/pbft_manager.cpp @@ -1180,14 +1180,14 @@ blk_hash_t PbftManager::calculateOrderHash(const std::vector &dag_bl return dev::sha3(order_stream.out()); } -blk_hash_t PbftManager::calculateOrderHash(const std::vector &dag_blocks) { +blk_hash_t PbftManager::calculateOrderHash(const std::vector> &dag_blocks) { if (dag_blocks.empty()) { return 
kNullBlockHash; } dev::RLPStream order_stream(1); order_stream.appendList(dag_blocks.size()); for (auto const &blk : dag_blocks) { - order_stream << blk.getHash(); + order_stream << blk->getHash(); } return dev::sha3(order_stream.out()); } @@ -1545,7 +1545,7 @@ bool PbftManager::validatePbftBlock(const std::shared_ptr &pbft_block for (auto const &dag_blk_hash : dag_blocks_order) { auto dag_block = dag_mgr_->getDagBlock(dag_blk_hash); assert(dag_block); - anchor_dag_block_order_cache_[anchor_hash].emplace_back(std::move(*dag_block)); + anchor_dag_block_order_cache_[anchor_hash].emplace_back(std::move(dag_block)); } auto last_pbft_block_hash = pbft_chain_->getLastPbftBlockHash(); @@ -1575,7 +1575,7 @@ bool PbftManager::pushCertVotedPbftBlockIntoChain_(const std::shared_ptr transactions_to_query; period_data.dag_blocks.reserve(dag_order_it->second.size()); for (const auto &dag_blk : dag_order_it->second) { - for (const auto &trx_hash : dag_blk.getTrxs()) { + for (const auto &trx_hash : dag_blk->getTrxs()) { if (trx_set.insert(trx_hash).second) { transactions_to_query.emplace_back(trx_hash); } @@ -1746,7 +1746,7 @@ bool PbftManager::pushPbftBlock_(PeriodData &&period_data, std::vectorgetHash(); }); // We need to reorder transactions before saving them reorderTransactions(period_data.transactions); @@ -1930,7 +1930,7 @@ std::optional>>> Pbf std::unordered_set trx_set; std::vector transactions_to_query; for (auto const &dag_block : period_data.dag_blocks) { - for (auto const &trx_hash : dag_block.getTrxs()) { + for (auto const &trx_hash : dag_block->getTrxs()) { if (trx_set.insert(trx_hash).second) { transactions_to_query.emplace_back(trx_hash); } @@ -2155,10 +2155,10 @@ void PbftManager::periodDataQueuePush(PeriodData &&period_data, dev::p2p::NodeID size_t PbftManager::periodDataQueueSize() const { return sync_queue_.size(); } -bool PbftManager::checkBlockWeight(const std::vector &dag_blocks) const { +bool PbftManager::checkBlockWeight(const std::vector> &dag_blocks) 
const { const u256 total_weight = std::accumulate(dag_blocks.begin(), dag_blocks.end(), u256(0), - [](u256 value, const auto &dag_block) { return value + dag_block.getGasEstimation(); }); + [](u256 value, const auto &dag_block) { return value + dag_block->getGasEstimation(); }); if (total_weight > kGenesisConfig.pbft.gas_limit) { return false; } diff --git a/libraries/core_libs/consensus/src/rewards/block_stats.cpp b/libraries/core_libs/consensus/src/rewards/block_stats.cpp index 643e098eee..caa02c32cb 100644 --- a/libraries/core_libs/consensus/src/rewards/block_stats.cpp +++ b/libraries/core_libs/consensus/src/rewards/block_stats.cpp @@ -86,9 +86,9 @@ void BlockStats::processStats(const PeriodData& block, const bool aspen_dag_rewa void BlockStats::processDagBlocks(const PeriodData& block) { auto block_transactions_hashes_ = toTrxHashesSet(block.transactions); for (const auto& dag_block : block.dag_blocks) { - const addr_t& dag_block_author = dag_block.getSender(); + const addr_t& dag_block_author = dag_block->getSender(); bool has_unique_transactions = false; - for (const auto& tx_hash : dag_block.getTrxs()) { + for (const auto& tx_hash : dag_block->getTrxs()) { // we should also check that we have transactions in pbft block(period data). 
Because in dag blocks could be // included transaction that was finalized in previous blocks if (!block_transactions_hashes_.contains(tx_hash)) { @@ -110,17 +110,17 @@ void BlockStats::processDagBlocks(const PeriodData& block) { void BlockStats::processDagBlocksAspen(const PeriodData& block) { uint16_t min_difficulty = UINT16_MAX; for (const auto& dag_block : block.dag_blocks) { - if (dag_block.getDifficulty() < min_difficulty) { - min_difficulty = dag_block.getDifficulty(); + if (dag_block->getDifficulty() < min_difficulty) { + min_difficulty = dag_block->getDifficulty(); } } for (const auto& dag_block : block.dag_blocks) { - const addr_t& dag_block_author = dag_block.getSender(); - if (dag_block.getDifficulty() == min_difficulty) { + const addr_t& dag_block_author = dag_block->getSender(); + if (dag_block->getDifficulty() == min_difficulty) { validators_stats_[dag_block_author].dag_blocks_count_ += 1; total_dag_blocks_count_ += 1; } - for (const auto& tx_hash : dag_block.getTrxs()) { + for (const auto& tx_hash : dag_block->getTrxs()) { addTransaction(tx_hash, dag_block_author); } } diff --git a/libraries/core_libs/network/include/network/network.hpp b/libraries/core_libs/network/include/network/network.hpp index 36d55064b5..9cce7ec551 100644 --- a/libraries/core_libs/network/include/network/network.hpp +++ b/libraries/core_libs/network/include/network/network.hpp @@ -55,7 +55,7 @@ class Network { uint64_t syncTimeSeconds() const; void setSyncStatePeriod(PbftPeriod period); - void gossipDagBlock(const DagBlock &block, bool proposed, const SharedTransactions &trxs); + void gossipDagBlock(const std::shared_ptr &block, bool proposed, const SharedTransactions &trxs); void gossipVote(const std::shared_ptr &vote, const std::shared_ptr &block, bool rebroadcast = false); void gossipVotesBundle(const std::vector> &votes, bool rebroadcast = false); diff --git a/libraries/core_libs/network/include/network/tarcap/packets/latest/dag_block_packet.hpp 
b/libraries/core_libs/network/include/network/tarcap/packets/latest/dag_block_packet.hpp index f624996840..092bc55054 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets/latest/dag_block_packet.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets/latest/dag_block_packet.hpp @@ -7,7 +7,7 @@ namespace taraxa::network::tarcap { struct DagBlockPacket { std::vector> transactions; - DagBlock dag_block; + std::shared_ptr dag_block; RLP_FIELDS_DEFINE_INPLACE(transactions, dag_block) }; diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/dag_block_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/dag_block_packet_handler.hpp index 4d99d08cba..a639a57301 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/dag_block_packet_handler.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/dag_block_packet_handler.hpp @@ -18,10 +18,11 @@ class DagBlockPacketHandler : public ExtSyncingPacketHandler { std::shared_ptr trx_mgr, std::shared_ptr db, const addr_t &node_addr, const std::string &logs_prefix = ""); - void sendBlockWithTransactions(dev::p2p::NodeID const &peer_id, DagBlock block, const SharedTransactions &trxs); - void onNewBlockReceived(DagBlock &&block, const std::shared_ptr &peer = nullptr, + void sendBlockWithTransactions(dev::p2p::NodeID const &peer_id, const std::shared_ptr &block, + const SharedTransactions &trxs); + void onNewBlockReceived(std::shared_ptr &&block, const std::shared_ptr &peer = nullptr, const std::unordered_map> &trxs = {}); - void onNewBlockVerified(const DagBlock &block, bool proposed, const SharedTransactions &trxs); + void onNewBlockVerified(const std::shared_ptr &block, bool proposed, const SharedTransactions &trxs); // Packet type that is processed by this handler static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::kDagBlockPacket; diff 
--git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/dag_block_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/dag_block_packet_handler.hpp index dc9a172a25..6de3bb9fcf 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/dag_block_packet_handler.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/dag_block_packet_handler.hpp @@ -17,10 +17,11 @@ class DagBlockPacketHandler : public ExtSyncingPacketHandler { std::shared_ptr trx_mgr, std::shared_ptr db, const addr_t &node_addr, const std::string &logs_prefix = ""); - void sendBlockWithTransactions(dev::p2p::NodeID const &peer_id, DagBlock block, const SharedTransactions &trxs); - void onNewBlockReceived(DagBlock &&block, const std::shared_ptr &peer = nullptr, + void sendBlockWithTransactions(dev::p2p::NodeID const &peer_id, const std::shared_ptr &block, + const SharedTransactions &trxs); + void onNewBlockReceived(std::shared_ptr &&block, const std::shared_ptr &peer = nullptr, const std::unordered_map> &trxs = {}); - void onNewBlockVerified(const DagBlock &block, bool proposed, const SharedTransactions &trxs); + void onNewBlockVerified(const std::shared_ptr &block, bool proposed, const SharedTransactions &trxs); // Packet type that is processed by this handler static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::kDagBlockPacket; diff --git a/libraries/core_libs/network/include/network/ws_server.hpp b/libraries/core_libs/network/include/network/ws_server.hpp index 60a3f6ccbd..c7224e79f1 100644 --- a/libraries/core_libs/network/include/network/ws_server.hpp +++ b/libraries/core_libs/network/include/network/ws_server.hpp @@ -51,7 +51,7 @@ class WsSession : public std::enable_shared_from_this { virtual std::string processRequest(const std::string_view& request) = 0; void newEthBlock(const ::taraxa::final_chain::BlockHeader& payload, const TransactionHashes& trx_hashes); 
- void newDagBlock(const DagBlock& blk); + void newDagBlock(const std::shared_ptr& blk); void newDagBlockFinalized(const blk_hash_t& blk, uint64_t period); void newPbftBlockExecuted(const Json::Value& payload); void newPendingTransaction(const trx_hash_t& trx_hash); @@ -97,7 +97,7 @@ class WsServer : public std::enable_shared_from_this, public jsonrpc:: // Start accepting incoming connections void run(); void newEthBlock(const ::taraxa::final_chain::BlockHeader& payload, const TransactionHashes& trx_hashes); - void newDagBlock(const DagBlock& blk); + void newDagBlock(const std::shared_ptr& blk); void newDagBlockFinalized(const blk_hash_t& blk, uint64_t period); void newPbftBlockExecuted(const PbftBlock& sche_blk, const std::vector& finalized_dag_blk_hashes); void newPendingTransaction(const trx_hash_t& trx_hash); diff --git a/libraries/core_libs/network/src/network.cpp b/libraries/core_libs/network/src/network.cpp index d450320159..1e8ad6f63d 100644 --- a/libraries/core_libs/network/src/network.cpp +++ b/libraries/core_libs/network/src/network.cpp @@ -289,7 +289,7 @@ void Network::addBootNodes(bool initial) { } } -void Network::gossipDagBlock(const DagBlock &block, bool proposed, const SharedTransactions &trxs) { +void Network::gossipDagBlock(const std::shared_ptr &block, bool proposed, const SharedTransactions &trxs) { for (const auto &tarcap : tarcaps_) { tarcap.second->getSpecificHandler()->onNewBlockVerified(block, proposed, trxs); diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/dag_block_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/dag_block_packet_handler.cpp index d6da181ddb..43bfd4e5d2 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/dag_block_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/dag_block_packet_handler.cpp @@ -20,19 +20,19 @@ DagBlockPacketHandler::DagBlockPacketHandler(const FullNodeConfig &conf, std::sh 
trx_mgr_(std::move(trx_mgr)) {} void DagBlockPacketHandler::process(DagBlockPacket &&packet, const std::shared_ptr &peer) { - blk_hash_t const hash = packet.dag_block.getHash(); + blk_hash_t const hash = packet.dag_block->getHash(); for (const auto &tx : packet.transactions) { peer->markTransactionAsKnown(tx->getHash()); } peer->markDagBlockAsKnown(hash); - if (packet.dag_block.getLevel() > peer->dag_level_) { - peer->dag_level_ = packet.dag_block.getLevel(); + if (packet.dag_block->getLevel() > peer->dag_level_) { + peer->dag_level_ = packet.dag_block->getLevel(); } // Do not process this block in case we already have it - if (dag_mgr_->isDagBlockKnown(packet.dag_block.getHash())) { + if (dag_mgr_->isDagBlockKnown(packet.dag_block->getHash())) { LOG(log_tr_) << "Received known DagBlockPacket " << hash << "from: " << peer->getId(); return; } @@ -46,11 +46,12 @@ void DagBlockPacketHandler::process(DagBlockPacket &&packet, const std::shared_p onNewBlockReceived(std::move(packet.dag_block), peer, txs_map); } -void DagBlockPacketHandler::sendBlockWithTransactions(dev::p2p::NodeID const &peer_id, taraxa::DagBlock block, +void DagBlockPacketHandler::sendBlockWithTransactions(dev::p2p::NodeID const &peer_id, + const std::shared_ptr &block, const SharedTransactions &trxs) { std::shared_ptr peer = peers_state_->getPeer(peer_id); if (!peer) { - LOG(log_wr_) << "Send dag block " << block.getHash() << ". Failed to obtain peer " << peer_id; + LOG(log_wr_) << "Send dag block " << block->getHash() << ". 
Failed to obtain peer " << peer_id; return; } @@ -61,18 +62,18 @@ void DagBlockPacketHandler::sendBlockWithTransactions(dev::p2p::NodeID const &pe DagBlockPacket dag_block_packet{.transactions = trxs, .dag_block = block}; if (!sealAndSend(peer_id, SubprotocolPacketType::kDagBlockPacket, encodePacketRlp(dag_block_packet))) { - LOG(log_wr_) << "Sending DagBlock " << block.getHash() << " failed to " << peer_id; + LOG(log_wr_) << "Sending DagBlock " << block->getHash() << " failed to " << peer_id; return; } // Mark data as known if sending was successful - peer->markDagBlockAsKnown(block.getHash()); + peer->markDagBlockAsKnown(block->getHash()); } void DagBlockPacketHandler::onNewBlockReceived( - DagBlock &&block, const std::shared_ptr &peer, + std::shared_ptr &&block, const std::shared_ptr &peer, const std::unordered_map> &trxs) { - const auto block_hash = block.getHash(); + const auto block_hash = block->getHash(); auto verified = dag_mgr_->verifyBlock(block, trxs); switch (verified.first) { case DagManager::VerifyBlockReturnType::IncorrectTransactionsEstimation: @@ -135,7 +136,7 @@ void DagBlockPacketHandler::onNewBlockReceived( } break; case DagManager::VerifyBlockReturnType::Verified: { - auto status = dag_mgr_->addDagBlock(std::move(block), std::move(verified.second)); + auto status = dag_mgr_->addDagBlock(block, std::move(verified.second)); if (!status.first) { LOG(log_dg_) << "Received DagBlockPacket " << block_hash << "from: " << peer->getId(); // Ignore new block packets when pbft syncing @@ -147,9 +148,9 @@ void DagBlockPacketHandler::onNewBlockReceived( if (peer->peer_dag_synced_) { std::ostringstream err_msg; if (status.second.size() > 0) - err_msg << "DagBlock" << block.getHash() << " has missing pivot or/and tips " << status.second; + err_msg << "DagBlock" << block->getHash() << " has missing pivot or/and tips " << status.second; else - err_msg << "DagBlock" << block.getHash() << " could not be added to DAG"; + err_msg << "DagBlock" << block->getHash() 
<< " could not be added to DAG"; throw MaliciousPeerException(err_msg.str()); } else { // peer_dag_synced_ flag ensures that this can only be performed once for a peer @@ -163,14 +164,15 @@ void DagBlockPacketHandler::onNewBlockReceived( } } -void DagBlockPacketHandler::onNewBlockVerified(const DagBlock &block, bool proposed, const SharedTransactions &trxs) { +void DagBlockPacketHandler::onNewBlockVerified(const std::shared_ptr &block, bool proposed, + const SharedTransactions &trxs) { // If node is pbft syncing and block is not proposed by us, this is an old block that has been verified - no block // gossip is needed if (!proposed && pbft_syncing_state_->isDeepPbftSyncing()) { return; } - const auto &block_hash = block.getHash(); + const auto &block_hash = block->getHash(); LOG(log_tr_) << "Verified NewBlock " << block_hash.toString(); std::vector peers_to_send; @@ -218,7 +220,7 @@ void DagBlockPacketHandler::onNewBlockVerified(const DagBlock &block, bool propo } } } - LOG(log_dg_) << "Send DagBlock " << block.getHash() << " to peers: " << peer_and_transactions_to_log; + LOG(log_dg_) << "Send DagBlock " << block->getHash() << " to peers: " << peer_and_transactions_to_log; if (!peers_to_send.empty()) LOG(log_tr_) << "Sent block to " << peers_to_send.size() << " peers"; } } // namespace taraxa::network::tarcap diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/dag_sync_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/dag_sync_packet_handler.cpp index 9c8e2319fe..ca002eba3b 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/dag_sync_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/dag_sync_packet_handler.cpp @@ -74,7 +74,7 @@ void DagSyncPacketHandler::process(DagSyncPacket&& packet, const std::shared_ptr continue; } - auto verified = dag_mgr_->verifyBlock(*block, transactions_map); + auto verified = dag_mgr_->verifyBlock(block, 
transactions_map); if (verified.first != DagManager::VerifyBlockReturnType::Verified) { std::ostringstream err_msg; err_msg << "DagBlock " << block->getHash() << " failed verification with error code " @@ -84,8 +84,7 @@ void DagSyncPacketHandler::process(DagSyncPacket&& packet, const std::shared_ptr if (block->getLevel() > peer->dag_level_) peer->dag_level_ = block->getLevel(); - // TODO[2869]: fix dag blocks usage - shared_ptr vs object type on different places... - auto status = dag_mgr_->addDagBlock(std::move(*block), std::move(verified.second)); + auto status = dag_mgr_->addDagBlock(block, std::move(verified.second)); if (!status.first) { std::ostringstream err_msg; if (status.second.size() > 0) diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/pbft_sync_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/pbft_sync_packet_handler.cpp index 6a83b26787..93ebe44028 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/pbft_sync_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/pbft_sync_packet_handler.cpp @@ -45,9 +45,9 @@ void PbftSyncPacketHandler::process(PbftSyncPacket &&packet, const std::shared_p std::string received_dag_blocks_str; // This is just log related stuff for (auto const &block : packet.period_data.dag_blocks) { - received_dag_blocks_str += block.getHash().toString() + " "; - if (peer->dag_level_ < block.getLevel()) { - peer->dag_level_ = block.getLevel(); + received_dag_blocks_str += block->getHash().toString() + " "; + if (peer->dag_level_ < block->getLevel()) { + peer->dag_level_ = block->getLevel(); } } @@ -119,7 +119,7 @@ void PbftSyncPacketHandler::process(PbftSyncPacket &&packet, const std::shared_p trx_order.push_back(t->getHash()); } for (auto b : packet.period_data.dag_blocks) { - blk_order.push_back(b.getHash()); + blk_order.push_back(b->getHash()); } LOG(log_er_) << "Order hash incorrect in period data " << 
pbft_blk_hash << " expected: " << order_hash << " received " << packet.period_data.pbft_blk->getOrderHash() << "; Dag order: " << blk_order diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/v4/dag_block_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/v4/dag_block_packet_handler.cpp index d3a82cd7b5..65c1754512 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/v4/dag_block_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/v4/dag_block_packet_handler.cpp @@ -46,17 +46,17 @@ void DagBlockPacketHandler::process(const threadpool::PacketData &packet_data, } dag_rlp = packet_data.rlp_[1]; } - DagBlock block(dag_rlp); - blk_hash_t const hash = block.getHash(); + auto block = std::make_shared(dag_rlp); + blk_hash_t const hash = block->getHash(); peer->markDagBlockAsKnown(hash); - if (block.getLevel() > peer->dag_level_) { - peer->dag_level_ = block.getLevel(); + if (block->getLevel() > peer->dag_level_) { + peer->dag_level_ = block->getLevel(); } // Do not process this block in case we already have it - if (dag_mgr_->isDagBlockKnown(block.getHash())) { + if (dag_mgr_->isDagBlockKnown(block->getHash())) { LOG(log_tr_) << "Received known DagBlockPacket " << hash << "from: " << peer->getId(); return; } @@ -64,11 +64,12 @@ void DagBlockPacketHandler::process(const threadpool::PacketData &packet_data, onNewBlockReceived(std::move(block), peer, transactions); } -void DagBlockPacketHandler::sendBlockWithTransactions(dev::p2p::NodeID const &peer_id, taraxa::DagBlock block, +void DagBlockPacketHandler::sendBlockWithTransactions(dev::p2p::NodeID const &peer_id, + const std::shared_ptr &block, const SharedTransactions &trxs) { std::shared_ptr peer = peers_state_->getPeer(peer_id); if (!peer) { - LOG(log_wr_) << "Send dag block " << block.getHash() << ". Failed to obtain peer " << peer_id; + LOG(log_wr_) << "Send dag block " << block->getHash() << ". 
Failed to obtain peer " << peer_id; return; } @@ -86,21 +87,21 @@ void DagBlockPacketHandler::sendBlockWithTransactions(dev::p2p::NodeID const &pe s.appendList(trxs.size()); s.appendRaw(trx_bytes, trxs.size()); - s.appendRaw(block.rlp(true)); + s.appendRaw(block->rlp(true)); if (!sealAndSend(peer_id, SubprotocolPacketType::kDagBlockPacket, std::move(s))) { - LOG(log_wr_) << "Sending DagBlock " << block.getHash() << " failed to " << peer_id; + LOG(log_wr_) << "Sending DagBlock " << block->getHash() << " failed to " << peer_id; return; } // Mark data as known if sending was successful - peer->markDagBlockAsKnown(block.getHash()); + peer->markDagBlockAsKnown(block->getHash()); } void DagBlockPacketHandler::onNewBlockReceived( - DagBlock &&block, const std::shared_ptr &peer, + std::shared_ptr &&block, const std::shared_ptr &peer, const std::unordered_map> &trxs) { - const auto block_hash = block.getHash(); + const auto block_hash = block->getHash(); auto verified = dag_mgr_->verifyBlock(block, trxs); switch (verified.first) { case DagManager::VerifyBlockReturnType::IncorrectTransactionsEstimation: @@ -175,9 +176,9 @@ void DagBlockPacketHandler::onNewBlockReceived( if (peer->peer_dag_synced_) { std::ostringstream err_msg; if (status.second.size() > 0) - err_msg << "DagBlock" << block.getHash() << " has missing pivot or/and tips " << status.second; + err_msg << "DagBlock" << block->getHash() << " has missing pivot or/and tips " << status.second; else - err_msg << "DagBlock" << block.getHash() << " could not be added to DAG"; + err_msg << "DagBlock" << block->getHash() << " could not be added to DAG"; throw MaliciousPeerException(err_msg.str()); } else { // peer_dag_synced_ flag ensures that this can only be performed once for a peer @@ -191,14 +192,15 @@ void DagBlockPacketHandler::onNewBlockReceived( } } -void DagBlockPacketHandler::onNewBlockVerified(const DagBlock &block, bool proposed, const SharedTransactions &trxs) { +void 
DagBlockPacketHandler::onNewBlockVerified(const std::shared_ptr &block, bool proposed, + const SharedTransactions &trxs) { // If node is pbft syncing and block is not proposed by us, this is an old block that has been verified - no block // gossip is needed if (!proposed && pbft_syncing_state_->isDeepPbftSyncing()) { return; } - const auto &block_hash = block.getHash(); + const auto &block_hash = block->getHash(); LOG(log_tr_) << "Verified NewBlock " << block_hash.toString(); std::vector peers_to_send; @@ -246,7 +248,7 @@ void DagBlockPacketHandler::onNewBlockVerified(const DagBlock &block, bool propo } } } - LOG(log_dg_) << "Send DagBlock " << block.getHash() << " to peers: " << peer_and_transactions_to_log; + LOG(log_dg_) << "Send DagBlock " << block->getHash() << " to peers: " << peer_and_transactions_to_log; if (!peers_to_send.empty()) LOG(log_tr_) << "Sent block to " << peers_to_send.size() << " peers"; } } // namespace taraxa::network::tarcap::v4 diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/v4/dag_sync_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/v4/dag_sync_packet_handler.cpp index 64400f909d..f2b6894921 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/v4/dag_sync_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/v4/dag_sync_packet_handler.cpp @@ -67,16 +67,16 @@ void DagSyncPacketHandler::process(const threadpool::PacketData& packet_data, co } } - std::vector dag_blocks; + std::vector> dag_blocks; std::vector dag_blocks_to_log; dag_blocks.reserve((*it).itemCount()); dag_blocks_to_log.reserve((*it).itemCount()); for (const auto block_rlp : *it) { - DagBlock block(block_rlp); - peer->markDagBlockAsKnown(block.getHash()); - if (dag_mgr_->isDagBlockKnown(block.getHash())) { - LOG(log_tr_) << "Received known DagBlock " << block.getHash() << "from: " << peer->getId(); + auto block = std::make_shared(block_rlp); + peer->markDagBlockAsKnown(block->getHash()); 
+ if (dag_mgr_->isDagBlockKnown(block->getHash())) { + LOG(log_tr_) << "Received known DagBlock " << block->getHash() << "from: " << peer->getId(); continue; } dag_blocks.emplace_back(std::move(block)); @@ -97,25 +97,25 @@ void DagSyncPacketHandler::process(const threadpool::PacketData& packet_data, co } for (auto& block : dag_blocks) { - dag_blocks_to_log.push_back(block.getHash()); + dag_blocks_to_log.push_back(block->getHash()); auto verified = dag_mgr_->verifyBlock(block, transactions); if (verified.first != DagManager::VerifyBlockReturnType::Verified) { std::ostringstream err_msg; - err_msg << "DagBlock " << block.getHash() << " failed verification with error code " + err_msg << "DagBlock " << block->getHash() << " failed verification with error code " << static_cast(verified.first); throw MaliciousPeerException(err_msg.str()); } - if (block.getLevel() > peer->dag_level_) peer->dag_level_ = block.getLevel(); + if (block->getLevel() > peer->dag_level_) peer->dag_level_ = block->getLevel(); auto status = dag_mgr_->addDagBlock(std::move(block), std::move(verified.second)); if (!status.first) { std::ostringstream err_msg; if (status.second.size() > 0) - err_msg << "DagBlock" << block.getHash() << " has missing pivot or/and tips " << status.second; + err_msg << "DagBlock" << block->getHash() << " has missing pivot or/and tips " << status.second; else - err_msg << "DagBlock" << block.getHash() << " could not be added to DAG"; + err_msg << "DagBlock" << block->getHash() << " could not be added to DAG"; throw MaliciousPeerException(err_msg.str()); } } diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/v4/pbft_sync_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/v4/pbft_sync_packet_handler.cpp index 0710cac3de..a7acc54653 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/v4/pbft_sync_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/v4/pbft_sync_packet_handler.cpp @@ -73,9 
+73,9 @@ void PbftSyncPacketHandler::process(const threadpool::PacketData &packet_data, std::string received_dag_blocks_str; // This is just log related stuff for (auto const &block : period_data.dag_blocks) { - received_dag_blocks_str += block.getHash().toString() + " "; - if (peer->dag_level_ < block.getLevel()) { - peer->dag_level_ = block.getLevel(); + received_dag_blocks_str += block->getHash().toString() + " "; + if (peer->dag_level_ < block->getLevel()) { + peer->dag_level_ = block->getLevel(); } } @@ -148,7 +148,7 @@ void PbftSyncPacketHandler::process(const threadpool::PacketData &packet_data, trx_order.push_back(t->getHash()); } for (auto b : period_data.dag_blocks) { - blk_order.push_back(b.getHash()); + blk_order.push_back(b->getHash()); } LOG(log_er_) << "Order hash incorrect in period data " << pbft_blk_hash << " expected: " << order_hash << " received " << period_data.pbft_blk->getOrderHash() << "; Dag order: " << blk_order diff --git a/libraries/core_libs/network/src/ws_server.cpp b/libraries/core_libs/network/src/ws_server.cpp index bb361550e8..d25a1175d6 100644 --- a/libraries/core_libs/network/src/ws_server.cpp +++ b/libraries/core_libs/network/src/ws_server.cpp @@ -120,12 +120,12 @@ void WsSession::newEthBlock(const ::taraxa::final_chain::BlockHeader &payload, c writeAsync(std::move(response)); } } -void WsSession::newDagBlock(DagBlock const &blk) { +void WsSession::newDagBlock(const std::shared_ptr &blk) { if (new_dag_blocks_subscription_) { Json::Value res, params; res["jsonrpc"] = "2.0"; res["method"] = "eth_subscription"; - params["result"] = blk.getJson(); + params["result"] = blk->getJson(); params["subscription"] = dev::toJS(new_dag_blocks_subscription_); res["params"] = params; auto response = util::to_string(res); @@ -292,7 +292,7 @@ void WsServer::on_accept(beast::error_code ec, tcp::socket socket) { if (!stopped_) do_accept(); } -void WsServer::newDagBlock(DagBlock const &blk) { +void WsServer::newDagBlock(const std::shared_ptr &blk) 
{ boost::shared_lock lock(sessions_mtx_); for (auto const &session : sessions) { if (!session->is_closed()) session->newDagBlock(blk); diff --git a/libraries/core_libs/node/src/node.cpp b/libraries/core_libs/node/src/node.cpp index c05a4d4e5c..bc86ad17d9 100644 --- a/libraries/core_libs/node/src/node.cpp +++ b/libraries/core_libs/node/src/node.cpp @@ -271,7 +271,7 @@ void FullNode::start() { }, *rpc_thread_pool_); dag_mgr_->block_verified_.subscribe( - [eth_json_rpc = as_weak(eth_json_rpc), ws = as_weak(jsonrpc_ws_)](auto const &dag_block) { + [eth_json_rpc = as_weak(eth_json_rpc), ws = as_weak(jsonrpc_ws_)](const std::shared_ptr &dag_block) { if (auto _ws = ws.lock()) { _ws->newDagBlock(dag_block); } diff --git a/libraries/core_libs/storage/include/storage/storage.hpp b/libraries/core_libs/storage/include/storage/storage.hpp index 31e3498c4b..47264c59f2 100644 --- a/libraries/core_libs/storage/include/storage/storage.hpp +++ b/libraries/core_libs/storage/include/storage/storage.hpp @@ -242,14 +242,14 @@ class DbStorage : public std::enable_shared_from_this { std::optional getCurrentPillarBlockData() const; // DAG - void saveDagBlock(DagBlock const& blk, Batch* write_batch_p = nullptr); + void saveDagBlock(const std::shared_ptr& blk, Batch* write_batch_p = nullptr); std::shared_ptr getDagBlock(blk_hash_t const& hash); bool dagBlockInDb(blk_hash_t const& hash); std::set getBlocksByLevel(level_t level); level_t getLastBlocksLevel() const; std::vector> getDagBlocksAtLevel(level_t level, int number_of_levels); - void updateDagBlockCounters(std::vector blks); - std::map> getNonfinalizedDagBlocks(); + void updateDagBlockCounters(std::vector> blks); + std::map>> getNonfinalizedDagBlocks(); void removeDagBlockBatch(Batch& write_batch, blk_hash_t const& hash); void removeDagBlock(blk_hash_t const& hash); // Sortition params diff --git a/libraries/core_libs/storage/src/storage.cpp b/libraries/core_libs/storage/src/storage.cpp index eddc4a177e..b2c0401e7b 100644 --- 
a/libraries/core_libs/storage/src/storage.cpp +++ b/libraries/core_libs/storage/src/storage.cpp @@ -487,12 +487,12 @@ std::vector> DbStorage::getDagBlocksAtLevel(level_t le return res; } -std::map> DbStorage::getNonfinalizedDagBlocks() { - std::map> res; +std::map>> DbStorage::getNonfinalizedDagBlocks() { + std::map>> res; auto i = std::unique_ptr(db_->NewIterator(read_options_, handle(Columns::dag_blocks))); for (i->SeekToFirst(); i->Valid(); i->Next()) { - DagBlock block(asBytes(i->value().ToString())); - res[block.getLevel()].emplace_back(std::move(block)); + auto block = std::make_shared(asBytes(i->value().ToString())); + res[block->getLevel()].emplace_back(std::move(block)); } return res; } @@ -512,39 +512,39 @@ void DbStorage::removeDagBlockBatch(Batch& write_batch, blk_hash_t const& hash) void DbStorage::removeDagBlock(blk_hash_t const& hash) { remove(Columns::dag_blocks, toSlice(hash)); } -void DbStorage::updateDagBlockCounters(std::vector blks) { +void DbStorage::updateDagBlockCounters(std::vector> blks) { // Lock is needed since we are editing some fields std::lock_guard u_lock(dag_blocks_mutex_); auto write_batch = createWriteBatch(); for (auto const& blk : blks) { - auto level = blk.getLevel(); + auto level = blk->getLevel(); auto block_hashes = getBlocksByLevel(level); - block_hashes.emplace(blk.getHash()); + block_hashes.emplace(blk->getHash()); dev::RLPStream blocks_stream(block_hashes.size()); for (auto const& hash : block_hashes) { blocks_stream << hash; } insert(write_batch, Columns::dag_blocks_level, toSlice(level), toSlice(blocks_stream.out())); dag_blocks_count_.fetch_add(1); - dag_edge_count_.fetch_add(blk.getTips().size() + 1); + dag_edge_count_.fetch_add(blk->getTips().size() + 1); } insert(write_batch, Columns::status, toSlice((uint8_t)StatusDbField::DagBlkCount), toSlice(dag_blocks_count_.load())); insert(write_batch, Columns::status, toSlice((uint8_t)StatusDbField::DagEdgeCount), toSlice(dag_edge_count_.load())); 
commitWriteBatch(write_batch); } -void DbStorage::saveDagBlock(DagBlock const& blk, Batch* write_batch_p) { +void DbStorage::saveDagBlock(const std::shared_ptr& blk, Batch* write_batch_p) { // Lock is needed since we are editing some fields std::lock_guard u_lock(dag_blocks_mutex_); auto write_batch_up = write_batch_p ? std::unique_ptr() : std::make_unique(); auto commit = !write_batch_p; auto& write_batch = write_batch_p ? *write_batch_p : *write_batch_up; - auto block_bytes = blk.rlp(true); - auto block_hash = blk.getHash(); + auto block_bytes = blk->rlp(true); + auto block_hash = blk->getHash(); insert(write_batch, Columns::dag_blocks, toSlice(block_hash.asBytes()), toSlice(block_bytes)); - auto level = blk.getLevel(); + auto level = blk->getLevel(); auto block_hashes = getBlocksByLevel(level); - block_hashes.emplace(blk.getHash()); + block_hashes.emplace(blk->getHash()); dev::RLPStream blocks_stream(block_hashes.size()); for (auto const& hash : block_hashes) { blocks_stream << hash; @@ -552,7 +552,7 @@ void DbStorage::saveDagBlock(DagBlock const& blk, Batch* write_batch_p) { insert(write_batch, Columns::dag_blocks_level, toSlice(level), toSlice(blocks_stream.out())); dag_blocks_count_.fetch_add(1); - dag_edge_count_.fetch_add(blk.getTips().size() + 1); + dag_edge_count_.fetch_add(blk->getTips().size() + 1); insert(write_batch, Columns::status, toSlice((uint8_t)StatusDbField::DagBlkCount), toSlice(dag_blocks_count_.load())); insert(write_batch, Columns::status, toSlice((uint8_t)StatusDbField::DagEdgeCount), toSlice(dag_edge_count_.load())); if (commit) { @@ -686,8 +686,8 @@ void DbStorage::savePeriodData(const PeriodData& period_data, Batch& write_batch // Remove dag blocks from non finalized column in db and add dag_block_period in DB uint32_t block_pos = 0; for (auto const& block : period_data.dag_blocks) { - removeDagBlockBatch(write_batch, block.getHash()); - addDagBlockPeriodToBatch(block.getHash(), period, block_pos, write_batch); + 
removeDagBlockBatch(write_batch, block->getHash()); + addDagBlockPeriodToBatch(block->getHash(), period, block_pos, write_batch); block_pos++; } @@ -1262,41 +1262,35 @@ std::vector DbStorage::getFinalizedDagBlockHashesByPeriod(PbftPeriod const auto dag_blocks = decodeDAGBlocksBundleRlp(dag_blocks_data); ret.reserve(dag_blocks.size()); std::transform(dag_blocks.begin(), dag_blocks.end(), std::back_inserter(ret), - [](const auto& dag_block) { return dag_block.getHash(); }); + [](const auto& dag_block) { return dag_block->getHash(); }); } return ret; } std::vector> DbStorage::getFinalizedDagBlockByPeriod(PbftPeriod period) { - std::vector> ret; - if (auto period_data = getPeriodDataRaw(period); period_data.size() > 0) { - auto dag_blocks_data = dev::RLP(period_data)[DAG_BLOCKS_POS_IN_PERIOD_DATA]; - auto dag_blocks = decodeDAGBlocksBundleRlp(dag_blocks_data); - ret.reserve(dag_blocks.size()); - for (const auto& block : dag_blocks) { - ret.emplace_back(std::make_shared(std::move(block))); - } + auto period_data = getPeriodDataRaw(period); + if (period_data.empty()) { + return {}; } - return ret; + + auto dag_blocks_data = dev::RLP(period_data)[DAG_BLOCKS_POS_IN_PERIOD_DATA]; + return decodeDAGBlocksBundleRlp(dag_blocks_data); } std::pair>> DbStorage::getLastPbftBlockHashAndFinalizedDagBlockByPeriod(PbftPeriod period) { - std::vector> ret; - blk_hash_t last_pbft_block_hash; - if (auto period_data = getPeriodDataRaw(period); period_data.size() > 0) { - auto const period_data_rlp = dev::RLP(period_data); - auto dag_blocks_data = period_data_rlp[DAG_BLOCKS_POS_IN_PERIOD_DATA]; - auto dag_blocks = decodeDAGBlocksBundleRlp(dag_blocks_data); - ret.reserve(dag_blocks.size()); - for (const auto& block : dag_blocks) { - ret.emplace_back(std::make_shared(std::move(block))); - } - last_pbft_block_hash = - period_data_rlp[PBFT_BLOCK_POS_IN_PERIOD_DATA][PREV_BLOCK_HASH_POS_IN_PBFT_BLOCK].toHash(); + auto period_data = getPeriodDataRaw(period); + if (period_data.empty()) { + return 
{}; } - return {last_pbft_block_hash, ret}; + + const auto period_data_rlp = dev::RLP(period_data); + auto dag_blocks_data = period_data_rlp[DAG_BLOCKS_POS_IN_PERIOD_DATA]; + auto blocks = decodeDAGBlocksBundleRlp(dag_blocks_data); + auto last_pbft_block_hash = + period_data_rlp[PBFT_BLOCK_POS_IN_PERIOD_DATA][PREV_BLOCK_HASH_POS_IN_PBFT_BLOCK].toHash(); + return {last_pbft_block_hash, std::move(blocks)}; } std::optional DbStorage::getProposalPeriodForDagLevel(uint64_t level) { diff --git a/libraries/types/dag_block/include/dag/dag_block_bundle_rlp.hpp b/libraries/types/dag_block/include/dag/dag_block_bundle_rlp.hpp index 828399ec7d..21b6be4135 100644 --- a/libraries/types/dag_block/include/dag/dag_block_bundle_rlp.hpp +++ b/libraries/types/dag_block/include/dag/dag_block_bundle_rlp.hpp @@ -21,7 +21,7 @@ constexpr static size_t kDAGBlocksBundleRlpSize{3}; * @param blocks * @return blocks bundle rlp bytes */ -dev::bytes encodeDAGBlocksBundleRlp(const std::vector& blocks); +dev::bytes encodeDAGBlocksBundleRlp(const std::vector>& blocks); /** * @brief Decodes pbft blocks from optimized blocks bundle rlp @@ -29,7 +29,7 @@ dev::bytes encodeDAGBlocksBundleRlp(const std::vector& blocks); * @param blocks_bundle_rlp * @return blocks */ -std::vector decodeDAGBlocksBundleRlp(const dev::RLP& blocks_bundle_rlp); +std::vector> decodeDAGBlocksBundleRlp(const dev::RLP& blocks_bundle_rlp); /** * @brief Decodes single dag block from optimized blocks bundle rlp diff --git a/libraries/types/dag_block/src/dag_block_bundle_rlp.cpp b/libraries/types/dag_block/src/dag_block_bundle_rlp.cpp index b57a196669..57831ccea6 100644 --- a/libraries/types/dag_block/src/dag_block_bundle_rlp.cpp +++ b/libraries/types/dag_block/src/dag_block_bundle_rlp.cpp @@ -7,7 +7,7 @@ namespace taraxa { -dev::bytes encodeDAGBlocksBundleRlp(const std::vector& blocks) { +dev::bytes encodeDAGBlocksBundleRlp(const std::vector>& blocks) { if (blocks.empty()) { return {}; } @@ -18,9 +18,9 @@ dev::bytes 
encodeDAGBlocksBundleRlp(const std::vector& blocks) { for (const auto& block : blocks) { std::vector idx; - idx.reserve(block.getTrxs().size()); + idx.reserve(block->getTrxs().size()); - for (const auto& trx : block.getTrxs()) { + for (const auto& trx : block->getTrxs()) { if (const auto [_, ok] = trx_hash_map.try_emplace(trx, static_cast(trx_hash_map.size())); ok) { ordered_trx_hashes.push_back(trx); // Track the insertion order } @@ -43,12 +43,12 @@ dev::bytes encodeDAGBlocksBundleRlp(const std::vector& blocks) { } blocks_bundle_rlp.appendList(blocks.size()); for (const auto& block : blocks) { - blocks_bundle_rlp.appendRaw(block.rlp(true, false)); + blocks_bundle_rlp.appendRaw(block->rlp(true, false)); } return blocks_bundle_rlp.invalidate(); } -std::vector decodeDAGBlocksBundleRlp(const dev::RLP& blocks_bundle_rlp) { +std::vector> decodeDAGBlocksBundleRlp(const dev::RLP& blocks_bundle_rlp) { if (blocks_bundle_rlp.itemCount() != kDAGBlocksBundleRlpSize) { return {}; } @@ -70,11 +70,11 @@ std::vector decodeDAGBlocksBundleRlp(const dev::RLP& blocks_bundle_rlp dags_trx_hashes.push_back(std::move(hashes)); } - std::vector blocks; + std::vector> blocks; blocks.reserve(blocks_bundle_rlp[2].itemCount()); for (size_t i = 0; i < blocks_bundle_rlp[2].itemCount(); i++) { - auto block = DagBlock(blocks_bundle_rlp[2][i], std::move(dags_trx_hashes[i])); + auto block = std::make_shared(blocks_bundle_rlp[2][i], std::move(dags_trx_hashes[i])); blocks.push_back(std::move(block)); } diff --git a/libraries/types/pbft_block/include/pbft/period_data.hpp b/libraries/types/pbft_block/include/pbft/period_data.hpp index 060a75268b..f1eb17282b 100644 --- a/libraries/types/pbft_block/include/pbft/period_data.hpp +++ b/libraries/types/pbft_block/include/pbft/period_data.hpp @@ -38,7 +38,7 @@ class PeriodData { std::shared_ptr pbft_blk; std::vector> previous_block_cert_votes; // These votes are the cert votes of previous block // which match reward votes in current pbft block - std::vector 
dag_blocks; + std::vector> dag_blocks; SharedTransactions transactions; // Pillar votes should be present only if pbft block contains also pillar block hash diff --git a/libraries/types/pbft_block/src/period_data.cpp b/libraries/types/pbft_block/src/period_data.cpp index 8948ca683b..7f9a09572b 100644 --- a/libraries/types/pbft_block/src/period_data.cpp +++ b/libraries/types/pbft_block/src/period_data.cpp @@ -88,7 +88,7 @@ PeriodData PeriodData::FromOldPeriodData(const dev::RLP& rlp) { } for (auto const dag_block_rlp : *it++) { - period_data.dag_blocks.emplace_back(dag_block_rlp); + period_data.dag_blocks.emplace_back(std::make_shared(dag_block_rlp)); } for (auto const trx_rlp : *it++) { @@ -116,7 +116,7 @@ bytes PeriodData::ToOldPeriodData(const bytes& rlp) { s.appendList(period_data.dag_blocks.size()); for (auto const& b : period_data.dag_blocks) { - s.appendRaw(b.rlp(true)); + s.appendRaw(b->rlp(true)); } s.appendList(period_data.transactions.size()); diff --git a/tests/dag_block_test.cpp b/tests/dag_block_test.cpp index 62b5987b5d..ecf9866999 100644 --- a/tests/dag_block_test.cpp +++ b/tests/dag_block_test.cpp @@ -26,8 +26,6 @@ using namespace vdf_sortition; struct DagBlockTest : NodesTest {}; struct DagBlockMgrTest : NodesTest {}; -auto g_blk_samples = samples::createMockDagBlkSamples(0, NUM_BLK, 0, BLK_TRX_LEN, BLK_TRX_OVERLAP); - auto g_secret = dev::Secret("3800b2875669d9b2053c1aff9224ecfdc411423aac5b5a73d7a45ced1c3b9dcd", dev::Secret::ConstructFromStringType::FromHex); auto g_key_pair = dev::KeyPair(g_secret); @@ -230,14 +228,16 @@ TEST_F(DagBlockMgrTest, incorrect_tx_estimation) { // transactions.size and estimations size is not equal { - DagBlock blk(dag_genesis, propose_level, {}, {trx->getHash()}, {}, vdf1, node->getSecretKey()); + auto blk = std::make_shared(dag_genesis, propose_level, vec_blk_t{}, vec_trx_t{trx->getHash()}, 0, vdf1, + node->getSecretKey()); EXPECT_EQ(node->getDagManager()->verifyBlock(std::move(blk)).first, 
DagManager::VerifyBlockReturnType::IncorrectTransactionsEstimation); } // wrong estimated tx { - DagBlock blk(dag_genesis, propose_level, {}, {trx->getHash()}, 100, vdf1, node->getSecretKey()); + auto blk = std::make_shared(dag_genesis, propose_level, vec_blk_t{}, vec_trx_t{trx->getHash()}, 100, vdf1, + node->getSecretKey()); EXPECT_EQ(node->getDagManager()->verifyBlock(std::move(blk)).first, DagManager::VerifyBlockReturnType::IncorrectTransactionsEstimation); } @@ -265,8 +265,9 @@ TEST_F(DagBlockMgrTest, dag_block_tips_verification) { dev::bytes vdf_msg = DagManager::getVdfMessage(dag_genesis, {trx}); vdf.computeVdfSolution(vdf_config, vdf_msg, false); - DagBlock blk(dag_genesis, propose_level, {}, {trx->getHash()}, 100000, vdf, node->getSecretKey()); - dag_blocks_hashes.push_back(blk.getHash()); + auto blk = std::make_shared(dag_genesis, propose_level, vec_blk_t{}, vec_trx_t{trx->getHash()}, 100000, + vdf, node->getSecretKey()); + dag_blocks_hashes.push_back(blk->getHash()); EXPECT_EQ(node->getDagManager()->verifyBlock(blk).first, DagManager::VerifyBlockReturnType::Verified); EXPECT_TRUE(node->getDagManager()->addDagBlock(std::move(blk), {trx}).first); } @@ -275,21 +276,21 @@ TEST_F(DagBlockMgrTest, dag_block_tips_verification) { vdf.computeVdfSolution(vdf_config, vdf_msg, false); // Verify block over the kDagBlockMaxTips is rejected - DagBlock blk_over_limit(dag_genesis, propose_level, dag_blocks_hashes, {trxs[0]->getHash()}, 100000, vdf, - node->getSecretKey()); + auto blk_over_limit = std::make_shared(dag_genesis, propose_level, dag_blocks_hashes, + vec_trx_t{trxs[0]->getHash()}, 100000, vdf, node->getSecretKey()); EXPECT_EQ(node->getDagManager()->verifyBlock(blk_over_limit).first, DagManager::VerifyBlockReturnType::FailedTipsVerification); // Verify block at kDagBlockMaxTips is accepted dag_blocks_hashes.resize(kDagBlockMaxTips); - DagBlock blk_at_limit(dag_genesis, propose_level, dag_blocks_hashes, {trxs[0]->getHash()}, 100000, vdf, - node->getSecretKey()); 
+ auto blk_at_limit = std::make_shared(dag_genesis, propose_level, dag_blocks_hashes, + vec_trx_t{trxs[0]->getHash()}, 100000, vdf, node->getSecretKey()); EXPECT_EQ(node->getDagManager()->verifyBlock(blk_at_limit).first, DagManager::VerifyBlockReturnType::Verified); // Verify block below kDagBlockMaxTips is accepted dag_blocks_hashes.resize(kDagBlockMaxTips - 1); - DagBlock blk_under_limit(dag_genesis, propose_level, dag_blocks_hashes, {trxs[0]->getHash()}, 100000, vdf, - node->getSecretKey()); + auto blk_under_limit = std::make_shared(dag_genesis, propose_level, dag_blocks_hashes, + vec_trx_t{trxs[0]->getHash()}, 100000, vdf, node->getSecretKey()); EXPECT_EQ(node->getDagManager()->verifyBlock(blk_under_limit).first, DagManager::VerifyBlockReturnType::Verified); auto dag_blocks_hashes_with_duplicate_pivot = dag_blocks_hashes; @@ -299,14 +300,16 @@ TEST_F(DagBlockMgrTest, dag_block_tips_verification) { dag_blocks_hashes_with_duplicate_tip.push_back(dag_blocks_hashes[0]); // Verify block with duplicate pivot is rejected - DagBlock blk_with_duplicate_pivot(dag_genesis, propose_level, dag_blocks_hashes_with_duplicate_pivot, - {trxs[0]->getHash()}, 100000, vdf, node->getSecretKey()); + auto blk_with_duplicate_pivot = + std::make_shared(dag_genesis, propose_level, dag_blocks_hashes_with_duplicate_pivot, + vec_trx_t{trxs[0]->getHash()}, 100000, vdf, node->getSecretKey()); EXPECT_EQ(node->getDagManager()->verifyBlock(blk_with_duplicate_pivot).first, DagManager::VerifyBlockReturnType::FailedTipsVerification); // Verify block with duplicate tip is rejected - DagBlock blk_with_duplicate_tip(dag_genesis, propose_level, dag_blocks_hashes_with_duplicate_tip, - {trxs[0]->getHash()}, 100000, vdf, node->getSecretKey()); + auto blk_with_duplicate_tip = + std::make_shared(dag_genesis, propose_level, dag_blocks_hashes_with_duplicate_tip, + vec_trx_t{trxs[0]->getHash()}, 100000, vdf, node->getSecretKey()); EXPECT_EQ(node->getDagManager()->verifyBlock(blk_with_duplicate_tip).first, 
DagManager::VerifyBlockReturnType::FailedTipsVerification); } @@ -334,8 +337,9 @@ TEST_F(DagBlockMgrTest, dag_block_tips_proposal) { dev::bytes vdf_msg = DagManager::getVdfMessage(dag_genesis, {trx}); vdf.computeVdfSolution(vdf_config, vdf_msg, false); - DagBlock blk(dag_genesis, propose_level, {}, {trx->getHash()}, dag_block_gas, vdf, node->getSecretKey()); - dag_blocks_hashes.push_back(blk.getHash()); + auto blk = std::make_shared(dag_genesis, propose_level, vec_blk_t{}, vec_trx_t{trx->getHash()}, + dag_block_gas, vdf, node->getSecretKey()); + dag_blocks_hashes.push_back(blk->getHash()); EXPECT_EQ(node->getDagManager()->verifyBlock(blk).first, DagManager::VerifyBlockReturnType::Verified); EXPECT_TRUE(node->getDagManager()->addDagBlock(std::move(blk), {trx}).first); } @@ -366,8 +370,9 @@ TEST_F(DagBlockMgrTest, dag_block_tips_proposal) { dev::bytes vdf_msg = DagManager::getVdfMessage(dag_blocks_hashes[0], {trx}); vdf.computeVdfSolution(vdf_config, vdf_msg, false); - DagBlock blk(dag_blocks_hashes[0], propose_level, {}, {trx->getHash()}, 100000, vdf, node->getSecretKey()); - dag_blocks_hashes.push_back(blk.getHash()); + auto blk = std::make_shared(dag_blocks_hashes[0], propose_level, vec_blk_t{}, vec_trx_t{trx->getHash()}, + 100000, vdf, node->getSecretKey()); + dag_blocks_hashes.push_back(blk->getHash()); EXPECT_EQ(node->getDagManager()->verifyBlock(blk).first, DagManager::VerifyBlockReturnType::Verified); EXPECT_TRUE(node->getDagManager()->addDagBlock(std::move(blk), {trx}).first); } @@ -387,8 +392,9 @@ TEST_F(DagBlockMgrTest, dag_block_tips_proposal) { dev::bytes vdf_msg = DagManager::getVdfMessage(dag_genesis, {trxs[0]}); vdf.computeVdfSolution(vdf_config, vdf_msg, false); - DagBlock blk(dag_genesis, propose_level, {}, {trxs[0]->getHash()}, 100000, vdf, node_cfgs[1].node_secret); - dag_blocks_hashes.push_back(blk.getHash()); + auto blk = std::make_shared(dag_genesis, propose_level, vec_blk_t{}, vec_trx_t{trxs[0]->getHash()}, 100000, + vdf, 
node_cfgs[1].node_secret); + dag_blocks_hashes.push_back(blk->getHash()); EXPECT_TRUE(node->getDagManager()->addDagBlock(std::move(blk), {trxs[0]}).first); selected_tips = node->getDagBlockProposer()->selectDagBlockTips(dag_blocks_hashes, selection_gas_limit); @@ -434,7 +440,8 @@ TEST_F(DagBlockMgrTest, too_big_dag_block) { vdf1.computeVdfSolution(vdf_config, vdf_msg, false); { - DagBlock blk(dag_genesis, propose_level, {}, hashes, estimations, vdf1, node->getSecretKey()); + auto blk = std::make_shared(dag_genesis, propose_level, vec_blk_t{}, hashes, estimations, vdf1, + node->getSecretKey()); EXPECT_EQ(node->getDagManager()->verifyBlock(std::move(blk)).first, DagManager::VerifyBlockReturnType::BlockTooBig); } } diff --git a/tests/dag_test.cpp b/tests/dag_test.cpp index 36b9abf27c..97a4cfe7db 100644 --- a/tests/dag_test.cpp +++ b/tests/dag_test.cpp @@ -138,23 +138,34 @@ TEST_F(DagTest, compute_epoch) { node_cfgs[0].genesis.pbft.gas_limit = 100000; auto mgr = std::make_shared(node_cfgs[0], addr_t(), trx_mgr, pbft_chain, nullptr, db_ptr, nullptr); - DagBlock blkA(GENESIS, 1, {}, {trx_hash_t(2)}, sig_t(1), blk_hash_t(2), addr_t(1)); - DagBlock blkB(GENESIS, 1, {}, {trx_hash_t(3), trx_hash_t(4)}, sig_t(1), blk_hash_t(3), addr_t(1)); - DagBlock blkC(blk_hash_t(2), 2, {blk_hash_t(3)}, {}, sig_t(1), blk_hash_t(4), addr_t(1)); - DagBlock blkD(blk_hash_t(2), 2, {}, {}, sig_t(1), blk_hash_t(5), addr_t(1)); - DagBlock blkE(blk_hash_t(4), 3, {blk_hash_t(5), blk_hash_t(7)}, {}, sig_t(1), blk_hash_t(6), addr_t(1)); - DagBlock blkF(blk_hash_t(3), 2, {}, {}, sig_t(1), blk_hash_t(7), addr_t(1)); - DagBlock blkG(blk_hash_t(2), 2, {}, {trx_hash_t(4)}, sig_t(1), blk_hash_t(8), addr_t(1)); - DagBlock blkH(blk_hash_t(6), 5, {blk_hash_t(8), blk_hash_t(10)}, {}, sig_t(1), blk_hash_t(9), addr_t(1)); - DagBlock blkI(blk_hash_t(11), 4, {blk_hash_t(4)}, {}, sig_t(1), blk_hash_t(10), addr_t(1)); - DagBlock blkJ(blk_hash_t(7), 3, {}, {}, sig_t(1), blk_hash_t(11), addr_t(1)); - DagBlock 
blkK(blk_hash_t(9), 6, {}, {}, sig_t(1), blk_hash_t(12), addr_t(1)); - - const auto blkA_hash = blkA.getHash(); - const auto blkC_hash = blkC.getHash(); - const auto blkE_hash = blkE.getHash(); - const auto blkH_hash = blkH.getHash(); - const auto blkK_hash = blkK.getHash(); + auto blkA = + std::make_shared(GENESIS, 1, vec_blk_t{}, vec_trx_t{trx_hash_t(2)}, sig_t(1), blk_hash_t(2), addr_t(1)); + auto blkB = std::make_shared(GENESIS, 1, vec_blk_t{}, vec_trx_t{trx_hash_t(3), trx_hash_t(4)}, sig_t(1), + blk_hash_t(3), addr_t(1)); + auto blkC = std::make_shared(blk_hash_t(2), 2, vec_blk_t{blk_hash_t(3)}, vec_trx_t{}, sig_t(1), + blk_hash_t(4), addr_t(1)); + auto blkD = + std::make_shared(blk_hash_t(2), 2, vec_blk_t{}, vec_trx_t{}, sig_t(1), blk_hash_t(5), addr_t(1)); + auto blkE = std::make_shared(blk_hash_t(4), 3, vec_blk_t{blk_hash_t(5), blk_hash_t(7)}, vec_trx_t{}, + sig_t(1), blk_hash_t(6), addr_t(1)); + auto blkF = + std::make_shared(blk_hash_t(3), 2, vec_blk_t{}, vec_trx_t{}, sig_t(1), blk_hash_t(7), addr_t(1)); + auto blkG = std::make_shared(blk_hash_t(2), 2, vec_blk_t{}, vec_trx_t{trx_hash_t(4)}, sig_t(1), + blk_hash_t(8), addr_t(1)); + auto blkH = std::make_shared(blk_hash_t(6), 5, vec_blk_t{blk_hash_t(8), blk_hash_t(10)}, vec_trx_t{}, + sig_t(1), blk_hash_t(9), addr_t(1)); + auto blkI = std::make_shared(blk_hash_t(11), 4, vec_blk_t{blk_hash_t(4)}, vec_trx_t{}, sig_t(1), + blk_hash_t(10), addr_t(1)); + auto blkJ = + std::make_shared(blk_hash_t(7), 3, vec_blk_t{}, vec_trx_t{}, sig_t(1), blk_hash_t(11), addr_t(1)); + auto blkK = + std::make_shared(blk_hash_t(9), 6, vec_blk_t{}, vec_trx_t{}, sig_t(1), blk_hash_t(12), addr_t(1)); + + const auto blkA_hash = blkA->getHash(); + const auto blkC_hash = blkC->getHash(); + const auto blkE_hash = blkE->getHash(); + const auto blkH_hash = blkH->getHash(); + const auto blkK_hash = blkK->getHash(); EXPECT_TRUE(mgr->addDagBlock(std::move(blkA)).first); EXPECT_TRUE(mgr->addDagBlock(std::move(blkB)).first); @@ -232,19 +243,30 
@@ TEST_F(DagTest, dag_expiry) { node_cfgs[0].genesis.pbft.gas_limit = 100000; auto mgr = std::make_shared(node_cfgs[0], addr_t(), trx_mgr, pbft_chain, nullptr, db_ptr, nullptr); - DagBlock blkA(GENESIS, 1, {}, {trx_hash_t(2)}, sig_t(1), blk_hash_t(2), addr_t(1)); - DagBlock blkB(GENESIS, 1, {}, {trx_hash_t(3), trx_hash_t(4)}, sig_t(1), blk_hash_t(3), addr_t(1)); - DagBlock blkC(blk_hash_t(2), 2, {blk_hash_t(3)}, {}, sig_t(1), blk_hash_t(4), addr_t(1)); - DagBlock blkD(blk_hash_t(2), 2, {}, {}, sig_t(1), blk_hash_t(5), addr_t(1)); - DagBlock blkE(blk_hash_t(4), 3, {blk_hash_t(5), blk_hash_t(7)}, {}, sig_t(1), blk_hash_t(6), addr_t(1)); - DagBlock blkF(blk_hash_t(3), 2, {}, {}, sig_t(1), blk_hash_t(7), addr_t(1)); - DagBlock blkG(blk_hash_t(2), 2, {}, {trx_hash_t(4)}, sig_t(1), blk_hash_t(8), addr_t(1)); - DagBlock blkH(blk_hash_t(6), 5, {blk_hash_t(8), blk_hash_t(10)}, {}, sig_t(1), blk_hash_t(9), addr_t(1)); - DagBlock blkI(blk_hash_t(11), 4, {blk_hash_t(4)}, {}, sig_t(1), blk_hash_t(10), addr_t(1)); - DagBlock blkJ(blk_hash_t(7), 3, {}, {}, sig_t(1), blk_hash_t(11), addr_t(1)); - DagBlock blkK(blk_hash_t(9), 6, {}, {}, sig_t(1), blk_hash_t(12), addr_t(1)); - - const auto blkK_hash = blkK.getHash(); + auto blkA = + std::make_shared(GENESIS, 1, vec_blk_t{}, vec_trx_t{trx_hash_t(2)}, sig_t(1), blk_hash_t(2), addr_t(1)); + auto blkB = std::make_shared(GENESIS, 1, vec_blk_t{}, vec_trx_t{trx_hash_t(3), trx_hash_t(4)}, sig_t(1), + blk_hash_t(3), addr_t(1)); + auto blkC = std::make_shared(blk_hash_t(2), 2, vec_blk_t{blk_hash_t(3)}, vec_trx_t{}, sig_t(1), + blk_hash_t(4), addr_t(1)); + auto blkD = + std::make_shared(blk_hash_t(2), 2, vec_blk_t{}, vec_trx_t{}, sig_t(1), blk_hash_t(5), addr_t(1)); + auto blkE = std::make_shared(blk_hash_t(4), 3, vec_blk_t{blk_hash_t(5), blk_hash_t(7)}, vec_trx_t{}, + sig_t(1), blk_hash_t(6), addr_t(1)); + auto blkF = + std::make_shared(blk_hash_t(3), 2, vec_blk_t{}, vec_trx_t{}, sig_t(1), blk_hash_t(7), addr_t(1)); + auto blkG = 
std::make_shared(blk_hash_t(2), 2, vec_blk_t{}, vec_trx_t{trx_hash_t(4)}, sig_t(1), + blk_hash_t(8), addr_t(1)); + auto blkH = std::make_shared(blk_hash_t(6), 5, vec_blk_t{blk_hash_t(8), blk_hash_t(10)}, vec_trx_t{}, + sig_t(1), blk_hash_t(9), addr_t(1)); + auto blkI = std::make_shared(blk_hash_t(11), 4, vec_blk_t{blk_hash_t(4)}, vec_trx_t{}, sig_t(1), + blk_hash_t(10), addr_t(1)); + auto blkJ = + std::make_shared(blk_hash_t(7), 3, vec_blk_t{}, vec_trx_t{}, sig_t(1), blk_hash_t(11), addr_t(1)); + auto blkK = + std::make_shared(blk_hash_t(9), 6, vec_blk_t{}, vec_trx_t{}, sig_t(1), blk_hash_t(12), addr_t(1)); + + const auto blkK_hash = blkK->getHash(); mgr->addDagBlock(std::move(blkA)); mgr->addDagBlock(std::move(blkB)); @@ -270,33 +292,35 @@ TEST_F(DagTest, dag_expiry) { mgr->setDagBlockOrder(blkK_hash, 1, orders); // Verify expiry level - EXPECT_EQ(mgr->getDagExpiryLevel(), blkK.getLevel() - EXPIRY_LIMIT); + EXPECT_EQ(mgr->getDagExpiryLevel(), blkK->getLevel() - EXPIRY_LIMIT); - DagBlock blk_under_limit(blk_hash_t(2), blkK.getLevel() - EXPIRY_LIMIT - 1, {}, {}, sig_t(1), blk_hash_t(13), - addr_t(1)); - DagBlock blk_at_limit(blk_hash_t(4), blkK.getLevel() - EXPIRY_LIMIT, {}, {}, sig_t(1), blk_hash_t(14), addr_t(1)); - DagBlock blk_over_limit(blk_hash_t(11), blkK.getLevel() - EXPIRY_LIMIT + 1, {}, {}, sig_t(1), blk_hash_t(15), - addr_t(1)); + auto blk_under_limit = std::make_shared(blk_hash_t(2), blkK->getLevel() - EXPIRY_LIMIT - 1, vec_blk_t{}, + vec_trx_t{}, sig_t(1), blk_hash_t(13), addr_t(1)); + auto blk_at_limit = std::make_shared(blk_hash_t(4), blkK->getLevel() - EXPIRY_LIMIT, vec_blk_t{}, + vec_trx_t{}, sig_t(1), blk_hash_t(14), addr_t(1)); + auto blk_over_limit = std::make_shared(blk_hash_t(11), blkK->getLevel() - EXPIRY_LIMIT + 1, vec_blk_t{}, + vec_trx_t{}, sig_t(1), blk_hash_t(15), addr_t(1)); // Block under limit is not accepted to DAG since it is expired EXPECT_FALSE(mgr->addDagBlock(std::move(blk_under_limit)).first); 
EXPECT_TRUE(mgr->addDagBlock(std::move(blk_at_limit)).first); EXPECT_TRUE(mgr->addDagBlock(std::move(blk_over_limit)).first); - EXPECT_FALSE(db_ptr->dagBlockInDb(blk_under_limit.getHash())); - EXPECT_TRUE(db_ptr->dagBlockInDb(blk_at_limit.getHash())); - EXPECT_TRUE(db_ptr->dagBlockInDb(blk_over_limit.getHash())); + EXPECT_FALSE(db_ptr->dagBlockInDb(blk_under_limit->getHash())); + EXPECT_TRUE(db_ptr->dagBlockInDb(blk_at_limit->getHash())); + EXPECT_TRUE(db_ptr->dagBlockInDb(blk_over_limit->getHash())); - DagBlock blk_new_anchor(blk_hash_t(12), 7, {}, {}, sig_t(1), blk_hash_t(16), addr_t(1)); + auto blk_new_anchor = + std::make_shared(blk_hash_t(12), 7, vec_blk_t{}, vec_trx_t{}, sig_t(1), blk_hash_t(16), addr_t(1)); EXPECT_TRUE(mgr->addDagBlock(std::move(blk_new_anchor)).first); - orders = mgr->getDagBlockOrder(blk_new_anchor.getHash(), 2); - mgr->setDagBlockOrder(blk_new_anchor.getHash(), 2, orders); + orders = mgr->getDagBlockOrder(blk_new_anchor->getHash(), 2); + mgr->setDagBlockOrder(blk_new_anchor->getHash(), 2, orders); // Verify that the block blk_at_limit which was initially part of the DAG became expired once new anchor moved the // limit - EXPECT_FALSE(db_ptr->dagBlockInDb(blk_under_limit.getHash())); - EXPECT_FALSE(db_ptr->dagBlockInDb(blk_at_limit.getHash())); - EXPECT_TRUE(db_ptr->dagBlockInDb(blk_over_limit.getHash())); + EXPECT_FALSE(db_ptr->dagBlockInDb(blk_under_limit->getHash())); + EXPECT_FALSE(db_ptr->dagBlockInDb(blk_at_limit->getHash())); + EXPECT_TRUE(db_ptr->dagBlockInDb(blk_over_limit->getHash())); } TEST_F(DagTest, receive_block_in_order) { @@ -307,9 +331,11 @@ TEST_F(DagTest, receive_block_in_order) { node_cfgs[0].genesis.pbft.gas_limit = 100000; auto mgr = std::make_shared(node_cfgs[0], addr_t(), trx_mgr, pbft_chain, nullptr, db_ptr, nullptr); - DagBlock blk1(GENESIS, 1, {}, {}, sig_t(777), blk_hash_t(1), addr_t(15)); - DagBlock blk2(blk_hash_t(1), 2, {}, {}, sig_t(777), blk_hash_t(2), addr_t(15)); - DagBlock blk3(GENESIS, 3, 
{blk_hash_t(1), blk_hash_t(2)}, {}, sig_t(777), blk_hash_t(3), addr_t(15)); + auto blk1 = std::make_shared(GENESIS, 1, vec_blk_t{}, vec_trx_t{}, sig_t(777), blk_hash_t(1), addr_t(15)); + auto blk2 = + std::make_shared(blk_hash_t(1), 2, vec_blk_t{}, vec_trx_t{}, sig_t(777), blk_hash_t(2), addr_t(15)); + auto blk3 = std::make_shared(GENESIS, 3, vec_blk_t{blk_hash_t(1), blk_hash_t(2)}, vec_trx_t{}, sig_t(777), + blk_hash_t(3), addr_t(15)); mgr->addDagBlock(std::move(blk1)); mgr->addDagBlock(std::move(blk2)); @@ -338,23 +364,34 @@ TEST_F(DagTest, compute_epoch_2) { node_cfgs[0].genesis.pbft.gas_limit = 100000; auto mgr = std::make_shared(node_cfgs[0], addr_t(), trx_mgr, pbft_chain, nullptr, db_ptr, nullptr); - DagBlock blkA(GENESIS, 1, {}, {trx_hash_t(2)}, sig_t(1), blk_hash_t(2), addr_t(1)); - DagBlock blkB(GENESIS, 1, {}, {trx_hash_t(3), trx_hash_t(4)}, sig_t(1), blk_hash_t(3), addr_t(1)); - DagBlock blkC(blk_hash_t(2), 2, {blk_hash_t(3)}, {}, sig_t(1), blk_hash_t(4), addr_t(1)); - DagBlock blkD(blk_hash_t(2), 2, {}, {}, sig_t(1), blk_hash_t(5), addr_t(1)); - DagBlock blkE(blk_hash_t(4), 3, {blk_hash_t(5), blk_hash_t(7)}, {}, sig_t(1), blk_hash_t(6), addr_t(1)); - DagBlock blkF(blk_hash_t(3), 2, {}, {}, sig_t(1), blk_hash_t(7), addr_t(1)); - DagBlock blkG(blk_hash_t(2), 2, {}, {trx_hash_t(4)}, sig_t(1), blk_hash_t(8), addr_t(1)); - DagBlock blkH(blk_hash_t(6), 5, {blk_hash_t(8), blk_hash_t(10)}, {}, sig_t(1), blk_hash_t(9), addr_t(1)); - DagBlock blkI(blk_hash_t(11), 4, {blk_hash_t(4)}, {}, sig_t(1), blk_hash_t(10), addr_t(1)); - DagBlock blkJ(blk_hash_t(7), 3, {}, {}, sig_t(1), blk_hash_t(11), addr_t(1)); - DagBlock blkK(blk_hash_t(10), 5, {}, {}, sig_t(1), blk_hash_t(12), addr_t(1)); - - const auto blkA_hash = blkA.getHash(); - const auto blkC_hash = blkC.getHash(); - const auto blkE_hash = blkE.getHash(); - const auto blkH_hash = blkH.getHash(); - const auto blkK_hash = blkK.getHash(); + auto blkA = + std::make_shared(GENESIS, 1, vec_blk_t{}, 
vec_trx_t{trx_hash_t(2)}, sig_t(1), blk_hash_t(2), addr_t(1)); + auto blkB = std::make_shared(GENESIS, 1, vec_blk_t{}, vec_trx_t{trx_hash_t(3), trx_hash_t(4)}, sig_t(1), + blk_hash_t(3), addr_t(1)); + auto blkC = std::make_shared(blk_hash_t(2), 2, vec_blk_t{blk_hash_t(3)}, vec_trx_t{}, sig_t(1), + blk_hash_t(4), addr_t(1)); + auto blkD = + std::make_shared(blk_hash_t(2), 2, vec_blk_t{}, vec_trx_t{}, sig_t(1), blk_hash_t(5), addr_t(1)); + auto blkE = std::make_shared(blk_hash_t(4), 3, vec_blk_t{blk_hash_t(5), blk_hash_t(7)}, vec_trx_t{}, + sig_t(1), blk_hash_t(6), addr_t(1)); + auto blkF = + std::make_shared(blk_hash_t(3), 2, vec_blk_t{}, vec_trx_t{}, sig_t(1), blk_hash_t(7), addr_t(1)); + auto blkG = std::make_shared(blk_hash_t(2), 2, vec_blk_t{}, vec_trx_t{trx_hash_t(4)}, sig_t(1), + blk_hash_t(8), addr_t(1)); + auto blkH = std::make_shared(blk_hash_t(6), 5, vec_blk_t{blk_hash_t(8), blk_hash_t(10)}, vec_trx_t{}, + sig_t(1), blk_hash_t(9), addr_t(1)); + auto blkI = std::make_shared(blk_hash_t(11), 4, vec_blk_t{blk_hash_t(4)}, vec_trx_t{}, sig_t(1), + blk_hash_t(10), addr_t(1)); + auto blkJ = + std::make_shared(blk_hash_t(7), 3, vec_blk_t{}, vec_trx_t{}, sig_t(1), blk_hash_t(11), addr_t(1)); + auto blkK = + std::make_shared(blk_hash_t(10), 5, vec_blk_t{}, vec_trx_t{}, sig_t(1), blk_hash_t(12), addr_t(1)); + + const auto blkA_hash = blkA->getHash(); + const auto blkC_hash = blkC->getHash(); + const auto blkE_hash = blkE->getHash(); + const auto blkH_hash = blkH->getHash(); + const auto blkK_hash = blkK->getHash(); mgr->addDagBlock(std::move(blkA)); mgr->addDagBlock(std::move(blkB)); @@ -420,11 +457,14 @@ TEST_F(DagTest, get_latest_pivot_tips) { node_cfgs[0].genesis.pbft.gas_limit = 100000; auto mgr = std::make_shared(node_cfgs[0], addr_t(), trx_mgr, pbft_chain, nullptr, db_ptr, nullptr); - DagBlock blk2(GENESIS, 1, {}, {}, sig_t(1), blk_hash_t(2), addr_t(15)); - DagBlock blk3(blk_hash_t(2), 2, {}, {}, sig_t(1), blk_hash_t(3), addr_t(15)); - DagBlock blk4(GENESIS, 1, 
{}, {}, sig_t(1), blk_hash_t(4), addr_t(15)); - DagBlock blk5(blk_hash_t(4), 2, {}, {}, sig_t(1), blk_hash_t(5), addr_t(15)); - DagBlock blk6(blk_hash_t(2), 3, {blk_hash_t(5)}, {}, sig_t(1), blk_hash_t(6), addr_t(15)); + auto blk2 = std::make_shared(GENESIS, 1, vec_blk_t{}, vec_trx_t{}, sig_t(1), blk_hash_t(2), addr_t(15)); + auto blk3 = + std::make_shared(blk_hash_t(2), 2, vec_blk_t{}, vec_trx_t{}, sig_t(1), blk_hash_t(3), addr_t(15)); + auto blk4 = std::make_shared(GENESIS, 1, vec_blk_t{}, vec_trx_t{}, sig_t(1), blk_hash_t(4), addr_t(15)); + auto blk5 = + std::make_shared(blk_hash_t(4), 2, vec_blk_t{}, vec_trx_t{}, sig_t(1), blk_hash_t(5), addr_t(15)); + auto blk6 = std::make_shared(blk_hash_t(2), 3, vec_blk_t{blk_hash_t(5)}, vec_trx_t{}, sig_t(1), + blk_hash_t(6), addr_t(15)); mgr->addDagBlock(std::move(blk2)); mgr->addDagBlock(std::move(blk3)); mgr->addDagBlock(std::move(blk4)); diff --git a/tests/final_chain_test.cpp b/tests/final_chain_test.cpp index 451bfd970a..376ce2fa92 100644 --- a/tests/final_chain_test.cpp +++ b/tests/final_chain_test.cpp @@ -66,7 +66,8 @@ struct FinalChainTest : WithDataDir { trx_hashes.emplace_back(trx->getHash()); } - DagBlock dag_blk({}, {}, {}, trx_hashes, {}, {}, dag_proposer_keys.secret()); + auto dag_blk = std::make_shared(blk_hash_t{}, level_t{}, vec_blk_t{}, trx_hashes, 0, VdfSortition{}, + dag_proposer_keys.secret()); db->saveDagBlock(dag_blk); std::vector reward_votes_hashes; auto pbft_block = @@ -87,7 +88,7 @@ struct FinalChainTest : WithDataDir { db->savePeriodData(period_data, batch); db->commitWriteBatch(batch); - auto result = SUT->finalize(std::move(period_data), {dag_blk.getHash()}).get(); + auto result = SUT->finalize(std::move(period_data), {dag_blk->getHash()}).get(); const auto& blk_h = *result->final_chain_blk; EXPECT_EQ(util::rlp_enc(blk_h), util::rlp_enc(*SUT->blockHeader(blk_h.number))); EXPECT_EQ(util::rlp_enc(blk_h), util::rlp_enc(*SUT->blockHeader())); diff --git a/tests/full_node_test.cpp 
b/tests/full_node_test.cpp index 634548d5e6..e495028a71 100644 --- a/tests/full_node_test.cpp +++ b/tests/full_node_test.cpp @@ -55,20 +55,23 @@ struct FullNodeTest : NodesTest {}; TEST_F(FullNodeTest, db_test) { auto db_ptr = std::make_shared(data_dir); auto &db = *db_ptr; - DagBlock blk1(blk_hash_t(1), 1, {}, {trx_hash_t(1), trx_hash_t(2)}, sig_t(777), blk_hash_t(0xB1), addr_t(999)); - DagBlock blk2(blk_hash_t(1), 1, {}, {trx_hash_t(3), trx_hash_t(4)}, sig_t(777), blk_hash_t(0xB2), addr_t(999)); - DagBlock blk3(blk_hash_t(0xB1), 2, {}, {trx_hash_t(5)}, sig_t(777), blk_hash_t(0xB6), addr_t(999)); + auto blk1 = std::make_shared(blk_hash_t(1), 1, vec_blk_t{}, vec_trx_t{trx_hash_t(1), trx_hash_t(2)}, + sig_t(777), blk_hash_t(0xB1), addr_t(999)); + auto blk2 = std::make_shared(blk_hash_t(1), 1, vec_blk_t{}, vec_trx_t{trx_hash_t(3), trx_hash_t(4)}, + sig_t(777), blk_hash_t(0xB2), addr_t(999)); + auto blk3 = std::make_shared(blk_hash_t(0xB1), 2, vec_blk_t{}, vec_trx_t{trx_hash_t(5)}, sig_t(777), + blk_hash_t(0xB6), addr_t(999)); // DAG db.saveDagBlock(blk1); db.saveDagBlock(blk2); db.saveDagBlock(blk3); - EXPECT_EQ(blk1, *db.getDagBlock(blk1.getHash())); - EXPECT_EQ(blk2, *db.getDagBlock(blk2.getHash())); - EXPECT_EQ(blk3, *db.getDagBlock(blk3.getHash())); + EXPECT_EQ(*blk1, *db.getDagBlock(blk1->getHash())); + EXPECT_EQ(*blk2, *db.getDagBlock(blk2->getHash())); + EXPECT_EQ(*blk3, *db.getDagBlock(blk3->getHash())); std::set s1, s2; - s1.emplace(blk1.getHash()); - s1.emplace(blk2.getHash()); - s2.emplace(blk3.getHash()); + s1.emplace(blk1->getHash()); + s1.emplace(blk2->getHash()); + s2.emplace(blk3->getHash()); EXPECT_EQ(db.getBlocksByLevel(1), s1); EXPECT_EQ(db.getBlocksByLevel(2), s2); @@ -833,7 +836,7 @@ TEST_F(FullNodeTest, reconstruct_dag) { taraxa::thisThreadSleepForMilliSeconds(100); for (size_t i = 0; i < num_blks; i++) { - EXPECT_EQ(true, node->getDagManager()->addDagBlock(DagBlock(mock_dags[i])).first); + EXPECT_EQ(true, 
node->getDagManager()->addDagBlock(mock_dags[i]).first); } taraxa::thisThreadSleepForMilliSeconds(100); @@ -853,7 +856,7 @@ TEST_F(FullNodeTest, reconstruct_dag) { // TODO: pbft does not support node stop yet, to be fixed ... node->getPbftManager()->stop(); for (size_t i = 0; i < num_blks; i++) { - EXPECT_EQ(true, node->getDagManager()->addDagBlock(DagBlock(mock_dags[i])).first); + EXPECT_EQ(true, node->getDagManager()->addDagBlock(mock_dags[i]).first); } taraxa::thisThreadSleepForMilliSeconds(100); vertices3 = node->getDagManager()->getNumVerticesInDag().first; diff --git a/tests/network_test.cpp b/tests/network_test.cpp index 3b6952468a..e4ea3bf976 100644 --- a/tests/network_test.cpp +++ b/tests/network_test.cpp @@ -99,8 +99,9 @@ TEST_F(NetworkTest, transfer_lot_of_blocks) { const auto dag_genesis = node1->getConfig().genesis.dag_genesis_block.getHash(); dev::bytes vdf_msg = DagManager::getVdfMessage(dag_genesis, {trxs[0]}); vdf.computeVdfSolution(sortition_params, vdf_msg, false); - DagBlock blk(dag_genesis, proposal_level, {}, {trxs[0]->getHash()}, estimation, vdf, node1->getSecretKey()); - const auto block_hash = blk.getHash(); + auto blk = std::make_shared(dag_genesis, proposal_level, vec_blk_t{}, vec_trx_t{trxs[0]->getHash()}, + estimation, vdf, node1->getSecretKey()); + const auto block_hash = blk->getHash(); dag_mgr1->addDagBlock(std::move(blk), {trxs[0]}); std::vector> dag_blocks; @@ -124,8 +125,8 @@ TEST_F(NetworkTest, transfer_lot_of_blocks) { node1->getTransactionManager()->insertValidatedTransaction(std::move(tx)); } for (size_t i = 0; i < dag_blocks.size(); i++) { - if (dag_mgr1->verifyBlock(*dag_blocks[i]).first == DagManager::VerifyBlockReturnType::Verified) - dag_mgr1->addDagBlock(DagBlock(*dag_blocks[i]), {trxs[i]}); + if (dag_mgr1->verifyBlock(dag_blocks[i]).first == DagManager::VerifyBlockReturnType::Verified) + dag_mgr1->addDagBlock(dag_blocks[i], {trxs[i]}); } wait({1s, 200ms}, [&](auto& ctx) { WAIT_EXPECT_NE(ctx, 
dag_mgr1->getDagBlock(block_hash), nullptr) }); const auto node1_period = node1->getPbftChain()->getPbftChainSize(); @@ -164,9 +165,10 @@ TEST_F(NetworkTest, propagate_block) { const auto dag_genesis = node1->getConfig().genesis.dag_genesis_block.getHash(); dev::bytes vdf_msg = DagManager::getVdfMessage(dag_genesis, {trxs[0]}); vdf.computeVdfSolution(sortition_params, vdf_msg, false); - DagBlock blk(dag_genesis, proposal_level, {}, {trxs[0]->getHash()}, estimation, vdf, node1->getSecretKey()); + auto blk = std::make_shared(dag_genesis, proposal_level, vec_blk_t{}, vec_trx_t{trxs[0]->getHash()}, + estimation, vdf, node1->getSecretKey()); - const auto block_hash = blk.getHash(); + const auto block_hash = blk->getHash(); // Add block gossip it to connected peers dag_mgr1->addDagBlock(std::move(blk), {trxs[0]}); @@ -413,7 +415,7 @@ TEST_F(NetworkTest, node_sync) { // Allow node to start up taraxa::thisThreadSleepForMilliSeconds(1000); - std::vector>> blks; + std::vector, std::shared_ptr>> blks; // Generate DAG blocks const auto dag_genesis = node1->getConfig().genesis.dag_genesis_block.getHash(); const auto sk = node1->getSecretKey(); @@ -428,43 +430,48 @@ TEST_F(NetworkTest, node_sync) { dev::bytes vdf_msg1 = DagManager::getVdfMessage(dag_genesis, {g_signed_trx_samples[1]}); vdf1.computeVdfSolution(vdf_config, vdf_msg1, false); - DagBlock blk1(dag_genesis, propose_level, {}, {g_signed_trx_samples[1]->getHash()}, estimation, vdf1, sk); + auto blk1 = std::make_shared(dag_genesis, propose_level, vec_blk_t{}, + vec_trx_t{g_signed_trx_samples[1]->getHash()}, estimation, vdf1, sk); propose_level = 2; vdf_sortition::VdfSortition vdf2(vdf_config, vrf_sk, VrfSortitionBase::makeVrfInput(propose_level, period_block_hash), 1, 1); - dev::bytes vdf_msg2 = DagManager::getVdfMessage(blk1.getHash(), {g_signed_trx_samples[2]}); + dev::bytes vdf_msg2 = DagManager::getVdfMessage(blk1->getHash(), {g_signed_trx_samples[2]}); vdf2.computeVdfSolution(vdf_config, vdf_msg2, false); - DagBlock 
blk2(blk1.getHash(), propose_level, {}, {g_signed_trx_samples[2]->getHash()}, estimation, vdf2, sk); + auto blk2 = std::make_shared(blk1->getHash(), propose_level, vec_blk_t{}, + vec_trx_t{g_signed_trx_samples[2]->getHash()}, estimation, vdf2, sk); propose_level = 3; vdf_sortition::VdfSortition vdf3(vdf_config, vrf_sk, VrfSortitionBase::makeVrfInput(propose_level, period_block_hash), 1, 1); - dev::bytes vdf_msg3 = DagManager::getVdfMessage(blk2.getHash(), {g_signed_trx_samples[3]}); + dev::bytes vdf_msg3 = DagManager::getVdfMessage(blk2->getHash(), {g_signed_trx_samples[3]}); vdf3.computeVdfSolution(vdf_config, vdf_msg3, false); - DagBlock blk3(blk2.getHash(), propose_level, {}, {g_signed_trx_samples[3]->getHash()}, estimation, vdf3, sk); + auto blk3 = std::make_shared(blk2->getHash(), propose_level, vec_blk_t{}, + vec_trx_t{g_signed_trx_samples[3]->getHash()}, estimation, vdf3, sk); propose_level = 4; vdf_sortition::VdfSortition vdf4(vdf_config, vrf_sk, VrfSortitionBase::makeVrfInput(propose_level, period_block_hash), 1, 1); - dev::bytes vdf_msg4 = DagManager::getVdfMessage(blk3.getHash(), {g_signed_trx_samples[4]}); + dev::bytes vdf_msg4 = DagManager::getVdfMessage(blk3->getHash(), {g_signed_trx_samples[4]}); vdf4.computeVdfSolution(vdf_config, vdf_msg4, false); - DagBlock blk4(blk3.getHash(), propose_level, {}, {g_signed_trx_samples[4]->getHash()}, estimation, vdf4, sk); + auto blk4 = std::make_shared(blk3->getHash(), propose_level, vec_blk_t{}, + vec_trx_t{g_signed_trx_samples[4]->getHash()}, estimation, vdf4, sk); propose_level = 5; vdf_sortition::VdfSortition vdf5(vdf_config, vrf_sk, VrfSortitionBase::makeVrfInput(propose_level, period_block_hash), 1, 1); - dev::bytes vdf_msg5 = DagManager::getVdfMessage(blk4.getHash(), {g_signed_trx_samples[5]}); + dev::bytes vdf_msg5 = DagManager::getVdfMessage(blk4->getHash(), {g_signed_trx_samples[5]}); vdf5.computeVdfSolution(vdf_config, vdf_msg5, false); - DagBlock blk5(blk4.getHash(), propose_level, {}, 
{g_signed_trx_samples[5]->getHash()}, estimation, vdf5, sk); + auto blk5 = std::make_shared(blk4->getHash(), propose_level, vec_blk_t{}, + vec_trx_t{g_signed_trx_samples[5]->getHash()}, estimation, vdf5, sk); propose_level = 6; vdf_sortition::VdfSortition vdf6(vdf_config, vrf_sk, VrfSortitionBase::makeVrfInput(propose_level, period_block_hash), 1, 1); - dev::bytes vdf_msg6 = DagManager::getVdfMessage(blk5.getHash(), {g_signed_trx_samples[6]}); + dev::bytes vdf_msg6 = DagManager::getVdfMessage(blk5->getHash(), {g_signed_trx_samples[6]}); vdf6.computeVdfSolution(vdf_config, vdf_msg6, false); - DagBlock blk6(blk5.getHash(), propose_level, {blk4.getHash(), blk3.getHash()}, {g_signed_trx_samples[6]->getHash()}, - estimation, vdf6, sk); + auto blk6 = std::make_shared(blk5->getHash(), propose_level, vec_blk_t{blk4->getHash(), blk3->getHash()}, + vec_trx_t{g_signed_trx_samples[6]->getHash()}, estimation, vdf6, sk); blks.push_back(std::make_pair(blk1, g_signed_trx_samples[1])); blks.push_back(std::make_pair(blk2, g_signed_trx_samples[2])); @@ -521,18 +528,19 @@ TEST_F(NetworkTest, node_pbft_sync) { vdf_sortition::VdfSortition vdf1(vdf_config, vrf_sk, getRlpBytes(level), 1, 1); dev::bytes vdf_msg1 = DagManager::getVdfMessage(dag_genesis, {g_signed_trx_samples[0], g_signed_trx_samples[1]}); vdf1.computeVdfSolution(vdf_config, vdf_msg1, false); - DagBlock blk1(dag_genesis, 1, {}, {g_signed_trx_samples[0]->getHash(), g_signed_trx_samples[1]->getHash()}, 0, vdf1, - sk); + auto blk1 = std::make_shared( + dag_genesis, 1, vec_blk_t{}, vec_trx_t{g_signed_trx_samples[0]->getHash(), g_signed_trx_samples[1]->getHash()}, 0, + vdf1, sk); node1->getTransactionManager()->insertValidatedTransaction(std::shared_ptr(g_signed_trx_samples[0])); node1->getTransactionManager()->insertValidatedTransaction(std::shared_ptr(g_signed_trx_samples[1])); - node1->getDagManager()->verifyBlock(DagBlock(blk1)); - node1->getDagManager()->addDagBlock(DagBlock(blk1)); + 
node1->getDagManager()->verifyBlock(blk1); + node1->getDagManager()->addDagBlock(blk1); dev::RLPStream order_stream(1); order_stream.appendList(1); - order_stream << blk1.getHash(); + order_stream << blk1->getHash(); - PbftBlock pbft_block1(prev_block_hash, blk1.getHash(), dev::sha3(order_stream.out()), kNullBlockHash, period, + PbftBlock pbft_block1(prev_block_hash, blk1->getHash(), dev::sha3(order_stream.out()), kNullBlockHash, period, beneficiary, node1->getSecretKey(), {}, {}); std::vector> votes_for_pbft_blk1; votes_for_pbft_blk1.emplace_back( @@ -557,10 +565,10 @@ TEST_F(NetworkTest, node_pbft_sync) { db1->commitWriteBatch(batch); vec_blk_t order1; - order1.push_back(blk1.getHash()); + order1.push_back(blk1->getHash()); { std::unique_lock dag_lock(node1->getDagManager()->getDagMutex()); - node1->getDagManager()->setDagBlockOrder(blk1.getHash(), level, order1); + node1->getDagManager()->setDagBlockOrder(blk1->getHash(), level, order1); } uint64_t expect_pbft_chain_size = 1; @@ -571,22 +579,23 @@ TEST_F(NetworkTest, node_pbft_sync) { level = 2; vdf_sortition::VdfSortition vdf2(vdf_config, vrf_sk, getRlpBytes(level), 1, 1); - dev::bytes vdf_msg2 = DagManager::getVdfMessage(blk1.getHash(), {g_signed_trx_samples[2], g_signed_trx_samples[3]}); + dev::bytes vdf_msg2 = DagManager::getVdfMessage(blk1->getHash(), {g_signed_trx_samples[2], g_signed_trx_samples[3]}); vdf2.computeVdfSolution(vdf_config, vdf_msg2, false); - DagBlock blk2(blk1.getHash(), 2, {}, {g_signed_trx_samples[2]->getHash(), g_signed_trx_samples[3]->getHash()}, 0, - vdf2, sk); + auto blk2 = std::make_shared( + blk1->getHash(), 2, vec_blk_t{}, + vec_trx_t{g_signed_trx_samples[2]->getHash(), g_signed_trx_samples[3]->getHash()}, 0, vdf2, sk); node1->getTransactionManager()->insertValidatedTransaction(std::shared_ptr(g_signed_trx_samples[2])); node1->getTransactionManager()->insertValidatedTransaction(std::shared_ptr(g_signed_trx_samples[3])); - node1->getDagManager()->verifyBlock(DagBlock(blk2)); - 
node1->getDagManager()->addDagBlock(DagBlock(blk2)); + node1->getDagManager()->verifyBlock(blk2); + node1->getDagManager()->addDagBlock(blk2); batch = db1->createWriteBatch(); period = 2; beneficiary = addr_t(654); dev::RLPStream order_stream2(1); order_stream2.appendList(1); - order_stream2 << blk2.getHash(); - PbftBlock pbft_block2(prev_block_hash, blk2.getHash(), dev::sha3(order_stream2.out()), kNullBlockHash, period, + order_stream2 << blk2->getHash(); + PbftBlock pbft_block2(prev_block_hash, blk2->getHash(), dev::sha3(order_stream2.out()), kNullBlockHash, period, beneficiary, node1->getSecretKey(), {}, {}); std::vector> votes_for_pbft_blk2; votes_for_pbft_blk2.emplace_back( @@ -617,10 +626,10 @@ TEST_F(NetworkTest, node_pbft_sync) { db1->commitWriteBatch(batch); vec_blk_t order2; - order2.push_back(blk2.getHash()); + order2.push_back(blk2->getHash()); { std::unique_lock dag_lock(node1->getDagManager()->getDagMutex()); - node1->getDagManager()->setDagBlockOrder(blk2.getHash(), level, order2); + node1->getDagManager()->setDagBlockOrder(blk2->getHash(), level, order2); } expect_pbft_chain_size = 2; @@ -668,18 +677,19 @@ TEST_F(NetworkTest, node_pbft_sync_without_enough_votes) { vdf_sortition::VdfSortition vdf1(vdf_config, vrf_sk, getRlpBytes(level), 1, 1); dev::bytes vdf_msg1 = DagManager::getVdfMessage(dag_genesis, {g_signed_trx_samples[0], g_signed_trx_samples[1]}); vdf1.computeVdfSolution(vdf_config, vdf_msg1, false); - DagBlock blk1(dag_genesis, 1, {}, {g_signed_trx_samples[0]->getHash(), g_signed_trx_samples[1]->getHash()}, 0, vdf1, - sk); + auto blk1 = std::make_shared( + dag_genesis, 1, vec_blk_t{}, vec_trx_t{g_signed_trx_samples[0]->getHash(), g_signed_trx_samples[1]->getHash()}, 0, + vdf1, sk); node1->getTransactionManager()->insertValidatedTransaction(std::shared_ptr(g_signed_trx_samples[0])); node1->getTransactionManager()->insertValidatedTransaction(std::shared_ptr(g_signed_trx_samples[1])); - node1->getDagManager()->verifyBlock(DagBlock(blk1)); - 
node1->getDagManager()->addDagBlock(DagBlock(blk1)); + node1->getDagManager()->verifyBlock(blk1); + node1->getDagManager()->addDagBlock(blk1); dev::RLPStream order_stream(1); order_stream.appendList(1); - order_stream << blk1.getHash(); + order_stream << blk1->getHash(); - PbftBlock pbft_block1(prev_block_hash, blk1.getHash(), dev::sha3(order_stream.out()), kNullBlockHash, period, + PbftBlock pbft_block1(prev_block_hash, blk1->getHash(), dev::sha3(order_stream.out()), kNullBlockHash, period, beneficiary, node1->getSecretKey(), {}, {}); const auto pbft_block1_cert_vote = node1->getVoteManager()->generateVote( pbft_block1.getBlockHash(), PbftVoteTypes::cert_vote, pbft_block1.getPeriod(), 1, 3); @@ -707,14 +717,15 @@ TEST_F(NetworkTest, node_pbft_sync_without_enough_votes) { prev_block_hash = pbft_block1.getBlockHash(); level = 2; vdf_sortition::VdfSortition vdf2(vdf_config, vrf_sk, getRlpBytes(level), 1, 1); - dev::bytes vdf_msg2 = DagManager::getVdfMessage(blk1.getHash(), {g_signed_trx_samples[2], g_signed_trx_samples[3]}); + dev::bytes vdf_msg2 = DagManager::getVdfMessage(blk1->getHash(), {g_signed_trx_samples[2], g_signed_trx_samples[3]}); vdf2.computeVdfSolution(vdf_config, vdf_msg2, false); - DagBlock blk2(blk1.getHash(), 2, {}, {g_signed_trx_samples[2]->getHash(), g_signed_trx_samples[3]->getHash()}, 0, - vdf2, sk); + auto blk2 = std::make_shared( + blk1->getHash(), 2, vec_blk_t{}, + vec_trx_t{g_signed_trx_samples[2]->getHash(), g_signed_trx_samples[3]->getHash()}, 0, vdf2, sk); node1->getTransactionManager()->insertValidatedTransaction(std::shared_ptr(g_signed_trx_samples[2])); node1->getTransactionManager()->insertValidatedTransaction(std::shared_ptr(g_signed_trx_samples[3])); - node1->getDagManager()->verifyBlock(DagBlock(blk2)); - node1->getDagManager()->addDagBlock(DagBlock(blk2)); + node1->getDagManager()->verifyBlock(blk2); + node1->getDagManager()->addDagBlock(blk2); batch = db1->createWriteBatch(); period = 2; @@ -722,9 +733,9 @@ TEST_F(NetworkTest, 
node_pbft_sync_without_enough_votes) { dev::RLPStream order_stream2(1); order_stream2.appendList(1); - order_stream2 << blk2.getHash(); + order_stream2 << blk2->getHash(); - PbftBlock pbft_block2(prev_block_hash, blk2.getHash(), dev::sha3(order_stream2.out()), kNullBlockHash, period, + PbftBlock pbft_block2(prev_block_hash, blk2->getHash(), dev::sha3(order_stream2.out()), kNullBlockHash, period, beneficiary, node1->getSecretKey(), {}, {}); const auto pbft_block2_cert_vote = node1->getVoteManager()->generateVote( pbft_block2.getBlockHash(), PbftVoteTypes::cert_vote, pbft_block2.getPeriod(), 1, 3); @@ -886,71 +897,76 @@ TEST_F(NetworkTest, node_sync_with_transactions) { dev::RLPStream s; dev::bytes vdf_msg1 = DagManager::getVdfMessage(dag_genesis, {g_signed_trx_samples[0], g_signed_trx_samples[1]}); vdf1.computeVdfSolution(vdf_config, vdf_msg1, false); - DagBlock blk1(dag_genesis, propose_level, {}, - {g_signed_trx_samples[0]->getHash(), g_signed_trx_samples[1]->getHash()}, 2 * estimation, vdf1, sk); + auto blk1 = std::make_shared( + dag_genesis, propose_level, vec_blk_t{}, + vec_trx_t{g_signed_trx_samples[0]->getHash(), g_signed_trx_samples[1]->getHash()}, 2 * estimation, vdf1, sk); propose_level = 2; vdf_sortition::VdfSortition vdf2(vdf_config, vrf_sk, VrfSortitionBase::makeVrfInput(propose_level, period_block_hash), 1, 1); - dev::bytes vdf_msg2 = DagManager::getVdfMessage(blk1.getHash(), {g_signed_trx_samples[2]}); + dev::bytes vdf_msg2 = DagManager::getVdfMessage(blk1->getHash(), {g_signed_trx_samples[2]}); vdf2.computeVdfSolution(vdf_config, vdf_msg2, false); - DagBlock blk2(blk1.getHash(), propose_level, {}, {g_signed_trx_samples[2]->getHash()}, estimation, vdf2, sk); + auto blk2 = std::make_shared(blk1->getHash(), propose_level, vec_trx_t{}, + vec_trx_t{g_signed_trx_samples[2]->getHash()}, estimation, vdf2, sk); propose_level = 3; vdf_sortition::VdfSortition vdf3(vdf_config, vrf_sk, VrfSortitionBase::makeVrfInput(propose_level, period_block_hash), 1, 1); - 
dev::bytes vdf_msg3 = DagManager::getVdfMessage(blk2.getHash(), {g_signed_trx_samples[3]}); + dev::bytes vdf_msg3 = DagManager::getVdfMessage(blk2->getHash(), {g_signed_trx_samples[3]}); vdf3.computeVdfSolution(vdf_config, vdf_msg3, false); - DagBlock blk3(blk2.getHash(), propose_level, {}, {g_signed_trx_samples[3]->getHash()}, estimation, vdf3, sk); + auto blk3 = std::make_shared(blk2->getHash(), propose_level, vec_trx_t{}, + vec_trx_t{g_signed_trx_samples[3]->getHash()}, estimation, vdf3, sk); propose_level = 4; vdf_sortition::VdfSortition vdf4(vdf_config, vrf_sk, VrfSortitionBase::makeVrfInput(propose_level, period_block_hash), 1, 1); - dev::bytes vdf_msg4 = DagManager::getVdfMessage(blk3.getHash(), {g_signed_trx_samples[4]}); + dev::bytes vdf_msg4 = DagManager::getVdfMessage(blk3->getHash(), {g_signed_trx_samples[4]}); vdf4.computeVdfSolution(vdf_config, vdf_msg4, false); - DagBlock blk4(blk3.getHash(), propose_level, {}, {g_signed_trx_samples[4]->getHash()}, estimation, vdf4, sk); + auto blk4 = std::make_shared(blk3->getHash(), propose_level, vec_trx_t{}, + vec_trx_t{g_signed_trx_samples[4]->getHash()}, estimation, vdf4, sk); propose_level = 5; vdf_sortition::VdfSortition vdf5(vdf_config, vrf_sk, VrfSortitionBase::makeVrfInput(propose_level, period_block_hash), 1, 1); - dev::bytes vdf_msg5 = DagManager::getVdfMessage(blk4.getHash(), {g_signed_trx_samples[5], g_signed_trx_samples[6], - g_signed_trx_samples[7], g_signed_trx_samples[8]}); + dev::bytes vdf_msg5 = DagManager::getVdfMessage(blk4->getHash(), {g_signed_trx_samples[5], g_signed_trx_samples[6], + g_signed_trx_samples[7], g_signed_trx_samples[8]}); vdf5.computeVdfSolution(vdf_config, vdf_msg5, false); - DagBlock blk5(blk4.getHash(), propose_level, {}, - {g_signed_trx_samples[5]->getHash(), g_signed_trx_samples[6]->getHash(), - g_signed_trx_samples[7]->getHash(), g_signed_trx_samples[8]->getHash()}, - 4 * estimation, vdf5, sk); + auto blk5 = + std::make_shared(blk4->getHash(), propose_level, vec_trx_t{}, 
+ vec_trx_t{g_signed_trx_samples[5]->getHash(), g_signed_trx_samples[6]->getHash(), + g_signed_trx_samples[7]->getHash(), g_signed_trx_samples[8]->getHash()}, + 4 * estimation, vdf5, sk); propose_level = 6; vdf_sortition::VdfSortition vdf6(vdf_config, vrf_sk, VrfSortitionBase::makeVrfInput(propose_level, period_block_hash), 1, 1); - dev::bytes vdf_msg6 = DagManager::getVdfMessage(blk5.getHash(), {g_signed_trx_samples[9]}); + dev::bytes vdf_msg6 = DagManager::getVdfMessage(blk5->getHash(), {g_signed_trx_samples[9]}); vdf6.computeVdfSolution(vdf_config, vdf_msg6, false); - DagBlock blk6(blk5.getHash(), propose_level, {blk4.getHash(), blk3.getHash()}, {g_signed_trx_samples[9]->getHash()}, - estimation, vdf6, sk); + auto blk6 = std::make_shared(blk5->getHash(), propose_level, vec_trx_t{blk4->getHash(), blk3->getHash()}, + vec_trx_t{g_signed_trx_samples[9]->getHash()}, estimation, vdf6, sk); node1->getTransactionManager()->insertValidatedTransaction(std::shared_ptr(g_signed_trx_samples[0])); node1->getTransactionManager()->insertValidatedTransaction(std::shared_ptr(g_signed_trx_samples[1])); EXPECT_EQ(node1->getDagManager()->verifyBlock(std::move(blk1)).first, DagManager::VerifyBlockReturnType::Verified); - node1->getDagManager()->addDagBlock(DagBlock(blk1)); + node1->getDagManager()->addDagBlock(blk1); node1->getTransactionManager()->insertValidatedTransaction(std::shared_ptr(g_signed_trx_samples[2])); EXPECT_EQ(node1->getDagManager()->verifyBlock(std::move(blk2)).first, DagManager::VerifyBlockReturnType::Verified); - node1->getDagManager()->addDagBlock(DagBlock(blk2)); + node1->getDagManager()->addDagBlock(blk2); node1->getTransactionManager()->insertValidatedTransaction(std::shared_ptr(g_signed_trx_samples[3])); EXPECT_EQ(node1->getDagManager()->verifyBlock(std::move(blk3)).first, DagManager::VerifyBlockReturnType::Verified); - node1->getDagManager()->addDagBlock(DagBlock(blk3)); + node1->getDagManager()->addDagBlock(blk3); 
node1->getTransactionManager()->insertValidatedTransaction(std::shared_ptr(g_signed_trx_samples[4])); EXPECT_EQ(node1->getDagManager()->verifyBlock(std::move(blk4)).first, DagManager::VerifyBlockReturnType::Verified); - node1->getDagManager()->addDagBlock(DagBlock(blk4)); + node1->getDagManager()->addDagBlock(blk4); node1->getTransactionManager()->insertValidatedTransaction(std::shared_ptr(g_signed_trx_samples[5])); node1->getTransactionManager()->insertValidatedTransaction(std::shared_ptr(g_signed_trx_samples[6])); node1->getTransactionManager()->insertValidatedTransaction(std::shared_ptr(g_signed_trx_samples[7])); node1->getTransactionManager()->insertValidatedTransaction(std::shared_ptr(g_signed_trx_samples[8])); EXPECT_EQ(node1->getDagManager()->verifyBlock(std::move(blk5)).first, DagManager::VerifyBlockReturnType::Verified); - node1->getDagManager()->addDagBlock(DagBlock(blk5)); + node1->getDagManager()->addDagBlock(blk5); node1->getTransactionManager()->insertValidatedTransaction(std::shared_ptr(g_signed_trx_samples[9])); EXPECT_EQ(node1->getDagManager()->verifyBlock(std::move(blk6)).first, DagManager::VerifyBlockReturnType::Verified); - node1->getDagManager()->addDagBlock(DagBlock(blk6)); + node1->getDagManager()->addDagBlock(blk6); // To make sure blocks are stored before starting node 2 taraxa::thisThreadSleepForMilliSeconds(1000); @@ -974,7 +990,7 @@ TEST_F(NetworkTest, node_sync2) { auto node_cfgs = make_node_cfgs(2, 1, 5); auto node1 = create_nodes({node_cfgs[0]}, true /*start*/).front(); - std::vector blks; + std::vector> blks; // Generate DAG blocks const auto dag_genesis = node1->getConfig().genesis.dag_genesis_block.getHash(); const auto sk = node1->getSecretKey(); @@ -989,8 +1005,9 @@ TEST_F(NetworkTest, node_sync2) { 1, 1); dev::bytes vdf_msg = DagManager::getVdfMessage(dag_genesis, {transactions[0], transactions[1]}); vdf1.computeVdfSolution(vdf_config, vdf_msg, false); - DagBlock blk1(dag_genesis, propose_level, {}, {transactions[0]->getHash(), 
transactions[1]->getHash()}, - 2 * estimation, vdf1, sk); + auto blk1 = std::make_shared(dag_genesis, propose_level, vec_blk_t{}, + vec_trx_t{transactions[0]->getHash(), transactions[1]->getHash()}, + 2 * estimation, vdf1, sk); SharedTransactions tr1({transactions[0], transactions[1]}); // DAG block2 propose_level = 1; @@ -998,98 +1015,109 @@ TEST_F(NetworkTest, node_sync2) { 1, 1); vdf_msg = DagManager::getVdfMessage(dag_genesis, {transactions[2], transactions[3]}); vdf2.computeVdfSolution(vdf_config, vdf_msg, false); - DagBlock blk2(dag_genesis, propose_level, {}, {transactions[2]->getHash(), transactions[3]->getHash()}, - 2 * estimation, vdf2, sk); + auto blk2 = std::make_shared(dag_genesis, propose_level, vec_blk_t{}, + vec_trx_t{transactions[2]->getHash(), transactions[3]->getHash()}, + 2 * estimation, vdf2, sk); SharedTransactions tr2({transactions[2], transactions[3]}); // DAG block3 propose_level = 2; vdf_sortition::VdfSortition vdf3(vdf_config, vrf_sk, VrfSortitionBase::makeVrfInput(propose_level, period_block_hash), 1, 1); - vdf_msg = DagManager::getVdfMessage(blk1.getHash(), {transactions[4], transactions[5]}); + vdf_msg = DagManager::getVdfMessage(blk1->getHash(), {transactions[4], transactions[5]}); vdf3.computeVdfSolution(vdf_config, vdf_msg, false); - DagBlock blk3(blk1.getHash(), propose_level, {}, {transactions[4]->getHash(), transactions[5]->getHash()}, - 2 * estimation, vdf3, sk); + auto blk3 = std::make_shared(blk1->getHash(), propose_level, vec_blk_t{}, + vec_trx_t{transactions[4]->getHash(), transactions[5]->getHash()}, + 2 * estimation, vdf3, sk); SharedTransactions tr3({transactions[4], transactions[5]}); // DAG block4 propose_level = 3; vdf_sortition::VdfSortition vdf4(vdf_config, vrf_sk, VrfSortitionBase::makeVrfInput(propose_level, period_block_hash), 1, 1); - vdf_msg = DagManager::getVdfMessage(blk3.getHash(), {transactions[6], transactions[7]}); + vdf_msg = DagManager::getVdfMessage(blk3->getHash(), {transactions[6], transactions[7]}); 
vdf4.computeVdfSolution(vdf_config, vdf_msg, false); - DagBlock blk4(blk3.getHash(), propose_level, {}, {transactions[6]->getHash(), transactions[7]->getHash()}, - 2 * estimation, vdf4, sk); + auto blk4 = std::make_shared(blk3->getHash(), propose_level, vec_blk_t{}, + vec_trx_t{transactions[6]->getHash(), transactions[7]->getHash()}, + 2 * estimation, vdf4, sk); SharedTransactions tr4({transactions[6], transactions[7]}); // DAG block5 propose_level = 2; vdf_sortition::VdfSortition vdf5(vdf_config, vrf_sk, VrfSortitionBase::makeVrfInput(propose_level, period_block_hash), 1, 1); - vdf_msg = DagManager::getVdfMessage(blk2.getHash(), {transactions[8], transactions[9]}); + vdf_msg = DagManager::getVdfMessage(blk2->getHash(), {transactions[8], transactions[9]}); vdf5.computeVdfSolution(vdf_config, vdf_msg, false); - DagBlock blk5(blk2.getHash(), propose_level, {}, {transactions[8]->getHash(), transactions[9]->getHash()}, - 2 * estimation, vdf5, sk); + auto blk5 = std::make_shared(blk2->getHash(), propose_level, vec_blk_t{}, + vec_trx_t{transactions[8]->getHash(), transactions[9]->getHash()}, + 2 * estimation, vdf5, sk); SharedTransactions tr5({transactions[8], transactions[9]}); // DAG block6 propose_level = 2; vdf_sortition::VdfSortition vdf6(vdf_config, vrf_sk, VrfSortitionBase::makeVrfInput(propose_level, period_block_hash), 1, 1); - vdf_msg = DagManager::getVdfMessage(blk1.getHash(), {transactions[10], transactions[11]}); + vdf_msg = DagManager::getVdfMessage(blk1->getHash(), {transactions[10], transactions[11]}); vdf6.computeVdfSolution(vdf_config, vdf_msg, false); - DagBlock blk6(blk1.getHash(), propose_level, {}, {transactions[10]->getHash(), transactions[11]->getHash()}, - 2 * estimation, vdf6, sk); + auto blk6 = std::make_shared(blk1->getHash(), propose_level, vec_blk_t{}, + vec_trx_t{transactions[10]->getHash(), transactions[11]->getHash()}, + 2 * estimation, vdf6, sk); SharedTransactions tr6({transactions[10], transactions[11]}); // DAG block7 propose_level = 
3; vdf_sortition::VdfSortition vdf7(vdf_config, vrf_sk, VrfSortitionBase::makeVrfInput(propose_level, period_block_hash), 1, 1); - vdf_msg = DagManager::getVdfMessage(blk6.getHash(), {transactions[12], transactions[13]}); + vdf_msg = DagManager::getVdfMessage(blk6->getHash(), {transactions[12], transactions[13]}); vdf7.computeVdfSolution(vdf_config, vdf_msg, false); - DagBlock blk7(blk6.getHash(), propose_level, {}, {transactions[12]->getHash(), transactions[13]->getHash()}, - 2 * estimation, vdf7, sk); + auto blk7 = std::make_shared(blk6->getHash(), propose_level, vec_blk_t{}, + vec_trx_t{transactions[12]->getHash(), transactions[13]->getHash()}, + 2 * estimation, vdf7, sk); SharedTransactions tr7({transactions[12], transactions[13]}); // DAG block8 propose_level = 4; vdf_sortition::VdfSortition vdf8(vdf_config, vrf_sk, VrfSortitionBase::makeVrfInput(propose_level, period_block_hash), 1, 1); - vdf_msg = DagManager::getVdfMessage(blk1.getHash(), {transactions[14], transactions[15]}); + vdf_msg = DagManager::getVdfMessage(blk1->getHash(), {transactions[14], transactions[15]}); vdf8.computeVdfSolution(vdf_config, vdf_msg, false); - DagBlock blk8(blk1.getHash(), propose_level, {blk7.getHash()}, - {transactions[14]->getHash(), transactions[15]->getHash()}, 2 * estimation, vdf8, sk); + auto blk8 = std::make_shared(blk1->getHash(), propose_level, vec_blk_t{blk7->getHash()}, + vec_trx_t{transactions[14]->getHash(), transactions[15]->getHash()}, + 2 * estimation, vdf8, sk); SharedTransactions tr8({transactions[14], transactions[15]}); // DAG block9 propose_level = 2; vdf_sortition::VdfSortition vdf9(vdf_config, vrf_sk, VrfSortitionBase::makeVrfInput(propose_level, period_block_hash), 1, 1); - vdf_msg = DagManager::getVdfMessage(blk1.getHash(), {transactions[16], transactions[17]}); + vdf_msg = DagManager::getVdfMessage(blk1->getHash(), {transactions[16], transactions[17]}); vdf9.computeVdfSolution(vdf_config, vdf_msg, false); - DagBlock blk9(blk1.getHash(), propose_level, 
{}, {transactions[16]->getHash(), transactions[17]->getHash()}, - 2 * estimation, vdf9, sk); + auto blk9 = std::make_shared(blk1->getHash(), propose_level, vec_blk_t{}, + vec_trx_t{transactions[16]->getHash(), transactions[17]->getHash()}, + 2 * estimation, vdf9, sk); SharedTransactions tr9({transactions[16], transactions[17]}); // DAG block10 propose_level = 5; vdf_sortition::VdfSortition vdf10(vdf_config, vrf_sk, VrfSortitionBase::makeVrfInput(propose_level, period_block_hash), 1, 1); - vdf_msg = DagManager::getVdfMessage(blk8.getHash(), {transactions[18], transactions[19]}); + vdf_msg = DagManager::getVdfMessage(blk8->getHash(), {transactions[18], transactions[19]}); vdf10.computeVdfSolution(vdf_config, vdf_msg, false); - DagBlock blk10(blk8.getHash(), propose_level, {}, {transactions[18]->getHash(), transactions[19]->getHash()}, - 2 * estimation, vdf10, sk); + auto blk10 = std::make_shared(blk8->getHash(), propose_level, vec_blk_t{}, + vec_trx_t{transactions[18]->getHash(), transactions[19]->getHash()}, + 2 * estimation, vdf10, sk); SharedTransactions tr10({transactions[18], transactions[19]}); // DAG block11 propose_level = 3; vdf_sortition::VdfSortition vdf11(vdf_config, vrf_sk, VrfSortitionBase::makeVrfInput(propose_level, period_block_hash), 1, 1); - vdf_msg = DagManager::getVdfMessage(blk3.getHash(), {transactions[20], transactions[21]}); + vdf_msg = DagManager::getVdfMessage(blk3->getHash(), {transactions[20], transactions[21]}); vdf11.computeVdfSolution(vdf_config, vdf_msg, false); - DagBlock blk11(blk3.getHash(), propose_level, {}, {transactions[20]->getHash(), transactions[21]->getHash()}, - 2 * estimation, vdf11, sk); + auto blk11 = std::make_shared(blk3->getHash(), propose_level, vec_blk_t{}, + vec_trx_t{transactions[20]->getHash(), transactions[21]->getHash()}, + 2 * estimation, vdf11, sk); SharedTransactions tr11({transactions[20], transactions[21]}); // DAG block12 propose_level = 3; vdf_sortition::VdfSortition vdf12(vdf_config, vrf_sk, 
VrfSortitionBase::makeVrfInput(propose_level, period_block_hash), 1, 1); - vdf_msg = DagManager::getVdfMessage(blk5.getHash(), {transactions[22], transactions[23]}); + vdf_msg = DagManager::getVdfMessage(blk5->getHash(), {transactions[22], transactions[23]}); vdf12.computeVdfSolution(vdf_config, vdf_msg, false); - DagBlock blk12(blk5.getHash(), propose_level, {}, {transactions[22]->getHash(), transactions[23]->getHash()}, - 2 * estimation, vdf12, sk); + auto blk12 = std::make_shared(blk5->getHash(), propose_level, vec_blk_t{}, + vec_trx_t{transactions[22]->getHash(), transactions[23]->getHash()}, + 2 * estimation, vdf12, sk); SharedTransactions tr12({transactions[22], transactions[23]}); blks.push_back(blk1); @@ -1122,7 +1150,7 @@ TEST_F(NetworkTest, node_sync2) { for (size_t i = 0; i < blks.size(); ++i) { for (auto t : trxs[i]) node1->getTransactionManager()->insertValidatedTransaction(std::move(t)); node1->getDagManager()->verifyBlock(std::move(blks[i])); - node1->getDagManager()->addDagBlock(DagBlock(blks[i])); + node1->getDagManager()->addDagBlock(blks[i]); } auto node2 = create_nodes({node_cfgs[1]}, true /*start*/).front(); diff --git a/tests/pbft_chain_test.cpp b/tests/pbft_chain_test.cpp index dea4f2a9a4..7adee1ceb9 100644 --- a/tests/pbft_chain_test.cpp +++ b/tests/pbft_chain_test.cpp @@ -51,11 +51,11 @@ TEST_F(PbftChainTest, pbft_db_test) { level_t level = 1; vdf_sortition::VdfSortition vdf1(vdf_config, vrf_sk, getRlpBytes(level), 1, 100); vdf1.computeVdfSolution(vdf_config, dag_genesis.asBytes(), false); - DagBlock blk1(dag_genesis, 1, {}, {}, {}, vdf1, sk); + auto blk1 = std::make_shared(dag_genesis, 1, vec_blk_t{}, vec_trx_t{}, 0, vdf1, sk); PbftPeriod period = 1; addr_t beneficiary(987); - PbftBlock pbft_block(prev_block_hash, blk1.getHash(), kNullBlockHash, kNullBlockHash, period, beneficiary, + PbftBlock pbft_block(prev_block_hash, blk1->getHash(), kNullBlockHash, kNullBlockHash, period, beneficiary, node->getSecretKey(), {}); // put into pbft chain 
and store into DB diff --git a/tests/rewards_stats_test.cpp b/tests/rewards_stats_test.cpp index ea813a1c44..9b2e9d4272 100644 --- a/tests/rewards_stats_test.cpp +++ b/tests/rewards_stats_test.cpp @@ -207,7 +207,8 @@ TEST_F(RewardsStatsTest, feeRewards) { auto trx = std::make_shared(nonce++, 0, 1, trx_gas_fee, dev::fromHex(samples::greeter_contract_code), pbft_proposer.secret()); - DagBlock dag_blk({}, {}, {}, {trx->getHash()}, {}, {}, dag_proposer.secret()); + auto dag_blk = std::make_shared(blk_hash_t{}, level_t{}, vec_blk_t{}, vec_trx_t{trx->getHash()}, 0, + VdfSortition{}, dag_proposer.secret()); db->saveDagBlock(dag_blk); std::vector reward_votes_hashes; auto pbft_block = @@ -266,35 +267,40 @@ TEST_F(RewardsStatsTest, dagBlockRewards) { vdf_sortition::VdfSortition vdf1(sortition_params, vrfs, vrf_wrapper::VrfSortitionBase::makeVrfInput(1, blk_hash_t(1)), 1, 1); - DagBlock dag_blk1({}, {}, {}, {trxs[0]->getHash()}, 0, vdf1, dag_key1.secret()); + auto dag_blk1 = std::make_shared(blk_hash_t{}, level_t{}, vec_blk_t{}, vec_trx_t{trxs[0]->getHash()}, 0, + vdf1, dag_key1.secret()); block.dag_blocks.push_back(dag_blk1); vdf_sortition::VdfSortition vdf2(sortition_params, vrfs, vrf_wrapper::VrfSortitionBase::makeVrfInput(1, blk_hash_t(2)), 1, 1); - DagBlock dag_blk2({}, {}, {}, {trxs[1]->getHash()}, 0, vdf2, dag_key2.secret()); + auto dag_blk2 = std::make_shared(blk_hash_t{}, level_t{}, vec_blk_t{}, vec_trx_t{trxs[1]->getHash()}, 0, + vdf2, dag_key2.secret()); block.dag_blocks.push_back(dag_blk2); vdf_sortition::VdfSortition vdf3(sortition_params, vrfs, vrf_wrapper::VrfSortitionBase::makeVrfInput(1, blk_hash_t(3)), 1, 1); - DagBlock dag_blk3({}, {}, {}, {trxs[0]->getHash()}, 0, vdf3, dag_key3.secret()); + auto dag_blk3 = std::make_shared(blk_hash_t{}, level_t{}, vec_blk_t{}, vec_trx_t{trxs[0]->getHash()}, 0, + vdf3, dag_key3.secret()); block.dag_blocks.push_back(dag_blk3); vdf_sortition::VdfSortition vdf4(sortition_params, vrfs, 
vrf_wrapper::VrfSortitionBase::makeVrfInput(1, blk_hash_t(4)), 1, 1); - DagBlock dag_blk4({}, {}, {}, {trxs[1]->getHash()}, 0, vdf4, dag_key4.secret()); + auto dag_blk4 = std::make_shared(blk_hash_t{}, level_t{}, vec_blk_t{}, vec_trx_t{trxs[1]->getHash()}, 0, + vdf4, dag_key4.secret()); block.dag_blocks.push_back(dag_blk4); vdf_sortition::VdfSortition vdf5(sortition_params, vrfs, vrf_wrapper::VrfSortitionBase::makeVrfInput(1, blk_hash_t(5)), 1, 1); - DagBlock dag_blk5({}, {}, {}, {trxs[2]->getHash()}, 0, vdf5, dag_key5.secret()); + auto dag_blk5 = std::make_shared(blk_hash_t{}, level_t{}, vec_blk_t{}, vec_trx_t{trxs[2]->getHash()}, 0, + vdf5, dag_key5.secret()); block.dag_blocks.push_back(dag_blk5); block.transactions = trxs; - ASSERT_EQ(dag_blk1.getDifficulty(), 17); - ASSERT_EQ(dag_blk2.getDifficulty(), 17); - ASSERT_EQ(dag_blk3.getDifficulty(), 16); - ASSERT_EQ(dag_blk4.getDifficulty(), 17); - ASSERT_EQ(dag_blk5.getDifficulty(), 16); + ASSERT_EQ(dag_blk1->getDifficulty(), 17); + ASSERT_EQ(dag_blk2->getDifficulty(), 17); + ASSERT_EQ(dag_blk3->getDifficulty(), 16); + ASSERT_EQ(dag_blk4->getDifficulty(), 17); + ASSERT_EQ(dag_blk5->getDifficulty(), 16); std::vector gas_used{10, 20, 30}; diff --git a/tests/sortition_test.cpp b/tests/sortition_test.cpp index d4c79419de..3bc8023534 100644 --- a/tests/sortition_test.cpp +++ b/tests/sortition_test.cpp @@ -43,7 +43,7 @@ PeriodData createBlock(PbftPeriod period, uint16_t efficiency, size_t dag_blocks for (size_t i = 0; i < dag_blocks_count; ++i) { vec_trx_t trxs{trx_hashes.begin() + i * trx_per_block, trx_hashes.begin() + (i + 1) * trx_per_block}; - b.dag_blocks.push_back({{}, {}, {}, trxs, {}}); + b.dag_blocks.push_back(std::make_shared(blk_hash_t{}, level_t{}, vec_blk_t{}, trxs, secret_t{})); }; size_t issued_overlap_count = 0; @@ -51,7 +51,7 @@ PeriodData createBlock(PbftPeriod period, uint16_t efficiency, size_t dag_blocks size_t overlap = std::min(kTrxCount - effective_transactions - issued_overlap_count, 
trx_hashes.size()); issued_overlap_count += overlap; vec_trx_t trxs{trx_hashes.begin(), trx_hashes.begin() + overlap}; - b.dag_blocks.push_back({{}, {}, {}, trxs, {}}); + b.dag_blocks.push_back(std::make_shared(blk_hash_t{}, level_t{}, vec_blk_t{}, trxs, secret_t{})); } return b; } diff --git a/tests/test_util/include/test_util/node_dag_creation_fixture.hpp b/tests/test_util/include/test_util/node_dag_creation_fixture.hpp index d8b4d34b7d..938dc124b5 100644 --- a/tests/test_util/include/test_util/node_dag_creation_fixture.hpp +++ b/tests/test_util/include/test_util/node_dag_creation_fixture.hpp @@ -18,7 +18,7 @@ struct NodeDagCreationFixture : NodesTest { NodeDagCreationFixture() : NodesTest() {} ~NodeDagCreationFixture() = default; struct DagBlockWithTxs { - DagBlock blk; + std::shared_ptr blk; SharedTransactions trxs; }; void modifyConfig(FullNodeConfig &cfg); diff --git a/tests/test_util/include/test_util/samples.hpp b/tests/test_util/include/test_util/samples.hpp index 828bdd93ee..ac0e436b59 100644 --- a/tests/test_util/include/test_util/samples.hpp +++ b/tests/test_util/include/test_util/samples.hpp @@ -104,11 +104,8 @@ bool sendTrx(uint64_t count, unsigned port, dev::Secret secret); SharedTransactions createSignedTrxSamples(unsigned start, unsigned num, secret_t const &sk, bytes data = dev::fromHex("00FEDCBA9876543210000000")); -std::vector createMockDagBlkSamples(unsigned pivot_start, unsigned blk_num, unsigned trx_start, - unsigned trx_len, unsigned trx_overlap); +std::vector> createMockDag0(const blk_hash_t &genesis); -std::vector createMockDag0(const blk_hash_t &genesis); - -std::vector createMockDag1(const blk_hash_t &genesis); +std::vector> createMockDag1(const blk_hash_t &genesis); } // namespace taraxa::core_tests::samples diff --git a/tests/test_util/src/node_dag_creation_fixture.cpp b/tests/test_util/src/node_dag_creation_fixture.cpp index 5adc2624f9..45f6a48ad6 100644 --- a/tests/test_util/src/node_dag_creation_fixture.cpp +++ 
b/tests/test_util/src/node_dag_creation_fixture.cpp @@ -139,8 +139,9 @@ std::vector NodeDagCreationFixture::gen std::vector trx_hashes; std::transform(trx_itr, trx_itr_next, std::back_inserter(trx_hashes), [](std::shared_ptr trx) { return trx->getHash(); }); - DagBlock blk(pivot, level, tips, trx_hashes, trx_per_block * trx_estimation, vdf, node->getSecretKey()); - this_level_blocks.push_back(blk.getHash()); + auto blk = std::make_shared(pivot, level, tips, trx_hashes, trx_per_block * trx_estimation, vdf, + node->getSecretKey()); + this_level_blocks.push_back(blk->getHash()); result.emplace_back(DagBlockWithTxs{blk, SharedTransactions(trx_itr, trx_itr_next)}); trx_itr = trx_itr_next; } @@ -155,11 +156,11 @@ std::vector NodeDagCreationFixture::gen vdf_sortition::VdfSortition vdf(vdf_config, node->getVrfSecretKey(), vrf_wrapper::VrfSortitionBase::makeVrfInput(level, period_block_hash), 1, 1); vdf.computeVdfSolution(vdf_config, dag_genesis.asBytes(), false); - DagBlock blk(pivot, level, tips, {transactions.rbegin()->get()->getHash()}, trx_per_block * trx_estimation, vdf, - node->getSecretKey()); + auto blk = std::make_shared(pivot, level, tips, vec_trx_t{transactions.rbegin()->get()->getHash()}, + trx_per_block * trx_estimation, vdf, node->getSecretKey()); result.emplace_back(DagBlockWithTxs{blk, SharedTransactions(transactions.rbegin(), transactions.rbegin() + 1)}); - pivot = blk.getHash(); - tips = {blk.getHash()}; + pivot = blk->getHash(); + tips = {blk->getHash()}; trx_itr_next++; EXPECT_EQ(trx_itr_next, transactions.end()); diff --git a/tests/test_util/src/samples.cpp b/tests/test_util/src/samples.cpp index c029a700b3..b25fff901c 100644 --- a/tests/test_util/src/samples.cpp +++ b/tests/test_util/src/samples.cpp @@ -39,113 +39,84 @@ SharedTransactions createSignedTrxSamples(unsigned start, unsigned num, secret_t return trxs; } -std::vector createMockDagBlkSamples(unsigned pivot_start, unsigned blk_num, unsigned trx_start, - unsigned trx_len, unsigned trx_overlap) 
{ - assert(pivot_start + blk_num < std::numeric_limits::max()); - std::vector blks; - unsigned trx = trx_start; - for (auto i = pivot_start; i < blk_num; ++i) { - blk_hash_t pivot(i); - blk_hash_t hash(i + 1); - vec_trx_t trxs; - for (unsigned i = 0; i < trx_len; ++i, trx++) { - trxs.emplace_back(trx_hash_t(trx)); - } - for (unsigned i = 0; i < trx_overlap; ++i) { - trx--; - } - - DagBlock blk(blk_hash_t(pivot), // pivot - level_t(0), // level - {blk_hash_t(2), blk_hash_t(3), blk_hash_t(4)}, // tips - trxs, // trxs - sig_t(7777), // sig - blk_hash_t(hash), // hash - addr_t(12345)); // sender - - blks.emplace_back(blk); - } - return blks; -} - -std::vector createMockDag0(const blk_hash_t& genesis) { - std::vector blks; - DagBlock blk1(genesis, // pivot - 1, // level - {}, // tips - {}, secret_t::random()); - DagBlock blk2(genesis, // pivot - 1, // level - {}, // tips - {}, secret_t::random()); - DagBlock blk3(genesis, // pivot - 1, // level - {}, // tips - {}, secret_t::random()); - DagBlock blk4(blk1.getHash(), // pivot - 2, // level - {}, // tips - {}, secret_t::random()); - DagBlock blk5(blk1.getHash(), // pivot - 2, // level - {blk2.getHash()}, // tips - {}, secret_t::random()); - DagBlock blk6(blk3.getHash(), // pivot - 2, // level - {}, // tips - {}, secret_t::random()); - DagBlock blk7(blk5.getHash(), // pivot - 3, // level - {blk6.getHash()}, // tips - {}, secret_t::random()); - DagBlock blk8(blk5.getHash(), // pivot - 3, // level - {}, // tips - {}, secret_t::random()); - DagBlock blk9(blk6.getHash(), // pivot - 3, // level - {}, // tips - {}, secret_t::random()); - DagBlock blk10(blk7.getHash(), // pivot - 4, // level - {}, // tips - {}, secret_t::random()); - DagBlock blk11(blk7.getHash(), // pivot - 4, // level - {}, // tips - {}, secret_t::random()); - DagBlock blk12(blk9.getHash(), // pivot - 4, // level - {}, // tips - {}, secret_t::random()); - DagBlock blk13(blk10.getHash(), // pivot - 5, // level - {}, // tips - {}, secret_t::random()); - DagBlock 
blk14(blk11.getHash(), // pivot - 5, // level - {blk12.getHash()}, // tips - {}, secret_t::random()); - DagBlock blk15(blk13.getHash(), // pivot - 6, // level - {blk14.getHash()}, // tips - {}, secret_t::random()); - DagBlock blk16(blk13.getHash(), // pivot - 6, // level - {}, // tips - {}, secret_t::random()); - DagBlock blk17(blk12.getHash(), // pivot - 5, // level - {}, // tips - {}, secret_t::random()); - DagBlock blk18(blk15.getHash(), // pivot - 7, // level - {blk8.getHash(), blk16.getHash(), blk17.getHash()}, // tips - {}, secret_t::random()); - DagBlock blk19(blk18.getHash(), // pivot - 8, // level - {}, // tips - {}, secret_t::random()); +std::vector> createMockDag0(const blk_hash_t& genesis) { + std::vector> blks; + auto blk1 = std::make_shared(genesis, // pivot + 1, // level + vec_blk_t{}, // tips + vec_trx_t{}, secret_t::random()); + auto blk2 = std::make_shared(genesis, // pivot + 1, // level + vec_blk_t{}, // tips + vec_trx_t{}, secret_t::random()); + auto blk3 = std::make_shared(genesis, // pivot + 1, // level + vec_blk_t{}, // tips + vec_trx_t{}, secret_t::random()); + auto blk4 = std::make_shared(blk1->getHash(), // pivot + 2, // level + vec_blk_t{}, // tips + vec_trx_t{}, secret_t::random()); + auto blk5 = std::make_shared(blk1->getHash(), // pivot + 2, // level + vec_blk_t{blk2->getHash()}, // tips + vec_trx_t{}, secret_t::random()); + auto blk6 = std::make_shared(blk3->getHash(), // pivot + 2, // level + vec_blk_t{}, // tips + vec_trx_t{}, secret_t::random()); + auto blk7 = std::make_shared(blk5->getHash(), // pivot + 3, // level + vec_blk_t{blk6->getHash()}, // tips + vec_trx_t{}, secret_t::random()); + auto blk8 = std::make_shared(blk5->getHash(), // pivot + 3, // level + vec_blk_t{}, // tips + vec_trx_t{}, secret_t::random()); + auto blk9 = std::make_shared(blk6->getHash(), // pivot + 3, // level + vec_blk_t{}, // tips + vec_trx_t{}, secret_t::random()); + auto blk10 = std::make_shared(blk7->getHash(), // pivot + 4, // level + vec_blk_t{}, // 
tips + vec_trx_t{}, secret_t::random()); + auto blk11 = std::make_shared(blk7->getHash(), // pivot + 4, // level + vec_blk_t{}, // tips + vec_trx_t{}, secret_t::random()); + auto blk12 = std::make_shared(blk9->getHash(), // pivot + 4, // level + vec_blk_t{}, // tips + vec_trx_t{}, secret_t::random()); + auto blk13 = std::make_shared(blk10->getHash(), // pivot + 5, // level + vec_blk_t{}, // tips + vec_trx_t{}, secret_t::random()); + auto blk14 = std::make_shared(blk11->getHash(), // pivot + 5, // level + vec_blk_t{blk12->getHash()}, // tips + vec_trx_t{}, secret_t::random()); + auto blk15 = std::make_shared(blk13->getHash(), // pivot + 6, // level + vec_blk_t{blk14->getHash()}, // tips + vec_trx_t{}, secret_t::random()); + auto blk16 = std::make_shared(blk13->getHash(), // pivot + 6, // level + vec_blk_t{}, // tips + vec_trx_t{}, secret_t::random()); + auto blk17 = std::make_shared(blk12->getHash(), // pivot + 5, // level + vec_blk_t{}, // tips + vec_trx_t{}, secret_t::random()); + auto blk18 = std::make_shared(blk15->getHash(), // pivot + 7, // level + vec_blk_t{blk8->getHash(), blk16->getHash(), blk17->getHash()}, // tips + vec_trx_t{}, secret_t::random()); + auto blk19 = std::make_shared(blk18->getHash(), // pivot + 8, // level + vec_blk_t{}, // tips + vec_trx_t{}, secret_t::random()); blks.emplace_back(blk1); blks.emplace_back(blk2); blks.emplace_back(blk3); @@ -169,143 +140,143 @@ std::vector createMockDag0(const blk_hash_t& genesis) { return blks; } -std::vector createMockDag1(const blk_hash_t& genesis) { - std::vector blks; - DagBlock dummy; - DagBlock blk1(genesis, // pivot - 1, // level - {}, // tips - {}, // trxs - sig_t(0), // sig - blk_hash_t(1), // hash - addr_t(123)); +std::vector> createMockDag1(const blk_hash_t& genesis) { + std::vector> blks; + std::shared_ptr dummy; + auto blk1 = std::make_shared(genesis, // pivot + 1, // level + vec_blk_t{}, // tips + vec_trx_t{}, // trxs + sig_t(0), // sig + blk_hash_t(1), // hash + addr_t(123)); - DagBlock 
blk2(genesis, // pivot - 1, // level - {}, // tips - {}, // trxs - sig_t(0), // sig - blk_hash_t(2), // hash - addr_t(123)); - DagBlock blk3(genesis, // pivot - 1, // level - {}, // tips - {}, // trxs - sig_t(0), // sig - blk_hash_t(3), // hash - addr_t(123)); - DagBlock blk4(blk_hash_t(1), // pivot - 2, // level - {}, // tips - {}, // trxs - sig_t(0), // sig - blk_hash_t(4), // hash - addr_t(123)); - DagBlock blk5(blk_hash_t(1), // pivot - 2, // level - {blk_hash_t(2)}, // tips - {}, // trxs - sig_t(0), // sig - blk_hash_t(5), // hash - addr_t(123)); - DagBlock blk6(blk_hash_t(3), // pivot - 2, // level - {}, // tips - {}, // trxs - sig_t(0), // sig - blk_hash_t(6), // hash - addr_t(123)); - DagBlock blk7(blk_hash_t(5), // pivot - 3, // level - {blk_hash_t(6)}, // tips - {}, // trxs - sig_t(0), // sig - blk_hash_t(7), // hash - addr_t(123)); - DagBlock blk8(blk_hash_t(5), // pivot - 3, // level - {}, // tips - {}, // trxs - sig_t(0), // sig - blk_hash_t(8), // hash - addr_t(123)); - DagBlock blk9(blk_hash_t(6), // pivot - 3, // level - {}, // tips - {}, // trxs - sig_t(0), // sig - blk_hash_t(9), // hash - addr_t(123)); - DagBlock blk10(blk_hash_t(7), // pivot - 4, // level - {}, // tips - {}, // trxs - sig_t(0), // sig - blk_hash_t(10), // hash - addr_t(123)); - DagBlock blk11(blk_hash_t(7), // pivot - 4, // level - {}, // tips - {}, // trxs - sig_t(0), // sig - blk_hash_t(11), // hash - addr_t(123)); - DagBlock blk12(blk_hash_t(9), // pivot - 4, // level - {}, // tips - {}, // trxs - sig_t(0), // sig - blk_hash_t(12), // hash - addr_t(123)); - DagBlock blk13(blk_hash_t(10), // pivot - 5, // level - {}, // tips - {}, // trxs - sig_t(0), // sig - blk_hash_t(13), // hash - addr_t(123)); - DagBlock blk14(blk_hash_t(11), // pivot - 5, // level - {blk_hash_t(12)}, // tips - {}, // trxs - sig_t(0), // sig - blk_hash_t(14), // hash - addr_t(123)); - DagBlock blk15(blk_hash_t(13), // pivot - 6, // level - {blk_hash_t(14)}, // tips - {}, // trxs - sig_t(0), // sig - 
blk_hash_t(15), // hash - addr_t(123)); - DagBlock blk16(blk_hash_t(13), // pivot - 6, // level - {}, // tips - {}, // trxs - sig_t(0), // sig - blk_hash_t(16), // hash - addr_t(123)); - DagBlock blk17(blk_hash_t(12), // pivot - 5, // level - {}, // tips - {}, // trxs - sig_t(0), // sig - blk_hash_t(17), // hash - addr_t(123)); - DagBlock blk18(blk_hash_t(15), // pivot - 7, // level - {blk_hash_t(8), blk_hash_t(16), blk_hash_t(17)}, // tips - {}, // trxs - sig_t(0), // sig - blk_hash_t(18), // hash - addr_t(123)); - DagBlock blk19(blk_hash_t(18), // pivot - 8, // level - {}, // tips - {}, // trxs - sig_t(0), // sig - blk_hash_t(19), // hash - addr_t(123)); + auto blk2 = std::make_shared(genesis, // pivot + 1, // level + vec_blk_t{}, // tips + vec_trx_t{}, // trxs + sig_t(0), // sig + blk_hash_t(2), // hash + addr_t(123)); + auto blk3 = std::make_shared(genesis, // pivot + 1, // level + vec_blk_t{}, // tips + vec_trx_t{}, // trxs + sig_t(0), // sig + blk_hash_t(3), // hash + addr_t(123)); + auto blk4 = std::make_shared(blk_hash_t(1), // pivot + 2, // level + vec_blk_t{}, // tips + vec_trx_t{}, // trxs + sig_t(0), // sig + blk_hash_t(4), // hash + addr_t(123)); + auto blk5 = std::make_shared(blk_hash_t(1), // pivot + 2, // level + vec_blk_t{blk_hash_t(2)}, // tips + vec_trx_t{}, // trxs + sig_t(0), // sig + blk_hash_t(5), // hash + addr_t(123)); + auto blk6 = std::make_shared(blk_hash_t(3), // pivot + 2, // level + vec_blk_t{}, // tips + vec_trx_t{}, // trxs + sig_t(0), // sig + blk_hash_t(6), // hash + addr_t(123)); + auto blk7 = std::make_shared(blk_hash_t(5), // pivot + 3, // level + vec_blk_t{blk_hash_t(6)}, // tips + vec_trx_t{}, // trxs + sig_t(0), // sig + blk_hash_t(7), // hash + addr_t(123)); + auto blk8 = std::make_shared(blk_hash_t(5), // pivot + 3, // level + vec_blk_t{}, // tips + vec_trx_t{}, // trxs + sig_t(0), // sig + blk_hash_t(8), // hash + addr_t(123)); + auto blk9 = std::make_shared(blk_hash_t(6), // pivot + 3, // level + vec_blk_t{}, // tips + 
vec_trx_t{}, // trxs + sig_t(0), // sig + blk_hash_t(9), // hash + addr_t(123)); + auto blk10 = std::make_shared(blk_hash_t(7), // pivot + 4, // level + vec_blk_t{}, // tips + vec_trx_t{}, // trxs + sig_t(0), // sig + blk_hash_t(10), // hash + addr_t(123)); + auto blk11 = std::make_shared(blk_hash_t(7), // pivot + 4, // level + vec_blk_t{}, // tips + vec_trx_t{}, // trxs + sig_t(0), // sig + blk_hash_t(11), // hash + addr_t(123)); + auto blk12 = std::make_shared(blk_hash_t(9), // pivot + 4, // level + vec_blk_t{}, // tips + vec_trx_t{}, // trxs + sig_t(0), // sig + blk_hash_t(12), // hash + addr_t(123)); + auto blk13 = std::make_shared(blk_hash_t(10), // pivot + 5, // level + vec_blk_t{}, // tips + vec_trx_t{}, // trxs + sig_t(0), // sig + blk_hash_t(13), // hash + addr_t(123)); + auto blk14 = std::make_shared(blk_hash_t(11), // pivot + 5, // level + vec_blk_t{blk_hash_t(12)}, // tips + vec_trx_t{}, // trxs + sig_t(0), // sig + blk_hash_t(14), // hash + addr_t(123)); + auto blk15 = std::make_shared(blk_hash_t(13), // pivot + 6, // level + vec_blk_t{blk_hash_t(14)}, // tips + vec_trx_t{}, // trxs + sig_t(0), // sig + blk_hash_t(15), // hash + addr_t(123)); + auto blk16 = std::make_shared(blk_hash_t(13), // pivot + 6, // level + vec_blk_t{}, // tips + vec_trx_t{}, // trxs + sig_t(0), // sig + blk_hash_t(16), // hash + addr_t(123)); + auto blk17 = std::make_shared(blk_hash_t(12), // pivot + 5, // level + vec_blk_t{}, // tips + vec_trx_t{}, // trxs + sig_t(0), // sig + blk_hash_t(17), // hash + addr_t(123)); + auto blk18 = std::make_shared(blk_hash_t(15), // pivot + 7, // level + vec_blk_t{blk_hash_t(8), blk_hash_t(16), blk_hash_t(17)}, // tips + vec_trx_t{}, // trxs + sig_t(0), // sig + blk_hash_t(18), // hash + addr_t(123)); + auto blk19 = std::make_shared(blk_hash_t(18), // pivot + 8, // level + vec_blk_t{}, // tips + vec_trx_t{}, // trxs + sig_t(0), // sig + blk_hash_t(19), // hash + addr_t(123)); blks.emplace_back(dummy); blks.emplace_back(blk1); 
blks.emplace_back(blk2); diff --git a/tests/transaction_test.cpp b/tests/transaction_test.cpp index e75356386b..c1c61ee35f 100644 --- a/tests/transaction_test.cpp +++ b/tests/transaction_test.cpp @@ -29,7 +29,6 @@ auto g_secret = Lazy([] { }); auto g_key_pair = Lazy([] { return dev::KeyPair(g_secret); }); auto g_signed_trx_samples = Lazy([] { return samples::createSignedTrxSamples(1, NUM_TRX, g_secret); }); -auto g_blk_samples = Lazy([] { return samples::createMockDagBlkSamples(0, NUM_BLK, 0, BLK_TRX_LEN, BLK_TRX_OVERLAP); }); struct TransactionTest : NodesTest {}; @@ -203,7 +202,7 @@ TEST_F(TransactionTest, transaction_low_nonce) { EXPECT_TRUE(trx_mgr.insertTransaction(trx_1).first); EXPECT_TRUE(trx_mgr.insertTransaction(trx_2).first); std::vector trx_hashes{trx_1->getHash(), trx_2->getHash()}; - DagBlock dag_blk({}, {}, {}, trx_hashes, secret_t::random()); + auto dag_blk = std::make_shared(blk_hash_t{}, level_t{}, vec_blk_t{}, trx_hashes, secret_t::random()); db->saveDagBlock(dag_blk); std::vector reward_votes_hashes; auto pbft_block = @@ -216,7 +215,7 @@ TEST_F(TransactionTest, transaction_low_nonce) { auto batch = db->createWriteBatch(); db->savePeriodData(period_data, batch); db->commitWriteBatch(batch); - final_chain->finalize(std::move(period_data), {dag_blk.getHash()}).get(); + final_chain->finalize(std::move(period_data), {dag_blk->getHash()}).get(); // Verify low nonce transaction is detected in verification auto low_nonce_trx = std::make_shared(1, 101, 0, 100000, dev::bytes(), g_secret, addr_t::random()); From 18f19678bee6efb8fffece088662b5cb018721ea Mon Sep 17 00:00:00 2001 From: Jakub Fornadel Date: Tue, 5 Nov 2024 18:27:20 +0100 Subject: [PATCH 079/105] remove unused constants --- tests/dag_block_test.cpp | 3 --- tests/transaction_test.cpp | 3 --- 2 files changed, 6 deletions(-) diff --git a/tests/dag_block_test.cpp b/tests/dag_block_test.cpp index ecf9866999..6b3b3539c0 100644 --- a/tests/dag_block_test.cpp +++ b/tests/dag_block_test.cpp @@ -18,9 
+18,6 @@ #include "vdf/sortition.hpp" namespace taraxa::core_tests { -const unsigned NUM_BLK = 4; -const unsigned BLK_TRX_LEN = 4; -const unsigned BLK_TRX_OVERLAP = 1; using namespace vdf_sortition; struct DagBlockTest : NodesTest {}; diff --git a/tests/transaction_test.cpp b/tests/transaction_test.cpp index c1c61ee35f..66a4312e13 100644 --- a/tests/transaction_test.cpp +++ b/tests/transaction_test.cpp @@ -20,9 +20,6 @@ namespace taraxa::core_tests { const unsigned NUM_TRX = 40; -const unsigned NUM_BLK = 4; -const unsigned BLK_TRX_LEN = 4; -const unsigned BLK_TRX_OVERLAP = 1; auto g_secret = Lazy([] { return dev::Secret("3800b2875669d9b2053c1aff9224ecfdc411423aac5b5a73d7a45ced1c3b9dcd", dev::Secret::ConstructFromStringType::FromHex); From a9f9cf2b7461753366b72af871d35e76fd082a7b Mon Sep 17 00:00:00 2001 From: Jakub Fornadel Date: Mon, 11 Nov 2024 10:41:36 +0100 Subject: [PATCH 080/105] fix dag block packet double sending of txs, optimize packets ctors --- .../common/ext_votes_packet_handler.hpp | 12 ++- .../latest/dag_block_packet_handler.hpp | 4 +- .../latest/dag_block_packet_handler.cpp | 76 ++++++++----------- .../latest/get_pbft_sync_packet_handler.cpp | 1 - .../latest/transaction_packet_handler.cpp | 6 +- 5 files changed, 41 insertions(+), 58 deletions(-) diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/common/ext_votes_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/common/ext_votes_packet_handler.hpp index 4c832c97b1..7a9e662b38 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/common/ext_votes_packet_handler.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/common/ext_votes_packet_handler.hpp @@ -120,13 +120,11 @@ class ExtVotesPacketHandler : public PacketHandler { } auto sendVotes = [this, &peer](std::vector>&& votes) { - // TODO[2868]: optimize this - auto votes_copy = votes; - if 
(this->sealAndSend( - peer->getId(), SubprotocolPacketType::kVotesBundlePacket, - encodePacketRlp(VotesBundlePacket{OptimizedPbftVotesBundle{.votes = std::move(votes_copy)}}))) { - LOG(this->log_dg_) << " Votes bundle with " << votes.size() << " votes sent to " << peer->getId(); - for (const auto& vote : votes) { + auto packet = VotesBundlePacket{OptimizedPbftVotesBundle{.votes = std::move(votes)}}; + if (this->sealAndSend(peer->getId(), SubprotocolPacketType::kVotesBundlePacket, encodePacketRlp(packet))) { + LOG(this->log_dg_) << " Votes bundle with " << packet.votes_bundle.votes.size() << " votes sent to " + << peer->getId(); + for (const auto& vote : packet.votes_bundle.votes) { peer->markPbftVoteAsKnown(vote->getHash()); } } diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/dag_block_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/dag_block_packet_handler.hpp index a639a57301..14eca484ed 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/dag_block_packet_handler.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/latest/dag_block_packet_handler.hpp @@ -18,8 +18,8 @@ class DagBlockPacketHandler : public ExtSyncingPacketHandler { std::shared_ptr trx_mgr, std::shared_ptr db, const addr_t &node_addr, const std::string &logs_prefix = ""); - void sendBlockWithTransactions(dev::p2p::NodeID const &peer_id, const std::shared_ptr &block, - const SharedTransactions &trxs); + void sendBlockWithTransactions(const std::shared_ptr &peer, const std::shared_ptr &block, + SharedTransactions &&trxs); void onNewBlockReceived(std::shared_ptr &&block, const std::shared_ptr &peer = nullptr, const std::unordered_map> &trxs = {}); void onNewBlockVerified(const std::shared_ptr &block, bool proposed, const SharedTransactions &trxs); diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/dag_block_packet_handler.cpp 
b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/dag_block_packet_handler.cpp index 43bfd4e5d2..0755cdb6c8 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/dag_block_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/dag_block_packet_handler.cpp @@ -46,23 +46,15 @@ void DagBlockPacketHandler::process(DagBlockPacket &&packet, const std::shared_p onNewBlockReceived(std::move(packet.dag_block), peer, txs_map); } -void DagBlockPacketHandler::sendBlockWithTransactions(dev::p2p::NodeID const &peer_id, +void DagBlockPacketHandler::sendBlockWithTransactions(const std::shared_ptr &peer, const std::shared_ptr &block, - const SharedTransactions &trxs) { - std::shared_ptr peer = peers_state_->getPeer(peer_id); - if (!peer) { - LOG(log_wr_) << "Send dag block " << block->getHash() << ". Failed to obtain peer " << peer_id; - return; - } - + SharedTransactions &&trxs) { // This lock prevents race condition between syncing and gossiping dag blocks std::unique_lock lock(peer->mutex_for_sending_dag_blocks_); - // TODO[2868]: optimize args, use move semantics - DagBlockPacket dag_block_packet{.transactions = trxs, .dag_block = block}; - - if (!sealAndSend(peer_id, SubprotocolPacketType::kDagBlockPacket, encodePacketRlp(dag_block_packet))) { - LOG(log_wr_) << "Sending DagBlock " << block->getHash() << " failed to " << peer_id; + DagBlockPacket dag_block_packet{.transactions = std::move(trxs), .dag_block = block}; + if (!sealAndSend(peer->getId(), SubprotocolPacketType::kDagBlockPacket, encodePacketRlp(dag_block_packet))) { + LOG(log_wr_) << "Sending DagBlock " << block->getHash() << " failed to " << peer->getId(); return; } @@ -173,7 +165,7 @@ void DagBlockPacketHandler::onNewBlockVerified(const std::shared_ptr & } const auto &block_hash = block->getHash(); - LOG(log_tr_) << "Verified NewBlock " << block_hash.toString(); + LOG(log_tr_) << "Verified dag block " << block_hash.toString(); std::vector 
peers_to_send; for (auto const &peer : peers_state_->getAllPeers()) { @@ -182,45 +174,39 @@ void DagBlockPacketHandler::onNewBlockVerified(const std::shared_ptr & } } - std::string peer_and_transactions_to_log; // Sending it in same order favours some peers over others, always start with a different position const auto peers_to_send_count = peers_to_send.size(); - if (peers_to_send_count > 0) { - uint32_t start_with = rand() % peers_to_send_count; - for (uint32_t i = 0; i < peers_to_send_count; i++) { - auto peer_id = peers_to_send[(start_with + i) % peers_to_send_count]; - dev::RLPStream ts; - auto peer = peers_state_->getPeer(peer_id); - if (peer && !peer->syncing_) { - peer_and_transactions_to_log += " Peer: " + peer->getId().abridged() + " Trxs: "; - - SharedTransactions transactions_to_send; - for (const auto &trx : trxs) { - const auto &trx_hash = trx->getHash(); - if (peer->isTransactionKnown(trx_hash)) { - continue; - } - transactions_to_send.push_back(trx); - peer_and_transactions_to_log += trx_hash.abridged(); - } + if (peers_to_send_count == 0) { + return; + } - for (const auto &trx : trxs) { - assert(trx != nullptr); - const auto trx_hash = trx->getHash(); - if (peer->isTransactionKnown(trx_hash)) { - continue; - } + std::string peer_and_transactions_to_log; + uint32_t start_with = rand() % peers_to_send_count; + for (uint32_t i = 0; i < peers_to_send_count; i++) { + auto peer_id = peers_to_send[(start_with + i) % peers_to_send_count]; + auto peer = peers_state_->getPeer(peer_id); + if (!peer || peer->syncing_) { + continue; + } - transactions_to_send.push_back(trx); - peer_and_transactions_to_log += trx_hash.abridged(); - } + peer_and_transactions_to_log += " Peer: " + peer->getId().abridged() + " Trxs: "; - sendBlockWithTransactions(peer_id, block, transactions_to_send); - peer->markDagBlockAsKnown(block_hash); + SharedTransactions transactions_to_send; + for (const auto &trx : trxs) { + assert(trx != nullptr); + const auto trx_hash = trx->getHash(); 
+ if (peer->isTransactionKnown(trx_hash)) { + continue; } + + transactions_to_send.push_back(trx); + peer_and_transactions_to_log += trx_hash.abridged(); } + + sendBlockWithTransactions(peer, block, std::move(transactions_to_send)); } + LOG(log_dg_) << "Send DagBlock " << block->getHash() << " to peers: " << peer_and_transactions_to_log; - if (!peers_to_send.empty()) LOG(log_tr_) << "Sent block to " << peers_to_send.size() << " peers"; + LOG(log_tr_) << "Sent block to " << peers_to_send.size() << " peers"; } } // namespace taraxa::network::tarcap diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/get_pbft_sync_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/get_pbft_sync_packet_handler.cpp index 817a872d8a..26c3d46c75 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/get_pbft_sync_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/get_pbft_sync_packet_handler.cpp @@ -81,7 +81,6 @@ void GetPbftSyncPacketHandler::sendPbftBlocks(const std::shared_ptr assert(!reward_votes.empty()); // It is possible that the node pushed another block to the chain in the meantime if (reward_votes[0]->getPeriod() == block_period) { - // TODO[2870]: use custom votes bundle class instead of vector pbft_sync_packet = std::make_shared(last_block, std::move(*period_data), OptimizedPbftVotesBundle{std::move(reward_votes)}); } else { diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/transaction_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/transaction_packet_handler.cpp index c450488840..c5d9d1a63c 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/transaction_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/transaction_packet_handler.cpp @@ -154,10 +154,10 @@ void TransactionPacketHandler::sendTransactions(std::shared_ptr peer const auto peer_id 
= peer->getId(); LOG(log_tr_) << "sendTransactions " << transactions.first.size() << " to " << peer_id; + TransactionPacket packet{.transactions = std::move(transactions.first)}; - if (sealAndSend(peer_id, SubprotocolPacketType::kTransactionPacket, - encodePacketRlp(TransactionPacket(transactions.first)))) { - for (const auto &trx : transactions.first) { + if (sealAndSend(peer_id, SubprotocolPacketType::kTransactionPacket, encodePacketRlp(packet))) { + for (const auto &trx : packet.transactions) { peer->markTransactionAsKnown(trx->getHash()); } } From 82cb70b1a342c26c23c745551e4531b482c12528 Mon Sep 17 00:00:00 2001 From: Jakub Fornadel Date: Fri, 8 Nov 2024 11:09:56 +0100 Subject: [PATCH 081/105] fix transaction packet --- .../packets/latest/transaction_packet.hpp | 5 ++--- .../latest/transaction_packet_handler.cpp | 21 +++++++++++++++---- 2 files changed, 19 insertions(+), 7 deletions(-) diff --git a/libraries/core_libs/network/include/network/tarcap/packets/latest/transaction_packet.hpp b/libraries/core_libs/network/include/network/tarcap/packets/latest/transaction_packet.hpp index 4a46325f94..3ef4d4f3fa 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets/latest/transaction_packet.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets/latest/transaction_packet.hpp @@ -6,10 +6,9 @@ namespace taraxa::network::tarcap { struct TransactionPacket { std::vector> transactions; + std::vector extra_transactions_hashes; - RLP_FIELDS_DEFINE_INPLACE(transactions) - - constexpr static uint32_t kMaxTransactionsInPacket{500}; + RLP_FIELDS_DEFINE_INPLACE(transactions, extra_transactions_hashes) }; } // namespace taraxa::network::tarcap diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/transaction_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/transaction_packet_handler.cpp index c5d9d1a63c..f57fbee88d 100644 --- 
a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/transaction_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/transaction_packet_handler.cpp @@ -17,7 +17,19 @@ TransactionPacketHandler::TransactionPacketHandler(const FullNodeConfig &conf, s inline void TransactionPacketHandler::process(TransactionPacket &&packet, const std::shared_ptr &peer) { if (packet.transactions.size() > kMaxTransactionsInPacket) { - throw InvalidRlpItemsCountException("TransactionPacket", packet.transactions.size(), kMaxTransactionsInPacket); + throw InvalidRlpItemsCountException("TransactionPacket:transactions", packet.transactions.size(), + kMaxTransactionsInPacket); + } + + if (packet.extra_transactions_hashes.size() > kMaxHashesInPacket) { + throw InvalidRlpItemsCountException("TransactionPacket:hashes", packet.extra_transactions_hashes.size(), + kMaxHashesInPacket); + } + + // Extra hashes are hashes of transactions that were not sent as full transactions due to max limit, just mark them as + // known for sender + for (const auto &extra_tx_hash : packet.extra_transactions_hashes) { + peer->markTransactionAsKnown(extra_tx_hash); } size_t unseen_txs_count = 0; @@ -134,7 +146,6 @@ TransactionPacketHandler::transactionsToSendToPeers(std::vector &&transactions) { - // TODO[2871]: do not process hashes auto peers_with_transactions_to_send = transactionsToSendToPeers(std::move(transactions)); const auto peers_to_send_count = peers_with_transactions_to_send.size(); if (peers_to_send_count > 0) { @@ -147,19 +158,21 @@ void TransactionPacketHandler::periodicSendTransactions(std::vector peer, std::pair> &&transactions) { if (!peer) return; const auto peer_id = peer->getId(); LOG(log_tr_) << "sendTransactions " << transactions.first.size() << " to " << peer_id; - TransactionPacket packet{.transactions = std::move(transactions.first)}; + TransactionPacket packet{.transactions = std::move(transactions.first), .extra_transactions_hashes = 
std::move(transactions.second)}; if (sealAndSend(peer_id, SubprotocolPacketType::kTransactionPacket, encodePacketRlp(packet))) { for (const auto &trx : packet.transactions) { peer->markTransactionAsKnown(trx->getHash()); } + for (const auto &trx_hash : packet.extra_transactions_hashes) { + peer->markTransactionAsKnown(trx_hash); + } } } From 846610c590d96318527f53bc8df69feb51585ec0 Mon Sep 17 00:00:00 2001 From: Jakub Fornadel Date: Mon, 11 Nov 2024 11:38:32 +0100 Subject: [PATCH 082/105] mark received tx as known for the peer --- .../latest/transaction_packet_handler.cpp | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/transaction_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/transaction_packet_handler.cpp index f57fbee88d..f7fb8cc327 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/transaction_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/transaction_packet_handler.cpp @@ -34,8 +34,11 @@ inline void TransactionPacketHandler::process(TransactionPacket &&packet, const size_t unseen_txs_count = 0; for (auto &transaction : packet.transactions) { + const auto tx_hash = transaction->getHash(); + peer->markTransactionAsKnown(tx_hash); + // Skip any transactions that are already known to the trx mgr - if (trx_mgr_->isTransactionKnown(transaction->getHash())) { + if (trx_mgr_->isTransactionKnown(tx_hash)) { continue; } @@ -44,12 +47,12 @@ inline void TransactionPacketHandler::process(TransactionPacket &&packet, const const auto [verified, reason] = trx_mgr_->verifyTransaction(transaction); if (!verified) { std::ostringstream err_msg; - err_msg << "DagBlock transaction " << transaction->getHash() << " validation failed: " << reason; + err_msg << "DagBlock transaction " << tx_hash << " validation failed: " << reason; throw MaliciousPeerException(err_msg.str()); } 
received_trx_count_++; - const auto tx_hash = transaction->getHash(); + const auto status = trx_mgr_->insertValidatedTransaction(std::move(transaction)); if (status == TransactionStatus::Inserted) { unique_received_trx_count_++; From 1cd58bb621d5bc60a2be14aa97dd3e99d274f384 Mon Sep 17 00:00:00 2001 From: Jakub Fornadel Date: Wed, 13 Nov 2024 12:14:27 +0100 Subject: [PATCH 083/105] fix transacion packet sending --- .../packets_handlers/latest/transaction_packet_handler.cpp | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/transaction_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/transaction_packet_handler.cpp index f7fb8cc327..d4c45e2ec9 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/latest/transaction_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/latest/transaction_packet_handler.cpp @@ -167,15 +167,14 @@ void TransactionPacketHandler::sendTransactions(std::shared_ptr peer const auto peer_id = peer->getId(); LOG(log_tr_) << "sendTransactions " << transactions.first.size() << " to " << peer_id; - TransactionPacket packet{.transactions = std::move(transactions.first), .extra_transactions_hashes = std::move(transactions.second)}; + TransactionPacket packet{.transactions = std::move(transactions.first), + .extra_transactions_hashes = std::move(transactions.second)}; if (sealAndSend(peer_id, SubprotocolPacketType::kTransactionPacket, encodePacketRlp(packet))) { for (const auto &trx : packet.transactions) { peer->markTransactionAsKnown(trx->getHash()); } - for (const auto &trx_hash : packet.extra_transactions_hashes) { - peer->markTransactionAsKnown(trx_hash); - } + // Note: do not mark packet.extra_transactions_hashes as known for peer - we are sending just hashes, not full txs } } From b9a1069e05724ce5afaa1442666f3a5de0091cb1 Mon Sep 17 00:00:00 2001 From: Jakub Fornadel Date: Wed, 13 Nov 
2024 14:25:58 +0100 Subject: [PATCH 084/105] post merge fixes --- libraries/core_libs/consensus/src/dag/dag_manager.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/libraries/core_libs/consensus/src/dag/dag_manager.cpp b/libraries/core_libs/consensus/src/dag/dag_manager.cpp index fd4e89e9ac..301ee7c885 100644 --- a/libraries/core_libs/consensus/src/dag/dag_manager.cpp +++ b/libraries/core_libs/consensus/src/dag/dag_manager.cpp @@ -170,8 +170,8 @@ std::pair> DagManager::addDagBlock(const std::shar max_level_ = std::max(current_max_level, blk->getLevel()); addToDag(blk_hash, pivot_hash, tips, blk->getLevel()); - if (non_finalized_blks_min_difficulty_ > blk.getDifficulty()) { - non_finalized_blks_min_difficulty_ = blk.getDifficulty(); + if (non_finalized_blks_min_difficulty_ > blk->getDifficulty()) { + non_finalized_blks_min_difficulty_ = blk->getDifficulty(); } updateFrontier(); From ed13008de52f4b0b156ded082b53c77205059290 Mon Sep 17 00:00:00 2001 From: mfrankovi Date: Fri, 22 Nov 2024 11:13:06 +0100 Subject: [PATCH 085/105] chore: trusted nodes --- libraries/aleth/libp2p/Host.cpp | 7 +++++-- libraries/aleth/libp2p/Network.h | 3 +++ .../include/cli/config_jsons/testnet/testnet_config.json | 2 +- libraries/config/include/config/network.hpp | 2 ++ libraries/config/src/network.cpp | 6 ++++++ libraries/core_libs/network/src/network.cpp | 1 + 6 files changed, 18 insertions(+), 3 deletions(-) diff --git a/libraries/aleth/libp2p/Host.cpp b/libraries/aleth/libp2p/Host.cpp index 582682f3b9..e7f3a1a8b0 100644 --- a/libraries/aleth/libp2p/Host.cpp +++ b/libraries/aleth/libp2p/Host.cpp @@ -287,7 +287,8 @@ void Host::startPeerSession(Public const& _id, RLP const& _hello, unique_ptraddress()); + if (!disconnect_reason && (!peerSlotsAvailable() && !is_trusted_node)) { cnetdetails << "Too many peers, can't connect. 
peer count: " << peer_count_() << " pending peers: " << m_pendingPeerConns.size(); disconnect_reason = TooManyPeers; @@ -411,7 +412,9 @@ void Host::runAcceptor() { return; } auto socket = make_shared(std::move(_socket)); - if (peer_count_() > peerSlots(Ingress)) { + // Since a connecting peer might be a trusted node which should always connect allow up to max number of trusted + // nodes above the limit + if (peer_count_() > (peerSlots(Ingress) + m_netConfig.trustedNodes.size())) { cnetdetails << "Dropping incoming connect due to maximum peer count (" << Ingress << " * ideal peer count): " << socket->remoteEndpoint(); socket->close(); diff --git a/libraries/aleth/libp2p/Network.h b/libraries/aleth/libp2p/Network.h index 47e008aa81..082a2b3ba1 100644 --- a/libraries/aleth/libp2p/Network.h +++ b/libraries/aleth/libp2p/Network.h @@ -45,6 +45,9 @@ struct NetworkConfig { std::string listenIPAddress; uint16_t listenPort = c_defaultListenPort; + /// Trusted Nodes + std::unordered_set trustedNodes; + /// Preferences bool traverseNAT = true; diff --git a/libraries/cli/include/cli/config_jsons/testnet/testnet_config.json b/libraries/cli/include/cli/config_jsons/testnet/testnet_config.json index 676a6428c8..2667146a60 100644 --- a/libraries/cli/include/cli/config_jsons/testnet/testnet_config.json +++ b/libraries/cli/include/cli/config_jsons/testnet/testnet_config.json @@ -30,7 +30,7 @@ "listen_port": 10002, "transaction_interval_ms": 100, "ideal_peer_count": 10, - "max_peer_count": 50, + "max_peer_count": 20, "sync_level_size": 10, "packets_processing_threads": 14, "peer_blacklist_timeout": 600, diff --git a/libraries/config/include/config/network.hpp b/libraries/config/include/config/network.hpp index 477a8155cd..e084986be2 100644 --- a/libraries/config/include/config/network.hpp +++ b/libraries/config/include/config/network.hpp @@ -5,6 +5,7 @@ #include #include "common/types.hpp" +#include "libp2p/Common.h" namespace taraxa { @@ -82,6 +83,7 @@ struct NetworkConfig { bool 
disable_peer_blacklist = false; uint16_t deep_syncing_threshold = 10; DdosProtectionConfig ddos_protection; + std::unordered_set trusted_nodes; std::optional rpc; std::optional graphql; diff --git a/libraries/config/src/network.cpp b/libraries/config/src/network.cpp index cb37200713..7e6b24cce8 100644 --- a/libraries/config/src/network.cpp +++ b/libraries/config/src/network.cpp @@ -133,6 +133,12 @@ void dec_json(const Json::Value &json, NetworkConfig &network) { network.listen_port = getConfigDataAsUInt(json, {"listen_port"}); network.transaction_interval_ms = getConfigDataAsUInt(json, {"transaction_interval_ms"}); network.ideal_peer_count = getConfigDataAsUInt(json, {"ideal_peer_count"}); + Json::Value priority_nodes = json["priority_nodes"]; + if (!priority_nodes.isNull()) { + for (const auto &item : priority_nodes) { + network.trusted_nodes.insert(dev::p2p::NodeID(item.asString())); + } + } network.max_peer_count = getConfigDataAsUInt(json, {"max_peer_count"}); network.sync_level_size = getConfigDataAsUInt(json, {"sync_level_size"}); network.packets_processing_threads = getConfigDataAsUInt(json, {"packets_processing_threads"}); diff --git a/libraries/core_libs/network/src/network.cpp b/libraries/core_libs/network/src/network.cpp index 1e8ad6f63d..84b1130380 100644 --- a/libraries/core_libs/network/src/network.cpp +++ b/libraries/core_libs/network/src/network.cpp @@ -58,6 +58,7 @@ Network::Network(const FullNodeConfig &config, const h256 &genesis_hash, std::fi net_conf.traverseNAT = false; net_conf.publicIPAddress = config.network.public_ip; net_conf.pin = false; + net_conf.trustedNodes = config.network.trusted_nodes; dev::p2p::TaraxaNetworkConfig taraxa_net_conf; taraxa_net_conf.ideal_peer_count = config.network.ideal_peer_count; From 980df69d369d74cab62eb128342ddc7c9d2d5f1b Mon Sep 17 00:00:00 2001 From: Matus Kysel Date: Tue, 26 Nov 2024 12:45:08 +0000 Subject: [PATCH 086/105] chore: update to latest develop --- submodules/taraxa-evm | 2 +- 1 file changed, 1 
insertion(+), 1 deletion(-) diff --git a/submodules/taraxa-evm b/submodules/taraxa-evm index 77135fe103..82c0f28981 160000 --- a/submodules/taraxa-evm +++ b/submodules/taraxa-evm @@ -1 +1 @@ -Subproject commit 77135fe10352156f4266e57d9dc162e33257c48c +Subproject commit 82c0f2898115f13d9cb56115a82cd4b22b712555 From 7453615bb51ec1c98a9cb2b320b314c7b3da2698 Mon Sep 17 00:00:00 2001 From: Matus Kysel Date: Tue, 26 Nov 2024 20:56:22 +0100 Subject: [PATCH 087/105] update evm --- submodules/taraxa-evm | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/submodules/taraxa-evm b/submodules/taraxa-evm index 82c0f28981..870992b61c 160000 --- a/submodules/taraxa-evm +++ b/submodules/taraxa-evm @@ -1 +1 @@ -Subproject commit 82c0f2898115f13d9cb56115a82cd4b22b712555 +Subproject commit 870992b61c7b183941622180880120c9662d9c14 From b1c4ab2c90b0568b0fc3c2c1f00bb17ea798f18c Mon Sep 17 00:00:00 2001 From: mfrankovi Date: Thu, 14 Nov 2024 10:57:02 +0100 Subject: [PATCH 088/105] Update version to 1.12.1 --- CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index a22fb579b0..f411985bb6 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -3,7 +3,7 @@ cmake_minimum_required(VERSION 3.20) # Set current version of the project set(TARAXA_MAJOR_VERSION 1) set(TARAXA_MINOR_VERSION 12) -set(TARAXA_PATCH_VERSION 0) +set(TARAXA_PATCH_VERSION 1) set(TARAXA_VERSION ${TARAXA_MAJOR_VERSION}.${TARAXA_MINOR_VERSION}.${TARAXA_PATCH_VERSION}) # Any time a change in the network protocol is introduced this version should be increased From 5c8c0f4a6fda6b91bc9da577ac29089a09b9268e Mon Sep 17 00:00:00 2001 From: kstdl Date: Fri, 22 Nov 2024 11:17:15 +0100 Subject: [PATCH 089/105] fix: state during the tracing --- .../include/final_chain/final_chain.hpp | 7 +- .../include/final_chain/state_api.hpp | 4 +- .../consensus/src/final_chain/final_chain.cpp | 6 +- .../consensus/src/final_chain/state_api.cpp | 8 +- 
.../src/pillar_chain/pillar_votes.cpp | 2 +- libraries/core_libs/network/rpc/Debug.cpp | 75 ++++++++++--------- libraries/core_libs/network/rpc/Debug.h | 5 +- submodules/CMakeLists.txt | 2 +- submodules/taraxa-evm | 2 +- 9 files changed, 61 insertions(+), 50 deletions(-) diff --git a/libraries/core_libs/consensus/include/final_chain/final_chain.hpp b/libraries/core_libs/consensus/include/final_chain/final_chain.hpp index 57d08d142d..a98c7c705f 100644 --- a/libraries/core_libs/consensus/include/final_chain/final_chain.hpp +++ b/libraries/core_libs/consensus/include/final_chain/final_chain.hpp @@ -186,8 +186,8 @@ class FinalChain { * @param blk_n EthBlockNumber number of block we are getting state from * @return std::string */ - std::string trace(std::vector trx, EthBlockNumber blk_n, - std::optional params = {}) const; + std::string trace(std::vector state_trxs, std::vector trxs, + EthBlockNumber blk_n, std::optional params = {}) const; /** * @brief total count of eligible votes are in DPOS precompiled contract @@ -271,9 +271,10 @@ class FinalChain { std::vector&& finalized_dag_blk_hashes, std::shared_ptr&& anchor); + const SharedTransactions getTransactions(std::optional n = {}) const; + private: std::shared_ptr getTransactionHashes(std::optional n = {}) const; - const SharedTransactions getTransactions(std::optional n = {}) const; std::shared_ptr getBlockHeader(EthBlockNumber n) const; std::optional getBlockHash(EthBlockNumber n) const; EthBlockNumber lastIfAbsent(const std::optional& client_blk_n) const; diff --git a/libraries/core_libs/consensus/include/final_chain/state_api.hpp b/libraries/core_libs/consensus/include/final_chain/state_api.hpp index cffffad3f0..291fce35af 100644 --- a/libraries/core_libs/consensus/include/final_chain/state_api.hpp +++ b/libraries/core_libs/consensus/include/final_chain/state_api.hpp @@ -42,8 +42,8 @@ class StateAPI { h256 get_account_storage(EthBlockNumber blk_num, const addr_t& addr, const u256& key) const; bytes 
get_code_by_address(EthBlockNumber blk_num, const addr_t& addr) const; ExecutionResult dry_run_transaction(EthBlockNumber blk_num, const EVMBlock& blk, const EVMTransaction& trx) const; - bytes trace(EthBlockNumber blk_num, const EVMBlock& blk, const std::vector trx, - std::optional params = {}) const; + bytes trace(EthBlockNumber blk_num, const EVMBlock& blk, const std::vector& state_trxs, + const std::vector& trxs, std::optional params = {}) const; StateDescriptor get_last_committed_state_descriptor() const; const TransactionsExecutionResult& execute_transactions(const EVMBlock& block, diff --git a/libraries/core_libs/consensus/src/final_chain/final_chain.cpp b/libraries/core_libs/consensus/src/final_chain/final_chain.cpp index 19f5ea47a9..1c07801b9f 100644 --- a/libraries/core_libs/consensus/src/final_chain/final_chain.cpp +++ b/libraries/core_libs/consensus/src/final_chain/final_chain.cpp @@ -2,6 +2,7 @@ #include "common/encoding_solidity.hpp" #include "common/util.hpp" +#include "final_chain/state_api_data.hpp" #include "final_chain/trie_common.hpp" #include "pbft/pbft_block.hpp" #include "transaction/system_transaction.hpp" @@ -446,7 +447,8 @@ state_api::ExecutionResult FinalChain::call(const state_api::EVMTransaction& trx trx); } -std::string FinalChain::trace(std::vector trxs, EthBlockNumber blk_n, +std::string FinalChain::trace(std::vector state_trxs, + std::vector trxs, EthBlockNumber blk_n, std::optional params) const { const auto blk_header = blockHeader(lastIfAbsent(blk_n)); if (!blk_header) { @@ -459,7 +461,7 @@ std::string FinalChain::trace(std::vector trxs, EthBl blk_header->timestamp, BlockHeader::difficulty(), }, - trxs, params)); + state_trxs, trxs, params)); } uint64_t FinalChain::dposEligibleTotalVoteCount(EthBlockNumber blk_num) const { diff --git a/libraries/core_libs/consensus/src/final_chain/state_api.cpp b/libraries/core_libs/consensus/src/final_chain/state_api.cpp index 9366ae901b..1ed4ea86f4 100644 --- 
a/libraries/core_libs/consensus/src/final_chain/state_api.cpp +++ b/libraries/core_libs/consensus/src/final_chain/state_api.cpp @@ -164,10 +164,10 @@ ExecutionResult StateAPI::dry_run_transaction(EthBlockNumber blk_num, const EVMB trx); } -bytes StateAPI::trace(EthBlockNumber blk_num, const EVMBlock& blk, const std::vector trxs, - std::optional params) const { - return c_method_args_rlp(this_c_, blk_num, blk, trxs, - params); +bytes StateAPI::trace(EthBlockNumber blk_num, const EVMBlock& blk, const std::vector& state_trxs, + const std::vector& trxs, std::optional params) const { + return c_method_args_rlp(this_c_, blk_num, blk, state_trxs, + trxs, params); } StateDescriptor StateAPI::get_last_committed_state_descriptor() const { diff --git a/libraries/core_libs/consensus/src/pillar_chain/pillar_votes.cpp b/libraries/core_libs/consensus/src/pillar_chain/pillar_votes.cpp index a127eba448..4d2bed6a4b 100644 --- a/libraries/core_libs/consensus/src/pillar_chain/pillar_votes.cpp +++ b/libraries/core_libs/consensus/src/pillar_chain/pillar_votes.cpp @@ -132,7 +132,7 @@ bool PillarVotes::addVerifiedVote(const std::shared_ptr& vote, uint6 void PillarVotes::initializePeriodData(PbftPeriod period, uint64_t threshold) { std::scoped_lock lock(mutex_); - votes_.insert({period, PeriodVotes{.threshold = threshold}}); + votes_.insert({period, PeriodVotes{{}, {}, threshold}}); } void PillarVotes::eraseVotes(PbftPeriod min_period) { diff --git a/libraries/core_libs/network/rpc/Debug.cpp b/libraries/core_libs/network/rpc/Debug.cpp index 1f4c158f6e..e62990b409 100644 --- a/libraries/core_libs/network/rpc/Debug.cpp +++ b/libraries/core_libs/network/rpc/Debug.cpp @@ -6,6 +6,7 @@ #include "common/jsoncpp.hpp" #include "final_chain/state_api_data.hpp" #include "network/rpc/eth/data.hpp" +#include "transaction/transaction.hpp" using namespace std; using namespace dev; @@ -14,24 +15,12 @@ using namespace taraxa; namespace taraxa::net { -Json::Value Debug::debug_traceTransaction(const 
std::string& transaction_hash) { - Json::Value res; - auto [trx, loc] = get_transaction_with_location(transaction_hash); - if (!trx || !loc) { - throw std::runtime_error("Transaction not found"); - } - if (auto node = full_node_.lock()) { - return util::readJsonFromString(node->getFinalChain()->trace({to_eth_trx(std::move(trx))}, loc->period)); - } - return res; -} - Json::Value Debug::debug_traceCall(const Json::Value& call_params, const std::string& blk_num) { Json::Value res; const auto block = parse_blk_num(blk_num); auto trx = to_eth_trx(call_params, block); if (auto node = full_node_.lock()) { - return util::readJsonFromString(node->getFinalChain()->trace({std::move(trx)}, block)); + return util::readJsonFromString(node->getFinalChain()->trace({}, {std::move(trx)}, block)); } return res; } @@ -43,7 +32,34 @@ Json::Value Debug::trace_call(const Json::Value& call_params, const Json::Value& auto params = parse_tracking_parms(trace_params); if (auto node = full_node_.lock()) { return util::readJsonFromString( - node->getFinalChain()->trace({to_eth_trx(call_params, block)}, block, std::move(params))); + node->getFinalChain()->trace({}, {to_eth_trx(call_params, block)}, block, std::move(params))); + } + return res; +} + +std::tuple, state_api::EVMTransaction, uint64_t> +Debug::get_transaction_with_state(const std::string& transaction_hash) { + auto node = full_node_.lock(); + if (!node) { + return {}; + } + const auto hash = jsToFixed<32>(transaction_hash); + + auto loc = node->getFinalChain()->transactionLocation(hash); + if (!loc) { + throw std::runtime_error("Transaction not found"); + } + auto block_transactions = node->getFinalChain()->getTransactions(loc->period); + + auto state_trxs = SharedTransactions(block_transactions.begin(), block_transactions.begin() + loc->position); + + return {to_eth_trxs(state_trxs), to_eth_trx(block_transactions[loc->position]), loc->period}; +} +Json::Value Debug::debug_traceTransaction(const std::string& transaction_hash) { + 
Json::Value res; + auto [state_trxs, trx, period] = get_transaction_with_state(transaction_hash); + if (auto node = full_node_.lock()) { + return util::readJsonFromString(node->getFinalChain()->trace({}, {trx}, period)); } return res; } @@ -51,13 +67,9 @@ Json::Value Debug::trace_call(const Json::Value& call_params, const Json::Value& Json::Value Debug::trace_replayTransaction(const std::string& transaction_hash, const Json::Value& trace_params) { Json::Value res; auto params = parse_tracking_parms(trace_params); - auto [trx, loc] = get_transaction_with_location(transaction_hash); - if (!trx || !loc) { - throw std::runtime_error("Transaction not found"); - } + auto [state_trxs, trx, period] = get_transaction_with_state(transaction_hash); if (auto node = full_node_.lock()) { - return util::readJsonFromString( - node->getFinalChain()->trace({to_eth_trx(std::move(trx))}, loc->period, std::move(params))); + return util::readJsonFromString(node->getFinalChain()->trace(state_trxs, {trx}, period, params)); } return res; } @@ -71,11 +83,8 @@ Json::Value Debug::trace_replayBlockTransactions(const std::string& block_num, c if (!transactions.has_value() || transactions->empty()) { return Json::Value(Json::arrayValue); } - std::vector trxs; - trxs.reserve(transactions->size()); - std::transform(transactions->begin(), transactions->end(), std::back_inserter(trxs), - [this](auto t) { return to_eth_trx(std::move(t)); }); - return util::readJsonFromString(node->getFinalChain()->trace(std::move(trxs), block, std::move(params))); + std::vector trxs = to_eth_trxs(*transactions); + return util::readJsonFromString(node->getFinalChain()->trace({}, std::move(trxs), block, std::move(params))); } return res; } @@ -253,6 +262,13 @@ state_api::Tracing Debug::parse_tracking_parms(const Json::Value& json) const { return ret; } +std::vector Debug::to_eth_trxs(const std::vector>& trxs) { + std::vector eth_trxs; + eth_trxs.reserve(trxs.size()); + std::transform(trxs.begin(), trxs.end(), 
std::back_inserter(eth_trxs), + [this](auto t) { return to_eth_trx(std::move(t)); }); + return eth_trxs; +} state_api::EVMTransaction Debug::to_eth_trx(std::shared_ptr t) const { return state_api::EVMTransaction{ t->getSender(), t->getGasPrice(), t->getReceiver(), t->getNonce(), t->getValue(), t->getGas(), t->getData(), @@ -329,13 +345,4 @@ Address Debug::to_address(const string& s) const { throw InvalidAddress(); } -std::pair, std::optional> -Debug::get_transaction_with_location(const std::string& transaction_hash) const { - if (auto node = full_node_.lock()) { - const auto hash = jsToFixed<32>(transaction_hash); - return {node->getDB()->getTransaction(hash), node->getFinalChain()->transactionLocation(hash)}; - } - return {}; -} - } // namespace taraxa::net \ No newline at end of file diff --git a/libraries/core_libs/network/rpc/Debug.h b/libraries/core_libs/network/rpc/Debug.h index 7d897e7fe5..29cfc1d2ac 100644 --- a/libraries/core_libs/network/rpc/Debug.h +++ b/libraries/core_libs/network/rpc/Debug.h @@ -47,11 +47,12 @@ class Debug : public DebugFace { private: state_api::EVMTransaction to_eth_trx(std::shared_ptr t) const; state_api::EVMTransaction to_eth_trx(const Json::Value& json, EthBlockNumber blk_num); + std::vector to_eth_trxs(const std::vector>& trxs); EthBlockNumber parse_blk_num(const string& blk_num_str); state_api::Tracing parse_tracking_parms(const Json::Value& json) const; Address to_address(const string& s) const; - std::pair, std::optional> - get_transaction_with_location(const std::string& transaction_hash) const; + std::tuple, state_api::EVMTransaction, uint64_t> get_transaction_with_state( + const std::string& transaction_hash); std::weak_ptr full_node_; const uint64_t kGasLimit = ((uint64_t)1 << 53) - 1; diff --git a/submodules/CMakeLists.txt b/submodules/CMakeLists.txt index 4363497eb8..ad5b9bced0 100644 --- a/submodules/CMakeLists.txt +++ b/submodules/CMakeLists.txt @@ -117,7 +117,7 @@ set(EVM_AFTER_BUILD_COMMAND ${EVM_AFTER_BUILD_COMMAND} 
&& mv ${EVM_BUILD_DIR}/li ## final command set(EVM_LIBRARY_COMMAND ${EVM_BUILD_COMMAND} && ${EVM_AFTER_BUILD_COMMAND}) -file(GLOB_RECURSE TARAXA_EVM_SOURCES "taraxa-evm/*.go" ) +file(GLOB_RECURSE TARAXA_EVM_SOURCES CONFIGURE_DEPENDS "taraxa-evm/*.go" ) list(APPEND TARAXA_EVM_SOURCES ${CMAKE_CURRENT_SOURCE_DIR}/taraxa-evm/taraxa/C/common.h ${CMAKE_CURRENT_SOURCE_DIR}/taraxa-evm/taraxa/C/state.h) diff --git a/submodules/taraxa-evm b/submodules/taraxa-evm index 870992b61c..4a41887d13 160000 --- a/submodules/taraxa-evm +++ b/submodules/taraxa-evm @@ -1 +1 @@ -Subproject commit 870992b61c7b183941622180880120c9662d9c14 +Subproject commit 4a41887d131a5841c165d981889348bcd6e96ac4 From 5da06fb2023ba4b4c73d9d8b67b21d7e8feefabf Mon Sep 17 00:00:00 2001 From: Jakub Fornadel Date: Tue, 26 Nov 2024 14:56:44 +0100 Subject: [PATCH 090/105] implement sequoia hardfork --- libraries/config/include/config/hardfork.hpp | 12 ++++++++++++ libraries/config/src/hardfork.cpp | 19 ++++++++++++++++++- submodules/taraxa-evm | 2 +- 3 files changed, 31 insertions(+), 2 deletions(-) diff --git a/libraries/config/include/config/hardfork.hpp b/libraries/config/include/config/hardfork.hpp index a591ee860e..af45335b38 100644 --- a/libraries/config/include/config/hardfork.hpp +++ b/libraries/config/include/config/hardfork.hpp @@ -90,6 +90,15 @@ void dec_json(const Json::Value& json, FicusHardforkConfig& obj); // Json::Value enc_json(const BambooHardfork& obj); // void dec_json(const Json::Value& json, BambooHardfork& obj); +struct SequoiaHardforkConfig { + uint64_t block_num = -1; + uint32_t delegation_locking_period = 5; // number of blocks + + HAS_RLP_FIELDS +}; +Json::Value enc_json(const SequoiaHardforkConfig& obj); +void dec_json(const Json::Value& json, SequoiaHardforkConfig& obj); + struct HardforksConfig { // disable it by default (set to max uint64) uint64_t fix_redelegate_block_num = -1; @@ -133,6 +142,9 @@ struct HardforksConfig { // Cornus hf - support multiple undelegations from the same 
validator at the same time uint64_t cornus_hf_block_num{0}; + // Sequoia hardfork: change of delegation locking period + SequoiaHardforkConfig sequoia_hf; + bool isCornusHardfork(uint64_t block_number) const { return block_number >= cornus_hf_block_num; } HAS_RLP_FIELDS diff --git a/libraries/config/src/hardfork.cpp b/libraries/config/src/hardfork.cpp index dd0c85d5b8..f68ec59d3d 100644 --- a/libraries/config/src/hardfork.cpp +++ b/libraries/config/src/hardfork.cpp @@ -138,6 +138,20 @@ RLP_FIELDS_DEFINE(FicusHardforkConfig, block_num, pillar_blocks_interval, bridge // } // RLP_FIELDS_DEFINE(BambooHardfork, block_num, redelegations) +Json::Value enc_json(const SequoiaHardforkConfig& obj) { + Json::Value json(Json::objectValue); + json["block_num"] = dev::toJS(obj.block_num); + json["delegation_locking_period"] = dev::toJS(obj.delegation_locking_period); + return json; +} + +void dec_json(const Json::Value& json, SequoiaHardforkConfig& obj) { + obj.block_num = json["block_num"].isUInt64() ? dev::getUInt(json["block_num"]) : uint64_t(-1); + obj.delegation_locking_period = dev::getUInt(json["delegation_locking_period"]); +} + +RLP_FIELDS_DEFINE(SequoiaHardforkConfig, block_num, delegation_locking_period) + Json::Value enc_json(const HardforksConfig& obj) { Json::Value json(Json::objectValue); json["fix_redelegate_block_num"] = dev::toJS(obj.fix_redelegate_block_num); @@ -159,6 +173,7 @@ Json::Value enc_json(const HardforksConfig& obj) { json["ficus_hf"] = enc_json(obj.ficus_hf); // json["bamboo_hf"] = enc_json(obj.bamboo_hf); json["cornus_hf_block_num"] = dev::toJS(obj.cornus_hf_block_num); + json["sequoia_hf"] = enc_json(obj.sequoia_hf); return json; } @@ -192,8 +207,10 @@ void dec_json(const Json::Value& json, HardforksConfig& obj) { // dec_json(json["bamboo_hf"], obj.bamboo_hf); obj.cornus_hf_block_num = json["cornus_hf_block_num"].isUInt64() ? 
dev::getUInt(json["cornus_hf_block_num"]) : uint64_t(-1); + dec_json(json["sequoia_hf"], obj.sequoia_hf); } RLP_FIELDS_DEFINE(HardforksConfig, fix_redelegate_block_num, redelegations, rewards_distribution_frequency, magnolia_hf, - phalaenopsis_hf_block_num, fix_claim_all_block_num, aspen_hf, ficus_hf, cornus_hf_block_num) + phalaenopsis_hf_block_num, fix_claim_all_block_num, aspen_hf, ficus_hf, cornus_hf_block_num, + sequoia_hf) } // namespace taraxa \ No newline at end of file diff --git a/submodules/taraxa-evm b/submodules/taraxa-evm index 4a41887d13..073e2123c1 160000 --- a/submodules/taraxa-evm +++ b/submodules/taraxa-evm @@ -1 +1 @@ -Subproject commit 4a41887d131a5841c165d981889348bcd6e96ac4 +Subproject commit 073e2123c17250eba96f357b524d37d44689b8c3 From 0f36f1a3716bf162562dda6350e3686e1e92f846 Mon Sep 17 00:00:00 2001 From: Jakub Fornadel Date: Thu, 28 Nov 2024 11:29:06 +0100 Subject: [PATCH 091/105] make locking period change part of cornus hardfork --- libraries/config/include/config/hardfork.hpp | 14 ++++++-------- libraries/config/src/hardfork.cpp | 16 ++++++---------- .../include/network/tarcap/taraxa_capability.hpp | 2 +- submodules/taraxa-evm | 2 +- 4 files changed, 14 insertions(+), 20 deletions(-) diff --git a/libraries/config/include/config/hardfork.hpp b/libraries/config/include/config/hardfork.hpp index af45335b38..4df95e643d 100644 --- a/libraries/config/include/config/hardfork.hpp +++ b/libraries/config/include/config/hardfork.hpp @@ -90,14 +90,14 @@ void dec_json(const Json::Value& json, FicusHardforkConfig& obj); // Json::Value enc_json(const BambooHardfork& obj); // void dec_json(const Json::Value& json, BambooHardfork& obj); -struct SequoiaHardforkConfig { +struct CornusHardforkConfig { uint64_t block_num = -1; uint32_t delegation_locking_period = 5; // number of blocks HAS_RLP_FIELDS }; -Json::Value enc_json(const SequoiaHardforkConfig& obj); -void dec_json(const Json::Value& json, SequoiaHardforkConfig& obj); +Json::Value enc_json(const 
CornusHardforkConfig& obj); +void dec_json(const Json::Value& json, CornusHardforkConfig& obj); struct HardforksConfig { // disable it by default (set to max uint64) @@ -140,12 +140,10 @@ struct HardforksConfig { FicusHardforkConfig ficus_hf; // Cornus hf - support multiple undelegations from the same validator at the same time - uint64_t cornus_hf_block_num{0}; + // - change of delegation locking period + CornusHardforkConfig cornus_hf; - // Sequoia hardfork: change of delegation locking period - SequoiaHardforkConfig sequoia_hf; - - bool isCornusHardfork(uint64_t block_number) const { return block_number >= cornus_hf_block_num; } + bool isCornusHardfork(uint64_t block_number) const { return block_number >= cornus_hf.block_num; } HAS_RLP_FIELDS }; diff --git a/libraries/config/src/hardfork.cpp b/libraries/config/src/hardfork.cpp index f68ec59d3d..b625bb7680 100644 --- a/libraries/config/src/hardfork.cpp +++ b/libraries/config/src/hardfork.cpp @@ -138,19 +138,19 @@ RLP_FIELDS_DEFINE(FicusHardforkConfig, block_num, pillar_blocks_interval, bridge // } // RLP_FIELDS_DEFINE(BambooHardfork, block_num, redelegations) -Json::Value enc_json(const SequoiaHardforkConfig& obj) { +Json::Value enc_json(const CornusHardforkConfig& obj) { Json::Value json(Json::objectValue); json["block_num"] = dev::toJS(obj.block_num); json["delegation_locking_period"] = dev::toJS(obj.delegation_locking_period); return json; } -void dec_json(const Json::Value& json, SequoiaHardforkConfig& obj) { +void dec_json(const Json::Value& json, CornusHardforkConfig& obj) { obj.block_num = json["block_num"].isUInt64() ? 
dev::getUInt(json["block_num"]) : uint64_t(-1); obj.delegation_locking_period = dev::getUInt(json["delegation_locking_period"]); } -RLP_FIELDS_DEFINE(SequoiaHardforkConfig, block_num, delegation_locking_period) +RLP_FIELDS_DEFINE(CornusHardforkConfig, block_num, delegation_locking_period) Json::Value enc_json(const HardforksConfig& obj) { Json::Value json(Json::objectValue); @@ -172,8 +172,7 @@ Json::Value enc_json(const HardforksConfig& obj) { json["aspen_hf"] = enc_json(obj.aspen_hf); json["ficus_hf"] = enc_json(obj.ficus_hf); // json["bamboo_hf"] = enc_json(obj.bamboo_hf); - json["cornus_hf_block_num"] = dev::toJS(obj.cornus_hf_block_num); - json["sequoia_hf"] = enc_json(obj.sequoia_hf); + json["cornus_hf"] = enc_json(obj.cornus_hf); return json; } @@ -205,12 +204,9 @@ void dec_json(const Json::Value& json, HardforksConfig& obj) { dec_json(json["aspen_hf"], obj.aspen_hf); dec_json(json["ficus_hf"], obj.ficus_hf); // dec_json(json["bamboo_hf"], obj.bamboo_hf); - obj.cornus_hf_block_num = - json["cornus_hf_block_num"].isUInt64() ? 
dev::getUInt(json["cornus_hf_block_num"]) : uint64_t(-1); - dec_json(json["sequoia_hf"], obj.sequoia_hf); + dec_json(json["cornus_hf"], obj.cornus_hf); } RLP_FIELDS_DEFINE(HardforksConfig, fix_redelegate_block_num, redelegations, rewards_distribution_frequency, magnolia_hf, - phalaenopsis_hf_block_num, fix_claim_all_block_num, aspen_hf, ficus_hf, cornus_hf_block_num, - sequoia_hf) + phalaenopsis_hf_block_num, fix_claim_all_block_num, aspen_hf, ficus_hf, cornus_hf) } // namespace taraxa \ No newline at end of file diff --git a/libraries/core_libs/network/include/network/tarcap/taraxa_capability.hpp b/libraries/core_libs/network/include/network/tarcap/taraxa_capability.hpp index 33222e7384..29bb6ecfb2 100644 --- a/libraries/core_libs/network/include/network/tarcap/taraxa_capability.hpp +++ b/libraries/core_libs/network/include/network/tarcap/taraxa_capability.hpp @@ -59,7 +59,7 @@ class TaraxaCapability final : public dev::p2p::CapabilityFace { */ static const InitPacketsHandlers kInitLatestVersionHandlers; - // TODO: remove this once we pass next hf after cornus hf + // TODO: remove this once we pass cornus hf static const InitPacketsHandlers kInitV4Handlers; public: diff --git a/submodules/taraxa-evm b/submodules/taraxa-evm index 073e2123c1..7389545dad 160000 --- a/submodules/taraxa-evm +++ b/submodules/taraxa-evm @@ -1 +1 @@ -Subproject commit 073e2123c17250eba96f357b524d37d44689b8c3 +Subproject commit 7389545dad4b5d63fe5dbe5bad9d96c49da376a9 From 1db415f304930cc12ee010d1afb08bb3ba6c69b1 Mon Sep 17 00:00:00 2001 From: Jakub Fornadel Date: Thu, 28 Nov 2024 11:44:47 +0100 Subject: [PATCH 092/105] adjust configs --- .../config_jsons/default/default_genesis.json | 5 +- .../config_jsons/devnet/devnet_genesis.json | 5 +- .../config_jsons/mainnet/mainnet_genesis.json | 5 +- .../config_jsons/testnet/testnet_genesis.json | 5 +- tests/rewards_stats_test.cpp | 46 +++++++++++++------ 5 files changed, 49 insertions(+), 17 deletions(-) diff --git 
a/libraries/cli/include/cli/config_jsons/default/default_genesis.json b/libraries/cli/include/cli/config_jsons/default/default_genesis.json index 4afcf79004..9c719b0208 100644 --- a/libraries/cli/include/cli/config_jsons/default/default_genesis.json +++ b/libraries/cli/include/cli/config_jsons/default/default_genesis.json @@ -120,6 +120,9 @@ "pillar_blocks_interval": 10, "bridge_contract_address": "0xcAF2b453FE8382a4B8110356DF0508f6d71F22BF" }, - "cornus_hf_block_num": 100 + "cornus_hf": { + "block_num": 100, + "delegation_locking_period": 5 + } } } \ No newline at end of file diff --git a/libraries/cli/include/cli/config_jsons/devnet/devnet_genesis.json b/libraries/cli/include/cli/config_jsons/devnet/devnet_genesis.json index d4afa4865c..12f0f68822 100644 --- a/libraries/cli/include/cli/config_jsons/devnet/devnet_genesis.json +++ b/libraries/cli/include/cli/config_jsons/devnet/devnet_genesis.json @@ -285,6 +285,9 @@ "pillar_blocks_interval": 10, "bridge_contract_address": "0xcAF2b453FE8382a4B8110356DF0508f6d71F22BF" }, - "cornus_hf_block_num": 0 + "cornus_hf": { + "block_num": 0, + "delegation_locking_period": 5 + } } } \ No newline at end of file diff --git a/libraries/cli/include/cli/config_jsons/mainnet/mainnet_genesis.json b/libraries/cli/include/cli/config_jsons/mainnet/mainnet_genesis.json index 801a7f781b..b1b2d3a2ae 100644 --- a/libraries/cli/include/cli/config_jsons/mainnet/mainnet_genesis.json +++ b/libraries/cli/include/cli/config_jsons/mainnet/mainnet_genesis.json @@ -1654,6 +1654,9 @@ "pillar_blocks_interval": 4000, "bridge_contract_address": "0xe126E0BaeAE904b8Cfd619Be1A8667A173b763a1" }, - "cornus_hf_block_num": -1 + "cornus_hf": { + "block_num": -1, + "delegation_locking_period": 163459 + } } } \ No newline at end of file diff --git a/libraries/cli/include/cli/config_jsons/testnet/testnet_genesis.json b/libraries/cli/include/cli/config_jsons/testnet/testnet_genesis.json index 658bc22702..c12536d769 100644 --- 
a/libraries/cli/include/cli/config_jsons/testnet/testnet_genesis.json +++ b/libraries/cli/include/cli/config_jsons/testnet/testnet_genesis.json @@ -154,6 +154,9 @@ "pillar_blocks_interval": 1000, "bridge_contract_address": "0xcAF2b453FE8382a4B8110356DF0508f6d71F22BF" }, - "cornus_hf_block_num": 1668000 + "cornus_hf": { + "block_num": -1, + "delegation_locking_period": 5 + } } } \ No newline at end of file diff --git a/tests/rewards_stats_test.cpp b/tests/rewards_stats_test.cpp index 9b2e9d4272..af381c2124 100644 --- a/tests/rewards_stats_test.cpp +++ b/tests/rewards_stats_test.cpp @@ -20,11 +20,17 @@ struct RewardsStatsTest : NodesTest {}; class TestableRewardsStats : public rewards::Stats { public: TestableRewardsStats(const HardforksConfig::RewardsDistributionMap& rdm, std::shared_ptr db) - : rewards::Stats( - 100, - HardforksConfig{ - 0, {}, rdm, MagnoliaHardfork{0, 0}, 0, 0, AspenHardfork{0, 0}, FicusHardforkConfig{0, 0, {}}}, - db, [](auto) { return 100; }) {} + : rewards::Stats(100, + HardforksConfig{0, + {}, + rdm, + MagnoliaHardfork{0, 0}, + 0, + 0, + AspenHardfork{0, 0}, + FicusHardforkConfig{0, 0, {}}, + CornusHardforkConfig{0, 0}}, + db, [](auto) { return 100; }) {} auto getStats() { return blocks_stats_; } }; @@ -242,14 +248,28 @@ TEST_F(RewardsStatsTest, dagBlockRewards) { hfc.aspen_hf.block_num_part_two = 4; // Create two reward stats to test before and after aspen hardfork part 1 - rewards::Stats pre_aspen_reward_stats( - 100, - HardforksConfig{0, {}, {}, MagnoliaHardfork{0, 0}, 0, 0, AspenHardfork{6, 999}, FicusHardforkConfig{0, 0, {}}}, - db, [](auto) { return 100; }); - rewards::Stats post_aspen_reward_stats( - 100, - HardforksConfig{0, {}, {}, MagnoliaHardfork{0, 0}, 0, 0, AspenHardfork{4, 999}, FicusHardforkConfig{0, 0, {}}}, - db, [](auto) { return 100; }); + rewards::Stats pre_aspen_reward_stats(100, + HardforksConfig{0, + {}, + {}, + MagnoliaHardfork{0, 0}, + 0, + 0, + AspenHardfork{6, 999}, + FicusHardforkConfig{0, 0, {}}, + 
CornusHardforkConfig{0, 0}}, + db, [](auto) { return 100; }); + rewards::Stats post_aspen_reward_stats(100, + HardforksConfig{0, + {}, + {}, + MagnoliaHardfork{0, 0}, + 0, + 0, + AspenHardfork{4, 999}, + FicusHardforkConfig{0, 0, {}}, + CornusHardforkConfig{0, 0}}, + db, [](auto) { return 100; }); // Create pbft block with 5 dag blocks auto dag_key1 = dev::KeyPair::create(); From 04d8d20c45bcca6347ffa37ce480048826d485be Mon Sep 17 00:00:00 2001 From: Jakub Fornadel Date: Mon, 2 Dec 2024 09:29:56 +0100 Subject: [PATCH 093/105] update evm --- submodules/taraxa-evm | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/submodules/taraxa-evm b/submodules/taraxa-evm index 7389545dad..8fa285622a 160000 --- a/submodules/taraxa-evm +++ b/submodules/taraxa-evm @@ -1 +1 @@ -Subproject commit 7389545dad4b5d63fe5dbe5bad9d96c49da376a9 +Subproject commit 8fa285622a541f4d1a6141147c817bf08a1ca652 From 2bc913fdd4acb1186fa80c6c978338ec2d789a04 Mon Sep 17 00:00:00 2001 From: mfrankovi Date: Thu, 28 Nov 2024 11:58:40 +0100 Subject: [PATCH 094/105] chore: increase gas limit --- .../config_jsons/default/default_genesis.json | 4 +++- .../config_jsons/devnet/devnet_genesis.json | 4 +++- .../config_jsons/mainnet/mainnet_genesis.json | 4 +++- .../config_jsons/testnet/testnet_genesis.json | 8 ++++--- libraries/config/include/config/config.hpp | 2 ++ libraries/config/include/config/genesis.hpp | 1 + libraries/config/include/config/hardfork.hpp | 23 +++++++++++-------- libraries/config/src/config.cpp | 19 +++++++++++++++ libraries/config/src/genesis.cpp | 11 +++++++-- libraries/config/src/hardfork.cpp | 6 ++++- .../consensus/include/dag/dag_manager.hpp | 3 +-- .../consensus/include/pbft/pbft_manager.hpp | 3 ++- .../consensus/src/dag/dag_block_proposer.cpp | 6 ++--- .../consensus/src/dag/dag_manager.cpp | 19 +++++++-------- .../consensus/src/final_chain/final_chain.cpp | 2 +- .../consensus/src/pbft/pbft_manager.cpp | 12 ++++++---- submodules/taraxa-evm | 2 +- tests/dag_block_test.cpp 
| 1 + tests/pbft_manager_test.cpp | 7 +++++- tests/rewards_stats_test.cpp | 6 ++--- tests/rpc_test.cpp | 6 ++--- 21 files changed, 102 insertions(+), 47 deletions(-) diff --git a/libraries/cli/include/cli/config_jsons/default/default_genesis.json b/libraries/cli/include/cli/config_jsons/default/default_genesis.json index 9c719b0208..90f23f34b2 100644 --- a/libraries/cli/include/cli/config_jsons/default/default_genesis.json +++ b/libraries/cli/include/cli/config_jsons/default/default_genesis.json @@ -122,7 +122,9 @@ }, "cornus_hf": { "block_num": 100, - "delegation_locking_period": 5 + "delegation_locking_period": 5, + "dag_gas_limit": "0x1908B100", + "pbft_gas_limit": "0x7d2b7500" } } } \ No newline at end of file diff --git a/libraries/cli/include/cli/config_jsons/devnet/devnet_genesis.json b/libraries/cli/include/cli/config_jsons/devnet/devnet_genesis.json index 12f0f68822..112ef5e0a7 100644 --- a/libraries/cli/include/cli/config_jsons/devnet/devnet_genesis.json +++ b/libraries/cli/include/cli/config_jsons/devnet/devnet_genesis.json @@ -287,7 +287,9 @@ }, "cornus_hf": { "block_num": 0, - "delegation_locking_period": 5 + "delegation_locking_period": 5, + "dag_gas_limit": "0x1908B100", + "pbft_gas_limit": "0x7d2b7500" } } } \ No newline at end of file diff --git a/libraries/cli/include/cli/config_jsons/mainnet/mainnet_genesis.json b/libraries/cli/include/cli/config_jsons/mainnet/mainnet_genesis.json index b1b2d3a2ae..d01c8ae861 100644 --- a/libraries/cli/include/cli/config_jsons/mainnet/mainnet_genesis.json +++ b/libraries/cli/include/cli/config_jsons/mainnet/mainnet_genesis.json @@ -1656,7 +1656,9 @@ }, "cornus_hf": { "block_num": -1, - "delegation_locking_period": 163459 + "delegation_locking_period": 163459, + "dag_gas_limit": "0x1908B100", + "pbft_gas_limit": "0x7d2b7500" } } } \ No newline at end of file diff --git a/libraries/cli/include/cli/config_jsons/testnet/testnet_genesis.json b/libraries/cli/include/cli/config_jsons/testnet/testnet_genesis.json index 
c12536d769..fbf702e43f 100644 --- a/libraries/cli/include/cli/config_jsons/testnet/testnet_genesis.json +++ b/libraries/cli/include/cli/config_jsons/testnet/testnet_genesis.json @@ -105,13 +105,13 @@ "dag_blocks_size": "0x32", "ghost_path_move_back": "0x0", "lambda_ms": "0x5DC", - "gas_limit": "0x7d2b7500" + "gas_limit": "0x12C684C0" }, "dag": { "block_proposer": { "shard": 1 }, - "gas_limit": "0x1908B100" + "gas_limit": "0x1E0A6E0" }, "sortition": { "changes_count_for_average": 10, @@ -156,7 +156,9 @@ }, "cornus_hf": { "block_num": -1, - "delegation_locking_period": 5 + "delegation_locking_period": 5, + "dag_gas_limit": "0x1908B100", + "pbft_gas_limit": "0x7d2b7500" } } } \ No newline at end of file diff --git a/libraries/config/include/config/config.hpp b/libraries/config/include/config/config.hpp index 477c29019d..c5fa6e0b79 100644 --- a/libraries/config/include/config/config.hpp +++ b/libraries/config/include/config/config.hpp @@ -58,6 +58,8 @@ struct FullNodeConfig { bool enable_test_rpc = false; bool enable_debug = false; uint32_t final_chain_cache_in_blocks = 5; + uint64_t propose_dag_gas_limit = 0x1E0A6E0; + uint64_t propose_pbft_gas_limit = 0x12C684C0; // config values that limits transactions pool uint32_t transactions_pool_size = kDefaultTransactionPoolSize; diff --git a/libraries/config/include/config/genesis.hpp b/libraries/config/include/config/genesis.hpp index 18d6c15c83..11ef9b42e4 100644 --- a/libraries/config/include/config/genesis.hpp +++ b/libraries/config/include/config/genesis.hpp @@ -34,6 +34,7 @@ struct GenesisConfig { bytes rlp() const; blk_hash_t genesisHash() const; void updateBlocksPerYear(); + std::pair getGasLimits(uint64_t block_number) const; }; Json::Value enc_json(GenesisConfig const& obj); diff --git a/libraries/config/include/config/hardfork.hpp b/libraries/config/include/config/hardfork.hpp index 4df95e643d..4357eb9bd7 100644 --- a/libraries/config/include/config/hardfork.hpp +++ b/libraries/config/include/config/hardfork.hpp 
@@ -72,6 +72,17 @@ struct FicusHardforkConfig { Json::Value enc_json(const FicusHardforkConfig& obj); void dec_json(const Json::Value& json, FicusHardforkConfig& obj); +struct CornusHardforkConfig { + uint64_t block_num = -1; + uint32_t delegation_locking_period = 5; // number of blocks + uint64_t dag_gas_limit = 0; + uint64_t pbft_gas_limit = 0; + + HAS_RLP_FIELDS +}; +Json::Value enc_json(const CornusHardforkConfig& obj); +void dec_json(const Json::Value& json, CornusHardforkConfig& obj); + // Keeping it for next HF // struct BambooRedelegation { // taraxa::addr_t validator; @@ -90,15 +101,6 @@ void dec_json(const Json::Value& json, FicusHardforkConfig& obj); // Json::Value enc_json(const BambooHardfork& obj); // void dec_json(const Json::Value& json, BambooHardfork& obj); -struct CornusHardforkConfig { - uint64_t block_num = -1; - uint32_t delegation_locking_period = 5; // number of blocks - - HAS_RLP_FIELDS -}; -Json::Value enc_json(const CornusHardforkConfig& obj); -void dec_json(const Json::Value& json, CornusHardforkConfig& obj); - struct HardforksConfig { // disable it by default (set to max uint64) uint64_t fix_redelegate_block_num = -1; @@ -141,9 +143,10 @@ struct HardforksConfig { // Cornus hf - support multiple undelegations from the same validator at the same time // - change of delegation locking period + // - change gas limit CornusHardforkConfig cornus_hf; - bool isCornusHardfork(uint64_t block_number) const { return block_number >= cornus_hf.block_num; } + bool isOnCornusHardfork(uint64_t block_number) const { return block_number >= cornus_hf.block_num; } HAS_RLP_FIELDS }; diff --git a/libraries/config/src/config.cpp b/libraries/config/src/config.cpp index 9e9bffdd19..8a2b9e9c71 100644 --- a/libraries/config/src/config.cpp +++ b/libraries/config/src/config.cpp @@ -108,6 +108,9 @@ FullNodeConfig::FullNodeConfig(const Json::Value &string_or_object, const Json:: genesis = GenesisConfig(); } + propose_dag_gas_limit = getConfigDataAsUInt(root, 
{"propose_dag_gas_limit"}, true, propose_dag_gas_limit); + propose_pbft_gas_limit = getConfigDataAsUInt(root, {"propose_pbft_gas_limit"}, true, propose_pbft_gas_limit); + is_light_node = getConfigDataAsBoolean(root, {"is_light_node"}, true, is_light_node); const auto min_light_node_history = (genesis.state.dpos.blocks_per_year * kDefaultLightNodeHistoryDays) / 365; light_node_history = getConfigDataAsUInt(root, {"light_node_history"}, true, min_light_node_history); @@ -199,6 +202,22 @@ void FullNodeConfig::validate() const { throw ConfigException("transactions_pool_size cannot be smaller than " + std::to_string(kMinTransactionPoolSize)); } + if (genesis.pbft.gas_limit < propose_pbft_gas_limit || + (genesis.state.hardforks.cornus_hf.block_num != uint64_t(-1) && + genesis.state.hardforks.cornus_hf.pbft_gas_limit < propose_pbft_gas_limit)) { + throw ConfigException("Propose pbft gas limit:" + std::to_string(propose_pbft_gas_limit) + + " greater than max allowed pbft gas limit:" + std::to_string(genesis.pbft.gas_limit) + ":" + + std::to_string(genesis.state.hardforks.cornus_hf.pbft_gas_limit)); + } + + if (genesis.dag.gas_limit < propose_dag_gas_limit || + (genesis.state.hardforks.cornus_hf.block_num != uint64_t(-1) && + genesis.state.hardforks.cornus_hf.dag_gas_limit < propose_dag_gas_limit)) { + throw ConfigException("Propose dag gas limit:" + std::to_string(propose_pbft_gas_limit) + + " greater than max allowed pbft gas limit:" + std::to_string(genesis.pbft.gas_limit) + ":" + + std::to_string(genesis.state.hardforks.cornus_hf.pbft_gas_limit)); + } + // TODO: add validation of other config values } diff --git a/libraries/config/src/genesis.cpp b/libraries/config/src/genesis.cpp index fe24c5fc2f..3f5649af99 100644 --- a/libraries/config/src/genesis.cpp +++ b/libraries/config/src/genesis.cpp @@ -86,10 +86,10 @@ GenesisConfig::GenesisConfig() { pbft.committee_size = 5; pbft.dag_blocks_size = 100; pbft.ghost_path_move_back = 1; - pbft.gas_limit = 60000000; + 
pbft.gas_limit = 315000000; // DAG config - dag.gas_limit = 10000000; + dag.gas_limit = 315000000; // DPOS config auto& dpos = state.dpos; @@ -129,4 +129,11 @@ bytes GenesisConfig::rlp() const { blk_hash_t GenesisConfig::genesisHash() const { return dev::sha3(rlp()); } +std::pair GenesisConfig::getGasLimits(uint64_t block_number) const { + if (state.hardforks.isOnCornusHardfork(block_number)) { + return {state.hardforks.cornus_hf.dag_gas_limit, state.hardforks.cornus_hf.pbft_gas_limit}; + } + return {dag.gas_limit, pbft.gas_limit}; +} + } // namespace taraxa diff --git a/libraries/config/src/hardfork.cpp b/libraries/config/src/hardfork.cpp index b625bb7680..380149a3a3 100644 --- a/libraries/config/src/hardfork.cpp +++ b/libraries/config/src/hardfork.cpp @@ -142,15 +142,19 @@ Json::Value enc_json(const CornusHardforkConfig& obj) { Json::Value json(Json::objectValue); json["block_num"] = dev::toJS(obj.block_num); json["delegation_locking_period"] = dev::toJS(obj.delegation_locking_period); + json["dag_gas_limit"] = dev::toJS(obj.dag_gas_limit); + json["pbft_gas_limit"] = dev::toJS(obj.pbft_gas_limit); return json; } void dec_json(const Json::Value& json, CornusHardforkConfig& obj) { obj.block_num = json["block_num"].isUInt64() ? 
dev::getUInt(json["block_num"]) : uint64_t(-1); obj.delegation_locking_period = dev::getUInt(json["delegation_locking_period"]); + obj.dag_gas_limit = dev::getUInt(json["dag_gas_limit"]); + obj.pbft_gas_limit = dev::getUInt(json["pbft_gas_limit"]); } -RLP_FIELDS_DEFINE(CornusHardforkConfig, block_num, delegation_locking_period) +RLP_FIELDS_DEFINE(CornusHardforkConfig, block_num, delegation_locking_period, dag_gas_limit, pbft_gas_limit) Json::Value enc_json(const HardforksConfig& obj) { Json::Value json(Json::objectValue); diff --git a/libraries/core_libs/consensus/include/dag/dag_manager.hpp b/libraries/core_libs/consensus/include/dag/dag_manager.hpp index a2e077576a..634135ad17 100644 --- a/libraries/core_libs/consensus/include/dag/dag_manager.hpp +++ b/libraries/core_libs/consensus/include/dag/dag_manager.hpp @@ -279,8 +279,7 @@ class DagManager : public std::enable_shared_from_this { const uint32_t cache_delete_step_ = 100; ExpirationCacheMap> seen_blocks_; std::shared_ptr final_chain_; - const uint64_t kPbftGasLimit; - const HardforksConfig kHardforks; + const GenesisConfig kGenesis; const uint64_t kValidatorMaxVote; LOG_OBJECTS_DEFINE diff --git a/libraries/core_libs/consensus/include/pbft/pbft_manager.hpp b/libraries/core_libs/consensus/include/pbft/pbft_manager.hpp index 6e8d46fb6e..289307cdbd 100644 --- a/libraries/core_libs/consensus/include/pbft/pbft_manager.hpp +++ b/libraries/core_libs/consensus/include/pbft/pbft_manager.hpp @@ -224,9 +224,10 @@ class PbftManager { /** * @brief Check a block weight of gas estimation * @param dag_blocks dag blocks + * @param period period * @return true if total weight of gas estimation is less or equal to gas limit. 
Otherwise return false */ - bool checkBlockWeight(const std::vector> &dag_blocks) const; + bool checkBlockWeight(const std::vector> &dag_blocks, PbftPeriod period) const; blk_hash_t getLastPbftBlockHash(); diff --git a/libraries/core_libs/consensus/src/dag/dag_block_proposer.cpp b/libraries/core_libs/consensus/src/dag/dag_block_proposer.cpp index 510778868a..6fdeae81c9 100644 --- a/libraries/core_libs/consensus/src/dag/dag_block_proposer.cpp +++ b/libraries/core_libs/consensus/src/dag/dag_block_proposer.cpp @@ -26,8 +26,8 @@ DagBlockProposer::DagBlockProposer(const FullNodeConfig& config, std::shared_ptr node_sk_(config.node_secret), vrf_sk_(config.vrf_secret), vrf_pk_(vrf_wrapper::getVrfPublicKey(vrf_sk_)), - kPbftGasLimit(config.genesis.pbft.gas_limit), - kDagGasLimit(config.genesis.dag.gas_limit), + kPbftGasLimit(config.propose_pbft_gas_limit), + kDagGasLimit(config.propose_dag_gas_limit), kHardforks(config.genesis.state.hardforks), kValidatorMaxVote(config.genesis.state.dpos.validator_maximum_stake / config.genesis.state.dpos.vote_eligibility_balance_step) { @@ -117,7 +117,7 @@ bool DagBlockProposer::proposeDagBlock() { } } - auto [transactions, estimations] = getShardedTrxs(*proposal_period, dag_mgr_->getDagConfig().gas_limit); + auto [transactions, estimations] = getShardedTrxs(*proposal_period, kDagGasLimit); if (transactions.empty()) { last_propose_level_ = propose_level; num_tries_ = 0; diff --git a/libraries/core_libs/consensus/src/dag/dag_manager.cpp b/libraries/core_libs/consensus/src/dag/dag_manager.cpp index 301ee7c885..36b4f22628 100644 --- a/libraries/core_libs/consensus/src/dag/dag_manager.cpp +++ b/libraries/core_libs/consensus/src/dag/dag_manager.cpp @@ -35,8 +35,7 @@ DagManager::DagManager(const FullNodeConfig &config, addr_t node_addr, std::shar dag_expiry_limit_(config.dag_expiry_limit), seen_blocks_(cache_max_size_, cache_delete_step_), final_chain_(std::move(final_chain)), - kPbftGasLimit(config.genesis.pbft.gas_limit), - 
kHardforks(config.genesis.state.hardforks), + kGenesis(config.genesis), kValidatorMaxVote(config.genesis.state.dpos.validator_maximum_stake / config.genesis.state.dpos.vote_eligibility_balance_step) { LOG_OBJECTS_CREATE("DAGMGR"); @@ -512,7 +511,7 @@ void DagManager::recoverDag() { try { uint64_t max_vote_count = 0; const auto vote_count = final_chain_->dposEligibleVoteCount(*propose_period, blk->getSender()); - if (*propose_period < kHardforks.magnolia_hf.block_num) { + if (*propose_period < kGenesis.state.hardforks.magnolia_hf.block_num) { max_vote_count = final_chain_->dposEligibleTotalVoteCount(*propose_period); } else { max_vote_count = kValidatorMaxVote; @@ -673,7 +672,7 @@ std::pair DagManager::ver const auto proposal_period_hash = db_->getPeriodBlockHash(*propose_period); uint64_t max_vote_count = 0; const auto vote_count = final_chain_->dposEligibleVoteCount(*propose_period, blk->getSender()); - if (*propose_period < kHardforks.magnolia_hf.block_num) { + if (*propose_period < kGenesis.state.hardforks.magnolia_hf.block_num) { max_vote_count = final_chain_->dposEligibleTotalVoteCount(*propose_period); } else { max_vote_count = kValidatorMaxVote; @@ -715,14 +714,16 @@ std::pair DagManager::ver return {VerifyBlockReturnType::IncorrectTransactionsEstimation, {}}; } - if (total_block_weight > getDagConfig().gas_limit) { - LOG(log_er_) << "BlockTooBig. DAG block " << blk->getHash() << " gas_limit: " << getDagConfig().gas_limit + const auto [dag_gas_limit, pbft_gas_limit] = kGenesis.getGasLimits(*propose_period); + + if (total_block_weight > dag_gas_limit) { + LOG(log_er_) << "BlockTooBig. 
DAG block " << blk->getHash() << " gas_limit: " << dag_gas_limit << " total_block_weight " << total_block_weight << " current period " << final_chain_->lastBlockNumber(); return {VerifyBlockReturnType::BlockTooBig, {}}; } - if ((blk->getTips().size() + 1) > kPbftGasLimit / getDagConfig().gas_limit) { + if ((blk->getTips().size() + 1) > pbft_gas_limit / dag_gas_limit) { for (const auto &t : blk->getTips()) { const auto tip_blk = getDagBlock(t); if (tip_blk == nullptr) { @@ -731,8 +732,8 @@ std::pair DagManager::ver } block_gas_estimation += tip_blk->getGasEstimation(); } - if (block_gas_estimation > kPbftGasLimit) { - LOG(log_er_) << "BlockTooBig. DAG block " << blk->getHash() << " with tips has limit: " << kPbftGasLimit + if (block_gas_estimation > pbft_gas_limit) { + LOG(log_er_) << "BlockTooBig. DAG block " << blk->getHash() << " with tips has limit: " << pbft_gas_limit << " block_gas_estimation " << block_gas_estimation << " current period " << final_chain_->lastBlockNumber(); return {VerifyBlockReturnType::BlockTooBig, {}}; diff --git a/libraries/core_libs/consensus/src/final_chain/final_chain.cpp b/libraries/core_libs/consensus/src/final_chain/final_chain.cpp index 1c07801b9f..0c493ec2c4 100644 --- a/libraries/core_libs/consensus/src/final_chain/final_chain.cpp +++ b/libraries/core_libs/consensus/src/final_chain/final_chain.cpp @@ -588,7 +588,7 @@ std::optional FinalChain::finalChainHash(EthBlockNumber n) const { return {}; } - if (kConfig.genesis.state.hardforks.isCornusHardfork(n)) { + if (kConfig.genesis.state.hardforks.isOnCornusHardfork(n)) { return header->hash; } return header->state_root; diff --git a/libraries/core_libs/consensus/src/pbft/pbft_manager.cpp b/libraries/core_libs/consensus/src/pbft/pbft_manager.cpp index ee8f338f23..178c3428b0 100644 --- a/libraries/core_libs/consensus/src/pbft/pbft_manager.cpp +++ b/libraries/core_libs/consensus/src/pbft/pbft_manager.cpp @@ -1287,7 +1287,8 @@ PbftManager::proposePbftBlock() { } const auto 
&dag_block_weight = dag_blk->getGasEstimation(); - if (total_weight + dag_block_weight > kGenesisConfig.pbft.gas_limit) { + const auto [dag_gas_limit, pbft_gas_limit] = kGenesisConfig.getGasLimits(current_pbft_period); + if (total_weight + dag_block_weight > pbft_gas_limit) { break; } total_weight += dag_block_weight; @@ -1553,7 +1554,7 @@ bool PbftManager::validatePbftBlock(const std::shared_ptr &pbft_block auto prev_pbft_block = pbft_chain_->getPbftBlockInChain(last_pbft_block_hash); auto ghost = dag_mgr_->getGhostPath(prev_pbft_block.getPivotDagBlockHash()); if (ghost.size() > 1 && anchor_hash != ghost[1]) { - if (!checkBlockWeight(anchor_dag_block_order_cache_[anchor_hash])) { + if (!checkBlockWeight(anchor_dag_block_order_cache_[anchor_hash], block_period)) { LOG(log_er_) << "PBFT block " << pbft_block_hash << " weight exceeded max limit"; anchor_dag_block_order_cache_.erase(anchor_hash); return false; @@ -2155,11 +2156,14 @@ void PbftManager::periodDataQueuePush(PeriodData &&period_data, dev::p2p::NodeID size_t PbftManager::periodDataQueueSize() const { return sync_queue_.size(); } -bool PbftManager::checkBlockWeight(const std::vector> &dag_blocks) const { +bool PbftManager::checkBlockWeight(const std::vector> &dag_blocks, PbftPeriod period) const { const u256 total_weight = std::accumulate(dag_blocks.begin(), dag_blocks.end(), u256(0), [](u256 value, const auto &dag_block) { return value + dag_block->getGasEstimation(); }); - if (total_weight > kGenesisConfig.pbft.gas_limit) { + auto pbft_gas_limit = kGenesisConfig.state.hardforks.isOnCornusHardfork(period) + ? 
kGenesisConfig.state.hardforks.cornus_hf.pbft_gas_limit + : kGenesisConfig.pbft.gas_limit; + if (total_weight > pbft_gas_limit) { return false; } return true; diff --git a/submodules/taraxa-evm b/submodules/taraxa-evm index 8fa285622a..a84b889c28 160000 --- a/submodules/taraxa-evm +++ b/submodules/taraxa-evm @@ -1 +1 @@ -Subproject commit 8fa285622a541f4d1a6141147c817bf08a1ca652 +Subproject commit a84b889c28a8813799650316e881b7f945ffb150 diff --git a/tests/dag_block_test.cpp b/tests/dag_block_test.cpp index 6b3b3539c0..c01c5cc6e8 100644 --- a/tests/dag_block_test.cpp +++ b/tests/dag_block_test.cpp @@ -409,6 +409,7 @@ TEST_F(DagBlockMgrTest, too_big_dag_block) { // make config auto node_cfgs = make_node_cfgs(1, 1, 20); node_cfgs.front().genesis.dag.gas_limit = 500000; + node_cfgs.front().propose_dag_gas_limit = 500000; auto node = create_nodes(node_cfgs).front(); auto db = node->getDB(); diff --git a/tests/pbft_manager_test.cpp b/tests/pbft_manager_test.cpp index 942cdf241a..3a6a86c766 100644 --- a/tests/pbft_manager_test.cpp +++ b/tests/pbft_manager_test.cpp @@ -539,6 +539,7 @@ TEST_F(PbftManagerWithDagCreation, dag_generation) { TEST_F(PbftManagerWithDagCreation, limit_dag_block_size) { auto node_cfgs = make_node_cfgs(1, 1, 5, true); node_cfgs.front().genesis.dag.gas_limit = 500000; + node_cfgs.front().propose_dag_gas_limit = 500000; makeNodeFromConfig(node_cfgs); deployContract(); @@ -596,6 +597,8 @@ TEST_F(PbftManagerWithDagCreation, limit_pbft_block) { auto node_cfgs = make_node_cfgs(1, 1, 5, true); node_cfgs.front().genesis.dag.gas_limit = 500000; node_cfgs.front().genesis.pbft.gas_limit = 1100000; + node_cfgs.front().propose_dag_gas_limit = 500000; + node_cfgs.front().propose_pbft_gas_limit = 1100000; makeNodeFromConfig(node_cfgs); deployContract(); @@ -631,6 +634,8 @@ TEST_F(PbftManagerWithDagCreation, produce_overweighted_block) { auto node_cfgs = make_node_cfgs(1, 1, 5, true); auto dag_gas_limit = node_cfgs.front().genesis.dag.gas_limit = 500000; 
node_cfgs.front().genesis.pbft.gas_limit = 1100000; + node_cfgs.front().propose_dag_gas_limit = 500000; + node_cfgs.front().propose_pbft_gas_limit = 1100000; makeNodeFromConfig(node_cfgs); deployContract(); @@ -665,7 +670,7 @@ TEST_F(PbftManagerWithDagCreation, produce_overweighted_block) { const auto period = node->getFinalChain()->lastBlockNumber(); auto period_data = node->getDB()->getPeriodData(period); ASSERT_TRUE(period_data.has_value()); - EXPECT_FALSE(node->getPbftManager()->checkBlockWeight(period_data->dag_blocks)); + EXPECT_FALSE(node->getPbftManager()->checkBlockWeight(period_data->dag_blocks, period)); } TEST_F(PbftManagerWithDagCreation, proposed_blocks) { diff --git a/tests/rewards_stats_test.cpp b/tests/rewards_stats_test.cpp index af381c2124..d96245719b 100644 --- a/tests/rewards_stats_test.cpp +++ b/tests/rewards_stats_test.cpp @@ -29,7 +29,7 @@ class TestableRewardsStats : public rewards::Stats { 0, AspenHardfork{0, 0}, FicusHardforkConfig{0, 0, {}}, - CornusHardforkConfig{0, 0}}, + CornusHardforkConfig{0, 0, 0, 0}}, db, [](auto) { return 100; }) {} auto getStats() { return blocks_stats_; } }; @@ -257,7 +257,7 @@ TEST_F(RewardsStatsTest, dagBlockRewards) { 0, AspenHardfork{6, 999}, FicusHardforkConfig{0, 0, {}}, - CornusHardforkConfig{0, 0}}, + CornusHardforkConfig{0, 0, 0, 0}}, db, [](auto) { return 100; }); rewards::Stats post_aspen_reward_stats(100, HardforksConfig{0, @@ -268,7 +268,7 @@ TEST_F(RewardsStatsTest, dagBlockRewards) { 0, AspenHardfork{4, 999}, FicusHardforkConfig{0, 0, {}}, - CornusHardforkConfig{0, 0}}, + CornusHardforkConfig{0, 0, 0, 0}}, db, [](auto) { return 100; }); // Create pbft block with 5 dag blocks diff --git a/tests/rpc_test.cpp b/tests/rpc_test.cpp index ad34833237..eab52aaad1 100644 --- a/tests/rpc_test.cpp +++ b/tests/rpc_test.cpp @@ -30,9 +30,9 @@ TEST_F(RPCTest, eth_estimateGas) { { Json::Value trx(Json::objectValue); trx["data"] = samples::greeter_contract_code; - check_estimation_is_in_range(trx, "0x5ccc5"); + 
check_estimation_is_in_range(trx, "0x5dcc5"); trx["from"] = from; - check_estimation_is_in_range(trx, "0x5ccc5"); + check_estimation_is_in_range(trx, "0x5dcc5"); } // Contract creation with value @@ -40,7 +40,7 @@ TEST_F(RPCTest, eth_estimateGas) { Json::Value trx(Json::objectValue); trx["value"] = 1; trx["data"] = samples::greeter_contract_code; - check_estimation_is_in_range(trx, "0x5ccc5"); + check_estimation_is_in_range(trx, "0x5dcc5"); } // Simple transfer estimations with author + without author From 0855c6e616d28ac152ae7c21275ddbd60123d929 Mon Sep 17 00:00:00 2001 From: kstdl Date: Mon, 2 Dec 2024 12:35:41 +0100 Subject: [PATCH 095/105] fix: bug after the merge --- libraries/core_libs/consensus/src/final_chain/final_chain.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/core_libs/consensus/src/final_chain/final_chain.cpp b/libraries/core_libs/consensus/src/final_chain/final_chain.cpp index d94541b49c..1c07801b9f 100644 --- a/libraries/core_libs/consensus/src/final_chain/final_chain.cpp +++ b/libraries/core_libs/consensus/src/final_chain/final_chain.cpp @@ -572,7 +572,7 @@ std::shared_ptr FinalChain::getBlockHeader(EthBlockNumber n) if (!pbft) { return {}; } - return BlockHeader::fromRLP(dev::RLP(raw)); + return std::make_shared(std::move(raw), *pbft, kBlockGasLimit); } return {}; } From 871609da61cc8e6f7fe57eccc269e5c23c500500 Mon Sep 17 00:00:00 2001 From: Jakub Fornadel Date: Tue, 3 Dec 2024 13:00:22 +0100 Subject: [PATCH 096/105] fix multiple tarcaps --- libraries/core_libs/network/src/network.cpp | 102 +++++++++++++++--- .../network/src/tarcap/taraxa_capability.cpp | 19 +++- 2 files changed, 101 insertions(+), 20 deletions(-) diff --git a/libraries/core_libs/network/src/network.cpp b/libraries/core_libs/network/src/network.cpp index 84b1130380..3fc7f48cf0 100644 --- a/libraries/core_libs/network/src/network.cpp +++ b/libraries/core_libs/network/src/network.cpp @@ -15,6 +15,14 @@ #include 
"network/tarcap/packets_handlers/latest/transaction_packet_handler.hpp" #include "network/tarcap/packets_handlers/latest/vote_packet_handler.hpp" #include "network/tarcap/packets_handlers/latest/votes_bundle_packet_handler.hpp" +#include "network/tarcap/packets_handlers/v4/dag_block_packet_handler.hpp" +#include "network/tarcap/packets_handlers/v4/get_pillar_votes_bundle_packet_handler.hpp" +#include "network/tarcap/packets_handlers/v4/pbft_sync_packet_handler.hpp" +#include "network/tarcap/packets_handlers/v4/pillar_vote_packet_handler.hpp" +#include "network/tarcap/packets_handlers/v4/status_packet_handler.hpp" +#include "network/tarcap/packets_handlers/v4/transaction_packet_handler.hpp" +#include "network/tarcap/packets_handlers/v4/vote_packet_handler.hpp" +#include "network/tarcap/packets_handlers/v4/votes_bundle_packet_handler.hpp" #include "network/tarcap/shared_states/pbft_syncing_state.hpp" #include "network/tarcap/stats/node_stats.hpp" #include "network/tarcap/stats/time_period_packets_stats.hpp" @@ -183,8 +191,14 @@ void Network::registerPeriodicEvents(const std::shared_ptr &pbft_mg // Send new transactions auto sendTxs = [this, trx_mgr = trx_mgr]() { for (auto &tarcap : tarcaps_) { - auto tx_packet_handler = tarcap.second->getSpecificHandler(); - tx_packet_handler->periodicSendTransactions(trx_mgr->getAllPoolTrxs()); + // TODO[2905]: refactor + if (tarcap.first == TARAXA_NET_VERSION) { + auto tx_packet_handler = tarcap.second->getSpecificHandler(); + tx_packet_handler->periodicSendTransactions(trx_mgr->getAllPoolTrxs()); + } else { + auto tx_packet_handler = tarcap.second->getSpecificHandler(); + tx_packet_handler->periodicSendTransactions(trx_mgr->getAllPoolTrxs()); + } } }; periodic_events_tp_.post_loop({kConf.network.transaction_interval_ms}, sendTxs); @@ -192,8 +206,14 @@ void Network::registerPeriodicEvents(const std::shared_ptr &pbft_mg // Send status packet auto sendStatus = [this]() { for (auto &tarcap : tarcaps_) { - auto status_packet_handler = 
tarcap.second->getSpecificHandler(); - status_packet_handler->sendStatusToPeers(); + // TODO[2905]: refactor + if (tarcap.first == TARAXA_NET_VERSION) { + auto status_packet_handler = tarcap.second->getSpecificHandler(); + status_packet_handler->sendStatusToPeers(); + } else { + auto status_packet_handler = tarcap.second->getSpecificHandler(); + status_packet_handler->sendStatusToPeers(); + } } }; const auto send_status_interval = 6 * lambda_ms; @@ -292,28 +312,52 @@ void Network::addBootNodes(bool initial) { void Network::gossipDagBlock(const std::shared_ptr &block, bool proposed, const SharedTransactions &trxs) { for (const auto &tarcap : tarcaps_) { - tarcap.second->getSpecificHandler()->onNewBlockVerified(block, proposed, - trxs); + // TODO[2905]: refactor + if (tarcap.first == TARAXA_NET_VERSION) { + tarcap.second->getSpecificHandler()->onNewBlockVerified(block, proposed, + trxs); + } else { + tarcap.second->getSpecificHandler()->onNewBlockVerified( + block, proposed, trxs); + } } } void Network::gossipVote(const std::shared_ptr &vote, const std::shared_ptr &block, bool rebroadcast) { for (const auto &tarcap : tarcaps_) { - tarcap.second->getSpecificHandler()->onNewPbftVote(vote, block, rebroadcast); + // TODO[2905]: refactor + if (tarcap.first == TARAXA_NET_VERSION) { + tarcap.second->getSpecificHandler()->onNewPbftVote(vote, block, rebroadcast); + } else { + tarcap.second->getSpecificHandler()->onNewPbftVote(vote, block, + rebroadcast); + } } } void Network::gossipVotesBundle(const std::vector> &votes, bool rebroadcast) { for (const auto &tarcap : tarcaps_) { - tarcap.second->getSpecificHandler()->onNewPbftVotesBundle(votes, - rebroadcast); + // TODO[2905]: refactor + if (tarcap.first == TARAXA_NET_VERSION) { + tarcap.second->getSpecificHandler()->onNewPbftVotesBundle(votes, + rebroadcast); + } else { + tarcap.second->getSpecificHandler()->onNewPbftVotesBundle( + votes, rebroadcast); + } } } void Network::gossipPillarBlockVote(const std::shared_ptr &vote, 
bool rebroadcast) { for (const auto &tarcap : tarcaps_) { - tarcap.second->getSpecificHandler()->onNewPillarVote(vote, rebroadcast); + // TODO[2905]: refactor + if (tarcap.first == TARAXA_NET_VERSION) { + tarcap.second->getSpecificHandler()->onNewPillarVote(vote, rebroadcast); + } else { + tarcap.second->getSpecificHandler()->onNewPillarVote(vote, + rebroadcast); + } } } @@ -324,7 +368,12 @@ void Network::handleMaliciousSyncPeer(const dev::p2p::NodeID &node_id) { continue; } - tarcap.second->getSpecificHandler()->handleMaliciousSyncPeer(node_id); + // TODO[2905]: refactor + if (tarcap.first == TARAXA_NET_VERSION) { + tarcap.second->getSpecificHandler()->handleMaliciousSyncPeer(node_id); + } else { + tarcap.second->getSpecificHandler()->handleMaliciousSyncPeer(node_id); + } } } @@ -332,8 +381,15 @@ std::shared_ptr Network::getMaxChainPeer() const { std::shared_ptr max_chain_peer{nullptr}; for (const auto &tarcap : tarcaps_) { - const auto peer = - tarcap.second->getSpecificHandler<::taraxa::network::tarcap::PbftSyncPacketHandler>()->getMaxChainPeer(); + std::shared_ptr peer; + // TODO[2905]: refactor + if (tarcap.first == TARAXA_NET_VERSION) { + peer = tarcap.second->getSpecificHandler<::taraxa::network::tarcap::PbftSyncPacketHandler>()->getMaxChainPeer(); + } else { + peer = + tarcap.second->getSpecificHandler<::taraxa::network::tarcap::v4::PbftSyncPacketHandler>()->getMaxChainPeer(); + } + if (!peer) { continue; } @@ -351,16 +407,28 @@ std::shared_ptr Network::getMaxChainPeer() const { void Network::requestPillarBlockVotesBundle(taraxa::PbftPeriod period, const taraxa::blk_hash_t &pillar_block_hash) { for (const auto &tarcap : tarcaps_) { // Try to get most up-to-date peer - const auto peer = - tarcap.second->getSpecificHandler<::taraxa::network::tarcap::PbftSyncPacketHandler>()->getMaxChainPeer(); + std::shared_ptr peer; + // TODO[2905]: refactor + if (tarcap.first == TARAXA_NET_VERSION) { + peer = 
tarcap.second->getSpecificHandler<::taraxa::network::tarcap::PbftSyncPacketHandler>()->getMaxChainPeer(); + } else { + peer = + tarcap.second->getSpecificHandler<::taraxa::network::tarcap::v4::PbftSyncPacketHandler>()->getMaxChainPeer(); + } if (!peer) { continue; } // TODO[2748]: is it good enough to request it just from 1 peer without knowing if he has all of the votes ? - tarcap.second->getSpecificHandler()->requestPillarVotesBundle( - period, pillar_block_hash, peer); + // TODO[2905]: refactor + if (tarcap.first == TARAXA_NET_VERSION) { + tarcap.second->getSpecificHandler()->requestPillarVotesBundle( + period, pillar_block_hash, peer); + } else { + tarcap.second->getSpecificHandler() + ->requestPillarVotesBundle(period, pillar_block_hash, peer); + } } } diff --git a/libraries/core_libs/network/src/tarcap/taraxa_capability.cpp b/libraries/core_libs/network/src/tarcap/taraxa_capability.cpp index bee9e177f9..55d17a8209 100644 --- a/libraries/core_libs/network/src/tarcap/taraxa_capability.cpp +++ b/libraries/core_libs/network/src/tarcap/taraxa_capability.cpp @@ -2,6 +2,7 @@ #include +#include "config/version.hpp" #include "network/tarcap/packets_handler.hpp" #include "network/tarcap/packets_handlers/latest/dag_block_packet_handler.hpp" #include "network/tarcap/packets_handlers/latest/dag_sync_packet_handler.hpp" @@ -100,8 +101,14 @@ void TaraxaCapability::onConnect(std::weak_ptr session, u256 peers_state_->addPendingPeer(node_id, session_p->info().host + ":" + std::to_string(session_p->info().port)); LOG(log_nf_) << "Node " << node_id << " connected"; - auto status_packet_handler = packets_handlers_->getSpecificHandler(); - status_packet_handler->sendStatus(node_id, true); + // TODO[2905]: refactor + if (version_ == TARAXA_NET_VERSION) { + auto status_packet_handler = packets_handlers_->getSpecificHandler(); + status_packet_handler->sendStatus(node_id, true); + } else { + auto status_packet_handler = packets_handlers_->getSpecificHandler(); + 
status_packet_handler->sendStatus(node_id, true); + } } void TaraxaCapability::onDisconnect(dev::p2p::NodeID const &_nodeID) { @@ -113,7 +120,13 @@ void TaraxaCapability::onDisconnect(dev::p2p::NodeID const &_nodeID) { pbft_syncing_state_->setPbftSyncing(false); if (peers_state_->getPeersCount() > 0) { LOG(log_dg_) << "Restart PBFT/DAG syncing due to syncing peer disconnect."; - packets_handlers_->getSpecificHandler()->startSyncingPbft(); + // TODO[2905]: refactor + if (version_ == TARAXA_NET_VERSION) { + packets_handlers_->getSpecificHandler()->startSyncingPbft(); + } else { + packets_handlers_->getSpecificHandler()->startSyncingPbft(); + } + } else { LOG(log_dg_) << "Stop PBFT/DAG syncing due to syncing peer disconnect and no other peers available."; } From 69a44843e2ad4c9aa209c7705be7ae0ff88a7b48 Mon Sep 17 00:00:00 2001 From: mfrankovi Date: Tue, 3 Dec 2024 13:54:02 +0100 Subject: [PATCH 097/105] fix: propose gas limit validation --- .../config/include/config/config_utils.hpp | 4 +--- libraries/config/src/config.cpp | 20 ++----------------- libraries/config/src/config_utils.cpp | 13 +++--------- libraries/config/src/network.cpp | 7 +++---- .../consensus/src/dag/dag_block_proposer.cpp | 6 ++++-- 5 files changed, 13 insertions(+), 37 deletions(-) diff --git a/libraries/config/include/config/config_utils.hpp b/libraries/config/include/config/config_utils.hpp index 333c21bbdd..c82ee77987 100644 --- a/libraries/config/include/config/config_utils.hpp +++ b/libraries/config/include/config/config_utils.hpp @@ -16,11 +16,9 @@ Json::Value getConfigData(Json::Value root, const std::vector &path std::string getConfigDataAsString(const Json::Value &root, const std::vector &path, bool optional = false, const std::string &value = {}); -uint32_t getConfigDataAsUInt(const Json::Value &root, const std::vector &path, bool optional = false, +uint64_t getConfigDataAsUInt(const Json::Value &root, const std::vector &path, bool optional = false, uint32_t value = 0); -uint64_t 
getConfigDataAsUInt64(const Json::Value &root, const std::vector &path); - bool getConfigDataAsBoolean(const Json::Value &root, const std::vector &path, bool optional = false, bool value = false); diff --git a/libraries/config/src/config.cpp b/libraries/config/src/config.cpp index 8a2b9e9c71..f91b75f979 100644 --- a/libraries/config/src/config.cpp +++ b/libraries/config/src/config.cpp @@ -54,8 +54,8 @@ std::vector FullNodeConfig::loadLoggingConfigs(const Json::Value output.target = log_path; output.file_name = (log_path / getConfigDataAsString(o, {"file_name"})).string(); output.format = getConfigDataAsString(o, {"format"}); - output.max_size = getConfigDataAsUInt64(o, {"max_size"}); - output.rotation_size = getConfigDataAsUInt64(o, {"rotation_size"}); + output.max_size = getConfigDataAsUInt(o, {"max_size"}); + output.rotation_size = getConfigDataAsUInt(o, {"rotation_size"}); output.time_based_rotation = getConfigDataAsString(o, {"time_based_rotation"}); } logging.outputs.push_back(output); @@ -202,22 +202,6 @@ void FullNodeConfig::validate() const { throw ConfigException("transactions_pool_size cannot be smaller than " + std::to_string(kMinTransactionPoolSize)); } - if (genesis.pbft.gas_limit < propose_pbft_gas_limit || - (genesis.state.hardforks.cornus_hf.block_num != uint64_t(-1) && - genesis.state.hardforks.cornus_hf.pbft_gas_limit < propose_pbft_gas_limit)) { - throw ConfigException("Propose pbft gas limit:" + std::to_string(propose_pbft_gas_limit) + - " greater than max allowed pbft gas limit:" + std::to_string(genesis.pbft.gas_limit) + ":" + - std::to_string(genesis.state.hardforks.cornus_hf.pbft_gas_limit)); - } - - if (genesis.dag.gas_limit < propose_dag_gas_limit || - (genesis.state.hardforks.cornus_hf.block_num != uint64_t(-1) && - genesis.state.hardforks.cornus_hf.dag_gas_limit < propose_dag_gas_limit)) { - throw ConfigException("Propose dag gas limit:" + std::to_string(propose_pbft_gas_limit) + - " greater than max allowed pbft gas limit:" + 
std::to_string(genesis.pbft.gas_limit) + ":" + - std::to_string(genesis.state.hardforks.cornus_hf.pbft_gas_limit)); - } - // TODO: add validation of other config values } diff --git a/libraries/config/src/config_utils.cpp b/libraries/config/src/config_utils.cpp index f7dc9dc475..a3946d6b4f 100644 --- a/libraries/config/src/config_utils.cpp +++ b/libraries/config/src/config_utils.cpp @@ -7,6 +7,7 @@ #include #include "common/config_exception.hpp" +#include "libdevcore/CommonJS.h" namespace taraxa { @@ -44,14 +45,14 @@ std::string getConfigDataAsString(const Json::Value &root, const std::vector &path, bool optional, +uint64_t getConfigDataAsUInt(const Json::Value &root, const std::vector &path, bool optional, uint32_t value) { try { Json::Value ret = getConfigData(root, path, optional); if (ret.isNull()) { return value; } else { - return ret.asUInt(); + return dev::getUInt(ret); } } catch (Json::Exception &e) { if (optional) { @@ -61,14 +62,6 @@ uint32_t getConfigDataAsUInt(const Json::Value &root, const std::vector &path) { - try { - return getConfigData(root, path).asUInt64(); - } catch (Json::Exception &e) { - throw ConfigException(getConfigErr(path) + e.what()); - } -} - bool getConfigDataAsBoolean(const Json::Value &root, const std::vector &path, bool optional, bool value) { try { Json::Value ret = getConfigData(root, path, optional); diff --git a/libraries/config/src/network.cpp b/libraries/config/src/network.cpp index 7e6b24cce8..59701f0d69 100644 --- a/libraries/config/src/network.cpp +++ b/libraries/config/src/network.cpp @@ -75,10 +75,9 @@ DdosProtectionConfig dec_ddos_protection_config_json(const Json::Value &json) { ddos_protection.packets_stats_time_period_ms = std::chrono::milliseconds{getConfigDataAsUInt(json, {"packets_stats_time_period_ms"})}; ddos_protection.peer_max_packets_processing_time_us = - std::chrono::microseconds{getConfigDataAsUInt64(json, {"peer_max_packets_processing_time_us"})}; - ddos_protection.peer_max_packets_queue_size_limit = - 
getConfigDataAsUInt64(json, {"peer_max_packets_queue_size_limit"}); - ddos_protection.max_packets_queue_size = getConfigDataAsUInt64(json, {"max_packets_queue_size"}); + std::chrono::microseconds{getConfigDataAsUInt(json, {"peer_max_packets_processing_time_us"})}; + ddos_protection.peer_max_packets_queue_size_limit = getConfigDataAsUInt(json, {"peer_max_packets_queue_size_limit"}); + ddos_protection.max_packets_queue_size = getConfigDataAsUInt(json, {"max_packets_queue_size"}); return ddos_protection; } diff --git a/libraries/core_libs/consensus/src/dag/dag_block_proposer.cpp b/libraries/core_libs/consensus/src/dag/dag_block_proposer.cpp index 6fdeae81c9..9e42063db9 100644 --- a/libraries/core_libs/consensus/src/dag/dag_block_proposer.cpp +++ b/libraries/core_libs/consensus/src/dag/dag_block_proposer.cpp @@ -26,8 +26,10 @@ DagBlockProposer::DagBlockProposer(const FullNodeConfig& config, std::shared_ptr node_sk_(config.node_secret), vrf_sk_(config.vrf_secret), vrf_pk_(vrf_wrapper::getVrfPublicKey(vrf_sk_)), - kPbftGasLimit(config.propose_pbft_gas_limit), - kDagGasLimit(config.propose_dag_gas_limit), + kPbftGasLimit( + std::min(config.propose_pbft_gas_limit, config.genesis.getGasLimits(final_chain_->lastBlockNumber()).second)), + kDagGasLimit( + std::min(config.propose_dag_gas_limit, config.genesis.getGasLimits(final_chain_->lastBlockNumber()).first)), kHardforks(config.genesis.state.hardforks), kValidatorMaxVote(config.genesis.state.dpos.validator_maximum_stake / config.genesis.state.dpos.vote_eligibility_balance_step) { From 478fc883bdea5457e250b23894efb48af1dde5b0 Mon Sep 17 00:00:00 2001 From: Matus Kysel Date: Wed, 4 Dec 2024 15:18:16 +0100 Subject: [PATCH 098/105] chore: new testnet --- .../cli/include/cli/config_jsons/testnet/testnet_genesis.json | 4 ++-- submodules/taraxa-evm | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/libraries/cli/include/cli/config_jsons/testnet/testnet_genesis.json 
b/libraries/cli/include/cli/config_jsons/testnet/testnet_genesis.json index fbf702e43f..6062b19f03 100644 --- a/libraries/cli/include/cli/config_jsons/testnet/testnet_genesis.json +++ b/libraries/cli/include/cli/config_jsons/testnet/testnet_genesis.json @@ -4,7 +4,7 @@ "level": "0x0", "pivot": "0x0000000000000000000000000000000000000000000000000000000000000000", "sig": "0xb7e22d46c1ba94d5e8347b01d137b5c428fcbbeaf0a77fb024cbbf1517656ff00d04f7f25be608c321b0d7483c402c294ff46c49b265305d046a52236c0a363701", - "timestamp": "0x669f7f20", + "timestamp": "0x6750605A", "tips": [], "transactions": [] }, @@ -155,7 +155,7 @@ "bridge_contract_address": "0xcAF2b453FE8382a4B8110356DF0508f6d71F22BF" }, "cornus_hf": { - "block_num": -1, + "block_num": 1000, "delegation_locking_period": 5, "dag_gas_limit": "0x1908B100", "pbft_gas_limit": "0x7d2b7500" diff --git a/submodules/taraxa-evm b/submodules/taraxa-evm index a84b889c28..0e0aef18d2 160000 --- a/submodules/taraxa-evm +++ b/submodules/taraxa-evm @@ -1 +1 @@ -Subproject commit a84b889c28a8813799650316e881b7f945ffb150 +Subproject commit 0e0aef18d24caa434290c5a70633067894005dc6 From 5673b765aab13a157cc3ebb0eecb54c8891c631f Mon Sep 17 00:00:00 2001 From: mfrankovi Date: Thu, 12 Dec 2024 12:34:29 +0100 Subject: [PATCH 099/105] fix: no balance duplicate transaction --- .../src/transaction/transaction_manager.cpp | 24 +++++++------------ 1 file changed, 9 insertions(+), 15 deletions(-) diff --git a/libraries/core_libs/consensus/src/transaction/transaction_manager.cpp b/libraries/core_libs/consensus/src/transaction/transaction_manager.cpp index e5fd6efa6f..e524a43c78 100644 --- a/libraries/core_libs/consensus/src/transaction/transaction_manager.cpp +++ b/libraries/core_libs/consensus/src/transaction/transaction_manager.cpp @@ -196,24 +196,18 @@ void TransactionManager::saveTransactionsFromDagBlock(SharedTransactions const & std::unique_lock transactions_lock(transactions_mutex_); for (auto t : trxs) { - const auto account = 
final_chain_->getAccount(t->getSender()).value_or(taraxa::state_api::ZeroAccount); const auto tx_hash = t->getHash(); - // Checking nonce in cheaper than checking db, verify with nonce if possible - bool trx_not_executed = account.nonce < t->getNonce() || !db_->transactionFinalized(tx_hash); - - if (trx_not_executed) { - if (!recently_finalized_transactions_.contains(tx_hash) && - !nonfinalized_transactions_in_dag_.contains(tx_hash)) { - db_->addTransactionToBatch(*t, write_batch); - nonfinalized_transactions_in_dag_.emplace(tx_hash, t); - if (transactions_pool_.erase(tx_hash)) { - LOG(log_dg_) << "Transaction " << tx_hash << " removed from trx pool "; - // Transactions are counted when included in DAG - accepted_transactions.emplace_back(tx_hash); - } - trx_count_++; + if (!recently_finalized_transactions_.contains(tx_hash) && !nonfinalized_transactions_in_dag_.contains(tx_hash) && + !db_->transactionFinalized(tx_hash)) { + db_->addTransactionToBatch(*t, write_batch); + nonfinalized_transactions_in_dag_.emplace(tx_hash, t); + if (transactions_pool_.erase(tx_hash)) { + LOG(log_dg_) << "Transaction " << tx_hash << " removed from trx pool "; + // Transactions are counted when included in DAG + accepted_transactions.emplace_back(tx_hash); } + trx_count_++; } } db_->addStatusFieldToBatch(StatusDbField::TrxCount, trx_count_, write_batch); From 825d844fc07f08335d35450e38d968c9e0aeb58d Mon Sep 17 00:00:00 2001 From: Matus Kysel Date: Tue, 17 Dec 2024 14:17:23 +0100 Subject: [PATCH 100/105] chore: update evm --- submodules/taraxa-evm | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/submodules/taraxa-evm b/submodules/taraxa-evm index 0e0aef18d2..bbaa08f3cb 160000 --- a/submodules/taraxa-evm +++ b/submodules/taraxa-evm @@ -1 +1 @@ -Subproject commit 0e0aef18d24caa434290c5a70633067894005dc6 +Subproject commit bbaa08f3cb99e7256c7a6b5c6c6d8cc2c8d4d930 From a5b8fa6d174b316e7b576988281da7af9ec90547 Mon Sep 17 00:00:00 2001 From: Matus Kysel Date: Tue, 17 Dec 2024 
14:23:31 +0100 Subject: [PATCH 101/105] update block number --- .../cli/include/cli/config_jsons/mainnet/mainnet_genesis.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/cli/include/cli/config_jsons/mainnet/mainnet_genesis.json b/libraries/cli/include/cli/config_jsons/mainnet/mainnet_genesis.json index d01c8ae861..b00f1234d6 100644 --- a/libraries/cli/include/cli/config_jsons/mainnet/mainnet_genesis.json +++ b/libraries/cli/include/cli/config_jsons/mainnet/mainnet_genesis.json @@ -1655,7 +1655,7 @@ "bridge_contract_address": "0xe126E0BaeAE904b8Cfd619Be1A8667A173b763a1" }, "cornus_hf": { - "block_num": -1, + "block_num": 15276000, "delegation_locking_period": 163459, "dag_gas_limit": "0x1908B100", "pbft_gas_limit": "0x7d2b7500" From 92005e132b3e60c3a96475c1bfe343aa43bf3875 Mon Sep 17 00:00:00 2001 From: mfrankovi Date: Wed, 18 Dec 2024 12:24:41 +0100 Subject: [PATCH 102/105] chore: merge v4 and v5 network version to same version --- CMakeLists.txt | 2 +- libraries/common/include/common/constants.hpp | 2 +- .../common/ext_pillar_vote_packet_handler.hpp | 4 +- .../common/ext_syncing_packet_handler.hpp | 4 +- .../common/ext_votes_packet_handler.hpp | 4 +- .../{v4 => v3}/common/packet_handler.hpp | 4 +- .../{v4 => v3}/dag_block_packet_handler.hpp | 4 +- .../{v4 => v3}/dag_sync_packet_handler.hpp | 4 +- .../get_dag_sync_packet_handler.hpp | 4 +- .../get_next_votes_bundle_packet_handler.hpp | 4 +- .../get_pbft_sync_packet_handler.hpp | 18 ++---- ...get_pillar_votes_bundle_packet_handler.hpp | 4 +- .../{v4 => v3}/pbft_sync_packet_handler.hpp | 16 +----- .../{v4 => v3}/pillar_vote_packet_handler.hpp | 6 +- .../pillar_votes_bundle_packet_handler.hpp | 6 +- .../{v4 => v3}/status_packet_handler.hpp | 4 +- .../{v4 => v3}/transaction_packet_handler.hpp | 4 +- .../{v4 => v3}/vote_packet_handler.hpp | 4 +- .../votes_bundle_packet_handler.hpp | 4 +- libraries/core_libs/network/src/network.cpp | 44 +++++++-------- 
.../common/ext_bls_sig_packet_handler.cpp | 6 +- .../common/ext_syncing_packet_handler.cpp | 6 +- .../common/ext_votes_packet_handler.cpp | 6 +- .../{v4 => v3}/common/packet_handler.cpp | 6 +- .../{v4 => v3}/dag_block_packet_handler.cpp | 8 +-- .../{v4 => v3}/dag_sync_packet_handler.cpp | 8 +-- .../get_dag_sync_packet_handler.cpp | 6 +- .../get_next_votes_bundle_packet_handler.cpp | 6 +- .../get_pbft_sync_packet_handler.cpp | 22 ++------ ...get_pillar_votes_bundle_packet_handler.cpp | 8 +-- .../{v4 => v3}/pbft_sync_packet_handler.cpp | 43 ++------------ .../{v4 => v3}/pillar_vote_packet_handler.cpp | 6 +- .../pillar_votes_bundle_packet_handler.cpp | 6 +- .../packets_handlers/{v4 => v3}/readme.md | 0 .../{v4 => v3}/status_packet_handler.cpp | 8 +-- .../{v4 => v3}/transaction_packet_handler.cpp | 6 +- .../{v4 => v3}/vote_packet_handler.cpp | 6 +- .../votes_bundle_packet_handler.cpp | 6 +- .../network/src/tarcap/taraxa_capability.cpp | 56 +++++++++---------- 39 files changed, 150 insertions(+), 215 deletions(-) rename libraries/core_libs/network/include/network/tarcap/packets_handlers/{v4 => v3}/common/ext_pillar_vote_packet_handler.hpp (91%) rename libraries/core_libs/network/include/network/tarcap/packets_handlers/{v4 => v3}/common/ext_syncing_packet_handler.hpp (96%) rename libraries/core_libs/network/include/network/tarcap/packets_handlers/{v4 => v3}/common/ext_votes_packet_handler.hpp (97%) rename libraries/core_libs/network/include/network/tarcap/packets_handlers/{v4 => v3}/common/packet_handler.hpp (97%) rename libraries/core_libs/network/include/network/tarcap/packets_handlers/{v4 => v3}/dag_block_packet_handler.hpp (95%) rename libraries/core_libs/network/include/network/tarcap/packets_handlers/{v4 => v3}/dag_sync_packet_handler.hpp (93%) rename libraries/core_libs/network/include/network/tarcap/packets_handlers/{v4 => v3}/get_dag_sync_packet_handler.hpp (94%) rename libraries/core_libs/network/include/network/tarcap/packets_handlers/{v4 => 
v3}/get_next_votes_bundle_packet_handler.hpp (93%) rename libraries/core_libs/network/include/network/tarcap/packets_handlers/{v4 => v3}/get_pbft_sync_packet_handler.hpp (82%) rename libraries/core_libs/network/include/network/tarcap/packets_handlers/{v4 => v3}/get_pillar_votes_bundle_packet_handler.hpp (93%) rename libraries/core_libs/network/include/network/tarcap/packets_handlers/{v4 => v3}/pbft_sync_packet_handler.hpp (92%) rename libraries/core_libs/network/include/network/tarcap/packets_handlers/{v4 => v3}/pillar_vote_packet_handler.hpp (87%) rename libraries/core_libs/network/include/network/tarcap/packets_handlers/{v4 => v3}/pillar_votes_bundle_packet_handler.hpp (86%) rename libraries/core_libs/network/include/network/tarcap/packets_handlers/{v4 => v3}/status_packet_handler.hpp (94%) rename libraries/core_libs/network/include/network/tarcap/packets_handlers/{v4 => v3}/transaction_packet_handler.hpp (97%) rename libraries/core_libs/network/include/network/tarcap/packets_handlers/{v4 => v3}/vote_packet_handler.hpp (94%) rename libraries/core_libs/network/include/network/tarcap/packets_handlers/{v4 => v3}/votes_bundle_packet_handler.hpp (94%) rename libraries/core_libs/network/src/tarcap/packets_handlers/{v4 => v3}/common/ext_bls_sig_packet_handler.cpp (90%) rename libraries/core_libs/network/src/tarcap/packets_handlers/{v4 => v3}/common/ext_syncing_packet_handler.cpp (97%) rename libraries/core_libs/network/src/tarcap/packets_handlers/{v4 => v3}/common/ext_votes_packet_handler.cpp (98%) rename libraries/core_libs/network/src/tarcap/packets_handlers/{v4 => v3}/common/packet_handler.cpp (97%) rename libraries/core_libs/network/src/tarcap/packets_handlers/{v4 => v3}/dag_block_packet_handler.cpp (98%) rename libraries/core_libs/network/src/tarcap/packets_handlers/{v4 => v3}/dag_sync_packet_handler.cpp (96%) rename libraries/core_libs/network/src/tarcap/packets_handlers/{v4 => v3}/get_dag_sync_packet_handler.cpp (96%) rename 
libraries/core_libs/network/src/tarcap/packets_handlers/{v4 => v3}/get_next_votes_bundle_packet_handler.cpp (96%) rename libraries/core_libs/network/src/tarcap/packets_handlers/{v4 => v3}/get_pbft_sync_packet_handler.cpp (95%) rename libraries/core_libs/network/src/tarcap/packets_handlers/{v4 => v3}/get_pillar_votes_bundle_packet_handler.cpp (95%) rename libraries/core_libs/network/src/tarcap/packets_handlers/{v4 => v3}/pbft_sync_packet_handler.cpp (98%) rename libraries/core_libs/network/src/tarcap/packets_handlers/{v4 => v3}/pillar_vote_packet_handler.cpp (94%) rename libraries/core_libs/network/src/tarcap/packets_handlers/{v4 => v3}/pillar_votes_bundle_packet_handler.cpp (92%) rename libraries/core_libs/network/src/tarcap/packets_handlers/{v4 => v3}/readme.md (100%) rename libraries/core_libs/network/src/tarcap/packets_handlers/{v4 => v3}/status_packet_handler.cpp (97%) rename libraries/core_libs/network/src/tarcap/packets_handlers/{v4 => v3}/transaction_packet_handler.cpp (98%) rename libraries/core_libs/network/src/tarcap/packets_handlers/{v4 => v3}/vote_packet_handler.cpp (97%) rename libraries/core_libs/network/src/tarcap/packets_handlers/{v4 => v3}/votes_bundle_packet_handler.cpp (97%) diff --git a/CMakeLists.txt b/CMakeLists.txt index f411985bb6..1209bfbfda 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -7,7 +7,7 @@ set(TARAXA_PATCH_VERSION 1) set(TARAXA_VERSION ${TARAXA_MAJOR_VERSION}.${TARAXA_MINOR_VERSION}.${TARAXA_PATCH_VERSION}) # Any time a change in the network protocol is introduced this version should be increased -set(TARAXA_NET_VERSION 5) +set(TARAXA_NET_VERSION 4) # Major version is modified when DAG blocks, pbft blocks and any basic building blocks of our blockchain is modified # in the db set(TARAXA_DB_MAJOR_VERSION 1) diff --git a/libraries/common/include/common/constants.hpp b/libraries/common/include/common/constants.hpp index 3a60e60d59..aea008f1ac 100644 --- a/libraries/common/include/common/constants.hpp +++ 
b/libraries/common/include/common/constants.hpp @@ -31,7 +31,7 @@ constexpr uint32_t kDefaultTransactionPoolSize{200000}; constexpr uint32_t kMaxNonFinalizedTransactions{1000000}; constexpr uint32_t kMaxNonFinalizedDagBlocks{100}; -const size_t kV4NetworkVersion = 4; +const size_t kV3NetworkVersion = 3; const uint32_t kRecentlyFinalizedTransactionsFactor = 2; diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/common/ext_pillar_vote_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v3/common/ext_pillar_vote_packet_handler.hpp similarity index 91% rename from libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/common/ext_pillar_vote_packet_handler.hpp rename to libraries/core_libs/network/include/network/tarcap/packets_handlers/v3/common/ext_pillar_vote_packet_handler.hpp index 8dfea03969..bee00ed59f 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/common/ext_pillar_vote_packet_handler.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v3/common/ext_pillar_vote_packet_handler.hpp @@ -16,7 +16,7 @@ class FinalChain; } // namespace taraxa -namespace taraxa::network::tarcap::v4 { +namespace taraxa::network::tarcap::v3 { class ExtPillarVotePacketHandler : public PacketHandler { public: @@ -32,4 +32,4 @@ class ExtPillarVotePacketHandler : public PacketHandler { std::shared_ptr pillar_chain_manager_; }; -} // namespace taraxa::network::tarcap::v4 +} // namespace taraxa::network::tarcap::v3 diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/common/ext_syncing_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v3/common/ext_syncing_packet_handler.hpp similarity index 96% rename from libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/common/ext_syncing_packet_handler.hpp rename to 
libraries/core_libs/network/include/network/tarcap/packets_handlers/v3/common/ext_syncing_packet_handler.hpp index 5019670fde..ce8835ed86 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/common/ext_syncing_packet_handler.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v3/common/ext_syncing_packet_handler.hpp @@ -14,7 +14,7 @@ namespace taraxa::network::tarcap { class PbftSyncingState; } -namespace taraxa::network::tarcap::v4 { +namespace taraxa::network::tarcap::v3 { /** * @brief ExtSyncingPacketHandler is extended abstract PacketHandler with added functions that are used in packet @@ -63,4 +63,4 @@ class ExtSyncingPacketHandler : public PacketHandler { std::shared_ptr db_{nullptr}; }; -} // namespace taraxa::network::tarcap::v4 +} // namespace taraxa::network::tarcap::v3 diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/common/ext_votes_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v3/common/ext_votes_packet_handler.hpp similarity index 97% rename from libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/common/ext_votes_packet_handler.hpp rename to libraries/core_libs/network/include/network/tarcap/packets_handlers/v3/common/ext_votes_packet_handler.hpp index 40ca24fb90..827315d441 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/common/ext_votes_packet_handler.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v3/common/ext_votes_packet_handler.hpp @@ -11,7 +11,7 @@ class VoteManager; class SlashingManager; } // namespace taraxa -namespace taraxa::network::tarcap::v4 { +namespace taraxa::network::tarcap::v3 { /** * @brief ExtVotesPacketHandler is extended abstract PacketHandler with added functions that are used in packet @@ -88,4 +88,4 @@ class ExtVotesPacketHandler : public PacketHandler { std::shared_ptr slashing_manager_; }; -} // namespace 
taraxa::network::tarcap::v4 +} // namespace taraxa::network::tarcap::v3 diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/common/packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v3/common/packet_handler.hpp similarity index 97% rename from libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/common/packet_handler.hpp rename to libraries/core_libs/network/include/network/tarcap/packets_handlers/v3/common/packet_handler.hpp index cf33bb23b2..6cd6d92c94 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/common/packet_handler.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v3/common/packet_handler.hpp @@ -13,7 +13,7 @@ #include "network/tarcap/taraxa_peer.hpp" #include "network/threadpool/packet_data.hpp" -namespace taraxa::network::tarcap::v4 { +namespace taraxa::network::tarcap::v3 { // class TimePeriodPacketsStats; @@ -83,4 +83,4 @@ class PacketHandler : public BasePacketHandler { LOG_OBJECTS_DEFINE }; -} // namespace taraxa::network::tarcap::v4 +} // namespace taraxa::network::tarcap::v3 diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/dag_block_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v3/dag_block_packet_handler.hpp similarity index 95% rename from libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/dag_block_packet_handler.hpp rename to libraries/core_libs/network/include/network/tarcap/packets_handlers/v3/dag_block_packet_handler.hpp index 6de3bb9fcf..8cb9dbec48 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/dag_block_packet_handler.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v3/dag_block_packet_handler.hpp @@ -6,7 +6,7 @@ namespace taraxa { class TransactionManager; } // namespace taraxa -namespace taraxa::network::tarcap::v4 { +namespace 
taraxa::network::tarcap::v3 { class DagBlockPacketHandler : public ExtSyncingPacketHandler { public: @@ -34,4 +34,4 @@ class DagBlockPacketHandler : public ExtSyncingPacketHandler { std::shared_ptr trx_mgr_{nullptr}; }; -} // namespace taraxa::network::tarcap::v4 +} // namespace taraxa::network::tarcap::v3 diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/dag_sync_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v3/dag_sync_packet_handler.hpp similarity index 93% rename from libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/dag_sync_packet_handler.hpp rename to libraries/core_libs/network/include/network/tarcap/packets_handlers/v3/dag_sync_packet_handler.hpp index f3702a7cb0..2b00fd089b 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/dag_sync_packet_handler.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v3/dag_sync_packet_handler.hpp @@ -6,7 +6,7 @@ namespace taraxa { class TransactionManager; } // namespace taraxa -namespace taraxa::network::tarcap::v4 { +namespace taraxa::network::tarcap::v3 { class DagSyncPacketHandler : public ExtSyncingPacketHandler { public: @@ -28,4 +28,4 @@ class DagSyncPacketHandler : public ExtSyncingPacketHandler { std::shared_ptr trx_mgr_{nullptr}; }; -} // namespace taraxa::network::tarcap::v4 +} // namespace taraxa::network::tarcap::v3 diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/get_dag_sync_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v3/get_dag_sync_packet_handler.hpp similarity index 94% rename from libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/get_dag_sync_packet_handler.hpp rename to libraries/core_libs/network/include/network/tarcap/packets_handlers/v3/get_dag_sync_packet_handler.hpp index a6e7b0b121..c8e3cc9ee3 100644 --- 
a/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/get_dag_sync_packet_handler.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v3/get_dag_sync_packet_handler.hpp @@ -9,7 +9,7 @@ class DbStorage; class TransactionManager; } // namespace taraxa -namespace taraxa::network::tarcap::v4 { +namespace taraxa::network::tarcap::v3 { class GetDagSyncPacketHandler : public PacketHandler { public: @@ -35,4 +35,4 @@ class GetDagSyncPacketHandler : public PacketHandler { std::shared_ptr db_; }; -} // namespace taraxa::network::tarcap::v4 +} // namespace taraxa::network::tarcap::v3 diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/get_next_votes_bundle_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v3/get_next_votes_bundle_packet_handler.hpp similarity index 93% rename from libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/get_next_votes_bundle_packet_handler.hpp rename to libraries/core_libs/network/include/network/tarcap/packets_handlers/v3/get_next_votes_bundle_packet_handler.hpp index e647cab96d..e154d7d418 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/get_next_votes_bundle_packet_handler.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v3/get_next_votes_bundle_packet_handler.hpp @@ -7,7 +7,7 @@ class PbftManager; class VoteManager; } // namespace taraxa -namespace taraxa::network::tarcap::v4 { +namespace taraxa::network::tarcap::v3 { class GetNextVotesBundlePacketHandler : public ExtVotesPacketHandler { public: @@ -26,4 +26,4 @@ class GetNextVotesBundlePacketHandler : public ExtVotesPacketHandler { virtual void process(const threadpool::PacketData& packet_data, const std::shared_ptr& peer) override; }; -} // namespace taraxa::network::tarcap::v4 +} // namespace taraxa::network::tarcap::v3 diff --git 
a/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/get_pbft_sync_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v3/get_pbft_sync_packet_handler.hpp similarity index 82% rename from libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/get_pbft_sync_packet_handler.hpp rename to libraries/core_libs/network/include/network/tarcap/packets_handlers/v3/get_pbft_sync_packet_handler.hpp index a736b34c5c..388eef0b6a 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/get_pbft_sync_packet_handler.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v3/get_pbft_sync_packet_handler.hpp @@ -1,34 +1,27 @@ #pragma once - -#include "common/packet_handler.hpp" - +#include "../v3/common/packet_handler.hpp" namespace taraxa { class PbftChain; class DbStorage; class VoteManager; } // namespace taraxa - namespace taraxa::network::tarcap { class PbftSyncingState; } - -namespace taraxa::network::tarcap::v4 { - +namespace taraxa::network::tarcap::v3 { class GetPbftSyncPacketHandler : public PacketHandler { public: - GetPbftSyncPacketHandler(const FullNodeConfig& conf, std::shared_ptr peers_state, - std::shared_ptr packets_stats, + GetPbftSyncPacketHandler(const FullNodeConfig& conf, std::shared_ptr peers_state, + std::shared_ptr packets_stats, std::shared_ptr pbft_syncing_state, std::shared_ptr pbft_chain, std::shared_ptr vote_mgr, std::shared_ptr db, const addr_t& node_addr, const std::string& logs_prefix = "GET_PBFT_SYNC_PH"); - // Packet type that is processed by this handler static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::kGetPbftSyncPacket; private: virtual void validatePacketRlpFormat(const threadpool::PacketData& packet_data) const override; virtual void process(const threadpool::PacketData& packet_data, const std::shared_ptr& peer) override; - virtual void sendPbftBlocks(const std::shared_ptr& peer, PbftPeriod 
from_period, size_t blocks_to_transfer, bool pbft_chain_synced); @@ -38,5 +31,4 @@ class GetPbftSyncPacketHandler : public PacketHandler { std::shared_ptr vote_mgr_; std::shared_ptr db_; }; - -} // namespace taraxa::network::tarcap::v4 +} // namespace taraxa::network::tarcap::v3 \ No newline at end of file diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/get_pillar_votes_bundle_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v3/get_pillar_votes_bundle_packet_handler.hpp similarity index 93% rename from libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/get_pillar_votes_bundle_packet_handler.hpp rename to libraries/core_libs/network/include/network/tarcap/packets_handlers/v3/get_pillar_votes_bundle_packet_handler.hpp index 0d56947afa..eb48aecc68 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/get_pillar_votes_bundle_packet_handler.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v3/get_pillar_votes_bundle_packet_handler.hpp @@ -3,7 +3,7 @@ #include "common/packet_handler.hpp" #include "pillar_chain/pillar_chain_manager.hpp" -namespace taraxa::network::tarcap::v4 { +namespace taraxa::network::tarcap::v3 { class GetPillarVotesBundlePacketHandler : public PacketHandler { public: @@ -28,4 +28,4 @@ class GetPillarVotesBundlePacketHandler : public PacketHandler { std::shared_ptr pillar_chain_manager_; }; -} // namespace taraxa::network::tarcap::v4 +} // namespace taraxa::network::tarcap::v3 diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/pbft_sync_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v3/pbft_sync_packet_handler.hpp similarity index 92% rename from libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/pbft_sync_packet_handler.hpp rename to 
libraries/core_libs/network/include/network/tarcap/packets_handlers/v3/pbft_sync_packet_handler.hpp index f00d044f93..25056f8018 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/pbft_sync_packet_handler.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v3/pbft_sync_packet_handler.hpp @@ -1,11 +1,8 @@ #pragma once - -#include "common/ext_syncing_packet_handler.hpp" +#include "../v3/common/ext_syncing_packet_handler.hpp" #include "common/thread_pool.hpp" #include "vote_manager/vote_manager.hpp" - -namespace taraxa::network::tarcap::v4 { - +namespace taraxa::network::tarcap::v3 { class PbftSyncPacketHandler : public ExtSyncingPacketHandler { public: PbftSyncPacketHandler(const FullNodeConfig& conf, std::shared_ptr peers_state, @@ -14,9 +11,7 @@ class PbftSyncPacketHandler : public ExtSyncingPacketHandler { std::shared_ptr pbft_mgr, std::shared_ptr dag_mgr, std::shared_ptr vote_mgr, std::shared_ptr db, const addr_t& node_addr, const std::string& logs_prefix = "PBFT_SYNC_PH"); - void handleMaliciousSyncPeer(const dev::p2p::NodeID& id); - // Packet type that is processed by this handler static constexpr SubprotocolPacketType kPacketType_ = SubprotocolPacketType::kPbftSyncPacket; @@ -27,17 +22,12 @@ class PbftSyncPacketHandler : public ExtSyncingPacketHandler { protected: virtual PeriodData decodePeriodData(const dev::RLP& period_data_rlp) const; virtual std::vector> decodeVotesBundle(const dev::RLP& votes_bundle_rlp) const; - void pbftSyncComplete(); void delayedPbftSync(uint32_t counter); - static constexpr uint32_t kDelayedPbftSyncDelayMs = 10; - std::shared_ptr vote_mgr_; util::ThreadPool periodic_events_tp_; - static constexpr size_t kStandardPacketSize = 2; static constexpr size_t kChainSyncedPacketSize = 3; }; - -} // namespace taraxa::network::tarcap::v4 +} // namespace taraxa::network::tarcap::v3 \ No newline at end of file diff --git 
a/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/pillar_vote_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v3/pillar_vote_packet_handler.hpp similarity index 87% rename from libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/pillar_vote_packet_handler.hpp rename to libraries/core_libs/network/include/network/tarcap/packets_handlers/v3/pillar_vote_packet_handler.hpp index a94781ca19..ccec8fc878 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/pillar_vote_packet_handler.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v3/pillar_vote_packet_handler.hpp @@ -1,8 +1,8 @@ #pragma once -#include "network/tarcap/packets_handlers/v4/common/ext_pillar_vote_packet_handler.hpp" +#include "network/tarcap/packets_handlers/v3/common/ext_pillar_vote_packet_handler.hpp" -namespace taraxa::network::tarcap::v4 { +namespace taraxa::network::tarcap::v3 { class PillarVotePacketHandler : public ExtPillarVotePacketHandler { public: @@ -22,4 +22,4 @@ class PillarVotePacketHandler : public ExtPillarVotePacketHandler { virtual void process(const threadpool::PacketData& packet_data, const std::shared_ptr& peer) override; }; -} // namespace taraxa::network::tarcap::v4 +} // namespace taraxa::network::tarcap::v3 diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/pillar_votes_bundle_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v3/pillar_votes_bundle_packet_handler.hpp similarity index 86% rename from libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/pillar_votes_bundle_packet_handler.hpp rename to libraries/core_libs/network/include/network/tarcap/packets_handlers/v3/pillar_votes_bundle_packet_handler.hpp index 40cc624119..efa63c4684 100644 --- 
a/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/pillar_votes_bundle_packet_handler.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v3/pillar_votes_bundle_packet_handler.hpp @@ -1,8 +1,8 @@ #pragma once -#include "network/tarcap/packets_handlers/v4/common/ext_pillar_vote_packet_handler.hpp" +#include "network/tarcap/packets_handlers/v3/common/ext_pillar_vote_packet_handler.hpp" -namespace taraxa::network::tarcap::v4 { +namespace taraxa::network::tarcap::v3 { class PillarVotesBundlePacketHandler : public ExtPillarVotePacketHandler { public: @@ -22,4 +22,4 @@ class PillarVotesBundlePacketHandler : public ExtPillarVotePacketHandler { constexpr static size_t kMaxPillarVotesInBundleRlp{250}; }; -} // namespace taraxa::network::tarcap::v4 +} // namespace taraxa::network::tarcap::v3 diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/status_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v3/status_packet_handler.hpp similarity index 94% rename from libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/status_packet_handler.hpp rename to libraries/core_libs/network/include/network/tarcap/packets_handlers/v3/status_packet_handler.hpp index 7e6978c91e..6e19de6324 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/status_packet_handler.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v3/status_packet_handler.hpp @@ -2,7 +2,7 @@ #include "common/ext_syncing_packet_handler.hpp" -namespace taraxa::network::tarcap::v4 { +namespace taraxa::network::tarcap::v3 { class StatusPacketHandler : public ExtSyncingPacketHandler { public: @@ -30,4 +30,4 @@ class StatusPacketHandler : public ExtSyncingPacketHandler { const h256 kGenesisHash; }; -} // namespace taraxa::network::tarcap::v4 +} // namespace taraxa::network::tarcap::v3 diff --git 
a/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/transaction_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v3/transaction_packet_handler.hpp similarity index 97% rename from libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/transaction_packet_handler.hpp rename to libraries/core_libs/network/include/network/tarcap/packets_handlers/v3/transaction_packet_handler.hpp index f40167e567..6db64feb81 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/transaction_packet_handler.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v3/transaction_packet_handler.hpp @@ -8,7 +8,7 @@ class TransactionManager; enum class TransactionStatus; } // namespace taraxa -namespace taraxa::network::tarcap::v4 { +namespace taraxa::network::tarcap::v3 { class TestState; @@ -75,4 +75,4 @@ class TransactionPacketHandler : public PacketHandler { std::atomic unique_received_trx_count_{0}; }; -} // namespace taraxa::network::tarcap::v4 +} // namespace taraxa::network::tarcap::v3 diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/vote_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v3/vote_packet_handler.hpp similarity index 94% rename from libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/vote_packet_handler.hpp rename to libraries/core_libs/network/include/network/tarcap/packets_handlers/v3/vote_packet_handler.hpp index aa24da7787..26796cc6d0 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/vote_packet_handler.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v3/vote_packet_handler.hpp @@ -2,7 +2,7 @@ #include "common/ext_votes_packet_handler.hpp" -namespace taraxa::network::tarcap::v4 { +namespace taraxa::network::tarcap::v3 { class VotePacketHandler : public ExtVotesPacketHandler { public: @@ 
-36,4 +36,4 @@ class VotePacketHandler : public ExtVotesPacketHandler { const size_t kExtendedVotePacketSize{3}; }; -} // namespace taraxa::network::tarcap::v4 +} // namespace taraxa::network::tarcap::v3 diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/votes_bundle_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v3/votes_bundle_packet_handler.hpp similarity index 94% rename from libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/votes_bundle_packet_handler.hpp rename to libraries/core_libs/network/include/network/tarcap/packets_handlers/v3/votes_bundle_packet_handler.hpp index e42e33c2d2..972e9e64da 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets_handlers/v4/votes_bundle_packet_handler.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/v3/votes_bundle_packet_handler.hpp @@ -2,7 +2,7 @@ #include "common/ext_votes_packet_handler.hpp" -namespace taraxa::network::tarcap::v4 { +namespace taraxa::network::tarcap::v3 { class VotesBundlePacketHandler : public ExtVotesPacketHandler { public: @@ -30,4 +30,4 @@ class VotesBundlePacketHandler : public ExtVotesPacketHandler { virtual void process(const threadpool::PacketData& packet_data, const std::shared_ptr& peer) override; }; -} // namespace taraxa::network::tarcap::v4 +} // namespace taraxa::network::tarcap::v3 diff --git a/libraries/core_libs/network/src/network.cpp b/libraries/core_libs/network/src/network.cpp index 3fc7f48cf0..5d991f768f 100644 --- a/libraries/core_libs/network/src/network.cpp +++ b/libraries/core_libs/network/src/network.cpp @@ -15,14 +15,14 @@ #include "network/tarcap/packets_handlers/latest/transaction_packet_handler.hpp" #include "network/tarcap/packets_handlers/latest/vote_packet_handler.hpp" #include "network/tarcap/packets_handlers/latest/votes_bundle_packet_handler.hpp" -#include "network/tarcap/packets_handlers/v4/dag_block_packet_handler.hpp" 
-#include "network/tarcap/packets_handlers/v4/get_pillar_votes_bundle_packet_handler.hpp" -#include "network/tarcap/packets_handlers/v4/pbft_sync_packet_handler.hpp" -#include "network/tarcap/packets_handlers/v4/pillar_vote_packet_handler.hpp" -#include "network/tarcap/packets_handlers/v4/status_packet_handler.hpp" -#include "network/tarcap/packets_handlers/v4/transaction_packet_handler.hpp" -#include "network/tarcap/packets_handlers/v4/vote_packet_handler.hpp" -#include "network/tarcap/packets_handlers/v4/votes_bundle_packet_handler.hpp" +#include "network/tarcap/packets_handlers/v3/dag_block_packet_handler.hpp" +#include "network/tarcap/packets_handlers/v3/get_pillar_votes_bundle_packet_handler.hpp" +#include "network/tarcap/packets_handlers/v3/pbft_sync_packet_handler.hpp" +#include "network/tarcap/packets_handlers/v3/pillar_vote_packet_handler.hpp" +#include "network/tarcap/packets_handlers/v3/status_packet_handler.hpp" +#include "network/tarcap/packets_handlers/v3/transaction_packet_handler.hpp" +#include "network/tarcap/packets_handlers/v3/vote_packet_handler.hpp" +#include "network/tarcap/packets_handlers/v3/votes_bundle_packet_handler.hpp" #include "network/tarcap/shared_states/pbft_syncing_state.hpp" #include "network/tarcap/stats/node_stats.hpp" #include "network/tarcap/stats/time_period_packets_stats.hpp" @@ -82,16 +82,16 @@ Network::Network(const FullNodeConfig &config, const h256 &genesis_hash, std::fi dev::p2p::Host::CapabilitiesFactory constructCapabilities = [&](std::weak_ptr host) { assert(!host.expired()); - assert(kV4NetworkVersion < TARAXA_NET_VERSION); + assert(kV3NetworkVersion < TARAXA_NET_VERSION); dev::p2p::Host::CapabilityList capabilities; // Register old version (V4) of taraxa capability - auto v4_tarcap = std::make_shared( - kV4NetworkVersion, config, genesis_hash, host, key, packets_tp_, all_packets_stats_, pbft_syncing_state_, db, + auto v3_tarcap = std::make_shared( + kV3NetworkVersion, config, genesis_hash, host, key, packets_tp_, 
all_packets_stats_, pbft_syncing_state_, db, pbft_mgr, pbft_chain, vote_mgr, dag_mgr, trx_mgr, slashing_manager, pillar_chain_mgr, network::tarcap::TaraxaCapability::kInitV4Handlers); - capabilities.emplace_back(v4_tarcap); + capabilities.emplace_back(v3_tarcap); // Register latest version of taraxa capability auto latest_tarcap = std::make_shared( @@ -196,7 +196,7 @@ void Network::registerPeriodicEvents(const std::shared_ptr &pbft_mg auto tx_packet_handler = tarcap.second->getSpecificHandler(); tx_packet_handler->periodicSendTransactions(trx_mgr->getAllPoolTrxs()); } else { - auto tx_packet_handler = tarcap.second->getSpecificHandler(); + auto tx_packet_handler = tarcap.second->getSpecificHandler(); tx_packet_handler->periodicSendTransactions(trx_mgr->getAllPoolTrxs()); } } @@ -211,7 +211,7 @@ void Network::registerPeriodicEvents(const std::shared_ptr &pbft_mg auto status_packet_handler = tarcap.second->getSpecificHandler(); status_packet_handler->sendStatusToPeers(); } else { - auto status_packet_handler = tarcap.second->getSpecificHandler(); + auto status_packet_handler = tarcap.second->getSpecificHandler(); status_packet_handler->sendStatusToPeers(); } } @@ -317,7 +317,7 @@ void Network::gossipDagBlock(const std::shared_ptr &block, bool propos tarcap.second->getSpecificHandler()->onNewBlockVerified(block, proposed, trxs); } else { - tarcap.second->getSpecificHandler()->onNewBlockVerified( + tarcap.second->getSpecificHandler()->onNewBlockVerified( block, proposed, trxs); } } @@ -330,7 +330,7 @@ void Network::gossipVote(const std::shared_ptr &vote, const std::share if (tarcap.first == TARAXA_NET_VERSION) { tarcap.second->getSpecificHandler()->onNewPbftVote(vote, block, rebroadcast); } else { - tarcap.second->getSpecificHandler()->onNewPbftVote(vote, block, + tarcap.second->getSpecificHandler()->onNewPbftVote(vote, block, rebroadcast); } } @@ -343,7 +343,7 @@ void Network::gossipVotesBundle(const std::vector> &vo 
tarcap.second->getSpecificHandler()->onNewPbftVotesBundle(votes, rebroadcast); } else { - tarcap.second->getSpecificHandler()->onNewPbftVotesBundle( + tarcap.second->getSpecificHandler()->onNewPbftVotesBundle( votes, rebroadcast); } } @@ -355,7 +355,7 @@ void Network::gossipPillarBlockVote(const std::shared_ptr &vote, boo if (tarcap.first == TARAXA_NET_VERSION) { tarcap.second->getSpecificHandler()->onNewPillarVote(vote, rebroadcast); } else { - tarcap.second->getSpecificHandler()->onNewPillarVote(vote, + tarcap.second->getSpecificHandler()->onNewPillarVote(vote, rebroadcast); } } @@ -372,7 +372,7 @@ void Network::handleMaliciousSyncPeer(const dev::p2p::NodeID &node_id) { if (tarcap.first == TARAXA_NET_VERSION) { tarcap.second->getSpecificHandler()->handleMaliciousSyncPeer(node_id); } else { - tarcap.second->getSpecificHandler()->handleMaliciousSyncPeer(node_id); + tarcap.second->getSpecificHandler()->handleMaliciousSyncPeer(node_id); } } } @@ -387,7 +387,7 @@ std::shared_ptr Network::getMaxChainPeer() const { peer = tarcap.second->getSpecificHandler<::taraxa::network::tarcap::PbftSyncPacketHandler>()->getMaxChainPeer(); } else { peer = - tarcap.second->getSpecificHandler<::taraxa::network::tarcap::v4::PbftSyncPacketHandler>()->getMaxChainPeer(); + tarcap.second->getSpecificHandler<::taraxa::network::tarcap::v3::PbftSyncPacketHandler>()->getMaxChainPeer(); } if (!peer) { @@ -413,7 +413,7 @@ void Network::requestPillarBlockVotesBundle(taraxa::PbftPeriod period, const tar peer = tarcap.second->getSpecificHandler<::taraxa::network::tarcap::PbftSyncPacketHandler>()->getMaxChainPeer(); } else { peer = - tarcap.second->getSpecificHandler<::taraxa::network::tarcap::v4::PbftSyncPacketHandler>()->getMaxChainPeer(); + tarcap.second->getSpecificHandler<::taraxa::network::tarcap::v3::PbftSyncPacketHandler>()->getMaxChainPeer(); } if (!peer) { @@ -426,7 +426,7 @@ void Network::requestPillarBlockVotesBundle(taraxa::PbftPeriod period, const tar 
tarcap.second->getSpecificHandler()->requestPillarVotesBundle( period, pillar_block_hash, peer); } else { - tarcap.second->getSpecificHandler() + tarcap.second->getSpecificHandler() ->requestPillarVotesBundle(period, pillar_block_hash, peer); } } diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/v4/common/ext_bls_sig_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/v3/common/ext_bls_sig_packet_handler.cpp similarity index 90% rename from libraries/core_libs/network/src/tarcap/packets_handlers/v4/common/ext_bls_sig_packet_handler.cpp rename to libraries/core_libs/network/src/tarcap/packets_handlers/v3/common/ext_bls_sig_packet_handler.cpp index ff85908beb..e6ce8b8677 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/v4/common/ext_bls_sig_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/v3/common/ext_bls_sig_packet_handler.cpp @@ -1,7 +1,7 @@ -#include "network/tarcap/packets_handlers/v4/common/ext_pillar_vote_packet_handler.hpp" +#include "network/tarcap/packets_handlers/v3/common/ext_pillar_vote_packet_handler.hpp" #include "pillar_chain/pillar_chain_manager.hpp" -namespace taraxa::network::tarcap::v4 { +namespace taraxa::network::tarcap::v3 { ExtPillarVotePacketHandler::ExtPillarVotePacketHandler( const FullNodeConfig &conf, std::shared_ptr peers_state, @@ -34,4 +34,4 @@ bool ExtPillarVotePacketHandler::processPillarVote(const std::shared_ptr peers_state, std::shared_ptr packets_stats, @@ -164,4 +164,4 @@ void ExtSyncingPacketHandler::requestDagBlocks(const dev::p2p::NodeID &_nodeID, sealAndSend(_nodeID, SubprotocolPacketType::kGetDagSyncPacket, std::move(s)); } -} // namespace taraxa::network::tarcap::v4 +} // namespace taraxa::network::tarcap::v3 diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/v4/common/ext_votes_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/v3/common/ext_votes_packet_handler.cpp similarity index 
98% rename from libraries/core_libs/network/src/tarcap/packets_handlers/v4/common/ext_votes_packet_handler.cpp rename to libraries/core_libs/network/src/tarcap/packets_handlers/v3/common/ext_votes_packet_handler.cpp index 60bf78ca0a..774342ffa7 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/v4/common/ext_votes_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/v3/common/ext_votes_packet_handler.cpp @@ -1,4 +1,4 @@ -#include "network/tarcap/packets_handlers/v4/common/ext_votes_packet_handler.hpp" +#include "network/tarcap/packets_handlers/v3/common/ext_votes_packet_handler.hpp" #include "network/tarcap/packets_handlers/latest/common/exceptions.hpp" #include "pbft/pbft_manager.hpp" @@ -6,7 +6,7 @@ #include "vote/votes_bundle_rlp.hpp" #include "vote_manager/vote_manager.hpp" -namespace taraxa::network::tarcap::v4 { +namespace taraxa::network::tarcap::v3 { ExtVotesPacketHandler::ExtVotesPacketHandler(const FullNodeConfig &conf, std::shared_ptr peers_state, std::shared_ptr packets_stats, @@ -222,4 +222,4 @@ void ExtVotesPacketHandler::sendPbftVotesBundle(const std::shared_ptr peers_state, std::shared_ptr packets_stats, const addr_t& node_addr, @@ -141,4 +141,4 @@ void PacketHandler::requestPbftNextVotesAtPeriodRound(const dev::p2p::NodeID& pe std::move(dev::RLPStream(2) << pbft_period << pbft_round)); } -} // namespace taraxa::network::tarcap::v4 +} // namespace taraxa::network::tarcap::v3 diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/v4/dag_block_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/v3/dag_block_packet_handler.cpp similarity index 98% rename from libraries/core_libs/network/src/tarcap/packets_handlers/v4/dag_block_packet_handler.cpp rename to libraries/core_libs/network/src/tarcap/packets_handlers/v3/dag_block_packet_handler.cpp index 65c1754512..5a707de443 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/v4/dag_block_packet_handler.cpp 
+++ b/libraries/core_libs/network/src/tarcap/packets_handlers/v3/dag_block_packet_handler.cpp @@ -1,11 +1,11 @@ -#include "network/tarcap/packets_handlers/v4/dag_block_packet_handler.hpp" +#include "network/tarcap/packets_handlers/v3/dag_block_packet_handler.hpp" #include "dag/dag_manager.hpp" -#include "network/tarcap/packets_handlers/v4/transaction_packet_handler.hpp" +#include "network/tarcap/packets_handlers/v3/transaction_packet_handler.hpp" #include "network/tarcap/shared_states/pbft_syncing_state.hpp" #include "transaction/transaction_manager.hpp" -namespace taraxa::network::tarcap::v4 { +namespace taraxa::network::tarcap::v3 { DagBlockPacketHandler::DagBlockPacketHandler(const FullNodeConfig &conf, std::shared_ptr peers_state, std::shared_ptr packets_stats, @@ -251,4 +251,4 @@ void DagBlockPacketHandler::onNewBlockVerified(const std::shared_ptr & LOG(log_dg_) << "Send DagBlock " << block->getHash() << " to peers: " << peer_and_transactions_to_log; if (!peers_to_send.empty()) LOG(log_tr_) << "Sent block to " << peers_to_send.size() << " peers"; } -} // namespace taraxa::network::tarcap::v4 +} // namespace taraxa::network::tarcap::v3 diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/v4/dag_sync_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/v3/dag_sync_packet_handler.cpp similarity index 96% rename from libraries/core_libs/network/src/tarcap/packets_handlers/v4/dag_sync_packet_handler.cpp rename to libraries/core_libs/network/src/tarcap/packets_handlers/v3/dag_sync_packet_handler.cpp index f2b6894921..0bf75ed916 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/v4/dag_sync_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/v3/dag_sync_packet_handler.cpp @@ -1,12 +1,12 @@ -#include "network/tarcap/packets_handlers/v4/dag_sync_packet_handler.hpp" +#include "network/tarcap/packets_handlers/v3/dag_sync_packet_handler.hpp" #include "dag/dag.hpp" -#include 
"network/tarcap/packets_handlers/v4/common/ext_syncing_packet_handler.hpp" +#include "network/tarcap/packets_handlers/v3/common/ext_syncing_packet_handler.hpp" #include "network/tarcap/shared_states/pbft_syncing_state.hpp" #include "transaction/transaction.hpp" #include "transaction/transaction_manager.hpp" -namespace taraxa::network::tarcap::v4 { +namespace taraxa::network::tarcap::v3 { DagSyncPacketHandler::DagSyncPacketHandler(const FullNodeConfig& conf, std::shared_ptr peers_state, std::shared_ptr packets_stats, @@ -129,4 +129,4 @@ void DagSyncPacketHandler::process(const threadpool::PacketData& packet_data, co << " Transactions: " << transactions_to_log << " from " << packet_data.from_node_id_; } -} // namespace taraxa::network::tarcap::v4 +} // namespace taraxa::network::tarcap::v3 diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/v4/get_dag_sync_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/v3/get_dag_sync_packet_handler.cpp similarity index 96% rename from libraries/core_libs/network/src/tarcap/packets_handlers/v4/get_dag_sync_packet_handler.cpp rename to libraries/core_libs/network/src/tarcap/packets_handlers/v3/get_dag_sync_packet_handler.cpp index bcdcb938bd..eedf57a1e9 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/v4/get_dag_sync_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/v3/get_dag_sync_packet_handler.cpp @@ -1,9 +1,9 @@ -#include "network/tarcap/packets_handlers/v4/get_dag_sync_packet_handler.hpp" +#include "network/tarcap/packets_handlers/v3/get_dag_sync_packet_handler.hpp" #include "dag/dag_manager.hpp" #include "transaction/transaction_manager.hpp" -namespace taraxa::network::tarcap::v4 { +namespace taraxa::network::tarcap::v3 { GetDagSyncPacketHandler::GetDagSyncPacketHandler(const FullNodeConfig &conf, std::shared_ptr peers_state, std::shared_ptr packets_stats, @@ -86,4 +86,4 @@ void GetDagSyncPacketHandler::sendBlocks(const 
dev::p2p::NodeID &peer_id, sealAndSend(peer_id, SubprotocolPacketType::kDagSyncPacket, std::move(s)); } -} // namespace taraxa::network::tarcap::v4 +} // namespace taraxa::network::tarcap::v3 diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/v4/get_next_votes_bundle_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/v3/get_next_votes_bundle_packet_handler.cpp similarity index 96% rename from libraries/core_libs/network/src/tarcap/packets_handlers/v4/get_next_votes_bundle_packet_handler.cpp rename to libraries/core_libs/network/src/tarcap/packets_handlers/v3/get_next_votes_bundle_packet_handler.cpp index 8ca116992b..e0d38453a2 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/v4/get_next_votes_bundle_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/v3/get_next_votes_bundle_packet_handler.cpp @@ -1,9 +1,9 @@ -#include "network/tarcap/packets_handlers/v4/get_next_votes_bundle_packet_handler.hpp" +#include "network/tarcap/packets_handlers/v3/get_next_votes_bundle_packet_handler.hpp" #include "pbft/pbft_manager.hpp" #include "vote_manager/vote_manager.hpp" -namespace taraxa::network::tarcap::v4 { +namespace taraxa::network::tarcap::v3 { GetNextVotesBundlePacketHandler::GetNextVotesBundlePacketHandler( const FullNodeConfig &conf, std::shared_ptr peers_state, @@ -79,4 +79,4 @@ void GetNextVotesBundlePacketHandler::process(const threadpool::PacketData &pack } } -} // namespace taraxa::network::tarcap::v4 +} // namespace taraxa::network::tarcap::v3 diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/v4/get_pbft_sync_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/v3/get_pbft_sync_packet_handler.cpp similarity index 95% rename from libraries/core_libs/network/src/tarcap/packets_handlers/v4/get_pbft_sync_packet_handler.cpp rename to libraries/core_libs/network/src/tarcap/packets_handlers/v3/get_pbft_sync_packet_handler.cpp index 
d76698ec95..83f0bd45f9 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/v4/get_pbft_sync_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/v3/get_pbft_sync_packet_handler.cpp @@ -1,14 +1,13 @@ -#include "network/tarcap/packets_handlers/v4/get_pbft_sync_packet_handler.hpp" +#include "network/tarcap/packets_handlers/v3/get_pbft_sync_packet_handler.hpp" #include "network/tarcap/shared_states/pbft_syncing_state.hpp" #include "pbft/pbft_chain.hpp" +#include "pbft/period_data.hpp" #include "storage/storage.hpp" #include "vote/pbft_vote.hpp" #include "vote/votes_bundle_rlp.hpp" #include "vote_manager/vote_manager.hpp" - -namespace taraxa::network::tarcap::v4 { - +namespace taraxa::network::tarcap::v3 { GetPbftSyncPacketHandler::GetPbftSyncPacketHandler(const FullNodeConfig &conf, std::shared_ptr peers_state, std::shared_ptr packets_stats, std::shared_ptr pbft_syncing_state, @@ -21,17 +20,14 @@ GetPbftSyncPacketHandler::GetPbftSyncPacketHandler(const FullNodeConfig &conf, s pbft_chain_(std::move(pbft_chain)), vote_mgr_(std::move(vote_mgr)), db_(std::move(db)) {} - void GetPbftSyncPacketHandler::validatePacketRlpFormat(const threadpool::PacketData &packet_data) const { if (constexpr size_t required_size = 1; packet_data.rlp_.itemCount() != required_size) { throw InvalidRlpItemsCountException(packet_data.type_str_, packet_data.rlp_.itemCount(), required_size); } } - void GetPbftSyncPacketHandler::process(const threadpool::PacketData &packet_data, const std::shared_ptr &peer) { LOG(log_tr_) << "Received GetPbftSyncPacket Block"; - const size_t height_to_sync = packet_data.rlp_[0].toInt(); // Here need PBFT chain size, not synced period since synced blocks has not verified yet. const size_t my_chain_size = pbft_chain_->getPbftChainSize(); @@ -42,14 +38,12 @@ void GetPbftSyncPacketHandler::process(const threadpool::PacketData &packet_data << ". 
That's bigger than own PBFT chain size " << my_chain_size; throw MaliciousPeerException(err_msg.str()); } - if (kConf.is_light_node && height_to_sync + kConf.light_node_history <= my_chain_size) { std::ostringstream err_msg; err_msg << "Peer " << packet_data.from_node_id_ << " request syncing period start at " << height_to_sync << ". Light node does not have the data " << my_chain_size; throw MaliciousPeerException(err_msg.str()); } - size_t blocks_to_transfer = 0; auto pbft_chain_synced = false; const auto total_period_data_size = my_chain_size - height_to_sync + 1; @@ -60,27 +54,23 @@ void GetPbftSyncPacketHandler::process(const threadpool::PacketData &packet_data blocks_to_transfer = kConf.network.sync_level_size; } LOG(log_tr_) << "Will send " << blocks_to_transfer << " PBFT blocks to " << packet_data.from_node_id_; - sendPbftBlocks(peer, height_to_sync, blocks_to_transfer, pbft_chain_synced); } - // api for pbft syncing void GetPbftSyncPacketHandler::sendPbftBlocks(const std::shared_ptr &peer, PbftPeriod from_period, size_t blocks_to_transfer, bool pbft_chain_synced) { const auto &peer_id = peer->getId(); LOG(log_tr_) << "sendPbftBlocks: peer want to sync from pbft chain height " << from_period << ", will send at most " << blocks_to_transfer << " pbft blocks to " << peer_id; - for (auto block_period = from_period; block_period < from_period + blocks_to_transfer; block_period++) { bool last_block = (block_period == from_period + blocks_to_transfer - 1); auto data = db_->getPeriodDataRaw(block_period); - if (data.size() == 0) { // This can happen when switching from light node to full node setting LOG(log_er_) << "DB corrupted. 
Cannot find period " << block_period << " PBFT block in db"; return; } - + data = PeriodData::ToOldPeriodData(data); dev::RLPStream s; if (pbft_chain_synced && last_block) { // Latest finalized block cert votes are saved in db as reward votes for new blocks @@ -102,7 +92,6 @@ void GetPbftSyncPacketHandler::sendPbftBlocks(const std::shared_ptr s << last_block; s.appendRaw(data); } - LOG(log_dg_) << "Sending PbftSyncPacket period " << block_period << " to " << peer_id; sealAndSend(peer_id, SubprotocolPacketType::kPbftSyncPacket, std::move(s)); if (pbft_chain_synced && last_block) { @@ -110,5 +99,4 @@ void GetPbftSyncPacketHandler::sendPbftBlocks(const std::shared_ptr } } } - -} // namespace taraxa::network::tarcap::v4 +} // namespace taraxa::network::tarcap::v3 \ No newline at end of file diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/v4/get_pillar_votes_bundle_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/v3/get_pillar_votes_bundle_packet_handler.cpp similarity index 95% rename from libraries/core_libs/network/src/tarcap/packets_handlers/v4/get_pillar_votes_bundle_packet_handler.cpp rename to libraries/core_libs/network/src/tarcap/packets_handlers/v3/get_pillar_votes_bundle_packet_handler.cpp index e3d2086391..8e4fe9c569 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/v4/get_pillar_votes_bundle_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/v3/get_pillar_votes_bundle_packet_handler.cpp @@ -1,8 +1,8 @@ -#include "network/tarcap/packets_handlers/v4/get_pillar_votes_bundle_packet_handler.hpp" +#include "network/tarcap/packets_handlers/v3/get_pillar_votes_bundle_packet_handler.hpp" -#include "network/tarcap/packets_handlers/v4/pillar_votes_bundle_packet_handler.hpp" +#include "network/tarcap/packets_handlers/v3/pillar_votes_bundle_packet_handler.hpp" -namespace taraxa::network::tarcap::v4 { +namespace taraxa::network::tarcap::v3 { 
GetPillarVotesBundlePacketHandler::GetPillarVotesBundlePacketHandler( const FullNodeConfig &conf, std::shared_ptr peers_state, @@ -95,4 +95,4 @@ void GetPillarVotesBundlePacketHandler::requestPillarVotesBundle(PbftPeriod peri } } -} // namespace taraxa::network::tarcap::v4 +} // namespace taraxa::network::tarcap::v3 diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/v4/pbft_sync_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/v3/pbft_sync_packet_handler.cpp similarity index 98% rename from libraries/core_libs/network/src/tarcap/packets_handlers/v4/pbft_sync_packet_handler.cpp rename to libraries/core_libs/network/src/tarcap/packets_handlers/v3/pbft_sync_packet_handler.cpp index a7acc54653..8af76978f3 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/v4/pbft_sync_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/v3/pbft_sync_packet_handler.cpp @@ -1,4 +1,4 @@ -#include "network/tarcap/packets_handlers/v4/pbft_sync_packet_handler.hpp" +#include "network/tarcap/packets_handlers/v3/pbft_sync_packet_handler.hpp" #include "network/tarcap/shared_states/pbft_syncing_state.hpp" #include "pbft/pbft_chain.hpp" @@ -6,9 +6,7 @@ #include "transaction/transaction_manager.hpp" #include "vote/pbft_vote.hpp" #include "vote/votes_bundle_rlp.hpp" - -namespace taraxa::network::tarcap::v4 { - +namespace taraxa::network::tarcap::v3 { PbftSyncPacketHandler::PbftSyncPacketHandler(const FullNodeConfig &conf, std::shared_ptr peers_state, std::shared_ptr packets_stats, std::shared_ptr pbft_syncing_state, @@ -21,12 +19,10 @@ PbftSyncPacketHandler::PbftSyncPacketHandler(const FullNodeConfig &conf, std::sh logs_prefix + "PBFT_SYNC_PH"), vote_mgr_(std::move(vote_mgr)), periodic_events_tp_(1, true) {} - void PbftSyncPacketHandler::validatePacketRlpFormat(const threadpool::PacketData &packet_data) const { if (packet_data.rlp_.itemCount() != kStandardPacketSize && packet_data.rlp_.itemCount() != 
kChainSyncedPacketSize) { throw InvalidRlpItemsCountException(packet_data.type_str_, packet_data.rlp_.itemCount(), kStandardPacketSize); } - // PeriodData rlp parsing cannot be done through util::rlp_tuple, which automatically checks the rlp size so it is // checked here manually if (packet_data.rlp_[1].itemCount() != PeriodData::kBaseRlpItemCount && @@ -35,7 +31,6 @@ void PbftSyncPacketHandler::validatePacketRlpFormat(const threadpool::PacketData PeriodData::kBaseRlpItemCount); } } - void PbftSyncPacketHandler::process(const threadpool::PacketData &packet_data, const std::shared_ptr &peer) { // Note: no need to consider possible race conditions due to concurrent processing as it is @@ -46,13 +41,11 @@ void PbftSyncPacketHandler::process(const threadpool::PacketData &packet_data, << " but there is no current syncing peer set"; return; } - if (syncing_peer->getId() != packet_data.from_node_id_) { LOG(log_wr_) << "PbftSyncPacket received from unexpected peer " << packet_data.from_node_id_.abridged() << " current syncing peer " << syncing_peer->getId().abridged(); return; } - // Process received pbft blocks // pbft_chain_synced is the flag to indicate own PBFT chain has synced with the peer's PBFT chain const bool pbft_chain_synced = packet_data.rlp_.itemCount() == kChainSyncedPacketSize; @@ -64,13 +57,11 @@ void PbftSyncPacketHandler::process(const threadpool::PacketData &packet_data, } catch (const std::runtime_error &e) { throw MaliciousPeerException("Unable to parse PeriodData: " + std::string(e.what())); } - std::vector> current_block_cert_votes; if (pbft_chain_synced) { current_block_cert_votes = decodeVotesBundle(packet_data.rlp_[2]); } const auto pbft_blk_hash = period_data.pbft_blk->getBlockHash(); - std::string received_dag_blocks_str; // This is just log related stuff for (auto const &block : period_data.dag_blocks) { received_dag_blocks_str += block->getHash().toString() + " "; @@ -78,19 +69,15 @@ void PbftSyncPacketHandler::process(const 
threadpool::PacketData &packet_data, peer->dag_level_ = block->getLevel(); } } - const auto pbft_block_period = period_data.pbft_blk->getPeriod(); LOG(log_dg_) << "PbftSyncPacket received. Period: " << pbft_block_period << ", dag Blocks: " << received_dag_blocks_str << " from " << packet_data.from_node_id_; - peer->markPbftBlockAsKnown(pbft_blk_hash); // Update peer's pbft period if outdated if (peer->pbft_chain_size_ < pbft_block_period) { peer->pbft_chain_size_ = pbft_block_period; } - LOG(log_tr_) << "Processing pbft block: " << pbft_blk_hash; - if (pbft_chain_->findPbftBlockInChain(pbft_blk_hash)) { LOG(log_wr_) << "PBFT block " << pbft_blk_hash << ", period: " << period_data.pbft_blk->getPeriod() << " from " << packet_data.from_node_id_ << " already present in chain"; @@ -101,12 +88,10 @@ void PbftSyncPacketHandler::process(const threadpool::PacketData &packet_data, pbftSyncComplete(); return; } - LOG(log_er_) << "Block " << pbft_blk_hash << " period unexpected: " << pbft_block_period << ". 
Expected period: " << pbft_mgr_->pbftSyncingPeriod() + 1; return; } - // Check cert vote matches if final synced block if (pbft_chain_synced) { for (auto const &vote : current_block_cert_votes) { @@ -118,7 +103,6 @@ void PbftSyncPacketHandler::process(const threadpool::PacketData &packet_data, } } } - // Check votes match the hash of previous block in the queue auto last_pbft_block_hash = pbft_mgr_->lastPbftBlockHashFromQueueOrChain(); // Check cert vote matches @@ -131,12 +115,10 @@ void PbftSyncPacketHandler::process(const threadpool::PacketData &packet_data, return; } } - if (!pbft_mgr_->validatePillarDataInPeriodData(period_data)) { handleMaliciousSyncPeer(packet_data.from_node_id_); return; } - auto order_hash = PbftManager::calculateOrderHash(period_data.dag_blocks); if (order_hash != period_data.pbft_blk->getOrderHash()) { { // This is just log related stuff @@ -158,7 +140,6 @@ void PbftSyncPacketHandler::process(const threadpool::PacketData &packet_data, handleMaliciousSyncPeer(packet_data.from_node_id_); return; } - // This is special case when queue is empty and we can not say for sure that all votes that are part of this block // have been verified before if (pbft_mgr_->periodDataQueueEmpty()) { @@ -170,10 +151,8 @@ void PbftSyncPacketHandler::process(const threadpool::PacketData &packet_data, handleMaliciousSyncPeer(packet_data.from_node_id_); return; } - vote_mgr_->addVerifiedVote(v); } - // And now we need to replace it with verified votes if (auto votes = vote_mgr_->checkRewardVotes(period_data.pbft_blk, true); votes.first) { period_data.previous_block_cert_votes = std::move(votes.second); @@ -184,31 +163,25 @@ void PbftSyncPacketHandler::process(const threadpool::PacketData &packet_data, pbft_syncing_state_->setPbftSyncing(false); return; } - LOG(log_er_) << "Invalid reward votes in block " << period_data.pbft_blk->getBlockHash() << " from peer " << packet_data.from_node_id_.abridged() << " received, stop syncing."; 
handleMaliciousSyncPeer(packet_data.from_node_id_); return; } } - LOG(log_tr_) << "Synced PBFT block hash " << pbft_blk_hash << " with " << period_data.previous_block_cert_votes.size() << " cert votes"; LOG(log_tr_) << "Synced PBFT block " << period_data; pbft_mgr_->periodDataQueuePush(std::move(period_data), packet_data.from_node_id_, std::move(current_block_cert_votes)); } - auto pbft_sync_period = pbft_mgr_->pbftSyncingPeriod(); - // Reset last sync packet received time pbft_syncing_state_->setLastSyncPacketTime(); - if (pbft_chain_synced) { pbftSyncComplete(); return; } - if (last_block) { // If current sync period is actually bigger than the block we just received we are probably synced if (pbft_sync_period > pbft_block_period) { @@ -229,16 +202,13 @@ void PbftSyncPacketHandler::process(const threadpool::PacketData &packet_data, } } } - PeriodData PbftSyncPacketHandler::decodePeriodData(const dev::RLP &period_data_rlp) const { - return PeriodData(period_data_rlp); + return PeriodData::FromOldPeriodData(period_data_rlp); } - std::vector> PbftSyncPacketHandler::decodeVotesBundle( const dev::RLP &votes_bundle_rlp) const { return decodePbftVotesBundleRlp(votes_bundle_rlp); } - void PbftSyncPacketHandler::pbftSyncComplete() { if (pbft_mgr_->periodDataQueueSize()) { LOG(log_tr_) << "Syncing pbft blocks faster than processing. 
Remaining sync size " @@ -257,7 +227,6 @@ void PbftSyncPacketHandler::pbftSyncComplete() { } } } - void PbftSyncPacketHandler::delayedPbftSync(uint32_t counter) { const uint32_t max_delayed_pbft_sync_count = 60000 / kDelayedPbftSyncDelayMs; auto pbft_sync_period = pbft_mgr_->pbftSyncingPeriod(); @@ -268,7 +237,6 @@ void PbftSyncPacketHandler::delayedPbftSync(uint32_t counter) { LOG(log_tr_) << "Syncing PBFT is stopping"; return; } - if (pbft_syncing_state_->isPbftSyncing()) { if (pbft_sync_period > pbft_chain_->getPbftChainSize() + (10 * kConf.network.sync_level_size)) { LOG(log_tr_) << "Syncing pbft blocks faster than processing " << pbft_sync_period << " " @@ -281,10 +249,8 @@ void PbftSyncPacketHandler::delayedPbftSync(uint32_t counter) { } } } - void PbftSyncPacketHandler::handleMaliciousSyncPeer(const dev::p2p::NodeID &id) { peers_state_->set_peer_malicious(id); - if (auto host = peers_state_->host_.lock(); host) { LOG(log_nf_) << "Disconnect peer " << id; host->disconnect(id, dev::p2p::UserReason); @@ -292,5 +258,4 @@ void PbftSyncPacketHandler::handleMaliciousSyncPeer(const dev::p2p::NodeID &id) LOG(log_er_) << "Unable to handleMaliciousSyncPeer, host == nullptr"; } } - -} // namespace taraxa::network::tarcap::v4 +} // namespace taraxa::network::tarcap::v3 \ No newline at end of file diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/v4/pillar_vote_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/v3/pillar_vote_packet_handler.cpp similarity index 94% rename from libraries/core_libs/network/src/tarcap/packets_handlers/v4/pillar_vote_packet_handler.cpp rename to libraries/core_libs/network/src/tarcap/packets_handlers/v3/pillar_vote_packet_handler.cpp index 9676fc736a..a70eabdeff 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/v4/pillar_vote_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/v3/pillar_vote_packet_handler.cpp @@ -1,8 +1,8 @@ -#include 
"network/tarcap/packets_handlers/v4/pillar_vote_packet_handler.hpp" +#include "network/tarcap/packets_handlers/v3/pillar_vote_packet_handler.hpp" #include "vote/pillar_vote.hpp" -namespace taraxa::network::tarcap::v4 { +namespace taraxa::network::tarcap::v3 { PillarVotePacketHandler::PillarVotePacketHandler(const FullNodeConfig &conf, std::shared_ptr peers_state, std::shared_ptr packets_stats, @@ -61,4 +61,4 @@ void PillarVotePacketHandler::sendPillarVote(const std::shared_ptr & } } -} // namespace taraxa::network::tarcap::v4 +} // namespace taraxa::network::tarcap::v3 diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/v4/pillar_votes_bundle_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/v3/pillar_votes_bundle_packet_handler.cpp similarity index 92% rename from libraries/core_libs/network/src/tarcap/packets_handlers/v4/pillar_votes_bundle_packet_handler.cpp rename to libraries/core_libs/network/src/tarcap/packets_handlers/v3/pillar_votes_bundle_packet_handler.cpp index 032239389d..00178ebcc9 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/v4/pillar_votes_bundle_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/v3/pillar_votes_bundle_packet_handler.cpp @@ -1,8 +1,8 @@ -#include "network/tarcap/packets_handlers/v4/pillar_votes_bundle_packet_handler.hpp" +#include "network/tarcap/packets_handlers/v3/pillar_votes_bundle_packet_handler.hpp" #include "vote/pillar_vote.hpp" -namespace taraxa::network::tarcap::v4 { +namespace taraxa::network::tarcap::v3 { PillarVotesBundlePacketHandler::PillarVotesBundlePacketHandler( const FullNodeConfig &conf, std::shared_ptr peers_state, @@ -37,4 +37,4 @@ void PillarVotesBundlePacketHandler::process(const threadpool::PacketData &packe } } -} // namespace taraxa::network::tarcap::v4 +} // namespace taraxa::network::tarcap::v3 diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/v4/readme.md 
b/libraries/core_libs/network/src/tarcap/packets_handlers/v3/readme.md similarity index 100% rename from libraries/core_libs/network/src/tarcap/packets_handlers/v4/readme.md rename to libraries/core_libs/network/src/tarcap/packets_handlers/v3/readme.md diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/v4/status_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/v3/status_packet_handler.cpp similarity index 97% rename from libraries/core_libs/network/src/tarcap/packets_handlers/v4/status_packet_handler.cpp rename to libraries/core_libs/network/src/tarcap/packets_handlers/v3/status_packet_handler.cpp index 8847e0d4aa..97a8a05eec 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/v4/status_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/v3/status_packet_handler.cpp @@ -1,14 +1,14 @@ -#include "network/tarcap/packets_handlers/v4/status_packet_handler.hpp" +#include "network/tarcap/packets_handlers/v3/status_packet_handler.hpp" #include "config/version.hpp" #include "dag/dag.hpp" -#include "network/tarcap/packets_handlers/v4/common/ext_syncing_packet_handler.hpp" +#include "network/tarcap/packets_handlers/v3/common/ext_syncing_packet_handler.hpp" #include "network/tarcap/shared_states/pbft_syncing_state.hpp" #include "pbft/pbft_chain.hpp" #include "pbft/pbft_manager.hpp" #include "vote_manager/vote_manager.hpp" -namespace taraxa::network::tarcap::v4 { +namespace taraxa::network::tarcap::v3 { StatusPacketHandler::StatusPacketHandler(const FullNodeConfig& conf, std::shared_ptr peers_state, std::shared_ptr packets_stats, @@ -195,4 +195,4 @@ void StatusPacketHandler::sendStatusToPeers() { } } -} // namespace taraxa::network::tarcap::v4 +} // namespace taraxa::network::tarcap::v3 diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/v4/transaction_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/v3/transaction_packet_handler.cpp 
similarity index 98% rename from libraries/core_libs/network/src/tarcap/packets_handlers/v4/transaction_packet_handler.cpp rename to libraries/core_libs/network/src/tarcap/packets_handlers/v3/transaction_packet_handler.cpp index 8c6d742a26..8cb528826b 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/v4/transaction_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/v3/transaction_packet_handler.cpp @@ -1,11 +1,11 @@ -#include "network/tarcap/packets_handlers/v4/transaction_packet_handler.hpp" +#include "network/tarcap/packets_handlers/v3/transaction_packet_handler.hpp" #include #include "transaction/transaction.hpp" #include "transaction/transaction_manager.hpp" -namespace taraxa::network::tarcap::v4 { +namespace taraxa::network::tarcap::v3 { TransactionPacketHandler::TransactionPacketHandler(const FullNodeConfig &conf, std::shared_ptr peers_state, std::shared_ptr packets_stats, @@ -219,4 +219,4 @@ void TransactionPacketHandler::sendTransactions(std::shared_ptr peer } } -} // namespace taraxa::network::tarcap::v4 +} // namespace taraxa::network::tarcap::v3 diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/v4/vote_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/v3/vote_packet_handler.cpp similarity index 97% rename from libraries/core_libs/network/src/tarcap/packets_handlers/v4/vote_packet_handler.cpp rename to libraries/core_libs/network/src/tarcap/packets_handlers/v3/vote_packet_handler.cpp index 8a3771a1bc..184ae41e82 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/v4/vote_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/v3/vote_packet_handler.cpp @@ -1,9 +1,9 @@ -#include "network/tarcap/packets_handlers/v4/vote_packet_handler.hpp" +#include "network/tarcap/packets_handlers/v3/vote_packet_handler.hpp" #include "pbft/pbft_manager.hpp" #include "vote_manager/vote_manager.hpp" -namespace taraxa::network::tarcap::v4 { 
+namespace taraxa::network::tarcap::v3 { VotePacketHandler::VotePacketHandler(const FullNodeConfig &conf, std::shared_ptr peers_state, std::shared_ptr packets_stats, @@ -138,4 +138,4 @@ void VotePacketHandler::sendPbftVote(const std::shared_ptr &peer, co } } -} // namespace taraxa::network::tarcap::v4 +} // namespace taraxa::network::tarcap::v3 diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/v4/votes_bundle_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/v3/votes_bundle_packet_handler.cpp similarity index 97% rename from libraries/core_libs/network/src/tarcap/packets_handlers/v4/votes_bundle_packet_handler.cpp rename to libraries/core_libs/network/src/tarcap/packets_handlers/v3/votes_bundle_packet_handler.cpp index 86c8241a12..1b89bae0cd 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/v4/votes_bundle_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/v3/votes_bundle_packet_handler.cpp @@ -1,10 +1,10 @@ -#include "network/tarcap/packets_handlers/v4/votes_bundle_packet_handler.hpp" +#include "network/tarcap/packets_handlers/v3/votes_bundle_packet_handler.hpp" #include "pbft/pbft_manager.hpp" #include "vote/votes_bundle_rlp.hpp" #include "vote_manager/vote_manager.hpp" -namespace taraxa::network::tarcap::v4 { +namespace taraxa::network::tarcap::v3 { VotesBundlePacketHandler::VotesBundlePacketHandler(const FullNodeConfig &conf, std::shared_ptr peers_state, std::shared_ptr packets_stats, @@ -122,4 +122,4 @@ void VotesBundlePacketHandler::onNewPbftVotesBundle(const std::vector session, u256 auto status_packet_handler = packets_handlers_->getSpecificHandler(); status_packet_handler->sendStatus(node_id, true); } else { - auto status_packet_handler = packets_handlers_->getSpecificHandler(); + auto status_packet_handler = packets_handlers_->getSpecificHandler(); status_packet_handler->sendStatus(node_id, true); } } @@ -124,7 +124,7 @@ void 
TaraxaCapability::onDisconnect(dev::p2p::NodeID const &_nodeID) { if (version_ == TARAXA_NET_VERSION) { packets_handlers_->getSpecificHandler()->startSyncingPbft(); } else { - packets_handlers_->getSpecificHandler()->startSyncingPbft(); + packets_handlers_->getSpecificHandler()->startSyncingPbft(); } } else { @@ -327,44 +327,44 @@ const TaraxaCapability::InitPacketsHandlers TaraxaCapability::kInitV4Handlers = const addr_t &node_addr) { auto packets_handlers = std::make_shared(); // Consensus packets with high processing priority - packets_handlers->registerHandler(config, peers_state, packets_stats, pbft_mgr, pbft_chain, + packets_handlers->registerHandler(config, peers_state, packets_stats, pbft_mgr, pbft_chain, vote_mgr, slashing_manager, node_addr, logs_prefix); - packets_handlers->registerHandler( + packets_handlers->registerHandler( config, peers_state, packets_stats, pbft_mgr, pbft_chain, vote_mgr, slashing_manager, node_addr, logs_prefix); - packets_handlers->registerHandler( + packets_handlers->registerHandler( config, peers_state, packets_stats, pbft_mgr, pbft_chain, vote_mgr, slashing_manager, node_addr, logs_prefix); // Standard packets with mid processing priority - packets_handlers->registerHandler(config, peers_state, packets_stats, + packets_handlers->registerHandler(config, peers_state, packets_stats, pbft_syncing_state, pbft_chain, pbft_mgr, dag_mgr, trx_mgr, db, node_addr, logs_prefix); - packets_handlers->registerHandler(config, peers_state, packets_stats, trx_mgr, + packets_handlers->registerHandler(config, peers_state, packets_stats, trx_mgr, node_addr, logs_prefix); // Non critical packets with low processing priority - packets_handlers->registerHandler(config, peers_state, packets_stats, pbft_syncing_state, + packets_handlers->registerHandler(config, peers_state, packets_stats, pbft_syncing_state, pbft_chain, pbft_mgr, dag_mgr, db, genesis_hash, node_addr, logs_prefix); - packets_handlers->registerHandler(config, peers_state, packets_stats, 
trx_mgr, + packets_handlers->registerHandler(config, peers_state, packets_stats, trx_mgr, dag_mgr, db, node_addr, logs_prefix); - packets_handlers->registerHandler(config, peers_state, packets_stats, + packets_handlers->registerHandler(config, peers_state, packets_stats, pbft_syncing_state, pbft_chain, pbft_mgr, dag_mgr, trx_mgr, db, node_addr, logs_prefix); - packets_handlers->registerHandler( + packets_handlers->registerHandler( config, peers_state, packets_stats, pbft_syncing_state, pbft_chain, vote_mgr, db, node_addr, logs_prefix); - packets_handlers->registerHandler(config, peers_state, packets_stats, + packets_handlers->registerHandler(config, peers_state, packets_stats, pbft_syncing_state, pbft_chain, pbft_mgr, dag_mgr, vote_mgr, db, node_addr, logs_prefix); - packets_handlers->registerHandler(config, peers_state, packets_stats, + packets_handlers->registerHandler(config, peers_state, packets_stats, pillar_chain_mgr, node_addr, logs_prefix); - packets_handlers->registerHandler( + packets_handlers->registerHandler( config, peers_state, packets_stats, pillar_chain_mgr, node_addr, logs_prefix); - packets_handlers->registerHandler(config, peers_state, packets_stats, + packets_handlers->registerHandler(config, peers_state, packets_stats, pillar_chain_mgr, node_addr, logs_prefix); return packets_handlers; From 23be1797c68ac43029e707006e85b38b25341cbc Mon Sep 17 00:00:00 2001 From: Matus Kysel Date: Wed, 18 Dec 2024 15:17:37 +0100 Subject: [PATCH 103/105] chore: update bulding instructions --- doc/building.md | 163 ++---------------------------------------------- 1 file changed, 5 insertions(+), 158 deletions(-) diff --git a/doc/building.md b/doc/building.md index 7af1fedd94..93ad86a9e8 100644 --- a/doc/building.md +++ b/doc/building.md @@ -71,167 +71,14 @@ will build out of the box without further effort: cmake -DCONAN_PROFILE=clang -DCMAKE_BUILD_TYPE=RelWithDebInfo -DTARAXA_ENABLE_LTO=OFF -DTARAXA_STATIC_BUILD=OFF ../ make -j$(nproc) -## Building on Ubuntu 22.04 
-For Ubuntu 22.04 users, after installing the right packages with `apt` taraxa-node -will build out of the box without further effort: - -### Install taraxa-node dependencies: - - # Required packages - sudo apt-get install -y \ - libtool \ - autoconf \ - ccache \ - cmake \ - clang-format-14 \ - clang-tidy-14 \ - golang-go \ - python3-pip \ - # this libs are required for arm build by go part. you can skip it for amd64 build - libzstd-dev \ - libsnappy-dev \ - rapidjson-dev \ - libgmp-dev \ - libmpfr-dev \ - libmicrohttpd-dev - - # Optional. Needed to run py_test. This won't install on arm64 OS because package is missing in apt - sudo add-apt-repository ppa:ethereum/ethereum - sudo apt-get update - sudo apt install solc - - # Install conan package manager - sudo python3 -m pip install conan==1.64.1 - - # Setup clang as default compiler either in your IDE or by env. variables" - export CC="clang-14" - export CXX="clang++-14" - -### Clone the Repository - - git clone https://github.com/Taraxa-project/taraxa-node.git --branch testnet - cd taraxa-node - git submodule update --init --recursive - -### Compile - - # Optional - one time action - # Create clang profile - # It is recommended to use clang because on other compilers you could face some errors - conan profile new clang --detect && \ - conan profile update settings.compiler=clang clang && \ - conan profile update settings.compiler.version=14 clang && \ - conan profile update settings.compiler.libcxx=libstdc++11 clang && \ - conan profile update env.CC=clang-14 clang && \ - conan profile update env.CXX=clang++-14 clang - - # Export needed var for conan - export CONAN_REVISIONS_ENABLED=1 - - # Compile project using cmake - mkdir cmake-build - cd cmake-build - cmake -DCONAN_PROFILE=clang -DCMAKE_BUILD_TYPE=RelWithDebInfo -DTARAXA_ENABLE_LTO=OFF -DTARAXA_STATIC_BUILD=OFF ../ - make -j$(nproc) - -## Building on Ubuntu 20.04 -For Ubuntu 20.04 users, after installing the right packages with `apt` taraxa-node -will build 
out of the box without further effort: - -### Install taraxa-node dependencies: - - # Required packages - sudo apt-get install -y \ - libtool \ - autoconf \ - ccache cmake gcc g++ clang-format clang-tidy cppcheck \ - libgflags-dev\ - libjsoncpp-dev \ - libjsonrpccpp-dev \ - python3-pip \ - rapidjson-dev \ - libgmp-dev \ - libmpfr-dev \ - libmicrohttpd-dev - - - # Install conan package manager - # >= 1.36.0 version is required to work properly with clang-14 - sudo python3 -m pip install conan==1.60.0 - - # Install cmake - # >= 3.20 version is required for JSON subcommand - # Setup your IDE accordingly to use this version - sudo python3 -m pip install cmake - - # Go (required) - curl -LO https://go.dev/dl/go1.22.2.linux-amd64.tar.gz - sudo tar -C /usr/local -xzf go1.22.2.linux-amd64.tar.gz - rm -rf go1.22.2.linux-amd64.tar.gz - - # Add go to PATH - # Add these env. variables to the ~/.profile to persist go settings even after restart - export GOROOT=/usr/local/go - export GOPATH=$HOME/.go - export PATH=$GOPATH/bin:$GOROOT/bin:$PATH - - # Optional - # We are using clang from llvm toolchain as default compiler as well as clang-format and clang-tidy - # It is possible to build taraxa-node also with other C++ compilers but to contribute to the official repo, - # changes must pass clang-format/clang-tidy checks for which we internally use llvm version=13 - # To install llvm: - sudo su - - curl -SL -o llvm.sh https://apt.llvm.org/llvm.sh && \ - chmod +x llvm.sh && \ - ./llvm.sh 14 && \ - apt-get install -y clang-format-14 clang-tidy-14 && \ - rm -f llvm.sh - - # Setup clang as default compiler either in your IDE or by env. 
variables" - export CC="clang-14" - export CXX="clang++-14" - -### Clone the Repository - - git clone https://github.com/Taraxa-project/taraxa-node.git --branch testnet - cd taraxa-node - git submodule update --init --recursive - -### Compile - - # Optional - one time action - # Create clang profile - # It is recommended to use clang because on other compilers you could face some errors - conan profile new clang --detect && \ - conan profile update settings.compiler=clang clang && \ - conan profile update settings.compiler.version=14 clang && \ - conan profile update settings.compiler.libcxx=libstdc++11 clang && \ - conan profile update env.CC=clang-14 clang && \ - conan profile update env.CXX=clang++-14 clang - - # Export needed var for conan - export CONAN_REVISIONS_ENABLED=1 - - # Compile project using cmake - mkdir cmake-build - cd cmake-build - cmake -DCONAN_PROFILE=clang -DCMAKE_BUILD_TYPE=RelWithDebInfo -DTARAXA_ENABLE_LTO=OFF -DTARAXA_STATIC_BUILD=OFF ../ - make -j$(nproc) - -And optional: - - # optional - make install # defaults to /usr/local - ## Building on MacOS ### Install taraxa-node dependencies: -First you need to get (Brew)[https://brew.sh/] package manager. After that you need tot install dependencies with it. Clang-14 is used for compilation. +First you need to get [Brew](https://brew.sh/) package manager. After that you need to install dependencies with it. Clang-17 is used for compilation. brew update - brew install coreutils go autoconf automake gflags git libtool llvm@14 make pkg-config cmake conan snappy zstd rapidjson gmp mpfr libmicrohttpd + brew install coreutils go autoconf automake gflags git libtool llvm@17 make pkg-config cmake conan snappy zstd rapidjson gmp mpfr libmicrohttpd ### Clone the Repository @@ -245,8 +92,8 @@ First you need to get (Brew)[https://brew.sh/] package manager.
After that you n # It is recommended to use clang because on other compilers you could face some errors conan profile new clang --detect && \ conan profile update settings.compiler=clang clang && \ - conan profile update settings.compiler.version=14 clang && \ - conan profile update settings.compiler.compiler.cppstd=14 + conan profile update settings.compiler.version=17 clang && \ + conan profile update settings.compiler.compiler.cppstd=17 conan profile update settings.compiler.libcxx=libc++ clang && \ conan profile update env.CC=clang clang && \ conan profile update env.CXX=clang++ clang @@ -316,7 +163,7 @@ You should be able to build project following default MacOS building process. Bu # It output should be equal to `i386` conan profile new clang --detect && \ conan profile update settings.compiler=clang clang && \ - conan profile update settings.compiler.version=14 clang && \ + conan profile update settings.compiler.version=17 clang && \ conan profile update settings.compiler.libcxx=libc++ clang && \ conan profile update env.CC=/usr/local/opt/llvm/bin/clang clang && \ conan profile update env.CXX=/usr/local/opt/llvm/bin/clang++ clang From a041af7374353704e6f606b390139dbfdac73c8d Mon Sep 17 00:00:00 2001 From: Matus Kysel Date: Wed, 18 Dec 2024 15:43:16 +0100 Subject: [PATCH 104/105] remove testnet branch from building instructions --- doc/building.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/doc/building.md b/doc/building.md index 93ad86a9e8..8d4b1e633f 100644 --- a/doc/building.md +++ b/doc/building.md @@ -47,7 +47,7 @@ will build out of the box without further effort: ### Clone the Repository - git clone https://github.com/Taraxa-project/taraxa-node.git --branch testnet + git clone https://github.com/Taraxa-project/taraxa-node.git cd taraxa-node git submodule update --init --recursive @@ -82,7 +82,7 @@ First you need to get (Brew)[https://brew.sh/] package manager. 
After that you n ### Clone the Repository - git clone https://github.com/Taraxa-project/taraxa-node.git --branch testnet + git clone https://github.com/Taraxa-project/taraxa-node.git cd taraxa-node git submodule update --init --recursive @@ -151,7 +151,7 @@ You should be able to build project following default MacOS building process. Bu ### Clone the Repository - git clone https://github.com/Taraxa-project/taraxa-node.git --branch testnet + git clone https://github.com/Taraxa-project/taraxa-node.git cd taraxa-node git submodule update --init --recursive From 1bdb67d7d88ffbaf38efffa1a223dbe506bb7357 Mon Sep 17 00:00:00 2001 From: Matus Kysel Date: Thu, 19 Dec 2024 11:31:55 +0100 Subject: [PATCH 105/105] fix building instructions --- doc/building.md | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/doc/building.md b/doc/building.md index 8d4b1e633f..ae79c11362 100644 --- a/doc/building.md +++ b/doc/building.md @@ -20,12 +20,12 @@ will build out of the box without further effort: autoconf \ ccache \ cmake \ + clang \ clang-format-17 \ clang-tidy-17 \ llvm-17 \ golang-go \ python3-full \ - # this libs are required for arm build by go part. you can skip it for amd64 build libzstd-dev \ libsnappy-dev \ rapidjson-dev \ @@ -62,8 +62,7 @@ will build out of the box without further effort: && conan profile update settings.compiler.libcxx=libstdc++11 clang \ && conan profile update settings.build_type=RelWithDebInfo clang \ && conan profile update env.CC=clang-17 clang \ - && conan profile update env.CXX=clang++-17 clang \ - && conan install --build missing -pr=clang . + && conan profile update env.CXX=clang++-17 clang # Compile project using cmake mkdir cmake-build