From 0b4c7e9674f0ff611a162c061218b7df4e32e1c0 Mon Sep 17 00:00:00 2001 From: rjonczy Date: Thu, 16 Feb 2023 16:10:01 +0100 Subject: [PATCH 001/162] rename bootnode yaml files --- ...vices-loadbalancer.yaml => bootnode-service-loadbalancer.yaml} | 0 .../templates/{boot-node-service.yaml => bootnode-service.yaml} | 0 ...boot-node-servicemonitor.yaml => bootnode-servicemonitor.yaml} | 0 charts/taraxa-node/templates/{boot-node.yaml => bootnode.yaml} | 0 4 files changed, 0 insertions(+), 0 deletions(-) rename charts/taraxa-node/templates/{boot-nodes-services-loadbalancer.yaml => bootnode-service-loadbalancer.yaml} (100%) rename charts/taraxa-node/templates/{boot-node-service.yaml => bootnode-service.yaml} (100%) rename charts/taraxa-node/templates/{boot-node-servicemonitor.yaml => bootnode-servicemonitor.yaml} (100%) rename charts/taraxa-node/templates/{boot-node.yaml => bootnode.yaml} (100%) diff --git a/charts/taraxa-node/templates/boot-nodes-services-loadbalancer.yaml b/charts/taraxa-node/templates/bootnode-service-loadbalancer.yaml similarity index 100% rename from charts/taraxa-node/templates/boot-nodes-services-loadbalancer.yaml rename to charts/taraxa-node/templates/bootnode-service-loadbalancer.yaml diff --git a/charts/taraxa-node/templates/boot-node-service.yaml b/charts/taraxa-node/templates/bootnode-service.yaml similarity index 100% rename from charts/taraxa-node/templates/boot-node-service.yaml rename to charts/taraxa-node/templates/bootnode-service.yaml diff --git a/charts/taraxa-node/templates/boot-node-servicemonitor.yaml b/charts/taraxa-node/templates/bootnode-servicemonitor.yaml similarity index 100% rename from charts/taraxa-node/templates/boot-node-servicemonitor.yaml rename to charts/taraxa-node/templates/bootnode-servicemonitor.yaml diff --git a/charts/taraxa-node/templates/boot-node.yaml b/charts/taraxa-node/templates/bootnode.yaml similarity index 100% rename from charts/taraxa-node/templates/boot-node.yaml rename to 
charts/taraxa-node/templates/bootnode.yaml From 0751a029c0040c82264bec6ea550b21430a2acd9 Mon Sep 17 00:00:00 2001 From: mfrankovi Date: Mon, 20 Feb 2023 15:43:40 +0100 Subject: [PATCH 002/162] fix: peer chain size --- .../network/src/tarcap/packets_handlers/vote_packet_handler.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/vote_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/vote_packet_handler.cpp index bf494f7e32..7bd333abc9 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/vote_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/vote_packet_handler.cpp @@ -70,7 +70,7 @@ void VotePacketHandler::process(const PacketData &packet_data, const std::shared onNewPbftVote(vote, pbft_block); // Update peer's max chain size - if (peer_chain_size.has_value() && *peer_chain_size > peer->pbft_chain_size_) { + if (peer_chain_size.has_value() && vote->getVoter() == peer->getId() && *peer_chain_size > peer->pbft_chain_size_) { peer->pbft_chain_size_ = *peer_chain_size; } } From 276a98320694885b6c65aee8f0dfe425c6adce34 Mon Sep 17 00:00:00 2001 From: Matus Kysel Date: Tue, 21 Feb 2023 19:29:15 +0100 Subject: [PATCH 003/162] chore: fix segfault on network test --- .../packets_handlers/transaction_packet_handler.hpp | 2 +- .../packets_handlers/transaction_packet_handler.cpp | 3 ++- tests/network_test.cpp | 11 +++++++---- 3 files changed, 10 insertions(+), 6 deletions(-) diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/transaction_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/transaction_packet_handler.hpp index d205ec2470..dd38d286fa 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets_handlers/transaction_packet_handler.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/transaction_packet_handler.hpp @@ -27,7 
+27,7 @@ class TransactionPacketHandler final : public PacketHandler { * @param transactions serialized transactions * */ - void sendTransactions(std::shared_ptr const& peer, + void sendTransactions(std::shared_ptr peer, std::vector>&& transactions); /** diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/transaction_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/transaction_packet_handler.cpp index 0b7d5387fc..d106786a85 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/transaction_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/transaction_packet_handler.cpp @@ -159,8 +159,9 @@ void TransactionPacketHandler::periodicSendTransactions(SharedTransactions &&tra } } -void TransactionPacketHandler::sendTransactions(std::shared_ptr const &peer, +void TransactionPacketHandler::sendTransactions(std::shared_ptr peer, std::vector> &&transactions) { + if (!peer) return; const auto peer_id = peer->getId(); LOG(log_tr_) << "sendTransactions " << transactions.size() << " to " << peer_id; diff --git a/tests/network_test.cpp b/tests/network_test.cpp index efc4b92f99..4807599669 100644 --- a/tests/network_test.cpp +++ b/tests/network_test.cpp @@ -327,7 +327,7 @@ TEST_F(NetworkTest, transfer_transaction) { nw1->start(); nw2->start(); - EXPECT_HAPPENS({10s, 200ms}, [&](auto& ctx) { + EXPECT_HAPPENS({20s, 100ms}, [&](auto& ctx) { nw1->setPendingPeersToReady(); nw2->setPendingPeersToReady(); WAIT_EXPECT_EQ(ctx, nw1->getPeerCount(), 1) @@ -336,15 +336,18 @@ TEST_F(NetworkTest, transfer_transaction) { auto nw1_nodeid = nw1->getNodeId(); auto nw2_nodeid = nw2->getNodeId(); - EXPECT_NE(nw1->getPeer(nw2_nodeid), nullptr); - EXPECT_NE(nw2->getPeer(nw1_nodeid), nullptr); + + const auto peer2 = nw1->getPeer(nw2_nodeid); + const auto peer1 = nw2->getPeer(nw1_nodeid); + EXPECT_NE(peer2, nullptr); + EXPECT_NE(peer1, nullptr); SharedTransactions transactions; 
transactions.push_back(g_signed_trx_samples[0]); transactions.push_back(g_signed_trx_samples[1]); transactions.push_back(g_signed_trx_samples[2]); - nw2->getSpecificHandler()->sendTransactions(nw2->getPeer(nw1_nodeid), + nw2->getSpecificHandler()->sendTransactions(peer1, std::move(transactions)); EXPECT_HAPPENS({2s, 200ms}, [&](auto& ctx) { WAIT_EXPECT_EQ(ctx, nw1->getReceivedTransactionsCount(), 3) }); From f09752b56d8ca36d2a8b9846b2d36f5afe4b8068 Mon Sep 17 00:00:00 2001 From: JakubFornadel Date: Thu, 23 Feb 2023 17:12:07 +0100 Subject: [PATCH 004/162] chore: set min. delegation to 1k TARA and starting validators delegation to 10k TARA on devnet --- .../config_jsons/devnet/devnet_genesis.json | 40 +++++++++---------- 1 file changed, 20 insertions(+), 20 deletions(-) diff --git a/libraries/cli/include/cli/config_jsons/devnet/devnet_genesis.json b/libraries/cli/include/cli/config_jsons/devnet/devnet_genesis.json index 519150f3ce..133885012d 100644 --- a/libraries/cli/include/cli/config_jsons/devnet/devnet_genesis.json +++ b/libraries/cli/include/cli/config_jsons/devnet/devnet_genesis.json @@ -13,8 +13,8 @@ "delegation_locking_period": "0x5", "eligibility_balance_threshold": "0xd3c21bcecceda1000000", "vote_eligibility_balance_step": "0x152d02c7e14af6800000", - "validator_maximum_stake": "0x84595161401484A000000", - "minimum_deposit": "0x0", + "validator_maximum_stake": "0x84595161401484a000000", + "minimum_deposit": "0x3635c9adc5dea00000", "max_block_author_reward": "0x5", "dag_proposers_reward": "0x32", "commission_change_delta": "0x0", @@ -28,7 +28,7 @@ "endpoint": "", "description": "Taraxa devnet validator 1", "delegations": { - "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0x84595161401484a000000" + "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0x21e19e0c9bab2400000" }, "vrf_key": "05fe580fd2d461ee5f762a33bbe669403bb04a851f2e9ed8d2579a9c9b77c3ec" }, @@ -39,7 +39,7 @@ "endpoint": "", "description": "Taraxa devnet validator 2", "delegations": { - 
"0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0x84595161401484a000000" + "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0x21e19e0c9bab2400000" }, "vrf_key": "70d34c86787e5f7bd0f266cad291cb521e23176fa37c6efc034858a1620ac69e" }, @@ -50,7 +50,7 @@ "endpoint": "", "description": "Taraxa devnet validator 3", "delegations": { - "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0x84595161401484a000000" + "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0x21e19e0c9bab2400000" }, "vrf_key": "f8d5c00ce9fa3058341e051b36a1e6ccf69df81fb865568b2bf1507d085691e2" }, @@ -61,7 +61,7 @@ "endpoint": "", "description": "Taraxa devnet validator 4", "delegations": { - "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0x84595161401484a000000" + "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0x21e19e0c9bab2400000" }, "vrf_key": "aa12507d00c992b95e65d80b21fd2db5b48c4f7ff4393064828d1adc930710b4" }, @@ -72,7 +72,7 @@ "endpoint": "", "description": "Taraxa devnet validator 5", "delegations": { - "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0x84595161401484a000000" + "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0x21e19e0c9bab2400000" }, "vrf_key": "bd34898ae0080187c408b5724f05682855c4425fda61d332f5f9d746d4eb753a" }, @@ -83,7 +83,7 @@ "endpoint": "", "description": "Taraxa devnet validator 6", "delegations": { - "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0x84595161401484a000000" + "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0x21e19e0c9bab2400000" }, "vrf_key": "25d35fed93989c40b4e8685d9d7ee02213230221ea9efcbe8cfccfc788670dba" }, @@ -94,7 +94,7 @@ "endpoint": "", "description": "Taraxa devnet validator 7", "delegations": { - "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0x84595161401484a000000" + "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0x21e19e0c9bab2400000" }, "vrf_key": "55c0bd1af84fb793a5dd7b960e330248d8a0acde566922b3e210f43592700dad" }, @@ -105,7 +105,7 @@ "endpoint": "", "description": "Taraxa devnet validator 8", "delegations": { - "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": 
"0x84595161401484a000000" + "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0x21e19e0c9bab2400000" }, "vrf_key": "33131367e7279ee51c0f26c6f9b6627848f822d134abef21a88be467dfbaae7b" }, @@ -116,7 +116,7 @@ "endpoint": "", "description": "Taraxa devnet validator 9", "delegations": { - "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0x84595161401484a000000" + "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0x21e19e0c9bab2400000" }, "vrf_key": "da63de37c69a59cb3ebbcfb79ef8d561b18b448b544a14438c62cd56bc0a29f5" }, @@ -127,7 +127,7 @@ "endpoint": "", "description": "Taraxa devnet validator 10", "delegations": { - "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0x84595161401484a000000" + "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0x21e19e0c9bab2400000" }, "vrf_key": "337178752602a5ca38928bf0d8d434ec653505c92b280b0edab6c39d5e79f4fd" }, @@ -138,7 +138,7 @@ "endpoint": "", "description": "Taraxa devnet validator 11", "delegations": { - "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0x84595161401484a000000" + "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0x21e19e0c9bab2400000" }, "vrf_key": "ac08e4ca5f1bcdd61dbefa7551ab839bdd4545e59ee8a4ab5d3aabb71104ab73" }, @@ -149,7 +149,7 @@ "endpoint": "", "description": "Taraxa devnet validator 12", "delegations": { - "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0x84595161401484a000000" + "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0x21e19e0c9bab2400000" }, "vrf_key": "189b05cca0a816a36f977f0541ef7585218b2087f04b23444ab58d0c755adecc" }, @@ -160,7 +160,7 @@ "endpoint": "", "description": "Taraxa devnet validator 13", "delegations": { - "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0x84595161401484a000000" + "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0x21e19e0c9bab2400000" }, "vrf_key": "8e95172f90b68ee753132bf6342ee00b398e2417312f610d58c34729ab0608ee" }, @@ -171,7 +171,7 @@ "endpoint": "", "description": "Taraxa devnet validator 14", "delegations": { - "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0x84595161401484a000000" + 
"0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0x21e19e0c9bab2400000" }, "vrf_key": "959551740ff948d9714f15a2bfb2183c4ead897dd79775a0a18488aa8936e2ba" }, @@ -182,7 +182,7 @@ "endpoint": "", "description": "Taraxa devnet validator 15", "delegations": { - "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0x84595161401484a000000" + "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0x21e19e0c9bab2400000" }, "vrf_key": "56b7831cb3e35c1d6d1e3f661de2068d6feeaa54074b3e02709a87d7f0d6c72a" }, @@ -193,7 +193,7 @@ "endpoint": "", "description": "Taraxa devnet validator 16", "delegations": { - "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0x84595161401484a000000" + "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0x21e19e0c9bab2400000" }, "vrf_key": "e774c519814cbc04008aa958932e7adb82ebbbd6ca69089c0a1458ea34fb4299" }, @@ -204,7 +204,7 @@ "endpoint": "", "description": "Taraxa devnet validator 17", "delegations": { - "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0x84595161401484a000000" + "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0x21e19e0c9bab2400000" }, "vrf_key": "1b15b7bef6a1dbe9aeb2792f2e38d6222d31f8c6c15cff1152f258013d70d933" }, @@ -215,7 +215,7 @@ "endpoint": "", "description": "Taraxa devnet validator 18", "delegations": { - "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0x84595161401484a000000" + "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0x21e19e0c9bab2400000" }, "vrf_key": "37bf145ac98e7de7db6e5b933e72737fbf190fd4fb1d193b15cf8b00db30ba30" } From 821f96d5ca8efcd6298f2ba5e0c2a7d9ad32d991 Mon Sep 17 00:00:00 2001 From: JakubFornadel Date: Fri, 24 Feb 2023 09:36:04 +0100 Subject: [PATCH 005/162] chore: set default delegation for our validators to 100k --- .../config_jsons/devnet/devnet_genesis.json | 36 +++++++++---------- 1 file changed, 18 insertions(+), 18 deletions(-) diff --git a/libraries/cli/include/cli/config_jsons/devnet/devnet_genesis.json b/libraries/cli/include/cli/config_jsons/devnet/devnet_genesis.json index 133885012d..04aab6c412 100644 --- 
a/libraries/cli/include/cli/config_jsons/devnet/devnet_genesis.json +++ b/libraries/cli/include/cli/config_jsons/devnet/devnet_genesis.json @@ -28,7 +28,7 @@ "endpoint": "", "description": "Taraxa devnet validator 1", "delegations": { - "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0x21e19e0c9bab2400000" + "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0xd3c21bcecceda1000000" }, "vrf_key": "05fe580fd2d461ee5f762a33bbe669403bb04a851f2e9ed8d2579a9c9b77c3ec" }, @@ -39,7 +39,7 @@ "endpoint": "", "description": "Taraxa devnet validator 2", "delegations": { - "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0x21e19e0c9bab2400000" + "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0xd3c21bcecceda1000000" }, "vrf_key": "70d34c86787e5f7bd0f266cad291cb521e23176fa37c6efc034858a1620ac69e" }, @@ -50,7 +50,7 @@ "endpoint": "", "description": "Taraxa devnet validator 3", "delegations": { - "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0x21e19e0c9bab2400000" + "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0xd3c21bcecceda1000000" }, "vrf_key": "f8d5c00ce9fa3058341e051b36a1e6ccf69df81fb865568b2bf1507d085691e2" }, @@ -61,7 +61,7 @@ "endpoint": "", "description": "Taraxa devnet validator 4", "delegations": { - "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0x21e19e0c9bab2400000" + "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0xd3c21bcecceda1000000" }, "vrf_key": "aa12507d00c992b95e65d80b21fd2db5b48c4f7ff4393064828d1adc930710b4" }, @@ -72,7 +72,7 @@ "endpoint": "", "description": "Taraxa devnet validator 5", "delegations": { - "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0x21e19e0c9bab2400000" + "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0xd3c21bcecceda1000000" }, "vrf_key": "bd34898ae0080187c408b5724f05682855c4425fda61d332f5f9d746d4eb753a" }, @@ -83,7 +83,7 @@ "endpoint": "", "description": "Taraxa devnet validator 6", "delegations": { - "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0x21e19e0c9bab2400000" + "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0xd3c21bcecceda1000000" 
}, "vrf_key": "25d35fed93989c40b4e8685d9d7ee02213230221ea9efcbe8cfccfc788670dba" }, @@ -94,7 +94,7 @@ "endpoint": "", "description": "Taraxa devnet validator 7", "delegations": { - "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0x21e19e0c9bab2400000" + "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0xd3c21bcecceda1000000" }, "vrf_key": "55c0bd1af84fb793a5dd7b960e330248d8a0acde566922b3e210f43592700dad" }, @@ -105,7 +105,7 @@ "endpoint": "", "description": "Taraxa devnet validator 8", "delegations": { - "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0x21e19e0c9bab2400000" + "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0xd3c21bcecceda1000000" }, "vrf_key": "33131367e7279ee51c0f26c6f9b6627848f822d134abef21a88be467dfbaae7b" }, @@ -116,7 +116,7 @@ "endpoint": "", "description": "Taraxa devnet validator 9", "delegations": { - "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0x21e19e0c9bab2400000" + "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0xd3c21bcecceda1000000" }, "vrf_key": "da63de37c69a59cb3ebbcfb79ef8d561b18b448b544a14438c62cd56bc0a29f5" }, @@ -127,7 +127,7 @@ "endpoint": "", "description": "Taraxa devnet validator 10", "delegations": { - "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0x21e19e0c9bab2400000" + "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0xd3c21bcecceda1000000" }, "vrf_key": "337178752602a5ca38928bf0d8d434ec653505c92b280b0edab6c39d5e79f4fd" }, @@ -138,7 +138,7 @@ "endpoint": "", "description": "Taraxa devnet validator 11", "delegations": { - "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0x21e19e0c9bab2400000" + "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0xd3c21bcecceda1000000" }, "vrf_key": "ac08e4ca5f1bcdd61dbefa7551ab839bdd4545e59ee8a4ab5d3aabb71104ab73" }, @@ -149,7 +149,7 @@ "endpoint": "", "description": "Taraxa devnet validator 12", "delegations": { - "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0x21e19e0c9bab2400000" + "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0xd3c21bcecceda1000000" }, "vrf_key": 
"189b05cca0a816a36f977f0541ef7585218b2087f04b23444ab58d0c755adecc" }, @@ -160,7 +160,7 @@ "endpoint": "", "description": "Taraxa devnet validator 13", "delegations": { - "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0x21e19e0c9bab2400000" + "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0xd3c21bcecceda1000000" }, "vrf_key": "8e95172f90b68ee753132bf6342ee00b398e2417312f610d58c34729ab0608ee" }, @@ -171,7 +171,7 @@ "endpoint": "", "description": "Taraxa devnet validator 14", "delegations": { - "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0x21e19e0c9bab2400000" + "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0xd3c21bcecceda1000000" }, "vrf_key": "959551740ff948d9714f15a2bfb2183c4ead897dd79775a0a18488aa8936e2ba" }, @@ -182,7 +182,7 @@ "endpoint": "", "description": "Taraxa devnet validator 15", "delegations": { - "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0x21e19e0c9bab2400000" + "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0xd3c21bcecceda1000000" }, "vrf_key": "56b7831cb3e35c1d6d1e3f661de2068d6feeaa54074b3e02709a87d7f0d6c72a" }, @@ -193,7 +193,7 @@ "endpoint": "", "description": "Taraxa devnet validator 16", "delegations": { - "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0x21e19e0c9bab2400000" + "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0xd3c21bcecceda1000000" }, "vrf_key": "e774c519814cbc04008aa958932e7adb82ebbbd6ca69089c0a1458ea34fb4299" }, @@ -204,7 +204,7 @@ "endpoint": "", "description": "Taraxa devnet validator 17", "delegations": { - "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0x21e19e0c9bab2400000" + "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0xd3c21bcecceda1000000" }, "vrf_key": "1b15b7bef6a1dbe9aeb2792f2e38d6222d31f8c6c15cff1152f258013d70d933" }, @@ -215,7 +215,7 @@ "endpoint": "", "description": "Taraxa devnet validator 18", "delegations": { - "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0x21e19e0c9bab2400000" + "0x7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0xd3c21bcecceda1000000" }, "vrf_key": 
"37bf145ac98e7de7db6e5b933e72737fbf190fd4fb1d193b15cf8b00db30ba30" } From 49d17472ae4f5d8187291ede5225c18453bcca64 Mon Sep 17 00:00:00 2001 From: rjonczy Date: Wed, 22 Feb 2023 14:49:14 +0100 Subject: [PATCH 006/162] ignore helm stuff --- .gitignore | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.gitignore b/.gitignore index c3c9173fb8..50902de574 100644 --- a/.gitignore +++ b/.gitignore @@ -40,3 +40,7 @@ charts/*/charts/*.tgz # do not include generated documentation doxygen_docs + +# Helm stuff +requirements.lock +Chart.lock \ No newline at end of file From 7abd8c318dab0e99dfd9b6c035457b859732614e Mon Sep 17 00:00:00 2001 From: rjonczy Date: Wed, 22 Feb 2023 15:45:11 +0100 Subject: [PATCH 007/162] wip: enable to deploy different images --- charts/taraxa-node/CHANGELOG.md | 7 +++++++ charts/taraxa-node/templates/bootnode.yaml | 16 ++++++++++++++++ charts/taraxa-node/templates/consensus-node.yaml | 16 ++++++++++++++++ charts/taraxa-node/templates/taraxa-node.yaml | 16 ++++++++++++++++ charts/taraxa-node/values.yaml | 3 +++ 5 files changed, 58 insertions(+) diff --git a/charts/taraxa-node/CHANGELOG.md b/charts/taraxa-node/CHANGELOG.md index d5896d3506..bb724c2f38 100644 --- a/charts/taraxa-node/CHANGELOG.md +++ b/charts/taraxa-node/CHANGELOG.md @@ -21,6 +21,13 @@ numbering uses [semantic versioning](http://semver.org). 
* Separate config for genesis +## v0.2.5 + +### Minor changes + +* Allow for different images in `StatefulSet`s for boot, rpc and consensus nodes + + ## v0.2.4 ### Minor changes diff --git a/charts/taraxa-node/templates/bootnode.yaml b/charts/taraxa-node/templates/bootnode.yaml index f73004d58b..6612b98ead 100644 --- a/charts/taraxa-node/templates/bootnode.yaml +++ b/charts/taraxa-node/templates/bootnode.yaml @@ -34,8 +34,16 @@ spec: spec: initContainers: - name: config-adapter + {{- if and .Values.bootnode.image.repository .Values.bootnode.image.tag -}} + image: "{{ .Values.bootnode.image.repository }}:{{ .Values.bootnode.image.tag }}" + {{- else -}} image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + {{- end -}} + {{- if .Values.bootnode.image.imagePullPolicy -}} + imagePullPolicy: {{ .Values.bootnode.image.pullPolicy }} + {{- else -}} imagePullPolicy: {{ .Values.image.pullPolicy }} + {{- end -}} envFrom: - secretRef: name: {{ .Release.Name }} @@ -54,8 +62,16 @@ spec: mountPath: /root/.taraxa containers: - name: boot-node + {{- if and .Values.bootnode.image.repository .Values.bootnode.image.tag -}} + image: "{{ .Values.bootnode.image.repository }}:{{ .Values.bootnode.image.tag }}" + {{- else -}} image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + {{- end -}} + {{- if .Values.bootnode.image.imagePullPolicy -}} + imagePullPolicy: {{ .Values.bootnode.image.pullPolicy }} + {{- else -}} imagePullPolicy: {{ .Values.image.pullPolicy }} + {{- end -}} args: {{- toYaml .Values.bootnode.args | nindent 12 }} - --chain-id diff --git a/charts/taraxa-node/templates/consensus-node.yaml b/charts/taraxa-node/templates/consensus-node.yaml index df2608e935..5e7f3fd715 100644 --- a/charts/taraxa-node/templates/consensus-node.yaml +++ b/charts/taraxa-node/templates/consensus-node.yaml @@ -44,8 +44,16 @@ spec: subPath: entrypoint.sh {{- end }} - name: config-adapter + {{- if and .Values.consensusnode.image.repository .Values.consensusnode.image.tag -}} + 
image: "{{ .Values.consensusnode.image.repository }}:{{ .Values.consensusnode.image.tag }}" + {{- else -}} image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + {{- end -}} + {{- if .Values.consensusnode.image.imagePullPolicy -}} + imagePullPolicy: {{ .Values.consensusnode.image.pullPolicy }} + {{- else -}} imagePullPolicy: {{ .Values.image.pullPolicy }} + {{- end -}} envFrom: - secretRef: name: {{ .Release.Name }} @@ -94,8 +102,16 @@ spec: subPath: status.py {{- end }} - name: consensus-node + {{- if and .Values.consensusnode.image.repository .Values.consensusnode.image.tag -}} + image: "{{ .Values.consensusnode.image.repository }}:{{ .Values.consensusnode.image.tag }}" + {{- else -}} image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + {{- end -}} + {{- if .Values.consensusnode.image.imagePullPolicy -}} + imagePullPolicy: {{ .Values.consensusnode.image.pullPolicy }} + {{- else -}} imagePullPolicy: {{ .Values.image.pullPolicy }} + {{- end -}} args: {{- toYaml .Values.consensusnode.args | nindent 12 }} env: diff --git a/charts/taraxa-node/templates/taraxa-node.yaml b/charts/taraxa-node/templates/taraxa-node.yaml index 49494c0e2f..8c8c89b0a3 100644 --- a/charts/taraxa-node/templates/taraxa-node.yaml +++ b/charts/taraxa-node/templates/taraxa-node.yaml @@ -35,8 +35,16 @@ spec: spec: initContainers: - name: config-adapter + {{- if and .Values.node.image.repository .Values.node.image.tag -}} + image: "{{ .Values.node.image.repository }}:{{ .Values.node.image.tag }}" + {{- else -}} image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + {{- end -}} + {{- if .Values.node.image.imagePullPolicy -}} + imagePullPolicy: {{ .Values.node.image.pullPolicy }} + {{- else -}} imagePullPolicy: {{ .Values.image.pullPolicy }} + {{- end -}} envFrom: - secretRef: name: {{ .Release.Name }} @@ -91,8 +99,16 @@ spec: subPath: status.py {{- end }} - name: taraxa-node + {{- if and .Values.node.image.repository .Values.node.image.tag -}} + image: "{{ 
.Values.node.image.repository }}:{{ .Values.node.image.tag }}" + {{- else -}} image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + {{- end -}} + {{- if .Values.node.image.imagePullPolicy -}} + imagePullPolicy: {{ .Values.node.image.pullPolicy }} + {{- else -}} imagePullPolicy: {{ .Values.image.pullPolicy }} + {{- end -}} args: {{- toYaml .Values.node.args | nindent 12 }} env: diff --git a/charts/taraxa-node/values.yaml b/charts/taraxa-node/values.yaml index f62ff08f9a..03be75406b 100644 --- a/charts/taraxa-node/values.yaml +++ b/charts/taraxa-node/values.yaml @@ -64,6 +64,7 @@ tolerations: [] affinity: {} node: + image: {} enabled: true replicaCount: 20 loadBalancer: @@ -143,6 +144,7 @@ node: bootnode: + image: {} enabled: true replicaCount: 1 loadBalancer: @@ -186,6 +188,7 @@ bootnode: annotations: {} consensusnode: + image: {} enabled: true replicaCount: 1 probes: From d7c1b0f955d2228bfcf5323e3802b8f9783213b8 Mon Sep 17 00:00:00 2001 From: Robert Jonczy Date: Wed, 22 Feb 2023 22:22:11 +0100 Subject: [PATCH 008/162] fix yaml to json --- charts/taraxa-node/templates/bootnode.yaml | 26 ++++++++++--------- .../taraxa-node/templates/consensus-node.yaml | 24 ++++++++--------- charts/taraxa-node/templates/taraxa-node.yaml | 24 ++++++++--------- charts/taraxa-node/values.yaml | 6 ++--- 4 files changed, 41 insertions(+), 39 deletions(-) diff --git a/charts/taraxa-node/templates/bootnode.yaml b/charts/taraxa-node/templates/bootnode.yaml index 6612b98ead..e3c1d04a90 100644 --- a/charts/taraxa-node/templates/bootnode.yaml +++ b/charts/taraxa-node/templates/bootnode.yaml @@ -34,16 +34,17 @@ spec: spec: initContainers: - name: config-adapter - {{- if and .Values.bootnode.image.repository .Values.bootnode.image.tag -}} + {{- if and .Values.bootnode.image.repository .Values.bootnode.image.tag }} image: "{{ .Values.bootnode.image.repository }}:{{ .Values.bootnode.image.tag }}" - {{- else -}} + {{- else }} image: "{{ .Values.image.repository }}:{{ .Values.image.tag 
}}" - {{- end -}} - {{- if .Values.bootnode.image.imagePullPolicy -}} + {{- end }} + + {{- if .Values.bootnode.image.imagePullPolicy }} imagePullPolicy: {{ .Values.bootnode.image.pullPolicy }} - {{- else -}} + {{- else }} imagePullPolicy: {{ .Values.image.pullPolicy }} - {{- end -}} + {{- end }} envFrom: - secretRef: name: {{ .Release.Name }} @@ -62,16 +63,17 @@ spec: mountPath: /root/.taraxa containers: - name: boot-node - {{- if and .Values.bootnode.image.repository .Values.bootnode.image.tag -}} + {{- if and .Values.bootnode.image.repository .Values.bootnode.image.tag }} image: "{{ .Values.bootnode.image.repository }}:{{ .Values.bootnode.image.tag }}" - {{- else -}} + {{- else }} image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" - {{- end -}} - {{- if .Values.bootnode.image.imagePullPolicy -}} + {{- end }} + + {{- if .Values.bootnode.image.imagePullPolicy }} imagePullPolicy: {{ .Values.bootnode.image.pullPolicy }} - {{- else -}} + {{- else }} imagePullPolicy: {{ .Values.image.pullPolicy }} - {{- end -}} + {{- end }} args: {{- toYaml .Values.bootnode.args | nindent 12 }} - --chain-id diff --git a/charts/taraxa-node/templates/consensus-node.yaml b/charts/taraxa-node/templates/consensus-node.yaml index 5e7f3fd715..8f8c7ca8af 100644 --- a/charts/taraxa-node/templates/consensus-node.yaml +++ b/charts/taraxa-node/templates/consensus-node.yaml @@ -44,16 +44,16 @@ spec: subPath: entrypoint.sh {{- end }} - name: config-adapter - {{- if and .Values.consensusnode.image.repository .Values.consensusnode.image.tag -}} + {{- if and .Values.consensusnode.image.repository .Values.consensusnode.image.tag }} image: "{{ .Values.consensusnode.image.repository }}:{{ .Values.consensusnode.image.tag }}" - {{- else -}} + {{- else }} image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" - {{- end -}} - {{- if .Values.consensusnode.image.imagePullPolicy -}} + {{- end }} + {{- if .Values.consensusnode.image.imagePullPolicy }} imagePullPolicy: {{ 
.Values.consensusnode.image.pullPolicy }} - {{- else -}} + {{- else }} imagePullPolicy: {{ .Values.image.pullPolicy }} - {{- end -}} + {{- end }} envFrom: - secretRef: name: {{ .Release.Name }} @@ -102,16 +102,16 @@ spec: subPath: status.py {{- end }} - name: consensus-node - {{- if and .Values.consensusnode.image.repository .Values.consensusnode.image.tag -}} + {{- if and .Values.consensusnode.image.repository .Values.consensusnode.image.tag }} image: "{{ .Values.consensusnode.image.repository }}:{{ .Values.consensusnode.image.tag }}" - {{- else -}} + {{- else }} image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" - {{- end -}} - {{- if .Values.consensusnode.image.imagePullPolicy -}} + {{- end }} + {{- if .Values.consensusnode.image.imagePullPolicy }} imagePullPolicy: {{ .Values.consensusnode.image.pullPolicy }} - {{- else -}} + {{- else }} imagePullPolicy: {{ .Values.image.pullPolicy }} - {{- end -}} + {{- end }} args: {{- toYaml .Values.consensusnode.args | nindent 12 }} env: diff --git a/charts/taraxa-node/templates/taraxa-node.yaml b/charts/taraxa-node/templates/taraxa-node.yaml index 8c8c89b0a3..aa791585b4 100644 --- a/charts/taraxa-node/templates/taraxa-node.yaml +++ b/charts/taraxa-node/templates/taraxa-node.yaml @@ -35,16 +35,16 @@ spec: spec: initContainers: - name: config-adapter - {{- if and .Values.node.image.repository .Values.node.image.tag -}} + {{- if and .Values.node.image.repository .Values.node.image.tag }} image: "{{ .Values.node.image.repository }}:{{ .Values.node.image.tag }}" - {{- else -}} + {{- else }} image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" - {{- end -}} - {{- if .Values.node.image.imagePullPolicy -}} + {{- end }} + {{- if .Values.node.image.imagePullPolicy }} imagePullPolicy: {{ .Values.node.image.pullPolicy }} - {{- else -}} + {{- else }} imagePullPolicy: {{ .Values.image.pullPolicy }} - {{- end -}} + {{- end }} envFrom: - secretRef: name: {{ .Release.Name }} @@ -99,16 +99,16 @@ spec: subPath: 
status.py {{- end }} - name: taraxa-node - {{- if and .Values.node.image.repository .Values.node.image.tag -}} + {{- if and .Values.node.image.repository .Values.node.image.tag }} image: "{{ .Values.node.image.repository }}:{{ .Values.node.image.tag }}" - {{- else -}} + {{- else }} image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" - {{- end -}} - {{- if .Values.node.image.imagePullPolicy -}} + {{- end }} + {{- if .Values.node.image.imagePullPolicy }} imagePullPolicy: {{ .Values.node.image.pullPolicy }} - {{- else -}} + {{- else }} imagePullPolicy: {{ .Values.image.pullPolicy }} - {{- end -}} + {{- end }} args: {{- toYaml .Values.node.args | nindent 12 }} env: diff --git a/charts/taraxa-node/values.yaml b/charts/taraxa-node/values.yaml index 03be75406b..8ea8179326 100644 --- a/charts/taraxa-node/values.yaml +++ b/charts/taraxa-node/values.yaml @@ -64,8 +64,8 @@ tolerations: [] affinity: {} node: - image: {} enabled: true + image: {} replicaCount: 20 loadBalancer: enabled: false @@ -144,8 +144,8 @@ node: bootnode: - image: {} enabled: true + image: {} replicaCount: 1 loadBalancer: enabled: false @@ -188,8 +188,8 @@ bootnode: annotations: {} consensusnode: - image: {} enabled: true + image: {} replicaCount: 1 probes: enabled: true From 445d78a3b9c66a80619e392397ebdf4de9d5fead Mon Sep 17 00:00:00 2001 From: rjonczy Date: Fri, 24 Feb 2023 15:15:54 +0100 Subject: [PATCH 009/162] added taraxa-indexer --- charts/taraxa-node/CHANGELOG.md | 6 ++++ charts/taraxa-node/Chart.yaml | 2 +- charts/taraxa-node/templates/taraxa-node.yaml | 33 +++++++++++++++++++ charts/taraxa-node/values.yaml | 14 +++++++- 4 files changed, 53 insertions(+), 2 deletions(-) diff --git a/charts/taraxa-node/CHANGELOG.md b/charts/taraxa-node/CHANGELOG.md index bb724c2f38..2e10e6750b 100644 --- a/charts/taraxa-node/CHANGELOG.md +++ b/charts/taraxa-node/CHANGELOG.md @@ -3,6 +3,12 @@ This file documents all notable changes to `taraxa-node` Helm Chart. 
The release numbering uses [semantic versioning](http://semver.org). +## v0.3.3 + +### Major changes + +* Added sidecar container to RPC nodes with [taraxa-indexer](https://github.com/Taraxa-project/taraxa-indexer) + ## v0.3.2 ### Minor changes diff --git a/charts/taraxa-node/Chart.yaml b/charts/taraxa-node/Chart.yaml index d0a045aaa9..5e062c2ad1 100644 --- a/charts/taraxa-node/Chart.yaml +++ b/charts/taraxa-node/Chart.yaml @@ -2,7 +2,7 @@ apiVersion: v2 appVersion: "1.0" description: Kubernetes helm chart for Taraxa blockchain full node implementation. name: taraxa-node -version: 0.3.2 +version: 0.3.3 keywords: - blockchain - taraxa diff --git a/charts/taraxa-node/templates/taraxa-node.yaml b/charts/taraxa-node/templates/taraxa-node.yaml index aa791585b4..4d78d2612e 100644 --- a/charts/taraxa-node/templates/taraxa-node.yaml +++ b/charts/taraxa-node/templates/taraxa-node.yaml @@ -98,6 +98,18 @@ spec: readOnly: true subPath: status.py {{- end }} + {{- if .Values.node.indexer.enabled }} + - name: taraxa-indexer + image: "{{ .Values.node.image.repository }}:{{ .Values.node.image.tag }}" + imagePullPolicy: {{ .Values.node.image.pullPolicy }} + command: ["/taraxa-indexer"] + args: + - -db_path {{ .Values.node.indexer.persistence.mountPoint }} + - -blockchain_ws 'ws://localhost:8777' + volumeMounts: + - name: data + mountPath: /data + {{- end }} - name: taraxa-node {{- if and .Values.node.image.repository .Values.node.image.tag }} image: "{{ .Values.node.image.repository }}:{{ .Values.node.image.tag }}" @@ -224,5 +236,26 @@ spec: resources: requests: storage: "{{ .Values.node.persistence.size }}" + {{- if .Values.node.indexer.enabled }} + - metadata: + name: indexer-data + annotations: + {{- if .Values.node.indexer.persistence.annotations}} + {{- toYaml .Values.node.indexer.persistence.annotations | nindent 4 }} + {{- end }} + spec: + accessModes: + - {{ .Values.node.indexer.persistence.accessMode | quote }} + {{- if .Values.node.indexer.persistence.storageClass }} + {{- 
if (eq "-" .Values.node.indexer.persistence.storageClass) }} + storageClassName: "" + {{- else }} + storageClassName: "{{ .Values.node.indexer.persistence.storageClass }}" + {{- end }} + {{- end }} + resources: + requests: + storage: "{{ .Values.node.indexer.persistence.size }}" + {{- end }} {{- end }} {{- end }} diff --git a/charts/taraxa-node/values.yaml b/charts/taraxa-node/values.yaml index 8ea8179326..5b060b38e7 100644 --- a/charts/taraxa-node/values.yaml +++ b/charts/taraxa-node/values.yaml @@ -141,7 +141,19 @@ node: size: 30Gi storageClass: annotations: {} - + indexer: + enabled: false + image: + repository: gcr.io/jovial-meridian-249123/taraxa-indexer + tag: latest + pullPolicy: IfNotPresent + persistence: + enabled: false + accessMode: ReadWriteOnce + size: 30Gi + storageClass: + annotations: {} + mountPoint: /data bootnode: enabled: true From 6331e5b700c4cad5d916a96c3a7f7ae07ecb6f0f Mon Sep 17 00:00:00 2001 From: rjonczy Date: Fri, 24 Feb 2023 15:30:31 +0100 Subject: [PATCH 010/162] fix indexer image keys --- charts/taraxa-node/templates/taraxa-node.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/charts/taraxa-node/templates/taraxa-node.yaml b/charts/taraxa-node/templates/taraxa-node.yaml index 4d78d2612e..47579a1ae2 100644 --- a/charts/taraxa-node/templates/taraxa-node.yaml +++ b/charts/taraxa-node/templates/taraxa-node.yaml @@ -100,12 +100,12 @@ spec: {{- end }} {{- if .Values.node.indexer.enabled }} - name: taraxa-indexer - image: "{{ .Values.node.image.repository }}:{{ .Values.node.image.tag }}" - imagePullPolicy: {{ .Values.node.image.pullPolicy }} + image: "{{ .Values.node.indexer.image.repository }}:{{ .Values.node.indexer.image.tag }}" + imagePullPolicy: {{ .Values.node.indexer.image.pullPolicy }} command: ["/taraxa-indexer"] args: - -db_path {{ .Values.node.indexer.persistence.mountPoint }} - - -blockchain_ws 'ws://localhost:8777' + - -blockchain_ws 'wss://localhost:8777' volumeMounts: - name: data mountPath: /data 
From dd8afee1c4d67964e24c20c8c8362cdda586fea7 Mon Sep 17 00:00:00 2001 From: rjonczy Date: Fri, 24 Feb 2023 15:33:56 +0100 Subject: [PATCH 011/162] ws --- charts/taraxa-node/templates/taraxa-node.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/charts/taraxa-node/templates/taraxa-node.yaml b/charts/taraxa-node/templates/taraxa-node.yaml index 47579a1ae2..88eb262401 100644 --- a/charts/taraxa-node/templates/taraxa-node.yaml +++ b/charts/taraxa-node/templates/taraxa-node.yaml @@ -105,7 +105,7 @@ spec: command: ["/taraxa-indexer"] args: - -db_path {{ .Values.node.indexer.persistence.mountPoint }} - - -blockchain_ws 'wss://localhost:8777' + - -blockchain_ws 'ws://localhost:8777' volumeMounts: - name: data mountPath: /data From 193a08c63515e17fedc0431c2b2de6d7bfcbcca8 Mon Sep 17 00:00:00 2001 From: rjonczy Date: Fri, 24 Feb 2023 16:13:45 +0100 Subject: [PATCH 012/162] temp chage of tag --- .circleci/config.yml | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index dc07605fc5..6b0c2d99e9 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -236,7 +236,11 @@ commands: --sort-by="~timestamp" \ --limit 1 \ --format="value(tags[0])" ) - + + # temp + PR_TAG=pr-2342-1a09cc9 + # temp + if [ -z ${PR_TAG} ]; then echo "No images for PR pr-<< parameters.pr-name >>" exit 1 From 114a3953a0802566e25bae8a3f914365867c2b58 Mon Sep 17 00:00:00 2001 From: rjonczy Date: Fri, 24 Feb 2023 16:15:07 +0100 Subject: [PATCH 013/162] enable indexer --- .circleci/config.yml | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 6b0c2d99e9..a028ffa522 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -240,7 +240,7 @@ commands: # temp PR_TAG=pr-2342-1a09cc9 # temp - + if [ -z ${PR_TAG} ]; then echo "No images for PR pr-<< parameters.pr-name >>" exit 1 @@ -272,7 +272,8 @@ commands: --set consensusnode.persistence.enabled=true 
\ --set node.serviceMonitor.enabled=true \ --set bootnode.serviceMonitor.enabled=false \ - --set consensusnode.serviceMonitor.enabled=true + --set consensusnode.serviceMonitor.enabled=true \ + --set node.indexer.enabled=true fi cleanup_prnet_chart: From c5461dcdf38850aa7a4ea0704178b7309bf3c152 Mon Sep 17 00:00:00 2001 From: rjonczy Date: Fri, 24 Feb 2023 17:05:52 +0100 Subject: [PATCH 014/162] fix volume name --- charts/taraxa-node/templates/taraxa-node.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/charts/taraxa-node/templates/taraxa-node.yaml b/charts/taraxa-node/templates/taraxa-node.yaml index 88eb262401..11b7f3ae58 100644 --- a/charts/taraxa-node/templates/taraxa-node.yaml +++ b/charts/taraxa-node/templates/taraxa-node.yaml @@ -107,7 +107,7 @@ spec: - -db_path {{ .Values.node.indexer.persistence.mountPoint }} - -blockchain_ws 'ws://localhost:8777' volumeMounts: - - name: data + - name: indexer-data mountPath: /data {{- end }} - name: taraxa-node From d085348203fd05df266d36c2835730d938f54b69 Mon Sep 17 00:00:00 2001 From: mfrankovi Date: Sun, 26 Feb 2023 11:20:35 +0100 Subject: [PATCH 015/162] chore: fix incorrect error log in rewards distribution --- submodules/taraxa-evm | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/submodules/taraxa-evm b/submodules/taraxa-evm index 1b085007b7..fc286996a5 160000 --- a/submodules/taraxa-evm +++ b/submodules/taraxa-evm @@ -1 +1 @@ -Subproject commit 1b085007b794f158cb39d36604c6d91ad9a4b1f7 +Subproject commit fc286996a5c6e436b436e328336f88b3523b3236 From 4f6d9ac7e7a45e6c1b8a2e1e9adbcc62d491f3c1 Mon Sep 17 00:00:00 2001 From: rjonczy Date: Mon, 27 Feb 2023 14:53:10 +0100 Subject: [PATCH 016/162] deploy specific version --- .circleci/config.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index a028ffa522..56ddd45eee 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -273,7 +273,8 @@ commands: --set 
node.serviceMonitor.enabled=true \ --set bootnode.serviceMonitor.enabled=false \ --set consensusnode.serviceMonitor.enabled=true \ - --set node.indexer.enabled=true + --set node.indexer.enabled=true \ + --set node.indexer.image.tag=develop-045732f-1677256915 fi cleanup_prnet_chart: From eb4a2eabf300a8be79fd237d1e17310197f503e6 Mon Sep 17 00:00:00 2001 From: rjonczy Date: Mon, 27 Feb 2023 16:23:16 +0100 Subject: [PATCH 017/162] changed tag of indexer --- .circleci/config.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 56ddd45eee..bd4fee6d16 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -274,7 +274,7 @@ commands: --set bootnode.serviceMonitor.enabled=false \ --set consensusnode.serviceMonitor.enabled=true \ --set node.indexer.enabled=true \ - --set node.indexer.image.tag=develop-045732f-1677256915 + --set node.indexer.image.tag=develop-11ca154-1677511014 fi cleanup_prnet_chart: From 49341914ce3fb55804b5a8ebcc44ff69a7e0a7fe Mon Sep 17 00:00:00 2001 From: rjonczy Date: Mon, 27 Feb 2023 17:12:06 +0100 Subject: [PATCH 018/162] changed indexer image --- .circleci/config.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index bd4fee6d16..239013109c 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -274,7 +274,7 @@ commands: --set bootnode.serviceMonitor.enabled=false \ --set consensusnode.serviceMonitor.enabled=true \ --set node.indexer.enabled=true \ - --set node.indexer.image.tag=develop-11ca154-1677511014 + --set node.indexer.image.tag=develop-503f613-1677511322 fi cleanup_prnet_chart: From 14d871abc65a61568fcf284a30dfbe8fd7685d36 Mon Sep 17 00:00:00 2001 From: rjonczy Date: Mon, 27 Feb 2023 17:48:10 +0100 Subject: [PATCH 019/162] fix args for taraxa-indexer --- charts/taraxa-node/templates/taraxa-node.yaml | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git 
a/charts/taraxa-node/templates/taraxa-node.yaml b/charts/taraxa-node/templates/taraxa-node.yaml index 11b7f3ae58..aafe4abd3e 100644 --- a/charts/taraxa-node/templates/taraxa-node.yaml +++ b/charts/taraxa-node/templates/taraxa-node.yaml @@ -104,8 +104,10 @@ spec: imagePullPolicy: {{ .Values.node.indexer.image.pullPolicy }} command: ["/taraxa-indexer"] args: - - -db_path {{ .Values.node.indexer.persistence.mountPoint }} - - -blockchain_ws 'ws://localhost:8777' + - -db_path + - {{ .Values.node.indexer.persistence.mountPoint }} + - -blockchain_ws + - 'ws://localhost:8777' volumeMounts: - name: indexer-data mountPath: /data From 5de622a3dcefaed0f65f9e7818c8af07246cd855 Mon Sep 17 00:00:00 2001 From: rjonczy Date: Mon, 27 Feb 2023 18:02:40 +0100 Subject: [PATCH 020/162] set nodeSelector for nodes in prnet --- .circleci/config.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.circleci/config.yml b/.circleci/config.yml index 239013109c..289ee86bf0 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -273,6 +273,9 @@ commands: --set node.serviceMonitor.enabled=true \ --set bootnode.serviceMonitor.enabled=false \ --set consensusnode.serviceMonitor.enabled=true \ + --set node.nodeSelector."cloud\\.google\\.com/gke-nodepool"=blockchain-prnet \ + --set bootnode.nodeSelector."cloud\\.google\\.com/gke-nodepool"=blockchain-prnet \ + --set consensusnode.nodeSelector."cloud\\.google\\.com/gke-nodepool"=blockchain-prnet \ --set node.indexer.enabled=true \ --set node.indexer.image.tag=develop-503f613-1677511322 fi From a11f0b15ef2f3ba3543b4551c9e44233d97684a8 Mon Sep 17 00:00:00 2001 From: rjonczy Date: Mon, 27 Feb 2023 18:32:30 +0100 Subject: [PATCH 021/162] added ingress for indexer --- charts/taraxa-node/templates/_helpers.tpl | 13 +++++ .../templates/taraxa-node-ingress.yaml | 50 +++++++++++++++++++ charts/taraxa-node/values.yaml | 4 ++ 3 files changed, 67 insertions(+) diff --git a/charts/taraxa-node/templates/_helpers.tpl 
b/charts/taraxa-node/templates/_helpers.tpl index a9855a8bc1..f183cd620b 100644 --- a/charts/taraxa-node/templates/_helpers.tpl +++ b/charts/taraxa-node/templates/_helpers.tpl @@ -87,6 +87,19 @@ If release name contains chart name it will be used as a full name. {{- end -}} {{- end -}} +{{/* +Create a default fully qualified indexer name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "taraxa-node.indexerName" -}} +{{- if .Values.indexerNameOverride -}} +{{- .Values.indexerNameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s.%s" "ws" .Release.Name .Values.domain | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} + {{/* Create a default fully qualified graphql websocket. We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). diff --git a/charts/taraxa-node/templates/taraxa-node-ingress.yaml b/charts/taraxa-node/templates/taraxa-node-ingress.yaml index 927e068001..e42f1f868b 100644 --- a/charts/taraxa-node/templates/taraxa-node-ingress.yaml +++ b/charts/taraxa-node/templates/taraxa-node-ingress.yaml @@ -1,5 +1,6 @@ {{ if .Values.node.enabled }} {{- if .Values.node.ingress.enabled -}} + {{- $fullName := include "taraxa-node.fullname" . -}} {{- $apiIsStable := eq (include "taraxa-node.ingress.isStable" .) "true" -}} {{- $ingressSupportsPathType := eq (include "taraxa-node.ingress.supportsPathType" .) 
"true" -}} @@ -9,11 +10,14 @@ {{- $servicePortRpcWs := 8777 -}} {{- $servicePortGraphQl := 9777 -}} {{- $servicePortGraphQlWs := 6777 -}} +{{- $servicePortHttp := 8080 -}} + {{- range .Values.node.service.ports }} {{ if eq .name "rest"}} {{ $servicePortRpc = .port }} {{ end }} {{ if eq .name "ws"}} {{ $servicePortRpcWs = .port }} {{ end }} {{ if eq .name "graphql"}} {{ $servicePortGraphQl = .port }} {{ end }} {{ if eq .name "graphql-ws"}} {{ $servicePortGraphQlWs = .port }} {{ end }} + {{ if eq .name "http-indexer"}} {{ $servicePortHttp = .port }} {{ end }} {{- end }} {{- $pathType := .Values.node.ingress.pathType | default "ImplementationSpecific" -}} @@ -197,5 +201,51 @@ spec: serviceName: {{ $serviceName }} servicePort: {{ $servicePortGraphQlWs }} {{- end }} + +--- +apiVersion: {{ include "taraxa-node.ingress.apiVersion" . }} +kind: Ingress +metadata: + name: {{ $fullName }}-indexer + labels: + app: taraxa-node + app.kubernetes.io/name: {{ include "taraxa-node.name" . }} + helm.sh/chart: {{ include "taraxa-node.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + {{- with .Values.node.ingress.annotationsIndexer }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: +{{- if $apiIsStable }} +{{- if .Values.node.ingress.ingressClassName }} + ingressClassName: {{ .Values.node.ingress.ingressClassName }} +{{- end }} +{{- end }} + {{- if .Values.wildCertDomainSecret }} + tls: + - hosts: + - {{ include "taraxa-node.indexerName" . | quote }} + secretName: {{ .Values.wildCertDomainSecret }} + {{- end }} + rules: + - host: {{ include "taraxa-node.indexerName" . 
| quote }} + http: + paths: + - path: / + {{- if and $pathType $ingressSupportsPathType }} + pathType: {{ $pathType }} + {{- end }} + backend: + {{- if $apiIsStable }} + service: + name: {{ $serviceName }} + port: + number: {{ $servicePortHttp }} + {{- else }} + serviceName: {{ $serviceName }} + servicePort: {{ $servicePortHttp }} + {{- end }} {{- end }} {{- end }} diff --git a/charts/taraxa-node/values.yaml b/charts/taraxa-node/values.yaml index 5b060b38e7..9599a9669f 100644 --- a/charts/taraxa-node/values.yaml +++ b/charts/taraxa-node/values.yaml @@ -95,6 +95,7 @@ node: annotationsRpcWS: {} annotationsGraphQl: {} annotationsGraphQlWS: {} + annotationsIndexer: {} hosts: [] tls: [] ports: @@ -112,6 +113,9 @@ node: - name: udp-listen-port containerPort: 10002 protocol: UDP + - name: http-indexer + containerPort: 8080 + protocol: TCP service: ports: - name: rest From 7b9173b67f1c2a82ac50ccfae073efab0a13e8f2 Mon Sep 17 00:00:00 2001 From: rjonczy Date: Mon, 27 Feb 2023 18:54:54 +0100 Subject: [PATCH 022/162] fix name for indexer --- charts/taraxa-node/templates/_helpers.tpl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/charts/taraxa-node/templates/_helpers.tpl b/charts/taraxa-node/templates/_helpers.tpl index f183cd620b..02d1c805b1 100644 --- a/charts/taraxa-node/templates/_helpers.tpl +++ b/charts/taraxa-node/templates/_helpers.tpl @@ -96,7 +96,7 @@ If release name contains chart name it will be used as a full name. 
{{- if .Values.indexerNameOverride -}} {{- .Values.indexerNameOverride | trunc 63 | trimSuffix "-" -}} {{- else -}} -{{- printf "%s-%s.%s" "ws" .Release.Name .Values.domain | trunc 63 | trimSuffix "-" -}} +{{- printf "%s-%s.%s" "indexer" .Release.Name .Values.domain | trunc 63 | trimSuffix "-" -}} {{- end -}} {{- end -}} From a24e9d5ca0d5e1fced40b948d61fbd8c6710c023 Mon Sep 17 00:00:00 2001 From: rjonczy Date: Mon, 27 Feb 2023 19:10:31 +0100 Subject: [PATCH 023/162] add annotation for indexer ingress --- charts/taraxa-node/values.yaml | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/charts/taraxa-node/values.yaml b/charts/taraxa-node/values.yaml index 9599a9669f..517e6d0267 100644 --- a/charts/taraxa-node/values.yaml +++ b/charts/taraxa-node/values.yaml @@ -95,7 +95,11 @@ node: annotationsRpcWS: {} annotationsGraphQl: {} annotationsGraphQlWS: {} - annotationsIndexer: {} + annotationsIndexer: + nginx.ingress.kubernetes.io/affinity: "cookie" + nginx.ingress.kubernetes.io/session-cookie-name: "stickounet" + nginx.ingress.kubernetes.io/session-cookie-expires: "172800" + nginx.ingress.kubernetes.io/session-cookie-max-age: "172800" hosts: [] tls: [] ports: From 10636d8611f6595f0195dff74bb35e3ffd48a483 Mon Sep 17 00:00:00 2001 From: rjonczy Date: Mon, 27 Feb 2023 19:10:54 +0100 Subject: [PATCH 024/162] add missing port --- charts/taraxa-node/values.yaml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/charts/taraxa-node/values.yaml b/charts/taraxa-node/values.yaml index 517e6d0267..286d9d1c22 100644 --- a/charts/taraxa-node/values.yaml +++ b/charts/taraxa-node/values.yaml @@ -139,6 +139,9 @@ node: - name: metrics port: 8888 protocol: TCP + - name: http-indexer + containerPort: 8080 + protocol: TCP serviceMonitor: enabled: false resources: {} From 6d1393b4e7b14b6d3d3e3ecb12a05178f802353c Mon Sep 17 00:00:00 2001 From: rjonczy Date: Mon, 27 Feb 2023 19:20:14 +0100 Subject: [PATCH 025/162] fix port --- charts/taraxa-node/values.yaml | 2 +- 1 file 
changed, 1 insertion(+), 1 deletion(-) diff --git a/charts/taraxa-node/values.yaml b/charts/taraxa-node/values.yaml index 286d9d1c22..b5dffcb95e 100644 --- a/charts/taraxa-node/values.yaml +++ b/charts/taraxa-node/values.yaml @@ -140,7 +140,7 @@ node: port: 8888 protocol: TCP - name: http-indexer - containerPort: 8080 + port: 8080 protocol: TCP serviceMonitor: enabled: false From 208dbfc3bc8b0973592f8ea4898d73bf72045ef6 Mon Sep 17 00:00:00 2001 From: rjonczy Date: Tue, 28 Feb 2023 12:36:39 +0100 Subject: [PATCH 026/162] added indexer to github comment --- .circleci/config.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.circleci/config.yml b/.circleci/config.yml index 289ee86bf0..d000cd63da 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -577,6 +577,8 @@ jobs: https://explorer-pr-<>.prnet.taraxa.io \ RPC>.prnet.taraxa.io >\ https://rpc-pr-<>.prnet.taraxa.io \ + Indexer>.prnet.taraxa.io >\ + https://indexer-pr-<>.prnet.taraxa.io \ \

Boot Nodes

\ \ From c4275a2a981f014cde412b7a159a312843296d24 Mon Sep 17 00:00:00 2001 From: rjonczy Date: Tue, 28 Feb 2023 12:38:22 +0100 Subject: [PATCH 027/162] removed temp tag --- .circleci/config.yml | 4 ---- 1 file changed, 4 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index d000cd63da..2c1fc7915c 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -236,10 +236,6 @@ commands: --sort-by="~timestamp" \ --limit 1 \ --format="value(tags[0])" ) - - # temp - PR_TAG=pr-2342-1a09cc9 - # temp if [ -z ${PR_TAG} ]; then echo "No images for PR pr-<< parameters.pr-name >>" From ee164e3b999cb438e9ba72ccef37fded0efb3cec Mon Sep 17 00:00:00 2001 From: rjonczy Date: Tue, 28 Feb 2023 12:42:17 +0100 Subject: [PATCH 028/162] remove tag from indexer on ci pipeline --- .circleci/config.yml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 2c1fc7915c..7e64d59226 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -272,8 +272,7 @@ commands: --set node.nodeSelector."cloud\\.google\\.com/gke-nodepool"=blockchain-prnet \ --set bootnode.nodeSelector."cloud\\.google\\.com/gke-nodepool"=blockchain-prnet \ --set consensusnode.nodeSelector."cloud\\.google\\.com/gke-nodepool"=blockchain-prnet \ - --set node.indexer.enabled=true \ - --set node.indexer.image.tag=develop-503f613-1677511322 + --set node.indexer.enabled=true fi cleanup_prnet_chart: From 1fa0075a5e860c5b0679298d02c6f0f0e60365d4 Mon Sep 17 00:00:00 2001 From: rjonczy Date: Tue, 28 Feb 2023 17:20:02 +0100 Subject: [PATCH 029/162] fix pullPolicy key --- charts/taraxa-node/templates/bootnode.yaml | 4 ++-- charts/taraxa-node/templates/consensus-node.yaml | 4 ++-- charts/taraxa-node/templates/taraxa-node.yaml | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/charts/taraxa-node/templates/bootnode.yaml b/charts/taraxa-node/templates/bootnode.yaml index e3c1d04a90..8c2a331d4d 100644 --- 
a/charts/taraxa-node/templates/bootnode.yaml +++ b/charts/taraxa-node/templates/bootnode.yaml @@ -40,7 +40,7 @@ spec: image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" {{- end }} - {{- if .Values.bootnode.image.imagePullPolicy }} + {{- if .Values.bootnode.image.pullPolicy }} imagePullPolicy: {{ .Values.bootnode.image.pullPolicy }} {{- else }} imagePullPolicy: {{ .Values.image.pullPolicy }} @@ -69,7 +69,7 @@ spec: image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" {{- end }} - {{- if .Values.bootnode.image.imagePullPolicy }} + {{- if .Values.bootnode.image.pullPolicy }} imagePullPolicy: {{ .Values.bootnode.image.pullPolicy }} {{- else }} imagePullPolicy: {{ .Values.image.pullPolicy }} diff --git a/charts/taraxa-node/templates/consensus-node.yaml b/charts/taraxa-node/templates/consensus-node.yaml index 8f8c7ca8af..fdd697f108 100644 --- a/charts/taraxa-node/templates/consensus-node.yaml +++ b/charts/taraxa-node/templates/consensus-node.yaml @@ -49,7 +49,7 @@ spec: {{- else }} image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" {{- end }} - {{- if .Values.consensusnode.image.imagePullPolicy }} + {{- if .Values.consensusnode.image.pullPolicy }} imagePullPolicy: {{ .Values.consensusnode.image.pullPolicy }} {{- else }} imagePullPolicy: {{ .Values.image.pullPolicy }} @@ -107,7 +107,7 @@ spec: {{- else }} image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" {{- end }} - {{- if .Values.consensusnode.image.imagePullPolicy }} + {{- if .Values.consensusnode.image.pullPolicy }} imagePullPolicy: {{ .Values.consensusnode.image.pullPolicy }} {{- else }} imagePullPolicy: {{ .Values.image.pullPolicy }} diff --git a/charts/taraxa-node/templates/taraxa-node.yaml b/charts/taraxa-node/templates/taraxa-node.yaml index aafe4abd3e..b86e5d9ed5 100644 --- a/charts/taraxa-node/templates/taraxa-node.yaml +++ b/charts/taraxa-node/templates/taraxa-node.yaml @@ -40,7 +40,7 @@ spec: {{- else }} image: "{{ .Values.image.repository }}:{{ 
.Values.image.tag }}" {{- end }} - {{- if .Values.node.image.imagePullPolicy }} + {{- if .Values.node.image.pullPolicy }} imagePullPolicy: {{ .Values.node.image.pullPolicy }} {{- else }} imagePullPolicy: {{ .Values.image.pullPolicy }} @@ -118,7 +118,7 @@ spec: {{- else }} image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" {{- end }} - {{- if .Values.node.image.imagePullPolicy }} + {{- if .Values.node.image.pullPolicy }} imagePullPolicy: {{ .Values.node.image.pullPolicy }} {{- else }} imagePullPolicy: {{ .Values.image.pullPolicy }} From 6a5b406fcb77f667fdeb4c303fb0c92201050f3e Mon Sep 17 00:00:00 2001 From: rjonczy Date: Tue, 28 Feb 2023 17:29:10 +0100 Subject: [PATCH 030/162] added affinity-mode as annotation --- charts/taraxa-node/values.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/charts/taraxa-node/values.yaml b/charts/taraxa-node/values.yaml index b5dffcb95e..08a4813055 100644 --- a/charts/taraxa-node/values.yaml +++ b/charts/taraxa-node/values.yaml @@ -100,6 +100,7 @@ node: nginx.ingress.kubernetes.io/session-cookie-name: "stickounet" nginx.ingress.kubernetes.io/session-cookie-expires: "172800" nginx.ingress.kubernetes.io/session-cookie-max-age: "172800" + nginx.ingress.kubernetes.io/affinity-mode: persistent hosts: [] tls: [] ports: From fd298159ac5d3c756f61c4fe28c10dec7786ad7f Mon Sep 17 00:00:00 2001 From: Robert Jonczy Date: Tue, 28 Feb 2023 17:41:41 +0100 Subject: [PATCH 031/162] move part of .gitignore helm chart folder --- .gitignore | 4 ---- charts/taraxa-node/.gitignore | 6 ++++++ 2 files changed, 6 insertions(+), 4 deletions(-) create mode 100644 charts/taraxa-node/.gitignore diff --git a/.gitignore b/.gitignore index 50902de574..c3c9173fb8 100644 --- a/.gitignore +++ b/.gitignore @@ -40,7 +40,3 @@ charts/*/charts/*.tgz # do not include generated documentation doxygen_docs - -# Helm stuff -requirements.lock -Chart.lock \ No newline at end of file diff --git a/charts/taraxa-node/.gitignore b/charts/taraxa-node/.gitignore new 
file mode 100644 index 0000000000..7d251af7f5 --- /dev/null +++ b/charts/taraxa-node/.gitignore @@ -0,0 +1,6 @@ +# do not include into git chart dependencies +charts/*.tgz + +# Helm stuff +requirements.lock +Chart.lock \ No newline at end of file From 10f571b26a50370c06466a94a1211c0c5b7ccbf9 Mon Sep 17 00:00:00 2001 From: Matus Kysel Date: Tue, 28 Feb 2023 09:41:23 +0100 Subject: [PATCH 032/162] chore: use old version of conan --- Dockerfile | 8 ++++++-- doc/building.md | 14 +++++++------- 2 files changed, 13 insertions(+), 9 deletions(-) diff --git a/Dockerfile b/Dockerfile index c854390708..c22343e765 100644 --- a/Dockerfile +++ b/Dockerfile @@ -59,8 +59,12 @@ RUN add-apt-repository ppa:ethereum/ethereum \ ENV CXX="clang++-${LLVM_VERSION}" ENV CC="clang-${LLVM_VERSION}" +# HACK remove this when update to conan 2.0 +RUN ln -s /usr/bin/clang-${LLVM_VERSION} /usr/bin/clang +RUN ln -s /usr/bin/clang++-${LLVM_VERSION} /usr/bin/clang++ + # Install conan -RUN pip3 install --upgrade conan +RUN pip3 install conan==1.59.0 ENV CONAN_REVISIONS_ENABLED=1 @@ -76,7 +80,7 @@ RUN conan remote add -f bincrafters "https://bincrafters.jfrog.io/artifactory/ap && conan profile update settings.build_type=RelWithDebInfo clang \ && conan profile update env.CC=clang-$LLVM_VERSION clang \ && conan profile update env.CXX=clang++-$LLVM_VERSION clang \ - && conan install --build missing -pr=clang . + && conan install --build missing -pr:b=clang . ################################################################### # Build stage - use builder image for actual build of taraxa node # diff --git a/doc/building.md b/doc/building.md index 6f84ac3353..b6434a9927 100644 --- a/doc/building.md +++ b/doc/building.md @@ -37,7 +37,7 @@ will build out of the box without further effort: sudo apt install solc # Install conan package manager - sudo python3 -m pip install conan + sudo python3 -m pip install conan==1.59.0 # Setup clang as default compiler either in your IDE or by env. 
variables" export C="clang-14" @@ -95,7 +95,7 @@ will build out of the box without further effort: # Install conan package manager # >= 1.36.0 version is required to work properly with clang-14 - sudo python3 -m pip install conan + sudo python3 -m pip install conan==1.59.0 # Install cmake # >= 3.20 version is required for JSON subcommand @@ -216,7 +216,7 @@ Sometimes conan cache goes wrong, so you should clean it up. You could face erro ERROR: boost/1.76.0: Error in package_info() method, line 1492 raise ConanException("These libraries were expected to be built, but were not built: {}".format(non_built)) ConanException: These libraries were expected to be built, but were not built: {'boost_math_c99l', 'boost_json', 'boost_math_c99', 'boost_nowide', 'boost_math_tr1l', 'boost_math_tr1f', 'boost_math_tr1', 'boost_math_c99f'} -``` +``` It could be cleaned up with: @@ -227,7 +227,7 @@ rm -rf ~/.conan/data #### Project building issue If you are facing strange errors with project compilation it could be a problem that after install of llvm clang if pointing to a default apple clang. You could check that with `clang --version`. It should not point to `/Library/Developer/CommandLineTools/usr/bin`, but something like `/usr/local/opt/llvm/bin`. So you should specify full paths to a compiler: -1. Check full path with `brew info llvm`. Search for command that looks like +1. Check full path with `brew info llvm`. Search for command that looks like ``` echo 'export PATH="/usr/local/opt/llvm/bin:$PATH"' >> ~/.zshrc ``` @@ -248,7 +248,7 @@ make -j$(nproc) ## Building on M1 Macs for x86_64 with Rosetta2 -You should be able to build project following default MacOS building process. But here is a guide how to build project for x86_64 arch with Rosetta2. +You should be able to build project following default MacOS building process. But here is a guide how to build project for x86_64 arch with Rosetta2. 
### Install Rosetta2 @@ -258,11 +258,11 @@ You should be able to build project following default MacOS building process. Bu arch -x86_64 zsh -### Install Homebrew +### Install Homebrew /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)" -### Install dependencies +### Install dependencies /usr/local/bin/brew install coreutils go autoconf automake gflags git libtool llvm@13 make pkg-config cmake conan snappy zstd rapidjson gmp mpfr From 61061921e6874316dbb6b110386e545735629ce3 Mon Sep 17 00:00:00 2001 From: Matus Kysel Date: Tue, 28 Feb 2023 17:05:00 +0100 Subject: [PATCH 033/162] chore: update dependencies --- conanfile.py | 6 +++--- .../tarcap/packets_handlers/transaction_packet_handler.hpp | 3 +-- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/conanfile.py b/conanfile.py index ca76325458..736f97972d 100644 --- a/conanfile.py +++ b/conanfile.py @@ -13,11 +13,11 @@ class TaraxaConan(ConanFile): generators = "cmake" def requirements(self): - self.requires("boost/1.80.0") + self.requires("boost/1.81.0") self.requires("cppcheck/2.7.5") - self.requires("openssl/1.1.1s") + self.requires("openssl/1.1.1t") self.requires("cryptopp/8.7.0") - self.requires("gtest/1.12.1") + self.requires("gtest/1.13.0") self.requires("lz4/1.9.4") self.requires("rocksdb/6.29.5") self.requires("prometheus-cpp/1.1.0") diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/transaction_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/transaction_packet_handler.hpp index dd38d286fa..cd71341cf5 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets_handlers/transaction_packet_handler.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/transaction_packet_handler.hpp @@ -27,8 +27,7 @@ class TransactionPacketHandler final : public PacketHandler { * @param transactions serialized transactions * */ - void sendTransactions(std::shared_ptr 
peer, - std::vector>&& transactions); + void sendTransactions(std::shared_ptr peer, std::vector>&& transactions); /** * @brief Sends batch of transactions to all connected peers From 1c75bc7f0aca3b4048012530e27185512712744c Mon Sep 17 00:00:00 2001 From: mfrankovi Date: Sun, 26 Feb 2023 15:22:32 +0100 Subject: [PATCH 034/162] chore: vote sync error log remove --- .../include/vote_manager/vote_manager.hpp | 10 +++------- .../src/vote_manager/vote_manager.cpp | 17 +++++------------ .../get_next_votes_sync_packet_handler.cpp | 19 ++++++++++++++++--- tests/vote_test.cpp | 1 - 4 files changed, 24 insertions(+), 23 deletions(-) diff --git a/libraries/core_libs/consensus/include/vote_manager/vote_manager.hpp b/libraries/core_libs/consensus/include/vote_manager/vote_manager.hpp index 8d904c22e0..a456436801 100644 --- a/libraries/core_libs/consensus/include/vote_manager/vote_manager.hpp +++ b/libraries/core_libs/consensus/include/vote_manager/vote_manager.hpp @@ -197,23 +197,19 @@ class VoteManager { * @param period * @param round * @param type - * @param peer_filter if specified, get only votes that are unknown for peer * @return vector of votes if 2t+1 voted block votes found, otherwise empty vector */ - std::vector> getTwoTPlusOneVotedBlockVotes( - PbftPeriod period, PbftRound round, TwoTPlusOneVotedBlockType type, - const std::shared_ptr& peer_filter = {}) const; + std::vector> getTwoTPlusOneVotedBlockVotes(PbftPeriod period, PbftRound round, + TwoTPlusOneVotedBlockType type) const; /** * Get all 2t+1 voted block next votes(both for null block as well as specific block) for specific period and round * * @param period * @param round - * @param peer_filter if specified, get only votes that are unknown for peer * @return vector of next votes if 2t+1 voted block votes found, otherwise empty vector */ - std::vector> getAllTwoTPlusOneNextVotes( - PbftPeriod period, PbftRound round, const std::shared_ptr& peer_filter = {}) const; + std::vector> 
getAllTwoTPlusOneNextVotes(PbftPeriod period, PbftRound round) const; /** * @brief Sets current pbft period & round. It also checks if we dont alredy have 2t+1 vote bundles(pf any type) for diff --git a/libraries/core_libs/consensus/src/vote_manager/vote_manager.cpp b/libraries/core_libs/consensus/src/vote_manager/vote_manager.cpp index 74b28f5058..f5911eae59 100644 --- a/libraries/core_libs/consensus/src/vote_manager/vote_manager.cpp +++ b/libraries/core_libs/consensus/src/vote_manager/vote_manager.cpp @@ -871,9 +871,8 @@ std::optional VoteManager::getTwoTPlusOneVotedBlock(PbftPeriod perio return two_t_plus_one_voted_block_it->second.first; } -std::vector> VoteManager::getTwoTPlusOneVotedBlockVotes( - PbftPeriod period, PbftRound round, TwoTPlusOneVotedBlockType type, - const std::shared_ptr& peer_filter) const { +std::vector> VoteManager::getTwoTPlusOneVotedBlockVotes(PbftPeriod period, PbftRound round, + TwoTPlusOneVotedBlockType type) const { std::shared_lock lock(verified_votes_access_); const auto found_period_it = verified_votes_.find(period); @@ -909,23 +908,17 @@ std::vector> VoteManager::getTwoTPlusOneVotedBlockVotes( std::vector> votes; votes.reserve(found_verified_votes_it->second.second.size()); for (const auto& vote : found_verified_votes_it->second.second) { - if (peer_filter && peer_filter->isVoteKnown(vote.first)) { - continue; - } - votes.push_back(vote.second); } return votes; } -std::vector> VoteManager::getAllTwoTPlusOneNextVotes( - PbftPeriod period, PbftRound round, const std::shared_ptr& peer_filter) const { - auto next_votes = - getTwoTPlusOneVotedBlockVotes(period, round, TwoTPlusOneVotedBlockType::NextVotedBlock, peer_filter); +std::vector> VoteManager::getAllTwoTPlusOneNextVotes(PbftPeriod period, PbftRound round) const { + auto next_votes = getTwoTPlusOneVotedBlockVotes(period, round, TwoTPlusOneVotedBlockType::NextVotedBlock); auto null_block_next_vote = - getTwoTPlusOneVotedBlockVotes(period, round, 
TwoTPlusOneVotedBlockType::NextVotedNullBlock, peer_filter); + getTwoTPlusOneVotedBlockVotes(period, round, TwoTPlusOneVotedBlockType::NextVotedNullBlock); if (!null_block_next_vote.empty()) { next_votes.reserve(next_votes.size() + null_block_next_vote.size()); next_votes.insert(next_votes.end(), std::make_move_iterator(null_block_next_vote.begin()), diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/get_next_votes_sync_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/get_next_votes_sync_packet_handler.cpp index 6a6baa27ca..3b9ddb71c3 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/get_next_votes_sync_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/get_next_votes_sync_packet_handler.cpp @@ -33,8 +33,7 @@ void GetNextVotesSyncPacketHandler::process(const PacketData &packet_data, const return; } - std::vector> next_votes = - vote_mgr_->getAllTwoTPlusOneNextVotes(pbft_period, pbft_round - 1, peer); + std::vector> next_votes = vote_mgr_->getAllTwoTPlusOneNextVotes(pbft_period, pbft_round - 1); // In edge case this could theoretically happen due to race condition when we moved to the next period or round // right before calling getAllTwoTPlusOneNextVotes with specific period & round if (next_votes.empty()) { @@ -52,13 +51,27 @@ void GetNextVotesSyncPacketHandler::process(const PacketData &packet_data, const return; } - next_votes = vote_mgr_->getAllTwoTPlusOneNextVotes(tmp_pbft_period, tmp_pbft_round - 1, peer); + next_votes = vote_mgr_->getAllTwoTPlusOneNextVotes(tmp_pbft_period, tmp_pbft_round - 1); if (next_votes.empty()) { LOG(log_er_) << "No next votes returned for period " << tmp_pbft_period << ", round " << tmp_pbft_round - 1; return; } } + std::vector> next_votes_to_send; + next_votes_to_send.reserve(next_votes.size()); + for (const auto &vote : next_votes) { + if (!peer->isVoteKnown(vote->getHash())) { + next_votes_to_send.emplace_back(vote); + } + } + + if 
(next_votes_to_send.empty()) { + LOG(log_dg_) << "Votes already gossiped, no need to send votes sync packet for" << pbft_period << ", round " + << pbft_round - 1; + return; + } + LOG(log_nf_) << "Next votes sync packet with " << next_votes.size() << " votes sent to " << peer->getId(); sendPbftVotesBundle(peer, std::move(next_votes)); } diff --git a/tests/vote_test.cpp b/tests/vote_test.cpp index 20cd913912..9731d38851 100644 --- a/tests/vote_test.cpp +++ b/tests/vote_test.cpp @@ -168,7 +168,6 @@ TEST_F(VoteTest, vote_broadcast) { WAIT_EXPECT_EQ(ctx, vote_mgr2->getVerifiedVotesSize(), 1) WAIT_EXPECT_EQ(ctx, vote_mgr3->getVerifiedVotesSize(), 1) }); - EXPECT_EQ(vote_mgr1->getVerifiedVotesSize(), 0); } TEST_F(VoteTest, two_t_plus_one_votes) { From c037d49d55f9587c5aae197690c1809ddc9c965e Mon Sep 17 00:00:00 2001 From: JakubFornadel Date: Tue, 28 Feb 2023 15:34:19 +0100 Subject: [PATCH 035/162] chore: add ValidatorExists flag into Undelegation struct returned from dpos contract --- submodules/taraxa-evm | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/submodules/taraxa-evm b/submodules/taraxa-evm index fc286996a5..7ee2d3cae3 160000 --- a/submodules/taraxa-evm +++ b/submodules/taraxa-evm @@ -1 +1 @@ -Subproject commit fc286996a5c6e436b436e328336f88b3523b3236 +Subproject commit 7ee2d3cae360aca6d6f3c38a40ce9a6438840ca4 From bbd34eebc6b178db1de0afe93de68dc485248f23 Mon Sep 17 00:00:00 2001 From: kstdl Date: Wed, 1 Mar 2023 16:28:36 +0100 Subject: [PATCH 036/162] fix: update evm submodule with correct address in dpos_contract logs --- submodules/taraxa-evm | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/submodules/taraxa-evm b/submodules/taraxa-evm index 7ee2d3cae3..1094380a89 160000 --- a/submodules/taraxa-evm +++ b/submodules/taraxa-evm @@ -1 +1 @@ -Subproject commit 7ee2d3cae360aca6d6f3c38a40ce9a6438840ca4 +Subproject commit 1094380a89d28e395503a55076bb5876c64077c4 From d42876a5b2e4fc7053a3daf9c4fe7b657a296185 Mon Sep 17 00:00:00 2001 
From: Leonard Mocanu Date: Thu, 2 Mar 2023 10:59:25 +0200 Subject: [PATCH 037/162] chore: adds cors config for indexer --- charts/taraxa-node/Chart.yaml | 2 +- charts/taraxa-node/values.yaml | 4 ++++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/charts/taraxa-node/Chart.yaml b/charts/taraxa-node/Chart.yaml index 5e062c2ad1..9c315b27b5 100644 --- a/charts/taraxa-node/Chart.yaml +++ b/charts/taraxa-node/Chart.yaml @@ -2,7 +2,7 @@ apiVersion: v2 appVersion: "1.0" description: Kubernetes helm chart for Taraxa blockchain full node implementation. name: taraxa-node -version: 0.3.3 +version: 0.3.4 keywords: - blockchain - taraxa diff --git a/charts/taraxa-node/values.yaml b/charts/taraxa-node/values.yaml index 08a4813055..46ddcb8617 100644 --- a/charts/taraxa-node/values.yaml +++ b/charts/taraxa-node/values.yaml @@ -101,6 +101,10 @@ node: nginx.ingress.kubernetes.io/session-cookie-expires: "172800" nginx.ingress.kubernetes.io/session-cookie-max-age: "172800" nginx.ingress.kubernetes.io/affinity-mode: persistent + nginx.ingress.kubernetes.io/enable-cors: "true" + nginx.ingress.kubernetes.io/cors-allow-origin: "*" + nginx.ingress.kubernetes.io/cors-allow-methods: "PUT, GET, POST, OPTIONS, DELETE" + nginx.ingress.kubernetes.io/cors-allow-headers: "Authorization,Range,Content-Range,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Access-Control-Allow-Origin" hosts: [] tls: [] ports: From 2732aac8491dd19b8feb0e9699c058f0214acfc9 Mon Sep 17 00:00:00 2001 From: Matus Kysel Date: Thu, 2 Mar 2023 16:55:33 +0100 Subject: [PATCH 038/162] chore: fix transfer_lot_of_blocks test --- tests/network_test.cpp | 34 +++++++++++++++++----------------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/tests/network_test.cpp b/tests/network_test.cpp index 4807599669..9cad883f3e 100644 --- a/tests/network_test.cpp +++ b/tests/network_test.cpp @@ -73,8 +73,7 @@ TEST_F(NetworkTest, transfer_block) { ASSERT_EQ(1, num_received); } -// 
Test creates two Network setup and verifies sending blocks between is successfull -// This test can not work anymore as we are marking other nodes as malicous becasue of invalid dag blocks +// Test creates two Network setup and verifies sending blocks between is successful TEST_F(NetworkTest, transfer_lot_of_blocks) { auto node_cfgs = make_node_cfgs(2, 1, 20); auto nodes = launch_nodes(node_cfgs); @@ -91,9 +90,8 @@ TEST_F(NetworkTest, transfer_lot_of_blocks) { const auto nw1 = node1->getNetwork(); const auto nw2 = node2->getNetwork(); - const auto trxs = samples::createSignedTrxSamples(0, 1500, g_secret); + auto trxs = samples::createSignedTrxSamples(0, 1500, g_secret); const auto estimation = node1->getTransactionManager()->estimateTransactionGas(trxs[0], {}); - const std::vector estimations(trxs.size(), estimation); // node1 add one valid block const auto proposal_level = 1; @@ -106,9 +104,9 @@ TEST_F(NetworkTest, transfer_lot_of_blocks) { dev::bytes vdf_msg = DagManager::getVdfMessage(dag_genesis, {trxs[0]}); vdf.computeVdfSolution(sortition_params, vdf_msg, false); DagBlock blk(dag_genesis, proposal_level, {}, {trxs[0]->getHash()}, estimation, vdf, node1->getSecretKey()); - auto block_hash = blk.getHash(); + const auto block_hash = blk.getHash(); + dag_mgr1->addDagBlock(std::move(blk), {trxs[0]}); std::vector> dag_blocks; - dag_blocks.emplace_back(std::make_shared(std::move(blk))); // creating lot of blocks just for size std::vector trx_hashes; @@ -120,15 +118,20 @@ TEST_F(NetworkTest, transfer_lot_of_blocks) { trx_hashes.push_back(trx->getHash()); verified_transactions.push_back(trx); } - - for (int i = 0; i < 100; ++i) { + { const auto proposal_period = *db1->getProposalPeriodForDagLevel(proposal_level + 1); const auto period_block_hash = db1->getPeriodBlockHash(proposal_period); const auto sortition_params = dag_mgr1->sortitionParamsManager().getSortitionParams(proposal_period); - vdf_sortition::VdfSortition vdf(sortition_params, node1->getVrfSecretKey(), - 
VrfSortitionBase::makeVrfInput(proposal_level + 1, period_block_hash), 1, 1); - DagBlock blk(block_hash, proposal_level + 1, {}, {trxs[i + 1]->getHash()}, {}, vdf, node1->getSecretKey()); - dag_blocks.emplace_back(std::make_shared(blk)); + + for (int i = 0; i < 100; ++i) { + vdf_sortition::VdfSortition vdf(sortition_params, node1->getVrfSecretKey(), + VrfSortitionBase::makeVrfInput(proposal_level + 1, period_block_hash), 1, 1); + dev::bytes vdf_msg = DagManager::getVdfMessage(block_hash, {trxs[i + 1]}); + vdf.computeVdfSolution(sortition_params, vdf_msg, false); + DagBlock blk(block_hash, proposal_level + 1, {}, {trxs[i + 1]->getHash()}, estimation, vdf, + node1->getSecretKey()); + dag_blocks.emplace_back(std::make_shared(blk)); + } } for (auto trx : verified_transactions) @@ -138,16 +141,13 @@ TEST_F(NetworkTest, transfer_lot_of_blocks) { dag_mgr1->addDagBlock(DagBlock(*dag_blocks[i]), {trxs[i]}); } wait({1s, 200ms}, [&](auto& ctx) { WAIT_EXPECT_NE(ctx, dag_mgr1->getDagBlock(block_hash), nullptr) }); - - taraxa::thisThreadSleepForSeconds(1); const auto node1_period = node1->getPbftChain()->getPbftChainSize(); const auto node2_period = node2->getPbftChain()->getPbftChainSize(); std::cout << "node1 period " << node1_period << ", node2 period " << node2_period << std::endl; nw1->getSpecificHandler()->sendBlocks( - nw2->getNodeId(), std::move(dag_blocks), {}, node2_period, node1_period); - + nw2->getNodeId(), std::move(dag_blocks), std::move(trxs), node2_period, node1_period); std::cout << "Waiting Sync ..." 
<< std::endl; - wait({30s, 200ms}, [&](auto& ctx) { WAIT_EXPECT_NE(ctx, dag_mgr2->getDagBlock(block_hash), nullptr) }); + wait({120s, 200ms}, [&](auto& ctx) { WAIT_EXPECT_NE(ctx, dag_mgr2->getDagBlock(block_hash), nullptr) }); } TEST_F(NetworkTest, update_peer_chainsize) { From ceeea7d66c333f3a825402a5e911f670f74ff7f7 Mon Sep 17 00:00:00 2001 From: Matus Kysel Date: Fri, 3 Mar 2023 08:11:39 +0100 Subject: [PATCH 039/162] chore: refactor transaction exceptions --- .../packets_handlers/dag_sync_packet_handler.cpp | 3 ++- .../packets_handlers/pbft_sync_packet_handler.cpp | 2 +- .../packets_handlers/transaction_packet_handler.cpp | 3 ++- .../transaction/include/transaction/transaction.hpp | 12 ++++++++++-- libraries/types/transaction/src/transaction.cpp | 4 ++-- tests/transaction_test.cpp | 4 ++-- 6 files changed, 19 insertions(+), 9 deletions(-) diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/dag_sync_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/dag_sync_packet_handler.cpp index ed1406e50f..c295494e6a 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/dag_sync_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/dag_sync_packet_handler.cpp @@ -3,6 +3,7 @@ #include "dag/dag.hpp" #include "network/tarcap/packets_handlers/common/ext_syncing_packet_handler.hpp" #include "network/tarcap/shared_states/pbft_syncing_state.hpp" +#include "transaction/transaction.hpp" #include "transaction/transaction_manager.hpp" namespace taraxa::network::tarcap { @@ -61,7 +62,7 @@ void DagSyncPacketHandler::process(const PacketData& packet_data, const std::sha auto trx = std::make_shared(tx_rlp); peer->markTransactionAsKnown(trx->getHash()); transactions.emplace_back(std::move(trx)); - } catch (const Transaction::InvalidSignature& e) { + } catch (const Transaction::InvalidTransaction& e) { throw MaliciousPeerException("Unable to parse transaction: " + std::string(e.what())); } } diff --git 
a/libraries/core_libs/network/src/tarcap/packets_handlers/pbft_sync_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/pbft_sync_packet_handler.cpp index 01ba610915..79a9dca4bc 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/pbft_sync_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/pbft_sync_packet_handler.cpp @@ -60,7 +60,7 @@ void PbftSyncPacketHandler::process(const PacketData &packet_data, const std::sh PeriodData period_data; try { period_data = PeriodData(packet_data.rlp_[1]); - } catch (const Transaction::InvalidSignature &e) { + } catch (const Transaction::InvalidTransaction &e) { throw MaliciousPeerException("Unable to parse PeriodData: " + std::string(e.what())); } diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/transaction_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/transaction_packet_handler.cpp index d106786a85..86db1ae093 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/transaction_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/transaction_packet_handler.cpp @@ -3,6 +3,7 @@ #include #include "network/tarcap/shared_states/test_state.hpp" +#include "transaction/transaction.hpp" #include "transaction/transaction_manager.hpp" namespace taraxa::network::tarcap { @@ -61,7 +62,7 @@ inline void TransactionPacketHandler::process(const PacketData &packet_data, con try { transaction = std::make_shared(packet_data.rlp_[1][tx_idx].data().toBytes()); received_transactions.emplace_back(trx_hash); - } catch (const Transaction::InvalidSignature &e) { + } catch (const Transaction::InvalidTransaction &e) { throw MaliciousPeerException("Unable to parse transaction: " + std::string(e.what())); } diff --git a/libraries/types/transaction/include/transaction/transaction.hpp b/libraries/types/transaction/include/transaction/transaction.hpp index 04e6f6c014..dab42ac7fa 100644 --- 
a/libraries/types/transaction/include/transaction/transaction.hpp +++ b/libraries/types/transaction/include/transaction/transaction.hpp @@ -10,8 +10,16 @@ namespace taraxa { struct Transaction { - struct InvalidSignature : std::runtime_error { - explicit InvalidSignature(std::string const &msg) : runtime_error("invalid signature:\n" + msg) {} + struct InvalidTransaction : std::runtime_error { + explicit InvalidTransaction(const std::string &msg) : runtime_error("invalid transaction - " + msg) {} + }; + + struct InvalidSignature : InvalidTransaction { + explicit InvalidSignature(const std::string &msg) : InvalidTransaction("signature:\n" + msg) {} + }; + + struct InvalidFormat : InvalidTransaction { + explicit InvalidFormat(const std::string &msg) : InvalidTransaction("rlp format:\n" + msg) {} }; private: diff --git a/libraries/types/transaction/src/transaction.cpp b/libraries/types/transaction/src/transaction.cpp index b6f7bc797b..5810664c89 100644 --- a/libraries/types/transaction/src/transaction.cpp +++ b/libraries/types/transaction/src/transaction.cpp @@ -14,7 +14,7 @@ using namespace dev; uint64_t toChainID(u256 const &val) { if (val == 0 || std::numeric_limits::max() < val) { - BOOST_THROW_EXCEPTION(Transaction::InvalidSignature("eip-155 chain id must be in the open interval: (0, 2^64)")); + BOOST_THROW_EXCEPTION(Transaction::InvalidTransaction("eip-155 chain id must be in the open interval: (0, 2^64)")); } return static_cast(val); } @@ -71,7 +71,7 @@ void Transaction::fromRLP(const dev::RLP &_rlp, bool verify_strict, const h256 & if (36 < v) { chain_id_ = toChainID((v - 35) / 2); } else if (v != 27 && v != 28) { - BOOST_THROW_EXCEPTION(InvalidSignature( + BOOST_THROW_EXCEPTION(InvalidFormat( "only values 27 and 28 are allowed for non-replay protected transactions for the 'v' signature field")); } vrs_.v = chain_id_ ? 
byte{v - (u256{chain_id_} * 2 + 35)} : byte{v - 27}; diff --git a/tests/transaction_test.cpp b/tests/transaction_test.cpp index 061bde4c2a..f6e2ebfc94 100644 --- a/tests/transaction_test.cpp +++ b/tests/transaction_test.cpp @@ -76,7 +76,7 @@ TEST_F(TransactionTest, sig) { ASSERT_THROW(Transaction(dev::jsToBytes("0xf84980808080808024a01404adc97c8b58fef303b2862d0e72378" "4fb635e7237e0e8d3ea33bbea19c36ca0229e80d57ba91a0f347686" "30fd21ad86e4c403b307de9ac4550d0ccc81c90fe3")), - Transaction::InvalidSignature); + Transaction::InvalidFormat); std::vector> valid_cases{ {0, "0xf647d1d47ce927ce2fb9f57e4e2a3c32b037c5e544b44611077f5cc6980b0bc2"}, {1, "0x49c1cb845df5d3ed238ca37ad25ca96f417e4f22d7911224cf3c2a725985e7ff"}, @@ -112,7 +112,7 @@ TEST_F(TransactionTest, sig) { } } ASSERT_NE(Transaction(with_modified_payload.out()).getSender(), sender); - ASSERT_THROW(Transaction(with_invalid_signature.out()).getSender(), Transaction::InvalidSignature); + ASSERT_THROW(Transaction(with_invalid_signature.out()).getSender(), Transaction::InvalidTransaction); } } } From 54dbb979d560d08e0e3c15b8a7f222f348d3b59e Mon Sep 17 00:00:00 2001 From: kstdl Date: Wed, 1 Mar 2023 10:27:34 +0100 Subject: [PATCH 040/162] feat: add transactions hashes to newHead event --- .../include/final_chain/final_chain.hpp | 11 - .../consensus/src/final_chain/final_chain.cpp | 38 +-- .../network/include/network/ws_server.hpp | 4 +- libraries/core_libs/network/rpc/eth/Eth.cpp | 229 ++++++++---------- libraries/core_libs/network/rpc/eth/Eth.h | 42 +++- .../core_libs/network/rpc/eth/LogFilter.cpp | 7 +- libraries/core_libs/network/src/ws_server.cpp | 7 +- libraries/core_libs/node/src/node.cpp | 2 +- .../pbft_block/include/pbft/pbft_block.hpp | 1 - .../include/transaction/transaction.hpp | 7 +- .../types/transaction/src/transaction.cpp | 9 + tests/full_node_test.cpp | 2 +- 12 files changed, 170 insertions(+), 189 deletions(-) diff --git a/libraries/core_libs/consensus/include/final_chain/final_chain.hpp 
b/libraries/core_libs/consensus/include/final_chain/final_chain.hpp index d6a3574723..5e9e7acbfe 100644 --- a/libraries/core_libs/consensus/include/final_chain/final_chain.hpp +++ b/libraries/core_libs/consensus/include/final_chain/final_chain.hpp @@ -90,17 +90,6 @@ class FinalChain { * @return BlockHash h256 */ virtual std::optional block_hash(std::optional n = {}) const = 0; - struct TransactionHashes { - TransactionHashes() = default; - virtual ~TransactionHashes() = default; - TransactionHashes(const TransactionHashes&) = default; - TransactionHashes(TransactionHashes&&) = default; - TransactionHashes& operator=(const TransactionHashes&) = default; - TransactionHashes& operator=(TransactionHashes&&) = default; - - virtual size_t count() const = 0; - virtual h256 get(size_t i) const = 0; - }; /** * @brief Needed if we are changing params with hardfork and it affects Go part of code. For example DPOS contract diff --git a/libraries/core_libs/consensus/src/final_chain/final_chain.cpp b/libraries/core_libs/consensus/src/final_chain/final_chain.cpp index d128a32704..44fb1aaecc 100644 --- a/libraries/core_libs/consensus/src/final_chain/final_chain.cpp +++ b/libraries/core_libs/consensus/src/final_chain/final_chain.cpp @@ -311,7 +311,7 @@ class FinalChainImpl final : public FinalChain { ++tl.index; } db_->insert(batch, DB::Columns::final_chain_transaction_hashes_by_blk_number, blk_header.number, - TransactionHashesImpl::serialize_from_transactions(transactions)); + dev::rlp(hashes_from_transactions(transactions))); db_->insert(batch, DB::Columns::final_chain_transaction_count_by_blk_number, blk_header.number, transactions.size()); db_->insert(batch, DB::Columns::final_chain_blk_hash_by_number, blk_header.number, blk_header.hash); @@ -444,17 +444,18 @@ class FinalChainImpl final : public FinalChain { } private: - std::shared_ptr get_transaction_hashes(std::optional n = {}) const { - return make_shared( - db_->lookup(last_if_absent(n), 
DB::Columns::final_chain_transaction_hashes_by_blk_number)); + std::shared_ptr get_transaction_hashes(std::optional n = {}) const { + auto res = db_->lookup(last_if_absent(n), DB::Columns::final_chain_transaction_hashes_by_blk_number); + + return std::make_shared(util::rlp_dec(dev::RLP(res))); } const SharedTransactions get_transactions(std::optional n = {}) const { SharedTransactions ret; auto hashes = transaction_hashes(n); - ret.reserve(hashes->count()); + ret.reserve(hashes->size()); for (size_t i = 0; i < ret.capacity(); ++i) { - auto trx = db_->getTransaction(hashes->get(i)); + auto trx = db_->getTransaction(hashes->at(i)); assert(trx); ret.emplace_back(trx); } @@ -520,31 +521,6 @@ class FinalChainImpl final : public FinalChain { } return ret; } - - struct TransactionHashesImpl : TransactionHashes { - string serialized_; - size_t count_; - - explicit TransactionHashesImpl(string serialized) - : serialized_(std::move(serialized)), count_(serialized_.size() / h256::size) {} - - static bytes serialize_from_transactions(SharedTransactions const& transactions) { - bytes serialized; - serialized.reserve(transactions.size() * h256::size); - for (auto const& trx : transactions) { - for (auto b : trx->getHash()) { - serialized.push_back(b); - } - } - return serialized; - } - - h256 get(size_t i) const override { - return h256((uint8_t*)(serialized_.data() + i * h256::size), h256::ConstructFromPointer); - } - - size_t count() const override { return count_; } - }; }; std::shared_ptr NewFinalChain(const std::shared_ptr& db, const taraxa::FullNodeConfig& config, diff --git a/libraries/core_libs/network/include/network/ws_server.hpp b/libraries/core_libs/network/include/network/ws_server.hpp index 7dd18406cc..7eb5dfbcd9 100644 --- a/libraries/core_libs/network/include/network/ws_server.hpp +++ b/libraries/core_libs/network/include/network/ws_server.hpp @@ -49,7 +49,7 @@ class WsSession : public std::enable_shared_from_this { virtual std::string processRequest(const 
std::string_view& request) = 0; - void newEthBlock(::taraxa::final_chain::BlockHeader const& payload); + void newEthBlock(::taraxa::final_chain::BlockHeader const& payload, const TransactionHashes& trx_hashes); void newDagBlock(DagBlock const& blk); void newDagBlockFinalized(blk_hash_t const& blk, uint64_t period); void newPbftBlockExecuted(Json::Value const& payload); @@ -90,7 +90,7 @@ class WsServer : public std::enable_shared_from_this, public jsonrpc:: // Start accepting incoming connections void run(); - void newEthBlock(::taraxa::final_chain::BlockHeader const& payload); + void newEthBlock(::taraxa::final_chain::BlockHeader const& payload, const TransactionHashes& trx_hashes); void newDagBlock(DagBlock const& blk); void newDagBlockFinalized(blk_hash_t const& blk, uint64_t period); void newPbftBlockExecuted(PbftBlock const& sche_blk, std::vector const& finalized_dag_blk_hashes); diff --git a/libraries/core_libs/network/rpc/eth/Eth.cpp b/libraries/core_libs/network/rpc/eth/Eth.cpp index 87cc70bfef..130fe14b90 100644 --- a/libraries/core_libs/network/rpc/eth/Eth.cpp +++ b/libraries/core_libs/network/rpc/eth/Eth.cpp @@ -8,11 +8,106 @@ #include "LogFilter.hpp" +using namespace std; +using namespace dev; +using namespace taraxa::final_chain; +using namespace taraxa::state_api; + namespace taraxa::net::rpc::eth { -using namespace ::std; -using namespace ::dev; -using namespace ::taraxa::final_chain; -using namespace ::taraxa::state_api; +void add(Json::Value& obj, optional const& info) { + obj["blockNumber"] = info ? toJS(info->blk_n) : Json::Value(); + obj["blockHash"] = info ? toJS(info->blk_h) : Json::Value(); + obj["transactionIndex"] = info ? 
toJS(info->index) : Json::Value(); +} + +void add(Json::Value& obj, ExtendedTransactionLocation const& info) { + add(obj, static_cast(info)); + obj["transactionHash"] = toJS(info.trx_hash); +} + +Json::Value toJson(Transaction const& trx, optional const& loc) { + Json::Value res(Json::objectValue); + add(res, loc); + res["hash"] = toJS(trx.getHash()); + res["input"] = toJS(trx.getData()); + res["to"] = toJson(trx.getReceiver()); + res["from"] = toJS(trx.getSender()); + res["gas"] = toJS(trx.getGas()); + res["gasPrice"] = toJS(trx.getGasPrice()); + res["nonce"] = toJS(trx.getNonce()); + res["value"] = toJS(trx.getValue()); + auto const& vrs = trx.getVRS(); + res["r"] = toJS(vrs.r); + res["s"] = toJS(vrs.s); + res["v"] = toJS(vrs.v); + return res; +} + +Json::Value toJson(const LocalisedTransaction& lt) { return toJson(*lt.trx, lt.trx_loc); } + +Json::Value toJson(BlockHeader const& obj) { + Json::Value res(Json::objectValue); + res["parentHash"] = toJS(obj.parent_hash); + res["sha3Uncles"] = toJS(BlockHeader::uncles_hash()); + res["stateRoot"] = toJS(obj.state_root); + res["transactionsRoot"] = toJS(obj.transactions_root); + res["receiptsRoot"] = toJS(obj.receipts_root); + res["number"] = toJS(obj.number); + res["gasUsed"] = toJS(obj.gas_used); + res["gasLimit"] = toJS(obj.gas_limit); + res["extraData"] = toJS(obj.extra_data); + res["logsBloom"] = toJS(obj.log_bloom); + res["timestamp"] = toJS(obj.timestamp); + res["author"] = toJS(obj.author); + res["mixHash"] = toJS(BlockHeader::mix_hash()); + res["nonce"] = toJS(BlockHeader::nonce()); + res["uncles"] = Json::Value(Json::arrayValue); + res["hash"] = toJS(obj.hash); + res["difficulty"] = "0x0"; + res["totalDifficulty"] = "0x0"; + res["totalReward"] = toJS(obj.total_reward); + return res; +} + +Json::Value toJson(LocalisedLogEntry const& lle) { + Json::Value res(Json::objectValue); + add(res, lle.trx_loc); + res["removed"] = false; + res["data"] = toJS(lle.le.data); + res["address"] = toJS(lle.le.address); + 
res["logIndex"] = toJS(lle.position_in_receipt); + auto& topics_json = res["topics"] = Json::Value(Json::arrayValue); + for (auto const& t : lle.le.topics) { + topics_json.append(toJS(t)); + } + return res; +} + +Json::Value toJson(LocalisedTransactionReceipt const& ltr) { + Json::Value res(Json::objectValue); + add(res, ltr.trx_loc); + res["from"] = toJS(ltr.trx_from); + res["to"] = toJson(ltr.trx_to); + res["status"] = toJS(ltr.r.status_code); + res["gasUsed"] = toJS(ltr.r.gas_used); + res["cumulativeGasUsed"] = toJS(ltr.r.cumulative_gas_used); + res["contractAddress"] = toJson(ltr.r.new_contract_address); + res["logsBloom"] = toJS(ltr.r.bloom()); + auto& logs_json = res["logs"] = Json::Value(Json::arrayValue); + uint log_i = 0; + for (auto const& le : ltr.r.logs) { + logs_json.append(toJson(LocalisedLogEntry{le, ltr.trx_loc, log_i++})); + } + return res; +} + +Json::Value toJson(SyncStatus const& obj) { + Json::Value res(Json::objectValue); + res["startingBlock"] = toJS(obj.starting_block); + res["currentBlock"] = toJS(obj.current_block); + res["highestBlock"] = toJS(obj.highest_block); + return res; +} class EthImpl : public Eth, EthParams { Watches watches_; @@ -71,7 +166,7 @@ class EthImpl : public Eth, EthParams { } Json::Value eth_getBlockTransactionCountByHash(string const& _blockHash) override { - return toJson(transactionCount(jsToFixed<32>(_blockHash))); + return toJS(transactionCount(jsToFixed<32>(_blockHash))); } Json::Value eth_getBlockTransactionCountByNumber(string const& _blockNumber) override { @@ -189,9 +284,7 @@ class EthImpl : public Eth, EthParams { } } else { auto hashes = final_chain->transaction_hashes(blk_n); - for (size_t i = 0; i < hashes->count(); ++i) { - trxs_json.append(toJson(hashes->get(i))); - } + trxs_json = toJsonArray(*hashes); } return ret; } @@ -213,11 +306,11 @@ class EthImpl : public Eth, EthParams { optional get_transaction(uint64_t trx_pos, EthBlockNumber blk_n) const { auto hashes = 
final_chain->transaction_hashes(blk_n); - if (hashes->count() <= trx_pos) { + if (hashes->size() <= trx_pos) { return {}; } return LocalisedTransaction{ - get_trx(hashes->get(trx_pos)), + get_trx(hashes->at(trx_pos)), TransactionLocationWithBlockHash{ {blk_n, trx_pos}, *final_chain->block_hash(blk_n), @@ -382,124 +475,8 @@ class EthImpl : public Eth, EthParams { } return LogFilter(from_block, to_block, std::move(addresses), std::move(topics)); } - - static void add(Json::Value& obj, optional const& info) { - obj["blockNumber"] = info ? toJson(info->blk_n) : Json::Value(); - obj["blockHash"] = info ? toJson(info->blk_h) : Json::Value(); - obj["transactionIndex"] = info ? toJson(info->index) : Json::Value(); - } - - static void add(Json::Value& obj, ExtendedTransactionLocation const& info) { - add(obj, static_cast(info)); - obj["transactionHash"] = toJson(info.trx_hash); - } - - static Json::Value toJson(Transaction const& trx, optional const& loc) { - Json::Value res(Json::objectValue); - add(res, loc); - res["hash"] = toJson(trx.getHash()); - res["input"] = toJson(trx.getData()); - res["to"] = toJson(trx.getReceiver()); - res["from"] = toJson(trx.getSender()); - res["gas"] = toJson(trx.getGas()); - res["gasPrice"] = toJson(trx.getGasPrice()); - res["nonce"] = toJson(trx.getNonce()); - res["value"] = toJson(trx.getValue()); - auto const& vrs = trx.getVRS(); - res["r"] = toJson(vrs.r); - res["s"] = toJson(vrs.s); - res["v"] = toJson(vrs.v); - return res; - } - - static Json::Value toJson(const LocalisedTransaction& lt) { return toJson(*lt.trx, lt.trx_loc); } - - static Json::Value toJson(BlockHeader const& obj) { - Json::Value res(Json::objectValue); - res["parentHash"] = toJson(obj.parent_hash); - res["sha3Uncles"] = toJson(BlockHeader::uncles_hash()); - res["stateRoot"] = toJson(obj.state_root); - res["transactionsRoot"] = toJson(obj.transactions_root); - res["receiptsRoot"] = toJson(obj.receipts_root); - res["number"] = toJson(obj.number); - res["gasUsed"] = 
toJson(obj.gas_used); - res["gasLimit"] = toJson(obj.gas_limit); - res["extraData"] = toJson(obj.extra_data); - res["logsBloom"] = toJson(obj.log_bloom); - res["timestamp"] = toJson(obj.timestamp); - res["author"] = toJson(obj.author); - res["mixHash"] = toJson(BlockHeader::mix_hash()); - res["nonce"] = toJson(BlockHeader::nonce()); - res["uncles"] = Json::Value(Json::arrayValue); - res["hash"] = toJson(obj.hash); - res["difficulty"] = "0x0"; - res["totalDifficulty"] = "0x0"; - res["totalReward"] = toJson(obj.total_reward); - return res; - } - - static Json::Value toJson(LocalisedLogEntry const& lle) { - Json::Value res(Json::objectValue); - add(res, lle.trx_loc); - res["removed"] = false; - res["data"] = toJson(lle.le.data); - res["address"] = toJson(lle.le.address); - res["logIndex"] = toJson(lle.position_in_receipt); - auto& topics_json = res["topics"] = Json::Value(Json::arrayValue); - for (auto const& t : lle.le.topics) { - topics_json.append(toJson(t)); - } - return res; - } - - static Json::Value toJson(LocalisedTransactionReceipt const& ltr) { - Json::Value res(Json::objectValue); - add(res, ltr.trx_loc); - res["from"] = toJson(ltr.trx_from); - res["to"] = toJson(ltr.trx_to); - res["status"] = toJson(ltr.r.status_code); - res["gasUsed"] = toJson(ltr.r.gas_used); - res["cumulativeGasUsed"] = toJson(ltr.r.cumulative_gas_used); - res["contractAddress"] = toJson(ltr.r.new_contract_address); - res["logsBloom"] = toJson(ltr.r.bloom()); - auto& logs_json = res["logs"] = Json::Value(Json::arrayValue); - uint log_i = 0; - for (auto const& le : ltr.r.logs) { - logs_json.append(toJson(LocalisedLogEntry{le, ltr.trx_loc, log_i++})); - } - return res; - } - - static Json::Value toJson(SyncStatus const& obj) { - Json::Value res(Json::objectValue); - res["startingBlock"] = toJS(obj.starting_block); - res["currentBlock"] = toJS(obj.current_block); - res["highestBlock"] = toJS(obj.highest_block); - return res; - } - - template - static Json::Value toJson(T const& t) { - 
return toJS(t); - } - - template - static Json::Value toJsonArray(vector const& _es) { - Json::Value res(Json::arrayValue); - for (auto const& e : _es) { - res.append(toJson(e)); - } - return res; - } - - template - static Json::Value toJson(optional const& t) { - return t ? toJson(*t) : Json::Value(); - } }; -Json::Value toJson(BlockHeader const& obj) { return EthImpl::toJson(obj); } - shared_ptr NewEth(EthParams&& prerequisites) { return make_shared(std::move(prerequisites)); } } // namespace taraxa::net::rpc::eth \ No newline at end of file diff --git a/libraries/core_libs/network/rpc/eth/Eth.h b/libraries/core_libs/network/rpc/eth/Eth.h index 6a256cae83..33617c331a 100644 --- a/libraries/core_libs/network/rpc/eth/Eth.h +++ b/libraries/core_libs/network/rpc/eth/Eth.h @@ -1,18 +1,48 @@ #pragma once +#include "data.hpp" #include "final_chain/final_chain.hpp" #include "network/rpc/EthFace.h" #include "watches.hpp" namespace taraxa::net::rpc::eth { +void add(Json::Value& obj, const std::optional& info); +void add(Json::Value& obj, const ExtendedTransactionLocation& info); +Json::Value toJson(const final_chain::BlockHeader& obj); +Json::Value toJson(const Transaction& trx, const std::optional& loc); +Json::Value toJson(const LocalisedTransaction& lt); +Json::Value toJson(const final_chain::BlockHeader& obj); +Json::Value toJson(const LocalisedLogEntry& lle); +Json::Value toJson(const LocalisedTransactionReceipt& ltr); +Json::Value toJson(const SyncStatus& obj); + +template +Json::Value toJson(T const& t) { + return toJS(t); +} + +template +Json::Value toJsonArray(std::vector const& _es) { + Json::Value res(Json::arrayValue); + for (auto const& e : _es) { + res.append(toJson(e)); + } + return res; +} + +template +Json::Value toJson(std::optional const& t) { + return t ? 
toJson(*t) : Json::Value(); +} + struct EthParams { Address address; uint64_t chain_id = 0; uint64_t gas_limit = ((uint64_t)1 << 53) - 1; std::shared_ptr final_chain; - std::function(h256 const&)> get_trx; - std::function const& trx)> send_trx; + std::function(const h256&)> get_trx; + std::function& trx)> send_trx; std::function gas_pricer = [] { return u256(0); }; std::function()> syncing_probe = [] { return std::nullopt; }; WatchesConfig watches_cfg; @@ -29,13 +59,11 @@ struct Eth : virtual ::taraxa::net::EthFace { ::taraxa::net::EthFace::operator=(std::move(rhs)); return *this; } - virtual void note_block_executed(final_chain::BlockHeader const&, SharedTransactions const&, - final_chain::TransactionReceipts const&) = 0; - virtual void note_pending_transaction(h256 const& trx_hash) = 0; + virtual void note_block_executed(const final_chain::BlockHeader&, const SharedTransactions&, + const final_chain::TransactionReceipts&) = 0; + virtual void note_pending_transaction(const h256& trx_hash) = 0; }; std::shared_ptr NewEth(EthParams&&); -Json::Value toJson(final_chain::BlockHeader const& obj); - } // namespace taraxa::net::rpc::eth diff --git a/libraries/core_libs/network/rpc/eth/LogFilter.cpp b/libraries/core_libs/network/rpc/eth/LogFilter.cpp index 1158486a5a..6a7208c5fd 100644 --- a/libraries/core_libs/network/rpc/eth/LogFilter.cpp +++ b/libraries/core_libs/network/rpc/eth/LogFilter.cpp @@ -154,10 +154,9 @@ std::vector LogFilter::match_all(FinalChain const& final_chai auto action = [&, this](EthBlockNumber blk_n) { ExtendedTransactionLocation trx_loc{{{blk_n}, *final_chain.block_hash(blk_n)}}; auto hashes = final_chain.transaction_hashes(trx_loc.blk_n); - for (size_t i = 0; i < hashes->count(); ++i) { - trx_loc.trx_hash = hashes->get(i); - match_one(trx_loc, *final_chain.transaction_receipt(trx_loc.trx_hash), - [&](auto const& lle) { ret.push_back(lle); }); + for (const auto& hash : *hashes) { + trx_loc.trx_hash = hash; + match_one(trx_loc, 
*final_chain.transaction_receipt(hash), [&](auto const& lle) { ret.push_back(lle); }); ++trx_loc.index; } }; diff --git a/libraries/core_libs/network/src/ws_server.cpp b/libraries/core_libs/network/src/ws_server.cpp index 2f85fb1c1e..d698c3f6c5 100644 --- a/libraries/core_libs/network/src/ws_server.cpp +++ b/libraries/core_libs/network/src/ws_server.cpp @@ -89,12 +89,13 @@ void WsSession::on_write_no_read(beast::error_code ec, std::size_t bytes_transfe } } -void WsSession::newEthBlock(::taraxa::final_chain::BlockHeader const &payload) { +void WsSession::newEthBlock(const ::taraxa::final_chain::BlockHeader &payload, const TransactionHashes &trx_hashes) { if (new_heads_subscription_ != 0) { Json::Value res, params; res["jsonrpc"] = "2.0"; res["method"] = "eth_subscription"; params["result"] = rpc::eth::toJson(payload); + params["result"]["transactions"] = rpc::eth::toJsonArray(trx_hashes); params["subscription"] = dev::toJS(new_heads_subscription_); res["params"] = params; auto response = util::to_string(res); @@ -318,10 +319,10 @@ void WsServer::newPbftBlockExecuted(PbftBlock const &pbft_blk, } } -void WsServer::newEthBlock(::taraxa::final_chain::BlockHeader const &payload) { +void WsServer::newEthBlock(const ::taraxa::final_chain::BlockHeader &payload, const TransactionHashes &trx_hashes) { boost::shared_lock lock(sessions_mtx_); for (auto const &session : sessions) { - if (!session->is_closed()) session->newEthBlock(payload); + if (!session->is_closed()) session->newEthBlock(payload, trx_hashes); } } diff --git a/libraries/core_libs/node/src/node.cpp b/libraries/core_libs/node/src/node.cpp index e35342251d..d3a8a26622 100644 --- a/libraries/core_libs/node/src/node.cpp +++ b/libraries/core_libs/node/src/node.cpp @@ -234,7 +234,7 @@ void FullNode::start() { _eth_json_rpc->note_block_executed(*res->final_chain_blk, res->trxs, res->trx_receipts); } if (auto _ws = ws.lock()) { - _ws->newEthBlock(*res->final_chain_blk); + _ws->newEthBlock(*res->final_chain_blk, 
hashes_from_transactions(res->trxs)); if (auto _db = db.lock()) { auto pbft_blk = _db->getPbftBlock(res->hash); if (const auto &hash = pbft_blk->getPivotDagBlockHash(); hash != kNullBlockHash) { diff --git a/libraries/types/pbft_block/include/pbft/pbft_block.hpp b/libraries/types/pbft_block/include/pbft/pbft_block.hpp index ccfcd6b8a3..afd95c02ba 100644 --- a/libraries/types/pbft_block/include/pbft/pbft_block.hpp +++ b/libraries/types/pbft_block/include/pbft/pbft_block.hpp @@ -7,7 +7,6 @@ #include "common/types.hpp" #include "dag/dag_block.hpp" -#include "transaction/transaction.hpp" #include "vote/vote.hpp" namespace taraxa { diff --git a/libraries/types/transaction/include/transaction/transaction.hpp b/libraries/types/transaction/include/transaction/transaction.hpp index 04e6f6c014..f624886838 100644 --- a/libraries/types/transaction/include/transaction/transaction.hpp +++ b/libraries/types/transaction/include/transaction/transaction.hpp @@ -70,7 +70,10 @@ struct Transaction { }; using SharedTransaction = std::shared_ptr; -using Transactions = ::std::vector; -using SharedTransactions = ::std::vector; +using Transactions = std::vector; +using SharedTransactions = std::vector; +using TransactionHashes = std::vector; + +TransactionHashes hashes_from_transactions(const SharedTransactions &transactions); } // namespace taraxa diff --git a/libraries/types/transaction/src/transaction.cpp b/libraries/types/transaction/src/transaction.cpp index b6f7bc797b..ba81bcc873 100644 --- a/libraries/types/transaction/src/transaction.cpp +++ b/libraries/types/transaction/src/transaction.cpp @@ -19,6 +19,15 @@ uint64_t toChainID(u256 const &val) { return static_cast(val); } +TransactionHashes hashes_from_transactions(const SharedTransactions &transactions) { + TransactionHashes trx_hashes; + trx_hashes.reserve(transactions.size()); + for (auto const &trx : transactions) { + trx_hashes.push_back(trx->getHash()); + } + return trx_hashes; +} + Transaction::Transaction(const trx_nonce_t 
&nonce, const val_t &value, const val_t &gas_price, gas_t gas, bytes data, const secret_t &sk, const optional &receiver, uint64_t chain_id) : nonce_(nonce), diff --git a/tests/full_node_test.cpp b/tests/full_node_test.cpp index e889fd3da7..50e0cf0488 100644 --- a/tests/full_node_test.cpp +++ b/tests/full_node_test.cpp @@ -1657,7 +1657,7 @@ TEST_F(FullNodeTest, graphql_test) { block = service::ScalarArgument::require("block", data); auto transactionAt = service::ScalarArgument::require("transactionAt", block); const auto hash2 = service::StringArgument::require("hash", transactionAt); - EXPECT_EQ(nodes[0]->getFinalChain()->transaction_hashes(2)->get(0).toString(), hash2); + EXPECT_EQ(nodes[0]->getFinalChain()->transaction_hashes(2)->at(0).toString(), hash2); } } // namespace taraxa::core_tests From af0dc415ddf0198ca4b1e8df7a0c02bcf14e898c Mon Sep 17 00:00:00 2001 From: kstdl Date: Wed, 1 Mar 2023 10:46:00 +0100 Subject: [PATCH 041/162] feat: add taraxa_getChainStats moethod to RPC --- doc/RPC.md | 33 +++++ .../network/include/network/ws_server.hpp | 20 +-- libraries/core_libs/network/rpc/Taraxa.cpp | 14 ++ libraries/core_libs/network/rpc/Taraxa.h | 1 + .../core_libs/network/rpc/Taraxa.jsonrpc.json | 6 + .../core_libs/network/rpc/TaraxaClient.h | 9 ++ libraries/core_libs/network/rpc/TaraxaFace.h | 8 ++ libraries/core_libs/network/rpc/eth/Eth.cpp | 122 +++++++++--------- libraries/core_libs/network/rpc/eth/Eth.h | 8 +- .../core_libs/network/rpc/eth/LogFilter.cpp | 34 ++--- 10 files changed, 163 insertions(+), 92 deletions(-) diff --git a/doc/RPC.md b/doc/RPC.md index 5e80b6fa49..b5edc701af 100644 --- a/doc/RPC.md +++ b/doc/RPC.md @@ -481,6 +481,39 @@ curl -X POST --data '{"jsonrpc":"2.0","method":"taraxa_getConfig","params":[],"i } ``` +### taraxa_getChainStats + +Returns current chain stats with count of transactions, PBFT blocks and DAG blocks + +#### Parameters + +none + +#### Returns + +`OBJECT` - current chain stats object +* `pbft_period`: `QUANTITY` - current 
PBFT period +* `dag_blocks_executed`: `QUANTITY` - count of executed(finalized) DAG blocks +* `transactions_executed`: `QUANTITY` - count of executed transactions + +#### Example + +```json +// Request +curl -X POST --data '{"jsonrpc":"2.0","method":"taraxa_getChainStats","params":[],"id":1}' + +// Result +{ + "id": 1, + "jsonrpc": "2.0", + "result": { + "pbft_period": 50, + "dag_blocks_executed": 100, + "transactions_executed": 200 + } +} +``` + ## Test API ### get_sortition_change diff --git a/libraries/core_libs/network/include/network/ws_server.hpp b/libraries/core_libs/network/include/network/ws_server.hpp index 7eb5dfbcd9..0fcd33dbb9 100644 --- a/libraries/core_libs/network/include/network/ws_server.hpp +++ b/libraries/core_libs/network/include/network/ws_server.hpp @@ -49,11 +49,11 @@ class WsSession : public std::enable_shared_from_this { virtual std::string processRequest(const std::string_view& request) = 0; - void newEthBlock(::taraxa::final_chain::BlockHeader const& payload, const TransactionHashes& trx_hashes); - void newDagBlock(DagBlock const& blk); - void newDagBlockFinalized(blk_hash_t const& blk, uint64_t period); - void newPbftBlockExecuted(Json::Value const& payload); - void newPendingTransaction(trx_hash_t const& trx_hash); + void newEthBlock(const ::taraxa::final_chain::BlockHeader& payload, const TransactionHashes& trx_hashes); + void newDagBlock(const DagBlock& blk); + void newDagBlockFinalized(const blk_hash_t& blk, uint64_t period); + void newPbftBlockExecuted(const Json::Value& payload); + void newPendingTransaction(const trx_hash_t& trx_hash); bool is_closed() const { return closed_; } bool is_normal(const beast::error_code& ec) const; LOG_OBJECTS_DEFINE @@ -90,11 +90,11 @@ class WsServer : public std::enable_shared_from_this, public jsonrpc:: // Start accepting incoming connections void run(); - void newEthBlock(::taraxa::final_chain::BlockHeader const& payload, const TransactionHashes& trx_hashes); - void newDagBlock(DagBlock const& 
blk); - void newDagBlockFinalized(blk_hash_t const& blk, uint64_t period); - void newPbftBlockExecuted(PbftBlock const& sche_blk, std::vector const& finalized_dag_blk_hashes); - void newPendingTransaction(trx_hash_t const& trx_hash); + void newEthBlock(const ::taraxa::final_chain::BlockHeader& payload, const TransactionHashes& trx_hashes); + void newDagBlock(const DagBlock& blk); + void newDagBlockFinalized(const blk_hash_t& blk, uint64_t period); + void newPbftBlockExecuted(const PbftBlock& sche_blk, const std::vector& finalized_dag_blk_hashes); + void newPendingTransaction(const trx_hash_t& trx_hash); virtual std::shared_ptr createSession(tcp::socket&& socket) = 0; diff --git a/libraries/core_libs/network/rpc/Taraxa.cpp b/libraries/core_libs/network/rpc/Taraxa.cpp index 87267ce200..7fe42bc23d 100644 --- a/libraries/core_libs/network/rpc/Taraxa.cpp +++ b/libraries/core_libs/network/rpc/Taraxa.cpp @@ -137,4 +137,18 @@ Json::Value Taraxa::taraxa_getDagBlockByLevel(const string& _blockLevel, bool _i } Json::Value Taraxa::taraxa_getConfig() { return enc_json(tryGetNode()->getConfig().genesis); } + +Json::Value Taraxa::taraxa_getChainStats() { + Json::Value res; + try { + if (auto node = full_node_.lock()) { + res["pbft_period"] = Json::UInt64(node->getPbftChain()->getPbftChainSize()); + res["dag_blocks_executed"] = Json::UInt64(node->getDB()->getNumBlockExecuted()); + res["transactions_executed"] = Json::UInt64(node->getDB()->getNumTransactionExecuted()); + } + } catch (std::exception& e) { + res["status"] = e.what(); + } + return res; +} } // namespace taraxa::net \ No newline at end of file diff --git a/libraries/core_libs/network/rpc/Taraxa.h b/libraries/core_libs/network/rpc/Taraxa.h index cfb774a28e..39b32cca34 100644 --- a/libraries/core_libs/network/rpc/Taraxa.h +++ b/libraries/core_libs/network/rpc/Taraxa.h @@ -28,6 +28,7 @@ class Taraxa : public TaraxaFace { virtual Json::Value taraxa_getScheduleBlockByPeriod(const std::string& _period) override; virtual 
std::string taraxa_pbftBlockHashByPeriod(const std::string& _period) override; virtual Json::Value taraxa_getConfig() override; + virtual Json::Value taraxa_getChainStats() override; protected: std::weak_ptr full_node_; diff --git a/libraries/core_libs/network/rpc/Taraxa.jsonrpc.json b/libraries/core_libs/network/rpc/Taraxa.jsonrpc.json index 48dca28de3..15e5b2135c 100644 --- a/libraries/core_libs/network/rpc/Taraxa.jsonrpc.json +++ b/libraries/core_libs/network/rpc/Taraxa.jsonrpc.json @@ -53,6 +53,12 @@ "order": [], "returns": {} }, + { + "name": "taraxa_getChainStats", + "params": [], + "order": [], + "returns": {} + }, { "name": "taraxa_pbftBlockHashByPeriod", "params": [""], diff --git a/libraries/core_libs/network/rpc/TaraxaClient.h b/libraries/core_libs/network/rpc/TaraxaClient.h index 91d49c94dd..a8a3c105a1 100644 --- a/libraries/core_libs/network/rpc/TaraxaClient.h +++ b/libraries/core_libs/network/rpc/TaraxaClient.h @@ -88,6 +88,15 @@ class TaraxaClient : public jsonrpc::Client { else throw jsonrpc::JsonRpcException(jsonrpc::Errors::ERROR_CLIENT_INVALID_RESPONSE, result.toStyledString()); } + Json::Value taraxa_getChainStats() throw(jsonrpc::JsonRpcException) { + Json::Value p; + p = Json::nullValue; + Json::Value result = this->CallMethod("taraxa_getChainStats", p); + if (result.isObject()) + return result; + else + throw jsonrpc::JsonRpcException(jsonrpc::Errors::ERROR_CLIENT_INVALID_RESPONSE, result.toStyledString()); + } std::string taraxa_pbftBlockHashByPeriod(const std::string& param1) throw(jsonrpc::JsonRpcException) { Json::Value p; p.append(param1); diff --git a/libraries/core_libs/network/rpc/TaraxaFace.h b/libraries/core_libs/network/rpc/TaraxaFace.h index bd53cf7b08..8d02894579 100644 --- a/libraries/core_libs/network/rpc/TaraxaFace.h +++ b/libraries/core_libs/network/rpc/TaraxaFace.h @@ -38,6 +38,9 @@ class TaraxaFace : public ServerInterface { this->bindAndAddMethod( jsonrpc::Procedure("taraxa_getConfig", jsonrpc::PARAMS_BY_POSITION, 
jsonrpc::JSON_OBJECT, NULL), &taraxa::net::TaraxaFace::taraxa_getConfigI); + this->bindAndAddMethod( + jsonrpc::Procedure("taraxa_getChainStats", jsonrpc::PARAMS_BY_POSITION, jsonrpc::JSON_OBJECT, NULL), + &taraxa::net::TaraxaFace::taraxa_getStatsI); this->bindAndAddMethod(jsonrpc::Procedure("taraxa_pbftBlockHashByPeriod", jsonrpc::PARAMS_BY_POSITION, jsonrpc::JSON_STRING, "param1", jsonrpc::JSON_STRING, NULL), &taraxa::net::TaraxaFace::taraxa_pbftBlockHashByPeriodI); @@ -72,6 +75,10 @@ class TaraxaFace : public ServerInterface { (void)request; response = this->taraxa_getConfig(); } + inline virtual void taraxa_getStatsI(const Json::Value &request, Json::Value &response) { + (void)request; + response = this->taraxa_getChainStats(); + } inline virtual void taraxa_pbftBlockHashByPeriodI(const Json::Value &request, Json::Value &response) { response = this->taraxa_pbftBlockHashByPeriod(request[0u].asString()); } @@ -83,6 +90,7 @@ class TaraxaFace : public ServerInterface { virtual std::string taraxa_dagBlockPeriod() = 0; virtual Json::Value taraxa_getScheduleBlockByPeriod(const std::string ¶m1) = 0; virtual Json::Value taraxa_getConfig() = 0; + virtual Json::Value taraxa_getChainStats() = 0; virtual std::string taraxa_pbftBlockHashByPeriod(const std::string ¶m1) = 0; }; diff --git a/libraries/core_libs/network/rpc/eth/Eth.cpp b/libraries/core_libs/network/rpc/eth/Eth.cpp index 130fe14b90..7a4b83a91c 100644 --- a/libraries/core_libs/network/rpc/eth/Eth.cpp +++ b/libraries/core_libs/network/rpc/eth/Eth.cpp @@ -14,18 +14,18 @@ using namespace taraxa::final_chain; using namespace taraxa::state_api; namespace taraxa::net::rpc::eth { -void add(Json::Value& obj, optional const& info) { +void add(Json::Value& obj, const optional& info) { obj["blockNumber"] = info ? toJS(info->blk_n) : Json::Value(); obj["blockHash"] = info ? toJS(info->blk_h) : Json::Value(); obj["transactionIndex"] = info ? 
toJS(info->index) : Json::Value(); } -void add(Json::Value& obj, ExtendedTransactionLocation const& info) { - add(obj, static_cast(info)); +void add(Json::Value& obj, const ExtendedTransactionLocation& info) { + add(obj, static_cast(info)); obj["transactionHash"] = toJS(info.trx_hash); } -Json::Value toJson(Transaction const& trx, optional const& loc) { +Json::Value toJson(const Transaction& trx, const optional& loc) { Json::Value res(Json::objectValue); add(res, loc); res["hash"] = toJS(trx.getHash()); @@ -36,7 +36,7 @@ Json::Value toJson(Transaction const& trx, optionallast_block_number()); } - string eth_getBalance(string const& _address, string const& _blockNumber) override { + string eth_getBalance(const string& _address, const string& _blockNumber) override { return toJS( final_chain->get_account(toAddress(_address), parse_blk_num(_blockNumber)).value_or(ZeroAccount).balance); } - string eth_getStorageAt(string const& _address, string const& _position, string const& _blockNumber) override { + string eth_getStorageAt(const string& _address, const string& _position, const string& _blockNumber) override { return toJS( final_chain->get_account_storage(toAddress(_address), jsToU256(_position), parse_blk_num(_blockNumber))); } - string eth_getStorageRoot(string const& _address, string const& _blockNumber) override { + string eth_getStorageRoot(const string& _address, const string& _blockNumber) override { return toJS(final_chain->get_account(toAddress(_address), parse_blk_num(_blockNumber)) .value_or(ZeroAccount) .storage_root_eth()); } - string eth_getCode(string const& _address, string const& _blockNumber) override { + string eth_getCode(const string& _address, const string& _blockNumber) override { return toJS(final_chain->get_code(toAddress(_address), parse_blk_num(_blockNumber))); } - string eth_call(Json::Value const& _json, string const& _blockNumber) override { + string eth_call(const Json::Value& _json, const string& _blockNumber) override { auto t = 
toTransactionSkeleton(_json); auto blk_n = parse_blk_num(_blockNumber); prepare_transaction_for_call(t, blk_n); return toJS(call(blk_n, t).code_retval); } - string eth_estimateGas(Json::Value const& _json) override { + string eth_estimateGas(const Json::Value& _json) override { auto t = toTransactionSkeleton(_json); auto blk_n = final_chain->last_block_number(); prepare_transaction_for_call(t, blk_n); return toJS(call(blk_n, t).gas_used); } - string eth_getTransactionCount(string const& _address, string const& _blockNumber) override { + string eth_getTransactionCount(const string& _address, const string& _blockNumber) override { return toJS(transaction_count(parse_blk_num(_blockNumber), toAddress(_address))); } - Json::Value eth_getBlockTransactionCountByHash(string const& _blockHash) override { + Json::Value eth_getBlockTransactionCountByHash(const string& _blockHash) override { return toJS(transactionCount(jsToFixed<32>(_blockHash))); } - Json::Value eth_getBlockTransactionCountByNumber(string const& _blockNumber) override { + Json::Value eth_getBlockTransactionCountByNumber(const string& _blockNumber) override { return toJS(final_chain->transactionCount(parse_blk_num(_blockNumber))); } - Json::Value eth_getUncleCountByBlockHash(string const&) override { return toJS(0); } + Json::Value eth_getUncleCountByBlockHash(const string&) override { return toJS(0); } - Json::Value eth_getUncleCountByBlockNumber(string const&) override { return toJS(0); } + Json::Value eth_getUncleCountByBlockNumber(const string&) override { return toJS(0); } - string eth_sendRawTransaction(string const& _rlp) override { + string eth_sendRawTransaction(const string& _rlp) override { auto trx = std::make_shared(jsToBytes(_rlp, OnFailed::Throw), true); send_trx(trx); return toJS(trx->getHash()); } - Json::Value eth_getBlockByHash(string const& _blockHash, bool _includeTransactions) override { + Json::Value eth_getBlockByHash(const string& _blockHash, bool _includeTransactions) override { if 
(auto blk_n = final_chain->block_number(jsToFixed<32>(_blockHash)); blk_n) { return get_block_by_number(*blk_n, _includeTransactions); } return Json::Value(); } - Json::Value eth_getBlockByNumber(string const& _blockNumber, bool _includeTransactions) override { + Json::Value eth_getBlockByNumber(const string& _blockNumber, bool _includeTransactions) override { return get_block_by_number(parse_blk_num(_blockNumber), _includeTransactions); } - Json::Value eth_getTransactionByHash(string const& _transactionHash) override { + Json::Value eth_getTransactionByHash(const string& _transactionHash) override { return toJson(get_transaction(jsToFixed<32>(_transactionHash))); } - Json::Value eth_getTransactionByBlockHashAndIndex(string const& _blockHash, - string const& _transactionIndex) override { + Json::Value eth_getTransactionByBlockHashAndIndex(const string& _blockHash, + const string& _transactionIndex) override { return toJson(get_transaction(jsToFixed<32>(_blockHash), jsToInt(_transactionIndex))); } - Json::Value eth_getTransactionByBlockNumberAndIndex(string const& _blockNumber, - string const& _transactionIndex) override { + Json::Value eth_getTransactionByBlockNumberAndIndex(const string& _blockNumber, + const string& _transactionIndex) override { return toJson(get_transaction(jsToInt(_transactionIndex), parse_blk_num(_blockNumber))); } - Json::Value eth_getTransactionReceipt(string const& _transactionHash) override { + Json::Value eth_getTransactionReceipt(const string& _transactionHash) override { return toJson(get_transaction_receipt(jsToFixed<32>(_transactionHash))); } - Json::Value eth_getUncleByBlockHashAndIndex(string const&, string const&) override { return Json::Value(); } + Json::Value eth_getUncleByBlockHashAndIndex(const string&, const string&) override { return Json::Value(); } - Json::Value eth_getUncleByBlockNumberAndIndex(string const&, string const&) override { return Json::Value(); } + Json::Value eth_getUncleByBlockNumberAndIndex(const string&, 
const string&) override { return Json::Value(); } - string eth_newFilter(Json::Value const& _json) override { + string eth_newFilter(const Json::Value& _json) override { return toJS(watches_.logs_.install_watch(parse_log_filter(_json))); } @@ -224,26 +224,26 @@ class EthImpl : public Eth, EthParams { string eth_newPendingTransactionFilter() override { return toJS(watches_.new_transactions_.install_watch()); } - bool eth_uninstallFilter(string const& _filterId) override { + bool eth_uninstallFilter(const string& _filterId) override { auto watch_id = jsToInt(_filterId); return watches_.visit_by_id(watch_id, [=](auto watch) { return watch && watch->uninstall_watch(watch_id); }); } - Json::Value eth_getFilterChanges(string const& _filterId) override { + Json::Value eth_getFilterChanges(const string& _filterId) override { auto watch_id = jsToInt(_filterId); return watches_.visit_by_id(watch_id, [=](auto watch) { return watch ? toJsonArray(watch->poll(watch_id)) : Json::Value(Json::arrayValue); }); } - Json::Value eth_getFilterLogs(string const& _filterId) override { + Json::Value eth_getFilterLogs(const string& _filterId) override { if (auto filter = watches_.logs_.get_watch_params(jsToInt(_filterId))) { return toJsonArray(filter->match_all(*final_chain)); } return Json::Value(Json::arrayValue); } - Json::Value eth_getLogs(Json::Value const& _json) override { + Json::Value eth_getLogs(const Json::Value& _json) override { return toJsonArray(parse_log_filter(_json).match_all(*final_chain)); } @@ -254,8 +254,8 @@ class EthImpl : public Eth, EthParams { Json::Value eth_chainId() override { return chain_id ? 
Json::Value(toJS(chain_id)) : Json::Value(); } - void note_block_executed(BlockHeader const& blk_header, SharedTransactions const& trxs, - TransactionReceipts const& receipts) override { + void note_block_executed(const BlockHeader& blk_header, const SharedTransactions& trxs, + const TransactionReceipts& receipts) override { watches_.new_blocks_.process_update(blk_header.hash); ExtendedTransactionLocation trx_loc{{{blk_header.number}, blk_header.hash}}; for (; trx_loc.index < trxs.size(); ++trx_loc.index) { @@ -265,7 +265,7 @@ class EthImpl : public Eth, EthParams { } } - void note_pending_transaction(h256 const& trx_hash) override { watches_.new_transactions_.process_update(trx_hash); } + void note_pending_transaction(const h256& trx_hash) override { watches_.new_transactions_.process_update(trx_hash); } Json::Value get_block_by_number(EthBlockNumber blk_n, bool include_transactions) { auto blk_header = final_chain->block_header(blk_n); @@ -278,7 +278,7 @@ class EthImpl : public Eth, EthParams { ExtendedTransactionLocation loc; loc.blk_n = blk_header->number; loc.blk_h = blk_header->hash; - for (auto const& t : final_chain->transactions(blk_n)) { + for (const auto& t : final_chain->transactions(blk_n)) { trxs_json.append(toJson(*t, loc)); ++loc.index; } @@ -289,7 +289,7 @@ class EthImpl : public Eth, EthParams { return ret; } - optional get_transaction(h256 const& h) const { + optional get_transaction(const h256& h) const { auto trx = get_trx(h); if (!trx) { return {}; @@ -318,18 +318,18 @@ class EthImpl : public Eth, EthParams { }; } - optional get_transaction(h256 const& blk_h, uint64_t _i) const { + optional get_transaction(const h256& blk_h, uint64_t _i) const { auto blk_n = final_chain->block_number(blk_h); return blk_n ? 
get_transaction(_i, *blk_n) : nullopt; } - optional get_transaction_receipt(h256 const& trx_h) const { + optional get_transaction_receipt(const h256& trx_h) const { auto r = final_chain->transaction_receipt(trx_h); if (!r) { return {}; } auto loc_trx = get_transaction(trx_h); - auto const& trx = loc_trx->trx; + const auto& trx = loc_trx->trx; return LocalisedTransactionReceipt{ *r, ExtendedTransactionLocation{*loc_trx->trx_loc, trx_h}, @@ -338,16 +338,16 @@ class EthImpl : public Eth, EthParams { }; } - uint64_t transactionCount(h256 const& block_hash) const { + uint64_t transactionCount(const h256& block_hash) const { auto n = final_chain->block_number(block_hash); return n ? final_chain->transactionCount(n) : 0; } - trx_nonce_t transaction_count(EthBlockNumber n, Address const& addr) { + trx_nonce_t transaction_count(EthBlockNumber n, const Address& addr) { return final_chain->get_account(addr, n).value_or(ZeroAccount).nonce; } - state_api::ExecutionResult call(EthBlockNumber blk_n, TransactionSkeleton const& trx) { + state_api::ExecutionResult call(EthBlockNumber blk_n, const TransactionSkeleton& trx) { const auto result = final_chain->call( { trx.from, @@ -383,7 +383,7 @@ class EthImpl : public Eth, EthParams { } DEV_SIMPLE_EXCEPTION(InvalidAddress); - static Address toAddress(string const& s) { + static Address toAddress(const string& s) { try { if (auto b = fromHex(s.substr(0, 2) == "0x" ? 
s.substr(2) : s, WhenError::Throw); b.size() == Address::size) { return Address(b); @@ -393,7 +393,7 @@ class EthImpl : public Eth, EthParams { BOOST_THROW_EXCEPTION(InvalidAddress()); } - static TransactionSkeleton toTransactionSkeleton(Json::Value const& _json) { + static TransactionSkeleton toTransactionSkeleton(const Json::Value& _json) { TransactionSkeleton ret; if (!_json.isObject() || _json.empty()) { return ret; @@ -425,45 +425,45 @@ class EthImpl : public Eth, EthParams { return ret; } - static optional parse_blk_num_specific(string const& blk_num_str) { + static optional parse_blk_num_specific(const string& blk_num_str) { if (blk_num_str == "latest" || blk_num_str == "pending") { return std::nullopt; } return blk_num_str == "earliest" ? 0 : jsToInt(blk_num_str); } - EthBlockNumber parse_blk_num(string const& blk_num_str) { + EthBlockNumber parse_blk_num(const string& blk_num_str) { auto ret = parse_blk_num_specific(blk_num_str); return ret ? *ret : final_chain->last_block_number(); } - LogFilter parse_log_filter(Json::Value const& json) { + LogFilter parse_log_filter(const Json::Value& json) { EthBlockNumber from_block; optional to_block; AddressSet addresses; LogFilter::Topics topics; - if (auto const& fromBlock = json["fromBlock"]; !fromBlock.empty()) { + if (const auto& fromBlock = json["fromBlock"]; !fromBlock.empty()) { from_block = parse_blk_num(fromBlock.asString()); } else { from_block = final_chain->last_block_number(); } - if (auto const& toBlock = json["toBlock"]; !toBlock.empty()) { + if (const auto& toBlock = json["toBlock"]; !toBlock.empty()) { to_block = parse_blk_num_specific(toBlock.asString()); } - if (auto const& address = json["address"]; !address.empty()) { + if (const auto& address = json["address"]; !address.empty()) { if (address.isArray()) { - for (auto const& obj : address) { + for (const auto& obj : address) { addresses.insert(toAddress(obj.asString())); } } else { addresses.insert(toAddress(address.asString())); } } - if (auto 
const& topics_json = json["topics"]; !topics_json.empty()) { + if (const auto& topics_json = json["topics"]; !topics_json.empty()) { for (uint32_t i = 0; i < topics_json.size(); i++) { - auto const& topic_json = topics_json[i]; + const auto& topic_json = topics_json[i]; if (topic_json.isArray()) { - for (auto const& t : topic_json) { + for (const auto& t : topic_json) { if (!t.isNull()) { topics[i].insert(jsToFixed<32>(t.asString())); } diff --git a/libraries/core_libs/network/rpc/eth/Eth.h b/libraries/core_libs/network/rpc/eth/Eth.h index 33617c331a..acf361e0b7 100644 --- a/libraries/core_libs/network/rpc/eth/Eth.h +++ b/libraries/core_libs/network/rpc/eth/Eth.h @@ -18,21 +18,21 @@ Json::Value toJson(const LocalisedTransactionReceipt& ltr); Json::Value toJson(const SyncStatus& obj); template -Json::Value toJson(T const& t) { +Json::Value toJson(const T& t) { return toJS(t); } template -Json::Value toJsonArray(std::vector const& _es) { +Json::Value toJsonArray(const std::vector& _es) { Json::Value res(Json::arrayValue); - for (auto const& e : _es) { + for (const auto& e : _es) { res.append(toJson(e)); } return res; } template -Json::Value toJson(std::optional const& t) { +Json::Value toJson(const std::optional& t) { return t ? 
toJson(*t) : Json::Value(); } diff --git a/libraries/core_libs/network/rpc/eth/LogFilter.cpp b/libraries/core_libs/network/rpc/eth/LogFilter.cpp index 6a7208c5fd..115a664ffa 100644 --- a/libraries/core_libs/network/rpc/eth/LogFilter.cpp +++ b/libraries/core_libs/network/rpc/eth/LogFilter.cpp @@ -8,7 +8,7 @@ LogFilter::LogFilter(EthBlockNumber from_block, std::optional to if (!addresses_.empty()) { return; } - for (auto const& t : topics_) { + for (const auto& t : topics_) { if (!t.empty()) { return; } @@ -20,7 +20,7 @@ std::vector LogFilter::bloomPossibilities() const { // return combination of each of the addresses/topics std::vector ret; // | every address with every topic - for (auto const& i : addresses_) { + for (const auto& i : addresses_) { // 1st case, there are addresses and topics // // m_addresses = [a0, a1]; @@ -31,12 +31,12 @@ std::vector LogFilter::bloomPossibilities() const { // a1 | t0, a1 | t1a | t1b // ] // - for (auto const& t : topics_) { + for (const auto& t : topics_) { if (t.empty()) { continue; } auto b = LogBloom().shiftBloom<3>(sha3(i)); - for (auto const& j : t) { + for (const auto& j : t) { b = b.shiftBloom<3>(sha3(j)); } ret.push_back(b); @@ -51,7 +51,7 @@ std::vector LogFilter::bloomPossibilities() const { // blooms = [a0, a1]; // if (ret.empty()) { - for (auto const& i : addresses_) { + for (const auto& i : addresses_) { ret.push_back(LogBloom().shiftBloom<3>(sha3(i))); } } @@ -64,10 +64,10 @@ std::vector LogFilter::bloomPossibilities() const { // blooms = [t0, t1a | t1b]; // if (addresses_.empty()) { - for (auto const& t : topics_) { + for (const auto& t : topics_) { if (t.size()) { LogBloom b; - for (auto const& j : t) { + for (const auto& j : t) { b = b.shiftBloom<3>(sha3(j)); } ret.push_back(b); @@ -80,7 +80,7 @@ std::vector LogFilter::bloomPossibilities() const { bool LogFilter::matches(LogBloom b) const { if (!addresses_.empty()) { auto ok = false; - for (auto const& i : addresses_) { + for (const auto& i : addresses_) { if 
(b.containsBloom<3>(sha3(i))) { ok = true; break; @@ -90,12 +90,12 @@ bool LogFilter::matches(LogBloom b) const { return false; } } - for (auto const& t : topics_) { + for (const auto& t : topics_) { if (t.empty()) { continue; } auto ok = false; - for (auto const& i : t) { + for (const auto& i : t) { if (b.containsBloom<3>(sha3(i))) { ok = true; break; @@ -108,12 +108,12 @@ bool LogFilter::matches(LogBloom b) const { return true; } -void LogFilter::match_one(TransactionReceipt const& r, std::function const& cb) const { +void LogFilter::match_one(const TransactionReceipt& r, const std::function& cb) const { if (!matches(r.bloom())) { return; } for (size_t log_i = 0; log_i < r.logs.size(); ++log_i) { - auto const& e = r.logs[log_i]; + const auto& e = r.logs[log_i]; if (!addresses_.empty() && !addresses_.count(e.address)) { continue; } @@ -134,8 +134,8 @@ bool LogFilter::blk_number_matches(EthBlockNumber blk_n) const { return from_block_ <= blk_n && (!to_block_ || blk_n <= *to_block_); } -void LogFilter::match_one(ExtendedTransactionLocation const& trx_loc, TransactionReceipt const& r, - std::function const& cb) const { +void LogFilter::match_one(const ExtendedTransactionLocation& trx_loc, const TransactionReceipt& r, + const std::function& cb) const { if (!blk_number_matches(trx_loc.blk_n)) { return; } @@ -149,14 +149,14 @@ void LogFilter::match_one(ExtendedTransactionLocation const& trx_loc, Transactio } } -std::vector LogFilter::match_all(FinalChain const& final_chain) const { +std::vector LogFilter::match_all(const FinalChain& final_chain) const { std::vector ret; auto action = [&, this](EthBlockNumber blk_n) { ExtendedTransactionLocation trx_loc{{{blk_n}, *final_chain.block_hash(blk_n)}}; auto hashes = final_chain.transaction_hashes(trx_loc.blk_n); for (const auto& hash : *hashes) { trx_loc.trx_hash = hash; - match_one(trx_loc, *final_chain.transaction_receipt(hash), [&](auto const& lle) { ret.push_back(lle); }); + match_one(trx_loc, 
*final_chain.transaction_receipt(hash), [&](const auto& lle) { ret.push_back(lle); }); ++trx_loc.index; } }; @@ -168,7 +168,7 @@ std::vector LogFilter::match_all(FinalChain const& final_chai return ret; } std::set matchingBlocks; - for (auto const& bloom : bloomPossibilities()) { + for (const auto& bloom : bloomPossibilities()) { for (auto blk_n : final_chain.withBlockBloom(bloom, from_block_, to_blk_n)) { matchingBlocks.insert(blk_n); } From d32f1072f65f5a52a9799c1fb39df34cc3d6e14b Mon Sep 17 00:00:00 2001 From: rjonczy Date: Mon, 6 Mar 2023 16:42:39 +0100 Subject: [PATCH 042/162] added changelog entry for chart --- charts/taraxa-node/CHANGELOG.md | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/charts/taraxa-node/CHANGELOG.md b/charts/taraxa-node/CHANGELOG.md index 2e10e6750b..4a130d884a 100644 --- a/charts/taraxa-node/CHANGELOG.md +++ b/charts/taraxa-node/CHANGELOG.md @@ -3,6 +3,13 @@ This file documents all notable changes to `taraxa-node` Helm Chart. The release numbering uses [semantic versioning](http://semver.org). 
+ +## v0.3.4 + +### Minor changes + +* Enabled CORS on `Ingress` of indexer + ## v0.3.3 ### Major changes From b1472a975fb315bf687cff8f2bc10f3584dcb37c Mon Sep 17 00:00:00 2001 From: rjonczy Date: Mon, 6 Mar 2023 17:47:50 +0100 Subject: [PATCH 043/162] changed db_path to data_dir --- charts/taraxa-node/templates/taraxa-node.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/charts/taraxa-node/templates/taraxa-node.yaml b/charts/taraxa-node/templates/taraxa-node.yaml index b86e5d9ed5..c03abe2a0a 100644 --- a/charts/taraxa-node/templates/taraxa-node.yaml +++ b/charts/taraxa-node/templates/taraxa-node.yaml @@ -104,7 +104,7 @@ spec: imagePullPolicy: {{ .Values.node.indexer.image.pullPolicy }} command: ["/taraxa-indexer"] args: - - -db_path + - -data_dir - {{ .Values.node.indexer.persistence.mountPoint }} - -blockchain_ws - 'ws://localhost:8777' From 18dc3eaaeb14eac5ed0a4a29161e825498841336 Mon Sep 17 00:00:00 2001 From: rjonczy Date: Mon, 6 Mar 2023 17:51:02 +0100 Subject: [PATCH 044/162] bump chart version --- charts/taraxa-node/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/charts/taraxa-node/Chart.yaml b/charts/taraxa-node/Chart.yaml index 9c315b27b5..c08c8abf65 100644 --- a/charts/taraxa-node/Chart.yaml +++ b/charts/taraxa-node/Chart.yaml @@ -2,7 +2,7 @@ apiVersion: v2 appVersion: "1.0" description: Kubernetes helm chart for Taraxa blockchain full node implementation. 
name: taraxa-node -version: 0.3.4 +version: 0.3.5 keywords: - blockchain - taraxa From 2f51c9e24df5e344f5bae31250d79fe48292da14 Mon Sep 17 00:00:00 2001 From: rjonczy Date: Mon, 6 Mar 2023 17:52:10 +0100 Subject: [PATCH 045/162] updated changelog for helm-chart --- charts/taraxa-node/CHANGELOG.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/charts/taraxa-node/CHANGELOG.md b/charts/taraxa-node/CHANGELOG.md index 4a130d884a..68eeb69e63 100644 --- a/charts/taraxa-node/CHANGELOG.md +++ b/charts/taraxa-node/CHANGELOG.md @@ -3,6 +3,11 @@ This file documents all notable changes to `taraxa-node` Helm Chart. The release numbering uses [semantic versioning](http://semver.org). +## v0.3.5 + +### Minor changes + +* Changed `db_path` to `data_dir` for taraxa-indexer ## v0.3.4 From 334177ebfc50cccc106b574d3182afb6c095f6af Mon Sep 17 00:00:00 2001 From: JakubFornadel Date: Thu, 2 Mar 2023 15:46:42 +0100 Subject: [PATCH 046/162] chore: implement claimAllRewards function in dpos contract --- submodules/taraxa-evm | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/submodules/taraxa-evm b/submodules/taraxa-evm index 1094380a89..207225fd09 160000 --- a/submodules/taraxa-evm +++ b/submodules/taraxa-evm @@ -1 +1 @@ -Subproject commit 1094380a89d28e395503a55076bb5876c64077c4 +Subproject commit 207225fd09120f53f9de5c3f89a70892aac4061c From a949579b652686a55c2251a5b5be739ea078f5b6 Mon Sep 17 00:00:00 2001 From: JakubFornadel Date: Mon, 6 Mar 2023 12:51:17 +0100 Subject: [PATCH 047/162] chore: update evm --- submodules/taraxa-evm | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/submodules/taraxa-evm b/submodules/taraxa-evm index 207225fd09..5c5d8b53da 160000 --- a/submodules/taraxa-evm +++ b/submodules/taraxa-evm @@ -1 +1 @@ -Subproject commit 207225fd09120f53f9de5c3f89a70892aac4061c +Subproject commit 5c5d8b53da2dfed08a9d08f99b9e36d6500b8cc3 From 01abfbb58f74d65e0defe6f8c038935ef5aa670b Mon Sep 17 00:00:00 2001 From: JakubFornadel Date: Mon, 6 Mar 
2023 13:21:56 +0100 Subject: [PATCH 048/162] chore: update evm --- submodules/taraxa-evm | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/submodules/taraxa-evm b/submodules/taraxa-evm index 5c5d8b53da..760f7ff631 160000 --- a/submodules/taraxa-evm +++ b/submodules/taraxa-evm @@ -1 +1 @@ -Subproject commit 5c5d8b53da2dfed08a9d08f99b9e36d6500b8cc3 +Subproject commit 760f7ff631a264b132b7771f7a4b423154f2c7e0 From 3a3b45e9004b57f05a55e2174a183a4fa04d2c65 Mon Sep 17 00:00:00 2001 From: Matus Kysel Date: Mon, 6 Mar 2023 11:06:22 +0100 Subject: [PATCH 049/162] chore: update do cpp-check 2.10 --- CMakeModules/cppcheck.cmake | 8 ++++++-- conanfile.py | 2 +- libraries/aleth/libdevcore/RLP.cpp | 14 +++++++------- libraries/config/src/network.cpp | 2 +- .../consensus/src/final_chain/final_chain.cpp | 2 +- .../consensus/src/key_manager/key_manager.cpp | 2 +- .../core_libs/consensus/src/pbft/pbft_manager.cpp | 2 +- 7 files changed, 18 insertions(+), 14 deletions(-) diff --git a/CMakeModules/cppcheck.cmake b/CMakeModules/cppcheck.cmake index 9a7c8168ae..a0c7df1b56 100644 --- a/CMakeModules/cppcheck.cmake +++ b/CMakeModules/cppcheck.cmake @@ -11,7 +11,7 @@ else () --error-exitcode=1 --enable=all --suppress=missingInclude - --suppress=useStlAlgorithm + #--suppress=useStlAlgorithm --suppress=noExplicitConstructor --suppress=unknownMacro # false positive @@ -27,7 +27,11 @@ else () # TODO remove this when we solve correct exit of programs --suppress=localMutex:${PROJECT_SOURCE_DIR}/*/main.cpp # Just style warning - --suppress=virtualCallInConstructor:${PROJECT_SOURCE_DIR}/*/final_chain.cpp + --suppress=unmatchedSuppression:${PROJECT_SOURCE_DIR}/*/final_chain.cpp + # graphql generated + --suppress=shadowVariable:${PROJECT_SOURCE_DIR}/*/TaraxaSchema.* + + --suppress=cstyleCast # Only show found errors "--quiet" diff --git a/conanfile.py b/conanfile.py index 736f97972d..8cabe15fab 100644 --- a/conanfile.py +++ b/conanfile.py @@ -14,7 +14,7 @@ class TaraxaConan(ConanFile): 
def requirements(self): self.requires("boost/1.81.0") - self.requires("cppcheck/2.7.5") + self.requires("cppcheck/2.10") self.requires("openssl/1.1.1t") self.requires("cryptopp/8.7.0") self.requires("gtest/1.13.0") diff --git a/libraries/aleth/libdevcore/RLP.cpp b/libraries/aleth/libdevcore/RLP.cpp index e85aff99a2..a8a34966cb 100644 --- a/libraries/aleth/libdevcore/RLP.cpp +++ b/libraries/aleth/libdevcore/RLP.cpp @@ -115,27 +115,27 @@ size_t RLP::length() const { if (m_data.size() <= size_t(n - c_rlpDataIndLenZero)) BOOST_THROW_EXCEPTION(BadRLP()); if (m_data.size() > 1) if (m_data[1] == 0) BOOST_THROW_EXCEPTION(BadRLP()); - unsigned lengthSize = n - c_rlpDataIndLenZero; - if (lengthSize > sizeof(ret)) + const unsigned length_size = n - c_rlpDataIndLenZero; + if (length_size > sizeof(ret)) // We did not check, but would most probably not fit in our memory. BOOST_THROW_EXCEPTION(UndersizeRLP()); // No leading zeroes. if (!m_data[1]) BOOST_THROW_EXCEPTION(BadRLP()); - for (unsigned i = 0; i < lengthSize; ++i) ret = (ret << 8) | m_data[i + 1]; + for (unsigned i = 0; i < length_size; ++i) ret = (ret << 8) | m_data[i + 1]; // Must be greater than the limit. if (ret < c_rlpListStart - c_rlpDataImmLenStart - c_rlpMaxLengthBytes) BOOST_THROW_EXCEPTION(BadRLP()); } else if (n <= c_rlpListIndLenZero) return n - c_rlpListStart; else { - unsigned lengthSize = n - c_rlpListIndLenZero; - if (m_data.size() <= lengthSize) BOOST_THROW_EXCEPTION(BadRLP()); + const unsigned length_size = n - c_rlpListIndLenZero; + if (m_data.size() <= length_size) BOOST_THROW_EXCEPTION(BadRLP()); if (m_data.size() > 1) if (m_data[1] == 0) BOOST_THROW_EXCEPTION(BadRLP()); - if (lengthSize > sizeof(ret)) + if (length_size > sizeof(ret)) // We did not check, but would most probably not fit in our memory. 
BOOST_THROW_EXCEPTION(UndersizeRLP()); if (!m_data[1]) BOOST_THROW_EXCEPTION(BadRLP()); - for (unsigned i = 0; i < lengthSize; ++i) ret = (ret << 8) | m_data[i + 1]; + for (unsigned i = 0; i < length_size; ++i) ret = (ret << 8) | m_data[i + 1]; if (ret < 0x100 - c_rlpListStart - c_rlpMaxLengthBytes) BOOST_THROW_EXCEPTION(BadRLP()); } // We have to be able to add payloadOffset to length without overflow. diff --git a/libraries/config/src/network.cpp b/libraries/config/src/network.cpp index f4325fd9dc..3f4a83a73b 100644 --- a/libraries/config/src/network.cpp +++ b/libraries/config/src/network.cpp @@ -137,7 +137,7 @@ void dec_json(const Json::Value &json, NetworkConfig &network) { getConfigDataAsUInt(json, {"deep_syncing_threshold"}, true, network.deep_syncing_threshold); network.ddos_protection = dec_ddos_protection_config_json(getConfigData(json, {"ddos_protection"})); - for (auto &item : json["boot_nodes"]) { + for (const auto &item : json["boot_nodes"]) { network.boot_nodes.push_back(dec_json(item)); } auto listen_ip = boost::asio::ip::address::from_string(network.listen_ip); diff --git a/libraries/core_libs/consensus/src/final_chain/final_chain.cpp b/libraries/core_libs/consensus/src/final_chain/final_chain.cpp index 44fb1aaecc..ae997eeebf 100644 --- a/libraries/core_libs/consensus/src/final_chain/final_chain.cpp +++ b/libraries/core_libs/consensus/src/final_chain/final_chain.cpp @@ -243,9 +243,9 @@ class FinalChainImpl final : public FinalChain { } void prune(EthBlockNumber blk_n) override { - std::vector state_root_to_prune; const auto last_block_to_keep = get_block_header(blk_n); if (last_block_to_keep) { + std::vector state_root_to_prune; LOG(log_nf_) << "Pruning data older than " << blk_n; auto block_to_prune = get_block_header(last_block_to_keep->number - 1); while (block_to_prune && block_to_prune->number > 0) { diff --git a/libraries/core_libs/consensus/src/key_manager/key_manager.cpp b/libraries/core_libs/consensus/src/key_manager/key_manager.cpp index 
fe1464e31c..220707fc2f 100644 --- a/libraries/core_libs/consensus/src/key_manager/key_manager.cpp +++ b/libraries/core_libs/consensus/src/key_manager/key_manager.cpp @@ -19,7 +19,7 @@ std::shared_ptr KeyManager::get(EthBlockNumber blk_n, con std::unique_lock lock(mutex_); return key_map_.insert_or_assign(addr, std::make_shared(std::move(key))).first->second; } - } catch (state_api::ErrFutureBlock& e) { + } catch (state_api::ErrFutureBlock&) { return nullptr; } diff --git a/libraries/core_libs/consensus/src/pbft/pbft_manager.cpp b/libraries/core_libs/consensus/src/pbft/pbft_manager.cpp index cacb98ad48..70ea0b5ef5 100644 --- a/libraries/core_libs/consensus/src/pbft/pbft_manager.cpp +++ b/libraries/core_libs/consensus/src/pbft/pbft_manager.cpp @@ -1481,7 +1481,6 @@ void PbftManager::reorderTransactions(SharedTransactions &transactions) { // While iterating over transactions, account_nonce will keep the last nonce for the account std::unordered_map account_nonce; - std::unordered_map>> account_nonce_transactions; // Find accounts that need reordering and place in account_reverse_order set for (uint32_t i = 0; i < transactions.size(); i++) { @@ -1502,6 +1501,7 @@ void PbftManager::reorderTransactions(SharedTransactions &transactions) { // If account_reverse_order size is 0, there is no need to reorder transactions if (account_reverse_order.size() > 0) { + std::unordered_map>> account_nonce_transactions; // Keep the order for all transactions that do not need reordering for (uint32_t i = 0; i < transactions.size(); i++) { const auto &t = transactions[i]; From 168fb1bb5c6f668d276382cccbcc3f8b863c97ac Mon Sep 17 00:00:00 2001 From: Matus Kysel Date: Mon, 6 Mar 2023 12:49:59 +0100 Subject: [PATCH 050/162] chore: cppcheck useStlAlgorithm --- CMakeModules/cppcheck.cmake | 3 ++- libraries/aleth/libdevcore/FixedHash.h | 3 ++- libraries/aleth/libdevcore/RLP.h | 3 ++- .../consensus/src/dag/dag_block_proposer.cpp | 5 +---- .../consensus/src/final_chain/final_chain.cpp | 6 +++--- 
.../core_libs/consensus/src/pbft/pbft_manager.cpp | 7 +++---- .../consensus/src/vote_manager/vote_manager.cpp | 6 +++--- libraries/core_libs/network/rpc/Test.cpp | 5 ++--- libraries/core_libs/network/rpc/eth/LogFilter.cpp | 12 +++--------- .../network/src/tarcap/threadpool/priority_queue.cpp | 8 +------- libraries/types/transaction/src/transaction.cpp | 6 +++--- 11 files changed, 25 insertions(+), 39 deletions(-) diff --git a/CMakeModules/cppcheck.cmake b/CMakeModules/cppcheck.cmake index a0c7df1b56..eacce373ad 100644 --- a/CMakeModules/cppcheck.cmake +++ b/CMakeModules/cppcheck.cmake @@ -11,7 +11,8 @@ else () --error-exitcode=1 --enable=all --suppress=missingInclude - #--suppress=useStlAlgorithm + # find_if - useless here + --suppress=useStlAlgorithm:${PROJECT_SOURCE_DIR}/*/pbft_sync_packet_handler.cpp --suppress=noExplicitConstructor --suppress=unknownMacro # false positive diff --git a/libraries/aleth/libdevcore/FixedHash.h b/libraries/aleth/libdevcore/FixedHash.h index 645142ef3e..635a320715 100644 --- a/libraries/aleth/libdevcore/FixedHash.h +++ b/libraries/aleth/libdevcore/FixedHash.h @@ -209,7 +209,8 @@ class FixedHash { /// Populate with random data. template void randomize(Engine& _eng) { - for (auto& i : m_data) i = (uint8_t)std::uniform_int_distribution(0, 255)(_eng); + std::generate(m_data.begin(), m_data.end(), + [&]() { return (uint8_t)std::uniform_int_distribution(0, 255)(_eng); }); } /// @returns a random valued object. 
diff --git a/libraries/aleth/libdevcore/RLP.h b/libraries/aleth/libdevcore/RLP.h index 98fef745c2..af816a6bfe 100644 --- a/libraries/aleth/libdevcore/RLP.h +++ b/libraries/aleth/libdevcore/RLP.h @@ -251,7 +251,8 @@ class RLP { std::vector ret; if (isList()) { ret.reserve(itemCount()); - for (auto const i : *this) ret.push_back(i.convert(_flags)); + std::transform((*this).begin(), (*this).end(), std::back_inserter(ret), + [_flags](const auto i) { return i.template convert(_flags); }); } else if (_flags & ThrowOnFail) BOOST_THROW_EXCEPTION(BadCast()); return ret; diff --git a/libraries/core_libs/consensus/src/dag/dag_block_proposer.cpp b/libraries/core_libs/consensus/src/dag/dag_block_proposer.cpp index 4c1c916652..f62543a1c3 100644 --- a/libraries/core_libs/consensus/src/dag/dag_block_proposer.cpp +++ b/libraries/core_libs/consensus/src/dag/dag_block_proposer.cpp @@ -312,10 +312,7 @@ DagBlock DagBlockProposer::createDagBlock(DagFrontier&& frontier, level_t level, trx_hashes.push_back(trx->getHash()); } - uint64_t block_estimation = 0; - for (const auto& e : estimations) { - block_estimation += e; - } + const int64_t block_estimation = std::accumulate(estimations.begin(), estimations.end(), 0); // If number of tips is over the limit filter by producer and level if (frontier.tips.size() > kDagBlockMaxTips || (frontier.tips.size() + 1) > kPbftGasLimit / kDagGasLimit) { diff --git a/libraries/core_libs/consensus/src/final_chain/final_chain.cpp b/libraries/core_libs/consensus/src/final_chain/final_chain.cpp index ae997eeebf..fd9a37c5e7 100644 --- a/libraries/core_libs/consensus/src/final_chain/final_chain.cpp +++ b/libraries/core_libs/consensus/src/final_chain/final_chain.cpp @@ -179,9 +179,9 @@ class FinalChainImpl final : public FinalChain { for (auto const& r : exec_results) { LogEntries logs; logs.reserve(r.logs.size()); - for (auto const& l : r.logs) { - logs.emplace_back(LogEntry{l.address, l.topics, l.data}); - } + std::transform(r.logs.cbegin(), r.logs.cend(), 
std::back_inserter(logs), [](const auto& l) { + return LogEntry{l.address, l.topics, l.data}; + }); receipts.emplace_back(TransactionReceipt{ r.code_err.empty() && r.consensus_err.empty(), r.gas_used, diff --git a/libraries/core_libs/consensus/src/pbft/pbft_manager.cpp b/libraries/core_libs/consensus/src/pbft/pbft_manager.cpp index 70ea0b5ef5..1479312d9f 100644 --- a/libraries/core_libs/consensus/src/pbft/pbft_manager.cpp +++ b/libraries/core_libs/consensus/src/pbft/pbft_manager.cpp @@ -1838,10 +1838,9 @@ void PbftManager::periodDataQueuePush(PeriodData &&period_data, dev::p2p::NodeID size_t PbftManager::periodDataQueueSize() const { return sync_queue_.size(); } bool PbftManager::checkBlockWeight(const std::vector &dag_blocks) const { - u256 total_weight = 0; - for (const auto &dag_block : dag_blocks) { - total_weight += dag_block.getGasEstimation(); - } + const u256 total_weight = + std::accumulate(dag_blocks.begin(), dag_blocks.end(), u256(0), + [](u256 value, const auto &dag_block) { return value + dag_block.getGasEstimation(); }); if (total_weight > config_.gas_limit) { return false; } diff --git a/libraries/core_libs/consensus/src/vote_manager/vote_manager.cpp b/libraries/core_libs/consensus/src/vote_manager/vote_manager.cpp index f5911eae59..5e61f6d3ca 100644 --- a/libraries/core_libs/consensus/src/vote_manager/vote_manager.cpp +++ b/libraries/core_libs/consensus/src/vote_manager/vote_manager.cpp @@ -81,9 +81,9 @@ uint64_t VoteManager::getVerifiedVotesSize() const { for (auto const& period : verified_votes_) { for (auto const& round : period.second) { for (auto const& step : round.second.step_votes) { - for (auto const& voted_value : step.second.votes) { - size += voted_value.second.second.size(); - } + size += std::accumulate( + step.second.votes.begin(), step.second.votes.end(), 0, + [](uint64_t value, const auto& voted_value) { return value + voted_value.second.second.size(); }); } } } diff --git a/libraries/core_libs/network/rpc/Test.cpp 
b/libraries/core_libs/network/rpc/Test.cpp index cb43a78b3d..0448abd06a 100644 --- a/libraries/core_libs/network/rpc/Test.cpp +++ b/libraries/core_libs/network/rpc/Test.cpp @@ -77,9 +77,8 @@ Json::Value Test::send_coin_transactions(const Json::Value ¶m1) { auto gas = dev::jsToInt(param1["gas"].asString()); auto transactions_count = param1["transaction_count"].asUInt64(); std::vector receivers; - for (auto rec : param1["receiver"]) { - receivers.emplace_back(addr_t(rec.asString())); - } + std::transform(param1["receiver"].begin(), param1["receiver"].end(), std::back_inserter(receivers), + [](const auto rec) { return addr_t(rec.asString()); }); for (uint32_t i = 0; i < transactions_count; i++) { auto trx = std::make_shared(nonce, value, gas_price, gas, bytes(), sk, receivers[i % receivers.size()], kChainId); diff --git a/libraries/core_libs/network/rpc/eth/LogFilter.cpp b/libraries/core_libs/network/rpc/eth/LogFilter.cpp index 115a664ffa..3969a89ae0 100644 --- a/libraries/core_libs/network/rpc/eth/LogFilter.cpp +++ b/libraries/core_libs/network/rpc/eth/LogFilter.cpp @@ -8,12 +8,7 @@ LogFilter::LogFilter(EthBlockNumber from_block, std::optional to if (!addresses_.empty()) { return; } - for (const auto& t : topics_) { - if (!t.empty()) { - return; - } - } - is_range_only_ = true; + is_range_only_ = std::all_of(topics_.cbegin(), topics_.cend(), [](const auto& t) { return t.empty(); }); } std::vector LogFilter::bloomPossibilities() const { @@ -51,9 +46,8 @@ std::vector LogFilter::bloomPossibilities() const { // blooms = [a0, a1]; // if (ret.empty()) { - for (const auto& i : addresses_) { - ret.push_back(LogBloom().shiftBloom<3>(sha3(i))); - } + std::transform(addresses_.cbegin(), addresses_.cend(), std::back_inserter(ret), + [](const auto& i) { return LogBloom().shiftBloom<3>(sha3(i)); }); } // 3rd case, there are no addresses, at least create blooms from topics diff --git a/libraries/core_libs/network/src/tarcap/threadpool/priority_queue.cpp 
b/libraries/core_libs/network/src/tarcap/threadpool/priority_queue.cpp index 67a4259c65..220a0131f8 100644 --- a/libraries/core_libs/network/src/tarcap/threadpool/priority_queue.cpp +++ b/libraries/core_libs/network/src/tarcap/threadpool/priority_queue.cpp @@ -114,13 +114,7 @@ std::optional PriorityQueue::pop() { } bool PriorityQueue::empty() const { - for (const auto& queue : packets_queues_) { - if (!queue.empty()) { - return false; - } - } - - return true; + return std::all_of(packets_queues_.cbegin(), packets_queues_.cend(), [](const auto& queue) { return queue.empty(); }); } void PriorityQueue::updateDependenciesStart(const PacketData& packet) { diff --git a/libraries/types/transaction/src/transaction.cpp b/libraries/types/transaction/src/transaction.cpp index fc62920efb..79f25ba161 100644 --- a/libraries/types/transaction/src/transaction.cpp +++ b/libraries/types/transaction/src/transaction.cpp @@ -3,6 +3,7 @@ #include +#include #include #include @@ -22,9 +23,8 @@ uint64_t toChainID(u256 const &val) { TransactionHashes hashes_from_transactions(const SharedTransactions &transactions) { TransactionHashes trx_hashes; trx_hashes.reserve(transactions.size()); - for (auto const &trx : transactions) { - trx_hashes.push_back(trx->getHash()); - } + std::transform(transactions.cbegin(), transactions.cend(), std::back_inserter(trx_hashes), + [](const auto &trx) { return trx->getHash(); }); return trx_hashes; } From b753b086c980a4114f1e174e0b9ea39dd1c99751 Mon Sep 17 00:00:00 2001 From: Matus Kysel Date: Mon, 6 Mar 2023 13:30:45 +0100 Subject: [PATCH 051/162] chore: cppcheck remove cstyled casts --- CMakeModules/cppcheck.cmake | 9 +++++---- libraries/aleth/libdevcore/CommonData.h | 6 +++--- libraries/aleth/libdevcore/FixedHash.h | 4 ++-- libraries/aleth/libdevcore/RLP.h | 2 +- libraries/aleth/libdevcrypto/Common.h | 2 +- libraries/aleth/libdevcrypto/CryptoPP.cpp | 2 +- libraries/aleth/libp2p/UPnP.cpp | 2 +- libraries/common/src/vrf_wrapper.cpp | 17 ++++++++++------- 
.../consensus/src/dag/dag_block_proposer.cpp | 2 +- .../consensus/src/final_chain/state_api.cpp | 4 ++-- .../core_libs/network/rpc/jsonrpc_ws_server.cpp | 4 ++-- libraries/core_libs/network/src/ws_server.cpp | 2 +- libraries/core_libs/storage/src/storage.cpp | 4 ++-- 13 files changed, 32 insertions(+), 28 deletions(-) diff --git a/CMakeModules/cppcheck.cmake b/CMakeModules/cppcheck.cmake index eacce373ad..a9c719e5bf 100644 --- a/CMakeModules/cppcheck.cmake +++ b/CMakeModules/cppcheck.cmake @@ -29,10 +29,11 @@ else () --suppress=localMutex:${PROJECT_SOURCE_DIR}/*/main.cpp # Just style warning --suppress=unmatchedSuppression:${PROJECT_SOURCE_DIR}/*/final_chain.cpp - # graphql generated - --suppress=shadowVariable:${PROJECT_SOURCE_DIR}/*/TaraxaSchema.* - - --suppress=cstyleCast + # exclude graphql generated + -i ${PROJECT_SOURCE_DIR}/libraries/core_libs/network/graphql/gen/ + # messy files + --suppress=cstyleCast:${PROJECT_SOURCE_DIR}/*/vector_ref.h + --suppress=cstyleCast:${PROJECT_SOURCE_DIR}/*/Common.h # Only show found errors "--quiet" diff --git a/libraries/aleth/libdevcore/CommonData.h b/libraries/aleth/libdevcore/CommonData.h index 4b7177a828..56733d8601 100644 --- a/libraries/aleth/libdevcore/CommonData.h +++ b/libraries/aleth/libdevcore/CommonData.h @@ -73,19 +73,19 @@ static bool isHash(std::string const& _hash) { /// Converts byte array to a string containing the same (binary) data. Unless /// the byte array happens to contain ASCII data, this won't be printable. inline std::string asString(bytes const& _b) { - return std::string((char const*)_b.data(), (char const*)(_b.data() + _b.size())); + return std::string(reinterpret_cast(_b.data()), reinterpret_cast(_b.data() + _b.size())); } /// Converts byte array ref to a string containing the same (binary) data. /// Unless the byte array happens to contain ASCII data, this won't be /// printable. 
inline std::string asString(bytesConstRef _b) { - return std::string((char const*)_b.data(), (char const*)(_b.data() + _b.size())); + return std::string(reinterpret_cast(_b.data()), reinterpret_cast(_b.data() + _b.size())); } /// Converts a string to a byte array containing the string's (byte) data. inline bytes asBytes(std::string const& _b) { - return bytes((::byte const*)_b.data(), (::byte const*)(_b.data() + _b.size())); + return bytes(reinterpret_cast<::byte const*>(_b.data()), reinterpret_cast<::byte const*>(_b.data() + _b.size())); } /// Converts a string into the big-endian base-16 stream of integers (NOT diff --git a/libraries/aleth/libdevcore/FixedHash.h b/libraries/aleth/libdevcore/FixedHash.h index 635a320715..63c60f3f55 100644 --- a/libraries/aleth/libdevcore/FixedHash.h +++ b/libraries/aleth/libdevcore/FixedHash.h @@ -405,8 +405,8 @@ class SecureFixedHash : private FixedHash { /// Fast equality operator for h256. template <> inline bool FixedHash<32>::operator==(FixedHash<32> const& _other) const { - const uint64_t* hash1 = (const uint64_t*)data(); - const uint64_t* hash2 = (const uint64_t*)_other.data(); + const uint64_t* hash1 = reinterpret_cast(data()); + const uint64_t* hash2 = reinterpret_cast(_other.data()); return (hash1[0] == hash2[0]) && (hash1[1] == hash2[1]) && (hash1[2] == hash2[2]) && (hash1[3] == hash2[3]); } diff --git a/libraries/aleth/libdevcore/RLP.h b/libraries/aleth/libdevcore/RLP.h index af816a6bfe..15f897f344 100644 --- a/libraries/aleth/libdevcore/RLP.h +++ b/libraries/aleth/libdevcore/RLP.h @@ -84,7 +84,7 @@ class RLP { /// Construct a node to read RLP data in the string. explicit RLP(std::string const& _s, Strictness _st = VeryStrict) - : RLP(bytesConstRef((::byte const*)_s.data(), _s.size()), _st) {} + : RLP(bytesConstRef(reinterpret_cast<::byte const*>(_s.data()), _s.size()), _st) {} /// The bare data of the RLP. 
bytesConstRef data() const { return m_data; } diff --git a/libraries/aleth/libdevcrypto/Common.h b/libraries/aleth/libdevcrypto/Common.h index a3fe92e29c..83c23b1324 100644 --- a/libraries/aleth/libdevcrypto/Common.h +++ b/libraries/aleth/libdevcrypto/Common.h @@ -34,7 +34,7 @@ struct SignatureStruct { SignatureStruct() = default; SignatureStruct(Signature const& _s) { *(h520*)this = _s; } SignatureStruct(h256 const& _r, h256 const& _s, byte _v) : r(_r), s(_s), v(_v) {} - operator Signature() const { return *(h520 const*)this; } + operator Signature() const { return *reinterpret_cast(this); } /// @returns true if r,s,v values are valid, otherwise false bool isValid() const noexcept; diff --git a/libraries/aleth/libdevcrypto/CryptoPP.cpp b/libraries/aleth/libdevcrypto/CryptoPP.cpp index da5b5ff5f3..42ebf71d89 100644 --- a/libraries/aleth/libdevcrypto/CryptoPP.cpp +++ b/libraries/aleth/libdevcrypto/CryptoPP.cpp @@ -119,7 +119,7 @@ bool Secp256k1PP::decryptECIES(Secret const& _k, bytesConstRef _sharedMacData, b return false; Secret z; - if (!ecdh::agree(_k, *(Public*)(io_text.data() + 1), z)) return false; // Invalid pubkey or seckey. + if (!ecdh::agree(_k, *reinterpret_cast(io_text.data() + 1), z)) return false; // Invalid pubkey or seckey. 
auto key = ecies::kdf(z, bytes(), 64); bytesConstRef eKey = bytesConstRef(&key).cropped(0, 16); bytesRef mKeyMaterial = bytesRef(&key).cropped(16, 16); diff --git a/libraries/aleth/libp2p/UPnP.cpp b/libraries/aleth/libp2p/UPnP.cpp index 0451c91970..bcda5f2c0d 100644 --- a/libraries/aleth/libp2p/UPnP.cpp +++ b/libraries/aleth/libp2p/UPnP.cpp @@ -56,7 +56,7 @@ UPnP::UPnP() #elif MINIUPNPC_API_VERSION >= 9 descXML = (char*)miniwget(dev->descURL, &descXMLsize, 0); #else - descXML = (char*)miniwget(dev->descURL, &descXMLsize); + descXML = static_cast(miniwget(dev->descURL, &descXMLsize)); #endif if (descXML) { parserootdesc(descXML, descXMLsize, m_data.get()); diff --git a/libraries/common/src/vrf_wrapper.cpp b/libraries/common/src/vrf_wrapper.cpp index 66606b7ad1..fcf01f4103 100644 --- a/libraries/common/src/vrf_wrapper.cpp +++ b/libraries/common/src/vrf_wrapper.cpp @@ -5,23 +5,25 @@ namespace taraxa::vrf_wrapper { std::pair getVrfKeyPair() { vrf_sk_t sk; vrf_pk_t pk; - crypto_vrf_keypair((unsigned char *)pk.data(), (unsigned char *)sk.data()); + crypto_vrf_keypair(pk.data(), sk.data()); return {pk, sk}; } vrf_pk_t getVrfPublicKey(vrf_sk_t const &sk) { vrf_pk_t pk; - crypto_vrf_sk_to_pk((unsigned char *)pk.data(), (unsigned char *)sk.data()); + crypto_vrf_sk_to_pk(pk.data(), const_cast(sk.data())); return pk; } -bool isValidVrfPublicKey(vrf_pk_t const &pk) { return crypto_vrf_is_valid_key((unsigned char *)pk.data()) == 1; } +bool isValidVrfPublicKey(vrf_pk_t const &pk) { + return crypto_vrf_is_valid_key(const_cast(pk.data())) == 1; +} std::optional getVrfProof(vrf_sk_t const &sk, bytes const &msg) { vrf_proof_t proof; // crypto_vrf_prove return 0 on success! 
- if (!crypto_vrf_prove((unsigned char *)proof.data(), (const unsigned char *)sk.data(), - (const unsigned char *)msg.data(), msg.size())) { + if (!crypto_vrf_prove(proof.data(), const_cast(sk.data()), const_cast(msg.data()), + msg.size())) { return proof; } return {}; @@ -30,8 +32,9 @@ std::optional getVrfProof(vrf_sk_t const &sk, bytes const &msg) { std::optional getVrfOutput(vrf_pk_t const &pk, vrf_proof_t const &proof, bytes const &msg) { vrf_output_t output; // crypto_vrf_verify return 0 on success! - if (!crypto_vrf_verify((unsigned char *)output.data(), (const unsigned char *)pk.data(), - (const unsigned char *)proof.data(), (const unsigned char *)msg.data(), msg.size())) { + if (!crypto_vrf_verify(output.data(), const_cast(pk.data()), + const_cast(proof.data()), const_cast(msg.data()), + msg.size())) { return output; } return {}; diff --git a/libraries/core_libs/consensus/src/dag/dag_block_proposer.cpp b/libraries/core_libs/consensus/src/dag/dag_block_proposer.cpp index f62543a1c3..113eb9d229 100644 --- a/libraries/core_libs/consensus/src/dag/dag_block_proposer.cpp +++ b/libraries/core_libs/consensus/src/dag/dag_block_proposer.cpp @@ -312,7 +312,7 @@ DagBlock DagBlockProposer::createDagBlock(DagFrontier&& frontier, level_t level, trx_hashes.push_back(trx->getHash()); } - const int64_t block_estimation = std::accumulate(estimations.begin(), estimations.end(), 0); + const uint64_t block_estimation = std::accumulate(estimations.begin(), estimations.end(), 0); // If number of tips is over the limit filter by producer and level if (frontier.tips.size() > kDagBlockMaxTips || (frontier.tips.size() + 1) > kPbftGasLimit / kDagGasLimit) { diff --git a/libraries/core_libs/consensus/src/final_chain/state_api.cpp b/libraries/core_libs/consensus/src/final_chain/state_api.cpp index cbf46340e7..62fe456160 100644 --- a/libraries/core_libs/consensus/src/final_chain/state_api.cpp +++ b/libraries/core_libs/consensus/src/final_chain/state_api.cpp @@ -21,7 +21,7 @@ void 
from_rlp(taraxa_evm_Bytes b, Result& result) { util::rlp(dev::RLP(map_bytes(b), 0), result); } -void to_str(taraxa_evm_Bytes b, string& result) { result = {(char*)b.Data, b.Len}; } +void to_str(taraxa_evm_Bytes b, string& result) { result = {reinterpret_cast(b.Data), b.Len}; } void to_bytes(taraxa_evm_Bytes b, bytes& result) { result.assign(b.Data, b.Data + b.Len); } @@ -31,7 +31,7 @@ template taraxa_evm_BytesCallback decoder_cb_c(Result& res) { return { &res, - [](auto receiver, auto b) { decode(b, *(Result*)receiver); }, + [](auto receiver, auto b) { decode(b, *static_cast(receiver)); }, }; } diff --git a/libraries/core_libs/network/rpc/jsonrpc_ws_server.cpp b/libraries/core_libs/network/rpc/jsonrpc_ws_server.cpp index 50de2ee673..269d196ba6 100644 --- a/libraries/core_libs/network/rpc/jsonrpc_ws_server.cpp +++ b/libraries/core_libs/network/rpc/jsonrpc_ws_server.cpp @@ -56,8 +56,8 @@ std::string JsonRpcWsSession::processRequest(const std::string_view &request) { auto handler = ws_server->GetHandler(); if (handler != NULL) { try { - LOG(log_tr_) << "WS Read: " << (char *)buffer_.data().data(); - handler->HandleRequest((char *)buffer_.data().data(), response); + LOG(log_tr_) << "WS Read: " << static_cast(buffer_.data().data()); + handler->HandleRequest(static_cast(buffer_.data().data()), response); } catch (std::exception const &e) { LOG(log_er_) << "Exception " << e.what(); auto &res_json_error = json_response["error"] = Json::Value(Json::objectValue); diff --git a/libraries/core_libs/network/src/ws_server.cpp b/libraries/core_libs/network/src/ws_server.cpp index d698c3f6c5..03f0bc7da5 100644 --- a/libraries/core_libs/network/src/ws_server.cpp +++ b/libraries/core_libs/network/src/ws_server.cpp @@ -51,7 +51,7 @@ void WsSession::on_read(beast::error_code ec, std::size_t bytes_transferred) { return close(is_normal(ec)); } - LOG(log_tr_) << "WS READ " << ((char *)buffer_.data().data()); + LOG(log_tr_) << "WS READ " << (static_cast(buffer_.data().data())); const 
std::string_view str_view(static_cast(buffer_.data().data()), buffer_.size()); const auto response = processRequest(str_view); diff --git a/libraries/core_libs/storage/src/storage.cpp b/libraries/core_libs/storage/src/storage.cpp index fd4d10ee94..98b510f0ed 100644 --- a/libraries/core_libs/storage/src/storage.cpp +++ b/libraries/core_libs/storage/src/storage.cpp @@ -448,8 +448,8 @@ void DbStorage::clearPeriodDataHistory(PbftPeriod end_period) { auto trx_hashes_raw = lookup(period, DB::Columns::final_chain_transaction_hashes_by_blk_number); auto hashes_count = trx_hashes_raw.size() / trx_hash_t::size; for (uint32_t i = 0; i < hashes_count; i++) { - auto hash = - trx_hash_t((uint8_t*)(trx_hashes_raw.data() + i * trx_hash_t::size), trx_hash_t::ConstructFromPointer); + auto hash = trx_hash_t(reinterpret_cast(trx_hashes_raw.data() + i * trx_hash_t::size), + trx_hash_t::ConstructFromPointer); remove(write_batch, Columns::final_chain_receipt_by_trx_hash, hash); remove(write_batch, Columns::final_chain_transaction_location_by_hash, hash); } From 6a5d35ebe7923c2554426a746013cd538f849736 Mon Sep 17 00:00:00 2001 From: mfrankovi Date: Mon, 6 Mar 2023 09:03:00 +0100 Subject: [PATCH 052/162] fix: missing tip block on tips selection --- .../consensus/src/dag/dag_block_proposer.cpp | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/libraries/core_libs/consensus/src/dag/dag_block_proposer.cpp b/libraries/core_libs/consensus/src/dag/dag_block_proposer.cpp index 113eb9d229..416a8f6906 100644 --- a/libraries/core_libs/consensus/src/dag/dag_block_proposer.cpp +++ b/libraries/core_libs/consensus/src/dag/dag_block_proposer.cpp @@ -264,10 +264,14 @@ vec_blk_t DagBlockProposer::selectDagBlockTips(const vec_blk_t& frontier_tips, u // Retrieve all the tips blocks and identify duplicate proposer tips for (const auto& t : frontier_tips) { auto tip_block = dag_mgr_->getDagBlock(t); - assert(tip_block != nullptr); - tips_blocks.insert({t, tip_block}); - if 
(!proposers.insert(tip_block->getSender()).second) { - duplicate_proposers.insert(tip_block->getSender()); + if (tip_block == nullptr) { + // This could happen if a tip block has expired, exclude this tip + LOG(log_nf_) << "selectDagBlockTips, Cannot find tip dag block " << tip_block; + } else { + tips_blocks.insert({t, tip_block}); + if (!proposers.insert(tip_block->getSender()).second) { + duplicate_proposers.insert(tip_block->getSender()); + } } } From a30128465777b668b4be4baab612346733ddce55 Mon Sep 17 00:00:00 2001 From: mfrankovi Date: Tue, 7 Mar 2023 11:12:43 +0100 Subject: [PATCH 053/162] chore: random light node prune interval --- .../consensus/src/final_chain/final_chain.cpp | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/libraries/core_libs/consensus/src/final_chain/final_chain.cpp b/libraries/core_libs/consensus/src/final_chain/final_chain.cpp index fd9a37c5e7..4d7c2e2a8a 100644 --- a/libraries/core_libs/consensus/src/final_chain/final_chain.cpp +++ b/libraries/core_libs/consensus/src/final_chain/final_chain.cpp @@ -18,7 +18,8 @@ class FinalChainImpl final : public FinalChain { const uint64_t kBlockGasLimit; StateAPI state_api_; const bool kLightNode = false; - const uint64_t kLigntNodeHistory = 0; + const uint64_t kLightNodeHistory = 0; + const uint64_t kLightNodePruneOffset = 0; // It is not prepared to use more then 1 thread. 
Examine it if you want to change threads count boost::asio::thread_pool executor_thread_{1}; @@ -56,7 +57,11 @@ class FinalChainImpl final : public FinalChain { db->stateDbStoragePath().string(), }), kLightNode(config.is_light_node), - kLigntNodeHistory(config.light_node_history), + kLightNodeHistory(config.light_node_history), + // This will provide a speific random offset based on node address for each node to prevent all light nodes + // performing prune at the same block height + kLightNodePruneOffset((*reinterpret_cast(node_addr.asBytes().data())) % + std::max(config.light_node_history, (uint64_t)1)), block_headers_cache_(config.final_chain_cache_in_blocks, [this](uint64_t blk) { return get_block_header(blk); }), block_hashes_cache_(config.final_chain_cache_in_blocks, [this](uint64_t blk) { return get_block_hash(blk); }), @@ -234,9 +239,9 @@ class FinalChainImpl final : public FinalChain { if (kLightNode) { // Actual history size will be between 100% and 105% of light_node_history_ to avoid deleting on every period - if (((blk_header->number % (std::max(kLigntNodeHistory / 20, (uint64_t)1)) == 0)) && - blk_header->number > kLigntNodeHistory) { - prune(blk_header->number - kLigntNodeHistory); + if ((((blk_header->number + kLightNodePruneOffset) % (std::max(kLightNodeHistory / 20, (uint64_t)1)) == 0)) && + blk_header->number > kLightNodeHistory) { + prune(blk_header->number - kLightNodeHistory); } } return result; From 6740aa8d8dadba468d53bd1af56491bb4388cc3a Mon Sep 17 00:00:00 2001 From: Matus Kysel Date: Wed, 8 Mar 2023 13:27:03 +0100 Subject: [PATCH 054/162] remove disabled test --- tests/pbft_manager_test.cpp | 48 ------------------------------------- 1 file changed, 48 deletions(-) diff --git a/tests/pbft_manager_test.cpp b/tests/pbft_manager_test.cpp index 95f38f2ef0..aca5055b8a 100644 --- a/tests/pbft_manager_test.cpp +++ b/tests/pbft_manager_test.cpp @@ -870,54 +870,6 @@ TEST_F(PbftManagerWithDagCreation, produce_overweighted_block) { 
EXPECT_FALSE(node->getPbftManager()->checkBlockWeight(period_data.dag_blocks)); } -TEST_F(PbftManagerWithDagCreation, DISABLED_pbft_block_is_overweighted) { - auto node_cfgs = make_node_cfgs(1, 5, true); - node_cfgs.front().genesis.dag.gas_limit = 500000; - node_cfgs.front().genesis.pbft.gas_limit = 600000; - makeNode(); - deployContract(); - node->getDagBlockProposer()->stop(); - generateAndApplyInitialDag(); - - EXPECT_HAPPENS({10s, 500ms}, - [&](auto &ctx) { WAIT_EXPECT_EQ(ctx, nonce, node->getDB()->getNumTransactionExecuted() + 1); }); - - node->getPbftManager()->stop(); - // create pbft block - auto chain_size_before = node->getPbftChain()->getPbftChainSize(); - { - auto blocks_with_txs = generateDagBlocks(10, 3, 1); - insertBlocks(blocks_with_txs); - auto dag_block_hash = blocks_with_txs.back().blk.getHash(); - - // get DAG block and transaction order - const auto propose_period = node->getPbftChain()->getPbftChainSize() + 1; - auto dag_block_order = node->getDagManager()->getDagBlockOrder(dag_block_hash, propose_period); - ASSERT_TRUE(!dag_block_order.empty()); - - std::vector trx_hashes; - for (const auto &bt : blocks_with_txs) { - std::transform(bt.trxs.begin(), bt.trxs.end(), std::back_inserter(trx_hashes), - [](const auto &t) { return t->getHash(); }); - } - auto order_hash = node->getPbftManager()->calculateOrderHash(dag_block_order); - - const auto &last_hash = node->getPbftChain()->getLastPbftBlockHash(); - auto reward_votes = node->getDB()->getRewardVotes(); - std::vector reward_votes_hashes; - std::transform(reward_votes.begin(), reward_votes.end(), std::back_inserter(reward_votes_hashes), - [](const auto &v) { return v->getHash(); }); - const auto pbft_block = - std::make_shared(last_hash, dag_block_hash, order_hash, kNullBlockHash, propose_period, - node->getAddress(), node->getSecretKey(), std::move(reward_votes_hashes)); - // node->getPbftChain()->pushUnverifiedPbftBlock(pbft_block); - } - - EXPECT_HAPPENS({60s, 500ms}, [&](auto &ctx) { - 
WAIT_EXPECT_EQ(ctx, node->getPbftChain()->getPbftChainSize(), chain_size_before + 1); - }); -} - TEST_F(PbftManagerWithDagCreation, proposed_blocks) { auto db = std::make_shared(data_dir); ProposedBlocks proposed_blocks(db); From 996da355c790cda4aae11efce689a8e92e8b685d Mon Sep 17 00:00:00 2001 From: Matus Kysel Date: Wed, 8 Mar 2023 15:05:29 +0100 Subject: [PATCH 055/162] chore: remove HF functionality --- doc/RPC.md | 5 +- libraries/cli/src/config.cpp | 5 - libraries/config/CMakeLists.txt | 4 +- .../config/include/config/state_config.hpp | 2 +- .../core_libs/network/graphql/src/query.cpp | 2 + .../network/graphql/src/types/dag_block.cpp | 2 + libraries/core_libs/node/src/node.cpp | 24 --- tests/CMakeLists.txt | 7 - tests/final_chain_test.cpp | 6 - tests/hardfork_test.cpp | 199 ------------------ tests/rpc_test.cpp | 1 + tests/state_api_test.cpp | 1 - 12 files changed, 9 insertions(+), 249 deletions(-) delete mode 100644 tests/hardfork_test.cpp diff --git a/doc/RPC.md b/doc/RPC.md index b5edc701af..5713106b93 100644 --- a/doc/RPC.md +++ b/doc/RPC.md @@ -448,9 +448,6 @@ curl -X POST --data '{"jsonrpc":"2.0","method":"taraxa_getConfig","params":[],"i "0x0274cfffea9fa850e54c93a23042f12a87358a82": "0x141e8d17", "0x111f91441efc8c6c0edf6534970cc887e2fabaa8": "0x24048ce3d" }, - "hardforks": { - "fix_genesis_fork_block": "0x102ca0" - } }, "pbft": { "committee_size": "0x3e8", @@ -492,7 +489,7 @@ none #### Returns `OBJECT` - current chain stats object -* `pbft_period`: `QUANTITY` - current PBFT period +* `pbft_period`: `QUANTITY` - current PBFT period * `dag_blocks_executed`: `QUANTITY` - count of executed(finalized) DAG blocks * `transactions_executed`: `QUANTITY` - count of executed transactions diff --git a/libraries/cli/src/config.cpp b/libraries/cli/src/config.cpp index 65f9b02e75..7c19bcbafa 100644 --- a/libraries/cli/src/config.cpp +++ b/libraries/cli/src/config.cpp @@ -214,11 +214,6 @@ Config::Config(int argc, const char* argv[]) { auto default_genesis_json = 
tools::getGenesis((Config::ChainIdType)chain_id); // override hardforks data with one from default json addNewHardforks(genesis_json, default_genesis_json); - // add vote_eligibility_balance_step field if it is missing in the config - if (genesis_json["dpos"]["vote_eligibility_balance_step"].isNull()) { - genesis_json["dpos"]["vote_eligibility_balance_step"] = - default_genesis_json["dpos"]["vote_eligibility_balance_step"]; - } write_config_and_wallet_files(); } // Override config values with values from CLI diff --git a/libraries/config/CMakeLists.txt b/libraries/config/CMakeLists.txt index 5df88e6af7..75616c110c 100644 --- a/libraries/config/CMakeLists.txt +++ b/libraries/config/CMakeLists.txt @@ -7,7 +7,7 @@ set(HEADERS include/config/dag_config.hpp include/config/pbft_config.hpp include/config/state_config.hpp - include/config/hardfork.hpp + # include/config/hardfork.hpp ) set(SOURCES @@ -18,7 +18,7 @@ set(SOURCES src/dag_config.cpp src/pbft_config.cpp src/state_config.cpp - src/hardfork.cpp + # src/hardfork.cpp ) # Configure file with version diff --git a/libraries/config/include/config/state_config.hpp b/libraries/config/include/config/state_config.hpp index 6cd844094f..1cbda1b401 100644 --- a/libraries/config/include/config/state_config.hpp +++ b/libraries/config/include/config/state_config.hpp @@ -6,7 +6,7 @@ #include "common/encoding_rlp.hpp" #include "common/types.hpp" #include "common/vrf_wrapper.hpp" -#include "config/hardfork.hpp" +// #include "config/hardfork.hpp" namespace taraxa::state_api { diff --git a/libraries/core_libs/network/graphql/src/query.cpp b/libraries/core_libs/network/graphql/src/query.cpp index a71874823b..7606de867b 100644 --- a/libraries/core_libs/network/graphql/src/query.cpp +++ b/libraries/core_libs/network/graphql/src/query.cpp @@ -1,5 +1,7 @@ #include "graphql/query.hpp" +#include + #include "graphql/account.hpp" #include "graphql/block.hpp" #include "graphql/log.hpp" diff --git 
a/libraries/core_libs/network/graphql/src/types/dag_block.cpp b/libraries/core_libs/network/graphql/src/types/dag_block.cpp index e093a3acc8..7124be5fd2 100644 --- a/libraries/core_libs/network/graphql/src/types/dag_block.cpp +++ b/libraries/core_libs/network/graphql/src/types/dag_block.cpp @@ -1,5 +1,7 @@ #include "graphql/types/dag_block.hpp" +#include + #include "graphql/account.hpp" #include "graphql/transaction.hpp" diff --git a/libraries/core_libs/node/src/node.cpp b/libraries/core_libs/node/src/node.cpp index d3a8a26622..5622c39e30 100644 --- a/libraries/core_libs/node/src/node.cpp +++ b/libraries/core_libs/node/src/node.cpp @@ -302,30 +302,6 @@ void FullNode::start() { }, subscription_pool_); - // Subscription to process hardforks - // final_chain_->block_applying_.subscribe([&](uint64_t block_num) { - // // TODO: should have only common hardfork code calling hardfork executor - // auto &state_conf = conf_.genesis.state; - // if (state_conf.hardforks.fix_genesis_fork_block == block_num) { - // for (auto &e : state_conf.dpos->genesis_state) { - // for (auto &b : e.second) { - // b.second *= kOneTara; - // } - // } - // for (auto &b : state_conf.initial_balances) { - // b.second *= kOneTara; - // } - // // we are multiplying it by TARA precision - // state_conf.dpos->eligibility_balance_threshold *= kOneTara; - // // amount of stake per vote should be 10 times smaller than eligibility threshold - // state_conf.dpos->vote_eligibility_balance_step.assign(state_conf.dpos->eligibility_balance_threshold); - // state_conf.dpos->eligibility_balance_threshold *= 10; - // // if this part of code will be needed we need to overwrite genesis json here - // // conf_.overwrite_chain_config_in_file(); - // final_chain_->update_state_config(state_conf); - // } - // }); - vote_mgr_->setNetwork(network_); pbft_mgr_->setNetwork(network_); dag_mgr_->setNetwork(network_); diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index 93c965100c..b5b9bc2bf3 100644 --- 
a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -37,13 +37,6 @@ add_executable(full_node_test full_node_test.cpp) target_link_libraries(full_node_test test_util) add_test(full_node_test ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/full_node_test) -# add_executable(hardfork_test hardfork_test.cpp) -# target_link_libraries(hardfork_test -# core_libs -# CONAN_PKG::gtest -# ) -# add_test(hardfork_test ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/hardfork_test) - add_executable(network_test network_test.cpp) target_link_libraries(network_test test_util) add_test(network_test ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/network_test) diff --git a/tests/final_chain_test.cpp b/tests/final_chain_test.cpp index 9b78ed321b..99d2fdd58f 100644 --- a/tests/final_chain_test.cpp +++ b/tests/final_chain_test.cpp @@ -181,12 +181,6 @@ TEST_F(FinalChainTest, initial_balances) { init(); } -// TEST_F(FinalChainTest, update_state_config) { -// init(); -// cfg.genesis.state.hardforks.fix_genesis_fork_block = 2222222; -// SUT->update_state_config(cfg.genesis.state); -// } - TEST_F(FinalChainTest, contract) { auto sender_keys = dev::KeyPair::create(); const auto& addr = sender_keys.address(); diff --git a/tests/hardfork_test.cpp b/tests/hardfork_test.cpp deleted file mode 100644 index e48784ed19..0000000000 --- a/tests/hardfork_test.cpp +++ /dev/null @@ -1,199 +0,0 @@ -#include - -#include -#include -#include -#include -#include - -#include "cli/config.hpp" -#include "cli/tools.hpp" -#include "dag/dag.hpp" -#include "logger/logger.hpp" -#include "node/node.hpp" -#include "string" -#include "test_util/samples.hpp" -#include "transaction/transaction_manager.hpp" - -namespace taraxa::core_tests { - -// We need separate fixture for this tests because hardfork is overwriting config file. 
But we can't change config -// stored in global variable because values will change for next test cases -struct HardforkTest : WithDataDir { - FullNodeConfig node_cfg; - - HardforkTest() { - // creating config this way to prevent config files overwriting - auto cfg_filename = std::string("conf_taraxa1.json"); - auto p = DIR_CONF / cfg_filename; - auto w = DIR_CONF / std::string("wallet1.json"); - Json::Value test_node_wallet_json; - std::ifstream(w.string(), std::ifstream::binary) >> test_node_wallet_json; - node_cfg = FullNodeConfig(p.string(), test_node_wallet_json, data_dir / cfg_filename); - - fs::remove_all(node_cfg.data_path); - fs::create_directories(node_cfg.data_path); - - auto data_path_cfg = node_cfg.data_path / fs::path(node_cfg.json_file_name).filename(); - fs::copy_file(node_cfg.json_file_name, data_path_cfg); - node_cfg.json_file_name = data_path_cfg; - - addr_t root_node_addr("de2b1203d72d3549ee2f733b00b2789414c7cea5"); - node_cfg.genesis.state.initial_balances[root_node_addr] = 9007199254740991; - auto &dpos = *node_cfg.genesis.state.dpos; - dpos.genesis_state[root_node_addr][root_node_addr] = dpos.eligibility_balance_threshold; - // speed up block production - { - node_cfg.genesis.sortition.vrf.threshold_upper = 0xffff; - node_cfg.genesis.sortition.vdf.difficulty_min = 0; - node_cfg.genesis.sortition.vdf.difficulty_max = 3; - node_cfg.genesis.sortition.vdf.difficulty_stale = 3; - node_cfg.genesis.sortition.vdf.lambda_bound = 100; - // PBFT config - node_cfg.genesis.pbft.lambda_ms /= 20; - node_cfg.network.transaction_interval_ms /= 20; - } - } - - ~HardforkTest() { fs::remove_all(node_cfg.data_path); } - - HardforkTest(const HardforkTest &) = delete; - HardforkTest(HardforkTest &&) = delete; - HardforkTest &operator=(const HardforkTest &) = delete; - HardforkTest &operator=(HardforkTest &&) = delete; -}; - -TEST_F(HardforkTest, hardfork_override) { - auto default_json = cli::tools::getConfig(cli::Config::DEFAULT_CHAIN_ID); - auto default_hardforks 
= default_json["genesis"]["hardforks"]; - Json::Value config = default_json; - auto &state_cfg = config["genesis"]; - state_cfg["hardforks"].removeMember("fix_genesis_fork_block"); - - EXPECT_TRUE(state_cfg["hardforks"]["fix_genesis_fork_block"].isNull()); - cli::Config::addNewHardforks(config, default_json); - EXPECT_EQ(state_cfg["hardforks"], default_hardforks); - - state_cfg.removeMember("hardforks"); - EXPECT_TRUE(state_cfg["hardforks"].isNull()); - - cli::Config::addNewHardforks(config, default_json); - EXPECT_EQ(state_cfg["hardforks"], default_hardforks); -} - -TEST_F(HardforkTest, fix_genesis_fork_block_is_zero) { - auto &cfg = node_cfg.genesis; - cfg.state.hardforks.fix_genesis_fork_block = 0; - auto node = launch_nodes({node_cfg}).front(); - - auto dummy_trx = std::make_shared(1, 0, 0, 0, bytes(), node->getSecretKey(), node->getAddress()); - // broadcast dummy transaction - node->getTransactionManager()->insertTransaction(dummy_trx); - wait({100s, 500ms}, [&](auto &ctx) { - if (node->getFinalChain()->last_block_number() <= cfg.state.hardforks.fix_genesis_fork_block) { - ctx.fail(); - } - }); - EXPECT_EQ(cfg.state.initial_balances.begin()->second, - node->getConfig().genesis.state.initial_balances.begin()->second); -} - -TEST_F(HardforkTest, hardfork) { - auto &cfg = node_cfg.genesis; - cfg.state.hardforks.fix_genesis_fork_block = 10; - cfg.state.dpos->eligibility_balance_threshold = 100000; - cfg.state.dpos->vote_eligibility_balance_step.assign(cfg.state.dpos->eligibility_balance_threshold); - cfg.state.dpos->delegation_delay = 5; - cfg.state.dpos->delegation_locking_period = 5; - - auto random_node = addr_t::random(); - auto random_votes = 3; - for (auto &gb : cfg.state.initial_balances) { - gb.second = 110000000; - } - for (auto &gs : cfg.state.dpos->genesis_state) { - for (auto &b : gs.second) { - b.second = 1100000; - std::cout << b.first << ": " << b.second << std::endl; - } - gs.second.emplace(random_node, random_votes * 
cfg.state.dpos->vote_eligibility_balance_step); - } - - auto node = launch_nodes({node_cfg}).front(); - auto nonce = 0; - auto dummy_trx = [&nonce, node]() { - auto dummy_trx = std::make_shared(nonce++, 0, 0, 0, bytes(), node->getSecretKey(), node->getAddress()); - // broadcast dummy transaction - node->getTransactionManager()->insertTransaction(dummy_trx); - }; - dummy_trx(); - node->getFinalChain()->block_finalized_.subscribe([&](const std::shared_ptr &res) { - const auto block_num = res->final_chain_blk->number; - if (cfg.state.hardforks.fix_genesis_fork_block == block_num) { - return; - } - dummy_trx(); - dummy_trx(); - }); - std::map balances_before; - for (const auto &b : node->getConfig().genesis.state.initial_balances) { - auto balance = node->getFinalChain()->get_account(b.first)->balance; - balances_before.emplace(b.first, balance); - } - auto votes_count = 11; - EXPECT_EQ(votes_count + random_votes, node->getFinalChain()->dpos_eligible_total_vote_count(0)); - EXPECT_EQ(random_votes, node->getFinalChain()->dpos_eligible_vote_count(0, random_node)); - - wait({100s, 500ms}, [&](auto &ctx) { - if (node->getFinalChain()->last_block_number() < cfg.state.hardforks.fix_genesis_fork_block) { - ctx.fail(); - } - }); - - u256 dpos_genesis_sum = 0; - // Verify DPOS initial balances increasing - for (const auto &gs : node->getConfig().genesis.state.dpos->genesis_state) { - for (const auto &b : gs.second) { - EXPECT_EQ(b.second, node->getFinalChain()->get_staking_balance(b.first)); - dpos_genesis_sum += b.second; - } - } - - for (const auto &b : node->getConfig().genesis.state.initial_balances) { - auto balance_after = node->getFinalChain()->get_account(b.first)->balance; - auto res = b.second - dpos_genesis_sum; - EXPECT_EQ(res, balance_after); - } - - auto block = node->getFinalChain()->last_block_number(); - EXPECT_EQ(votes_count, node->getFinalChain()->dpos_eligible_total_vote_count(block)); - EXPECT_EQ(0, node->getFinalChain()->dpos_eligible_vote_count(block, 
random_node)); - - // check for dpos_query method - { - const auto &genesis_sender = cfg.state.dpos->genesis_state.begin()->first; - - state_api::DPOSQuery::AccountQuery acc_q; - acc_q.with_staking_balance = true; - acc_q.with_outbound_deposits = true; - acc_q.with_inbound_deposits = true; - state_api::DPOSQuery q; - q.with_eligible_count = true; - q.account_queries[genesis_sender] = acc_q; - - // auto q_res = node->getFinalChain()->dpos_query(q); - auto res = q_res.account_results[genesis_sender]; - EXPECT_EQ(res.inbound_deposits.size(), 1); - EXPECT_EQ(res.inbound_deposits.begin()->first, genesis_sender); - EXPECT_EQ(res.inbound_deposits.begin()->second, res.staking_balance); - } - - EXPECT_EQ(cfg.state.dpos->vote_eligibility_balance_step * kOneTara, - node->getConfig().genesis.state.dpos->vote_eligibility_balance_step); - EXPECT_NE(cfg.state.initial_balances.begin()->second, - node->getConfig().genesis.state.initial_balances.begin()->second); - EXPECT_NE(cfg.state.dpos->eligibility_balance_threshold, - node->getConfig().genesis.state.dpos->eligibility_balance_threshold); -} - -} // namespace taraxa::core_tests diff --git a/tests/rpc_test.cpp b/tests/rpc_test.cpp index 3675644792..85f2b98706 100644 --- a/tests/rpc_test.cpp +++ b/tests/rpc_test.cpp @@ -1,6 +1,7 @@ #include #include #include +#include #include "network/rpc/eth/Eth.h" #include "test_util/gtest.hpp" diff --git a/tests/state_api_test.cpp b/tests/state_api_test.cpp index 6c49267725..49ce8fbe72 100644 --- a/tests/state_api_test.cpp +++ b/tests/state_api_test.cpp @@ -59,7 +59,6 @@ TEST_F(StateAPITest, DISABLED_dpos_integration) { // dpos_cfg.eligibility_balance_threshold = 1000; // dpos_cfg.vote_eligibility_balance_step = 1000; // addr_1_bal_expected -= dpos_cfg.genesis_state[make_addr(1)][make_addr(1)] = dpos_cfg.eligibility_balance_threshold; - // chain_cfg.hardforks.fix_genesis_fork_block = 0; // uint64_t curr_blk = 0; // StateAPI SUT([&](auto /*n*/) -> h256 { assert(false); }, // From 
61325a8069495967fbd0d079892d36a267d65cf0 Mon Sep 17 00:00:00 2001 From: JakubFornadel Date: Tue, 7 Mar 2023 13:18:25 +0100 Subject: [PATCH 056/162] chore: apart from previous period cert votes and previous round next votes process only votes with period >= current period & round >= current round --- .../common/ext_votes_packet_handler.hpp | 8 ------ .../common/ext_votes_packet_handler.cpp | 27 +++++-------------- 2 files changed, 6 insertions(+), 29 deletions(-) diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/common/ext_votes_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/common/ext_votes_packet_handler.hpp index 7f140fc72d..0ba977ef2c 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets_handlers/common/ext_votes_packet_handler.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/common/ext_votes_packet_handler.hpp @@ -63,14 +63,6 @@ class ExtVotesPacketHandler : public PacketHandler { const std::shared_ptr& peer, bool validate_max_round_step); - /** - * @brief Common validation for all types of votes - * - * @param vote to be validated - * @return vote validation passed, otherwise - */ - std::pair validateVote(const std::shared_ptr& vote) const; - /** * @brief Validates provided vote if voted value == provided block * diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/common/ext_votes_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/common/ext_votes_packet_handler.cpp index cc5fd3e42a..d90bcb4326 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/common/ext_votes_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/common/ext_votes_packet_handler.cpp @@ -140,21 +140,6 @@ std::pair ExtVotesPacketHandler::validateVotePeriodRoundStep( return {true, ""}; } -std::pair ExtVotesPacketHandler::validateVote(const std::shared_ptr &vote) const { - // Check is vote is 
unique per period, round & step & voter -> each address can generate just 1 vote - // (for a value that isn't NBH) per period, round & step - if (auto unique_vote_validation = vote_mgr_->isUniqueVote(vote); !unique_vote_validation.first) { - return unique_vote_validation; - } - - const auto vote_valid = vote_mgr_->validateVote(vote); - if (!vote_valid.first) { - LOG(log_er_) << "Vote \"dpos\" validation failed: " << vote_valid.second; - } - - return vote_valid; -} - bool ExtVotesPacketHandler::validateVoteAndBlock(const std::shared_ptr &vote, const std::shared_ptr &pbft_block) const { if (pbft_block->getBlockHash() != vote->getBlockHash()) { @@ -169,15 +154,15 @@ bool ExtVotesPacketHandler::validateVoteAndBlock(const std::shared_ptr &vo bool ExtVotesPacketHandler::isPbftRelevantVote(const std::shared_ptr &vote) const { const auto [current_pbft_round, current_pbft_period] = pbft_mgr_->getPbftRoundAndPeriod(); - // Previous round next vote - if (vote->getPeriod() == current_pbft_period && (current_pbft_round - 1) == vote->getRound() && - vote->getType() == PbftVoteTypes::next_vote) { + if (vote->getPeriod() >= current_pbft_period && vote->getRound() >= current_pbft_round) { + // Standard current or future vote return true; - } else if (vote->getPeriod() >= current_pbft_period) { - // Standard vote + } else if (vote->getPeriod() == current_pbft_period && vote->getRound() == (current_pbft_round - 1) && + vote->getType() == PbftVoteTypes::next_vote) { + // Previous round next vote return true; } else if (vote->getPeriod() == current_pbft_period - 1 && vote->getType() == PbftVoteTypes::cert_vote) { - // Previous round cert vote - potential reward vote + // Previous period cert vote - potential reward vote return true; } From b561223387f8b13cf67db561004d8869a204b6d6 Mon Sep 17 00:00:00 2001 From: JakubFornadel Date: Tue, 7 Mar 2023 13:20:17 +0100 Subject: [PATCH 057/162] fix: process votes bundle even if some of the votes were already processed before --- 
.../src/tarcap/packets_handlers/votes_sync_packet_handler.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/votes_sync_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/votes_sync_packet_handler.cpp index 4f747848f2..88faf800ce 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/votes_sync_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/votes_sync_packet_handler.cpp @@ -58,7 +58,7 @@ void VotesSyncPacketHandler::process(const PacketData &packet_data, const std::s // Do not process vote that has already been validated if (vote_mgr_->voteAlreadyValidated(vote->getHash())) { LOG(log_dg_) << "Received vote " << vote->getHash() << " has already been validated"; - return; + continue; } // Next votes bundle can contain votes for kNullBlockHash as well as some specific block hash From ff83ae2ec1bc770c329d800bc6f823899636449b Mon Sep 17 00:00:00 2001 From: rjonczy Date: Thu, 9 Mar 2023 10:51:35 +0100 Subject: [PATCH 058/162] Empty-Commit From 3708c64d0287de040706e834ffc12870f8e41d37 Mon Sep 17 00:00:00 2001 From: mfrankovi Date: Thu, 9 Mar 2023 13:33:50 +0100 Subject: [PATCH 059/162] chore: optimize prune --- submodules/taraxa-evm | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/submodules/taraxa-evm b/submodules/taraxa-evm index 760f7ff631..f75cd5f637 160000 --- a/submodules/taraxa-evm +++ b/submodules/taraxa-evm @@ -1 +1 @@ -Subproject commit 760f7ff631a264b132b7771f7a4b423154f2c7e0 +Subproject commit f75cd5f6374188da3700611e50c390b89f216926 From 9a4f4afcc7517a2a46bf09c50f666fad0b64c88f Mon Sep 17 00:00:00 2001 From: Matus Kysel Date: Thu, 9 Mar 2023 16:22:38 +0100 Subject: [PATCH 060/162] feat: remove duplicated columns from DB --- .../consensus/src/final_chain/final_chain.cpp | 21 +++++-------------- .../storage/include/storage/storage.hpp | 4 +--- libraries/core_libs/storage/src/storage.cpp | 12 
+++++++++-- tests/final_chain_test.cpp | 8 +++---- tests/transaction_test.cpp | 2 -- 5 files changed, 19 insertions(+), 28 deletions(-) diff --git a/libraries/core_libs/consensus/src/final_chain/final_chain.cpp b/libraries/core_libs/consensus/src/final_chain/final_chain.cpp index 4d7c2e2a8a..5c4d2d8b34 100644 --- a/libraries/core_libs/consensus/src/final_chain/final_chain.cpp +++ b/libraries/core_libs/consensus/src/final_chain/final_chain.cpp @@ -309,16 +309,8 @@ class FinalChainImpl final : public FinalChain { chunk_to_alter[index % c_bloomIndexSize] |= log_bloom_for_index; db_->insert(batch, DB::Columns::final_chain_log_blooms_index, chunk_id, util::rlp_enc(rlp_strm, chunk_to_alter)); } - TransactionLocation tl{blk_header.number}; - for (auto const& trx : transactions) { - db_->insert(batch, DB::Columns::final_chain_transaction_location_by_hash, trx->getHash(), - util::rlp_enc(rlp_strm, tl)); - ++tl.index; - } db_->insert(batch, DB::Columns::final_chain_transaction_hashes_by_blk_number, blk_header.number, dev::rlp(hashes_from_transactions(transactions))); - db_->insert(batch, DB::Columns::final_chain_transaction_count_by_blk_number, blk_header.number, - transactions.size()); db_->insert(batch, DB::Columns::final_chain_blk_hash_by_number, blk_header.number, blk_header.hash); db_->insert(batch, DB::Columns::final_chain_blk_number_by_hash, blk_header.hash, blk_header.number); db_->insert(batch, DB::Columns::final_chain_meta, DBMetaKeys::LAST_NUMBER, blk_header.number); @@ -343,14 +335,12 @@ class FinalChainImpl final : public FinalChain { return block_headers_cache_.get(*n); } - std::optional transaction_location(h256 const& trx_hash) const override { - auto raw = db_->lookup(trx_hash, DB::Columns::final_chain_transaction_location_by_hash); - if (raw.empty()) { + std::optional transaction_location(const h256& trx_hash) const override { + const auto period = db_->getTransactionPeriod(trx_hash); + if (!period) { return {}; } - TransactionLocation ret; - 
ret.rlp(dev::RLP(raw)); - return ret; + return TransactionLocation{period->first, period->second}; } std::optional transaction_receipt(h256 const& trx_h) const override { @@ -364,8 +354,7 @@ class FinalChainImpl final : public FinalChain { } uint64_t transactionCount(std::optional n = {}) const override { - return db_->lookup_int(last_if_absent(n), DB::Columns::final_chain_transaction_count_by_blk_number) - .value_or(0); + return db_->getTransactionCount(last_if_absent(n)); } std::shared_ptr transaction_hashes(std::optional n = {}) const override { diff --git a/libraries/core_libs/storage/include/storage/storage.hpp b/libraries/core_libs/storage/include/storage/storage.hpp index 5f3ae3fbd1..7f78647eba 100644 --- a/libraries/core_libs/storage/include/storage/storage.hpp +++ b/libraries/core_libs/storage/include/storage/storage.hpp @@ -111,10 +111,7 @@ class DbStorage : public std::enable_shared_from_this { COLUMN(dag_block_period); COLUMN_W_COMP(proposal_period_levels_map, getIntComparator()); COLUMN(final_chain_meta); - COLUMN(final_chain_transaction_location_by_hash); - COLUMN(final_chain_replay_protection); COLUMN(final_chain_transaction_hashes_by_blk_number); - COLUMN(final_chain_transaction_count_by_blk_number); COLUMN(final_chain_blk_by_number); COLUMN(final_chain_blk_hash_by_number); COLUMN(final_chain_blk_number_by_hash); @@ -231,6 +228,7 @@ class DbStorage : public std::enable_shared_from_this { void addTransactionPeriodToBatch(Batch& write_batch, trx_hash_t const& trx, PbftPeriod period, uint32_t position); std::optional> getTransactionPeriod(trx_hash_t const& hash) const; std::unordered_map getAllTransactionPeriod(); + uint64_t getTransactionCount(PbftPeriod period) const; // PBFT manager uint32_t getPbftMgrField(PbftMgrField field); diff --git a/libraries/core_libs/storage/src/storage.cpp b/libraries/core_libs/storage/src/storage.cpp index 98b510f0ed..61cec78330 100644 --- a/libraries/core_libs/storage/src/storage.cpp +++ 
b/libraries/core_libs/storage/src/storage.cpp @@ -2,6 +2,7 @@ #include #include +#include #include #include "config/version.hpp" @@ -451,7 +452,6 @@ void DbStorage::clearPeriodDataHistory(PbftPeriod end_period) { auto hash = trx_hash_t(reinterpret_cast(trx_hashes_raw.data() + i * trx_hash_t::size), trx_hash_t::ConstructFromPointer); remove(write_batch, Columns::final_chain_receipt_by_trx_hash, hash); - remove(write_batch, Columns::final_chain_transaction_location_by_hash, hash); } remove(write_batch, Columns::final_chain_transaction_hashes_by_blk_number, EthBlockNumber(period)); if ((period - start_period + 1) % max_batch_delete == 0) { @@ -466,7 +466,6 @@ void DbStorage::clearPeriodDataHistory(PbftPeriod end_period) { // data in the database and free disk space db_->CompactRange({}, handle(Columns::period_data), &start_slice, &end_slice); db_->CompactRange({}, handle(Columns::final_chain_receipt_by_trx_hash), nullptr, nullptr); - db_->CompactRange({}, handle(Columns::final_chain_transaction_location_by_hash), nullptr, nullptr); db_->CompactRange({}, handle(Columns::final_chain_transaction_hashes_by_blk_number), nullptr, nullptr); } } @@ -608,6 +607,15 @@ std::shared_ptr DbStorage::getTransaction(trx_hash_t const& hash) { return nullptr; } +uint64_t DbStorage::getTransactionCount(PbftPeriod period) const { + auto period_data = getPeriodDataRaw(period); + if (period_data.size()) { + auto period_data_rlp = dev::RLP(period_data); + return period_data_rlp[TRANSACTIONS_POS_IN_PERIOD_DATA].itemCount(); + } + return 0; +} + std::pair, trx_hash_t> DbStorage::getFinalizedTransactions( std::vector const& trx_hashes) const { // Map of period to position of transactions within a period diff --git a/tests/final_chain_test.cpp b/tests/final_chain_test.cpp index 99d2fdd58f..daaa0b45b9 100644 --- a/tests/final_chain_test.cpp +++ b/tests/final_chain_test.cpp @@ -45,17 +45,16 @@ struct FinalChainTest : WithDataDir { SUT = nullptr; SUT = NewFinalChain(db, cfg); std::vector 
trx_hashes; - int pos = 0; + ++expected_blk_num; for (const auto& trx : trxs) { - db->saveTransactionPeriod(trx->getHash(), 1, pos++); trx_hashes.emplace_back(trx->getHash()); } DagBlock dag_blk({}, {}, {}, trx_hashes, {}, {}, secret_t::random()); db->saveDagBlock(dag_blk); std::vector reward_votes_hashes; auto pbft_block = - std::make_shared(kNullBlockHash, kNullBlockHash, kNullBlockHash, kNullBlockHash, 1, addr_t::random(), - dev::KeyPair::create().secret(), std::move(reward_votes_hashes)); + std::make_shared(kNullBlockHash, kNullBlockHash, kNullBlockHash, kNullBlockHash, expected_blk_num, + addr_t::random(), dev::KeyPair::create().secret(), std::move(reward_votes_hashes)); std::vector> votes; PeriodData period_data(pbft_block, votes); period_data.dag_blocks.push_back(dag_blk); @@ -67,7 +66,6 @@ struct FinalChainTest : WithDataDir { db->commitWriteBatch(batch); auto result = SUT->finalize(std::move(period_data), {dag_blk.getHash()}).get(); - ++expected_blk_num; const auto& blk_h = *result->final_chain_blk; EXPECT_EQ(util::rlp_enc(blk_h), util::rlp_enc(*SUT->block_header(blk_h.number))); EXPECT_EQ(util::rlp_enc(blk_h), util::rlp_enc(*SUT->block_header())); diff --git a/tests/transaction_test.cpp b/tests/transaction_test.cpp index f6e2ebfc94..53637d89a0 100644 --- a/tests/transaction_test.cpp +++ b/tests/transaction_test.cpp @@ -211,8 +211,6 @@ TEST_F(TransactionTest, transaction_low_nonce) { SharedTransactions trxs{trx_1, trx_2}; period_data.transactions = trxs; auto batch = db->createWriteBatch(); - db->saveTransactionPeriod(trx_1->getHash(), 1, 0); - db->saveTransactionPeriod(trx_2->getHash(), 1, 0); db->savePeriodData(period_data, batch); db->commitWriteBatch(batch); final_chain->finalize(std::move(period_data), {dag_blk.getHash()}).get(); From 70f0be73e81da32bf58edece65d25335b21203c6 Mon Sep 17 00:00:00 2001 From: mfrankovi Date: Fri, 10 Mar 2023 12:08:58 +0100 Subject: [PATCH 061/162] chore: asynchronous prune of state_db --- 
.../consensus/src/final_chain/final_chain.cpp | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/libraries/core_libs/consensus/src/final_chain/final_chain.cpp b/libraries/core_libs/consensus/src/final_chain/final_chain.cpp index 4d7c2e2a8a..8d0b0f4fb4 100644 --- a/libraries/core_libs/consensus/src/final_chain/final_chain.cpp +++ b/libraries/core_libs/consensus/src/final_chain/final_chain.cpp @@ -23,6 +23,7 @@ class FinalChainImpl final : public FinalChain { // It is not prepared to use more then 1 thread. Examine it if you want to change threads count boost::asio::thread_pool executor_thread_{1}; + boost::asio::thread_pool prune_thread_{1}; std::atomic num_executed_dag_blk_ = 0; std::atomic num_executed_trx_ = 0; @@ -124,7 +125,10 @@ class FinalChainImpl final : public FinalChain { delegation_delay_ = config.genesis.state.dpos.delegation_delay; } - void stop() override { executor_thread_.join(); } + void stop() override { + executor_thread_.join(); + prune_thread_.join(); + } std::future> finalize(PeriodData&& new_blk, std::vector&& finalized_dag_blk_hashes, @@ -261,10 +265,13 @@ class FinalChainImpl final : public FinalChain { block_to_prune = get_block_header(block_to_prune->number - 1); } - state_api_.prune(last_block_to_keep->state_root, state_root_to_prune, last_block_to_keep->number); db_->compactColumn(DB::Columns::final_chain_blk_by_number); db_->compactColumn(DB::Columns::final_chain_blk_hash_by_number); db_->compactColumn(DB::Columns::final_chain_blk_number_by_hash); + + boost::asio::post(prune_thread_, [this, last_block_to_keep, state_root_to_prune]() { + state_api_.prune(last_block_to_keep->state_root, state_root_to_prune, last_block_to_keep->number); + }); } } From 672388c278b93506cef381c5f0d93dda4a5e960f Mon Sep 17 00:00:00 2001 From: JakubFornadel Date: Tue, 7 Mar 2023 15:02:27 +0100 Subject: [PATCH 062/162] chore: remove unrelevant test + other code related to this test --- .../consensus/include/pbft/pbft_manager.hpp | 15 
-- .../consensus/src/pbft/pbft_manager.cpp | 148 +++++------------- tests/pbft_manager_test.cpp | 55 ------- 3 files changed, 40 insertions(+), 178 deletions(-) diff --git a/libraries/core_libs/consensus/include/pbft/pbft_manager.hpp b/libraries/core_libs/consensus/include/pbft/pbft_manager.hpp index f0bc305285..5893020ab1 100644 --- a/libraries/core_libs/consensus/include/pbft/pbft_manager.hpp +++ b/libraries/core_libs/consensus/include/pbft/pbft_manager.hpp @@ -254,11 +254,6 @@ class PbftManager : public std::enable_shared_from_this { */ void resume(); - /** - * @brief Resume PBFT daemon on single state. Only to be used for unit tests - */ - void resumeSingleState(); - /** * @brief Get a proposed PBFT block based on specified period and block hash * @param period @@ -331,16 +326,6 @@ class PbftManager : public std::enable_shared_from_this { */ void sleep_(); - /** - * @brief Go to next PBFT state. Only to be used for unit tests - */ - void doNextState_(); - - /** - * @brief Set next PBFT state - */ - void setNextState_(); - /** * @brief Set PBFT filter state */ diff --git a/libraries/core_libs/consensus/src/pbft/pbft_manager.cpp b/libraries/core_libs/consensus/src/pbft/pbft_manager.cpp index 1479312d9f..ec41370b89 100644 --- a/libraries/core_libs/consensus/src/pbft/pbft_manager.cpp +++ b/libraries/core_libs/consensus/src/pbft/pbft_manager.cpp @@ -125,65 +125,6 @@ void PbftManager::resume() { daemon_ = std::make_unique([this]() { run(); }); } -// Only to be used for tests... -void PbftManager::resumeSingleState() { - if (!stopped_.load()) daemon_->join(); - stopped_ = false; - - if (step_ == 1) { - state_ = value_proposal_state; - } else if (step_ == 2) { - state_ = filter_state; - } else if (step_ == 3) { - state_ = certify_state; - } else if (step_ % 2 == 0) { - state_ = finish_state; - } else { - state_ = finish_polling_state; - } - - doNextState_(); -} - -// Only to be used for tests... 
-void PbftManager::doNextState_() { - auto initial_state = state_; - - while (!stopped_ && state_ == initial_state) { - if (stateOperations_()) { - continue; - } - - // PBFT states - switch (state_) { - case value_proposal_state: - proposeBlock_(); - break; - case filter_state: - identifyBlock_(); - break; - case certify_state: - certifyBlock_(); - break; - case finish_state: - firstFinish_(); - break; - case finish_polling_state: - secondFinish_(); - break; - default: - LOG(log_er_) << "Unknown PBFT state " << state_; - assert(false); - } - - setNextState_(); - if (state_ != initial_state) { - return; - } - sleep_(); - } -} - /* When a node starts up it has to sync to the current phase (type of block * being generated) and step (within the block generation round) * Five step loop for block generation over three phases of blocks @@ -201,25 +142,41 @@ void PbftManager::run() { switch (state_) { case value_proposal_state: proposeBlock_(); + setFilterState_(); break; case filter_state: identifyBlock_(); + setCertifyState_(); break; case certify_state: certifyBlock_(); + if (go_finish_state_) { + setFinishState_(); + } else { + next_step_time_ms_ += kPollingIntervalMs; + } break; case finish_state: firstFinish_(); + setFinishPollingState_(); break; case finish_polling_state: secondFinish_(); + if (loop_back_finish_state_) { + loopBackFinishState_(); + + // Print voting summary for current round + printVotingSummary(); + } else { + next_step_time_ms_ += kPollingIntervalMs; + } break; default: LOG(log_er_) << "Unknown PBFT state " << state_; assert(false); } - setNextState_(); + LOG(log_tr_) << "next step time(ms): " << next_step_time_ms_.count() << ", step " << step_; sleep_(); } } @@ -289,23 +246,33 @@ void PbftManager::setPbftStep(PbftStep pbft_step) { db_->savePbftMgrField(PbftMgrField::Step, pbft_step); step_ = pbft_step; - if (step_ > kMaxSteps && LAMBDA_backoff_multiple < 8) { - // Note: We calculate the lambda for a step independently of prior steps - // in case 
missed earlier steps. - std::uniform_int_distribution distribution(0, step_ - kMaxSteps); - auto lambda_random_count = distribution(random_engine_); - LAMBDA_backoff_multiple = 2 * LAMBDA_backoff_multiple; - LAMBDA_ms = LAMBDA_ms_MIN * (LAMBDA_backoff_multiple + lambda_random_count); + if (step_ > kMaxSteps && LAMBDA_ms < kMaxLambda) { + // Note: We calculate the lambda for a step independently of prior steps in case missed earlier steps. + LAMBDA_ms *= 2; if (LAMBDA_ms > kMaxLambda) { LAMBDA_ms = kMaxLambda; } - - LOG(log_dg_) << "Surpassed max steps, exponentially backing off lambda to " << LAMBDA_ms.count() << " ms in round " - << getPbftRound() << ", step " << step_; - } else { - LAMBDA_ms = LAMBDA_ms_MIN; - LAMBDA_backoff_multiple = 1; } + + // TODO: remove this block of code and variables + // if (step_ > kMaxSteps && LAMBDA_backoff_multiple < 8) { + // // Note: We calculate the lambda for a step independently of prior steps + // // in case missed earlier steps. + // std::uniform_int_distribution distribution(0, step_ - kMaxSteps); + // auto lambda_random_count = distribution(random_engine_); + // LAMBDA_backoff_multiple = 2 * LAMBDA_backoff_multiple; + // LAMBDA_ms = LAMBDA_ms_MIN * (LAMBDA_backoff_multiple + lambda_random_count); + // if (LAMBDA_ms > kMaxLambda) { + // LAMBDA_ms = kMaxLambda; + // } + // + // LOG(log_dg_) << "Surpassed max steps, exponentially backing off lambda to " << LAMBDA_ms.count() << " ms in + // round " + // << getPbftRound() << ", step " << step_; + // } else { + // LAMBDA_ms = LAMBDA_ms_MIN; + // LAMBDA_backoff_multiple = 1; + // } } void PbftManager::resetStep() { @@ -536,38 +503,6 @@ void PbftManager::initialState() { : "no value"); } -void PbftManager::setNextState_() { - switch (state_) { - case value_proposal_state: - setFilterState_(); - break; - case filter_state: - setCertifyState_(); - break; - case certify_state: - if (go_finish_state_) { - setFinishState_(); - } else { - next_step_time_ms_ += kPollingIntervalMs; - } - 
break; - case finish_state: - setFinishPollingState_(); - break; - case finish_polling_state: - if (loop_back_finish_state_) { - loopBackFinishState_(); - } else { - next_step_time_ms_ += kPollingIntervalMs; - } - break; - default: - LOG(log_er_) << "Unknown PBFT state " << state_; - assert(false); - } - LOG(log_tr_) << "next step time(ms): " << next_step_time_ms_.count() << ", step " << step_; -} - void PbftManager::setFilterState_() { state_ = filter_state; setPbftStep(step_ + 1); @@ -610,9 +545,6 @@ void PbftManager::loopBackFinishState_() { already_next_voted_null_block_hash_ = false; assert(step_ >= startingStepInRound_); next_step_time_ms_ += kPollingIntervalMs; - - // Print voting summary for current round - printVotingSummary(); } void PbftManager::broadcastVotes(bool rebroadcast) { diff --git a/tests/pbft_manager_test.cpp b/tests/pbft_manager_test.cpp index aca5055b8a..adcaa4b20f 100644 --- a/tests/pbft_manager_test.cpp +++ b/tests/pbft_manager_test.cpp @@ -192,61 +192,6 @@ struct PbftManagerTest : NodesTest { } }; -// Test that after some amount of elapsed time will not continue soft voting for same value -TEST_F(PbftManagerTest, terminate_soft_voting_pbft_block) { - auto node_cfgs = make_node_cfgs(1, 1, 20); - makeNodesWithNonces(node_cfgs); - - auto pbft_mgr = nodes[0]->getPbftManager(); - auto vote_mgr = nodes[0]->getVoteManager(); - pbft_mgr->stop(); - std::cout << "PBFT manager stopped" << std::endl; - - // Generate bogus votes - auto stale_block_hash = blk_hash_t("0000000100000000000000000000000000000000000000000000000000000000"); - auto propose_vote = vote_mgr->generateVote(stale_block_hash, PbftVoteTypes::propose_vote, 2, 2, 1); - propose_vote->calculateWeight(1, 1, 1); - vote_mgr->addVerifiedVote(propose_vote); - - // uint64_t time_till_stale_ms = 1000; - // std::cout << "Set max wait for soft voted value to " << time_till_stale_ms << "ms..." 
<< std::endl; - // pbft_mgr->setMaxWaitForSoftVotedBlock_ms(time_till_stale_ms); - // pbft_mgr->setMaxWaitForNextVotedBlock_ms(std::numeric_limits::max()); - - auto sleep_time = 1100; - std::cout << "Sleep " << sleep_time << "ms so that last soft voted value of " << stale_block_hash.abridged() - << " becomes stale..." << std::endl; - taraxa::thisThreadSleepForMilliSeconds(sleep_time); - - std::cout << "Initialize PBFT manager at round 2 step 2" << std::endl; - pbft_mgr->setPbftRound(2); - pbft_mgr->setPbftStep(2); - pbft_mgr->resumeSingleState(); - std::cout << "Into cert voted state in round 2..." << std::endl; - EXPECT_EQ(pbft_mgr->getPbftRound(), 2); - EXPECT_EQ(pbft_mgr->getPbftStep(), 3); - - std::cout << "Check did not soft vote for stale soft voted value of " << stale_block_hash.abridged() << "..." - << std::endl; - bool skipped_soft_voting = true; - auto votes = vote_mgr->getVerifiedVotes(); - for (const auto &v : votes) { - if (PbftVoteTypes::soft_vote == v->getType()) { - if (v->getBlockHash() == stale_block_hash) { - skipped_soft_voting = false; - } - std::cout << "Found soft voted value of " << v->getBlockHash().abridged() << " in round 2" << std::endl; - } - } - EXPECT_EQ(skipped_soft_voting, true); - - auto start_round = pbft_mgr->getPbftRound(); - pbft_mgr->resume(); - - std::cout << "Wait ensure node is still advancing in rounds... 
" << std::endl; - EXPECT_HAPPENS({60s, 50ms}, [&](auto &ctx) { WAIT_EXPECT_NE(ctx, start_round, pbft_mgr->getPbftRound()) }); -} - // Test that after some amount of elapsed time will give up on the next voting value if corresponding DAG blocks can't // be found From 2426ddb2c9bf3aba412a0ef4b2909bca15ad760e Mon Sep 17 00:00:00 2001 From: JakubFornadel Date: Tue, 7 Mar 2023 15:02:56 +0100 Subject: [PATCH 063/162] chore: wait 100ms before going into second finishing steo --- libraries/core_libs/consensus/src/pbft/pbft_manager.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/libraries/core_libs/consensus/src/pbft/pbft_manager.cpp b/libraries/core_libs/consensus/src/pbft/pbft_manager.cpp index ec41370b89..308a25a5a9 100644 --- a/libraries/core_libs/consensus/src/pbft/pbft_manager.cpp +++ b/libraries/core_libs/consensus/src/pbft/pbft_manager.cpp @@ -532,6 +532,7 @@ void PbftManager::setFinishPollingState_() { already_next_voted_value_ = false; already_next_voted_null_block_hash_ = false; second_finish_step_start_datetime_ = std::chrono::system_clock::now(); + next_step_time_ms_ += kPollingIntervalMs; } void PbftManager::loopBackFinishState_() { From 420d0120c8cdc93455e565ca2123a4a9988492fa Mon Sep 17 00:00:00 2001 From: JakubFornadel Date: Tue, 7 Mar 2023 15:27:00 +0100 Subject: [PATCH 064/162] refactor: rename variables and remove unsued code --- .../consensus/include/pbft/pbft_manager.hpp | 9 ++-- .../consensus/src/pbft/pbft_manager.cpp | 53 ++++++------------- 2 files changed, 19 insertions(+), 43 deletions(-) diff --git a/libraries/core_libs/consensus/include/pbft/pbft_manager.hpp b/libraries/core_libs/consensus/include/pbft/pbft_manager.hpp index 5893020ab1..7c5ba840c3 100644 --- a/libraries/core_libs/consensus/include/pbft/pbft_manager.hpp +++ b/libraries/core_libs/consensus/include/pbft/pbft_manager.hpp @@ -206,7 +206,7 @@ class PbftManager : public std::enable_shared_from_this { * @brief Get PBFT lambda. 
PBFT lambda is a timer clock * @return PBFT lambda */ - std::chrono::milliseconds getPbftInitialLambda() const { return LAMBDA_ms_MIN; } + std::chrono::milliseconds getPbftInitialLambda() const { return kMinLambda; } /** * @brief Calculate DAG blocks ordering hash @@ -532,9 +532,8 @@ class PbftManager : public std::enable_shared_from_this { const addr_t node_addr_; const secret_t node_sk_; - const std::chrono::milliseconds LAMBDA_ms_MIN; - std::chrono::milliseconds LAMBDA_ms{0}; - uint64_t LAMBDA_backoff_multiple = 1; + const std::chrono::milliseconds kMinLambda; // [ms] + std::chrono::milliseconds lambda_{0}; // [ms] const std::chrono::milliseconds kMaxLambda{60000}; // in ms, max lambda is 1 minutes const uint32_t kBroadcastVotesLambdaTime = 20; @@ -542,8 +541,6 @@ class PbftManager : public std::enable_shared_from_this { uint32_t broadcast_votes_counter_ = 1; uint32_t rebroadcast_votes_counter_ = 1; - std::default_random_engine random_engine_{std::random_device{}()}; - PbftStates state_ = value_proposal_state; std::atomic round_ = 1; PbftStep step_ = 1; diff --git a/libraries/core_libs/consensus/src/pbft/pbft_manager.cpp b/libraries/core_libs/consensus/src/pbft/pbft_manager.cpp index 308a25a5a9..9bb5c14803 100644 --- a/libraries/core_libs/consensus/src/pbft/pbft_manager.cpp +++ b/libraries/core_libs/consensus/src/pbft/pbft_manager.cpp @@ -41,7 +41,7 @@ PbftManager::PbftManager(const PbftConfig &conf, const blk_hash_t &dag_genesis_b final_chain_(std::move(final_chain)), node_addr_(std::move(node_addr)), node_sk_(std::move(node_sk)), - LAMBDA_ms_MIN(conf.lambda_ms), + kMinLambda(conf.lambda_ms), dag_genesis_block_hash_(dag_genesis_block_hash), config_(conf), proposed_blocks_(db_), @@ -246,41 +246,20 @@ void PbftManager::setPbftStep(PbftStep pbft_step) { db_->savePbftMgrField(PbftMgrField::Step, pbft_step); step_ = pbft_step; - if (step_ > kMaxSteps && LAMBDA_ms < kMaxLambda) { + if (step_ > kMaxSteps && lambda_ < kMaxLambda) { // Note: We calculate the lambda for 
a step independently of prior steps in case missed earlier steps. - LAMBDA_ms *= 2; - if (LAMBDA_ms > kMaxLambda) { - LAMBDA_ms = kMaxLambda; + // TODO: do we need some randomness here ? + lambda_ *= 2; + if (lambda_ > kMaxLambda) { + lambda_ = kMaxLambda; } } - - // TODO: remove this block of code and variables - // if (step_ > kMaxSteps && LAMBDA_backoff_multiple < 8) { - // // Note: We calculate the lambda for a step independently of prior steps - // // in case missed earlier steps. - // std::uniform_int_distribution distribution(0, step_ - kMaxSteps); - // auto lambda_random_count = distribution(random_engine_); - // LAMBDA_backoff_multiple = 2 * LAMBDA_backoff_multiple; - // LAMBDA_ms = LAMBDA_ms_MIN * (LAMBDA_backoff_multiple + lambda_random_count); - // if (LAMBDA_ms > kMaxLambda) { - // LAMBDA_ms = kMaxLambda; - // } - // - // LOG(log_dg_) << "Surpassed max steps, exponentially backing off lambda to " << LAMBDA_ms.count() << " ms in - // round " - // << getPbftRound() << ", step " << step_; - // } else { - // LAMBDA_ms = LAMBDA_ms_MIN; - // LAMBDA_backoff_multiple = 1; - // } } void PbftManager::resetStep() { step_ = 1; startingStepInRound_ = 1; - - LAMBDA_ms = LAMBDA_ms_MIN; - LAMBDA_backoff_multiple = 1; + lambda_ = kMinLambda; } bool PbftManager::tryPushCertVotesBlock() { @@ -425,7 +404,7 @@ void PbftManager::initialState() { // Initial PBFT state // Time constants... 
- LAMBDA_ms = LAMBDA_ms_MIN; + lambda_ = kMinLambda; const auto current_pbft_period = getPbftPeriod(); const auto current_pbft_round = db_->getPbftMgrField(PbftMgrField::Round); @@ -506,20 +485,20 @@ void PbftManager::initialState() { void PbftManager::setFilterState_() { state_ = filter_state; setPbftStep(step_ + 1); - next_step_time_ms_ = 2 * LAMBDA_ms; + next_step_time_ms_ = 2 * lambda_; } void PbftManager::setCertifyState_() { state_ = certify_state; setPbftStep(step_ + 1); - next_step_time_ms_ = 2 * LAMBDA_ms; + next_step_time_ms_ = 2 * lambda_; } void PbftManager::setFinishState_() { LOG(log_dg_) << "Will go to first finish State"; state_ = finish_state; setPbftStep(step_ + 1); - next_step_time_ms_ = 4 * LAMBDA_ms; + next_step_time_ms_ = 4 * lambda_; } void PbftManager::setFinishPollingState_() { @@ -608,12 +587,12 @@ bool PbftManager::stateOperations_() { const auto round_elapsed_time = elapsedTimeInMs(current_round_start_datetime_); - if (round_elapsed_time / LAMBDA_ms_MIN > kRebroadcastVotesLambdaTime * rebroadcast_votes_counter_) { + if (round_elapsed_time / kMinLambda > kRebroadcastVotesLambdaTime * rebroadcast_votes_counter_) { broadcastVotes(true); rebroadcast_votes_counter_++; // If there was a rebroadcast no need to do next broadcast either broadcast_votes_counter_++; - } else if (round_elapsed_time / LAMBDA_ms_MIN > kBroadcastVotesLambdaTime * broadcast_votes_counter_) { + } else if (round_elapsed_time / kMinLambda > kBroadcastVotesLambdaTime * broadcast_votes_counter_) { broadcastVotes(false); broadcast_votes_counter_++; } @@ -838,14 +817,14 @@ void PbftManager::certifyBlock_() { LOG(log_dg_) << "PBFT certifying state in period " << period << ", round " << round; const auto elapsed_time_in_round = elapsedTimeInMs(current_round_start_datetime_); - go_finish_state_ = elapsed_time_in_round > 4 * LAMBDA_ms - kPollingIntervalMs; + go_finish_state_ = elapsed_time_in_round > 4 * lambda_ - kPollingIntervalMs; if (go_finish_state_) { LOG(log_dg_) << "Step 3 
expired, will go to step 4 in period " << period << ", round " << round; return; } // Should not happen, add log here for safety checking - if (elapsed_time_in_round < 2 * LAMBDA_ms) { + if (elapsed_time_in_round < 2 * lambda_) { LOG(log_er_) << "PBFT Reached step 3 too quickly after only " << elapsed_time_in_round.count() << " [ms] in period " << period << ", round " << round; return; @@ -1015,7 +994,7 @@ void PbftManager::secondFinish_() { // Try to next vote 2t+1 next voted null block from previous round next_vote_null_block(); - loop_back_finish_state_ = elapsedTimeInMs(second_finish_step_start_datetime_) > 2 * (LAMBDA_ms - kPollingIntervalMs); + loop_back_finish_state_ = elapsedTimeInMs(second_finish_step_start_datetime_) > 2 * (lambda_ - kPollingIntervalMs); } std::optional, std::vector>>> PbftManager::generatePbftBlock( From d4c163ea3981e0fe19177389e1f23412da73b94e Mon Sep 17 00:00:00 2001 From: JakubFornadel Date: Thu, 9 Mar 2023 15:12:13 +0100 Subject: [PATCH 065/162] chore: trigger exponentail backoff only if node is too far behind the rest of the network so it can catch up --- .../consensus/include/pbft/pbft_manager.hpp | 4 +-- .../include/vote_manager/verified_votes.hpp | 6 ++++ .../include/vote_manager/vote_manager.hpp | 11 +++++++ .../consensus/src/pbft/pbft_manager.cpp | 23 +++++++++---- .../src/vote_manager/vote_manager.cpp | 33 +++++++++++++++++-- 5 files changed, 67 insertions(+), 10 deletions(-) diff --git a/libraries/core_libs/consensus/include/pbft/pbft_manager.hpp b/libraries/core_libs/consensus/include/pbft/pbft_manager.hpp index 7c5ba840c3..5cb4c6e253 100644 --- a/libraries/core_libs/consensus/include/pbft/pbft_manager.hpp +++ b/libraries/core_libs/consensus/include/pbft/pbft_manager.hpp @@ -532,8 +532,8 @@ class PbftManager : public std::enable_shared_from_this { const addr_t node_addr_; const secret_t node_sk_; - const std::chrono::milliseconds kMinLambda; // [ms] - std::chrono::milliseconds lambda_{0}; // [ms] + const 
std::chrono::milliseconds kMinLambda; // [ms] + std::chrono::milliseconds lambda_{0}; // [ms] const std::chrono::milliseconds kMaxLambda{60000}; // in ms, max lambda is 1 minutes const uint32_t kBroadcastVotesLambdaTime = 20; diff --git a/libraries/core_libs/consensus/include/vote_manager/verified_votes.hpp b/libraries/core_libs/consensus/include/vote_manager/verified_votes.hpp index 36f64a1291..ea3b24250b 100644 --- a/libraries/core_libs/consensus/include/vote_manager/verified_votes.hpp +++ b/libraries/core_libs/consensus/include/vote_manager/verified_votes.hpp @@ -19,6 +19,12 @@ struct VerifiedVotes { // Step votes std::map step_votes; + + // Greatest step, for which there is at least t+1 next votes - it is used for lambda exponential backoff: Usually + // when network gets stalled it is due to lack of 2t+1 voting power and steps keep increasing. When new node joins + // the network, it should catch up with the rest of nodes asap so we dont start exponentially backing of its lambda + // if it's current step is far behind network_t_plus_one_step (at least 1 third of network is at this step) + PbftStep network_t_plus_one_step{0}; }; } // namespace taraxa \ No newline at end of file diff --git a/libraries/core_libs/consensus/include/vote_manager/vote_manager.hpp b/libraries/core_libs/consensus/include/vote_manager/vote_manager.hpp index a456436801..9299c28ea0 100644 --- a/libraries/core_libs/consensus/include/vote_manager/vote_manager.hpp +++ b/libraries/core_libs/consensus/include/vote_manager/vote_manager.hpp @@ -220,6 +220,17 @@ class VoteManager { */ void setCurrentPbftPeriodAndRound(PbftPeriod pbft_period, PbftRound pbft_round); + /** + * @brief Returns greatest step (in specified period & round), for which there is at least t+1 voting power + * from all nodes + * @note It is used for triggering lambda exponential backoff + * + * @param period + * @param round + * @return greatest network 2t+1 next voting step + */ + PbftStep 
getNetworkTplusOneNextVotingStep(PbftPeriod period, PbftRound round) const; + private: /** * @param vote diff --git a/libraries/core_libs/consensus/src/pbft/pbft_manager.cpp b/libraries/core_libs/consensus/src/pbft/pbft_manager.cpp index 9bb5c14803..7740252f30 100644 --- a/libraries/core_libs/consensus/src/pbft/pbft_manager.cpp +++ b/libraries/core_libs/consensus/src/pbft/pbft_manager.cpp @@ -246,12 +246,23 @@ void PbftManager::setPbftStep(PbftStep pbft_step) { db_->savePbftMgrField(PbftMgrField::Step, pbft_step); step_ = pbft_step; - if (step_ > kMaxSteps && lambda_ < kMaxLambda) { - // Note: We calculate the lambda for a step independently of prior steps in case missed earlier steps. - // TODO: do we need some randomness here ? - lambda_ *= 2; - if (lambda_ > kMaxLambda) { - lambda_ = kMaxLambda; + // Increase lambda only for odd steps (second finish steps) after node reached kMaxSteps steps + if (step_ > kMaxSteps && step_ % 2) { + const auto [round, period] = getPbftRoundAndPeriod(); + const auto network_next_voting_step = vote_mgr_->getNetworkTplusOneNextVotingStep(period, round); + + // Node is still >= kMaxSteps steps behind the rest (at least 1/3) of the network - keep lambda at the standard + // value so node can catch up with the rest of the nodes + if (network_next_voting_step > step_ && network_next_voting_step - step_ >= kMaxSteps) { + lambda_ = kMinLambda; + } else if (lambda_ < kMaxLambda) { + // Node is < kMaxSteps steps behind the rest (at least 1/3) of the network - start exponentially backing off + // lambda until it reaches kMaxLambda + // Note: We calculate the lambda for a step independently of prior steps in case missed earlier steps. 
+ lambda_ *= 2; + if (lambda_ > kMaxLambda) { + lambda_ = kMaxLambda; + } } } } diff --git a/libraries/core_libs/consensus/src/vote_manager/vote_manager.cpp b/libraries/core_libs/consensus/src/vote_manager/vote_manager.cpp index 5e61f6d3ca..30cb38198a 100644 --- a/libraries/core_libs/consensus/src/vote_manager/vote_manager.cpp +++ b/libraries/core_libs/consensus/src/vote_manager/vote_manager.cpp @@ -142,6 +142,22 @@ void VoteManager::setCurrentPbftPeriodAndRound(PbftPeriod pbft_period, PbftRound } } +PbftStep VoteManager::getNetworkTplusOneNextVotingStep(PbftPeriod period, PbftRound round) const { + std::shared_lock lock(verified_votes_access_); + + const auto found_period_it = verified_votes_.find(period); + if (found_period_it == verified_votes_.end()) { + return 0; + } + + const auto found_round_it = found_period_it->second.find(round); + if (found_round_it == found_period_it->second.end()) { + return 0; + } + + return found_round_it->second.network_t_plus_one_step; +} + bool VoteManager::addVerifiedVote(const std::shared_ptr& vote) { assert(vote->getWeight().has_value()); const auto hash = vote->getHash(); @@ -218,9 +234,22 @@ bool VoteManager::addVerifiedVote(const std::shared_ptr& vote) { const auto total_weight = (found_voted_value_it->second.first += weight); - // Not enough votes - do not set 2t+1 voted block for period,round and step + // Unable to get 2t+1 const auto two_t_plus_one = getPbftTwoTPlusOne(vote->getPeriod() - 1, vote->getType()); - if (total_weight < two_t_plus_one) { + if (!two_t_plus_one.has_value()) [[unlikely]] { + return true; + } + + // Calculate t+1 + const auto t_plus_one = ((*two_t_plus_one - 1) / 2) + 1; + // Set network_t_plus_one_step - used for triggering exponential backoff + if (vote->getType() == PbftVoteTypes::next_vote && total_weight >= t_plus_one && + vote->getStep() > found_round_it->second.network_t_plus_one_step) { + found_round_it->second.network_t_plus_one_step = vote->getStep(); + } + + // Not enough votes - do not 
set 2t+1 voted block for period,round and step + if (total_weight < *two_t_plus_one) { return true; } From 5dc72ce46053c8b0cadf09dfbbdbeb6ba0587a8c Mon Sep 17 00:00:00 2001 From: JakubFornadel Date: Fri, 10 Mar 2023 10:00:51 +0100 Subject: [PATCH 066/162] chore: fix round_determine_from_next_votes test --- .../src/vote_manager/vote_manager.cpp | 1 + tests/vote_test.cpp | 31 +++++++++---------- 2 files changed, 16 insertions(+), 16 deletions(-) diff --git a/libraries/core_libs/consensus/src/vote_manager/vote_manager.cpp b/libraries/core_libs/consensus/src/vote_manager/vote_manager.cpp index 30cb38198a..aa5a26ff3f 100644 --- a/libraries/core_libs/consensus/src/vote_manager/vote_manager.cpp +++ b/libraries/core_libs/consensus/src/vote_manager/vote_manager.cpp @@ -237,6 +237,7 @@ bool VoteManager::addVerifiedVote(const std::shared_ptr& vote) { // Unable to get 2t+1 const auto two_t_plus_one = getPbftTwoTPlusOne(vote->getPeriod() - 1, vote->getType()); if (!two_t_plus_one.has_value()) [[unlikely]] { + LOG(log_er_) << "Cannot set(or not) 2t+1 voted block as 2t+1 threshold is unavailable, vote " << vote->getHash(); return true; } diff --git a/tests/vote_test.cpp b/tests/vote_test.cpp index 9731d38851..b6c8dab36e 100644 --- a/tests/vote_test.cpp +++ b/tests/vote_test.cpp @@ -58,30 +58,29 @@ TEST_F(VoteTest, verified_votes) { TEST_F(VoteTest, round_determine_from_next_votes) { auto node = create_nodes(1, true /*start*/).front(); - // stop PBFT manager, that will place vote - node->getPbftManager()->stop(); + auto pbft_mgr = node->getPbftManager(); + auto vote_mgr = node->getVoteManager(); + // stop PBFT manager, that will place vote + pbft_mgr->stop(); clearAllVotes({node}); - auto vote_mgr = node->getVoteManager(); - size_t two_t_plus_one = 2; + const auto [current_round, current_period] = pbft_mgr->getPbftRoundAndPeriod(); - // Generate votes in 3 rounds, 2 steps, each step have 3 votes + // Generate votes for a few future rounds blk_hash_t voted_block_hash(1); 
PbftVoteTypes type = PbftVoteTypes::next_vote; - for (int i = 10; i <= 12; i++) { - for (int j = 4; j <= 5; j++) { - PbftPeriod period = i; - PbftRound round = i; - PbftStep step = j; - auto vote = vote_mgr->generateVote(voted_block_hash, type, period, round, step); - vote->calculateWeight(3, 3, 3); - vote_mgr->addVerifiedVote(vote); - } + const PbftRound kMaxRound = current_round + 3; + PbftStep step = 5; + for (PbftRound round = current_round; round <= kMaxRound; round++) { + auto vote = vote_mgr->generateVote(voted_block_hash, type, current_period, round, step); + vote->calculateWeight(3, 3, 3); + vote_mgr->addVerifiedVote(vote); } - auto new_round = vote_mgr->determineNewRound(12, two_t_plus_one); - EXPECT_EQ(*new_round, 13); + auto new_round = vote_mgr->determineNewRound(current_period, kMaxRound); + EXPECT_EQ(new_round.has_value(), true); + EXPECT_EQ(*new_round, kMaxRound + 1); } TEST_F(VoteTest, reconstruct_votes) { From 8cf40432aa07f2ec82714a5ad6e9a91e7b37fa49 Mon Sep 17 00:00:00 2001 From: kstdl Date: Tue, 14 Mar 2023 12:51:25 +0100 Subject: [PATCH 067/162] fix: build on arm macs that was broken after update of cppcheck --- conanfile.py | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/conanfile.py b/conanfile.py index 8cabe15fab..cf82c04941 100644 --- a/conanfile.py +++ b/conanfile.py @@ -67,13 +67,9 @@ def configure(self): # mpir is required by cppcheck and it causing gmp confict self.options["mpir"].enable_gmpcompat = False - # mpir is z3 dependency and it couldn't be built for arm - if (self.settings.arch == "armv8"): - self.options["cppcheck"].with_z3 = False - def _configure_cmake(self): cmake = CMake(self) - # set find path to clang utils dowloaded by that script + # set find path to clang utils downloaded by that script cmake.configure() return cmake From eadfc894c0bd12258e720e0113abbcfa3f9b2bc0 Mon Sep 17 00:00:00 2001 From: Matus Kysel Date: Tue, 14 Mar 2023 13:25:08 +0100 Subject: [PATCH 068/162] chore: fix building 
instructions --- doc/building.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/building.md b/doc/building.md index b6434a9927..584658f761 100644 --- a/doc/building.md +++ b/doc/building.md @@ -40,7 +40,7 @@ will build out of the box without further effort: sudo python3 -m pip install conan==1.59.0 # Setup clang as default compiler either in your IDE or by env. variables" - export C="clang-14" + export CC="clang-14" export CXX="clang++-14" ### Clone the Repository @@ -127,7 +127,7 @@ will build out of the box without further effort: rm -f llvm.sh # Setup clang as default compiler either in your IDE or by env. variables" - export C="clang-14" + export CC="clang-14" export CXX="clang++-14" ### Clone the Repository From ff589b77fd088c90b726151294521ccbd43b210e Mon Sep 17 00:00:00 2001 From: mfrankovi Date: Tue, 28 Feb 2023 15:38:05 +0100 Subject: [PATCH 069/162] chore: optimize rewards votes handling a --- .../include/vote_manager/vote_manager.hpp | 12 +- .../consensus/src/pbft/pbft_manager.cpp | 8 +- .../src/vote_manager/vote_manager.cpp | 132 ++++++++++++++---- .../get_pbft_sync_packet_handler.hpp | 5 +- .../get_pbft_sync_packet_handler.cpp | 7 +- .../network/src/tarcap/taraxa_capability.cpp | 2 +- libraries/core_libs/node/src/node.cpp | 9 +- .../storage/include/storage/storage.hpp | 8 +- libraries/core_libs/storage/src/storage.cpp | 32 +++-- tests/full_node_test.cpp | 13 +- tests/network_test.cpp | 6 +- 11 files changed, 164 insertions(+), 70 deletions(-) diff --git a/libraries/core_libs/consensus/include/vote_manager/vote_manager.hpp b/libraries/core_libs/consensus/include/vote_manager/vote_manager.hpp index 9299c28ea0..ab7a1a03ca 100644 --- a/libraries/core_libs/consensus/include/vote_manager/vote_manager.hpp +++ b/libraries/core_libs/consensus/include/vote_manager/vote_manager.hpp @@ -96,13 +96,16 @@ class VoteManager { std::optional determineNewRound(PbftPeriod current_pbft_period, PbftRound current_pbft_round); /** - * @brief Replace 
current reward votes info with new period, round & block hash based on vote + * @brief Replace current reward votes with new period, round & block hash based on vote * * @param period * @param round + * @param step * @param block_hash + * @param batch */ - void resetRewardVotesInfo(PbftPeriod period, PbftRound round, const blk_hash_t& block_hash); + void resetRewardVotes(PbftPeriod period, PbftRound round, PbftStep step, const blk_hash_t& block_hash, + DbStorage::Batch& batch); /** * @brief Check reward votes for specified pbft block @@ -115,11 +118,11 @@ class VoteManager { bool copy_votes); /** - * @brief Get reward votes from reward_votes_ with the round during which was the previous block pushed + * @brief Get reward votes with the round during which was the previous block pushed * * @return vector of reward votes */ - std::vector> getProposeRewardVotes(); + std::vector> getRewardVotes(); /** * @brief Get current reward votes pbft block period @@ -280,6 +283,7 @@ class VoteManager { blk_hash_t reward_votes_block_hash_; PbftRound reward_votes_period_; PbftRound reward_votes_round_; + std::vector extra_reward_votes_; mutable std::shared_mutex reward_votes_info_mutex_; // Cache for current 2T+1 - > diff --git a/libraries/core_libs/consensus/src/pbft/pbft_manager.cpp b/libraries/core_libs/consensus/src/pbft/pbft_manager.cpp index 7740252f30..3e139fad40 100644 --- a/libraries/core_libs/consensus/src/pbft/pbft_manager.cpp +++ b/libraries/core_libs/consensus/src/pbft/pbft_manager.cpp @@ -555,7 +555,7 @@ void PbftManager::broadcastVotes(bool rebroadcast) { } // Broadcast reward votes - previous round 2t+1 cert votes - auto reward_votes = vote_mgr_->getProposeRewardVotes(); + auto reward_votes = vote_mgr_->getRewardVotes(); if (!reward_votes.empty()) { LOG(log_dg_) << "Broadcast propose reward votes for period " << period << ", round " << round; net->getSpecificHandler()->onNewPbftVotesBundle(std::move(reward_votes), @@ -1013,7 +1013,7 @@ std::optional, 
std::vectorgetProposeRewardVotes(); + auto reward_votes = vote_mgr_->getRewardVotes(); if (propose_period > 1) [[likely]] { assert(!reward_votes.empty()); if (reward_votes[0]->getPeriod() != propose_period - 1) { @@ -1511,8 +1511,8 @@ bool PbftManager::pushPbftBlock_(PeriodData &&period_data, std::vectorsavePeriodData(period_data, batch); // Replace current reward votes - vote_mgr_->resetRewardVotesInfo(cert_votes[0]->getPeriod(), cert_votes[0]->getRound(), cert_votes[0]->getBlockHash()); - db_->replaceRewardVotes(cert_votes, batch); + vote_mgr_->resetRewardVotes(cert_votes[0]->getPeriod(), cert_votes[0]->getRound(), cert_votes[0]->getStep(), + cert_votes[0]->getBlockHash(), batch); // pass pbft with dag blocks and transactions to adjust difficulty if (period_data.pbft_blk->getPivotDagBlockHash() != kNullBlockHash) { diff --git a/libraries/core_libs/consensus/src/vote_manager/vote_manager.cpp b/libraries/core_libs/consensus/src/vote_manager/vote_manager.cpp index aa5a26ff3f..2cc98476a3 100644 --- a/libraries/core_libs/consensus/src/vote_manager/vote_manager.cpp +++ b/libraries/core_libs/consensus/src/vote_manager/vote_manager.cpp @@ -32,12 +32,20 @@ VoteManager::VoteManager(const addr_t& node_addr, const PbftConfig& pbft_config, auto db_votes = db_->getAllTwoTPlusOneVotes(); auto loadVotesFromDb = [this](const std::vector>& votes) { + bool reward_votes_info_set = false; for (const auto& vote : votes) { // Check if votes are unique per round, step & voter if (!isUniqueVote(vote).first) { continue; } + if (!reward_votes_info_set && vote->getType() == PbftVoteTypes::cert_vote) { + reward_votes_info_set = true; + reward_votes_block_hash_ = vote->getBlockHash(); + reward_votes_period_ = vote->getPeriod(); + reward_votes_round_ = vote->getRound(); + } + addVerifiedVote(vote); LOG(log_dg_) << "Vote " << vote->getHash() << " loaded from db to memory"; } @@ -45,11 +53,9 @@ VoteManager::VoteManager(const addr_t& node_addr, const PbftConfig& pbft_config, 
loadVotesFromDb(db_->getAllTwoTPlusOneVotes()); loadVotesFromDb(db_->getOwnVerifiedVotes()); - - if (const auto reward_votes = db_->getRewardVotes(); !reward_votes.empty()) { - loadVotesFromDb(reward_votes); - resetRewardVotesInfo(reward_votes[0]->getPeriod(), reward_votes[0]->getRound(), reward_votes[0]->getBlockHash()); - } + auto reward_votes = db_->getRewardVotes(); + for (const auto& vote : reward_votes) extra_reward_votes_.emplace_back(vote->getHash()); + loadVotesFromDb(reward_votes); } void VoteManager::setNetwork(std::weak_ptr network) { network_ = std::move(network); } @@ -227,9 +233,9 @@ bool VoteManager::addVerifiedVote(const std::shared_ptr& vote) { LOG(log_nf_) << "Added verified vote: " << hash; LOG(log_dg_) << "Added verified vote: " << *vote; - // Save in db only those reward votes that have the same round as round during which we pushed the block into chain - if (is_valid_potential_reward_vote && reward_votes_round_ == vote->getRound()) { - db_->saveRewardVote(vote); + if (is_valid_potential_reward_vote) { + extra_reward_votes_.emplace_back(vote->getHash()); + db_->saveExtraRewardVote(vote); } const auto total_weight = (found_voted_value_it->second.first += weight); @@ -279,7 +285,9 @@ bool VoteManager::addVerifiedVote(const std::shared_ptr& vote) { {two_plus_one_voted_block_type, std::make_pair(vote->getBlockHash(), vote->getStep())}); // Save only current pbft period & round 2t+1 votes bundles into db - if (vote->getPeriod() == current_pbft_period_ && vote->getRound() == current_pbft_round_) { + // Cert votes are saved once the pbft block is pushed in the chain + if (vote->getType() != PbftVoteTypes::cert_vote && vote->getPeriod() == current_pbft_period_ && + vote->getRound() == current_pbft_round_) { std::vector> votes; votes.reserve(found_voted_value_it->second.second.size()); for (const auto& tmp_vote : found_voted_value_it->second.second) { @@ -541,22 +549,70 @@ PbftPeriod VoteManager::getRewardVotesPbftBlockPeriod() { return 
reward_votes_period_; } -void VoteManager::resetRewardVotesInfo(PbftPeriod period, PbftRound round, const blk_hash_t& block_hash) { +void VoteManager::resetRewardVotes(PbftPeriod period, PbftRound round, PbftStep step, const blk_hash_t& block_hash, + DbStorage::Batch& batch) { + // Save 2t+1 cert votes to database, remove old reward votes { std::scoped_lock lock(reward_votes_info_mutex_); - reward_votes_block_hash_ = block_hash; reward_votes_period_ = period; reward_votes_round_ = round; } + std::scoped_lock lock(verified_votes_access_); + auto found_period_it = verified_votes_.find(period); + if (found_period_it == verified_votes_.end()) { + LOG(log_er_) << "resetRewardVotes missing period"; + assert(false); + return; + } + auto found_round_it = found_period_it->second.find(round); + if (found_round_it == found_period_it->second.end()) { + LOG(log_er_) << "resetRewardVotes missing round" << round; + assert(false); + return; + } + auto found_step_it = found_round_it->second.step_votes.find(step); + if (found_step_it == found_round_it->second.step_votes.end()) { + LOG(log_er_) << "resetRewardVotes missing step" << step; + assert(false); + return; + } + auto found_two_t_plus_one_voted_block = + found_round_it->second.two_t_plus_one_voted_blocks_.find(TwoTPlusOneVotedBlockType::CertVotedBlock); + if (found_two_t_plus_one_voted_block == found_round_it->second.two_t_plus_one_voted_blocks_.end()) { + LOG(log_er_) << "resetRewardVotes missing cert voted block"; + assert(false); + return; + } + if (found_two_t_plus_one_voted_block->second.first != block_hash) { + LOG(log_er_) << "resetRewardVotes incorrect block " << found_two_t_plus_one_voted_block->second.first + << " expected " << block_hash; + assert(false); + return; + } + auto found_voted_value_it = found_step_it->second.votes.find(block_hash); + if (found_voted_value_it == found_step_it->second.votes.end()) { + LOG(log_er_) << "resetRewardVotes missing vote block " << block_hash; + assert(false); + return; + } + 
std::vector> votes; + votes.reserve(found_voted_value_it->second.second.size()); + for (const auto& tmp_vote : found_voted_value_it->second.second) { + votes.push_back(tmp_vote.second); + } + + db_->replaceTwoTPlusOneVotesToBatch(TwoTPlusOneVotedBlockType::CertVotedBlock, votes, batch); + db_->removeExtraRewardVotes(extra_reward_votes_, batch); + extra_reward_votes_.clear(); + LOG(log_dg_) << "Reward votes info reset to: block_hash: " << block_hash << ", period: " << period << ", round: " << round; } bool VoteManager::isValidRewardVote(const std::shared_ptr& vote) const { std::shared_lock lock(reward_votes_info_mutex_); - if (vote->getType() != PbftVoteTypes::cert_vote) { LOG(log_tr_) << "Invalid reward vote: type " << static_cast(vote->getType()) << " is different from cert type"; @@ -637,19 +693,27 @@ std::pair>> VoteManager::checkRewardVote } }; - std::shared_lock reward_votes_info_lock(reward_votes_info_mutex_); + blk_hash_t reward_votes_block_hash; + PbftRound reward_votes_period; + PbftRound reward_votes_round; + { + std::shared_lock reward_votes_info_lock(reward_votes_info_mutex_); + reward_votes_block_hash = reward_votes_block_hash_; + reward_votes_period = reward_votes_period_; + reward_votes_round = reward_votes_round_; + } std::shared_lock verified_votes_lock(verified_votes_access_); - const auto found_period_it = verified_votes_.find(reward_votes_period_); + const auto found_period_it = verified_votes_.find(reward_votes_period); if (found_period_it == verified_votes_.end()) { - LOG(log_er_) << "No reward votes found for period " << reward_votes_period_; + LOG(log_er_) << "No reward votes found for period " << reward_votes_period; assert(false); return {false, {}}; } - const auto found_round_it = found_period_it->second.find(reward_votes_round_); + const auto found_round_it = found_period_it->second.find(reward_votes_round); if (found_round_it == found_period_it->second.end()) { - LOG(log_er_) << "No reward votes found for round " << reward_votes_round_; 
+ LOG(log_er_) << "No reward votes found for round " << reward_votes_round; assert(false); return {false, {}}; } @@ -657,7 +721,7 @@ std::pair>> VoteManager::checkRewardVote const auto reward_votes_hashes = pbft_block->getRewardVotes(); // Most of the time we should get the reward votes based on reward_votes_period_ and reward_votes_round_ - auto reward_votes = getRewardVotes(found_round_it, reward_votes_hashes, reward_votes_block_hash_, copy_votes); + auto reward_votes = getRewardVotes(found_round_it, reward_votes_hashes, reward_votes_block_hash, copy_votes); if (reward_votes.first) [[likely]] { return {true, std::move(reward_votes.second)}; } @@ -666,13 +730,13 @@ std::pair>> VoteManager::checkRewardVote // and when they included the reward votes in new block, these votes have different round than what saved in // reward_votes_round_ -> therefore we have to iterate over all rounds and find the correct round for (auto round_it = found_period_it->second.begin(); round_it != found_period_it->second.end(); round_it++) { - const auto tmp_reward_votes = getRewardVotes(round_it, reward_votes_hashes, reward_votes_block_hash_, copy_votes); + const auto tmp_reward_votes = getRewardVotes(round_it, reward_votes_hashes, reward_votes_block_hash, copy_votes); if (!tmp_reward_votes.first) { LOG(log_dg_) << "No (or not enough) reward votes found for block " << pbft_block->getBlockHash() << ", period: " << pbft_block->getPeriod() << ", prev. 
block hash: " << pbft_block->getPrevBlockHash() - << ", reward_votes_period_: " << reward_votes_period_ << ", reward_votes_round_: " << round_it->first - << ", reward_votes_block_hash_: " << reward_votes_block_hash_; + << ", reward_votes_period: " << reward_votes_period << ", reward_votes_round_: " << round_it->first + << ", reward_votes_block_hash: " << reward_votes_block_hash; continue; } @@ -681,19 +745,28 @@ std::pair>> VoteManager::checkRewardVote LOG(log_er_) << "No (or not enough) reward votes found for block " << pbft_block->getBlockHash() << ", period: " << pbft_block->getPeriod() << ", prev. block hash: " << pbft_block->getPrevBlockHash() - << ", reward_votes_period_: " << reward_votes_period_ << ", reward_votes_round_: " << reward_votes_round_ - << ", reward_votes_block_hash_: " << reward_votes_block_hash_; + << ", reward_votes_period: " << reward_votes_period << ", reward_votes_round_: " << reward_votes_round + << ", reward_votes_block_hash: " << reward_votes_block_hash; return {false, {}}; } -std::vector> VoteManager::getProposeRewardVotes() { - std::shared_lock lock(reward_votes_info_mutex_); - const auto reward_votes = getTwoTPlusOneVotedBlockVotes(reward_votes_period_, reward_votes_round_, - TwoTPlusOneVotedBlockType::CertVotedBlock); +std::vector> VoteManager::getRewardVotes() { + blk_hash_t reward_votes_block_hash; + PbftRound reward_votes_period; + PbftRound reward_votes_round; + { + std::shared_lock reward_votes_info_lock(reward_votes_info_mutex_); + reward_votes_block_hash = reward_votes_block_hash_; + reward_votes_period = reward_votes_period_; + reward_votes_round = reward_votes_round_; + } + std::shared_lock lock(verified_votes_access_); + auto reward_votes = + getTwoTPlusOneVotedBlockVotes(reward_votes_period, reward_votes_round, TwoTPlusOneVotedBlockType::CertVotedBlock); - if (!reward_votes.empty() && reward_votes[0]->getBlockHash() != reward_votes_block_hash_) { + if (!reward_votes.empty() && reward_votes[0]->getBlockHash() != 
reward_votes_block_hash) { // This should never happen - LOG(log_er_) << "Proposal reward votes block hash mismatch. reward_votes_block_hash_ " << reward_votes_block_hash_ + LOG(log_er_) << "Proposal reward votes block hash mismatch. reward_votes_block_hash " << reward_votes_block_hash << ", reward_votes[0]->getBlockHash() " << reward_votes[0]->getBlockHash(); assert(false); return {}; @@ -904,7 +977,6 @@ std::optional VoteManager::getTwoTPlusOneVotedBlock(PbftPeriod perio std::vector> VoteManager::getTwoTPlusOneVotedBlockVotes(PbftPeriod period, PbftRound round, TwoTPlusOneVotedBlockType type) const { std::shared_lock lock(verified_votes_access_); - const auto found_period_it = verified_votes_.find(period); if (found_period_it == verified_votes_.end()) { return {}; diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/get_pbft_sync_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/get_pbft_sync_packet_handler.hpp index c503066bbb..856bc52eca 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets_handlers/get_pbft_sync_packet_handler.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/get_pbft_sync_packet_handler.hpp @@ -5,6 +5,7 @@ namespace taraxa { class PbftChain; class DbStorage; +class VoteManager; } // namespace taraxa namespace taraxa::network::tarcap { @@ -16,7 +17,8 @@ class GetPbftSyncPacketHandler final : public PacketHandler { GetPbftSyncPacketHandler(const FullNodeConfig& conf, std::shared_ptr peers_state, std::shared_ptr packets_stats, std::shared_ptr pbft_syncing_state, std::shared_ptr pbft_chain, - std::shared_ptr db, const addr_t& node_addr); + std::shared_ptr vote_mgr, std::shared_ptr db, + const addr_t& node_addr); void sendPbftBlocks(dev::p2p::NodeID const& peer_id, PbftPeriod from_period, size_t blocks_to_transfer, bool pbft_chain_synced); @@ -30,6 +32,7 @@ class GetPbftSyncPacketHandler final : public PacketHandler { std::shared_ptr 
pbft_syncing_state_; std::shared_ptr pbft_chain_; + std::shared_ptr vote_mgr_; std::shared_ptr db_; }; diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/get_pbft_sync_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/get_pbft_sync_packet_handler.cpp index 2a0de430d9..c5b0113f89 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/get_pbft_sync_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/get_pbft_sync_packet_handler.cpp @@ -4,17 +4,20 @@ #include "pbft/pbft_chain.hpp" #include "storage/storage.hpp" #include "vote/vote.hpp" +#include "vote_manager/vote_manager.hpp" namespace taraxa::network::tarcap { GetPbftSyncPacketHandler::GetPbftSyncPacketHandler(const FullNodeConfig &conf, std::shared_ptr peers_state, std::shared_ptr packets_stats, std::shared_ptr pbft_syncing_state, - std::shared_ptr pbft_chain, std::shared_ptr db, + std::shared_ptr pbft_chain, + std::shared_ptr vote_mgr, std::shared_ptr db, const addr_t &node_addr) : PacketHandler(conf, std::move(peers_state), std::move(packets_stats), node_addr, "GET_PBFT_SYNC_PH"), pbft_syncing_state_(std::move(pbft_syncing_state)), pbft_chain_(std::move(pbft_chain)), + vote_mgr_(std::move(vote_mgr)), db_(std::move(db)) {} void GetPbftSyncPacketHandler::validatePacketRlpFormat(const PacketData &packet_data) const { @@ -80,7 +83,7 @@ void GetPbftSyncPacketHandler::sendPbftBlocks(dev::p2p::NodeID const &peer_id, P s << last_block; s.appendRaw(data); // Latest finalized block cert votes are saved in db as reward votes for new blocks - const auto votes = db_->getRewardVotes(); + const auto votes = vote_mgr_->getRewardVotes(); s.appendList(votes.size()); for (const auto &vote : votes) { s.appendRaw(vote->rlp(true)); diff --git a/libraries/core_libs/network/src/tarcap/taraxa_capability.cpp b/libraries/core_libs/network/src/tarcap/taraxa_capability.cpp index c031e3114e..4744e12bdd 100644 --- 
a/libraries/core_libs/network/src/tarcap/taraxa_capability.cpp +++ b/libraries/core_libs/network/src/tarcap/taraxa_capability.cpp @@ -237,7 +237,7 @@ void TaraxaCapability::registerPacketHandlers( // TODO there is additional logic, that should be moved outside process function packets_handlers_->registerHandler(kConf, peers_state_, packets_stats, pbft_syncing_state_, - pbft_chain, db, node_addr); + pbft_chain, vote_mgr, db, node_addr); packets_handlers_->registerHandler(kConf, peers_state_, packets_stats, pbft_syncing_state_, pbft_chain, pbft_mgr, dag_mgr, vote_mgr, diff --git a/libraries/core_libs/node/src/node.cpp b/libraries/core_libs/node/src/node.cpp index 5622c39e30..4c721b72d9 100644 --- a/libraries/core_libs/node/src/node.cpp +++ b/libraries/core_libs/node/src/node.cpp @@ -347,8 +347,8 @@ void FullNode::rebuildDb() { // Read pbft blocks one by one PbftPeriod period = 1; std::shared_ptr period_data, next_period_data; - std::vector> cert_votes; while (true) { + std::vector> cert_votes; if (next_period_data != nullptr) { period_data = next_period_data; } else { @@ -359,8 +359,11 @@ void FullNode::rebuildDb() { auto data = old_db_->getPeriodDataRaw(period + 1); if (data.size() == 0) { next_period_data = nullptr; - // Latest finalized block cert votes are saved in db as reward votes for new blocks - cert_votes = old_db_->getRewardVotes(); + // Latest finalized block cert votes are saved in db as 2t+1 cert votes + auto votes = old_db_->getAllTwoTPlusOneVotes(); + for (auto v : votes) { + if (v->getType() == PbftVoteTypes::cert_vote) cert_votes.push_back(v); + } } else { next_period_data = std::make_shared(std::move(data)); cert_votes = next_period_data->previous_block_cert_votes; diff --git a/libraries/core_libs/storage/include/storage/storage.hpp b/libraries/core_libs/storage/include/storage/storage.hpp index 7f78647eba..992fb61b51 100644 --- a/libraries/core_libs/storage/include/storage/storage.hpp +++ b/libraries/core_libs/storage/include/storage/storage.hpp 
@@ -105,7 +105,7 @@ class DbStorage : public std::enable_shared_from_this { COLUMN(pbft_head); COLUMN(latest_round_own_votes); // own votes of any type for the latest round COLUMN(latest_round_two_t_plus_one_votes); // 2t+1 votes bundles of any type for the latest round - COLUMN(latest_reward_votes); // extra reward votes on top of 2t+1 cert votes bundle from + COLUMN(extra_reward_votes); // extra reward votes on top of 2t+1 cert votes bundle from // latest_round_two_t_plus_one_votes COLUMN(pbft_block_period); COLUMN(dag_block_period); @@ -269,11 +269,13 @@ class DbStorage : public std::enable_shared_from_this { // 2t+1 votes bundles for the latest round void replaceTwoTPlusOneVotes(TwoTPlusOneVotedBlockType type, const std::vector>& votes); + void replaceTwoTPlusOneVotesToBatch(TwoTPlusOneVotedBlockType type, const std::vector>& votes, + Batch& write_batch); std::vector> getAllTwoTPlusOneVotes(); // Reward votes - cert votes for the latest finalized block - void replaceRewardVotes(const std::vector>& votes, Batch& write_batch); - void saveRewardVote(const std::shared_ptr& vote); + void removeExtraRewardVotes(const std::vector& votes, Batch& write_batch); + void saveExtraRewardVote(const std::shared_ptr& vote); std::vector> getRewardVotes(); // period_pbft_block diff --git a/libraries/core_libs/storage/src/storage.cpp b/libraries/core_libs/storage/src/storage.cpp index 61cec78330..5b6eca9cc1 100644 --- a/libraries/core_libs/storage/src/storage.cpp +++ b/libraries/core_libs/storage/src/storage.cpp @@ -846,6 +846,17 @@ void DbStorage::replaceTwoTPlusOneVotes(TwoTPlusOneVotedBlockType type, insert(Columns::latest_round_two_t_plus_one_votes, static_cast(type), s.out()); } +void DbStorage::replaceTwoTPlusOneVotesToBatch(TwoTPlusOneVotedBlockType type, + const std::vector>& votes, Batch& write_batch) { + remove(write_batch, Columns::latest_round_two_t_plus_one_votes, static_cast(type)); + + dev::RLPStream s(votes.size()); + for (const auto& vote : votes) { + 
s.appendRaw(vote->rlp(true, true)); + } + insert(write_batch, Columns::latest_round_two_t_plus_one_votes, static_cast(type), s.out()); +} + std::vector> DbStorage::getAllTwoTPlusOneVotes() { std::vector> votes; auto load_db_votes = [this, &votes](TwoTPlusOneVotedBlockType type) { @@ -866,29 +877,20 @@ std::vector> DbStorage::getAllTwoTPlusOneVotes() { return votes; } -void DbStorage::replaceRewardVotes(const std::vector>& votes, Batch& write_batch) { - // TODO: deletion could be optimized if we save votes in memory - // Remove existing reward votes - auto it = std::unique_ptr(db_->NewIterator(read_options_, handle(Columns::latest_reward_votes))); - for (it->SeekToFirst(); it->Valid(); it->Next()) { - const auto vote = std::make_shared(asBytes(it->value().ToString())); - remove(write_batch, Columns::latest_reward_votes, vote->getHash().asBytes()); - } - - // Add new reward votes - for (const auto& vote : votes) { - insert(write_batch, Columns::latest_reward_votes, vote->getHash().asBytes(), vote->rlp(true, true)); +void DbStorage::removeExtraRewardVotes(const std::vector& votes, Batch& write_batch) { + for (const auto& v : votes) { + remove(write_batch, Columns::extra_reward_votes, v.asBytes()); } } -void DbStorage::saveRewardVote(const std::shared_ptr& vote) { - insert(Columns::latest_reward_votes, vote->getHash().asBytes(), vote->rlp(true, true)); +void DbStorage::saveExtraRewardVote(const std::shared_ptr& vote) { + insert(Columns::extra_reward_votes, vote->getHash().asBytes(), vote->rlp(true, true)); } std::vector> DbStorage::getRewardVotes() { std::vector> votes; - auto it = std::unique_ptr(db_->NewIterator(read_options_, handle(Columns::latest_reward_votes))); + auto it = std::unique_ptr(db_->NewIterator(read_options_, handle(Columns::extra_reward_votes))); for (it->SeekToFirst(); it->Valid(); it->Next()) { votes.emplace_back(std::make_shared(asBytes(it->value().ToString()))); } diff --git a/tests/full_node_test.cpp b/tests/full_node_test.cpp index 
50e0cf0488..16e00a70f6 100644 --- a/tests/full_node_test.cpp +++ b/tests/full_node_test.cpp @@ -285,9 +285,7 @@ TEST_F(FullNodeTest, db_test) { } EXPECT_TRUE(db.getRewardVotes().empty()); - batch = db.createWriteBatch(); - db.replaceRewardVotes(verified_votes, batch); - db.commitWriteBatch(batch); + for (auto v : verified_votes) db.saveExtraRewardVote(v); const auto db_reward_votes = db.getRewardVotes(); EXPECT_EQ(db_reward_votes.size(), verified_votes_map.size()); @@ -297,7 +295,7 @@ TEST_F(FullNodeTest, db_test) { const auto new_reward_vote = genVote(PbftVoteTypes::cert_vote, 10, 10, 3); verified_votes_map[new_reward_vote->getHash()] = new_reward_vote; - db.saveRewardVote(new_reward_vote); + db.saveExtraRewardVote(new_reward_vote); const auto new_db_reward_votes = db.getRewardVotes(); EXPECT_EQ(new_db_reward_votes.size(), verified_votes_map.size()); @@ -306,7 +304,12 @@ TEST_F(FullNodeTest, db_test) { } batch = db.createWriteBatch(); - db.replaceRewardVotes({}, batch); + + std::vector verified_votes_hashes, new_db_reward_votes_hashes; + for (const auto &v : verified_votes) verified_votes_hashes.emplace_back(v->getHash()); + for (const auto &v : new_db_reward_votes) new_db_reward_votes_hashes.emplace_back(v->getHash()); + db.removeExtraRewardVotes(verified_votes_hashes, batch); + db.removeExtraRewardVotes(new_db_reward_votes_hashes, batch); db.commitWriteBatch(batch); EXPECT_TRUE(db.getRewardVotes().empty()); diff --git a/tests/network_test.cpp b/tests/network_test.cpp index 9cad883f3e..03c78a278f 100644 --- a/tests/network_test.cpp +++ b/tests/network_test.cpp @@ -615,7 +615,7 @@ TEST_F(NetworkTest, node_pbft_sync) { beneficiary, node1->getSecretKey(), {}); std::vector> votes_for_pbft_blk2; votes_for_pbft_blk2.emplace_back( - node1->getVoteManager()->generateVote(pbft_block2.getBlockHash(), PbftVoteTypes::cert_vote, 2, 2, 3)); + node1->getVoteManager()->generateVoteWithWeight(pbft_block2.getBlockHash(), PbftVoteTypes::cert_vote, 2, 1, 3)); std::cout << "Generate 
1 vote for second PBFT block" << std::endl; // node1 put block2 into pbft chain and store into DB // Add cert votes in DB @@ -629,7 +629,9 @@ TEST_F(NetworkTest, node_pbft_sync) { period_data2.transactions.push_back(g_signed_trx_samples[3]); db1->savePeriodData(period_data2, batch); - db1->replaceRewardVotes(votes_for_pbft_blk2, batch); + node1->getVoteManager()->addVerifiedVote(votes_for_pbft_blk2[0]); + db1->replaceTwoTPlusOneVotesToBatch(TwoTPlusOneVotedBlockType::CertVotedBlock, votes_for_pbft_blk2, batch); + node1->getVoteManager()->resetRewardVotes(2, 1, 3, pbft_block2.getBlockHash(), batch); // Update pbft chain pbft_chain1->updatePbftChain(pbft_block2.getBlockHash(), pbft_block2.getPivotDagBlockHash()); From b4a0a60fe8f02d26b1cede4553c6bbaab53b7b08 Mon Sep 17 00:00:00 2001 From: Matus Kysel Date: Thu, 16 Mar 2023 10:59:18 +0100 Subject: [PATCH 070/162] feat: implement eip-1898 --- .../core_libs/network/rpc/Eth.jsonrpc.json | 10 ++-- libraries/core_libs/network/rpc/EthClient.h | 10 ++-- libraries/core_libs/network/rpc/EthFace.h | 30 ++++++------ libraries/core_libs/network/rpc/README.md | 6 +++ libraries/core_libs/network/rpc/TaraxaFace.h | 4 +- libraries/core_libs/network/rpc/eth/Eth.cpp | 47 +++++++++++++------ tests/rpc_test.cpp | 21 +++++++++ 7 files changed, 86 insertions(+), 42 deletions(-) create mode 100644 libraries/core_libs/network/rpc/README.md diff --git a/libraries/core_libs/network/rpc/Eth.jsonrpc.json b/libraries/core_libs/network/rpc/Eth.jsonrpc.json index eb1efbe28e..77d724669c 100644 --- a/libraries/core_libs/network/rpc/Eth.jsonrpc.json +++ b/libraries/core_libs/network/rpc/Eth.jsonrpc.json @@ -33,7 +33,7 @@ "name": "eth_getBalance", "params": [ "", - "" + {} ], "order": [], "returns": "" @@ -43,7 +43,7 @@ "params": [ "", "", - "" + {} ], "order": [], "returns": "" @@ -61,7 +61,7 @@ "name": "eth_getTransactionCount", "params": [ "", - "" + {} ], "order": [], "returns": "" @@ -102,7 +102,7 @@ "name": "eth_getCode", "params": [ "", - "" + 
{} ], "order": [], "returns": "" @@ -111,7 +111,7 @@ "name": "eth_call", "params": [ {}, - "" + {} ], "order": [], "returns": "" diff --git a/libraries/core_libs/network/rpc/EthClient.h b/libraries/core_libs/network/rpc/EthClient.h index 0e9a220298..47869dfe8b 100644 --- a/libraries/core_libs/network/rpc/EthClient.h +++ b/libraries/core_libs/network/rpc/EthClient.h @@ -59,7 +59,7 @@ class EthClient : public jsonrpc::Client { else throw jsonrpc::JsonRpcException(jsonrpc::Errors::ERROR_CLIENT_INVALID_RESPONSE, result.toStyledString()); } - std::string eth_getBalance(const std::string& param1, const std::string& param2) throw(jsonrpc::JsonRpcException) { + std::string eth_getBalance(const std::string& param1, const Json::Value& param2) throw(jsonrpc::JsonRpcException) { Json::Value p; p.append(param1); p.append(param2); @@ -70,7 +70,7 @@ class EthClient : public jsonrpc::Client { throw jsonrpc::JsonRpcException(jsonrpc::Errors::ERROR_CLIENT_INVALID_RESPONSE, result.toStyledString()); } std::string eth_getStorageAt(const std::string& param1, const std::string& param2, - const std::string& param3) throw(jsonrpc::JsonRpcException) { + const Json::Value& param3) throw(jsonrpc::JsonRpcException) { Json::Value p; p.append(param1); p.append(param2); @@ -93,7 +93,7 @@ class EthClient : public jsonrpc::Client { throw jsonrpc::JsonRpcException(jsonrpc::Errors::ERROR_CLIENT_INVALID_RESPONSE, result.toStyledString()); } std::string eth_getTransactionCount(const std::string& param1, - const std::string& param2) throw(jsonrpc::JsonRpcException) { + const Json::Value& param2) throw(jsonrpc::JsonRpcException) { Json::Value p; p.append(param1); p.append(param2); @@ -139,7 +139,7 @@ class EthClient : public jsonrpc::Client { else throw jsonrpc::JsonRpcException(jsonrpc::Errors::ERROR_CLIENT_INVALID_RESPONSE, result.toStyledString()); } - std::string eth_getCode(const std::string& param1, const std::string& param2) throw(jsonrpc::JsonRpcException) { + std::string eth_getCode(const 
std::string& param1, const Json::Value& param2) throw(jsonrpc::JsonRpcException) { Json::Value p; p.append(param1); p.append(param2); @@ -149,7 +149,7 @@ class EthClient : public jsonrpc::Client { else throw jsonrpc::JsonRpcException(jsonrpc::Errors::ERROR_CLIENT_INVALID_RESPONSE, result.toStyledString()); } - std::string eth_call(const Json::Value& param1, const std::string& param2) throw(jsonrpc::JsonRpcException) { + std::string eth_call(const Json::Value& param1, const Json::Value& param2) throw(jsonrpc::JsonRpcException) { Json::Value p; p.append(param1); p.append(param2); diff --git a/libraries/core_libs/network/rpc/EthFace.h b/libraries/core_libs/network/rpc/EthFace.h index d050fad608..2723def03e 100644 --- a/libraries/core_libs/network/rpc/EthFace.h +++ b/libraries/core_libs/network/rpc/EthFace.h @@ -25,18 +25,18 @@ class EthFace : public ServerInterface { jsonrpc::Procedure("eth_blockNumber", jsonrpc::PARAMS_BY_POSITION, jsonrpc::JSON_STRING, NULL), &taraxa::net::EthFace::eth_blockNumberI); this->bindAndAddMethod(jsonrpc::Procedure("eth_getBalance", jsonrpc::PARAMS_BY_POSITION, jsonrpc::JSON_STRING, - "param1", jsonrpc::JSON_STRING, "param2", jsonrpc::JSON_STRING, NULL), + "param1", jsonrpc::JSON_STRING, "param2", jsonrpc::JSON_OBJECT, NULL), &taraxa::net::EthFace::eth_getBalanceI); this->bindAndAddMethod( jsonrpc::Procedure("eth_getStorageAt", jsonrpc::PARAMS_BY_POSITION, jsonrpc::JSON_STRING, "param1", - jsonrpc::JSON_STRING, "param2", jsonrpc::JSON_STRING, "param3", jsonrpc::JSON_STRING, NULL), + jsonrpc::JSON_STRING, "param2", jsonrpc::JSON_STRING, "param3", jsonrpc::JSON_OBJECT, NULL), &taraxa::net::EthFace::eth_getStorageAtI); this->bindAndAddMethod(jsonrpc::Procedure("eth_getStorageRoot", jsonrpc::PARAMS_BY_POSITION, jsonrpc::JSON_STRING, "param1", jsonrpc::JSON_STRING, "param2", jsonrpc::JSON_STRING, NULL), &taraxa::net::EthFace::eth_getStorageRootI); this->bindAndAddMethod( jsonrpc::Procedure("eth_getTransactionCount", jsonrpc::PARAMS_BY_POSITION, 
jsonrpc::JSON_STRING, "param1", - jsonrpc::JSON_STRING, "param2", jsonrpc::JSON_STRING, NULL), + jsonrpc::JSON_STRING, "param2", jsonrpc::JSON_OBJECT, NULL), &taraxa::net::EthFace::eth_getTransactionCountI); this->bindAndAddMethod(jsonrpc::Procedure("eth_getBlockTransactionCountByHash", jsonrpc::PARAMS_BY_POSITION, jsonrpc::JSON_OBJECT, "param1", jsonrpc::JSON_STRING, NULL), @@ -51,10 +51,10 @@ class EthFace : public ServerInterface { jsonrpc::JSON_OBJECT, "param1", jsonrpc::JSON_STRING, NULL), &taraxa::net::EthFace::eth_getUncleCountByBlockNumberI); this->bindAndAddMethod(jsonrpc::Procedure("eth_getCode", jsonrpc::PARAMS_BY_POSITION, jsonrpc::JSON_STRING, - "param1", jsonrpc::JSON_STRING, "param2", jsonrpc::JSON_STRING, NULL), + "param1", jsonrpc::JSON_STRING, "param2", jsonrpc::JSON_OBJECT, NULL), &taraxa::net::EthFace::eth_getCodeI); this->bindAndAddMethod(jsonrpc::Procedure("eth_call", jsonrpc::PARAMS_BY_POSITION, jsonrpc::JSON_STRING, "param1", - jsonrpc::JSON_OBJECT, "param2", jsonrpc::JSON_STRING, NULL), + jsonrpc::JSON_OBJECT, "param2", jsonrpc::JSON_OBJECT, NULL), &taraxa::net::EthFace::eth_callI); this->bindAndAddMethod(jsonrpc::Procedure("eth_getBlockByHash", jsonrpc::PARAMS_BY_POSITION, jsonrpc::JSON_OBJECT, "param1", jsonrpc::JSON_STRING, "param2", jsonrpc::JSON_BOOLEAN, NULL), @@ -138,16 +138,16 @@ class EthFace : public ServerInterface { response = this->eth_blockNumber(); } inline virtual void eth_getBalanceI(const Json::Value &request, Json::Value &response) { - response = this->eth_getBalance(request[0u].asString(), request[1u].asString()); + response = this->eth_getBalance(request[0u].asString(), request[1u]); } inline virtual void eth_getStorageAtI(const Json::Value &request, Json::Value &response) { - response = this->eth_getStorageAt(request[0u].asString(), request[1u].asString(), request[2u].asString()); + response = this->eth_getStorageAt(request[0u].asString(), request[1u].asString(), request[2u]); } inline virtual void 
eth_getStorageRootI(const Json::Value &request, Json::Value &response) { response = this->eth_getStorageRoot(request[0u].asString(), request[1u].asString()); } inline virtual void eth_getTransactionCountI(const Json::Value &request, Json::Value &response) { - response = this->eth_getTransactionCount(request[0u].asString(), request[1u].asString()); + response = this->eth_getTransactionCount(request[0u].asString(), request[1u]); } inline virtual void eth_getBlockTransactionCountByHashI(const Json::Value &request, Json::Value &response) { response = this->eth_getBlockTransactionCountByHash(request[0u].asString()); @@ -162,10 +162,10 @@ class EthFace : public ServerInterface { response = this->eth_getUncleCountByBlockNumber(request[0u].asString()); } inline virtual void eth_getCodeI(const Json::Value &request, Json::Value &response) { - response = this->eth_getCode(request[0u].asString(), request[1u].asString()); + response = this->eth_getCode(request[0u].asString(), request[1u]); } inline virtual void eth_callI(const Json::Value &request, Json::Value &response) { - response = this->eth_call(request[0u], request[1u].asString()); + response = this->eth_call(request[0u], request[1u]); } inline virtual void eth_getBlockByHashI(const Json::Value &request, Json::Value &response) { response = this->eth_getBlockByHash(request[0u].asString(), request[1u].asBool()); @@ -233,17 +233,17 @@ class EthFace : public ServerInterface { virtual std::string eth_gasPrice() = 0; virtual Json::Value eth_accounts() = 0; virtual std::string eth_blockNumber() = 0; - virtual std::string eth_getBalance(const std::string ¶m1, const std::string ¶m2) = 0; + virtual std::string eth_getBalance(const std::string ¶m1, const Json::Value ¶m2) = 0; virtual std::string eth_getStorageAt(const std::string ¶m1, const std::string ¶m2, - const std::string ¶m3) = 0; + const Json::Value ¶m3) = 0; virtual std::string eth_getStorageRoot(const std::string ¶m1, const std::string ¶m2) = 0; - virtual std::string 
eth_getTransactionCount(const std::string ¶m1, const std::string ¶m2) = 0; + virtual std::string eth_getTransactionCount(const std::string ¶m1, const Json::Value ¶m2) = 0; virtual Json::Value eth_getBlockTransactionCountByHash(const std::string ¶m1) = 0; virtual Json::Value eth_getBlockTransactionCountByNumber(const std::string ¶m1) = 0; virtual Json::Value eth_getUncleCountByBlockHash(const std::string ¶m1) = 0; virtual Json::Value eth_getUncleCountByBlockNumber(const std::string ¶m1) = 0; - virtual std::string eth_getCode(const std::string ¶m1, const std::string ¶m2) = 0; - virtual std::string eth_call(const Json::Value ¶m1, const std::string ¶m2) = 0; + virtual std::string eth_getCode(const std::string ¶m1, const Json::Value ¶m2) = 0; + virtual std::string eth_call(const Json::Value ¶m1, const Json::Value ¶m2) = 0; virtual Json::Value eth_getBlockByHash(const std::string ¶m1, bool param2) = 0; virtual Json::Value eth_getBlockByNumber(const std::string ¶m1, bool param2) = 0; virtual Json::Value eth_getTransactionByHash(const std::string ¶m1) = 0; diff --git a/libraries/core_libs/network/rpc/README.md b/libraries/core_libs/network/rpc/README.md new file mode 100644 index 0000000000..17c6edac3e --- /dev/null +++ b/libraries/core_libs/network/rpc/README.md @@ -0,0 +1,6 @@ +# How to generate new API interface +``` +sudo apt install libjsonrpccpp-tools +make gen_rpc_stubs +make clang-format +``` \ No newline at end of file diff --git a/libraries/core_libs/network/rpc/TaraxaFace.h b/libraries/core_libs/network/rpc/TaraxaFace.h index 8d02894579..acf4d29c0d 100644 --- a/libraries/core_libs/network/rpc/TaraxaFace.h +++ b/libraries/core_libs/network/rpc/TaraxaFace.h @@ -40,7 +40,7 @@ class TaraxaFace : public ServerInterface { &taraxa::net::TaraxaFace::taraxa_getConfigI); this->bindAndAddMethod( jsonrpc::Procedure("taraxa_getChainStats", jsonrpc::PARAMS_BY_POSITION, jsonrpc::JSON_OBJECT, NULL), - &taraxa::net::TaraxaFace::taraxa_getStatsI); + 
&taraxa::net::TaraxaFace::taraxa_getChainStatsI); this->bindAndAddMethod(jsonrpc::Procedure("taraxa_pbftBlockHashByPeriod", jsonrpc::PARAMS_BY_POSITION, jsonrpc::JSON_STRING, "param1", jsonrpc::JSON_STRING, NULL), &taraxa::net::TaraxaFace::taraxa_pbftBlockHashByPeriodI); @@ -75,7 +75,7 @@ class TaraxaFace : public ServerInterface { (void)request; response = this->taraxa_getConfig(); } - inline virtual void taraxa_getStatsI(const Json::Value &request, Json::Value &response) { + inline virtual void taraxa_getChainStatsI(const Json::Value &request, Json::Value &response) { (void)request; response = this->taraxa_getChainStats(); } diff --git a/libraries/core_libs/network/rpc/eth/Eth.cpp b/libraries/core_libs/network/rpc/eth/Eth.cpp index 7a4b83a91c..3f796586c7 100644 --- a/libraries/core_libs/network/rpc/eth/Eth.cpp +++ b/libraries/core_libs/network/rpc/eth/Eth.cpp @@ -127,14 +127,14 @@ class EthImpl : public Eth, EthParams { string eth_blockNumber() override { return toJS(final_chain->last_block_number()); } - string eth_getBalance(const string& _address, const string& _blockNumber) override { - return toJS( - final_chain->get_account(toAddress(_address), parse_blk_num(_blockNumber)).value_or(ZeroAccount).balance); + string eth_getBalance(const string& _address, const Json::Value& _json) override { + const auto block_number = get_block_number_from_json(_json); + return toJS(final_chain->get_account(toAddress(_address), block_number).value_or(ZeroAccount).balance); } - string eth_getStorageAt(const string& _address, const string& _position, const string& _blockNumber) override { - return toJS( - final_chain->get_account_storage(toAddress(_address), jsToU256(_position), parse_blk_num(_blockNumber))); + string eth_getStorageAt(const string& _address, const string& _position, const Json::Value& _json) override { + const auto block_number = get_block_number_from_json(_json); + return toJS(final_chain->get_account_storage(toAddress(_address), jsToU256(_position), 
block_number)); } string eth_getStorageRoot(const string& _address, const string& _blockNumber) override { @@ -143,15 +143,16 @@ class EthImpl : public Eth, EthParams { .storage_root_eth()); } - string eth_getCode(const string& _address, const string& _blockNumber) override { - return toJS(final_chain->get_code(toAddress(_address), parse_blk_num(_blockNumber))); + string eth_getCode(const string& _address, const Json::Value& _json) override { + const auto block_number = get_block_number_from_json(_json); + return toJS(final_chain->get_code(toAddress(_address), block_number)); } - string eth_call(const Json::Value& _json, const string& _blockNumber) override { + string eth_call(const Json::Value& _json, const Json::Value& _jsonBlock) override { + const auto block_number = get_block_number_from_json(_jsonBlock); auto t = toTransactionSkeleton(_json); - auto blk_n = parse_blk_num(_blockNumber); - prepare_transaction_for_call(t, blk_n); - return toJS(call(blk_n, t).code_retval); + prepare_transaction_for_call(t, block_number); + return toJS(call(block_number, t).code_retval); } string eth_estimateGas(const Json::Value& _json) override { @@ -161,8 +162,9 @@ class EthImpl : public Eth, EthParams { return toJS(call(blk_n, t).gas_used); } - string eth_getTransactionCount(const string& _address, const string& _blockNumber) override { - return toJS(transaction_count(parse_blk_num(_blockNumber), toAddress(_address))); + string eth_getTransactionCount(const string& _address, const Json::Value& _json) override { + const auto block_number = get_block_number_from_json(_json); + return toJS(transaction_count(block_number, toAddress(_address))); } Json::Value eth_getBlockTransactionCountByHash(const string& _blockHash) override { @@ -426,7 +428,7 @@ class EthImpl : public Eth, EthParams { } static optional parse_blk_num_specific(const string& blk_num_str) { - if (blk_num_str == "latest" || blk_num_str == "pending") { + if (blk_num_str == "latest" || blk_num_str == "pending" || 
blk_num_str == "safe" || blk_num_str == "finalized") { return std::nullopt; } return blk_num_str == "earliest" ? 0 : jsToInt(blk_num_str); @@ -437,6 +439,21 @@ class EthImpl : public Eth, EthParams { return ret ? *ret : final_chain->last_block_number(); } + EthBlockNumber get_block_number_from_json(const Json::Value& json) { + if (json.isObject()) { + if (!json["blockNumber"].empty()) { + return parse_blk_num(json["blockNumber"].asString()); + } + if (!json["blockHash"].empty()) { + if (auto ret = final_chain->block_number(jsToFixed<32>(json["blockHash"].asString()))) { + return *ret; + } + throw std::runtime_error("Resource not found"); + } + } + return parse_blk_num(json.asString()); + } + LogFilter parse_log_filter(const Json::Value& json) { EthBlockNumber from_block; optional to_block; diff --git a/tests/rpc_test.cpp b/tests/rpc_test.cpp index 85f2b98706..a104432dfb 100644 --- a/tests/rpc_test.cpp +++ b/tests/rpc_test.cpp @@ -233,6 +233,27 @@ TEST_F(RPCTest, eth_getBlock) { EXPECT_EQ(4, dev::jsToU256(block["number"].asString())); EXPECT_GT(dev::jsToU256(block["totalReward"].asString()), 0); } + +TEST_F(RPCTest, eip_1898) { + auto node_cfg = make_node_cfgs(1); + auto nodes = launch_nodes(node_cfg); + net::rpc::eth::EthParams eth_rpc_params; + eth_rpc_params.chain_id = node_cfg.front().genesis.chain_id; + eth_rpc_params.gas_limit = node_cfg.front().genesis.dag.gas_limit; + eth_rpc_params.final_chain = nodes.front()->getFinalChain(); + auto eth_json_rpc = net::rpc::eth::NewEth(std::move(eth_rpc_params)); + + const auto from = dev::toHex(dev::toAddress(node_cfg.front().node_secret)); + + Json::Value zero_block(Json::objectValue); + zero_block["blockNumber"] = dev::toJS(0); + EXPECT_EQ(eth_json_rpc->eth_getBalance(from, "0x0"), eth_json_rpc->eth_getBalance(from, zero_block)); + + Json::Value genesis_block(Json::objectValue); + genesis_block["blockHash"] = dev::toJS(*nodes.front()->getFinalChain()->block_hash(0)); + EXPECT_EQ(eth_json_rpc->eth_getBalance(from, 
"0x0"), eth_json_rpc->eth_getBalance(from, genesis_block)); +} + } // namespace taraxa::core_tests using namespace taraxa; From 35ffa41fa44c0e6ee4ce601bedbb1514a698808b Mon Sep 17 00:00:00 2001 From: JakubFornadel Date: Thu, 16 Mar 2023 12:50:21 +0100 Subject: [PATCH 071/162] fix: do not broadcast votes that were not processed + request pbft syncing only vote sender is also vote author --- .../packets_handlers/common/ext_votes_packet_handler.cpp | 3 ++- .../src/tarcap/packets_handlers/vote_packet_handler.cpp | 4 +++- .../tarcap/packets_handlers/votes_sync_packet_handler.cpp | 5 ++++- 3 files changed, 9 insertions(+), 3 deletions(-) diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/common/ext_votes_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/common/ext_votes_packet_handler.cpp index d90bcb4326..a1bededff5 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/common/ext_votes_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/common/ext_votes_packet_handler.cpp @@ -85,7 +85,8 @@ std::pair ExtVotesPacketHandler::validateVotePeriodRoundStep( // skip this check if kConf.network.ddos_protection.vote_accepting_periods == 0 // vote->getPeriod() - 1 is here because votes are validated against vote_period - 1 in dpos contract // Do not request round sync too often here - if (std::chrono::system_clock::now() - last_pbft_block_sync_request_time_ > kSyncRequestInterval) { + if (vote->getVoter() == peer->getId() && + std::chrono::system_clock::now() - last_pbft_block_sync_request_time_ > kSyncRequestInterval) { // request PBFT chain sync from this node sealAndSend(peer->getId(), SubprotocolPacketType::GetPbftSyncPacket, std::move(dev::RLPStream(1) << std::max(vote->getPeriod() - 1, peer->pbft_chain_size_.load()))); diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/vote_packet_handler.cpp 
b/libraries/core_libs/network/src/tarcap/packets_handlers/vote_packet_handler.cpp index 7bd333abc9..dd213a4e63 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/vote_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/vote_packet_handler.cpp @@ -63,7 +63,9 @@ void VotePacketHandler::process(const PacketData &packet_data, const std::shared peer->markPbftBlockAsKnown(pbft_block->getBlockHash()); } - processVote(vote, pbft_block, peer, true); + if (processVote(vote, pbft_block, peer, true)) { + return; + } // Do not mark it before, as peers have small caches of known votes. Only mark gossiping votes peer->markVoteAsKnown(vote_hash); diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/votes_sync_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/votes_sync_packet_handler.cpp index 88faf800ce..02124ebbb2 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/votes_sync_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/votes_sync_packet_handler.cpp @@ -114,7 +114,10 @@ void VotesSyncPacketHandler::process(const PacketData &packet_data, const std::s // Process processStandardVote is called with false in case of next votes bundle -> does not check max boundaries // for round and step to actually being able to sync the current round in case network is stalled bool check_max_round_step = votes_bundle_votes_type == PbftVoteTypes::next_vote ? 
false : true; - processVote(vote, nullptr, peer, check_max_round_step); + if (processVote(vote, nullptr, peer, check_max_round_step)) { + continue; + } + votes.push_back(std::move(vote)); } From 5aac615aa0b918a6a9ee107555e809262ae06065 Mon Sep 17 00:00:00 2001 From: JakubFornadel Date: Thu, 16 Mar 2023 13:09:45 +0100 Subject: [PATCH 072/162] fix: request votes syncing only if packet sender == vote author --- .../packets_handlers/common/ext_votes_packet_handler.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/common/ext_votes_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/common/ext_votes_packet_handler.cpp index a1bededff5..759a0752e5 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/common/ext_votes_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/common/ext_votes_packet_handler.cpp @@ -114,7 +114,8 @@ std::pair ExtVotesPacketHandler::validateVotePeriodRoundStep( // Trigger votes(round) syncing only if we are in sync in terms of period if (current_pbft_period == vote->getPeriod()) { // Do not request round sync too often here - if (std::chrono::system_clock::now() - last_votes_sync_request_time_ > kSyncRequestInterval) { + if (vote->getVoter() == peer->getId() && + std::chrono::system_clock::now() - last_votes_sync_request_time_ > kSyncRequestInterval) { // request round votes sync from this node requestPbftNextVotesAtPeriodRound(peer->getId(), current_pbft_period, current_pbft_round); last_votes_sync_request_time_ = std::chrono::system_clock::now(); From a7da136246e95429e490451b99b929f89143f169 Mon Sep 17 00:00:00 2001 From: Matus Kysel Date: Thu, 16 Mar 2023 15:35:50 +0100 Subject: [PATCH 073/162] chore: introduce HACK for json-rpc function params overloading --- libraries/core_libs/network/rpc/EthFace.h | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git 
a/libraries/core_libs/network/rpc/EthFace.h b/libraries/core_libs/network/rpc/EthFace.h index 2723def03e..e17fa2acaf 100644 --- a/libraries/core_libs/network/rpc/EthFace.h +++ b/libraries/core_libs/network/rpc/EthFace.h @@ -9,6 +9,8 @@ namespace taraxa { namespace net { +// Please read README +const int JSON_ANY = 0; class EthFace : public ServerInterface { public: EthFace() { @@ -25,18 +27,18 @@ class EthFace : public ServerInterface { jsonrpc::Procedure("eth_blockNumber", jsonrpc::PARAMS_BY_POSITION, jsonrpc::JSON_STRING, NULL), &taraxa::net::EthFace::eth_blockNumberI); this->bindAndAddMethod(jsonrpc::Procedure("eth_getBalance", jsonrpc::PARAMS_BY_POSITION, jsonrpc::JSON_STRING, - "param1", jsonrpc::JSON_STRING, "param2", jsonrpc::JSON_OBJECT, NULL), + "param1", JSON_ANY, "param2", jsonrpc::JSON_OBJECT, NULL), &taraxa::net::EthFace::eth_getBalanceI); this->bindAndAddMethod( jsonrpc::Procedure("eth_getStorageAt", jsonrpc::PARAMS_BY_POSITION, jsonrpc::JSON_STRING, "param1", - jsonrpc::JSON_STRING, "param2", jsonrpc::JSON_STRING, "param3", jsonrpc::JSON_OBJECT, NULL), + jsonrpc::JSON_STRING, "param2", JSON_ANY, "param3", jsonrpc::JSON_OBJECT, NULL), &taraxa::net::EthFace::eth_getStorageAtI); this->bindAndAddMethod(jsonrpc::Procedure("eth_getStorageRoot", jsonrpc::PARAMS_BY_POSITION, jsonrpc::JSON_STRING, "param1", jsonrpc::JSON_STRING, "param2", jsonrpc::JSON_STRING, NULL), &taraxa::net::EthFace::eth_getStorageRootI); this->bindAndAddMethod( jsonrpc::Procedure("eth_getTransactionCount", jsonrpc::PARAMS_BY_POSITION, jsonrpc::JSON_STRING, "param1", - jsonrpc::JSON_STRING, "param2", jsonrpc::JSON_OBJECT, NULL), + JSON_ANY, "param2", jsonrpc::JSON_OBJECT, NULL), &taraxa::net::EthFace::eth_getTransactionCountI); this->bindAndAddMethod(jsonrpc::Procedure("eth_getBlockTransactionCountByHash", jsonrpc::PARAMS_BY_POSITION, jsonrpc::JSON_OBJECT, "param1", jsonrpc::JSON_STRING, NULL), @@ -51,10 +53,10 @@ class EthFace : public ServerInterface { jsonrpc::JSON_OBJECT, "param1", 
jsonrpc::JSON_STRING, NULL), &taraxa::net::EthFace::eth_getUncleCountByBlockNumberI); this->bindAndAddMethod(jsonrpc::Procedure("eth_getCode", jsonrpc::PARAMS_BY_POSITION, jsonrpc::JSON_STRING, - "param1", jsonrpc::JSON_STRING, "param2", jsonrpc::JSON_OBJECT, NULL), + "param1", JSON_ANY, "param2", jsonrpc::JSON_OBJECT, NULL), &taraxa::net::EthFace::eth_getCodeI); this->bindAndAddMethod(jsonrpc::Procedure("eth_call", jsonrpc::PARAMS_BY_POSITION, jsonrpc::JSON_STRING, "param1", - jsonrpc::JSON_OBJECT, "param2", jsonrpc::JSON_OBJECT, NULL), + JSON_ANY, "param2", jsonrpc::JSON_OBJECT, NULL), &taraxa::net::EthFace::eth_callI); this->bindAndAddMethod(jsonrpc::Procedure("eth_getBlockByHash", jsonrpc::PARAMS_BY_POSITION, jsonrpc::JSON_OBJECT, "param1", jsonrpc::JSON_STRING, "param2", jsonrpc::JSON_BOOLEAN, NULL), From 293a7a97303b6aa7d1b7a60cf1ed4dbcdc248f57 Mon Sep 17 00:00:00 2001 From: Matus Kysel Date: Thu, 16 Mar 2023 15:36:10 +0100 Subject: [PATCH 074/162] chore: disable gen_rpc_stubs --- .../core_libs/network/rpc/CMakeLists.txt | 32 +++++++++---------- libraries/core_libs/network/rpc/README.md | 7 ++-- 2 files changed, 21 insertions(+), 18 deletions(-) diff --git a/libraries/core_libs/network/rpc/CMakeLists.txt b/libraries/core_libs/network/rpc/CMakeLists.txt index 08056792fa..23273a886a 100644 --- a/libraries/core_libs/network/rpc/CMakeLists.txt +++ b/libraries/core_libs/network/rpc/CMakeLists.txt @@ -1,20 +1,20 @@ # Note: run make gen_rpc_stubs to re-generate rpc classes -include(EthDependencies) -include(EthExecutableHelper) +# include(EthDependencies) +# include(EthExecutableHelper) -find_program(ETH_JSON_RPC_STUB jsonrpcstub) +# find_program(ETH_JSON_RPC_STUB jsonrpcstub) -set(CPP_NAMESPACE taraxa::net) -file(GLOB API_DEF_FILES "${CMAKE_CURRENT_SOURCE_DIR}/*.jsonrpc.json") -add_custom_target(gen_rpc_stubs) -foreach (api_def_file ${API_DEF_FILES}) - get_filename_component(api_name ${api_def_file} NAME_WE) - jsonrpcstub_create( - gen_rpc_stubs 
${api_name}.jsonrpc.json - ${CPP_NAMESPACE}::${api_name}Face - ${CMAKE_CURRENT_SOURCE_DIR} ${api_name}Face - ${CPP_NAMESPACE}::${api_name}Client - ${CMAKE_CURRENT_SOURCE_DIR} ${api_name}Client - ) -endforeach () +# set(CPP_NAMESPACE taraxa::net) +# file(GLOB API_DEF_FILES "${CMAKE_CURRENT_SOURCE_DIR}/*.jsonrpc.json") +# add_custom_target(gen_rpc_stubs) +# foreach (api_def_file ${API_DEF_FILES}) +# get_filename_component(api_name ${api_def_file} NAME_WE) +# jsonrpcstub_create( +# gen_rpc_stubs ${api_name}.jsonrpc.json +# ${CPP_NAMESPACE}::${api_name}Face +# ${CMAKE_CURRENT_SOURCE_DIR} ${api_name}Face +# ${CPP_NAMESPACE}::${api_name}Client +# ${CMAKE_CURRENT_SOURCE_DIR} ${api_name}Client +# ) +# endforeach () diff --git a/libraries/core_libs/network/rpc/README.md b/libraries/core_libs/network/rpc/README.md index 17c6edac3e..761ae481ff 100644 --- a/libraries/core_libs/network/rpc/README.md +++ b/libraries/core_libs/network/rpc/README.md @@ -1,6 +1,9 @@ -# How to generate new API interface +# DISABLED !!!! How to generate new API interface ``` sudo apt install libjsonrpccpp-tools make gen_rpc_stubs make clang-format -``` \ No newline at end of file +``` + +# PLEASE READ +As libjsonrpccpp doesn't support function argument overloads, I had to make a HACK and introduce `JSON_ANY`. `jsonrpc::Procedure` is created to check validation of passed arguments, but as it is not a class enum we can use anything above 7 or the number 0 to disable this check; see https://github.com/cinemast/libjson-rpc-cpp/blob/d5ede2277d849f1a9d2dc111c4ec3ea652bd31ec/src/jsonrpccpp/common/specification.h#L29 . 
That's why generation via `gen_rpc_stubs` is disabled, because it would overwrite my hack in `EthFace.h` \ No newline at end of file From 2245bc855146fdf248531c3e974fcfaec39feae9 Mon Sep 17 00:00:00 2001 From: Matus Kysel Date: Thu, 16 Mar 2023 15:54:26 +0100 Subject: [PATCH 075/162] chore: modify rest of the API calls to accept int for block num --- libraries/core_libs/network/rpc/EthFace.h | 23 +++++++++++------------ 1 file changed, 11 insertions(+), 12 deletions(-) diff --git a/libraries/core_libs/network/rpc/EthFace.h b/libraries/core_libs/network/rpc/EthFace.h index e17fa2acaf..d3d415636c 100644 --- a/libraries/core_libs/network/rpc/EthFace.h +++ b/libraries/core_libs/network/rpc/EthFace.h @@ -27,24 +27,24 @@ class EthFace : public ServerInterface { jsonrpc::Procedure("eth_blockNumber", jsonrpc::PARAMS_BY_POSITION, jsonrpc::JSON_STRING, NULL), &taraxa::net::EthFace::eth_blockNumberI); this->bindAndAddMethod(jsonrpc::Procedure("eth_getBalance", jsonrpc::PARAMS_BY_POSITION, jsonrpc::JSON_STRING, - "param1", JSON_ANY, "param2", jsonrpc::JSON_OBJECT, NULL), + "param1", jsonrpc::JSON_STRING, "param2", JSON_ANY, NULL), &taraxa::net::EthFace::eth_getBalanceI); this->bindAndAddMethod( jsonrpc::Procedure("eth_getStorageAt", jsonrpc::PARAMS_BY_POSITION, jsonrpc::JSON_STRING, "param1", - jsonrpc::JSON_STRING, "param2", JSON_ANY, "param3", jsonrpc::JSON_OBJECT, NULL), + jsonrpc::JSON_STRING, "param2", JSON_ANY, "param3", JSON_ANY, NULL), &taraxa::net::EthFace::eth_getStorageAtI); this->bindAndAddMethod(jsonrpc::Procedure("eth_getStorageRoot", jsonrpc::PARAMS_BY_POSITION, jsonrpc::JSON_STRING, - "param1", jsonrpc::JSON_STRING, "param2", jsonrpc::JSON_STRING, NULL), + "param1", jsonrpc::JSON_STRING, "param2", JSON_ANY, NULL), &taraxa::net::EthFace::eth_getStorageRootI); this->bindAndAddMethod( jsonrpc::Procedure("eth_getTransactionCount", jsonrpc::PARAMS_BY_POSITION, jsonrpc::JSON_STRING, "param1", - JSON_ANY, "param2", jsonrpc::JSON_OBJECT, NULL), + jsonrpc::JSON_STRING, 
"param2", JSON_ANY, NULL), &taraxa::net::EthFace::eth_getTransactionCountI); this->bindAndAddMethod(jsonrpc::Procedure("eth_getBlockTransactionCountByHash", jsonrpc::PARAMS_BY_POSITION, jsonrpc::JSON_OBJECT, "param1", jsonrpc::JSON_STRING, NULL), &taraxa::net::EthFace::eth_getBlockTransactionCountByHashI); this->bindAndAddMethod(jsonrpc::Procedure("eth_getBlockTransactionCountByNumber", jsonrpc::PARAMS_BY_POSITION, - jsonrpc::JSON_OBJECT, "param1", jsonrpc::JSON_STRING, NULL), + jsonrpc::JSON_OBJECT, "param1", JSON_ANY, NULL), &taraxa::net::EthFace::eth_getBlockTransactionCountByNumberI); this->bindAndAddMethod(jsonrpc::Procedure("eth_getUncleCountByBlockHash", jsonrpc::PARAMS_BY_POSITION, jsonrpc::JSON_OBJECT, "param1", jsonrpc::JSON_STRING, NULL), @@ -53,16 +53,16 @@ class EthFace : public ServerInterface { jsonrpc::JSON_OBJECT, "param1", jsonrpc::JSON_STRING, NULL), &taraxa::net::EthFace::eth_getUncleCountByBlockNumberI); this->bindAndAddMethod(jsonrpc::Procedure("eth_getCode", jsonrpc::PARAMS_BY_POSITION, jsonrpc::JSON_STRING, - "param1", JSON_ANY, "param2", jsonrpc::JSON_OBJECT, NULL), + "param1", jsonrpc::JSON_STRING, "param2", JSON_ANY, NULL), &taraxa::net::EthFace::eth_getCodeI); this->bindAndAddMethod(jsonrpc::Procedure("eth_call", jsonrpc::PARAMS_BY_POSITION, jsonrpc::JSON_STRING, "param1", - JSON_ANY, "param2", jsonrpc::JSON_OBJECT, NULL), + jsonrpc::JSON_OBJECT, "param2", JSON_ANY, NULL), &taraxa::net::EthFace::eth_callI); this->bindAndAddMethod(jsonrpc::Procedure("eth_getBlockByHash", jsonrpc::PARAMS_BY_POSITION, jsonrpc::JSON_OBJECT, "param1", jsonrpc::JSON_STRING, "param2", jsonrpc::JSON_BOOLEAN, NULL), &taraxa::net::EthFace::eth_getBlockByHashI); this->bindAndAddMethod(jsonrpc::Procedure("eth_getBlockByNumber", jsonrpc::PARAMS_BY_POSITION, jsonrpc::JSON_OBJECT, - "param1", jsonrpc::JSON_STRING, "param2", jsonrpc::JSON_BOOLEAN, NULL), + "param1", JSON_ANY, "param2", jsonrpc::JSON_BOOLEAN, NULL), &taraxa::net::EthFace::eth_getBlockByNumberI); 
this->bindAndAddMethod(jsonrpc::Procedure("eth_getTransactionByHash", jsonrpc::PARAMS_BY_POSITION, jsonrpc::JSON_OBJECT, "param1", jsonrpc::JSON_STRING, NULL), @@ -71,10 +71,9 @@ class EthFace : public ServerInterface { jsonrpc::Procedure("eth_getTransactionByBlockHashAndIndex", jsonrpc::PARAMS_BY_POSITION, jsonrpc::JSON_OBJECT, "param1", jsonrpc::JSON_STRING, "param2", jsonrpc::JSON_STRING, NULL), &taraxa::net::EthFace::eth_getTransactionByBlockHashAndIndexI); - this->bindAndAddMethod( - jsonrpc::Procedure("eth_getTransactionByBlockNumberAndIndex", jsonrpc::PARAMS_BY_POSITION, jsonrpc::JSON_OBJECT, - "param1", jsonrpc::JSON_STRING, "param2", jsonrpc::JSON_STRING, NULL), - &taraxa::net::EthFace::eth_getTransactionByBlockNumberAndIndexI); + this->bindAndAddMethod(jsonrpc::Procedure("eth_getTransactionByBlockNumberAndIndex", jsonrpc::PARAMS_BY_POSITION, + jsonrpc::JSON_OBJECT, "param1", JSON_ANY, "param2", JSON_ANY, NULL), + &taraxa::net::EthFace::eth_getTransactionByBlockNumberAndIndexI); this->bindAndAddMethod(jsonrpc::Procedure("eth_getTransactionReceipt", jsonrpc::PARAMS_BY_POSITION, jsonrpc::JSON_OBJECT, "param1", jsonrpc::JSON_STRING, NULL), &taraxa::net::EthFace::eth_getTransactionReceiptI); From 8ad4b47af1020549030d1c5f976d75892d55f5a2 Mon Sep 17 00:00:00 2001 From: Robert Jonczy Date: Fri, 17 Mar 2023 21:50:34 +0100 Subject: [PATCH 076/162] remove rpc nodes --- .../cli/config_jsons/testnet/testnet_config.json | 15 --------------- 1 file changed, 15 deletions(-) diff --git a/libraries/cli/include/cli/config_jsons/testnet/testnet_config.json b/libraries/cli/include/cli/config_jsons/testnet/testnet_config.json index 1b304297ba..21051126f2 100644 --- a/libraries/cli/include/cli/config_jsons/testnet/testnet_config.json +++ b/libraries/cli/include/cli/config_jsons/testnet/testnet_config.json @@ -49,21 +49,6 @@ "id": "c6e7263f44d88c0d6cc3b0d5ebdc31cf891908e8fa7e545e137d3ed0bfec1810fa24c1379228afbb53df0d59e716e17138115fd096782a84261718ab77665171", "ip": 
"boot-node-2.testnet.taraxa.io", "port": 10002 - }, - { - "id": "dcc7f60f64eee9fc470e24fd821b3d82acca646c6c951169a8f5b3de297e241cd9987e1cca1083c85b6482ba09715bbea8765710b149c55493e0bb16fc1c29cc", - "ip": "taraxa-node-0.testnet.taraxa.io", - "port": 10002 - }, - { - "id": "dedae37d9c96e0f2c8ef2796351f5560234a3e8e0407ef5cd16aaf93dd8ffd437608a585d5dc3eea54e30e1392591cfc61c6fce3cbf12b46dcc479b5fbdcde96", - "ip": "taraxa-node-1.testnet.taraxa.io", - "port": 10002 - }, - { - "id": "edc73153c2aa5991aee267f46d3ca153fa15f61eb7847b3f4c8b8fa308282b93a315a071bd5ff4327dd321faf4d979710b0e681a7088f65552ce0065fa7683fd", - "ip": "taraxa-node-2.testnet.taraxa.io", - "port": 10002 } ] }, From 84c14244dd20bf58df5ffdadc258f349d08b26e8 Mon Sep 17 00:00:00 2001 From: Matus Kysel Date: Sat, 18 Mar 2023 08:48:12 +0100 Subject: [PATCH 077/162] chore: fix replacing nodes from table --- libraries/aleth/libp2p/Host.h | 2 +- libraries/core_libs/network/src/tarcap/taraxa_capability.cpp | 5 +++++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/libraries/aleth/libp2p/Host.h b/libraries/aleth/libp2p/Host.h index fde4ea5d60..b24b70c9dd 100644 --- a/libraries/aleth/libp2p/Host.h +++ b/libraries/aleth/libp2p/Host.h @@ -145,9 +145,9 @@ struct Host final : std::enable_shared_from_this { return "enode://" + id().hex() + "@" + address + ":" + port; } + bool nodeTableHasNode(Public const& _id) const; // private but can be made public if needed private: - bool nodeTableHasNode(Public const& _id) const; Node nodeFromNodeTable(Public const& _id) const; struct KnownNode { diff --git a/libraries/core_libs/network/src/tarcap/taraxa_capability.cpp b/libraries/core_libs/network/src/tarcap/taraxa_capability.cpp index 4744e12bdd..2920a5de36 100644 --- a/libraries/core_libs/network/src/tarcap/taraxa_capability.cpp +++ b/libraries/core_libs/network/src/tarcap/taraxa_capability.cpp @@ -105,6 +105,11 @@ void TaraxaCapability::addBootNodes(bool initial) { continue; } + if (host->nodeTableHasNode(pub)) { + 
LOG(log_dg_) << "skipping node " << node.id << " already in table"; + continue; + } + auto ip = resolveHost(node.ip, node.port); LOG(log_nf_) << "Adding boot node:" << node.ip << ":" << node.port << " " << ip.second.address().to_string(); dev::p2p::Node boot_node(pub, dev::p2p::NodeIPEndpoint(ip.second.address(), node.port, node.port), From f9cbfdb5e81d736607ece154d323fea9fce60958 Mon Sep 17 00:00:00 2001 From: JakubFornadel Date: Mon, 20 Mar 2023 12:15:21 +0100 Subject: [PATCH 078/162] fix processVote return value check --- .../packets_handlers/common/ext_votes_packet_handler.cpp | 2 +- .../src/tarcap/packets_handlers/vote_packet_handler.cpp | 2 +- .../tarcap/packets_handlers/votes_sync_packet_handler.cpp | 8 ++++++-- 3 files changed, 8 insertions(+), 4 deletions(-) diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/common/ext_votes_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/common/ext_votes_packet_handler.cpp index 759a0752e5..4a8e9f67b9 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/common/ext_votes_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/common/ext_votes_packet_handler.cpp @@ -30,7 +30,7 @@ bool ExtVotesPacketHandler::processVote(const std::shared_ptr &vote, const return false; } - // Validate vote's period, roun and step min/max values + // Validate vote's period, round and step min/max values if (const auto vote_valid = validateVotePeriodRoundStep(vote, peer, validate_max_round_step); !vote_valid.first) { LOG(log_wr_) << "Vote period/round/step " << vote->getHash() << " validation failed. 
Err: " << vote_valid.second; return false; diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/vote_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/vote_packet_handler.cpp index dd213a4e63..7c4f3aa6cd 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/vote_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/vote_packet_handler.cpp @@ -63,7 +63,7 @@ void VotePacketHandler::process(const PacketData &packet_data, const std::shared peer->markPbftBlockAsKnown(pbft_block->getBlockHash()); } - if (processVote(vote, pbft_block, peer, true)) { + if (!processVote(vote, pbft_block, peer, true)) { return; } diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/votes_sync_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/votes_sync_packet_handler.cpp index 02124ebbb2..74f4b2e30a 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/votes_sync_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/votes_sync_packet_handler.cpp @@ -113,8 +113,12 @@ void VotesSyncPacketHandler::process(const PacketData &packet_data, const std::s // Process processStandardVote is called with false in case of next votes bundle -> does not check max boundaries // for round and step to actually being able to sync the current round in case network is stalled - bool check_max_round_step = votes_bundle_votes_type == PbftVoteTypes::next_vote ? 
false : true; - if (processVote(vote, nullptr, peer, check_max_round_step)) { + bool check_max_round_step = true; + if (votes_bundle_votes_type == PbftVoteTypes::cert_vote || votes_bundle_votes_type == PbftVoteTypes::next_vote) { + check_max_round_step = false; + } + + if (!processVote(vote, nullptr, peer, check_max_round_step)) { continue; } From a1e84ed55c17d30df217d3eb71d02e9ce26a9b6a Mon Sep 17 00:00:00 2001 From: kstdl Date: Tue, 21 Mar 2023 10:38:47 +0100 Subject: [PATCH 079/162] fix: cpp-check warnings on build --- CMakeModules/cppcheck.cmake | 5 ++++- .../tarcap/packets_handlers/transaction_packet_handler.cpp | 7 +++---- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/CMakeModules/cppcheck.cmake b/CMakeModules/cppcheck.cmake index a9c719e5bf..4f7a0b22bb 100644 --- a/CMakeModules/cppcheck.cmake +++ b/CMakeModules/cppcheck.cmake @@ -32,9 +32,12 @@ else () # exclude graphql generated -i ${PROJECT_SOURCE_DIR}/libraries/core_libs/network/graphql/gen/ # messy files + --suppress=unmatchedSuppression:${PROJECT_SOURCE_DIR}/*/vector_ref.h + --suppress=unmatchedSuppression:${PROJECT_SOURCE_DIR}/*/Common.h --suppress=cstyleCast:${PROJECT_SOURCE_DIR}/*/vector_ref.h --suppress=cstyleCast:${PROJECT_SOURCE_DIR}/*/Common.h - + #not an issue here + --suppress=virtualCallInConstructor:${PROJECT_SOURCE_DIR}/*/final_chain.cpp # Only show found errors "--quiet" diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/transaction_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/transaction_packet_handler.cpp index 86db1ae093..6132d0091c 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/transaction_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/transaction_packet_handler.cpp @@ -127,11 +127,7 @@ void TransactionPacketHandler::periodicSendTransactions(SharedTransactions &&tra std::vector> peers_with_transactions_to_send; auto peers = peers_state_->getAllPeers(); - std::string 
transactions_to_log; std::string peers_to_log; - for (auto const &trx : transactions) { - transactions_to_log += trx->getHash().abridged(); - } for (const auto &peer : peers) { // Confirm that status messages were exchanged otherwise message might be ignored and node would // incorrectly markTransactionAsKnown @@ -150,6 +146,9 @@ void TransactionPacketHandler::periodicSendTransactions(SharedTransactions &&tra } const auto peers_to_send_count = peers_with_transactions_to_send.size(); if (peers_to_send_count > 0) { + auto transactions_to_log = + std::accumulate(transactions.begin(), transactions.end(), std::string{}, + [](const auto &r, const auto &trx) { return r + trx->getHash().abridged(); }); LOG(log_tr_) << "Sending Transactions " << transactions_to_log << " to " << peers_to_log; // Sending it in same order favours some peers over others, always start with a different position uint32_t start_with = rand() % peers_to_send_count; From dacb4a65557087b38d664236075e0f0fffa5d2a3 Mon Sep 17 00:00:00 2001 From: Matus Kysel Date: Tue, 21 Mar 2023 21:38:33 +0100 Subject: [PATCH 080/162] chore: fix rebuilding db columns --- libraries/core_libs/storage/src/storage.cpp | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/libraries/core_libs/storage/src/storage.cpp b/libraries/core_libs/storage/src/storage.cpp index 5b6eca9cc1..d3bd850c93 100644 --- a/libraries/core_libs/storage/src/storage.cpp +++ b/libraries/core_libs/storage/src/storage.cpp @@ -106,7 +106,11 @@ void DbStorage::rebuildColumns(const rocksdb::Options& options) { std::vector handles; handles.reserve(column_families.size()); std::transform(column_families.begin(), column_families.end(), std::back_inserter(descriptors), [](const auto& name) { - return rocksdb::ColumnFamilyDescriptor(name, rocksdb::ColumnFamilyOptions()); + const auto it = std::find_if(Columns::all.begin(), Columns::all.end(), + [&name](const Column& col) { return col.name() == name; }); + auto options = 
rocksdb::ColumnFamilyOptions(); + if (it != Columns::all.end() && it->comparator_) options.comparator = it->comparator_; + return rocksdb::ColumnFamilyDescriptor(name, options); }); rocksdb::DB* db_ptr = nullptr; checkStatus(rocksdb::DB::Open(options, db_path_.string(), descriptors, &handles, &db_ptr)); From 42991cd80a25e2b23312f28dc2d1ce6d8ee6ff87 Mon Sep 17 00:00:00 2001 From: Dmytro Kostenko Date: Wed, 22 Mar 2023 10:06:55 +0100 Subject: [PATCH 081/162] fix: RLP InvalidEncodingSize inherits from RLPException --- .../common/include/common/encoding_rlp.hpp | 51 ++++++------------- 1 file changed, 16 insertions(+), 35 deletions(-) diff --git a/libraries/common/include/common/encoding_rlp.hpp b/libraries/common/include/common/encoding_rlp.hpp index e973d9decd..41a4c2c917 100644 --- a/libraries/common/include/common/encoding_rlp.hpp +++ b/libraries/common/include/common/encoding_rlp.hpp @@ -9,7 +9,7 @@ #include "common/range_view.hpp" #include "common/util.hpp" -namespace taraxa::util::encoding_rlp { +namespace taraxa::util { using dev::RLP; using RLPEncoderRef = dev::RLPStream&; @@ -153,13 +153,12 @@ void __dec_rlp_tuple_body__(RLP::iterator& i, RLP::iterator const& end, RLP::Str } } -struct InvalidEncodingSize : std::invalid_argument { - uint expected, actual; +struct InvalidEncodingSize : dev::RLPException { + dev::bigint expected, actual; - InvalidEncodingSize(uint expected, uint actual) - : invalid_argument(fmt("Invalid rlp list size; expected: %s, actual: %s", expected, actual)), - expected(expected), - actual(actual) {} + InvalidEncodingSize(uint e, uint a) : expected(e), actual(a) { + RLPException() << dev::errinfo_comment("Invalid rlp list size") << dev::RequirementError(expected, actual); + } }; template @@ -196,34 +195,16 @@ bytes rlp_enc(T const& obj) { return std::move(s.invalidate()); } -} // namespace taraxa::util::encoding_rlp - -#define HAS_RLP_FIELDS \ - void rlp(::taraxa::util::encoding_rlp::RLPDecoderRef encoding); \ - void 
rlp(::taraxa::util::encoding_rlp::RLPEncoderRef encoding) const; +} // namespace taraxa::util -#define RLP_FIELDS_DEFINE(_class_, ...) \ - void _class_::rlp(::taraxa::util::encoding_rlp::RLPDecoderRef encoding) { \ - ::taraxa::util::encoding_rlp::rlp_tuple(encoding, __VA_ARGS__); \ - } \ - void _class_::rlp(::taraxa::util::encoding_rlp::RLPEncoderRef encoding) const { \ - ::taraxa::util::encoding_rlp::rlp_tuple(encoding, __VA_ARGS__); \ - } +#define HAS_RLP_FIELDS \ + void rlp(::taraxa::util::RLPDecoderRef encoding); \ + void rlp(::taraxa::util::RLPEncoderRef encoding) const; -#define RLP_FIELDS_DEFINE_INPLACE(...) \ - void rlp(::taraxa::util::encoding_rlp::RLPDecoderRef encoding) { \ - ::taraxa::util::encoding_rlp::rlp_tuple(encoding, __VA_ARGS__); \ - } \ - void rlp(::taraxa::util::encoding_rlp::RLPEncoderRef encoding) const { \ - ::taraxa::util::encoding_rlp::rlp_tuple(encoding, __VA_ARGS__); \ - } +#define RLP_FIELDS_DEFINE(_class_, ...) \ + void _class_::rlp(::taraxa::util::RLPDecoderRef encoding) { ::taraxa::util::rlp_tuple(encoding, __VA_ARGS__); } \ + void _class_::rlp(::taraxa::util::RLPEncoderRef encoding) const { ::taraxa::util::rlp_tuple(encoding, __VA_ARGS__); } -namespace taraxa::util { -using encoding_rlp::InvalidEncodingSize; -using encoding_rlp::rlp; -using encoding_rlp::rlp_dec; -using encoding_rlp::rlp_enc; -using encoding_rlp::rlp_tuple; -using encoding_rlp::RLPDecoderRef; -using encoding_rlp::RLPEncoderRef; -} // namespace taraxa::util +#define RLP_FIELDS_DEFINE_INPLACE(...) 
\ + void rlp(::taraxa::util::RLPDecoderRef encoding) { ::taraxa::util::rlp_tuple(encoding, __VA_ARGS__); } \ + void rlp(::taraxa::util::RLPEncoderRef encoding) const { ::taraxa::util::rlp_tuple(encoding, __VA_ARGS__); } From d7f693d3265ca9518363b0aaf64d093fc2730a82 Mon Sep 17 00:00:00 2001 From: Dmytro Kostenko Date: Wed, 22 Mar 2023 10:52:54 +0100 Subject: [PATCH 082/162] fix: formating for clang-format --- .../src/tarcap/packets_handlers/votes_sync_packet_handler.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/votes_sync_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/votes_sync_packet_handler.cpp index 74f4b2e30a..3f38145c36 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/votes_sync_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/votes_sync_packet_handler.cpp @@ -113,7 +113,7 @@ void VotesSyncPacketHandler::process(const PacketData &packet_data, const std::s // Process processStandardVote is called with false in case of next votes bundle -> does not check max boundaries // for round and step to actually being able to sync the current round in case network is stalled - bool check_max_round_step = true; + bool check_max_round_step = true; if (votes_bundle_votes_type == PbftVoteTypes::cert_vote || votes_bundle_votes_type == PbftVoteTypes::next_vote) { check_max_round_step = false; } From f722213d8e0254943f4dd196764d48ec134c880e Mon Sep 17 00:00:00 2001 From: Robert Jonczy Date: Thu, 23 Mar 2023 10:07:32 +0100 Subject: [PATCH 083/162] added kubemonkey labels --- charts/taraxa-node/Chart.yaml | 2 +- charts/taraxa-node/templates/bootnode.yaml | 14 ++++++++++++++ charts/taraxa-node/templates/consensus-node.yaml | 14 ++++++++++++++ charts/taraxa-node/templates/taraxa-node.yaml | 14 ++++++++++++++ charts/taraxa-node/values.yaml | 3 +++ 5 files changed, 46 insertions(+), 1 deletion(-) diff --git 
a/charts/taraxa-node/Chart.yaml b/charts/taraxa-node/Chart.yaml index c08c8abf65..7b5af2d71c 100644 --- a/charts/taraxa-node/Chart.yaml +++ b/charts/taraxa-node/Chart.yaml @@ -2,7 +2,7 @@ apiVersion: v2 appVersion: "1.0" description: Kubernetes helm chart for Taraxa blockchain full node implementation. name: taraxa-node -version: 0.3.5 +version: 0.3.6 keywords: - blockchain - taraxa diff --git a/charts/taraxa-node/templates/bootnode.yaml b/charts/taraxa-node/templates/bootnode.yaml index 8c2a331d4d..5658e2592f 100644 --- a/charts/taraxa-node/templates/bootnode.yaml +++ b/charts/taraxa-node/templates/bootnode.yaml @@ -9,6 +9,13 @@ metadata: helm.sh/chart: {{ include "taraxa-node.chart" . }} app.kubernetes.io/instance: {{ .Release.Name }} app.kubernetes.io/managed-by: {{ .Release.Service }} + {{ if .Values.kubemonkey.enabled }} + kube-monkey/enabled: enabled + kube-monkey/identifier: monkey-victim + kube-monkey/mtbf: '2' + kube-monkey/kill-mode: "fixed" + kube-monkey/kill-value: '1' + {{ end }} spec: replicas: {{ .Values.bootnode.replicaCount }} serviceName: {{ include "taraxa-boot-node.fullname" . }} @@ -29,6 +36,13 @@ spec: partition: a app.kubernetes.io/name: {{ .Release.Name }}-boot-node app.kubernetes.io/instance: {{ .Release.Name }} + {{ if .Values.kubemonkey.enabled }} + kube-monkey/enabled: enabled + kube-monkey/identifier: monkey-victim + kube-monkey/mtbf: '2' + kube-monkey/kill-mode: "fixed" + kube-monkey/kill-value: '1' + {{ end }} annotations: kubernetes.io/change-cause: "Configuration through configmaps." spec: diff --git a/charts/taraxa-node/templates/consensus-node.yaml b/charts/taraxa-node/templates/consensus-node.yaml index fdd697f108..bc7ca0c5e0 100644 --- a/charts/taraxa-node/templates/consensus-node.yaml +++ b/charts/taraxa-node/templates/consensus-node.yaml @@ -9,6 +9,13 @@ metadata: helm.sh/chart: {{ include "taraxa-node.chart" . 
}} app.kubernetes.io/instance: {{ .Release.Name }} app.kubernetes.io/managed-by: {{ .Release.Service }} + {{ if .Values.kubemonkey.enabled }} + kube-monkey/enabled: enabled + kube-monkey/identifier: monkey-victim + kube-monkey/mtbf: '2' + kube-monkey/kill-mode: "fixed" + kube-monkey/kill-value: '1' + {{ end }} spec: replicas: {{ .Values.consensusnode.replicaCount }} serviceName: {{ include "taraxa-consensus-node.fullname" . }} @@ -29,6 +36,13 @@ spec: partition: a app.kubernetes.io/name: {{ .Release.Name }}-consensus-node app.kubernetes.io/instance: {{ .Release.Name }} + {{ if .Values.kubemonkey.enabled }} + kube-monkey/enabled: enabled + kube-monkey/identifier: monkey-victim + kube-monkey/mtbf: '2' + kube-monkey/kill-mode: "fixed" + kube-monkey/kill-value: '1' + {{ end }} annotations: kubernetes.io/change-cause: "Configuration through configmaps." spec: diff --git a/charts/taraxa-node/templates/taraxa-node.yaml b/charts/taraxa-node/templates/taraxa-node.yaml index c03abe2a0a..7a5c3385be 100644 --- a/charts/taraxa-node/templates/taraxa-node.yaml +++ b/charts/taraxa-node/templates/taraxa-node.yaml @@ -10,6 +10,13 @@ metadata: helm.sh/chart: {{ include "taraxa-node.chart" . }} app.kubernetes.io/instance: {{ .Release.Name }} app.kubernetes.io/managed-by: {{ .Release.Service }} + {{ if .Values.kubemonkey.enabled }} + kube-monkey/enabled: enabled + kube-monkey/identifier: monkey-victim + kube-monkey/mtbf: '2' + kube-monkey/kill-mode: "fixed" + kube-monkey/kill-value: '1' + {{ end }} spec: replicas: {{ .Values.node.replicaCount }} # to launch or terminate all Pods in parallel. @@ -30,6 +37,13 @@ spec: partition: a app.kubernetes.io/name: {{ include "taraxa-node.name" . 
}} app.kubernetes.io/instance: {{ .Release.Name }} + {{ if .Values.kubemonkey.enabled }} + kube-monkey/enabled: enabled + kube-monkey/identifier: monkey-victim + kube-monkey/mtbf: '2' + kube-monkey/kill-mode: "fixed" + kube-monkey/kill-value: '1' + {{ end }} annotations: kubernetes.io/change-cause: "Configuration through configmaps." spec: diff --git a/charts/taraxa-node/values.yaml b/charts/taraxa-node/values.yaml index 46ddcb8617..47a1e4ef98 100644 --- a/charts/taraxa-node/values.yaml +++ b/charts/taraxa-node/values.yaml @@ -291,3 +291,6 @@ test: repository: gcr.io/jovial-meridian-249123/python tag: latest pullPolicy: IfNotPresent + +kubemonkey: + enabled: false \ No newline at end of file From caee001f8bfccb918e5395b7275a91b4a915de0d Mon Sep 17 00:00:00 2001 From: Robert Jonczy Date: Thu, 23 Mar 2023 10:29:09 +0100 Subject: [PATCH 084/162] parametrize kubemonkey --- charts/taraxa-node/templates/bootnode.yaml | 16 ++++++++-------- charts/taraxa-node/templates/consensus-node.yaml | 16 ++++++++-------- charts/taraxa-node/templates/taraxa-node.yaml | 16 ++++++++-------- charts/taraxa-node/values.yaml | 5 ++++- 4 files changed, 28 insertions(+), 25 deletions(-) diff --git a/charts/taraxa-node/templates/bootnode.yaml b/charts/taraxa-node/templates/bootnode.yaml index 5658e2592f..d0781b2b98 100644 --- a/charts/taraxa-node/templates/bootnode.yaml +++ b/charts/taraxa-node/templates/bootnode.yaml @@ -11,10 +11,10 @@ metadata: app.kubernetes.io/managed-by: {{ .Release.Service }} {{ if .Values.kubemonkey.enabled }} kube-monkey/enabled: enabled - kube-monkey/identifier: monkey-victim - kube-monkey/mtbf: '2' - kube-monkey/kill-mode: "fixed" - kube-monkey/kill-value: '1' + kube-monkey/identifier: {{ include "taraxa-boot-node.fullname" . 
}} + kube-monkey/mtbf: {{ .Values.kubemonkey.mtbf }} + kube-monkey/kill-mode: {{ .Values.kubemonkey.kill-mode }} + kube-monkey/kill-value: {{ .Values.kubemonkey.kill-value }} {{ end }} spec: replicas: {{ .Values.bootnode.replicaCount }} @@ -38,10 +38,10 @@ spec: app.kubernetes.io/instance: {{ .Release.Name }} {{ if .Values.kubemonkey.enabled }} kube-monkey/enabled: enabled - kube-monkey/identifier: monkey-victim - kube-monkey/mtbf: '2' - kube-monkey/kill-mode: "fixed" - kube-monkey/kill-value: '1' + kube-monkey/identifier: {{ include "taraxa-boot-node.fullname" . }} + kube-monkey/mtbf: {{ .Values.kubemonkey.mtbf }} + kube-monkey/kill-mode: {{ .Values.kubemonkey.kill-mode }} + kube-monkey/kill-value: {{ .Values.kubemonkey.kill-value }} {{ end }} annotations: kubernetes.io/change-cause: "Configuration through configmaps." diff --git a/charts/taraxa-node/templates/consensus-node.yaml b/charts/taraxa-node/templates/consensus-node.yaml index bc7ca0c5e0..0fb342e3a3 100644 --- a/charts/taraxa-node/templates/consensus-node.yaml +++ b/charts/taraxa-node/templates/consensus-node.yaml @@ -11,10 +11,10 @@ metadata: app.kubernetes.io/managed-by: {{ .Release.Service }} {{ if .Values.kubemonkey.enabled }} kube-monkey/enabled: enabled - kube-monkey/identifier: monkey-victim - kube-monkey/mtbf: '2' - kube-monkey/kill-mode: "fixed" - kube-monkey/kill-value: '1' + kube-monkey/identifier: {{ include "taraxa-consensus-node.fullname" . 
}} + kube-monkey/mtbf: {{ .Values.kubemonkey.mtbf }} + kube-monkey/kill-mode: {{ .Values.kubemonkey.kill-mode }} + kube-monkey/kill-value: {{ .Values.kubemonkey.kill-value }} {{ end }} spec: replicas: {{ .Values.consensusnode.replicaCount }} @@ -38,10 +38,10 @@ spec: app.kubernetes.io/instance: {{ .Release.Name }} {{ if .Values.kubemonkey.enabled }} kube-monkey/enabled: enabled - kube-monkey/identifier: monkey-victim - kube-monkey/mtbf: '2' - kube-monkey/kill-mode: "fixed" - kube-monkey/kill-value: '1' + kube-monkey/identifier: {{ include "taraxa-consensus-node.fullname" . }} + kube-monkey/mtbf: {{ .Values.kubemonkey.mtbf }} + kube-monkey/kill-mode: {{ .Values.kubemonkey.kill-mode }} + kube-monkey/kill-value: {{ .Values.kubemonkey.kill-value }} {{ end }} annotations: kubernetes.io/change-cause: "Configuration through configmaps." diff --git a/charts/taraxa-node/templates/taraxa-node.yaml b/charts/taraxa-node/templates/taraxa-node.yaml index 7a5c3385be..5efe5e2938 100644 --- a/charts/taraxa-node/templates/taraxa-node.yaml +++ b/charts/taraxa-node/templates/taraxa-node.yaml @@ -12,10 +12,10 @@ metadata: app.kubernetes.io/managed-by: {{ .Release.Service }} {{ if .Values.kubemonkey.enabled }} kube-monkey/enabled: enabled - kube-monkey/identifier: monkey-victim - kube-monkey/mtbf: '2' - kube-monkey/kill-mode: "fixed" - kube-monkey/kill-value: '1' + kube-monkey/identifier: {{ include "taraxa-node.fullname" . }} + kube-monkey/mtbf: {{ .Values.kubemonkey.mtbf }} + kube-monkey/kill-mode: {{ .Values.kubemonkey.kill-mode }} + kube-monkey/kill-value: {{ .Values.kubemonkey.kill-value }} {{ end }} spec: replicas: {{ .Values.node.replicaCount }} @@ -39,10 +39,10 @@ spec: app.kubernetes.io/instance: {{ .Release.Name }} {{ if .Values.kubemonkey.enabled }} kube-monkey/enabled: enabled - kube-monkey/identifier: monkey-victim - kube-monkey/mtbf: '2' - kube-monkey/kill-mode: "fixed" - kube-monkey/kill-value: '1' + kube-monkey/identifier: {{ include "taraxa-node.fullname" . 
}} + kube-monkey/mtbf: {{ .Values.kubemonkey.mtbf }} + kube-monkey/kill-mode: {{ .Values.kubemonkey.kill-mode }} + kube-monkey/kill-value: {{ .Values.kubemonkey.kill-value }} {{ end }} annotations: kubernetes.io/change-cause: "Configuration through configmaps." diff --git a/charts/taraxa-node/values.yaml b/charts/taraxa-node/values.yaml index 47a1e4ef98..d462d82c26 100644 --- a/charts/taraxa-node/values.yaml +++ b/charts/taraxa-node/values.yaml @@ -293,4 +293,7 @@ test: pullPolicy: IfNotPresent kubemonkey: - enabled: false \ No newline at end of file + enabled: false + mtbf: 2 + kill-mode: "fixed" + kill-value: '1' \ No newline at end of file From dd23f4b6d0b38b873cca9cfd1218c2cd96827cd2 Mon Sep 17 00:00:00 2001 From: Robert Jonczy Date: Thu, 23 Mar 2023 10:33:10 +0100 Subject: [PATCH 085/162] fix keys --- charts/taraxa-node/templates/bootnode.yaml | 8 ++++---- charts/taraxa-node/templates/consensus-node.yaml | 8 ++++---- charts/taraxa-node/templates/taraxa-node.yaml | 8 ++++---- charts/taraxa-node/values.yaml | 4 ++-- 4 files changed, 14 insertions(+), 14 deletions(-) diff --git a/charts/taraxa-node/templates/bootnode.yaml b/charts/taraxa-node/templates/bootnode.yaml index d0781b2b98..689214799a 100644 --- a/charts/taraxa-node/templates/bootnode.yaml +++ b/charts/taraxa-node/templates/bootnode.yaml @@ -13,8 +13,8 @@ metadata: kube-monkey/enabled: enabled kube-monkey/identifier: {{ include "taraxa-boot-node.fullname" . }} kube-monkey/mtbf: {{ .Values.kubemonkey.mtbf }} - kube-monkey/kill-mode: {{ .Values.kubemonkey.kill-mode }} - kube-monkey/kill-value: {{ .Values.kubemonkey.kill-value }} + kube-monkey/kill-mode: {{ .Values.kubemonkey.killMode }} + kube-monkey/kill-value: {{ .Values.kubemonkey.killValue }} {{ end }} spec: replicas: {{ .Values.bootnode.replicaCount }} @@ -40,8 +40,8 @@ spec: kube-monkey/enabled: enabled kube-monkey/identifier: {{ include "taraxa-boot-node.fullname" . 
}} kube-monkey/mtbf: {{ .Values.kubemonkey.mtbf }} - kube-monkey/kill-mode: {{ .Values.kubemonkey.kill-mode }} - kube-monkey/kill-value: {{ .Values.kubemonkey.kill-value }} + kube-monkey/kill-mode: {{ .Values.kubemonkey.killMode }} + kube-monkey/kill-value: {{ .Values.kubemonkey.killValue }} {{ end }} annotations: kubernetes.io/change-cause: "Configuration through configmaps." diff --git a/charts/taraxa-node/templates/consensus-node.yaml b/charts/taraxa-node/templates/consensus-node.yaml index 0fb342e3a3..d478de429f 100644 --- a/charts/taraxa-node/templates/consensus-node.yaml +++ b/charts/taraxa-node/templates/consensus-node.yaml @@ -13,8 +13,8 @@ metadata: kube-monkey/enabled: enabled kube-monkey/identifier: {{ include "taraxa-consensus-node.fullname" . }} kube-monkey/mtbf: {{ .Values.kubemonkey.mtbf }} - kube-monkey/kill-mode: {{ .Values.kubemonkey.kill-mode }} - kube-monkey/kill-value: {{ .Values.kubemonkey.kill-value }} + kube-monkey/kill-mode: {{ .Values.kubemonkey.killMode }} + kube-monkey/kill-value: {{ .Values.kubemonkey.killValue }} {{ end }} spec: replicas: {{ .Values.consensusnode.replicaCount }} @@ -40,8 +40,8 @@ spec: kube-monkey/enabled: enabled kube-monkey/identifier: {{ include "taraxa-consensus-node.fullname" . }} kube-monkey/mtbf: {{ .Values.kubemonkey.mtbf }} - kube-monkey/kill-mode: {{ .Values.kubemonkey.kill-mode }} - kube-monkey/kill-value: {{ .Values.kubemonkey.kill-value }} + kube-monkey/kill-mode: {{ .Values.kubemonkey.killMode }} + kube-monkey/kill-value: {{ .Values.kubemonkey.killValue }} {{ end }} annotations: kubernetes.io/change-cause: "Configuration through configmaps." diff --git a/charts/taraxa-node/templates/taraxa-node.yaml b/charts/taraxa-node/templates/taraxa-node.yaml index 5efe5e2938..28ff5f1bfe 100644 --- a/charts/taraxa-node/templates/taraxa-node.yaml +++ b/charts/taraxa-node/templates/taraxa-node.yaml @@ -14,8 +14,8 @@ metadata: kube-monkey/enabled: enabled kube-monkey/identifier: {{ include "taraxa-node.fullname" . 
}} kube-monkey/mtbf: {{ .Values.kubemonkey.mtbf }} - kube-monkey/kill-mode: {{ .Values.kubemonkey.kill-mode }} - kube-monkey/kill-value: {{ .Values.kubemonkey.kill-value }} + kube-monkey/kill-mode: {{ .Values.kubemonkey.killMode }} + kube-monkey/kill-value: {{ .Values.kubemonkey.killValue }} {{ end }} spec: replicas: {{ .Values.node.replicaCount }} @@ -41,8 +41,8 @@ spec: kube-monkey/enabled: enabled kube-monkey/identifier: {{ include "taraxa-node.fullname" . }} kube-monkey/mtbf: {{ .Values.kubemonkey.mtbf }} - kube-monkey/kill-mode: {{ .Values.kubemonkey.kill-mode }} - kube-monkey/kill-value: {{ .Values.kubemonkey.kill-value }} + kube-monkey/kill-mode: {{ .Values.kubemonkey.killMode }} + kube-monkey/kill-value: {{ .Values.kubemonkey.killValue }} {{ end }} annotations: kubernetes.io/change-cause: "Configuration through configmaps." diff --git a/charts/taraxa-node/values.yaml b/charts/taraxa-node/values.yaml index d462d82c26..8fed2c18a2 100644 --- a/charts/taraxa-node/values.yaml +++ b/charts/taraxa-node/values.yaml @@ -295,5 +295,5 @@ test: kubemonkey: enabled: false mtbf: 2 - kill-mode: "fixed" - kill-value: '1' \ No newline at end of file + killMode: "fixed" + killValue: '1' \ No newline at end of file From 680c22321e031f69ddc453c04d38deb7bb74920c Mon Sep 17 00:00:00 2001 From: Robert Jonczy Date: Thu, 23 Mar 2023 10:47:06 +0100 Subject: [PATCH 086/162] updated changelod --- charts/taraxa-node/CHANGELOG.md | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/charts/taraxa-node/CHANGELOG.md b/charts/taraxa-node/CHANGELOG.md index 68eeb69e63..67bea0458f 100644 --- a/charts/taraxa-node/CHANGELOG.md +++ b/charts/taraxa-node/CHANGELOG.md @@ -3,6 +3,13 @@ This file documents all notable changes to `taraxa-node` Helm Chart. The release numbering uses [semantic versioning](http://semver.org). 
+## v0.3.6 + +### Minor changes + +* Added labels into `StatefulSets` for [kube-monkey](https://github.com/asobti/kube-monkey) + + ## v0.3.5 ### Minor changes From 73883c00f22cb466dbf3a3904ed2d4323b818712 Mon Sep 17 00:00:00 2001 From: Robert Jonczy Date: Thu, 23 Mar 2023 11:38:26 +0100 Subject: [PATCH 087/162] scrape from taraxa-indexer --- charts/taraxa-node/CHANGELOG.md | 6 ++++++ .../taraxa-node/templates/taraxa-node-servicemonitor.yaml | 3 +++ charts/taraxa-node/values.yaml | 3 +++ 3 files changed, 12 insertions(+) diff --git a/charts/taraxa-node/CHANGELOG.md b/charts/taraxa-node/CHANGELOG.md index 67bea0458f..24cdcc3d84 100644 --- a/charts/taraxa-node/CHANGELOG.md +++ b/charts/taraxa-node/CHANGELOG.md @@ -3,6 +3,12 @@ This file documents all notable changes to `taraxa-node` Helm Chart. The release numbering uses [semantic versioning](http://semver.org). +## v0.3.8 + +### Minor changes + +* Added port for scrapping metrics from sidecar of rpc-nodes ([taraxa-indexer](https://github.com/Taraxa-project/taraxa-indexer)) + ## v0.3.6 ### Minor changes diff --git a/charts/taraxa-node/templates/taraxa-node-servicemonitor.yaml b/charts/taraxa-node/templates/taraxa-node-servicemonitor.yaml index 0ee519890d..ed7d6aaa2a 100644 --- a/charts/taraxa-node/templates/taraxa-node-servicemonitor.yaml +++ b/charts/taraxa-node/templates/taraxa-node-servicemonitor.yaml @@ -22,5 +22,8 @@ spec: - honorLabels: true path: /metrics port: metrics + - honorLabels: true + path: /metrics + port: metrics-indexer {{- end }} {{- end }} diff --git a/charts/taraxa-node/values.yaml b/charts/taraxa-node/values.yaml index 8fed2c18a2..4e484a7429 100644 --- a/charts/taraxa-node/values.yaml +++ b/charts/taraxa-node/values.yaml @@ -144,6 +144,9 @@ node: - name: metrics port: 8888 protocol: TCP + - name: metrics-indexer + port: 2112 + protocol: TCP - name: http-indexer port: 8080 protocol: TCP From 142b2f6a9e6e8ad71f2df38ac41028db8fd0d273 Mon Sep 17 00:00:00 2001 From: Robert Jonczy Date: Thu, 23 Mar 
2023 12:51:31 +0100 Subject: [PATCH 088/162] added quotes to labels' values --- charts/taraxa-node/templates/bootnode.yaml | 12 ++++++------ charts/taraxa-node/templates/consensus-node.yaml | 12 ++++++------ charts/taraxa-node/templates/taraxa-node.yaml | 12 ++++++------ 3 files changed, 18 insertions(+), 18 deletions(-) diff --git a/charts/taraxa-node/templates/bootnode.yaml b/charts/taraxa-node/templates/bootnode.yaml index 689214799a..0b848666ec 100644 --- a/charts/taraxa-node/templates/bootnode.yaml +++ b/charts/taraxa-node/templates/bootnode.yaml @@ -12,9 +12,9 @@ metadata: {{ if .Values.kubemonkey.enabled }} kube-monkey/enabled: enabled kube-monkey/identifier: {{ include "taraxa-boot-node.fullname" . }} - kube-monkey/mtbf: {{ .Values.kubemonkey.mtbf }} - kube-monkey/kill-mode: {{ .Values.kubemonkey.killMode }} - kube-monkey/kill-value: {{ .Values.kubemonkey.killValue }} + kube-monkey/mtbf: {{ .Values.kubemonkey.mtbf | quote }} + kube-monkey/kill-mode: {{ .Values.kubemonkey.killMode | quote }} + kube-monkey/kill-value: {{ .Values.kubemonkey.killValue | quote }} {{ end }} spec: replicas: {{ .Values.bootnode.replicaCount }} @@ -39,9 +39,9 @@ spec: {{ if .Values.kubemonkey.enabled }} kube-monkey/enabled: enabled kube-monkey/identifier: {{ include "taraxa-boot-node.fullname" . }} - kube-monkey/mtbf: {{ .Values.kubemonkey.mtbf }} - kube-monkey/kill-mode: {{ .Values.kubemonkey.killMode }} - kube-monkey/kill-value: {{ .Values.kubemonkey.killValue }} + kube-monkey/mtbf: {{ .Values.kubemonkey.mtbf | quote }} + kube-monkey/kill-mode: {{ .Values.kubemonkey.killMode | quote }} + kube-monkey/kill-value: {{ .Values.kubemonkey.killValue | quote }} {{ end }} annotations: kubernetes.io/change-cause: "Configuration through configmaps." 
diff --git a/charts/taraxa-node/templates/consensus-node.yaml b/charts/taraxa-node/templates/consensus-node.yaml index d478de429f..920a688eb0 100644 --- a/charts/taraxa-node/templates/consensus-node.yaml +++ b/charts/taraxa-node/templates/consensus-node.yaml @@ -12,9 +12,9 @@ metadata: {{ if .Values.kubemonkey.enabled }} kube-monkey/enabled: enabled kube-monkey/identifier: {{ include "taraxa-consensus-node.fullname" . }} - kube-monkey/mtbf: {{ .Values.kubemonkey.mtbf }} - kube-monkey/kill-mode: {{ .Values.kubemonkey.killMode }} - kube-monkey/kill-value: {{ .Values.kubemonkey.killValue }} + kube-monkey/mtbf: {{ .Values.kubemonkey.mtbf | quote }} + kube-monkey/kill-mode: {{ .Values.kubemonkey.killMode | quote }} + kube-monkey/kill-value: {{ .Values.kubemonkey.killValue | quote }} {{ end }} spec: replicas: {{ .Values.consensusnode.replicaCount }} @@ -39,9 +39,9 @@ spec: {{ if .Values.kubemonkey.enabled }} kube-monkey/enabled: enabled kube-monkey/identifier: {{ include "taraxa-consensus-node.fullname" . }} - kube-monkey/mtbf: {{ .Values.kubemonkey.mtbf }} - kube-monkey/kill-mode: {{ .Values.kubemonkey.killMode }} - kube-monkey/kill-value: {{ .Values.kubemonkey.killValue }} + kube-monkey/mtbf: {{ .Values.kubemonkey.mtbf | quote }} + kube-monkey/kill-mode: {{ .Values.kubemonkey.killMode | quote }} + kube-monkey/kill-value: {{ .Values.kubemonkey.killValue | quote }} {{ end }} annotations: kubernetes.io/change-cause: "Configuration through configmaps." diff --git a/charts/taraxa-node/templates/taraxa-node.yaml b/charts/taraxa-node/templates/taraxa-node.yaml index 28ff5f1bfe..b462673a3a 100644 --- a/charts/taraxa-node/templates/taraxa-node.yaml +++ b/charts/taraxa-node/templates/taraxa-node.yaml @@ -13,9 +13,9 @@ metadata: {{ if .Values.kubemonkey.enabled }} kube-monkey/enabled: enabled kube-monkey/identifier: {{ include "taraxa-node.fullname" . 
}} - kube-monkey/mtbf: {{ .Values.kubemonkey.mtbf }} - kube-monkey/kill-mode: {{ .Values.kubemonkey.killMode }} - kube-monkey/kill-value: {{ .Values.kubemonkey.killValue }} + kube-monkey/mtbf: {{ .Values.kubemonkey.mtbf | quote }} + kube-monkey/kill-mode: {{ .Values.kubemonkey.killMode | quote }} + kube-monkey/kill-value: {{ .Values.kubemonkey.killValue | quote }} {{ end }} spec: replicas: {{ .Values.node.replicaCount }} @@ -40,9 +40,9 @@ spec: {{ if .Values.kubemonkey.enabled }} kube-monkey/enabled: enabled kube-monkey/identifier: {{ include "taraxa-node.fullname" . }} - kube-monkey/mtbf: {{ .Values.kubemonkey.mtbf }} - kube-monkey/kill-mode: {{ .Values.kubemonkey.killMode }} - kube-monkey/kill-value: {{ .Values.kubemonkey.killValue }} + kube-monkey/mtbf: {{ .Values.kubemonkey.mtbf | quote }} + kube-monkey/kill-mode: {{ .Values.kubemonkey.killMode | quote }} + kube-monkey/kill-value: {{ .Values.kubemonkey.killValue | quote }} {{ end }} annotations: kubernetes.io/change-cause: "Configuration through configmaps." 
From 3409596e67180704ffb9acca050be6d55af2127d Mon Sep 17 00:00:00 2001 From: Leonard Mocanu Date: Thu, 16 Mar 2023 18:09:14 +0200 Subject: [PATCH 089/162] chore: adds transaction generating script --- charts/taraxa-node/CHANGELOG.md | 7 +- charts/taraxa-node/Chart.yaml | 2 +- charts/taraxa-node/templates/secrets.yaml | 1 + .../transaction-generation-script.yaml | 127 ++++++++++++++++++ .../transaction.generation.statefulset.yaml | 73 ++++++++++ charts/taraxa-node/values.yaml | 6 +- 6 files changed, 213 insertions(+), 3 deletions(-) create mode 100644 charts/taraxa-node/templates/transaction-generation-script.yaml create mode 100644 charts/taraxa-node/templates/transaction.generation.statefulset.yaml diff --git a/charts/taraxa-node/CHANGELOG.md b/charts/taraxa-node/CHANGELOG.md index 67bea0458f..156e1835bd 100644 --- a/charts/taraxa-node/CHANGELOG.md +++ b/charts/taraxa-node/CHANGELOG.md @@ -3,13 +3,18 @@ This file documents all notable changes to `taraxa-node` Helm Chart. The release numbering uses [semantic versioning](http://semver.org). +## v0.3.7 + +### Minor changes + +* Adds transaction generating service to replace the explorer faucet + ## v0.3.6 ### Minor changes * Added labels into `StatefulSets` for [kube-monkey](https://github.com/asobti/kube-monkey) - ## v0.3.5 ### Minor changes diff --git a/charts/taraxa-node/Chart.yaml b/charts/taraxa-node/Chart.yaml index 7b5af2d71c..37568f68b3 100644 --- a/charts/taraxa-node/Chart.yaml +++ b/charts/taraxa-node/Chart.yaml @@ -2,7 +2,7 @@ apiVersion: v2 appVersion: "1.0" description: Kubernetes helm chart for Taraxa blockchain full node implementation. 
name: taraxa-node -version: 0.3.6 +version: 0.3.7 keywords: - blockchain - taraxa diff --git a/charts/taraxa-node/templates/secrets.yaml b/charts/taraxa-node/templates/secrets.yaml index aa7631a87f..7e64d37951 100644 --- a/charts/taraxa-node/templates/secrets.yaml +++ b/charts/taraxa-node/templates/secrets.yaml @@ -36,5 +36,6 @@ data: SLACK_TOKEN: {{ .Values.slack.token | b64enc | quote }} EXPLORER_DELEGATION_PRIVATE_KEY: {{ .Values.config.consensusnode.explorerDelegationPrivateKey | b64enc | quote }} EXPLORER_FAUCET_PRIVATE_KEY: {{ .Values.explorer.faucet.privKey | b64enc | quote }} + TRANSACTION_GENERATION_PRIVATE_KEY: {{ .Values.transactionGeneration.privateKey | b64enc | quote }} {{- end }} {{- end }} diff --git a/charts/taraxa-node/templates/transaction-generation-script.yaml b/charts/taraxa-node/templates/transaction-generation-script.yaml new file mode 100644 index 0000000000..316d45bbc5 --- /dev/null +++ b/charts/taraxa-node/templates/transaction-generation-script.yaml @@ -0,0 +1,127 @@ +{{- if .Values.transactionGeneration.enabled }} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ .Release.Name }}-transaction-generation + labels: + app: node + app.kubernetes.io/name: node + helm.sh/chart: {{ include "taraxa-node.chart" . 
}} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} +data: + requirements.txt: |- + aiohttp==3.8.4 + aiosignal==1.3.1 + async-timeout==4.0.2 + attrs==22.2.0 + base58==2.1.1 + bitarray==2.7.3 + certifi==2022.12.7 + charset-normalizer==3.1.0 + coloredlogs==15.0.1 + cytoolz==0.12.1 + eth-abi==2.2.0 + eth-account==0.5.9 + eth-hash==0.5.1 + eth-keyfile==0.5.1 + eth-keys==0.3.4 + eth-rlp==0.2.1 + eth-typing==2.3.0 + eth-utils==1.9.5 + frozenlist==1.3.3 + hexbytes==0.3.0 + humanfriendly==10.0 + idna==3.4 + ipfshttpclient==0.8.0a2 + jsonschema==4.17.3 + lru-dict==1.1.8 + multiaddr==0.0.9 + multidict==6.0.4 + netaddr==0.8.0 + parsimonious==0.8.1 + protobuf==3.19.5 + pycryptodome==3.17 + pyrsistent==0.19.3 + python-dotenv==1.0.0 + requests==2.28.2 + rlp==2.0.1 + six==1.16.0 + toolz==0.12.0 + urllib3==1.26.15 + varint==1.0.2 + web3==5.31.4 + websockets==9.1 + yarl==1.8.2 + transactions.py: |- + import logging + import coloredlogs + import time + from dotenv import load_dotenv + from os import getenv + from web3 import Web3 + + load_dotenv() + + LOG_LEVEL = getenv('LOG_LEVEL', 'INFO') + PROVIDER_URL = getenv('PROVIDER_URL') + PRIVATE_KEY = getenv('PRIVATE_KEY') + PENDING_TRANSACTIONS_THRESHOLD = 1000 + + logger = logging.getLogger() + coloredlogs.install(level=LOG_LEVEL, logger=logger) + + provider = Web3.HTTPProvider(PROVIDER_URL) + chain_id = provider.make_request('net_version', []) + chain_id = int(chain_id['result']) + logger.info(f'Got chain ID: {chain_id}') + + node_config = provider.make_request('taraxa_getConfig', []) + initial_validators = list( + map(lambda x: Web3.to_checksum_address(x['address']), node_config['result']['dpos']['initial_validators'])) + logger.info(f'Got initial validators: {initial_validators}') + + web3 = Web3(provider) + logger.info(f'Connected to Taraxa node: {PROVIDER_URL}') + + last_block = web3.eth.getBlock('latest') + logger.info(f'Last block: #{last_block.number}') + + account = 
web3.eth.account.from_key(PRIVATE_KEY) + logger.info(f'Account: {account.address}') + + transaction_count = int(web3.eth.get_transaction_count(account.address)) + logger.info(f'Transaction count for address: {transaction_count}') + + while True: + pending_transactions = web3.eth.get_block_transaction_count('pending') + logger.info(f'Number of pending transactions: {pending_transactions}') + if pending_transactions > PENDING_TRANSACTIONS_THRESHOLD: + logger.info( + f'Number of pending transactions is above threshold, sleeping for 10 seconds') + time.sleep(10) + continue + + logger.info(f'Sending transactions to initial validators') + + for initial_validator in initial_validators: + transaction_count = transaction_count+1 + logger.info( + f'Sending transaction #{transaction_count} to {initial_validator}') + + transaction = { + 'from': account.address, + 'to': initial_validator, + 'value': 1, + 'gas': 21000, + 'gasPrice': 1, + 'nonce': transaction_count, + 'chainId': chain_id, + } + logger.debug(f'Transaction {transaction}') + signed_transaction = account.sign_transaction(transaction) + web3.eth.send_raw_transaction(signed_transaction.rawTransaction) + + time.sleep(1) +{{- end }} diff --git a/charts/taraxa-node/templates/transaction.generation.statefulset.yaml b/charts/taraxa-node/templates/transaction.generation.statefulset.yaml new file mode 100644 index 0000000000..c620d5b3fb --- /dev/null +++ b/charts/taraxa-node/templates/transaction.generation.statefulset.yaml @@ -0,0 +1,73 @@ +{{ if .Values.transactionGeneration.enabled }} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ .Release.Name }}-transaction-generation + labels: + app: transaction-generation + app.kubernetes.io/name: transaction-generation + helm.sh/chart: {{ include "taraxa-node.chart" . 
}} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} +spec: + replicas: 1 + serviceName: {{ .Release.Name }}-transaction-generation + # to launch or terminate all Pods in parallel. + # https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#parallel-pod-management + podManagementPolicy: Parallel + selector: + matchLabels: + app: transaction-generation + app.kubernetes.io/name: {{ .Release.Name }}-transaction-generation + app.kubernetes.io/instance: {{ .Release.Name }} + template: + metadata: + name: transaction-generation + labels: + app: transaction-generation + app.kubernetes.io/name: {{ .Release.Name }}-transaction-generation + app.kubernetes.io/instance: {{ .Release.Name }} + annotations: + kubernetes.io/change-cause: "Configuration through configmaps." + spec: + containers: + - name: transaction-generation + image: "python:3.8" + imagePullPolicy: IfNotPresent + env: + - name: PRIVATE_KEY + valueFrom: + secretKeyRef: + name: {{ .Release.Name }} + key: TRANSACTION_GENERATION_PRIVATE_KEY + - name: PROVIDER_URL + value: http://{{ include "taraxa-node.fullname" . }}-head:7777 + command: ["/bin/bash", "-c", "--"] + args: [ "pip install -r /app/requirements.txt && python /app/transactions.py" ] + volumeMounts: + - name: requirements + mountPath: /app/requirements.txt + readOnly: true + subPath: requirements.txt + - name: script + mountPath: /app/transactions.py + readOnly: true + subPath: transactions.py + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . 
| nindent 8 }} + {{- end }} + volumes: + - name: requirements + configMap: + defaultMode: 0700 + name: {{ .Release.Name }}-transaction-generation + - name: script + configMap: + defaultMode: 0700 + name: {{ .Release.Name }}-transaction-generation +{{- end }} diff --git a/charts/taraxa-node/values.yaml b/charts/taraxa-node/values.yaml index 8fed2c18a2..035e17fd45 100644 --- a/charts/taraxa-node/values.yaml +++ b/charts/taraxa-node/values.yaml @@ -50,6 +50,10 @@ slack: channel: channel k8s_cluster: taraxa +transactionGeneration: + enabled: true + privateKey: "" + nameOverride: "" fullnameOverride: "" @@ -162,7 +166,7 @@ node: image: repository: gcr.io/jovial-meridian-249123/taraxa-indexer tag: latest - pullPolicy: IfNotPresent + pullPolicy: Always persistence: enabled: false accessMode: ReadWriteOnce From 931fde277fe41a30728007cb35513c5687caa99d Mon Sep 17 00:00:00 2001 From: Robert Jonczy Date: Fri, 24 Mar 2023 12:45:51 +0100 Subject: [PATCH 090/162] bump version --- charts/taraxa-node/Chart.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/charts/taraxa-node/Chart.yaml b/charts/taraxa-node/Chart.yaml index 37568f68b3..fcac4f703c 100644 --- a/charts/taraxa-node/Chart.yaml +++ b/charts/taraxa-node/Chart.yaml @@ -2,7 +2,7 @@ apiVersion: v2 appVersion: "1.0" description: Kubernetes helm chart for Taraxa blockchain full node implementation. 
name: taraxa-node -version: 0.3.7 +version: 0.3.8 keywords: - blockchain - taraxa From 566ae97e2042f9ee176530f704efdbef9955d766 Mon Sep 17 00:00:00 2001 From: Matus Kysel Date: Tue, 28 Mar 2023 11:27:29 +0200 Subject: [PATCH 091/162] chore: update evm --- submodules/taraxa-evm | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/submodules/taraxa-evm b/submodules/taraxa-evm index f75cd5f637..c7eaf8e07f 160000 --- a/submodules/taraxa-evm +++ b/submodules/taraxa-evm @@ -1 +1 @@ -Subproject commit f75cd5f6374188da3700611e50c390b89f216926 +Subproject commit c7eaf8e07f63856946dca8422bd0fc4f8c3ac593 From ec6516aadd2665da1a78b2e95e2a89ff769158a1 Mon Sep 17 00:00:00 2001 From: rjonczy Date: Wed, 29 Mar 2023 10:48:47 +0200 Subject: [PATCH 092/162] do not use @channel in slack notifications --- charts/taraxa-node/templates/node-status-script.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/charts/taraxa-node/templates/node-status-script.yaml b/charts/taraxa-node/templates/node-status-script.yaml index 185546ce2a..01fa04ac0c 100644 --- a/charts/taraxa-node/templates/node-status-script.yaml +++ b/charts/taraxa-node/templates/node-status-script.yaml @@ -118,8 +118,8 @@ data: switcher = { "UP": ":white_check_mark: {} ({}) node is up and running :white_check_mark:".format(HOSTNAME, K8S_CLUSTER), - "DOWN": "@channel :fire: {} ({}) node is down (RPC not responding) :fire:".format(HOSTNAME, K8S_CLUSTER), - "DOWN_NP": "@channel :fire: {} ({}) node is down (no network progress). Last block is {} :fire:".format(HOSTNAME, K8S_CLUSTER, last_block), + "DOWN": ":fire: {} ({}) node is down (RPC not responding) :fire:".format(HOSTNAME, K8S_CLUSTER), + "DOWN_NP": ":fire: {} ({}) node is down (no network progress). 
Last block is {} :fire:".format(HOSTNAME, K8S_CLUSTER, last_block), } message = switcher.get(current_status) From 45e0ad4dde0a785006848b8048405be35202c6a3 Mon Sep 17 00:00:00 2001 From: rjonczy Date: Wed, 29 Mar 2023 11:15:55 +0200 Subject: [PATCH 093/162] rename files --- .../templates/{bootnode.yaml => bootnode-statefulset.yaml} | 0 .../{consensus-node.yaml => consensus-node-statefulset.yaml} | 0 .../templates/{taraxa-node.yaml => taraxa-node-statefulset.yaml} | 0 3 files changed, 0 insertions(+), 0 deletions(-) rename charts/taraxa-node/templates/{bootnode.yaml => bootnode-statefulset.yaml} (100%) rename charts/taraxa-node/templates/{consensus-node.yaml => consensus-node-statefulset.yaml} (100%) rename charts/taraxa-node/templates/{taraxa-node.yaml => taraxa-node-statefulset.yaml} (100%) diff --git a/charts/taraxa-node/templates/bootnode.yaml b/charts/taraxa-node/templates/bootnode-statefulset.yaml similarity index 100% rename from charts/taraxa-node/templates/bootnode.yaml rename to charts/taraxa-node/templates/bootnode-statefulset.yaml diff --git a/charts/taraxa-node/templates/consensus-node.yaml b/charts/taraxa-node/templates/consensus-node-statefulset.yaml similarity index 100% rename from charts/taraxa-node/templates/consensus-node.yaml rename to charts/taraxa-node/templates/consensus-node-statefulset.yaml diff --git a/charts/taraxa-node/templates/taraxa-node.yaml b/charts/taraxa-node/templates/taraxa-node-statefulset.yaml similarity index 100% rename from charts/taraxa-node/templates/taraxa-node.yaml rename to charts/taraxa-node/templates/taraxa-node-statefulset.yaml From 95c2cc770f6f994772aecb25bcba90f07f2619ed Mon Sep 17 00:00:00 2001 From: rjonczy Date: Wed, 29 Mar 2023 11:19:56 +0200 Subject: [PATCH 094/162] move bootnode loadbalancer service to one file --- .../bootnode-service-loadbalancer.yaml | 28 ------------------- .../templates/bootnode-service.yaml | 28 +++++++++++++++++++ 2 files changed, 28 insertions(+), 28 deletions(-) delete mode 100644 
charts/taraxa-node/templates/bootnode-service-loadbalancer.yaml diff --git a/charts/taraxa-node/templates/bootnode-service-loadbalancer.yaml b/charts/taraxa-node/templates/bootnode-service-loadbalancer.yaml deleted file mode 100644 index ed6379ba82..0000000000 --- a/charts/taraxa-node/templates/bootnode-service-loadbalancer.yaml +++ /dev/null @@ -1,28 +0,0 @@ -{{ if .Values.bootnode.enabled }} -{{ if .Values.bootnode.loadBalancer.enabled }} -{{- range $key, $value := .Values.bootnode.loadBalancer.addresses }} ---- -apiVersion: v1 -kind: Service -metadata: - name: {{ $.Release.Name }}-boot-node-udp-{{ $key }} - {{- with $.Values.bootnode.loadBalancer.serviceAnnotations }} - annotations: - {{- toYaml . | nindent 4 }} - {{- end }} - labels: - name: {{ $.Release.Name }}-boot-node-udp-{{ $key }} -spec: - type: LoadBalancer - loadBalancerIP: {{ $value | quote }} - externalTrafficPolicy: Local - selector: - statefulset.kubernetes.io/pod-name: {{ $.Release.Name }}-boot-node-{{ $key }} - ports: - - name: udp-listen-port - port: 10002 - targetPort: 10002 - protocol: UDP -{{- end }} -{{- end }} -{{- end }} diff --git a/charts/taraxa-node/templates/bootnode-service.yaml b/charts/taraxa-node/templates/bootnode-service.yaml index 0f6f1ab71f..5e2f2c52fd 100644 --- a/charts/taraxa-node/templates/bootnode-service.yaml +++ b/charts/taraxa-node/templates/bootnode-service.yaml @@ -28,4 +28,32 @@ spec: {{- end }} {{- end }} {{- end }} + +{{ if .Values.bootnode.loadBalancer.enabled }} +{{- range $key, $value := .Values.bootnode.loadBalancer.addresses }} +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ $.Release.Name }}-boot-node-udp-{{ $key }} + {{- with $.Values.bootnode.loadBalancer.serviceAnnotations }} + annotations: + {{- toYaml . 
| nindent 4 }} + {{- end }} + labels: + name: {{ $.Release.Name }}-boot-node-udp-{{ $key }} +spec: + type: LoadBalancer + loadBalancerIP: {{ $value | quote }} + externalTrafficPolicy: Local + selector: + statefulset.kubernetes.io/pod-name: {{ $.Release.Name }}-boot-node-{{ $key }} + ports: + - name: udp-listen-port + port: 10002 + targetPort: 10002 + protocol: UDP +{{- end }} +{{- end }} + {{- end }} From 1c9f23312f7b57001ab3d57258be877fdacfe6b6 Mon Sep 17 00:00:00 2001 From: rjonczy Date: Wed, 29 Mar 2023 11:22:26 +0200 Subject: [PATCH 095/162] move nodePort and loadBalancer for rpc node into 1 files --- .../templates/taraxa-node-service.yaml | 70 +++++++++++++++++++ .../taraxa-nodes-services-loadbalancer.yaml | 43 ------------ .../taraxa-nodes-services-nodeport.yaml | 28 -------- 3 files changed, 70 insertions(+), 71 deletions(-) delete mode 100644 charts/taraxa-node/templates/taraxa-nodes-services-loadbalancer.yaml delete mode 100644 charts/taraxa-node/templates/taraxa-nodes-services-nodeport.yaml diff --git a/charts/taraxa-node/templates/taraxa-node-service.yaml b/charts/taraxa-node/templates/taraxa-node-service.yaml index b9a6653e1c..7b3dab0832 100644 --- a/charts/taraxa-node/templates/taraxa-node-service.yaml +++ b/charts/taraxa-node/templates/taraxa-node-service.yaml @@ -52,4 +52,74 @@ spec: protocol: {{ $port.protocol }} {{- end }} {{- end }} + +{{ if .Values.node.loadBalancer.enabled }} +{{- range $key, $value := .Values.node.loadBalancer.addresses }} +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ $.Release.Name }}-taraxa-node-udp-{{ $key }} + labels: + name: {{ $.Release.Name }}-taraxa-node-udp-{{ $key }} +spec: + type: LoadBalancer + loadBalancerIP: {{ $value | quote }} + externalTrafficPolicy: Local + selector: + statefulset.kubernetes.io/pod-name: {{ $.Release.Name }}-taraxa-node-{{ $key }} + ports: + - name: udp-listen-port + port: 10002 + targetPort: 10002 + protocol: UDP + +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ 
$.Release.Name }}-taraxa-node-tcp-{{ $key }} + labels: + name: {{ $.Release.Name }}-taraxa-node-tcp-{{ $key }} +spec: + type: LoadBalancer + loadBalancerIP: {{ $value | quote }} + externalTrafficPolicy: Local + selector: + statefulset.kubernetes.io/pod-name: {{ $.Release.Name }}-taraxa-node-{{ $key }} + ports: + - name: tcp-listen-port + port: 10002 + targetPort: 10002 + protocol: TCP +{{- end }} +{{- end }} + +{{ if .Values.node.nodePort.enabled }} +{{- range $key, $value := .Values.node.nodePort.ports }} +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ $.Release.Name }}-taraxa-node-discovery-{{ $key }} + labels: + name: {{ $.Release.Name }}-taraxa-node-discovery-{{ $key }} +spec: + type: NodePort + selector: + statefulset.kubernetes.io/pod-name: {{ $.Release.Name }}-taraxa-node-{{ $key }} + ports: + - name: udp-listen-port + port: {{ $value }} + targetPort: {{ $value }} + nodePort: {{ $value }} + protocol: UDP + - name: tcp-listen-port + port: {{ $value }} + targetPort: {{ $value }} + nodePort: {{ $value }} + protocol: TCP +{{- end }} +{{- end }} + {{- end }} diff --git a/charts/taraxa-node/templates/taraxa-nodes-services-loadbalancer.yaml b/charts/taraxa-node/templates/taraxa-nodes-services-loadbalancer.yaml deleted file mode 100644 index 818c98b107..0000000000 --- a/charts/taraxa-node/templates/taraxa-nodes-services-loadbalancer.yaml +++ /dev/null @@ -1,43 +0,0 @@ -{{ if .Values.node.enabled }} -{{ if .Values.node.loadBalancer.enabled }} -{{- range $key, $value := .Values.node.loadBalancer.addresses }} ---- -apiVersion: v1 -kind: Service -metadata: - name: {{ $.Release.Name }}-taraxa-node-udp-{{ $key }} - labels: - name: {{ $.Release.Name }}-taraxa-node-udp-{{ $key }} -spec: - type: LoadBalancer - loadBalancerIP: {{ $value | quote }} - externalTrafficPolicy: Local - selector: - statefulset.kubernetes.io/pod-name: {{ $.Release.Name }}-taraxa-node-{{ $key }} - ports: - - name: udp-listen-port - port: 10002 - targetPort: 10002 - protocol: UDP - ---- 
-apiVersion: v1 -kind: Service -metadata: - name: {{ $.Release.Name }}-taraxa-node-tcp-{{ $key }} - labels: - name: {{ $.Release.Name }}-taraxa-node-tcp-{{ $key }} -spec: - type: LoadBalancer - loadBalancerIP: {{ $value | quote }} - externalTrafficPolicy: Local - selector: - statefulset.kubernetes.io/pod-name: {{ $.Release.Name }}-taraxa-node-{{ $key }} - ports: - - name: tcp-listen-port - port: 10002 - targetPort: 10002 - protocol: TCP -{{- end }} -{{- end }} -{{- end }} diff --git a/charts/taraxa-node/templates/taraxa-nodes-services-nodeport.yaml b/charts/taraxa-node/templates/taraxa-nodes-services-nodeport.yaml deleted file mode 100644 index 51a5e78fab..0000000000 --- a/charts/taraxa-node/templates/taraxa-nodes-services-nodeport.yaml +++ /dev/null @@ -1,28 +0,0 @@ -{{ if .Values.node.enabled }} -{{ if .Values.node.nodePort.enabled }} -{{- range $key, $value := .Values.node.nodePort.ports }} ---- -apiVersion: v1 -kind: Service -metadata: - name: {{ $.Release.Name }}-taraxa-node-discovery-{{ $key }} - labels: - name: {{ $.Release.Name }}-taraxa-node-discovery-{{ $key }} -spec: - type: NodePort - selector: - statefulset.kubernetes.io/pod-name: {{ $.Release.Name }}-taraxa-node-{{ $key }} - ports: - - name: udp-listen-port - port: {{ $value }} - targetPort: {{ $value }} - nodePort: {{ $value }} - protocol: UDP - - name: tcp-listen-port - port: {{ $value }} - targetPort: {{ $value }} - nodePort: {{ $value }} - protocol: TCP -{{- end }} -{{- end }} -{{- end }} From 711d5e87c4d567c634a6b65428d1bb71186bbf93 Mon Sep 17 00:00:00 2001 From: rjonczy Date: Wed, 29 Mar 2023 11:24:50 +0200 Subject: [PATCH 096/162] port-check cm not used --- charts/taraxa-node/templates/port-check.yaml | 19 ------------------- 1 file changed, 19 deletions(-) delete mode 100644 charts/taraxa-node/templates/port-check.yaml diff --git a/charts/taraxa-node/templates/port-check.yaml b/charts/taraxa-node/templates/port-check.yaml deleted file mode 100644 index 2a754fa05b..0000000000 --- 
a/charts/taraxa-node/templates/port-check.yaml +++ /dev/null @@ -1,19 +0,0 @@ -{{- $fullName := include "taraxa-node.fullname" . -}} ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: {{ $fullName }}-port-check - labels: - helm.sh/chart: {{ include "taraxa-node.chart" . }} - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/managed-by: {{ .Release.Service }} -data: - entrypoint.sh: |- - #!/bin/sh - set -e - echo "Checking host $1" - while [ $(nc -z -w5 $1 $2 > /dev/null 2>&1; echo $?) -ne 0 ]; do - sleep 5 - echo "Waiting for $1:$2..." - done From b1be529fbaebce07db8648857ca050114fd06b3f Mon Sep 17 00:00:00 2001 From: rjonczy Date: Wed, 29 Mar 2023 11:27:43 +0200 Subject: [PATCH 097/162] rename transaction generation file name manifests --- ...n-script.yaml => transaction-generation-script-configmap.yaml} | 0 ...n.statefulset.yaml => transaction-generation-statefulset.yaml} | 0 2 files changed, 0 insertions(+), 0 deletions(-) rename charts/taraxa-node/templates/{transaction-generation-script.yaml => transaction-generation-script-configmap.yaml} (100%) rename charts/taraxa-node/templates/{transaction.generation.statefulset.yaml => transaction-generation-statefulset.yaml} (100%) diff --git a/charts/taraxa-node/templates/transaction-generation-script.yaml b/charts/taraxa-node/templates/transaction-generation-script-configmap.yaml similarity index 100% rename from charts/taraxa-node/templates/transaction-generation-script.yaml rename to charts/taraxa-node/templates/transaction-generation-script-configmap.yaml diff --git a/charts/taraxa-node/templates/transaction.generation.statefulset.yaml b/charts/taraxa-node/templates/transaction-generation-statefulset.yaml similarity index 100% rename from charts/taraxa-node/templates/transaction.generation.statefulset.yaml rename to charts/taraxa-node/templates/transaction-generation-statefulset.yaml From 23d07edec3362b626b5f79adc31b00d80e7cd8ea Mon Sep 17 00:00:00 2001 From: rjonczy Date: Wed, 29 Mar 2023 
11:28:13 +0200 Subject: [PATCH 098/162] added configmap suffix --- .../{explorer-check.yaml => explorer-check-configmap.yaml} | 0 ...{node-status-script.yaml => node-status-script-configmap.yaml} | 0 2 files changed, 0 insertions(+), 0 deletions(-) rename charts/taraxa-node/templates/{explorer-check.yaml => explorer-check-configmap.yaml} (100%) rename charts/taraxa-node/templates/{node-status-script.yaml => node-status-script-configmap.yaml} (100%) diff --git a/charts/taraxa-node/templates/explorer-check.yaml b/charts/taraxa-node/templates/explorer-check-configmap.yaml similarity index 100% rename from charts/taraxa-node/templates/explorer-check.yaml rename to charts/taraxa-node/templates/explorer-check-configmap.yaml diff --git a/charts/taraxa-node/templates/node-status-script.yaml b/charts/taraxa-node/templates/node-status-script-configmap.yaml similarity index 100% rename from charts/taraxa-node/templates/node-status-script.yaml rename to charts/taraxa-node/templates/node-status-script-configmap.yaml From 53cbc90fb387e11611e2d923202fc34266ca4a20 Mon Sep 17 00:00:00 2001 From: rjonczy Date: Wed, 29 Mar 2023 11:36:41 +0200 Subject: [PATCH 099/162] rename configmaps --- .../{initconfig-boot-node.yaml => bootnode-configmap.yaml} | 0 ...itconfig-consensus-node.yaml => consensus-node-configmap.yaml} | 0 ...-status-script-configmap.yaml => status-script-configmap.yaml} | 0 .../{initconfig-node.yaml => taraxa-node-configmap.yaml} | 0 4 files changed, 0 insertions(+), 0 deletions(-) rename charts/taraxa-node/templates/{initconfig-boot-node.yaml => bootnode-configmap.yaml} (100%) rename charts/taraxa-node/templates/{initconfig-consensus-node.yaml => consensus-node-configmap.yaml} (100%) rename charts/taraxa-node/templates/{node-status-script-configmap.yaml => status-script-configmap.yaml} (100%) rename charts/taraxa-node/templates/{initconfig-node.yaml => taraxa-node-configmap.yaml} (100%) diff --git a/charts/taraxa-node/templates/initconfig-boot-node.yaml 
b/charts/taraxa-node/templates/bootnode-configmap.yaml similarity index 100% rename from charts/taraxa-node/templates/initconfig-boot-node.yaml rename to charts/taraxa-node/templates/bootnode-configmap.yaml diff --git a/charts/taraxa-node/templates/initconfig-consensus-node.yaml b/charts/taraxa-node/templates/consensus-node-configmap.yaml similarity index 100% rename from charts/taraxa-node/templates/initconfig-consensus-node.yaml rename to charts/taraxa-node/templates/consensus-node-configmap.yaml diff --git a/charts/taraxa-node/templates/node-status-script-configmap.yaml b/charts/taraxa-node/templates/status-script-configmap.yaml similarity index 100% rename from charts/taraxa-node/templates/node-status-script-configmap.yaml rename to charts/taraxa-node/templates/status-script-configmap.yaml diff --git a/charts/taraxa-node/templates/initconfig-node.yaml b/charts/taraxa-node/templates/taraxa-node-configmap.yaml similarity index 100% rename from charts/taraxa-node/templates/initconfig-node.yaml rename to charts/taraxa-node/templates/taraxa-node-configmap.yaml From 2b5cac197368efb9c35ad491605e32c23163ec91 Mon Sep 17 00:00:00 2001 From: rjonczy Date: Wed, 29 Mar 2023 11:44:30 +0200 Subject: [PATCH 100/162] fix with right chain-in values in comment --- charts/taraxa-node/values.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/charts/taraxa-node/values.yaml b/charts/taraxa-node/values.yaml index 00f6ef1f8a..e4115eb6a2 100644 --- a/charts/taraxa-node/values.yaml +++ b/charts/taraxa-node/values.yaml @@ -16,7 +16,7 @@ image: pullPolicy: IfNotPresent config: - # integer, 1=Mainnet, 2=Testnet, 3=Devnet) + # integer, 841=Mainnet, 842=Testnet, 843=Devnet # 100 for default helm test network: "100" extraArgs: [] From 03adff173536c70016acc1d75d6b7b0a1ad260c5 Mon Sep 17 00:00:00 2001 From: rjonczy Date: Wed, 29 Mar 2023 11:52:59 +0200 Subject: [PATCH 101/162] added consensusLight node --- .../consensus-node-light-configmap.yaml | 178 +++++++++++++ 
.../consensus-node-light-service.yaml | 31 +++ .../consensus-node-light-servicemonitor.yaml | 26 ++ .../consensus-node-light-statefulset.yaml | 235 ++++++++++++++++++ charts/taraxa-node/values.yaml | 55 ++++ 5 files changed, 525 insertions(+) create mode 100644 charts/taraxa-node/templates/consensus-node-light-configmap.yaml create mode 100644 charts/taraxa-node/templates/consensus-node-light-service.yaml create mode 100644 charts/taraxa-node/templates/consensus-node-light-servicemonitor.yaml create mode 100644 charts/taraxa-node/templates/consensus-node-light-statefulset.yaml diff --git a/charts/taraxa-node/templates/consensus-node-light-configmap.yaml b/charts/taraxa-node/templates/consensus-node-light-configmap.yaml new file mode 100644 index 0000000000..3f9bbf1991 --- /dev/null +++ b/charts/taraxa-node/templates/consensus-node-light-configmap.yaml @@ -0,0 +1,178 @@ +{{ if .Values.consensusnodeLight.enabled }} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ .Release.Name }}-consensus-node-light-init-script-light + labels: + app: consensus-node-light + app.kubernetes.io/name: consensus-node-light + helm.sh/chart: {{ include "taraxa-node.chart" . 
}} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} +data: + genconfig.py: |- + import json + import sys + import subprocess + + def get_vrf_public(vrf_prv_key): + process = subprocess.Popen(['taraxad', '--command', 'vrf', vrf_prv_key],stdout=subprocess.PIPE, stderr=subprocess.PIPE) + + for line in process.stdout: + l = line.decode("utf-8") + + if "vrf_public" in l: + vrf_public = l.split(':')[1].replace("\"", "").strip() + return f'0x{vrf_public}' + + def get_addr(prv_key): + process = subprocess.Popen(['taraxad', '--command', 'account', prv_key],stdout=subprocess.PIPE, stderr=subprocess.PIPE) + + for line in process.stdout: + l = line.decode("utf-8") + + if "node_address" in l: + addr = l.split(':')[1].replace("\"", "").strip() + return f'0x{addr}' + + def main(config): + keys = [] + vrfs = [] + + {{- if and (ne .Values.config.network "841") (ne .Values.config.network "842") (ne .Values.config.network "843") }} + keys = [ + {{- range $key, $value := .Values.config.consensusnode.keys }} + "{{ $value }}", + {{- end }} + ] + {{- end }} + + {{- if and (ne .Values.config.network "841") (ne .Values.config.network "842") (ne .Values.config.network "843") }} + vrfs = [ + {{- range $key, $value := .Values.config.consensusnode.vrfs }} + "{{ $value }}", + {{- end }} + ] + {{- end }} + + with open(config) as f: + data = json.load(f) + + initial_validators = data['dpos']['initial_validators'] + + # get delegations from 1st default validator + delegations = initial_validators[0]['delegations'] + + validators = [] + for idx, key in enumerate(keys): + + validator = { + 'address': '', + 'commission': '0x0', + 'delegations': {}, + 'description': 'Taraxa validator', + 'endpoint': '', + 'owner': '' + } + + addr = get_addr(key) + validator['address'] = addr + validator['owner'] = addr + validator['delegations'] = delegations + validator['vrf_key'] = get_vrf_public(vrfs[idx]) + validators.append(validator) + + 
data['dpos']['initial_validators'] = validators + print(json.dumps(data)) + + if __name__ == "__main__": + config_file_name = sys.argv[1] + main(config_file_name) + + entrypoint.sh: |- + #!/bin/bash + DATA_PATH=/root/.taraxa + CONFIG_PATH=$DATA_PATH/conf_taraxa.json + GENESIS_PATH=$DATA_PATH/genesis_taraxa.json + WALLET_PATH=$DATA_PATH/wallet_taraxa.json + + echo "Cleaning up old config..." + rm -rf $CONFIG_PATH + rm -rf $GENESIS_PATH + + echo "Generating config" + INDEX=${HOSTNAME##*-} + KEY="CONSENSUS_NODE_KEY_${INDEX}" + VRF="CONSENSUS_NODE_VRF_${INDEX}" + + KEY="${!KEY}" + VRF="${!VRF}" + + if [ -z "$KEY" ] + then + if [ ! -f "$WALLET_PATH" ] + then + echo "No predifined keys. Generating new wallet..." + KEY=$(taraxad --command account | grep node_secret | cut -d\ -f3- | tr -d \") + VRF=$(taraxad --command vrf | grep vrf_secret | cut -d\ -f3 | tr -d \") + {{ if .Values.explorer.enabled }} + NODE_ADDRESS=$(taraxad --command account ${KEY} | grep node_address | cut -d\ -f3 | tr -d \") + echo "New wallet: 0x${NODE_ADDRESS}" + + SIG=$(taraxa-sign sign --key 0x${EXPLORER_DELEGATION_PRIVATE_KEY} 0x${NODE_ADDRESS}) + + curl --silent http://{{ .Release.Name }}-explorer/api/delegate/0x${NODE_ADDRESS}?sig=${SIG} + {{- end }} + else + echo "Found wallet file." 
+ KEY=$(cat "$WALLET_PATH" | jq -r .node_secret) + VRF=$(cat "$WALLET_PATH" | jq -r .vrf_secret) + fi + fi + + {{ if .Values.explorer.enabled }} + {{- if and (ne .Values.config.network "841") (ne .Values.config.network "842") (ne .Values.config.network "843") }} + NODE_ADDRESS=$(taraxad --command account ${KEY} | grep node_address | cut -d\ -f3 | tr -d \") + curl --silent http://{{ .Release.Name }}-explorer/api/faucet/0x${NODE_ADDRESS} + {{- end }} + {{- end }} + + taraxad --command config \ + --chain-id {{ .Values.config.network }} \ + --node-secret ${KEY} \ + --vrf-secret ${VRF} \ + {{- if and (ne .Values.config.network "841") (ne .Values.config.network "842") (ne .Values.config.network "843") }} + {{- $lbEnabled := .Values.bootnode.loadBalancer.enabled -}} + {{- $lbIPs := .Values.bootnode.loadBalancer.addresses -}} + {{- range $key, $value := .Values.config.bootnode.addresses }} + --boot-nodes {{ if $lbEnabled }}{{ index $lbIPs $key }}{{- else }}{{ include "taraxa-boot-node.fullname" $ }}-{{ $key }}.{{ include "taraxa-boot-node.fullname" $ }}.{{$.Release.Namespace}}{{- end }}:10002/{{ $value }} \ + {{- end }} + {{- end }} + {{- if .Values.config.extraArgs }} + {{ join " " .Values.config.extraArgs }} \ + {{- end }} + --config $CONFIG_PATH \ + --genesis $GENESIS_PATH \ + --wallet $WALLET_PATH \ + --data-dir $DATA_PATH + + {{- if and (ne .Values.config.network "841") (ne .Values.config.network "842") (ne .Values.config.network "843") }} + python3 /bin/genconfig.py $GENESIS_PATH > $GENESIS_PATH.tmp && mv $GENESIS_PATH.tmp $GENESIS_PATH + {{- end }} + + {{- if and (ne .Values.config.network "841") (ne .Values.config.network "842") (ne .Values.config.network "843") }} + {{ if .Values.explorer.enabled }} + export FAUCET_ADDRESS=$(taraxad --command account ${EXPLORER_FAUCET_PRIVATE_KEY} | grep node_address | cut -d\ -f3 | tr -d \") + cat $GENESIS_PATH | jq '.initial_balances += ({("0x"+env.FAUCET_ADDRESS): "0x1027e72f1f12813088000000"})' > $GENESIS_PATH.tmp && mv 
$GENESIS_PATH.tmp $GENESIS_PATH + {{- end }} + {{- end }} + + echo "***** $CONFIG_PATH *****" + cat $CONFIG_PATH + echo "***** $CONFIG_PATH *****" + + echo "***** $GENESIS_PATH *****" + cat $GENESIS_PATH + echo "***** $GENESIS_PATH *****" +{{- end }} diff --git a/charts/taraxa-node/templates/consensus-node-light-service.yaml b/charts/taraxa-node/templates/consensus-node-light-service.yaml new file mode 100644 index 0000000000..724a7e02df --- /dev/null +++ b/charts/taraxa-node/templates/consensus-node-light-service.yaml @@ -0,0 +1,31 @@ +{{ if .Values.consensusnodeLight.enabled }} +{{- if .Values.consensusnodeLight.service.ports }} +# Note: This is a headless service +apiVersion: v1 +kind: Service +metadata: + name: {{ include "taraxa-consensus-node.fullname" . }}-light + labels: + name: consensus-node-light + app.kubernetes.io/name: consensus-node-light + helm.sh/chart: {{ include "taraxa-node.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} +spec: + type: ClusterIP + clusterIP: None + selector: + app: consensus-node-light + app.kubernetes.io/name: {{ .Release.Name }}-consensus-node-light + app.kubernetes.io/instance: {{ .Release.Name }} + ports: + {{- range $port := .Values.consensusnode.service.ports }} + - name: {{ $port.name | default $port.port }} + port: {{ $port.port }} + targetPort: {{ $port.targetPort | default $port.port }} + {{- if $port.protocol }} + protocol: {{ $port.protocol }} + {{- end }} + {{- end }} +{{- end }} +{{- end }} diff --git a/charts/taraxa-node/templates/consensus-node-light-servicemonitor.yaml b/charts/taraxa-node/templates/consensus-node-light-servicemonitor.yaml new file mode 100644 index 0000000000..730f84f570 --- /dev/null +++ b/charts/taraxa-node/templates/consensus-node-light-servicemonitor.yaml @@ -0,0 +1,26 @@ +{{ if .Values.consensusnodeLight.enabled }} +{{- if .Values.consensusnodeLight.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: 
ServiceMonitor +metadata: + name: {{ include "taraxa-consensus-node.fullname" . }}-light + labels: + name: consensus-node-light + app.kubernetes.io/name: consensus-node-light + helm.sh/chart: {{ include "taraxa-node.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} +spec: + selector: + matchLabels: + app.kubernetes.io/name: consensus-node-light + app.kubernetes.io/instance: {{ .Release.Name }} + namespaceSelector: + matchNames: + - {{ $.Release.Namespace | quote }} + endpoints: + - honorLabels: true + path: /metrics + port: metrics +{{- end }} +{{- end }} diff --git a/charts/taraxa-node/templates/consensus-node-light-statefulset.yaml b/charts/taraxa-node/templates/consensus-node-light-statefulset.yaml new file mode 100644 index 0000000000..8e2ae4f420 --- /dev/null +++ b/charts/taraxa-node/templates/consensus-node-light-statefulset.yaml @@ -0,0 +1,235 @@ +{{ if .Values.consensusnodeLight.enabled }} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ include "taraxa-consensus-node.fullname" . }}-light + labels: + app: consensus-node-light + app.kubernetes.io/name: consensus-node-light + helm.sh/chart: {{ include "taraxa-node.chart" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + {{ if .Values.kubemonkey.enabled }} + kube-monkey/enabled: enabled + kube-monkey/identifier: {{ include "taraxa-consensus-node.fullname" . }}-light + kube-monkey/mtbf: {{ .Values.kubemonkey.mtbf | quote }} + kube-monkey/kill-mode: {{ .Values.kubemonkey.killMode | quote }} + kube-monkey/kill-value: {{ .Values.kubemonkey.killValue | quote }} + {{ end }} +spec: + replicas: {{ .Values.consensusnode.replicaCount }} + serviceName: {{ include "taraxa-consensus-node.fullname" . }}-light + # to launch or terminate all Pods in parallel. 
+ # https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#parallel-pod-management + podManagementPolicy: Parallel + selector: + matchLabels: + app: consensus-node-light + partition: a + app.kubernetes.io/name: {{ .Release.Name }}-consensus-node-light + app.kubernetes.io/instance: {{ .Release.Name }} + template: + metadata: + name: consensus-node-light + labels: + app: consensus-node-light + partition: a + app.kubernetes.io/name: {{ .Release.Name }}-consensus-node-light + app.kubernetes.io/instance: {{ .Release.Name }} + {{ if .Values.kubemonkey.enabled }} + kube-monkey/enabled: enabled + kube-monkey/identifier: {{ include "taraxa-consensus-node.fullname" . }}-light + kube-monkey/mtbf: {{ .Values.kubemonkey.mtbf | quote }} + kube-monkey/kill-mode: {{ .Values.kubemonkey.killMode | quote }} + kube-monkey/kill-value: {{ .Values.kubemonkey.killValue | quote }} + {{ end }} + annotations: + kubernetes.io/change-cause: "Configuration through configmaps." + spec: + initContainers: + {{ if .Values.explorer.enabled }} + - name: wait-for-explorer + image: dwdraju/alpine-curl-jq:latest + command: ["/bin/entrypoint.sh"] + volumeMounts: + - name: explorer-check + mountPath: /bin/entrypoint.sh + readOnly: true + subPath: entrypoint.sh + {{- end }} + - name: config-adapter + {{- if and .Values.consensusnode.image.repository .Values.consensusnode.image.tag }} + image: "{{ .Values.consensusnode.image.repository }}:{{ .Values.consensusnode.image.tag }}" + {{- else }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + {{- end }} + {{- if .Values.consensusnode.image.pullPolicy }} + imagePullPolicy: {{ .Values.consensusnode.image.pullPolicy }} + {{- else }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + {{- end }} + envFrom: + - secretRef: + name: {{ .Release.Name }} + env: + - name: HOST + valueFrom: + fieldRef: + fieldPath: status.podIP + command: ["/bin/entrypoint.sh"] + volumeMounts: + - name: initconfig + mountPath: /bin/entrypoint.sh + 
readOnly: true + subPath: entrypoint.sh + - name: initconfig + mountPath: /bin/genconfig.py + readOnly: true + subPath: genconfig.py + - name: data + mountPath: /root/.taraxa + containers: + {{- if .Values.slack.enabled }} + - name: status + image: "python:3.8" + imagePullPolicy: IfNotPresent + env: + - name: SLACK_TOKEN + valueFrom: + secretKeyRef: + name: {{ .Release.Name }} + key: SLACK_TOKEN + - name: SLACK_CHANNEL + value: {{ .Values.slack.channel }} + - name: K8S_CLUSTER + value: {{ .Values.slack.k8s_cluster }} + command: ["/bin/bash", "-c", "--"] + args: [ "pip install -r /app/requirements.txt && python /app/status.py" ] + volumeMounts: + - name: status-requirements + mountPath: /app/requirements.txt + readOnly: true + subPath: requirements.txt + - name: status-script + mountPath: /app/status.py + readOnly: true + subPath: status.py + {{- end }} + - name: consensus-node-light + {{- if and .Values.consensusnode.image.repository .Values.consensusnode.image.tag }} + image: "{{ .Values.consensusnode.image.repository }}:{{ .Values.consensusnode.image.tag }}" + {{- else }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" + {{- end }} + {{- if .Values.consensusnode.image.pullPolicy }} + imagePullPolicy: {{ .Values.consensusnode.image.pullPolicy }} + {{- else }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + {{- end }} + args: + {{- toYaml .Values.consensusnode.args | nindent 12 }} + env: + - name: DEBUG + value: "{{ .Values.consensusnode.debug }}" + - name: HOST + valueFrom: + fieldRef: + fieldPath: status.podIP + {{- if not .Values.consensusnode.probes.enabled }} + - name: TARAXA_SLEEP_DIAGNOSE + value: "true" + {{- end }} + ports: + {{- toYaml .Values.consensusnode.ports | nindent 12 }} + {{- if .Values.consensusnode.probes.enabled }} + livenessProbe: + exec: + command: + - /bin/sh + - -c + - "ps -A | grep taraxad" + initialDelaySeconds: 10 + periodSeconds: 5 + readinessProbe: + exec: + command: + - curl + - -X + - POST + - -H + - 
"'Content-Type: application/json'" + - -d + - "'{\"jsonrpc\":\"2.0\",\"method\":\"taraxa_protocolVersion\",\"params\": [],\"id\":1}'" + - http://127.0.0.1:7777 + initialDelaySeconds: 10 + periodSeconds: 5 + {{- end }} + resources: + {{- toYaml .Values.consensusnode.resources | nindent 12 }} + volumeMounts: + - name: data + mountPath: /root/.taraxa + securityContext: + capabilities: + add: + - SYS_PTRACE + {{- with .Values.consensusnode.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} + volumes: + - name: initconfig + configMap: + defaultMode: 0700 + name: {{ .Release.Name }}-consensus-node-init-script-light + {{ if .Values.explorer.enabled }} + - name: explorer-check + configMap: + defaultMode: 0700 + name: {{ include "taraxa-node.fullname" . }}-explorer-check + {{- end }} + {{- if .Values.slack.enabled }} + - name: status-requirements + configMap: + defaultMode: 0700 + name: {{ .Release.Name }}-node-status-script + - name: status-script + configMap: + defaultMode: 0700 + name: {{ .Release.Name }}-node-status-script + {{- end }} + {{- if not .Values.consensusnode.persistence.enabled }} + - name: data + emptyDir: {} + {{- end }} + {{- if .Values.consensusnode.persistence.enabled }} + volumeClaimTemplates: + - metadata: + name: data + annotations: + {{- if .Values.consensusnode.persistence.annotations}} + {{- toYaml .Values.consensusnode.persistence.annotations | nindent 4 }} + {{- end }} + spec: + accessModes: + - {{ .Values.consensusnode.persistence.accessMode | quote }} + {{- if .Values.consensusnode.persistence.storageClass }} + {{- if (eq "-" .Values.consensusnode.persistence.storageClass) }} + storageClassName: "" + {{- else }} + storageClassName: "{{ .Values.consensusnode.persistence.storageClass }}" + {{- end }} + {{- end }} + resources: + requests: + 
storage: "{{ .Values.consensusnode.persistence.size }}" + {{- end }} +{{- end }} diff --git a/charts/taraxa-node/values.yaml b/charts/taraxa-node/values.yaml index e4115eb6a2..a5c5bb0d31 100644 --- a/charts/taraxa-node/values.yaml +++ b/charts/taraxa-node/values.yaml @@ -274,6 +274,61 @@ consensusnode: storageClass: annotations: {} +consensusnodeLight: + enabled: false + image: {} + replicaCount: 1 + probes: + enabled: true + debug: 0 + args: + - "taraxad" + - "--config" + - "/root/.taraxa/conf_taraxa.json" + - "--genesis" + - "/root/.taraxa/genesis_taraxa.json" + - "--wallet" + - "/root/.taraxa/wallet_taraxa.json" + - "--light" + ports: + - name: rest + containerPort: 7777 + - name: ws + containerPort: 8777 + - name: tcp-listen-port + containerPort: 10002 + protocol: TCP + - name: udp-listen-port + containerPort: 10002 + protocol: UDP + service: + ports: + - name: rest + port: 7777 + - name: ws + port: 8777 + - name: tcp-listen-port + port: 10002 + protocol: TCP + - name: udp-listen-port + port: 10002 + protocol: UDP + - name: metrics + port: 8888 + protocol: TCP + serviceMonitor: + enabled: false + resources: {} + nodeSelector: {} + persistence: + enabled: false + accessMode: ReadWriteOnce + size: 30Gi + storageClass: + annotations: {} + + + explorer: enabled: false From 4000279648d070685b9ac820131810a8cc67dceb Mon Sep 17 00:00:00 2001 From: rjonczy Date: Wed, 29 Mar 2023 14:12:37 +0200 Subject: [PATCH 102/162] fix consensusnodeLight statefeulset --- .../consensus-node-light-statefulset.yaml | 50 +++++++++---------- 1 file changed, 25 insertions(+), 25 deletions(-) diff --git a/charts/taraxa-node/templates/consensus-node-light-statefulset.yaml b/charts/taraxa-node/templates/consensus-node-light-statefulset.yaml index 8e2ae4f420..50fc705f1e 100644 --- a/charts/taraxa-node/templates/consensus-node-light-statefulset.yaml +++ b/charts/taraxa-node/templates/consensus-node-light-statefulset.yaml @@ -17,7 +17,7 @@ metadata: kube-monkey/kill-value: {{ 
.Values.kubemonkey.killValue | quote }} {{ end }} spec: - replicas: {{ .Values.consensusnode.replicaCount }} + replicas: {{ .Values.consensusnodeLight.replicaCount }} serviceName: {{ include "taraxa-consensus-node.fullname" . }}-light # to launch or terminate all Pods in parallel. # https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#parallel-pod-management @@ -58,13 +58,13 @@ spec: subPath: entrypoint.sh {{- end }} - name: config-adapter - {{- if and .Values.consensusnode.image.repository .Values.consensusnode.image.tag }} - image: "{{ .Values.consensusnode.image.repository }}:{{ .Values.consensusnode.image.tag }}" + {{- if and .Values.consensusnodeLight.image.repository .Values.consensusnodeLight.image.tag }} + image: "{{ .Values.consensusnodeLight.image.repository }}:{{ .Values.consensusnodeLight.image.tag }}" {{- else }} image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" {{- end }} - {{- if .Values.consensusnode.image.pullPolicy }} - imagePullPolicy: {{ .Values.consensusnode.image.pullPolicy }} + {{- if .Values.consensusnodeLight.image.pullPolicy }} + imagePullPolicy: {{ .Values.consensusnodeLight.image.pullPolicy }} {{- else }} imagePullPolicy: {{ .Values.image.pullPolicy }} {{- end }} @@ -116,32 +116,32 @@ spec: subPath: status.py {{- end }} - name: consensus-node-light - {{- if and .Values.consensusnode.image.repository .Values.consensusnode.image.tag }} - image: "{{ .Values.consensusnode.image.repository }}:{{ .Values.consensusnode.image.tag }}" + {{- if and .Values.consensusnodeLight.image.repository .Values.consensusnodeLight.image.tag }} + image: "{{ .Values.consensusnodeLight.image.repository }}:{{ .Values.consensusnodeLight.image.tag }}" {{- else }} image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" {{- end }} - {{- if .Values.consensusnode.image.pullPolicy }} - imagePullPolicy: {{ .Values.consensusnode.image.pullPolicy }} + {{- if .Values.consensusnodeLight.image.pullPolicy }} + imagePullPolicy: {{ 
.Values.consensusnodeLight.image.pullPolicy }} {{- else }} imagePullPolicy: {{ .Values.image.pullPolicy }} {{- end }} args: - {{- toYaml .Values.consensusnode.args | nindent 12 }} + {{- toYaml .Values.consensusnodeLight.args | nindent 12 }} env: - name: DEBUG - value: "{{ .Values.consensusnode.debug }}" + value: "{{ .Values.consensusnodeLight.debug }}" - name: HOST valueFrom: fieldRef: fieldPath: status.podIP - {{- if not .Values.consensusnode.probes.enabled }} + {{- if not .Values.consensusnodeLight.probes.enabled }} - name: TARAXA_SLEEP_DIAGNOSE value: "true" {{- end }} ports: - {{- toYaml .Values.consensusnode.ports | nindent 12 }} - {{- if .Values.consensusnode.probes.enabled }} + {{- toYaml .Values.consensusnodeLight.ports | nindent 12 }} + {{- if .Values.consensusnodeLight.probes.enabled }} livenessProbe: exec: command: @@ -165,7 +165,7 @@ spec: periodSeconds: 5 {{- end }} resources: - {{- toYaml .Values.consensusnode.resources | nindent 12 }} + {{- toYaml .Values.consensusnodeLight.resources | nindent 12 }} volumeMounts: - name: data mountPath: /root/.taraxa @@ -173,7 +173,7 @@ spec: capabilities: add: - SYS_PTRACE - {{- with .Values.consensusnode.nodeSelector }} + {{- with .Values.consensusnodeLight.nodeSelector }} nodeSelector: {{- toYaml . 
| nindent 8 }} {{- end }} @@ -206,30 +206,30 @@ spec: defaultMode: 0700 name: {{ .Release.Name }}-node-status-script {{- end }} - {{- if not .Values.consensusnode.persistence.enabled }} + {{- if not .Values.consensusnodeLight.persistence.enabled }} - name: data emptyDir: {} {{- end }} - {{- if .Values.consensusnode.persistence.enabled }} + {{- if .Values.consensusnodeLight.persistence.enabled }} volumeClaimTemplates: - metadata: name: data annotations: - {{- if .Values.consensusnode.persistence.annotations}} - {{- toYaml .Values.consensusnode.persistence.annotations | nindent 4 }} + {{- if .Values.consensusnodeLight.persistence.annotations}} + {{- toYaml .Values.consensusnodeLight.persistence.annotations | nindent 4 }} {{- end }} spec: accessModes: - - {{ .Values.consensusnode.persistence.accessMode | quote }} - {{- if .Values.consensusnode.persistence.storageClass }} - {{- if (eq "-" .Values.consensusnode.persistence.storageClass) }} + - {{ .Values.consensusnodeLight.persistence.accessMode | quote }} + {{- if .Values.consensusnodeLight.persistence.storageClass }} + {{- if (eq "-" .Values.consensusnodeLight.persistence.storageClass) }} storageClassName: "" {{- else }} - storageClassName: "{{ .Values.consensusnode.persistence.storageClass }}" + storageClassName: "{{ .Values.consensusnodeLight.persistence.storageClass }}" {{- end }} {{- end }} resources: requests: - storage: "{{ .Values.consensusnode.persistence.size }}" + storage: "{{ .Values.consensusnodeLight.persistence.size }}" {{- end }} {{- end }} From 7a7e8cae00abea9d2759d7164b4bdd5d52d6e645 Mon Sep 17 00:00:00 2001 From: rjonczy Date: Thu, 30 Mar 2023 12:03:44 +0200 Subject: [PATCH 103/162] update changelog --- charts/taraxa-node/CHANGELOG.md | 11 +++++++++++ charts/taraxa-node/Chart.yaml | 2 +- 2 files changed, 12 insertions(+), 1 deletion(-) diff --git a/charts/taraxa-node/CHANGELOG.md b/charts/taraxa-node/CHANGELOG.md index c4e860b129..f923c637a9 100644 --- a/charts/taraxa-node/CHANGELOG.md +++ 
b/charts/taraxa-node/CHANGELOG.md @@ -3,6 +3,17 @@ This file documents all notable changes to `taraxa-node` Helm Chart. The release numbering uses [semantic versioning](http://semver.org). +## v0.3.9 + +### Major changes + +* Rename / restrusture manifest files +* Added light nodes + +### Minor changes + +* Removed "@channel" from slack notifications + ## v0.3.8 ### Minor changes diff --git a/charts/taraxa-node/Chart.yaml b/charts/taraxa-node/Chart.yaml index fcac4f703c..b638b10387 100644 --- a/charts/taraxa-node/Chart.yaml +++ b/charts/taraxa-node/Chart.yaml @@ -2,7 +2,7 @@ apiVersion: v2 appVersion: "1.0" description: Kubernetes helm chart for Taraxa blockchain full node implementation. name: taraxa-node -version: 0.3.8 +version: 0.3.9 keywords: - blockchain - taraxa From bbc82cb997aa7153b726ebc309f6e5c6f90502b3 Mon Sep 17 00:00:00 2001 From: rjonczy Date: Thu, 30 Mar 2023 12:32:35 +0200 Subject: [PATCH 104/162] rename configmap for light node --- .../taraxa-node/templates/consensus-node-light-configmap.yaml | 2 +- .../taraxa-node/templates/consensus-node-light-statefulset.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/charts/taraxa-node/templates/consensus-node-light-configmap.yaml b/charts/taraxa-node/templates/consensus-node-light-configmap.yaml index 3f9bbf1991..f1331b0803 100644 --- a/charts/taraxa-node/templates/consensus-node-light-configmap.yaml +++ b/charts/taraxa-node/templates/consensus-node-light-configmap.yaml @@ -3,7 +3,7 @@ apiVersion: v1 kind: ConfigMap metadata: - name: {{ .Release.Name }}-consensus-node-light-init-script-light + name: {{ .Release.Name }}-consensus-node-light-init-script labels: app: consensus-node-light app.kubernetes.io/name: consensus-node-light diff --git a/charts/taraxa-node/templates/consensus-node-light-statefulset.yaml b/charts/taraxa-node/templates/consensus-node-light-statefulset.yaml index 50fc705f1e..91fcda21a2 100644 --- a/charts/taraxa-node/templates/consensus-node-light-statefulset.yaml 
+++ b/charts/taraxa-node/templates/consensus-node-light-statefulset.yaml @@ -189,7 +189,7 @@ spec: - name: initconfig configMap: defaultMode: 0700 - name: {{ .Release.Name }}-consensus-node-init-script-light + name: {{ .Release.Name }}-consensus-node-light-init-script {{ if .Values.explorer.enabled }} - name: explorer-check configMap: From 73d7d404548828815b5641ec7324275f16706622 Mon Sep 17 00:00:00 2001 From: rjonczy Date: Thu, 30 Mar 2023 15:12:24 +0200 Subject: [PATCH 105/162] env vars for existing secrets --- .../templates/consensus-node-light-configmap.yaml | 4 ++-- charts/taraxa-node/templates/secrets.yaml | 6 ++++++ 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/charts/taraxa-node/templates/consensus-node-light-configmap.yaml b/charts/taraxa-node/templates/consensus-node-light-configmap.yaml index f1331b0803..b1fa283c97 100644 --- a/charts/taraxa-node/templates/consensus-node-light-configmap.yaml +++ b/charts/taraxa-node/templates/consensus-node-light-configmap.yaml @@ -103,8 +103,8 @@ data: echo "Generating config" INDEX=${HOSTNAME##*-} - KEY="CONSENSUS_NODE_KEY_${INDEX}" - VRF="CONSENSUS_NODE_VRF_${INDEX}" + KEY="CONSENSUS_NODE_LIGHT_KEY_${INDEX}" + VRF="CONSENSUS_NODE_LIGHT_VRF_${INDEX}" KEY="${!KEY}" VRF="${!VRF}" diff --git a/charts/taraxa-node/templates/secrets.yaml b/charts/taraxa-node/templates/secrets.yaml index 7e64d37951..7d2f50167b 100644 --- a/charts/taraxa-node/templates/secrets.yaml +++ b/charts/taraxa-node/templates/secrets.yaml @@ -27,6 +27,12 @@ data: {{- range $key, $value := .Values.config.consensusnode.vrfs }} CONSENSUS_NODE_VRF_{{ $key }}: {{ $value | b64enc | quote }} {{- end }} + {{- range $key, $value := .Values.config.consensusnodeLight.keys }} + CONSENSUS_NODE_LIGHT_KEY_{{ $key }}: {{ $value | b64enc | quote }} + {{- end }} + {{- range $key, $value := .Values.config.consensusnodeLight.vrfs }} + CONSENSUS_NODE_LIGHT_VRF_{{ $key }}: {{ $value | b64enc | quote }} + {{- end }} {{- range $key, $value := 
.Values.config.bootnode.keys }} BOOT_NODE_KEY_{{ $key }}: {{ $value | b64enc | quote }} {{- end }} From 3583b14fb6e944b9a7a71e47739847f81c297549 Mon Sep 17 00:00:00 2001 From: rjonczy Date: Thu, 30 Mar 2023 23:24:00 +0200 Subject: [PATCH 106/162] add default keys for consensusLight node --- charts/taraxa-node/values.yaml | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/charts/taraxa-node/values.yaml b/charts/taraxa-node/values.yaml index a5c5bb0d31..aa49074ef8 100644 --- a/charts/taraxa-node/values.yaml +++ b/charts/taraxa-node/values.yaml @@ -33,6 +33,11 @@ config: - "badf7196e18f653130564fd2f27419bff36194094057a69053bbe3a83a97b4fe" vrfs: - "c7c32f136cf4529471905a6b775ad82a076a5b5d3160b76ba683c743b8a852cff06560102e3dbab2e8b62082611dfc378c90336d01c0a7fd2a1a7bb88fb63478" + consensusnodeLight: + keys: + - "a48867f0133acd5e10dd980c4ad824da69c6c1947d2fb6c2b576f41cccf5e782" + vrfs: + - "6441cd427dcad51d7a2054d777237e1e53f6cb280eebfed6a6647a5c15fd0808d24dab2ffe1c32b4b608bdadf657f82f1871fa8dc19faeef3833bb3e42bb65ec" bootnode: keys: - "45dc56636faf97230f557e16345055f5839dad25f4b3f6f88a02add24b4a00fc" From bb3017e4e9061d6ace46a8775251e85f42b35905 Mon Sep 17 00:00:00 2001 From: JakubFornadel Date: Wed, 5 Apr 2023 16:06:53 -0700 Subject: [PATCH 107/162] chore: minor improvements + fixes --- .../consensus/include/pbft/pbft_manager.hpp | 5 ++- .../consensus/src/pbft/pbft_manager.cpp | 37 ++++++++++++++----- .../src/vote_manager/vote_manager.cpp | 1 + 3 files changed, 32 insertions(+), 11 deletions(-) diff --git a/libraries/core_libs/consensus/include/pbft/pbft_manager.hpp b/libraries/core_libs/consensus/include/pbft/pbft_manager.hpp index 0661f3a0cb..def7868a5c 100644 --- a/libraries/core_libs/consensus/include/pbft/pbft_manager.hpp +++ b/libraries/core_libs/consensus/include/pbft/pbft_manager.hpp @@ -551,7 +551,6 @@ class PbftManager : public std::enable_shared_from_this { PbftStates state_ = value_proposal_state; std::atomic round_ = 1; PbftStep step_ = 1; - PbftStep 
startingStepInRound_ = 1; // Block that node cert voted std::optional> cert_voted_block_for_round_{}; @@ -570,6 +569,10 @@ class PbftManager : public std::enable_shared_from_this { bool go_finish_state_ = false; bool loop_back_finish_state_ = false; + // Used to avoid cyclic logging in voting steps that are called repeatedly + bool printSecondFinishStepInfo = true; + bool printCertStepInfo = true; + const blk_hash_t dag_genesis_block_hash_; const PbftConfig &config_; diff --git a/libraries/core_libs/consensus/src/pbft/pbft_manager.cpp b/libraries/core_libs/consensus/src/pbft/pbft_manager.cpp index 127fcf91b8..fb9702b2d4 100644 --- a/libraries/core_libs/consensus/src/pbft/pbft_manager.cpp +++ b/libraries/core_libs/consensus/src/pbft/pbft_manager.cpp @@ -252,23 +252,35 @@ void PbftManager::setPbftStep(PbftStep pbft_step) { // Node is still >= kMaxSteps steps behind the rest (at least 1/3) of the network - keep lambda at the standard // value so node can catch up with the rest of the nodes - if (network_next_voting_step > step_ && network_next_voting_step - step_ >= kMaxSteps) { - lambda_ = kMinLambda; + + // To get within 1 round with the rest of the network - node cannot start exponentially backing off its lambda + // exactly when it is kMaxSteps behind the network as it would reach kMaxLambda lambda time before catching up. If + // we delay triggering exponential backoff by 4 steps, node should get within 1 round with the network. + // !!! Important: This is true only for values kMinLambda = 15000ms and kMaxLambda = 60000 ms + if (network_next_voting_step > step_ && network_next_voting_step - step_ >= kMaxSteps - 4 /* hardcoded delay */) { + // Reset it only if it was already increased compared to default value + if (lambda_ != kMinLambda) { + lambda_ = kMinLambda; + LOG(log_nf_) << "Node is " << network_next_voting_step - step_ + << " steps behind the rest of the network. 
Reset lambda to the default value " + << lambda_.count() << " [ms]"; + } } else if (lambda_ < kMaxLambda) { // Node is < kMaxSteps steps behind the rest (at least 1/3) of the network - start exponentially backing off - // lambda until it reaches kMaxLambda + // lambda until it reaches kMaxLambda // Note: We calculate the lambda for a step independently of prior steps in case missed earlier steps. lambda_ *= 2; if (lambda_ > kMaxLambda) { lambda_ = kMaxLambda; } + + LOG(log_nf_) << "Exponentially backing off lambda to " << lambda_.count() << " [ms] "; } } } void PbftManager::resetStep() { step_ = 1; - startingStepInRound_ = 1; lambda_ = kMinLambda; } @@ -443,8 +455,6 @@ void PbftManager::initialState() { assert(false); } - // This is used to offset endtime for second finishing step... - startingStepInRound_ = current_pbft_step; setPbftStep(current_pbft_step); round_ = current_pbft_round; @@ -506,6 +516,7 @@ void PbftManager::setCertifyState_() { state_ = certify_state; setPbftStep(step_ + 1); next_step_time_ms_ = 2 * lambda_; + printCertStepInfo = true; } void PbftManager::setFinishState_() { @@ -524,6 +535,7 @@ void PbftManager::setFinishPollingState_() { db_->commitWriteBatch(batch); already_next_voted_value_ = false; already_next_voted_null_block_hash_ = false; + printSecondFinishStepInfo = true; second_finish_step_start_datetime_ = std::chrono::system_clock::now(); next_step_time_ms_ += kPollingIntervalMs; } @@ -537,7 +549,6 @@ void PbftManager::loopBackFinishState_() { db_->commitWriteBatch(batch); already_next_voted_value_ = false; already_next_voted_null_block_hash_ = false; - assert(step_ >= startingStepInRound_); next_step_time_ms_ += kPollingIntervalMs; } @@ -857,7 +868,11 @@ void PbftManager::identifyBlock_() { void PbftManager::certifyBlock_() { // The Certifying Step auto [round, period] = getPbftRoundAndPeriod(); - LOG(log_dg_) << "PBFT certifying state in period " << period << ", round " << round; + + if
(printCertStepInfo) { + LOG(log_dg_) << "PBFT certifying state in period " << period << ", round " << round; + printCertStepInfo = false; + } const auto elapsed_time_in_round = elapsedTimeInMs(current_round_start_datetime_); go_finish_state_ = elapsed_time_in_round > 4 * lambda_ - kPollingIntervalMs; @@ -974,9 +989,11 @@ void PbftManager::firstFinish_() { void PbftManager::secondFinish_() { // Odd number steps from 5 are in second finish auto [round, period] = getPbftRoundAndPeriod(); - LOG(log_dg_) << "PBFT second finishing state in period " << period << ", round " << round << ", step " << step_; - assert(step_ >= startingStepInRound_); + if (printSecondFinishStepInfo) { + LOG(log_dg_) << "PBFT second finishing state in period " << period << ", round " << round << ", step " << step_; + printSecondFinishStepInfo = false; + } // Lambda function for next voting 2t+1 soft voted block from current round auto next_vote_soft_voted_block = [this, period = period, round = round]() { diff --git a/libraries/core_libs/consensus/src/vote_manager/vote_manager.cpp b/libraries/core_libs/consensus/src/vote_manager/vote_manager.cpp index 2cc98476a3..30765e3528 100644 --- a/libraries/core_libs/consensus/src/vote_manager/vote_manager.cpp +++ b/libraries/core_libs/consensus/src/vote_manager/vote_manager.cpp @@ -253,6 +253,7 @@ bool VoteManager::addVerifiedVote(const std::shared_ptr& vote) { if (vote->getType() == PbftVoteTypes::next_vote && total_weight >= t_plus_one && vote->getStep() > found_round_it->second.network_t_plus_one_step) { found_round_it->second.network_t_plus_one_step = vote->getStep(); + LOG(log_nf_) << "Set t+1 next voted block " << vote->getHash() << " in step " << vote->getStep(); } // Not enough votes - do not set 2t+1 voted block for period,round and step From 89cebc490e7d45f3ff508fcb1d3c47e49c9a3e4d Mon Sep 17 00:00:00 2001 From: JakubFornadel Date: Thu, 6 Apr 2023 16:15:02 -0700 Subject: [PATCH 108/162] fix: save own votes and gossip them in case network get 
stalled --- .../consensus/include/pbft/pbft_manager.hpp | 20 +-- .../include/vote_manager/vote_manager.hpp | 22 +++ .../consensus/src/pbft/pbft_manager.cpp | 169 ++++++++++-------- .../src/vote_manager/vote_manager.cpp | 38 +++- .../network/tarcap/taraxa_capability.hpp | 2 +- .../network/src/tarcap/taraxa_capability.cpp | 27 +-- .../storage/include/storage/storage.hpp | 2 +- libraries/core_libs/storage/src/storage.cpp | 11 +- tests/full_node_test.cpp | 2 +- tests/network_test.cpp | 2 +- 10 files changed, 163 insertions(+), 132 deletions(-) diff --git a/libraries/core_libs/consensus/include/pbft/pbft_manager.hpp b/libraries/core_libs/consensus/include/pbft/pbft_manager.hpp index def7868a5c..0685c1ce6c 100644 --- a/libraries/core_libs/consensus/include/pbft/pbft_manager.hpp +++ b/libraries/core_libs/consensus/include/pbft/pbft_manager.hpp @@ -268,18 +268,16 @@ class PbftManager : public std::enable_shared_from_this { size_t getPbftCommitteeSize() const { return config_.committee_size; } /** - * @brief Broadcast or rebroadcast current round soft votes and previous round next votes - * @param rebroadcast + * @brief Test/enforce broadcastVotes() to actually send votes */ - void broadcastSoftAndNextVotes(bool rebroadcast); + void testBroadcatVotesFunctionality(); + private: /** - * @brief Broadcast or rebroadcast reward votes - * @param rebroadcast + * @brief Broadcast or rebroadcast 2t+1 soft/reward/previous round next votes + all own votes if needed */ - void broadcastRewardVotes(bool rebroadcast); + void broadcastVotes(); - private: /** * @brief Check PBFT blocks syncing queue. 
If there are synced PBFT blocks in queue, push it to PBFT chain */ @@ -543,8 +541,8 @@ class PbftManager : public std::enable_shared_from_this { const uint32_t kBroadcastVotesLambdaTime = 20; const uint32_t kRebroadcastVotesLambdaTime = 60; - uint32_t broadcast_soft_next_votes_counter_ = 1; - uint32_t rebroadcast_soft_next_votes_counter_ = 1; + uint32_t broadcast_votes_counter_ = 1; + uint32_t rebroadcast_votes_counter_ = 1; uint32_t broadcast_reward_votes_counter_ = 1; uint32_t rebroadcast_reward_votes_counter_ = 1; @@ -570,8 +568,8 @@ class PbftManager : public std::enable_shared_from_this { bool loop_back_finish_state_ = false; // Used to avoid cyclic logging in voting steps that are called repeatedly - bool printSecondFinishStepInfo = true; - bool printCertStepInfo = true; + bool printSecondFinishStepInfo_ = true; + bool printCertStepInfo_ = true; const blk_hash_t dag_genesis_block_hash_; diff --git a/libraries/core_libs/consensus/include/vote_manager/vote_manager.hpp b/libraries/core_libs/consensus/include/vote_manager/vote_manager.hpp index ab7a1a03ca..b387810031 100644 --- a/libraries/core_libs/consensus/include/vote_manager/vote_manager.hpp +++ b/libraries/core_libs/consensus/include/vote_manager/vote_manager.hpp @@ -131,6 +131,25 @@ class VoteManager { */ PbftPeriod getRewardVotesPbftBlockPeriod(); + /** + * @brief Saves own verified vote into memory and db + * + * @param vote + */ + void saveOwnVerifiedVote(const std::shared_ptr& vote); + + /** + * @return all own verified votes + */ + std::vector> getOwnVerifiedVotes(); + + /** + * @brief Clear own verified votes + * + * @param write_batch + */ + void clearOwnVerifiedVotes(DbStorage::Batch& write_batch); + /** * @brief Place a vote, save it in the verified votes queue, and gossip to peers * @param blockhash vote on PBFT block hash @@ -286,6 +305,9 @@ class VoteManager { std::vector extra_reward_votes_; mutable std::shared_mutex reward_votes_info_mutex_; + // Own votes generated during current period & 
round + std::vector> own_verified_votes_; + // Cache for current 2T+1 - > // !!! Important: do not access it directly as it is not updated automatically, always call getPbftTwoTPlusOne instead // !!! diff --git a/libraries/core_libs/consensus/src/pbft/pbft_manager.cpp b/libraries/core_libs/consensus/src/pbft/pbft_manager.cpp index fb9702b2d4..5faa4a9428 100644 --- a/libraries/core_libs/consensus/src/pbft/pbft_manager.cpp +++ b/libraries/core_libs/consensus/src/pbft/pbft_manager.cpp @@ -262,8 +262,8 @@ void PbftManager::setPbftStep(PbftStep pbft_step) { if (lambda_ != kMinLambda) { lambda_ = kMinLambda; LOG(log_nf_) << "Node is " << network_next_voting_step - step_ - << " steps behind the rest of the network. Reset lambda to the default value " - << lambda_.count() << " [ms]"; + << " steps behind the rest of the network. Reset lambda to the default value " << lambda_.count() + << " [ms]"; } } else if (lambda_ < kMaxLambda) { // Node is < kMaxSteps steps behind the rest (at least 1/3) of the network - start exponentially backing off @@ -274,7 +274,7 @@ void PbftManager::setPbftStep(PbftStep pbft_step) { lambda_ = kMaxLambda; } - LOG(log_nf_) << "Exponentially backing off lambda to " << lambda_.count() << " [ms] "; + LOG(log_nf_) << "No round progress - exponentially backing off lambda to " << lambda_.count() << " [ms] in step " << step_; } } } @@ -360,8 +360,8 @@ void PbftManager::resetPbftConsensus(PbftRound round) { LOG(log_dg_) << "Reset PBFT consensus to: period " << getPbftPeriod() << ", round " << round << ", step 1"; // Reset broadcast counters - broadcast_soft_next_votes_counter_ = 1; - rebroadcast_soft_next_votes_counter_ = 1; + broadcast_votes_counter_ = 1; + rebroadcast_votes_counter_ = 1; // Update current round and reset step to 1 round_ = round; @@ -381,8 +381,8 @@ void PbftManager::resetPbftConsensus(PbftRound round) { cert_voted_block_for_round_.reset(); } - // Remove all own votes generated in previous round - db_->clearOwnVerifiedVotes(batch); + // 
Clear all own votes generated in previous round + vote_mgr_->clearOwnVerifiedVotes(batch); db_->commitWriteBatch(batch); @@ -516,7 +516,7 @@ void PbftManager::setCertifyState_() { state_ = certify_state; setPbftStep(step_ + 1); next_step_time_ms_ = 2 * lambda_; - printCertStepInfo = true; + printCertStepInfo_ = true; } void PbftManager::setFinishState_() { @@ -535,7 +535,7 @@ void PbftManager::setFinishPollingState_() { db_->commitWriteBatch(batch); already_next_voted_value_ = false; already_next_voted_null_block_hash_ = false; - printSecondFinishStepInfo = true; + printSecondFinishStepInfo_ = true; second_finish_step_start_datetime_ = std::chrono::system_clock::now(); next_step_time_ms_ += kPollingIntervalMs; } @@ -552,57 +552,89 @@ void PbftManager::loopBackFinishState_() { next_step_time_ms_ += kPollingIntervalMs; } -void PbftManager::broadcastSoftAndNextVotes(bool rebroadcast) { +void PbftManager::broadcastVotes() { auto net = network_.lock(); if (!net) { + LOG(log_er_) << "Unable to broadcast votes -> cant obtain net ptr"; return; } - auto [round, period] = getPbftRoundAndPeriod(); + const auto votes_sync_packet_handler = net->getSpecificHandler(); - // Broadcast 2t+1 soft votes - auto soft_votes = vote_mgr_->getTwoTPlusOneVotedBlockVotes(period, round, TwoTPlusOneVotedBlockType::SoftVotedBlock); - if (!soft_votes.empty()) { - LOG(log_dg_) << "Broadcast soft votes for period " << period << ", round " << round; - net->getSpecificHandler()->onNewPbftVotesBundle(std::move(soft_votes), - rebroadcast); - } + // Send votes to the other peers + auto gossipVotes = [this, &votes_sync_packet_handler](std::vector> &&votes, + const std::string &votes_type_str, bool rebroadcast) { + if (!votes.empty()) { + LOG(log_dg_) << "Broadcast " << votes_type_str << " for period " << votes.back()->getPeriod() << ", round " + << votes.back()->getRound(); + votes_sync_packet_handler->onNewPbftVotesBundle(std::move(votes), rebroadcast); + } + }; - // Broadcast reward votes - previous 
round 2t+1 cert votes - auto reward_votes = vote_mgr_->getRewardVotes(); - if (!reward_votes.empty()) { - LOG(log_dg_) << "Broadcast propose reward votes for period " << period << ", round " << round; - net->getSpecificHandler()->onNewPbftVotesBundle(std::move(reward_votes), - rebroadcast); - } + // (Re)broadcast 2t+1 soft/reward/previous round next votes + all own votes + auto broadcastVotes = [this, &net, &gossipVotes](bool rebroadcast) { + auto [round, period] = getPbftRoundAndPeriod(); + + // Broadcast 2t+1 soft votes + gossipVotes(vote_mgr_->getTwoTPlusOneVotedBlockVotes(period, round, TwoTPlusOneVotedBlockType::SoftVotedBlock), + "2t+1 soft votes", rebroadcast); - // Broadcast previous round 2t+1 next votes - if (round > 1) { - if (auto next_votes = vote_mgr_->getAllTwoTPlusOneNextVotes(period, round - 1); !next_votes.empty()) { - LOG(log_dg_) << "Broadcast next votes for period " << period << ", round " << round - 1; - net->getSpecificHandler()->onNewPbftVotesBundle(std::move(next_votes), - rebroadcast); + // Broadcast reward votes - previous round 2t+1 cert votes + gossipVotes(vote_mgr_->getRewardVotes(), "2t+1 propose reward votes", rebroadcast); + + // Broadcast previous round 2t+1 next votes + if (round > 1) { + gossipVotes(vote_mgr_->getAllTwoTPlusOneNextVotes(period, round - 1), "2t+1 next votes", rebroadcast); } - } -} -void PbftManager::broadcastRewardVotes(bool rebroadcast) { - auto net = network_.lock(); - if (!net) { - return; - } + // Broadcast own votes + auto vote_packet_handler = net->getSpecificHandler(); + // TODO: this could be optimized to use VotesSyncPacketHandler if we drop some of the checks in process function + // TODO: onNewPbftVote does not use rebroadcast flag to force sending the votes + // Send votes by one as votes sync packet must contain votes with the same type, period and round + const auto& own_votes = vote_mgr_->getOwnVerifiedVotes(); + for (const auto &vote : own_votes) { + vote_packet_handler->onNewPbftVote(vote, 
getPbftProposedBlock(vote->getPeriod(), vote->getBlockHash())); + } + if (!own_votes.empty()) { + LOG(log_dg_) << "Broadcast own votes for period " << period << ", round " << round; + } + }; - auto [round, period] = getPbftRoundAndPeriod(); + const auto round_elapsed_time = elapsedTimeInMs(current_round_start_datetime_); + const auto period_elapsed_time = elapsedTimeInMs(current_period_start_datetime_); - // Broadcast reward votes - previous round 2t+1 cert votes - auto reward_votes = vote_mgr_->getRewardVotes(); - if (!reward_votes.empty()) { - LOG(log_dg_) << "Broadcast propose reward votes for period " << period << ", round " << round; - net->getSpecificHandler()->onNewPbftVotesBundle(std::move(reward_votes), - rebroadcast); + if (round_elapsed_time / kMinLambda > kRebroadcastVotesLambdaTime * rebroadcast_votes_counter_) { + // Stalled in the same round for kRebroadcastVotesLambdaTime * kMinLambda time -> rebroadcast votes + broadcastVotes(true); + rebroadcast_votes_counter_++; + // If there was a rebroadcast no need to do next broadcast either + broadcast_votes_counter_++; + } else if (round_elapsed_time / kMinLambda > kBroadcastVotesLambdaTime * broadcast_votes_counter_) { + // Stalled in the same round for kBroadcastVotesLambdaTime * kMinLambda time -> broadcast votes + broadcastVotes(false); + broadcast_votes_counter_++; + } else if (period_elapsed_time / kMinLambda > kRebroadcastVotesLambdaTime * rebroadcast_reward_votes_counter_) { + // Stalled in the same period for kRebroadcastVotesLambdaTime * kMinLambda time -> rebroadcast reward votes + gossipVotes(vote_mgr_->getRewardVotes(), "2t+1 propose reward votes", true); + rebroadcast_reward_votes_counter_++; + // If there was a rebroadcast no need to do next broadcast either + broadcast_reward_votes_counter_++; + } else if (period_elapsed_time / kMinLambda > kBroadcastVotesLambdaTime * broadcast_reward_votes_counter_) { + // Stalled in the same period for kBroadcastVotesLambdaTime * kMinLambda time -> 
broadcast reward votes + gossipVotes(vote_mgr_->getRewardVotes(), "2t+1 propose reward votes", false); + broadcast_reward_votes_counter_++; } } +void PbftManager::testBroadcatVotesFunctionality() { + // Set these variables to force broadcastVotes() send votes + current_round_start_datetime_ = time_point{}; + current_period_start_datetime_ = time_point{}; + + broadcastVotes(); +} + void PbftManager::printVotingSummary() const { const auto [round, period] = getPbftRoundAndPeriod(); Json::Value json_obj; @@ -625,41 +657,21 @@ void PbftManager::printVotingSummary() const { } bool PbftManager::stateOperations_() { - pushSyncedPbftBlocksIntoChain(); - - const auto round_elapsed_time = elapsedTimeInMs(current_round_start_datetime_); - const auto period_elapsed_time = elapsedTimeInMs(current_period_start_datetime_); - - if (round_elapsed_time / kMinLambda > kRebroadcastVotesLambdaTime * rebroadcast_soft_next_votes_counter_) { - broadcastSoftAndNextVotes(true); - rebroadcast_soft_next_votes_counter_++; - // If there was a rebroadcast no need to do next broadcast either - broadcast_soft_next_votes_counter_++; - } else if (round_elapsed_time / kMinLambda > kBroadcastVotesLambdaTime * broadcast_soft_next_votes_counter_) { - broadcastSoftAndNextVotes(false); - broadcast_soft_next_votes_counter_++; - } - - // Reward votes need to be broadcast even if we are advancing rounds but unable to advance a period - if (period_elapsed_time / kMinLambda > kRebroadcastVotesLambdaTime * rebroadcast_reward_votes_counter_) { - broadcastRewardVotes(true); - rebroadcast_reward_votes_counter_++; - // If there was a rebroadcast no need to do next broadcast either - broadcast_reward_votes_counter_++; - } else if (period_elapsed_time / kMinLambda > kBroadcastVotesLambdaTime * broadcast_reward_votes_counter_) { - broadcastRewardVotes(false); - broadcast_reward_votes_counter_++; - } - auto [round, period] = getPbftRoundAndPeriod(); LOG(log_tr_) << "PBFT current round: " << round << ", period: " << 
period << ", step " << step_; - // Check if these is already 2t+1 cert votes for some valid block, if so - push it into the chain + // Process synced blocks + pushSyncedPbftBlocksIntoChain(); + + // (Re)broadcast votes if needed + broadcastVotes(); + + // Check if there is 2t+1 cert votes for some valid block, if so - push it into the chain if (tryPushCertVotesBlock()) { return true; } - // 2t+1 next votes were seen + // Check if there is 2t+1 next votes for some valid block, if so - advance round if (advanceRound()) { return true; } @@ -709,6 +721,9 @@ bool PbftManager::placeVote(const std::shared_ptr &vote, std::string_view gossipNewVote(vote, voted_block); + // Save own verified vote + vote_mgr_->saveOwnVerifiedVote(vote); + LOG(log_nf_) << "Placed " << log_vote_id << " " << vote->getHash() << " for block " << vote->getBlockHash() << ", vote weight " << *vote->getWeight() << ", period " << vote->getPeriod() << ", round " << vote->getRound() << ", step " << vote->getStep(); @@ -869,9 +884,9 @@ void PbftManager::certifyBlock_() { // The Certifying Step auto [round, period] = getPbftRoundAndPeriod(); - if (printCertStepInfo) { + if (printCertStepInfo_) { LOG(log_dg_) << "PBFT certifying state in period " << period << ", round " << round; - printCertStepInfo = false; + printCertStepInfo_ = false; } const auto elapsed_time_in_round = elapsedTimeInMs(current_round_start_datetime_); @@ -990,9 +1005,9 @@ void PbftManager::secondFinish_() { // Odd number steps from 5 are in second finish auto [round, period] = getPbftRoundAndPeriod(); - if (printSecondFinishStepInfo) { + if (printSecondFinishStepInfo_) { LOG(log_dg_) << "PBFT second finishing state in period " << period << ", round " << round << ", step " << step_; - printSecondFinishStepInfo = false; + printSecondFinishStepInfo_ = false; } // Lambda function for next voting 2t+1 soft voted block from current round diff --git a/libraries/core_libs/consensus/src/vote_manager/vote_manager.cpp 
b/libraries/core_libs/consensus/src/vote_manager/vote_manager.cpp index 30765e3528..9b791c4ba7 100644 --- a/libraries/core_libs/consensus/src/vote_manager/vote_manager.cpp +++ b/libraries/core_libs/consensus/src/vote_manager/vote_manager.cpp @@ -31,7 +31,7 @@ VoteManager::VoteManager(const addr_t& node_addr, const PbftConfig& pbft_config, auto db_votes = db_->getAllTwoTPlusOneVotes(); - auto loadVotesFromDb = [this](const std::vector>& votes) { + auto addVerifiedVotes = [this](const std::vector>& votes) { bool reward_votes_info_set = false; for (const auto& vote : votes) { // Check if votes are unique per round, step & voter @@ -51,11 +51,22 @@ VoteManager::VoteManager(const addr_t& node_addr, const PbftConfig& pbft_config, } }; - loadVotesFromDb(db_->getAllTwoTPlusOneVotes()); - loadVotesFromDb(db_->getOwnVerifiedVotes()); - auto reward_votes = db_->getRewardVotes(); - for (const auto& vote : reward_votes) extra_reward_votes_.emplace_back(vote->getHash()); - loadVotesFromDb(reward_votes); + // Load 2t+1 vote blocks votes + addVerifiedVotes(db_->getAllTwoTPlusOneVotes()); + + // Load own votes + const auto own_votes = db_->getOwnVerifiedVotes(); + for (const auto& own_vote : own_votes) { + own_verified_votes_.emplace_back(own_vote); + } + addVerifiedVotes(own_votes); + + // Load reward votes + const auto reward_votes = db_->getRewardVotes(); + for (const auto& reward_vote : reward_votes) { + extra_reward_votes_.emplace_back(reward_vote->getHash()); + } + addVerifiedVotes(reward_votes); } void VoteManager::setNetwork(std::weak_ptr network) { network_ = std::move(network); } @@ -253,7 +264,8 @@ bool VoteManager::addVerifiedVote(const std::shared_ptr& vote) { if (vote->getType() == PbftVoteTypes::next_vote && total_weight >= t_plus_one && vote->getStep() > found_round_it->second.network_t_plus_one_step) { found_round_it->second.network_t_plus_one_step = vote->getStep(); - LOG(log_nf_) << "Set t+1 next voted block " << vote->getHash() << " in step " << vote->getStep(); 
+ LOG(log_nf_) << "Set t+1 next voted block " << vote->getHash() << " for period " << vote->getPeriod() + << ", round " << vote->getRound() << ", step " << vote->getStep(); } // Not enough votes - do not set 2t+1 voted block for period,round and step @@ -776,6 +788,18 @@ std::vector> VoteManager::getRewardVotes() { return reward_votes; } +void VoteManager::saveOwnVerifiedVote(const std::shared_ptr& vote) { + own_verified_votes_.push_back(vote); + db_->saveOwnVerifiedVote(vote); +} + +std::vector> VoteManager::getOwnVerifiedVotes() { return own_verified_votes_; } + +void VoteManager::clearOwnVerifiedVotes(DbStorage::Batch& write_batch) { + db_->clearOwnVerifiedVotes(write_batch, own_verified_votes_); + own_verified_votes_.clear(); +} + uint64_t VoteManager::getPbftSortitionThreshold(uint64_t total_dpos_votes_count, PbftVoteTypes vote_type) const { switch (vote_type) { case PbftVoteTypes::propose_vote: diff --git a/libraries/core_libs/network/include/network/tarcap/taraxa_capability.hpp b/libraries/core_libs/network/include/network/tarcap/taraxa_capability.hpp index faad6cc7ec..b651d85b5c 100644 --- a/libraries/core_libs/network/include/network/tarcap/taraxa_capability.hpp +++ b/libraries/core_libs/network/include/network/tarcap/taraxa_capability.hpp @@ -88,7 +88,7 @@ class TaraxaCapability : public dev::p2p::CapabilityFace { // END METHODS USED IN TESTS ONLY protected: - virtual void initPeriodicEvents(const std::shared_ptr &pbft_mgr, const std::shared_ptr &db, + virtual void initPeriodicEvents(const std::shared_ptr &pbft_mgr, std::shared_ptr trx_mgr, std::shared_ptr packets_stats); virtual void registerPacketHandlers(const h256 &genesis_hash, diff --git a/libraries/core_libs/network/src/tarcap/taraxa_capability.cpp b/libraries/core_libs/network/src/tarcap/taraxa_capability.cpp index 4744e12bdd..27344616ab 100644 --- a/libraries/core_libs/network/src/tarcap/taraxa_capability.cpp +++ b/libraries/core_libs/network/src/tarcap/taraxa_capability.cpp @@ -68,7 +68,7 @@ 
void TaraxaCapability::init(const h256 &genesis_hash, std::shared_ptr node_addr); // Inits periodic events. Must be called after registerHandlers !!! - initPeriodicEvents(pbft_mgr, db, trx_mgr, all_packets_stats_); + initPeriodicEvents(pbft_mgr, trx_mgr, all_packets_stats_); } void TaraxaCapability::addBootNodes(bool initial) { @@ -117,7 +117,6 @@ void TaraxaCapability::addBootNodes(bool initial) { } void TaraxaCapability::initPeriodicEvents(const std::shared_ptr &pbft_mgr, - const std::shared_ptr &db, std::shared_ptr trx_mgr, std::shared_ptr packets_stats) { // TODO: refactor this: @@ -173,30 +172,6 @@ void TaraxaCapability::initPeriodicEvents(const std::shared_ptr &pb addBootNodes(); } }); - - // If period and round did not change after 60 seconds from node start, rebroadcast own pbft votes - if (pbft_mgr && db /* just because of tests */) { - auto vote_packet_handler = packets_handlers_->getSpecificHandler(); - const auto [init_round, init_period] = pbft_mgr->getPbftRoundAndPeriod(); - periodic_events_tp_->post(60000, [init_round = init_round, init_period = init_period, db = db, pbft_mgr = pbft_mgr, - vote_packet_handler = std::move(vote_packet_handler)] { - const auto [curent_round, curent_period] = pbft_mgr->getPbftRoundAndPeriod(); - if (curent_period != init_period || curent_round != init_round) { - return; - } - - const auto own_votes = db->getOwnVerifiedVotes(); - if (own_votes.empty()) { - return; - } - - // Send votes by one as votes sync packet must contain votes with the same type, period and round - for (const auto &vote : own_votes) { - vote_packet_handler->onNewPbftVote(vote, - pbft_mgr->getPbftProposedBlock(vote->getPeriod(), vote->getBlockHash())); - } - }); - } } void TaraxaCapability::registerPacketHandlers( diff --git a/libraries/core_libs/storage/include/storage/storage.hpp b/libraries/core_libs/storage/include/storage/storage.hpp index 992fb61b51..eae9f453e6 100644 --- a/libraries/core_libs/storage/include/storage/storage.hpp +++ 
b/libraries/core_libs/storage/include/storage/storage.hpp @@ -265,7 +265,7 @@ class DbStorage : public std::enable_shared_from_this { // Own votes for the latest round void saveOwnVerifiedVote(const std::shared_ptr& vote); std::vector> getOwnVerifiedVotes(); - void clearOwnVerifiedVotes(Batch& write_batch); + void clearOwnVerifiedVotes(Batch& write_batch, const std::vector>& own_verified_votes); // 2t+1 votes bundles for the latest round void replaceTwoTPlusOneVotes(TwoTPlusOneVotedBlockType type, const std::vector>& votes); diff --git a/libraries/core_libs/storage/src/storage.cpp b/libraries/core_libs/storage/src/storage.cpp index d3bd850c93..e5181e3184 100644 --- a/libraries/core_libs/storage/src/storage.cpp +++ b/libraries/core_libs/storage/src/storage.cpp @@ -829,13 +829,10 @@ std::vector> DbStorage::getOwnVerifiedVotes() { return votes; } -void DbStorage::clearOwnVerifiedVotes(Batch& write_batch) { - // TODO: deletion could be optimized if we save votes in memory - auto it = - std::unique_ptr(db_->NewIterator(read_options_, handle(Columns::latest_round_own_votes))); - for (it->SeekToFirst(); it->Valid(); it->Next()) { - const auto vote = std::make_shared(asBytes(it->value().ToString())); - remove(write_batch, Columns::latest_round_own_votes, vote->getHash().asBytes()); +void DbStorage::clearOwnVerifiedVotes(Batch& write_batch, + const std::vector>& own_verified_votes) { + for (const auto& own_vote : own_verified_votes) { + remove(write_batch, Columns::latest_round_own_votes, own_vote->getHash().asBytes()); } } diff --git a/tests/full_node_test.cpp b/tests/full_node_test.cpp index 16e00a70f6..0b0ce29a37 100644 --- a/tests/full_node_test.cpp +++ b/tests/full_node_test.cpp @@ -242,7 +242,7 @@ TEST_F(FullNodeTest, db_test) { } batch = db.createWriteBatch(); - db.clearOwnVerifiedVotes(batch); + db.clearOwnVerifiedVotes(batch, verified_votes); db.commitWriteBatch(batch); EXPECT_TRUE(db.getOwnVerifiedVotes().empty()); diff --git a/tests/network_test.cpp 
b/tests/network_test.cpp index 4441728508..21a901c543 100644 --- a/tests/network_test.cpp +++ b/tests/network_test.cpp @@ -883,7 +883,7 @@ TEST_F(NetworkTest, pbft_next_votes_sync_in_same_round) { node2->getPbftManager()->setPbftRound(2); // Node 1 broadcast his votes - node1_pbft_mgr->broadcastSoftAndNextVotes(false); + node1_pbft_mgr->testBroadcatVotesFunctionality(); // Node 2 should receive votes from node 1, node 1 has its own 2 votes EXPECT_EQ(node1_vote_mgr->getVerifiedVotesSize(), 2); EXPECT_HAPPENS({5s, 100ms}, [&](auto& ctx) { WAIT_EXPECT_EQ(ctx, node2_vote_mgr->getVerifiedVotesSize(), 3) }); From 74944cd65e6468ebbcd50572858d141e48f8ba3b Mon Sep 17 00:00:00 2001 From: JakubFornadel Date: Fri, 7 Apr 2023 12:47:00 -0700 Subject: [PATCH 109/162] use rebroadcast flag in onNewPbftVote method --- libraries/core_libs/consensus/src/pbft/pbft_manager.cpp | 9 +++++---- .../tarcap/packets_handlers/vote_packet_handler.hpp | 4 +++- .../src/tarcap/packets_handlers/vote_packet_handler.cpp | 5 +++-- 3 files changed, 11 insertions(+), 7 deletions(-) diff --git a/libraries/core_libs/consensus/src/pbft/pbft_manager.cpp b/libraries/core_libs/consensus/src/pbft/pbft_manager.cpp index 5faa4a9428..ffb281e153 100644 --- a/libraries/core_libs/consensus/src/pbft/pbft_manager.cpp +++ b/libraries/core_libs/consensus/src/pbft/pbft_manager.cpp @@ -274,7 +274,8 @@ void PbftManager::setPbftStep(PbftStep pbft_step) { lambda_ = kMaxLambda; } - LOG(log_nf_) << "No round progress - exponentially backing off lambda to " << lambda_.count() << " [ms] in step " << step_; + LOG(log_nf_) << "No round progress - exponentially backing off lambda to " << lambda_.count() << " [ms] in step " + << step_; } } } @@ -590,11 +591,11 @@ void PbftManager::broadcastVotes() { // Broadcast own votes auto vote_packet_handler = net->getSpecificHandler(); // TODO: this could be optimized to use VotesSyncPacketHandler if we drop some of the checks in process function - // TODO: onNewPbftVote does not use 
rebroadcast flag to force sending the votes // Send votes by one as votes sync packet must contain votes with the same type, period and round - const auto& own_votes = vote_mgr_->getOwnVerifiedVotes(); + const auto &own_votes = vote_mgr_->getOwnVerifiedVotes(); for (const auto &vote : own_votes) { - vote_packet_handler->onNewPbftVote(vote, getPbftProposedBlock(vote->getPeriod(), vote->getBlockHash())); + vote_packet_handler->onNewPbftVote(vote, getPbftProposedBlock(vote->getPeriod(), vote->getBlockHash()), + rebroadcast); } if (!own_votes.empty()) { LOG(log_dg_) << "Broadcast own votes for period " << period << ", round " << round; } diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/vote_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/vote_packet_handler.hpp index 9035d7945a..13c30d346a 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets_handlers/vote_packet_handler.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/vote_packet_handler.hpp @@ -16,8 +16,10 @@ class VotePacketHandler final : public ExtVotesPacketHandler { * * @param vote Votes to send * @param block block to send - nullptr means no block + * @param rebroadcast - send even if vote is known by the peer */ - void onNewPbftVote(const std::shared_ptr& vote, const std::shared_ptr& block); + void onNewPbftVote(const std::shared_ptr& vote, const std::shared_ptr& block, + bool rebroadcast = false); void sendPbftVote(const std::shared_ptr& peer, const std::shared_ptr& vote, const std::shared_ptr& block); diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/vote_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/vote_packet_handler.cpp index 7c4f3aa6cd..aafbc0e2fe 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/vote_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/vote_packet_handler.cpp @@ -77,14
+77,15 @@ void VotePacketHandler::process(const PacketData &packet_data, const std::shared } } -void VotePacketHandler::onNewPbftVote(const std::shared_ptr &vote, const std::shared_ptr &block) { +void VotePacketHandler::onNewPbftVote(const std::shared_ptr &vote, const std::shared_ptr &block, + bool rebroadcast) { for (const auto &peer : peers_state_->getAllPeers()) { if (peer.second->syncing_) { LOG(log_dg_) << " PBFT vote " << vote->getHash() << " not sent to " << peer.first << " peer syncing"; continue; } - if (peer.second->isVoteKnown(vote->getHash())) { + if (!rebroadcast && peer.second->isVoteKnown(vote->getHash())) { continue; } From e372a56d5ade35b6432671bcb4789ab99ae34ee3 Mon Sep 17 00:00:00 2001 From: Jakub Fornadel Date: Mon, 10 Apr 2023 13:00:16 -0700 Subject: [PATCH 110/162] chore: adjust mac building compilation --- doc/building.md | 29 ++++------------------------- 1 file changed, 4 insertions(+), 25 deletions(-) diff --git a/doc/building.md b/doc/building.md index 584658f761..628dae2a78 100644 --- a/doc/building.md +++ b/doc/building.md @@ -169,10 +169,10 @@ And optional: ### Install taraxa-node dependencies: -First you need to get (Brew)[https://brew.sh/] package manager. After that you need tot install dependencies with it. Currently there is no llvm-14 in brew, but it works well with llvm-13 +First you need to get (Brew)[https://brew.sh/] package manager. After that you need tot install dependencies with it. Clang-14 is used for compilation. brew update - brew install coreutils go autoconf automake gflags git libtool llvm@13 make pkg-config cmake conan snappy zstd rapidjson gmp mpfr + brew install coreutils go autoconf automake gflags git libtool llvm@14 make pkg-config cmake conan snappy zstd rapidjson gmp mpfr ### Clone the Repository @@ -186,7 +186,8 @@ First you need to get (Brew)[https://brew.sh/] package manager. 
After that you n # It is recommended to use clang because on other compilers you could face some errors conan profile new clang --detect && \ conan profile update settings.compiler=clang clang && \ - conan profile update settings.compiler.version=13 clang && \ + conan profile update settings.compiler.version=14 clang && \ + conan profile update settings.compiler.compiler.cppstd=14 conan profile update settings.compiler.libcxx=libc++ clang && \ conan profile update env.CC=clang clang && \ conan profile update env.CXX=clang++ clang @@ -224,28 +225,6 @@ It could be cleaned up with: rm -rf ~/.conan/data ``` -#### Project building issue - -If you are facing strange errors with project compilation it could be a problem that after install of llvm clang if pointing to a default apple clang. You could check that with `clang --version`. It should not point to `/Library/Developer/CommandLineTools/usr/bin`, but something like `/usr/local/opt/llvm/bin`. So you should specify full paths to a compiler: -1. Check full path with `brew info llvm`. Search for command that looks like -``` - echo 'export PATH="/usr/local/opt/llvm/bin:$PATH"' >> ~/.zshrc -``` -2. Take bin path from it. In our case this is `/usr/local/opt/llvm/bin` It shouldn't differ for most cases. -3. Append compiler to it and specify it in conan profile: -``` - conan profile update env.CC=/usr/local/opt/llvm/bin/clang clang && \ - conan profile update env.CXX=/usr/local/opt/llvm/bin/clang++ clang -``` -4. Specify compiler with full path to cmake: -``` -cmake -DCMAKE_BUILD_TYPE=Release -DCMAKE_C_COMPILER=/usr/local/opt/llvm/bin/clang -DCMAKE_CXX_COMPILER=/usr/local/opt/llvm/bin/clang++ ../ -``` -5. After successfull finish of that command processing compile project with: -``` -make -j$(nproc) -``` - ## Building on M1 Macs for x86_64 with Rosetta2 You should be able to build project following default MacOS building process. But here is a guide how to build project for x86_64 arch with Rosetta2. 
From e4cd97cf08ad1804eeefcebb0f6f25dce53c3cec Mon Sep 17 00:00:00 2001 From: Matus Kysel Date: Wed, 12 Apr 2023 08:34:33 +0200 Subject: [PATCH 111/162] chore: correct check for duplicate reward votes --- .../consensus/src/final_chain/rewards_stats.cpp | 2 +- .../core_libs/consensus/src/pbft/pbft_manager.cpp | 12 ++++++++---- .../common/ext_votes_packet_handler.cpp | 12 ------------ .../packets_handlers/pbft_sync_packet_handler.cpp | 2 +- .../packets_handlers/vote_packet_handler.cpp | 6 +++++- .../types/pbft_block/include/pbft/pbft_block.hpp | 6 ++++++ libraries/types/pbft_block/src/pbft_block.cpp | 14 ++++++++++++++ 7 files changed, 35 insertions(+), 19 deletions(-) diff --git a/libraries/core_libs/consensus/src/final_chain/rewards_stats.cpp b/libraries/core_libs/consensus/src/final_chain/rewards_stats.cpp index f14bbb0d11..809a48d227 100644 --- a/libraries/core_libs/consensus/src/final_chain/rewards_stats.cpp +++ b/libraries/core_libs/consensus/src/final_chain/rewards_stats.cpp @@ -30,7 +30,7 @@ std::optional RewardsStats::getTransactionValidator(const trx_hash_t& tx bool RewardsStats::addVote(const std::shared_ptr& vote) { // Set valid cert vote to validator auto& validator_stats = validators_stats_[vote->getVoterAddr()]; - // assert(validator_stats.vote_weight_ == 0); + assert(validator_stats.vote_weight_ == 0); assert(vote->getWeight()); if (validator_stats.vote_weight_) { diff --git a/libraries/core_libs/consensus/src/pbft/pbft_manager.cpp b/libraries/core_libs/consensus/src/pbft/pbft_manager.cpp index ffb281e153..b40c09abe6 100644 --- a/libraries/core_libs/consensus/src/pbft/pbft_manager.cpp +++ b/libraries/core_libs/consensus/src/pbft/pbft_manager.cpp @@ -1102,11 +1102,15 @@ std::optional, std::vector(prev_blk_hash, anchor_hash, order_hash, last_state_root, propose_period, + node_addr_, node_sk_, std::move(reward_votes_hashes)); - auto block = std::make_shared(prev_blk_hash, anchor_hash, order_hash, last_state_root, propose_period, - node_addr_, node_sk_, 
std::move(reward_votes_hashes)); - - return {std::make_pair(std::move(block), std::move(reward_votes))}; + return {std::make_pair(std::move(block), std::move(reward_votes))}; + } catch (const std::exception &e) { + LOG(log_er_) << "Block for period " << propose_period << " could not be proposed " << e.what(); + return {}; + } } void PbftManager::processProposedBlock(const std::shared_ptr &proposed_block, diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/common/ext_votes_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/common/ext_votes_packet_handler.cpp index d9725c4b9d..d8862d06df 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/common/ext_votes_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/common/ext_votes_packet_handler.cpp @@ -149,18 +149,6 @@ bool ExtVotesPacketHandler::validateVoteAndBlock(const std::shared_ptr &vo << pbft_block->getBlockHash(); return false; } - // TODO[2401]: move this check to PBFT block - std::unordered_set set; - const auto reward_votes = pbft_block->getRewardVotes(); - set.reserve(reward_votes.size()); - for (const auto &hash : reward_votes) { - if (!set.insert(hash).second) { - LOG(log_er_) << "PBFT block " << pbft_block->getBlockHash() << " proposed by " << pbft_block->getBeneficiary() - << " has duplicated vote " << hash; - return false; - } - } - return true; } diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/pbft_sync_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/pbft_sync_packet_handler.cpp index 79a9dca4bc..ed574aa4fe 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/pbft_sync_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/pbft_sync_packet_handler.cpp @@ -60,7 +60,7 @@ void PbftSyncPacketHandler::process(const PacketData &packet_data, const std::sh PeriodData period_data; try { period_data = PeriodData(packet_data.rlp_[1]); - } 
catch (const Transaction::InvalidTransaction &e) { + } catch (const std::runtime_error &e) { throw MaliciousPeerException("Unable to parse PeriodData: " + std::string(e.what())); } diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/vote_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/vote_packet_handler.cpp index aafbc0e2fe..3051d87122 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/vote_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/vote_packet_handler.cpp @@ -29,7 +29,11 @@ void VotePacketHandler::process(const PacketData &packet_data, const std::shared std::shared_ptr vote = std::make_shared(packet_data.rlp_[0]); if (const size_t item_count = packet_data.rlp_.itemCount(); item_count == kExtendedVotePacketSize) { - pbft_block = std::make_shared(packet_data.rlp_[1]); + try { + pbft_block = std::make_shared(packet_data.rlp_[1]); + } catch (const std::exception &e) { + throw MaliciousPeerException(e.what()); + } peer_chain_size = packet_data.rlp_[2].toInt(); LOG(log_dg_) << "Received PBFT vote " << vote->getHash() << " with PBFT block " << pbft_block->getBlockHash(); } else { diff --git a/libraries/types/pbft_block/include/pbft/pbft_block.hpp b/libraries/types/pbft_block/include/pbft/pbft_block.hpp index afd95c02ba..19fc8f5b42 100644 --- a/libraries/types/pbft_block/include/pbft/pbft_block.hpp +++ b/libraries/types/pbft_block/include/pbft/pbft_block.hpp @@ -129,6 +129,12 @@ class PbftBlock { * @brief Set PBFT block hash and block proposer address */ void calculateHash_(); + + /** + * @brief Check if all rewards votes are unique + * + */ + void checkRewardVotes(); }; std::ostream& operator<<(std::ostream& strm, PbftBlock const& pbft_blk); diff --git a/libraries/types/pbft_block/src/pbft_block.cpp b/libraries/types/pbft_block/src/pbft_block.cpp index 94668acbee..908e75cb45 100644 --- a/libraries/types/pbft_block/src/pbft_block.cpp +++ 
b/libraries/types/pbft_block/src/pbft_block.cpp @@ -7,12 +7,14 @@ #include "common/jsoncpp.hpp" namespace taraxa { + PbftBlock::PbftBlock(bytes const& b) : PbftBlock(dev::RLP(b)) {} PbftBlock::PbftBlock(dev::RLP const& rlp) { util::rlp_tuple(util::RLPDecoderRef(rlp, true), prev_block_hash_, dag_block_hash_as_pivot_, order_hash_, prev_state_root_hash_, period_, timestamp_, reward_votes_, signature_); calculateHash_(); + checkRewardVotes(); } PbftBlock::PbftBlock(const blk_hash_t& prev_blk_hash, const blk_hash_t& dag_blk_hash_as_pivot, @@ -28,6 +30,7 @@ PbftBlock::PbftBlock(const blk_hash_t& prev_blk_hash, const blk_hash_t& dag_blk_ timestamp_ = dev::utcTime(); signature_ = dev::sign(sk, sha3(false)); calculateHash_(); + checkRewardVotes(); } Json::Value PbftBlock::toJson(PbftBlock const& b, std::vector const& dag_blks) { @@ -53,6 +56,17 @@ void PbftBlock::calculateHash_() { beneficiary_ = dev::right160(dev::sha3(dev::bytesConstRef(p.data(), sizeof(p)))); } +void PbftBlock::checkRewardVotes() { + std::unordered_set set; + set.reserve(reward_votes_.size()); + for (const auto& hash : reward_votes_) { + if (!set.insert(hash).second) { + throw std::runtime_error( + fmt("Invalid PBFT Block %s proposed by %s has duplicated vote %s", block_hash_, beneficiary_, hash)); + } + } +} + blk_hash_t PbftBlock::sha3(bool include_sig) const { return dev::sha3(rlp(include_sig)); } std::string PbftBlock::getJsonStr() const { return getJson().toStyledString(); } From 7c599ada804810857b6c96c23a70d3eb8790e6a3 Mon Sep 17 00:00:00 2001 From: Matus Kysel Date: Wed, 12 Apr 2023 11:35:46 +0200 Subject: [PATCH 112/162] chore: fix logs --- .../src/tarcap/packets_handlers/dag_block_packet_handler.cpp | 2 +- .../src/tarcap/packets_handlers/dag_sync_packet_handler.cpp | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/dag_block_packet_handler.cpp 
b/libraries/core_libs/network/src/tarcap/packets_handlers/dag_block_packet_handler.cpp index 24f984aaf1..d71dfbfe15 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/dag_block_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/dag_block_packet_handler.cpp @@ -105,7 +105,7 @@ void DagBlockPacketHandler::onNewBlockReceived(DagBlock &&block, const std::shar case DagManager::VerifyBlockReturnType::NotEligible: case DagManager::VerifyBlockReturnType::FailedTipsVerification: { std::ostringstream err_msg; - err_msg << "DagBlock" << block_hash << " failed verification with error code " + err_msg << "DagBlock " << block_hash << " failed verification with error code " << static_cast(verified); throw MaliciousPeerException(err_msg.str()); } diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/dag_sync_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/dag_sync_packet_handler.cpp index c295494e6a..7d1b04acc7 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/dag_sync_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/dag_sync_packet_handler.cpp @@ -120,7 +120,7 @@ void DagSyncPacketHandler::process(const PacketData& packet_data, const std::sha const auto verified = dag_mgr_->verifyBlock(block); if (verified != DagManager::VerifyBlockReturnType::Verified) { std::ostringstream err_msg; - err_msg << "DagBlock" << block.getHash() << " failed verification with error code " + err_msg << "DagBlock " << block.getHash() << " failed verification with error code " << static_cast(verified); throw MaliciousPeerException(err_msg.str()); } From 1bf6dea8ff2bdc4761022d4e0fb1015b991fab5a Mon Sep 17 00:00:00 2001 From: Dmytro Kostenko Date: Thu, 6 Apr 2023 18:35:31 +0200 Subject: [PATCH 113/162] fix: add revert reason to execution error --- submodules/taraxa-evm | 2 +- tests/final_chain_test.cpp | 48 ++++++++++++++++++- tests/rpc_test.cpp | 7 --- 
.../test_util/include/test_util/test_util.hpp | 8 ++++ 4 files changed, 55 insertions(+), 10 deletions(-) diff --git a/submodules/taraxa-evm b/submodules/taraxa-evm index cc8639e427..4a1e215d42 160000 --- a/submodules/taraxa-evm +++ b/submodules/taraxa-evm @@ -1 +1 @@ -Subproject commit cc8639e4275171fd5804986d1705256d5b736df4 +Subproject commit 4a1e215d426204d1bce525e67a25f1b9cd9c3fbb diff --git a/tests/final_chain_test.cpp b/tests/final_chain_test.cpp index daaa0b45b9..70e899a6dc 100644 --- a/tests/final_chain_test.cpp +++ b/tests/final_chain_test.cpp @@ -7,6 +7,8 @@ #include "common/vrf_wrapper.hpp" #include "config/config.hpp" #include "final_chain/trie_common.hpp" +#include "libdevcore/CommonJS.h" +#include "network/rpc/eth/Eth.h" #include "test_util/gtest.hpp" #include "test_util/samples.hpp" #include "test_util/test_util.hpp" @@ -42,8 +44,6 @@ struct FinalChainTest : WithDataDir { } auto advance(const SharedTransactions& trxs, advance_check_opts opts = {}) { - SUT = nullptr; - SUT = NewFinalChain(db, cfg); std::vector trx_hashes; ++expected_blk_num; for (const auto& trx : trxs) { @@ -473,6 +473,50 @@ TEST_F(FinalChainTest, failed_transaction_fee) { } } +TEST_F(FinalChainTest, revert_reason) { + const auto test_contract_code = + "608060405234801561001057600080fd5b506101ac806100206000396000f3fe608060405234801561001057600080fd5b50600436106100" + "2b5760003560e01c806336091dff14610030575b600080fd5b61004a600480360381019061004591906100cc565b61004c565b005b806100" + "8c576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040161008390610156565b60405180" + "910390fd5b50565b600080fd5b60008115159050919050565b6100a981610094565b81146100b457600080fd5b50565b6000813590506100" + "c6816100a0565b92915050565b6000602082840312156100e2576100e161008f565b5b60006100f0848285016100b7565b91505092915050" + "565b600082825260208201905092915050565b7f617267207265717569726564000000000000000000000000000000000000000060008201" + 
"5250565b6000610140600c836100f9565b915061014b8261010a565b602082019050919050565b6000602082019050818103600083015261" + "016f81610133565b905091905056fea2646970667358221220846c5a92aab30dade0d92661a25b1fd6ba9a914fd114f2f264c2003b5abdda" + "db64736f6c63430008120033"; + auto sender_keys = dev::KeyPair::create(); + const auto& from = sender_keys.address(); + const auto& sk = sender_keys.secret(); + cfg.genesis.state.initial_balances = {}; + cfg.genesis.state.initial_balances[from] = u256("10000000000000000000000"); + // disable balances check as we have internal transfer + assume_only_toplevel_transfers = false; + init(); + + net::rpc::eth::EthParams eth_rpc_params; + eth_rpc_params.chain_id = cfg.genesis.chain_id; + eth_rpc_params.gas_limit = cfg.genesis.dag.gas_limit; + eth_rpc_params.final_chain = SUT; + auto eth_json_rpc = net::rpc::eth::NewEth(std::move(eth_rpc_params)); + + auto nonce = 0; + auto trx1 = std::make_shared(nonce++, 0, 0, TEST_TX_GAS_LIMIT, dev::fromHex(test_contract_code), sk); + auto result = advance({trx1}); + auto test_contract_addr = result->trx_receipts[0].new_contract_address; + EXPECT_EQ(test_contract_addr, dev::right160(dev::sha3(dev::rlpList(from, 0)))); + auto call_data = "0x36091dff0000000000000000000000000000000000000000000000000000000000000000"; + { + Json::Value est(Json::objectValue); + est["to"] = dev::toHex(*test_contract_addr); + est["from"] = dev::toHex(from); + est["data"] = call_data; + EXPECT_THROW_WITH(dev::jsToInt(eth_json_rpc->eth_estimateGas(est)), std::exception, + "evm: execution reverted: arg required"); + EXPECT_THROW_WITH(eth_json_rpc->eth_call(est, "latest"), std::exception, "evm: execution reverted: arg required"); + } +} + +// This test should be last as state_api isn't destructed correctly because of exception TEST_F(FinalChainTest, initial_validator_exceed_maximum_stake) { const dev::KeyPair key = dev::KeyPair::create(); const dev::KeyPair validator_key = dev::KeyPair::create(); diff --git a/tests/rpc_test.cpp 
b/tests/rpc_test.cpp index a104432dfb..a4ea10b90a 100644 --- a/tests/rpc_test.cpp +++ b/tests/rpc_test.cpp @@ -58,13 +58,6 @@ TEST_F(RPCTest, eth_estimateGas) { } } -#define EXPECT_THROW_WITH(statement, expected_exception, msg) \ - try { \ - statement; \ - } catch (const expected_exception& e) { \ - ASSERT_EQ(std::string(msg), std::string(e.what())); \ - } - TEST_F(RPCTest, eth_call) { auto node_cfg = make_node_cfgs(1); auto nodes = launch_nodes(node_cfg); diff --git a/tests/test_util/include/test_util/test_util.hpp b/tests/test_util/include/test_util/test_util.hpp index ab6a4b3731..cdbd8659dc 100644 --- a/tests/test_util/include/test_util/test_util.hpp +++ b/tests/test_util/include/test_util/test_util.hpp @@ -122,6 +122,14 @@ bool wait(const wait_opts& opts, const std::function& poller); EXPECT_GE(o1, o2); \ } +#define EXPECT_THROW_WITH(statement, expected_exception, msg) \ + try { \ + statement; \ + EXPECT_TRUE("No exception thrown" && false); \ + } catch (const expected_exception& e) { \ + EXPECT_EQ(std::string(msg), std::string(e.what())); \ + } + struct TransactionClient { enum class TransactionStage { created, From 3762f8a5683e98ba6aaabdee44e0916c537eaf40 Mon Sep 17 00:00:00 2001 From: Dmytro Kostenko Date: Wed, 12 Apr 2023 14:17:36 +0200 Subject: [PATCH 114/162] fix: network_test.transfer_lot_of_blocks --- tests/network_test.cpp | 21 ++++++--------------- 1 file changed, 6 insertions(+), 15 deletions(-) diff --git a/tests/network_test.cpp b/tests/network_test.cpp index 21a901c543..c8097b8440 100644 --- a/tests/network_test.cpp +++ b/tests/network_test.cpp @@ -108,16 +108,6 @@ TEST_F(NetworkTest, transfer_lot_of_blocks) { dag_mgr1->addDagBlock(std::move(blk), {trxs[0]}); std::vector> dag_blocks; - // creating lot of blocks just for size - std::vector trx_hashes; - std::vector> verified_transactions; - trx_hashes.reserve(trxs.size()); - verified_transactions.reserve(trxs.size()); - - for (const auto& trx : trxs) { - trx_hashes.push_back(trx->getHash()); - 
verified_transactions.push_back(trx); - } { const auto proposal_period = *db1->getProposalPeriodForDagLevel(proposal_level + 1); const auto period_block_hash = db1->getPeriodBlockHash(proposal_period); @@ -126,16 +116,17 @@ TEST_F(NetworkTest, transfer_lot_of_blocks) { for (int i = 0; i < 100; ++i) { vdf_sortition::VdfSortition vdf(sortition_params, node1->getVrfSecretKey(), VrfSortitionBase::makeVrfInput(proposal_level + 1, period_block_hash), 1, 1); - dev::bytes vdf_msg = DagManager::getVdfMessage(block_hash, {trxs[i + 1]}); + dev::bytes vdf_msg = DagManager::getVdfMessage(block_hash, {trxs[i]}); vdf.computeVdfSolution(sortition_params, vdf_msg, false); - DagBlock blk(block_hash, proposal_level + 1, {}, {trxs[i + 1]->getHash()}, estimation, vdf, - node1->getSecretKey()); + DagBlock blk(block_hash, proposal_level + 1, {}, {trxs[i]->getHash()}, estimation, vdf, node1->getSecretKey()); dag_blocks.emplace_back(std::make_shared(blk)); } } - for (auto trx : verified_transactions) - node1->getTransactionManager()->insertValidatedTransaction(std::move(trx), TransactionStatus::Verified); + for (auto trx : trxs) { + auto tx = trx; + node1->getTransactionManager()->insertValidatedTransaction(std::move(tx), TransactionStatus::Verified); + } for (size_t i = 0; i < dag_blocks.size(); i++) { if (dag_mgr1->verifyBlock(*dag_blocks[i]) == DagManager::VerifyBlockReturnType::Verified) dag_mgr1->addDagBlock(DagBlock(*dag_blocks[i]), {trxs[i]}); From f0b8d65e9ee7b8a66385bdc8cfbe7242fa7f2a52 Mon Sep 17 00:00:00 2001 From: Matus Kysel Date: Thu, 13 Apr 2023 09:53:56 +0200 Subject: [PATCH 115/162] chore: change name of the function --- libraries/types/pbft_block/include/pbft/pbft_block.hpp | 2 +- libraries/types/pbft_block/src/pbft_block.cpp | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/libraries/types/pbft_block/include/pbft/pbft_block.hpp b/libraries/types/pbft_block/include/pbft/pbft_block.hpp index 19fc8f5b42..a830a3afa5 100644 --- 
a/libraries/types/pbft_block/include/pbft/pbft_block.hpp +++ b/libraries/types/pbft_block/include/pbft/pbft_block.hpp @@ -134,7 +134,7 @@ class PbftBlock { * @brief Check if all rewards votes are unique * */ - void checkRewardVotes(); + void checkUniqueRewardVotes(); }; std::ostream& operator<<(std::ostream& strm, PbftBlock const& pbft_blk); diff --git a/libraries/types/pbft_block/src/pbft_block.cpp b/libraries/types/pbft_block/src/pbft_block.cpp index 908e75cb45..317ef637c5 100644 --- a/libraries/types/pbft_block/src/pbft_block.cpp +++ b/libraries/types/pbft_block/src/pbft_block.cpp @@ -14,7 +14,7 @@ PbftBlock::PbftBlock(dev::RLP const& rlp) { util::rlp_tuple(util::RLPDecoderRef(rlp, true), prev_block_hash_, dag_block_hash_as_pivot_, order_hash_, prev_state_root_hash_, period_, timestamp_, reward_votes_, signature_); calculateHash_(); - checkRewardVotes(); + checkUniqueRewardVotes(); } PbftBlock::PbftBlock(const blk_hash_t& prev_blk_hash, const blk_hash_t& dag_blk_hash_as_pivot, @@ -30,7 +30,7 @@ PbftBlock::PbftBlock(const blk_hash_t& prev_blk_hash, const blk_hash_t& dag_blk_ timestamp_ = dev::utcTime(); signature_ = dev::sign(sk, sha3(false)); calculateHash_(); - checkRewardVotes(); + checkUniqueRewardVotes(); } Json::Value PbftBlock::toJson(PbftBlock const& b, std::vector const& dag_blks) { @@ -56,7 +56,7 @@ void PbftBlock::calculateHash_() { beneficiary_ = dev::right160(dev::sha3(dev::bytesConstRef(p.data(), sizeof(p)))); } -void PbftBlock::checkRewardVotes() { +void PbftBlock::checkUniqueRewardVotes() { std::unordered_set set; set.reserve(reward_votes_.size()); for (const auto& hash : reward_votes_) { From 903ec6deff92c67145a491e6cf201cfb3e3a10d5 Mon Sep 17 00:00:00 2001 From: Dmytro Kostenko Date: Tue, 11 Apr 2023 09:54:31 +0200 Subject: [PATCH 116/162] fix: gas estimations for transactions with internal calls --- libraries/core_libs/network/rpc/eth/Eth.cpp | 45 ++++++++++-- tests/final_chain_test.cpp | 81 ++++++++++++++++++++- tests/rpc_test.cpp | 17 +++-- 3 
files changed, 130 insertions(+), 13 deletions(-) diff --git a/libraries/core_libs/network/rpc/eth/Eth.cpp b/libraries/core_libs/network/rpc/eth/Eth.cpp index 3f796586c7..735217b279 100644 --- a/libraries/core_libs/network/rpc/eth/Eth.cpp +++ b/libraries/core_libs/network/rpc/eth/Eth.cpp @@ -152,14 +152,50 @@ class EthImpl : public Eth, EthParams { const auto block_number = get_block_number_from_json(_jsonBlock); auto t = toTransactionSkeleton(_json); prepare_transaction_for_call(t, block_number); - return toJS(call(block_number, t).code_retval); + auto ret = call(block_number, t); + if (!ret.consensus_err.empty() || !ret.code_err.empty()) { + throw std::runtime_error(ret.consensus_err.empty() ? ret.code_err : ret.consensus_err); + } + return toJS(ret.code_retval); } string eth_estimateGas(const Json::Value& _json) override { auto t = toTransactionSkeleton(_json); auto blk_n = final_chain->last_block_number(); prepare_transaction_for_call(t, blk_n); - return toJS(call(blk_n, t).gas_used); + + auto is_enough_gas = [&](gas_t gas) -> bool { + t.gas = gas; + auto res = call(blk_n, t); + if (!res.consensus_err.empty()) { + throw std::runtime_error(res.consensus_err); + } + if (!res.code_err.empty()) { + return false; + } + return true; + }; + // couldn't be lower than execution gas_used. So we should start with this value + auto call_result = call(blk_n, t); + if (!call_result.consensus_err.empty() || !call_result.code_err.empty()) { + throw std::runtime_error(call_result.consensus_err.empty() ? 
call_result.code_err : call_result.consensus_err); + } + gas_t low = call_result.gas_used; + gas_t hi = *t.gas; + if (low > hi) { + throw std::runtime_error("out of gas"); + } + // precision is 5%(1/20) of higher gas_used value + while (hi - low > hi / 20) { + auto mid = low + ((hi - low) / 2); + + if (is_enough_gas(mid)) { + hi = mid; + } else { + low = mid; + } + } + return toJS(hi); } string eth_getTransactionCount(const string& _address, const Json::Value& _json) override { @@ -362,10 +398,7 @@ class EthImpl : public Eth, EthParams { }, blk_n); - if (result.consensus_err.empty() && result.code_err.empty()) { - return result; - } - throw std::runtime_error(result.consensus_err.empty() ? result.code_err : result.consensus_err); + return result; } // this should be used only in eth_call and eth_estimateGas diff --git a/tests/final_chain_test.cpp b/tests/final_chain_test.cpp index 70e899a6dc..8e5608620d 100644 --- a/tests/final_chain_test.cpp +++ b/tests/final_chain_test.cpp @@ -474,6 +474,11 @@ TEST_F(FinalChainTest, failed_transaction_fee) { } TEST_F(FinalChainTest, revert_reason) { + // contract TestRevert { + // function test(bool arg) public pure { + // require(arg, "arg required"); + // } + // } const auto test_contract_code = "608060405234801561001057600080fd5b506101ac806100206000396000f3fe608060405234801561001057600080fd5b50600436106100" "2b5760003560e01c806336091dff14610030575b600080fd5b61004a600480360381019061004591906100cc565b61004c565b005b806100" @@ -489,8 +494,6 @@ TEST_F(FinalChainTest, revert_reason) { const auto& sk = sender_keys.secret(); cfg.genesis.state.initial_balances = {}; cfg.genesis.state.initial_balances[from] = u256("10000000000000000000000"); - // disable balances check as we have internal transfer - assume_only_toplevel_transfers = false; init(); net::rpc::eth::EthParams eth_rpc_params; @@ -516,6 +519,80 @@ TEST_F(FinalChainTest, revert_reason) { } } +TEST_F(FinalChainTest, incorrect_estimation_regress) { + // contract Receiver { + // 
uint256 public receivedETH; + // receive() external payable { + // receivedETH += msg.value; + // } + // } + const auto receiver_contract_code = + "608060405234801561001057600080fd5b5061012d806100206000396000f3fe608060405260043610601f5760003560e01c8063820bec9d" + "14603f57603a565b36603a57346000808282546032919060a4565b925050819055005b600080fd5b348015604a57600080fd5b5060516065" + "565b604051605c919060de565b60405180910390f35b60005481565b6000819050919050565b7f4e487b7100000000000000000000000000" + "000000000000000000000000000000600052601160045260246000fd5b600060ad82606b565b915060b683606b565b925082820190508082" + "111560cb5760ca6075565b5b92915050565b60d881606b565b82525050565b600060208201905060f1600083018460d1565b9291505056fe" + "a264697066735822122099ea1faf8b41cec96834060f2daaea3ae5c03561e110bdcf5a74ce041ddb497164736f6c63430008120033"; + + // contract SendFunction { + // function send(address to) external payable { + // (bool success,) = to.call{value: msg.value}(""); + // if (!success) { + // revert("Failed to send ETH"); + // } + // } + // } + const auto sender_contract_code = + "608060405234801561001057600080fd5b50610278806100206000396000f3fe60806040526004361061001e5760003560e01c80633e58c5" + "8c14610023575b600080fd5b61003d60048036038101906100389190610152565b61003f565b005b60008173ffffffffffffffffffffffff" + "ffffffffffffffff1634604051610065906101b0565b60006040518083038185875af1925050503d80600081146100a2576040519150601f" + "19603f3d011682016040523d82523d6000602084013e6100a7565b606091505b50509050806100eb576040517f08c379a000000000000000" + "00000000000000000000000000000000000000000081526004016100e290610222565b60405180910390fd5b5050565b600080fd5b600073" + "ffffffffffffffffffffffffffffffffffffffff82169050919050565b600061011f826100f4565b9050919050565b61012f81610114565b" + "811461013a57600080fd5b50565b60008135905061014c81610126565b92915050565b600060208284031215610168576101676100ef565b" + 
"5b60006101768482850161013d565b91505092915050565b600081905092915050565b50565b600061019a60008361017f565b91506101a5" + "8261018a565b600082019050919050565b60006101bb8261018d565b9150819050919050565b600082825260208201905092915050565b7f" + "4661696c656420746f2073656e64204554480000000000000000000000000000600082015250565b600061020c6012836101c5565b915061" + "0217826101d6565b602082019050919050565b6000602082019050818103600083015261023b816101ff565b905091905056fea264697066" + "73582212205fd48a05d31cae1309b1a3bb8fe678c4bfee4cd28079acd90056ad228e18d82864736f6c63430008120033"; + + auto sender_keys = dev::KeyPair::create(); + const auto& from = sender_keys.address(); + const auto& sk = sender_keys.secret(); + cfg.genesis.state.initial_balances = {}; + cfg.genesis.state.initial_balances[from] = u256("10000000000000000000000"); + // disable balances check as we have internal transfer + assume_only_toplevel_transfers = false; + init(); + + net::rpc::eth::EthParams eth_rpc_params; + eth_rpc_params.chain_id = cfg.genesis.chain_id; + eth_rpc_params.gas_limit = cfg.genesis.dag.gas_limit; + eth_rpc_params.final_chain = SUT; + auto eth_json_rpc = net::rpc::eth::NewEth(std::move(eth_rpc_params)); + + auto nonce = 0; + auto trx1 = std::make_shared(nonce++, 0, 0, TEST_TX_GAS_LIMIT, dev::fromHex(receiver_contract_code), sk); + auto trx2 = std::make_shared(nonce++, 0, 0, TEST_TX_GAS_LIMIT, dev::fromHex(sender_contract_code), sk); + auto result = advance({trx1, trx2}); + auto receiver_contract_addr = result->trx_receipts[0].new_contract_address; + auto sender_contract_addr = result->trx_receipts[1].new_contract_address; + EXPECT_EQ(receiver_contract_addr, dev::right160(dev::sha3(dev::rlpList(from, 0)))); + + const auto call_data = "0x3e58c58c000000000000000000000000" + receiver_contract_addr->toString(); + const auto value = 10000; + { + Json::Value est(Json::objectValue); + est["to"] = dev::toHex(*sender_contract_addr); + est["from"] = dev::toHex(from); + est["value"] = value; + est["data"] = 
call_data; + auto estimate = dev::jsToInt(eth_json_rpc->eth_estimateGas(est)); + est["gas"] = dev::toJS(estimate); + eth_json_rpc->eth_call(est, "latest"); + } +} + // This test should be last as state_api isn't destructed correctly because of exception TEST_F(FinalChainTest, initial_validator_exceed_maximum_stake) { const dev::KeyPair key = dev::KeyPair::create(); diff --git a/tests/rpc_test.cpp b/tests/rpc_test.cpp index a4ea10b90a..55157b691a 100644 --- a/tests/rpc_test.cpp +++ b/tests/rpc_test.cpp @@ -21,13 +21,20 @@ TEST_F(RPCTest, eth_estimateGas) { auto eth_json_rpc = net::rpc::eth::NewEth(std::move(eth_rpc_params)); const auto from = dev::toHex(dev::toAddress(node_cfg.front().node_secret)); + auto check_estimation_is_in_range = [&](const Json::Value& trx, const std::string& e) { + auto estimate = dev::jsToInt(eth_json_rpc->eth_estimateGas(trx)); + auto expected = dev::jsToInt(e); + EXPECT_GE(estimate, expected); + EXPECT_GE(expected / 20, estimate - expected); + }; + // Contract creation estimations with author + without author { Json::Value trx(Json::objectValue); trx["data"] = samples::greeter_contract_code; - EXPECT_EQ(eth_json_rpc->eth_estimateGas(trx), "0x5ccc5"); + check_estimation_is_in_range(trx, "0x5ccc5"); trx["from"] = from; - EXPECT_EQ(eth_json_rpc->eth_estimateGas(trx), "0x5ccc5"); + check_estimation_is_in_range(trx, "0x5ccc5"); } // Contract creation with value @@ -35,7 +42,7 @@ TEST_F(RPCTest, eth_estimateGas) { Json::Value trx(Json::objectValue); trx["value"] = 1; trx["data"] = samples::greeter_contract_code; - EXPECT_EQ(eth_json_rpc->eth_estimateGas(trx), "0x5ccc5"); + check_estimation_is_in_range(trx, "0x5ccc5"); } // Simple transfer estimations with author + without author @@ -43,9 +50,9 @@ TEST_F(RPCTest, eth_estimateGas) { Json::Value trx(Json::objectValue); trx["value"] = 1; trx["to"] = dev::toHex(addr_t::random()); - EXPECT_EQ(eth_json_rpc->eth_estimateGas(trx), "0x5208"); // 21k + check_estimation_is_in_range(trx, "0x5208"); // 21k 
trx["from"] = from; - EXPECT_EQ(eth_json_rpc->eth_estimateGas(trx), "0x5208"); // 21k + check_estimation_is_in_range(trx, "0x5208"); // 21k } // Test throw on failed transaction From 8d85640ad145e8efb17b0f64ddc56059dec502e3 Mon Sep 17 00:00:00 2001 From: Dmytro Kostenko Date: Tue, 4 Apr 2023 17:25:23 +0200 Subject: [PATCH 117/162] feat: implement db migrations --- libraries/core_libs/node/src/node.cpp | 3 +++ .../storage/migration/migration_base.hpp | 21 +++++++++++++++++++ .../storage/migration/migration_manager.hpp | 19 +++++++++++++++++ .../storage/include/storage/storage.hpp | 2 ++ .../src/migration/migration_manager.cpp | 15 +++++++++++++ 5 files changed, 60 insertions(+) create mode 100644 libraries/core_libs/storage/include/storage/migration/migration_base.hpp create mode 100644 libraries/core_libs/storage/include/storage/migration/migration_manager.hpp create mode 100644 libraries/core_libs/storage/src/migration/migration_manager.cpp diff --git a/libraries/core_libs/node/src/node.cpp b/libraries/core_libs/node/src/node.cpp index ecf04206c9..e46b89c366 100644 --- a/libraries/core_libs/node/src/node.cpp +++ b/libraries/core_libs/node/src/node.cpp @@ -26,6 +26,7 @@ #include "network/rpc/jsonrpc_http_processor.hpp" #include "network/rpc/jsonrpc_ws_server.hpp" #include "pbft/pbft_manager.hpp" +#include "storage/migration/migration_manager.hpp" #include "transaction/gas_pricer.hpp" #include "transaction/transaction_manager.hpp" @@ -82,6 +83,8 @@ void FullNode::init() { if (db_->getDagBlocksCount() == 0) { db_->setGenesisHash(conf_.genesis.genesisHash()); } + + storage::migration::Manager(db_).applyAll(); } LOG(log_nf_) << "DB initialized ..."; diff --git a/libraries/core_libs/storage/include/storage/migration/migration_base.hpp b/libraries/core_libs/storage/include/storage/migration/migration_base.hpp new file mode 100644 index 0000000000..60c1edd532 --- /dev/null +++ b/libraries/core_libs/storage/include/storage/migration/migration_base.hpp @@ -0,0 +1,21 @@ 
+#pragma once +#include "storage/storage.hpp" + +namespace taraxa::storage::migration { +class Base { + public: + Base(std::shared_ptr db) : db_(std::move(db)) {} + virtual ~Base() = default; + virtual std::string id() = 0; + virtual void migrate() = 0; + void apply() { + migrate(); + setApplied(); + } + void setApplied() { db_->insert(DB::Columns::migrations, id(), true); } + bool isApplied() { return db_->lookup_int(id(), DB::Columns::migrations).has_value(); } + + protected: + std::shared_ptr db_; +}; +} // namespace taraxa::storage::migration \ No newline at end of file diff --git a/libraries/core_libs/storage/include/storage/migration/migration_manager.hpp b/libraries/core_libs/storage/include/storage/migration/migration_manager.hpp new file mode 100644 index 0000000000..1a4500e4da --- /dev/null +++ b/libraries/core_libs/storage/include/storage/migration/migration_manager.hpp @@ -0,0 +1,19 @@ +#pragma once +#include "storage/migration/migration_base.hpp" + +namespace taraxa::storage::migration { +class Manager { + public: + explicit Manager(std::shared_ptr db, const addr_t& node_addr = {}); + template + void registerMigration() { + migrations_.push_back(std::make_shared(db_)); + } + void applyAll(); + + private: + std::shared_ptr db_; + std::vector> migrations_; + LOG_OBJECTS_DEFINE +}; +} // namespace taraxa::storage::migration \ No newline at end of file diff --git a/libraries/core_libs/storage/include/storage/storage.hpp b/libraries/core_libs/storage/include/storage/storage.hpp index eae9f453e6..79b82925c4 100644 --- a/libraries/core_libs/storage/include/storage/storage.hpp +++ b/libraries/core_libs/storage/include/storage/storage.hpp @@ -90,6 +90,8 @@ class DbStorage : public std::enable_shared_from_this { // do not change/move COLUMN(default_column); + // migrations + COLUMN(migrations); // Contains full data for an executed PBFT block including PBFT block, cert votes, dag blocks and transactions COLUMN_W_COMP(period_data, getIntComparator()); 
COLUMN(genesis); diff --git a/libraries/core_libs/storage/src/migration/migration_manager.cpp b/libraries/core_libs/storage/src/migration/migration_manager.cpp new file mode 100644 index 0000000000..a1d78e564c --- /dev/null +++ b/libraries/core_libs/storage/src/migration/migration_manager.cpp @@ -0,0 +1,15 @@ +#include "storage/migration/migration_manager.hpp" + +namespace taraxa::storage::migration { +Manager::Manager(std::shared_ptr db, const addr_t& node_addr) : db_(db) { LOG_OBJECTS_CREATE("MIGRATIONS"); } + +void Manager::applyAll() { + for (const auto& m : migrations_) { + if (!m->isApplied()) { + LOG(log_nf_) << "Applying migration " << m->id(); + m->apply(); + LOG(log_nf_) << "Migration applied " << m->id(); + } + } +} +} // namespace taraxa::storage::migration \ No newline at end of file From 377bf9a24d70eb10bcefa2bcc453d889be344767 Mon Sep 17 00:00:00 2001 From: Dmytro Kostenko Date: Tue, 4 Apr 2023 17:29:13 +0200 Subject: [PATCH 118/162] feat: add TransactionHashes migration --- .../storage/migration/transaction_hashes.hpp | 15 ++++++++ .../src/migration/migration_manager.cpp | 7 +++- .../src/migration/transaction_hashes.cpp | 35 +++++++++++++++++++ 3 files changed, 56 insertions(+), 1 deletion(-) create mode 100644 libraries/core_libs/storage/include/storage/migration/transaction_hashes.hpp create mode 100644 libraries/core_libs/storage/src/migration/transaction_hashes.cpp diff --git a/libraries/core_libs/storage/include/storage/migration/transaction_hashes.hpp b/libraries/core_libs/storage/include/storage/migration/transaction_hashes.hpp new file mode 100644 index 0000000000..a1c392e656 --- /dev/null +++ b/libraries/core_libs/storage/include/storage/migration/transaction_hashes.hpp @@ -0,0 +1,15 @@ +#pragma once +#include + +#include "final_chain/final_chain.hpp" +#include "storage/migration/migration_base.hpp" +#include "transaction/transaction.hpp" + +namespace taraxa::storage::migration { +class TransactionHashes : public migration::Base { + public: 
+ TransactionHashes(std::shared_ptr db) : migration::Base(db) {} + std::string id() override { return "TransactionHashes"; } + void migrate() override; +}; +} // namespace taraxa::storage::migration \ No newline at end of file diff --git a/libraries/core_libs/storage/src/migration/migration_manager.cpp b/libraries/core_libs/storage/src/migration/migration_manager.cpp index a1d78e564c..e249a65547 100644 --- a/libraries/core_libs/storage/src/migration/migration_manager.cpp +++ b/libraries/core_libs/storage/src/migration/migration_manager.cpp @@ -1,7 +1,12 @@ #include "storage/migration/migration_manager.hpp" +#include "storage/migration/transaction_hashes.hpp" + namespace taraxa::storage::migration { -Manager::Manager(std::shared_ptr db, const addr_t& node_addr) : db_(db) { LOG_OBJECTS_CREATE("MIGRATIONS"); } +Manager::Manager(std::shared_ptr db, const addr_t& node_addr) : db_(db) { + LOG_OBJECTS_CREATE("MIGRATIONS"); + registerMigration(); +} void Manager::applyAll() { for (const auto& m : migrations_) { diff --git a/libraries/core_libs/storage/src/migration/transaction_hashes.cpp b/libraries/core_libs/storage/src/migration/transaction_hashes.cpp new file mode 100644 index 0000000000..1365fe2b9a --- /dev/null +++ b/libraries/core_libs/storage/src/migration/transaction_hashes.cpp @@ -0,0 +1,35 @@ +#include "storage/migration/transaction_hashes.hpp" + +namespace taraxa::storage::migration { +struct OldTransactionsHashes { + std::string serialized_; + size_t count_; + + explicit OldTransactionsHashes(std::string serialized) + : serialized_(std::move(serialized)), count_(serialized_.size() / dev::h256::size) {} + dev::h256 get(size_t i) const { + return dev::h256(reinterpret_cast(serialized_.data() + i * dev::h256::size), + dev::h256::ConstructFromPointer); + } + size_t count() const { return count_; } +}; + +void TransactionHashes::migrate() { + auto last_blk_num = + db_->lookup_int(final_chain::DBMetaKeys::LAST_NUMBER, DB::Columns::final_chain_meta); + + auto batch = 
db_->createWriteBatch(); + // Get and save data in new format for all blocks + for (uint64_t p = 1; p <= last_blk_num; ++p) { + ::taraxa::TransactionHashes new_data; + auto old_data = std::make_unique( + db_->lookup(p, DB::Columns::final_chain_transaction_hashes_by_blk_number)); + new_data.reserve(old_data->count()); + for (size_t i = 0; i < new_data.capacity(); ++i) { + new_data.emplace_back(old_data->get(i)); + } + db_->insert(batch, DB::Columns::final_chain_transaction_hashes_by_blk_number, p, dev::rlp(new_data)); + } + db_->commitWriteBatch(batch); +} +} // namespace taraxa::storage::migration \ No newline at end of file From 1a0b7d8fed373d0e5fb0c560a13950ba26e03761 Mon Sep 17 00:00:00 2001 From: Dmytro Kostenko Date: Thu, 13 Apr 2023 15:29:24 +0200 Subject: [PATCH 119/162] chore: resolve PR discussions --- libraries/cli/include/cli/config.hpp | 1 - libraries/cli/src/config.cpp | 4 --- libraries/config/include/config/config.hpp | 1 - libraries/core_libs/node/src/node.cpp | 4 +-- .../storage/migration/migration_base.hpp | 5 ++++ .../storage/migration/transaction_hashes.hpp | 5 ++-- .../storage/include/storage/storage.hpp | 6 ++++- .../src/migration/transaction_hashes.cpp | 18 +++++++------ libraries/core_libs/storage/src/storage.cpp | 25 ++++++++++++------- 9 files changed, 41 insertions(+), 28 deletions(-) diff --git a/libraries/cli/include/cli/config.hpp b/libraries/cli/include/cli/config.hpp index 9fcabe410c..d2c91d75bf 100644 --- a/libraries/cli/include/cli/config.hpp +++ b/libraries/cli/include/cli/config.hpp @@ -38,7 +38,6 @@ class Config { static constexpr const char* REBUILD_DB = "rebuild-db"; static constexpr const char* REBUILD_DB_PERIOD = "rebuild-db-period"; static constexpr const char* REVERT_TO_PERIOD = "revert-to-period"; - static constexpr const char* REBUILD_DB_COLUMNS = "rebuild-db-columns"; static constexpr const char* LIGHT = "light"; static constexpr const char* HELP = "help"; static constexpr const char* VERSION = "version"; diff --git 
a/libraries/cli/src/config.cpp b/libraries/cli/src/config.cpp index 7c19bcbafa..2465a368f3 100644 --- a/libraries/cli/src/config.cpp +++ b/libraries/cli/src/config.cpp @@ -38,7 +38,6 @@ Config::Config(int argc, const char* argv[]) { bool destroy_db = false; bool rebuild_network = false; bool rebuild_db = false; - bool rebuild_db_columns = false; bool light_node = false; bool version = false; uint64_t rebuild_db_period = 0; @@ -81,8 +80,6 @@ Config::Config(int argc, const char* argv[]) { "rebuilding all the other " "database tables - this could take a long " "time"); - node_command_options.add_options()(REBUILD_DB_COLUMNS, bpo::bool_switch(&rebuild_db_columns), - "Removes old DB columns "); node_command_options.add_options()(REBUILD_DB_PERIOD, bpo::value(&rebuild_db_period), "Use with rebuild-db - Rebuild db up " "to a specified period"); @@ -264,7 +261,6 @@ Config::Config(int argc, const char* argv[]) { } node_config_.db_config.db_revert_to_period = revert_to_period; node_config_.db_config.rebuild_db = rebuild_db; - node_config_.db_config.rebuild_db_columns = rebuild_db_columns; node_config_.db_config.rebuild_db_period = rebuild_db_period; node_config_.enable_test_rpc = enable_test_rpc; diff --git a/libraries/config/include/config/config.hpp b/libraries/config/include/config/config.hpp index 5d8653044a..c4c577e22d 100644 --- a/libraries/config/include/config/config.hpp +++ b/libraries/config/include/config/config.hpp @@ -16,7 +16,6 @@ struct DBConfig { PbftPeriod db_revert_to_period = 0; bool rebuild_db = false; PbftPeriod rebuild_db_period = 0; - bool rebuild_db_columns = false; }; void dec_json(Json::Value const &json, DBConfig &db_config); diff --git a/libraries/core_libs/node/src/node.cpp b/libraries/core_libs/node/src/node.cpp index e46b89c366..b588b60184 100644 --- a/libraries/core_libs/node/src/node.cpp +++ b/libraries/core_libs/node/src/node.cpp @@ -63,11 +63,9 @@ void FullNode::init() { conf_.db_config.db_max_open_files, conf_.db_config.db_max_snapshots, 
conf_.db_config.db_revert_to_period, node_addr, true); } - db_ = std::make_shared(conf_.db_path, conf_.db_config.db_snapshot_each_n_pbft_block, conf_.db_config.db_max_open_files, conf_.db_config.db_max_snapshots, - conf_.db_config.db_revert_to_period, node_addr, false, - conf_.db_config.rebuild_db_columns); + conf_.db_config.db_revert_to_period, node_addr, false); if (db_->hasMinorVersionChanged()) { LOG(log_si_) << "Minor DB version has changed. Rebuilding Db"; diff --git a/libraries/core_libs/storage/include/storage/migration/migration_base.hpp b/libraries/core_libs/storage/include/storage/migration/migration_base.hpp index 60c1edd532..1c323bd535 100644 --- a/libraries/core_libs/storage/include/storage/migration/migration_base.hpp +++ b/libraries/core_libs/storage/include/storage/migration/migration_base.hpp @@ -7,8 +7,13 @@ class Base { Base(std::shared_ptr db) : db_(std::move(db)) {} virtual ~Base() = default; virtual std::string id() = 0; + // We need to specify version here, so in case of major version change(db reindex) we won't apply unneeded migrations + virtual uint32_t dbVersion() = 0; virtual void migrate() = 0; void apply() { + if (db_->getMajorVersion() != dbVersion()) { + return; + } migrate(); setApplied(); } diff --git a/libraries/core_libs/storage/include/storage/migration/transaction_hashes.hpp b/libraries/core_libs/storage/include/storage/migration/transaction_hashes.hpp index a1c392e656..ab0876a987 100644 --- a/libraries/core_libs/storage/include/storage/migration/transaction_hashes.hpp +++ b/libraries/core_libs/storage/include/storage/migration/transaction_hashes.hpp @@ -8,8 +8,9 @@ namespace taraxa::storage::migration { class TransactionHashes : public migration::Base { public: - TransactionHashes(std::shared_ptr db) : migration::Base(db) {} - std::string id() override { return "TransactionHashes"; } + TransactionHashes(std::shared_ptr db); + std::string id() override; + uint32_t dbVersion() override; void migrate() override; }; } // 
namespace taraxa::storage::migration \ No newline at end of file diff --git a/libraries/core_libs/storage/include/storage/storage.hpp b/libraries/core_libs/storage/include/storage/storage.hpp index 79b82925c4..96f18a4b34 100644 --- a/libraries/core_libs/storage/include/storage/storage.hpp +++ b/libraries/core_libs/storage/include/storage/storage.hpp @@ -143,6 +143,7 @@ class DbStorage : public std::enable_shared_from_this { const uint32_t kDbSnapshotsMaxCount = 0; std::set snapshots_; + uint32_t kMajorVersion_; bool minor_version_changed_ = false; auto handle(Column const& col) const { return handles_[col.ordinal_]; } @@ -152,7 +153,7 @@ class DbStorage : public std::enable_shared_from_this { public: explicit DbStorage(fs::path const& base_path, uint32_t db_snapshot_each_n_pbft_block = 0, uint32_t max_open_files = 0, uint32_t db_max_snapshots = 0, PbftPeriod db_revert_to_period = 0, addr_t node_addr = addr_t(), - bool rebuild = false, bool rebuild_columns = false); + bool rebuild = false); ~DbStorage(); DbStorage(const DbStorage&) = delete; @@ -175,6 +176,9 @@ class DbStorage : public std::enable_shared_from_this { void disableSnapshots(); void enableSnapshots(); + uint32_t getMajorVersion() const; + std::unique_ptr getColumnIterator(const Column& c); + // Genesis void setGenesisHash(const h256& genesis_hash); std::optional getGenesisHash(); diff --git a/libraries/core_libs/storage/src/migration/transaction_hashes.cpp b/libraries/core_libs/storage/src/migration/transaction_hashes.cpp index 1365fe2b9a..b5625a9fc2 100644 --- a/libraries/core_libs/storage/src/migration/transaction_hashes.cpp +++ b/libraries/core_libs/storage/src/migration/transaction_hashes.cpp @@ -14,21 +14,25 @@ struct OldTransactionsHashes { size_t count() const { return count_; } }; -void TransactionHashes::migrate() { - auto last_blk_num = - db_->lookup_int(final_chain::DBMetaKeys::LAST_NUMBER, DB::Columns::final_chain_meta); +TransactionHashes::TransactionHashes(std::shared_ptr db) : 
migration::Base(db) {} + +std::string TransactionHashes::id() { return "TransactionHashes"; } + +uint32_t TransactionHashes::dbVersion() { return 1; } +void TransactionHashes::migrate() { + auto it = db_->getColumnIterator(DB::Columns::final_chain_transaction_hashes_by_blk_number); auto batch = db_->createWriteBatch(); + // Get and save data in new format for all blocks - for (uint64_t p = 1; p <= last_blk_num; ++p) { + for (it->SeekToFirst(); it->Valid(); it->Next()) { ::taraxa::TransactionHashes new_data; - auto old_data = std::make_unique( - db_->lookup(p, DB::Columns::final_chain_transaction_hashes_by_blk_number)); + auto old_data = std::make_unique(it->value().ToString()); new_data.reserve(old_data->count()); for (size_t i = 0; i < new_data.capacity(); ++i) { new_data.emplace_back(old_data->get(i)); } - db_->insert(batch, DB::Columns::final_chain_transaction_hashes_by_blk_number, p, dev::rlp(new_data)); + db_->insert(batch, DB::Columns::final_chain_transaction_hashes_by_blk_number, it->key(), dev::rlp(new_data)); } db_->commitWriteBatch(batch); } diff --git a/libraries/core_libs/storage/src/storage.cpp b/libraries/core_libs/storage/src/storage.cpp index e5181e3184..eb8c29e329 100644 --- a/libraries/core_libs/storage/src/storage.cpp +++ b/libraries/core_libs/storage/src/storage.cpp @@ -20,8 +20,7 @@ static constexpr uint16_t DAG_BLOCKS_POS_IN_PERIOD_DATA = 2; static constexpr uint16_t TRANSACTIONS_POS_IN_PERIOD_DATA = 3; DbStorage::DbStorage(fs::path const& path, uint32_t db_snapshot_each_n_pbft_block, uint32_t max_open_files, - uint32_t db_max_snapshots, PbftPeriod db_revert_to_period, addr_t node_addr, bool rebuild, - bool rebuild_columns) + uint32_t db_max_snapshots, PbftPeriod db_revert_to_period, addr_t node_addr, bool rebuild) : path_(path), handles_(Columns::all.size()), kDbSnapshotsEachNblock(db_snapshot_each_n_pbft_block), @@ -61,9 +60,7 @@ DbStorage::DbStorage(fs::path const& path, uint32_t db_snapshot_each_n_pbft_bloc }); LOG_OBJECTS_CREATE("DBS"); - 
if (rebuild_columns) { - rebuildColumns(options); - } + rebuildColumns(options); // Iterate over the db folders and populate snapshot set loadSnapshots(); @@ -80,15 +77,15 @@ DbStorage::DbStorage(fs::path const& path, uint32_t db_snapshot_each_n_pbft_bloc dag_blocks_count_.store(getStatusField(StatusDbField::DagBlkCount)); dag_edge_count_.store(getStatusField(StatusDbField::DagEdgeCount)); - uint32_t major_version = getStatusField(StatusDbField::DbMajorVersion); + kMajorVersion_ = getStatusField(StatusDbField::DbMajorVersion); uint32_t minor_version = getStatusField(StatusDbField::DbMinorVersion); - if (major_version == 0 && minor_version == 0) { + if (kMajorVersion_ == 0 && minor_version == 0) { saveStatusField(StatusDbField::DbMajorVersion, TARAXA_DB_MAJOR_VERSION); saveStatusField(StatusDbField::DbMinorVersion, TARAXA_DB_MINOR_VERSION); } else { - if (major_version != TARAXA_DB_MAJOR_VERSION) { + if (kMajorVersion_ != TARAXA_DB_MAJOR_VERSION) { throw DbException(string("Database version mismatch. Version on disk ") + - getFormattedVersion({major_version, minor_version}) + + getFormattedVersion({kMajorVersion_, minor_version}) + " Node version:" + getFormattedVersion({TARAXA_DB_MAJOR_VERSION, TARAXA_DB_MINOR_VERSION})); } else if (minor_version != TARAXA_DB_MINOR_VERSION) { minor_version_changed_ = true; @@ -100,6 +97,10 @@ void DbStorage::rebuildColumns(const rocksdb::Options& options) { std::unique_ptr db; std::vector column_families; rocksdb::DB::ListColumnFamilies(options, db_path_.string(), &column_families); + if (column_families.empty()) { + LOG(log_wr_) << "DB isn't initialized in rebuildColumns. 
Skip it"; + return; + } std::vector descriptors; descriptors.reserve(column_families.size()); @@ -262,6 +263,12 @@ DbStorage::~DbStorage() { checkStatus(db_->Close()); } +uint32_t DbStorage::getMajorVersion() const { return kMajorVersion_; } + +std::unique_ptr DbStorage::getColumnIterator(const Column& c) { + return std::unique_ptr(db_->NewIterator(read_options_, handle(c))); +} + void DbStorage::checkStatus(rocksdb::Status const& status) { if (status.ok()) return; throw DbException(string("Db error. Status code: ") + std::to_string(status.code()) + From 1b991508fc93c74a83ed24f076420ffe265b86f8 Mon Sep 17 00:00:00 2001 From: Dmytro Kostenko Date: Thu, 13 Apr 2023 17:17:31 +0200 Subject: [PATCH 120/162] fix: add batch to migration base class --- .../include/storage/migration/migration_base.hpp | 14 ++++++++++---- .../storage/src/migration/transaction_hashes.cpp | 4 +--- 2 files changed, 11 insertions(+), 7 deletions(-) diff --git a/libraries/core_libs/storage/include/storage/migration/migration_base.hpp b/libraries/core_libs/storage/include/storage/migration/migration_base.hpp index 1c323bd535..b1ce371b01 100644 --- a/libraries/core_libs/storage/include/storage/migration/migration_base.hpp +++ b/libraries/core_libs/storage/include/storage/migration/migration_base.hpp @@ -4,23 +4,29 @@ namespace taraxa::storage::migration { class Base { public: - Base(std::shared_ptr db) : db_(std::move(db)) {} + Base(std::shared_ptr db) : db_(std::move(db)), batch_(db_->createWriteBatch()) {} virtual ~Base() = default; virtual std::string id() = 0; // We need to specify version here, so in case of major version change(db reindex) we won't apply unneeded migrations virtual uint32_t dbVersion() = 0; - virtual void migrate() = 0; + + bool isApplied() { return db_->lookup_int(id(), DB::Columns::migrations).has_value(); } void apply() { if (db_->getMajorVersion() != dbVersion()) { return; } migrate(); setApplied(); + db_->commitWriteBatch(batch_); } - void setApplied() { 
db_->insert(DB::Columns::migrations, id(), true); } - bool isApplied() { return db_->lookup_int(id(), DB::Columns::migrations).has_value(); } protected: + // Method with custom logic. All db changes should be made using `batch_` + virtual void migrate() = 0; + + void setApplied() { db_->insert(batch_, DB::Columns::migrations, id(), true); } + std::shared_ptr db_; + DB::Batch batch_; }; } // namespace taraxa::storage::migration \ No newline at end of file diff --git a/libraries/core_libs/storage/src/migration/transaction_hashes.cpp b/libraries/core_libs/storage/src/migration/transaction_hashes.cpp index b5625a9fc2..63f5ab0b0e 100644 --- a/libraries/core_libs/storage/src/migration/transaction_hashes.cpp +++ b/libraries/core_libs/storage/src/migration/transaction_hashes.cpp @@ -22,7 +22,6 @@ uint32_t TransactionHashes::dbVersion() { return 1; } void TransactionHashes::migrate() { auto it = db_->getColumnIterator(DB::Columns::final_chain_transaction_hashes_by_blk_number); - auto batch = db_->createWriteBatch(); // Get and save data in new format for all blocks for (it->SeekToFirst(); it->Valid(); it->Next()) { @@ -32,8 +31,7 @@ void TransactionHashes::migrate() { for (size_t i = 0; i < new_data.capacity(); ++i) { new_data.emplace_back(old_data->get(i)); } - db_->insert(batch, DB::Columns::final_chain_transaction_hashes_by_blk_number, it->key(), dev::rlp(new_data)); + db_->insert(batch_, DB::Columns::final_chain_transaction_hashes_by_blk_number, it->key(), dev::rlp(new_data)); } - db_->commitWriteBatch(batch); } } // namespace taraxa::storage::migration \ No newline at end of file From bb4fd3a9e2ad424102bfa836966b71ef1af42860 Mon Sep 17 00:00:00 2001 From: Dmytro Kostenko Date: Mon, 17 Apr 2023 13:47:09 +0200 Subject: [PATCH 121/162] fix: remove conan bincrafters repo and add jsonrpccpp with external_project --- CMakeLists.txt | 17 +++++++- CMakeModules/ProjectJSONRPCCPP.cmake | 58 ++++++++++++++++++++++++++++ Dockerfile | 11 +++--- conanfile.py | 3 +- doc/building.md | 
10 +++-- libraries/core_libs/CMakeLists.txt | 2 +- submodules/CMakeLists.txt | 2 - 7 files changed, 87 insertions(+), 16 deletions(-) create mode 100644 CMakeModules/ProjectJSONRPCCPP.cmake diff --git a/CMakeLists.txt b/CMakeLists.txt index 3f30a94ab6..2f3582ef8a 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -48,6 +48,7 @@ endif() set(CMAKE_CXX_FLAGS_RELEASE "-O3") set(CMAKE_CXX_FLAGS_DEBUG "-g") set(CMAKE_CXX_FLAGS_RELWITHDEBINFO "-g -O2") +set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fPIC") # Enable LTO option(TARAXA_ENABLE_LTO "Build taraxad with LTO (ON or OFF)" OFF) @@ -161,8 +162,15 @@ else() include(${CMAKE_BINARY_DIR}/conan.cmake) - conan_cmake_run(CONANFILE conanfile.py BUILD_TYPE ${CMAKE_BUILD_TYPE} BUILD missing - BASIC_SETUP CMAKE_TARGETS KEEP_RPATHS PROFILE ${CONAN_PROFILE}) + conan_cmake_run(CONANFILE conanfile.py + BUILD_TYPE ${CMAKE_BUILD_TYPE} + CMAKE_BUILD_TYPE ${CMAKE_BUILD_TYPE} + BASIC_SETUP + CMAKE_TARGETS + KEEP_RPATHS + PROFILE ${CONAN_PROFILE} + BUILD missing + ) set(CONAN_EXPORTED true CACHE BOOL "Is conan already run on the project") endif() @@ -203,6 +211,11 @@ include(CMakeModules/git_info.cmake) find_package(GMP) find_package(MPFR) +include(ExternalProject) +# use JSONCPP library from conan for JSONRPCCPP build +set(JSONCPP_INCLUDE_DIR ${CONAN_INCLUDE_DIRS_JSONCPP}) +include(ProjectJSONRPCCPP) + # Add sub-directories cmakes add_subdirectory(submodules) add_subdirectory(libraries) diff --git a/CMakeModules/ProjectJSONRPCCPP.cmake b/CMakeModules/ProjectJSONRPCCPP.cmake new file mode 100644 index 0000000000..38736ac4d5 --- /dev/null +++ b/CMakeModules/ProjectJSONRPCCPP.cmake @@ -0,0 +1,58 @@ +set(prefix "${CMAKE_BINARY_DIR}/deps") +set(lib_path "${prefix}/lib") +set(include_path "${prefix}/include") + +ExternalProject_Add(jsonrpccpp + PREFIX "${prefix}" + DOWNLOAD_NAME libjson-rpc-cpp-v1.4.1.tar.gz + DOWNLOAD_NO_PROGRESS TRUE + URL https://github.com/cinemast/libjson-rpc-cpp/archive/refs/tags/v1.4.1.tar.gz + URL_HASH 
SHA256=7a057e50d6203e4ea0a10ba5e4dbf344c48b177e5a3bf82e850eb3a783c11eb5 + CMAKE_ARGS + -DCMAKE_BUILD_TYPE=Release + -DCMAKE_INSTALL_PREFIX= + -DCMAKE_C_COMPILER=${CMAKE_C_COMPILER} + -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER} + -DCMAKE_CXX_FLAGS=${CMAKE_CXX_FLAGS} + -DCMAKE_CXX_COMPILER_LAUNCHER=${DCMAKE_CXX_COMPILER_LAUNCHER} + -DJSONCPP_INCLUDE_DIR=${JSONCPP_INCLUDE_DIR} + -DBUILD_STATIC_LIBS=1 + # disable build of parts that we don't need + -DBUILD_SHARED_LIBS=0 + -DREDIS_SERVER=0 + -DREDIS_CLIENT=0 + -DCOMPILE_TESTS=0 + -DCOMPILE_STUBGEN=0 + -DCOMPILE_EXAMPLES=0 + -DWITH_COVERAGE=0 + -DHTTP_CLIENT=0 + BUILD_COMMAND ${CMAKE_COMMAND} --build --config Release + INSTALL_COMMAND ${CMAKE_COMMAND} --build --config Release --target install + BUILD_BYPRODUCTS "${lib_path}" + DOWNLOAD_EXTRACT_TIMESTAMP NEW + LOG_CONFIGURE 0 + LOG_BUILD 0 + LOG_INSTALL 0 +) + + +add_library(Jsonrpccpp-common STATIC IMPORTED) +set_property(TARGET Jsonrpccpp-common PROPERTY IMPORTED_CONFIGURATIONS Release) +set_property(TARGET Jsonrpccpp-common PROPERTY IMPORTED_LOCATION "${lib_path}/${CMAKE_STATIC_LIBRARY_PREFIX}jsonrpccpp-common${CMAKE_STATIC_LIBRARY_SUFFIX}") +set_property(TARGET Jsonrpccpp-common PROPERTY INTERFACE_INCLUDE_DIRECTORIES ${include_path}) +add_dependencies(Jsonrpccpp-common jsonrpccpp) + +add_library(Jsonrpccpp-server STATIC IMPORTED) +set_property(TARGET Jsonrpccpp-server PROPERTY IMPORTED_CONFIGURATIONS Release) +set_property(TARGET Jsonrpccpp-server PROPERTY IMPORTED_LOCATION "${lib_path}/${CMAKE_STATIC_LIBRARY_PREFIX}jsonrpccpp-server${CMAKE_STATIC_LIBRARY_SUFFIX}") +set_property(TARGET Jsonrpccpp-server PROPERTY INTERFACE_INCLUDE_DIRECTORIES ${include_path}) +add_dependencies(Jsonrpccpp-server jsonrpccpp) + +add_library(Jsonrpccpp-client STATIC IMPORTED) +set_property(TARGET Jsonrpccpp-client PROPERTY IMPORTED_CONFIGURATIONS Release) +set_property(TARGET Jsonrpccpp-client PROPERTY IMPORTED_LOCATION 
"${lib_path}/${CMAKE_STATIC_LIBRARY_PREFIX}jsonrpccpp-client${CMAKE_STATIC_LIBRARY_SUFFIX}") +set_property(TARGET Jsonrpccpp-client PROPERTY INTERFACE_INCLUDE_DIRECTORIES ${include_path}) +add_dependencies(Jsonrpccpp-client jsonrpccpp) + +add_library(Jsonrpccpp INTERFACE) +target_link_libraries(Jsonrpccpp INTERFACE Jsonrpccpp-common Jsonrpccpp-server Jsonrpccpp-client) diff --git a/Dockerfile b/Dockerfile index c22343e765..23997bd2bf 100644 --- a/Dockerfile +++ b/Dockerfile @@ -23,6 +23,7 @@ RUN apt-get update && DEBIAN_FRONTEND=noninteractive \ lsb-release \ libgmp-dev \ libmpfr-dev \ + libmicrohttpd-dev \ software-properties-common \ && rm -rf /var/lib/apt/lists/* @@ -72,15 +73,14 @@ ENV CONAN_REVISIONS_ENABLED=1 WORKDIR /opt/taraxa/ COPY conanfile.py . -RUN conan remote add -f bincrafters "https://bincrafters.jfrog.io/artifactory/api/conan/public-conan" \ - && conan profile new clang --detect \ +RUN conan profile new clang --detect \ && conan profile update settings.compiler=clang clang \ && conan profile update settings.compiler.version=$LLVM_VERSION clang \ && conan profile update settings.compiler.libcxx=libstdc++11 clang \ && conan profile update settings.build_type=RelWithDebInfo clang \ && conan profile update env.CC=clang-$LLVM_VERSION clang \ && conan profile update env.CXX=clang++-$LLVM_VERSION clang \ - && conan install --build missing -pr:b=clang . + && conan install --build missing -pr=clang . 
################################################################### # Build stage - use builder image for actual build of taraxa node # @@ -98,8 +98,9 @@ RUN mkdir $BUILD_OUTPUT_DIR && cd $BUILD_OUTPUT_DIR \ && cmake -DCMAKE_BUILD_TYPE=RelWithDebInfo \ -DTARAXA_ENABLE_LTO=OFF \ -DTARAXA_STATIC_BUILD=OFF \ - ../ \ - && make -j$(nproc) all \ + ../ + +RUN cd $BUILD_OUTPUT_DIR && make -j$(nproc) all \ # Copy CMake generated Testfile to be able to trigger ctest from bin directory && cp tests/CTestTestfile.cmake bin/ \ # keep only required shared libraries and final binaries diff --git a/conanfile.py b/conanfile.py index cf82c04941..364e1d5d2a 100644 --- a/conanfile.py +++ b/conanfile.py @@ -21,7 +21,7 @@ def requirements(self): self.requires("lz4/1.9.4") self.requires("rocksdb/6.29.5") self.requires("prometheus-cpp/1.1.0") - self.requires("libjson-rpc-cpp/1.3.0@bincrafters/stable") + self.requires("jsoncpp/1.9.5") def _configure_boost_libs(self): self.options["boost"].without_atomic = False @@ -63,7 +63,6 @@ def configure(self): self.options["cppcheck"].have_rules = False self.options["rocksdb"].use_rtti = True self.options["rocksdb"].with_lz4 = True - self.options["libjson-rpc-cpp"].shared = False # mpir is required by cppcheck and it causing gmp confict self.options["mpir"].enable_gmpcompat = False diff --git a/doc/building.md b/doc/building.md index 628dae2a78..f016230256 100644 --- a/doc/building.md +++ b/doc/building.md @@ -29,7 +29,8 @@ will build out of the box without further effort: libsnappy-dev \ rapidjson-dev \ libgmp-dev \ - libmpfr-dev + libmpfr-dev \ + libmicrohttpd-dev # Optional. Needed to run py_test. 
This won't install on arm64 OS because package is missing in apt sudo add-apt-repository ppa:ethereum/ethereum @@ -90,7 +91,8 @@ will build out of the box without further effort: python3-pip \ rapidjson-dev \ libgmp-dev \ - libmpfr-dev + libmpfr-dev \ + libmicrohttpd-dev # Install conan package manager @@ -172,7 +174,7 @@ And optional: First you need to get (Brew)[https://brew.sh/] package manager. After that you need tot install dependencies with it. Clang-14 is used for compilation. brew update - brew install coreutils go autoconf automake gflags git libtool llvm@14 make pkg-config cmake conan snappy zstd rapidjson gmp mpfr + brew install coreutils go autoconf automake gflags git libtool llvm@14 make pkg-config cmake conan snappy zstd rapidjson gmp mpfr libmicrohttpd ### Clone the Repository @@ -243,7 +245,7 @@ You should be able to build project following default MacOS building process. Bu ### Install dependencies - /usr/local/bin/brew install coreutils go autoconf automake gflags git libtool llvm@13 make pkg-config cmake conan snappy zstd rapidjson gmp mpfr + /usr/local/bin/brew install coreutils go autoconf automake gflags git libtool llvm@13 make pkg-config cmake conan snappy zstd rapidjson gmp mpfr libmicrohttpd ### Clone the Repository diff --git a/libraries/core_libs/CMakeLists.txt b/libraries/core_libs/CMakeLists.txt index 9857d5348e..b7bb353dcd 100644 --- a/libraries/core_libs/CMakeLists.txt +++ b/libraries/core_libs/CMakeLists.txt @@ -47,7 +47,7 @@ target_link_libraries(core_libs PUBLIC taraxa-evm p2p metrics - CONAN_PKG::libjson-rpc-cpp + Jsonrpccpp CONAN_PKG::rocksdb # GraphQL cppgraphqlgen::graphqlservice diff --git a/submodules/CMakeLists.txt b/submodules/CMakeLists.txt index a538dde2e6..3c5becf960 100644 --- a/submodules/CMakeLists.txt +++ b/submodules/CMakeLists.txt @@ -27,8 +27,6 @@ add_library( ) target_include_directories(ethash PUBLIC ${include_dir}) -include(ExternalProject) - # prefix of build dir set(BUILD_DIR_PREFIX 
"${CMAKE_BINARY_DIR}/deps") ## add not cmake target From 480b10d7d7a0584aa66c666dca0db9fc5087421d Mon Sep 17 00:00:00 2001 From: Matus Kysel Date: Fri, 14 Apr 2023 08:50:54 +0200 Subject: [PATCH 122/162] chore: remove prove() code that did nothing --- .../core_libs/consensus/include/final_chain/state_api.hpp | 2 -- libraries/core_libs/consensus/src/final_chain/state_api.cpp | 5 ----- submodules/taraxa-evm | 2 +- 3 files changed, 1 insertion(+), 8 deletions(-) diff --git a/libraries/core_libs/consensus/include/final_chain/state_api.hpp b/libraries/core_libs/consensus/include/final_chain/state_api.hpp index ae4535db74..5096eec020 100644 --- a/libraries/core_libs/consensus/include/final_chain/state_api.hpp +++ b/libraries/core_libs/consensus/include/final_chain/state_api.hpp @@ -34,8 +34,6 @@ class StateAPI { void update_state_config(const Config& new_config); - Proof prove(EthBlockNumber blk_num, const root_t& state_root, const addr_t& addr, - const std::vector& keys) const; std::optional get_account(EthBlockNumber blk_num, const addr_t& addr) const; u256 get_account_storage(EthBlockNumber blk_num, const addr_t& addr, const u256& key) const; bytes get_code_by_address(EthBlockNumber blk_num, const addr_t& addr) const; diff --git a/libraries/core_libs/consensus/src/final_chain/state_api.cpp b/libraries/core_libs/consensus/src/final_chain/state_api.cpp index 62fe456160..7e18f6c408 100644 --- a/libraries/core_libs/consensus/src/final_chain/state_api.cpp +++ b/libraries/core_libs/consensus/src/final_chain/state_api.cpp @@ -142,11 +142,6 @@ void StateAPI::update_state_config(const Config& new_config) { err_h.check(); } -Proof StateAPI::prove(EthBlockNumber blk_num, const root_t& state_root, const addr_t& addr, - const std::vector& keys) const { - return c_method_args_rlp(this_c_, blk_num, state_root, addr, keys); -} - std::optional StateAPI::get_account(EthBlockNumber blk_num, const addr_t& addr) const { return c_method_args_rlp, from_rlp, 
taraxa_evm_state_api_get_account>(this_c_, blk_num, addr); } diff --git a/submodules/taraxa-evm b/submodules/taraxa-evm index 4a1e215d42..3d67a61eab 160000 --- a/submodules/taraxa-evm +++ b/submodules/taraxa-evm @@ -1 +1 @@ -Subproject commit 4a1e215d426204d1bce525e67a25f1b9cd9c3fbb +Subproject commit 3d67a61eab1e24a64b0edda0bda9116d7e82a809 From bb8a16e67c228d0e86f2e6962caaed97d319704a Mon Sep 17 00:00:00 2001 From: Matus Kysel Date: Fri, 14 Apr 2023 08:51:17 +0200 Subject: [PATCH 123/162] chore: optimize copies on state prune --- .../core_libs/consensus/src/final_chain/final_chain.cpp | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/libraries/core_libs/consensus/src/final_chain/final_chain.cpp b/libraries/core_libs/consensus/src/final_chain/final_chain.cpp index b62b8d7e4b..c5ee4bbeaf 100644 --- a/libraries/core_libs/consensus/src/final_chain/final_chain.cpp +++ b/libraries/core_libs/consensus/src/final_chain/final_chain.cpp @@ -274,9 +274,10 @@ class FinalChainImpl final : public FinalChain { db_->compactColumn(DB::Columns::final_chain_blk_hash_by_number); db_->compactColumn(DB::Columns::final_chain_blk_number_by_hash); - boost::asio::post(prune_thread_, [this, last_block_to_keep, state_root_to_prune]() { - state_api_.prune(last_block_to_keep->state_root, state_root_to_prune, last_block_to_keep->number); - }); + boost::asio::post( + prune_thread_, + [this, to_keep = std::move(last_block_to_keep->state_root), to_prune = std::move(state_root_to_prune), + number = last_block_to_keep->number]() { state_api_.prune(to_keep, to_prune, number); }); } } From ffd22a156c3fe4a86f7f8c184089488abf4b7ccf Mon Sep 17 00:00:00 2001 From: Matus Kysel Date: Fri, 14 Apr 2023 08:53:19 +0200 Subject: [PATCH 124/162] chore: remove old structs --- .../include/final_chain/state_api_data.hpp | 14 -------------- .../consensus/src/final_chain/state_api_data.cpp | 2 -- submodules/taraxa-evm | 2 +- 3 files changed, 1 insertion(+), 17 deletions(-) diff --git 
a/libraries/core_libs/consensus/include/final_chain/state_api_data.hpp b/libraries/core_libs/consensus/include/final_chain/state_api_data.hpp index 81f9a07e21..6166e58af4 100644 --- a/libraries/core_libs/consensus/include/final_chain/state_api_data.hpp +++ b/libraries/core_libs/consensus/include/final_chain/state_api_data.hpp @@ -92,20 +92,6 @@ struct Account { h256 const& storage_root_eth() const; } const ZeroAccount; -struct TrieProof { - bytes value; - std::vector nodes; - - HAS_RLP_FIELDS -}; - -struct Proof { - TrieProof account_proof; - std::vector storage_proofs; - - HAS_RLP_FIELDS -}; - struct StateDescriptor { EthBlockNumber blk_num = 0; h256 state_root; diff --git a/libraries/core_libs/consensus/src/final_chain/state_api_data.cpp b/libraries/core_libs/consensus/src/final_chain/state_api_data.cpp index 17a84e3593..e41715dd2e 100644 --- a/libraries/core_libs/consensus/src/final_chain/state_api_data.cpp +++ b/libraries/core_libs/consensus/src/final_chain/state_api_data.cpp @@ -18,8 +18,6 @@ RLP_FIELDS_DEFINE(LogRecord, address, topics, data) RLP_FIELDS_DEFINE(ExecutionResult, code_retval, new_contract_addr, logs, gas_used, code_err, consensus_err) RLP_FIELDS_DEFINE(StateTransitionResult, execution_results, state_root, total_reward) RLP_FIELDS_DEFINE(Account, nonce, balance, storage_root_hash, code_hash, code_size) -RLP_FIELDS_DEFINE(TrieProof, value, nodes) -RLP_FIELDS_DEFINE(Proof, account_proof, storage_proofs) RLP_FIELDS_DEFINE(StateDescriptor, blk_num, state_root) RLP_FIELDS_DEFINE(Tracing, vmTrace, trace, stateDiff) } // namespace taraxa::state_api \ No newline at end of file diff --git a/submodules/taraxa-evm b/submodules/taraxa-evm index 3d67a61eab..3610c2557e 160000 --- a/submodules/taraxa-evm +++ b/submodules/taraxa-evm @@ -1 +1 @@ -Subproject commit 3d67a61eab1e24a64b0edda0bda9116d7e82a809 +Subproject commit 3610c2557efa4967c620984f532cd5b602ef4f5d From f180a71b37d36cc7b00009a235fb0951dc3efffd Mon Sep 17 00:00:00 2001 From: Matus Kysel Date: Mon, 
17 Apr 2023 15:39:06 +0200 Subject: [PATCH 125/162] chore: remove old cmake files --- CMakeModules/EthDependencies.cmake | 34 ----------- CMakeModules/EthExecutableHelper.cmake | 61 ------------------- CMakeModules/EthUtils.cmake | 82 -------------------------- CMakeModules/jsonrpcstubHelper.cmake | 25 ++++++++ 4 files changed, 25 insertions(+), 177 deletions(-) delete mode 100644 CMakeModules/EthDependencies.cmake delete mode 100644 CMakeModules/EthExecutableHelper.cmake delete mode 100644 CMakeModules/EthUtils.cmake create mode 100644 CMakeModules/jsonrpcstubHelper.cmake diff --git a/CMakeModules/EthDependencies.cmake b/CMakeModules/EthDependencies.cmake deleted file mode 100644 index 951bba9bb6..0000000000 --- a/CMakeModules/EthDependencies.cmake +++ /dev/null @@ -1,34 +0,0 @@ -# The Windows platform has not historically had any standard packaging system for delivering -# versioned releases of libraries. Homebrew and PPA perform that function for macOS and Ubuntu -# respectively, and there are analogous standards for other Linux distros. In the absense of -# such a standard, we have chosen to make a "fake packaging system" for cpp-ethereum, which is -# implemented in https://github.com/ethereum/cpp-dependencies. -# -# NOTE - In the last couple of years, the NuGet packaging system, first created for delivery -# of .NET packages, has added support for C++ packages, and it may be possible for us to migrate -# our "fake package server" to that real package server. That would certainly be preferable -# to rolling our own, but it also puts us at the mercy of intermediate package maintainers who -# may be inactive. There is not a fantastic range of packages available at the time of writing, -# so we might find that such a move turns us into becoming the package maintainer for our -# dependencies. 
Not a net win :-) -# -# "Windows - Try to use NuGet C++ packages" -# https://github.com/ethereum/webthree-umbrella/issues/509 -# -# Perhaps a better alternative is to step away from dependencies onto binary releases entirely, -# and switching to build-from-source for some (or all) of our dependencies, especially if they -# are small. That gives us total control, but at the cost of longer build times. That is the -# approach which Pawel has taken for LLVM in https://github.com/ethereum/evmjit. - -if (MSVC) - if (CMAKE_CXX_COMPILER_VERSION VERSION_LESS 19.0.0) - message(FATAL_ERROR "ERROR - As of the 1.3.0 release, cpp-ethereum only supports Visual Studio 2015 or newer.\nPlease download from https://www.visualstudio.com/en-us/products/visual-studio-community-vs.aspx.") - else() - get_filename_component(ETH_DEPENDENCY_INSTALL_DIR "${CMAKE_CURRENT_LIST_DIR}/../deps/x64" ABSOLUTE) - endif() - set (CMAKE_PREFIX_PATH ${ETH_DEPENDENCY_INSTALL_DIR} ${CMAKE_PREFIX_PATH}) -endif() - -# custom cmake scripts -set(ETH_CMAKE_DIR ${CMAKE_CURRENT_LIST_DIR}) -set(ETH_SCRIPTS_DIR ${ETH_CMAKE_DIR}/scripts) \ No newline at end of file diff --git a/CMakeModules/EthExecutableHelper.cmake b/CMakeModules/EthExecutableHelper.cmake deleted file mode 100644 index eaf2223b35..0000000000 --- a/CMakeModules/EthExecutableHelper.cmake +++ /dev/null @@ -1,61 +0,0 @@ -# -# this function requires the following variables to be specified: -# ETH_VERSION -# PROJECT_NAME -# PROJECT_VERSION -# PROJECT_COPYRIGHT_YEAR -# PROJECT_VENDOR -# PROJECT_DOMAIN_SECOND -# PROJECT_DOMAIN_FIRST -# SRC_LIST -# HEADERS -# -# params: -# ICON -# - -macro(eth_copy_dll EXECUTABLE DLL) - # dlls must be unsubstitud list variable (without ${}) in format - # optimized;path_to_dll.dll;debug;path_to_dlld.dll - if(DEFINED MSVC) - list(GET ${DLL} 1 DLL_RELEASE) - list(GET ${DLL} 3 DLL_DEBUG) - add_custom_command(TARGET ${EXECUTABLE} - PRE_BUILD - COMMAND ${CMAKE_COMMAND} ARGS - -DDLL_RELEASE="${DLL_RELEASE}" - 
-DDLL_DEBUG="${DLL_DEBUG}" - -DCONF="$" - -DDESTINATION="${CMAKE_CURRENT_BINARY_DIR}/${CMAKE_CFG_INTDIR}" - -P "${ETH_SCRIPTS_DIR}/copydlls.cmake" - ) - endif() -endmacro() - -macro(eth_copy_dlls EXECUTABLE) - foreach(dll ${ARGN}) - eth_copy_dll(${EXECUTABLE} ${dll}) - endforeach(dll) -endmacro() - -macro(jsonrpcstub_create EXECUTABLE SPEC SERVERNAME SERVERDIR SERVERFILENAME CLIENTNAME CLIENTDIR CLIENTFILENAME) - if (ETH_JSON_RPC_STUB) - add_custom_target(${SPEC}stub) - add_custom_command( - TARGET ${SPEC}stub - POST_BUILD - DEPENDS "${CMAKE_CURRENT_SOURCE_DIR}/${SPEC}" - WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} - COMMAND ${CMAKE_COMMAND} -DETH_SPEC_PATH="${CMAKE_CURRENT_SOURCE_DIR}/${SPEC}" -DETH_SOURCE_DIR="${CMAKE_SOURCE_DIR}" -DETH_CMAKE_DIR="${ETH_CMAKE_DIR}" - -DETH_CLIENT_DIR="${CLIENTDIR}" - -DETH_CLIENT_NAME=${CLIENTNAME} - -DETH_CLIENT_FILENAME=${CLIENTFILENAME} - -DETH_SERVER_DIR="${SERVERDIR}" - -DETH_SERVER_NAME=${SERVERNAME} - -DETH_SERVER_FILENAME=${SERVERFILENAME} - -DETH_JSON_RPC_STUB="${ETH_JSON_RPC_STUB}" - -P "${ETH_SCRIPTS_DIR}/jsonrpcstub.cmake" - ) - add_dependencies(${EXECUTABLE} ${SPEC}stub) - endif () -endmacro() \ No newline at end of file diff --git a/CMakeModules/EthUtils.cmake b/CMakeModules/EthUtils.cmake deleted file mode 100644 index e41bf2bd32..0000000000 --- a/CMakeModules/EthUtils.cmake +++ /dev/null @@ -1,82 +0,0 @@ -# -# renames the file if it is different from its destination -include(CMakeParseArguments) -# -macro(replace_if_different SOURCE DST) - set(extra_macro_args ${ARGN}) - set(options CREATE) - set(one_value_args) - set(multi_value_args) - cmake_parse_arguments(REPLACE_IF_DIFFERENT "${options}" "${one_value_args}" "${multi_value_args}" "${extra_macro_args}") - - if (REPLACE_IF_DIFFERENT_CREATE AND (NOT (EXISTS "${DST}"))) - file(WRITE "${DST}" "") - endif() - - execute_process(COMMAND ${CMAKE_COMMAND} -E compare_files "${SOURCE}" "${DST}" RESULT_VARIABLE DIFFERENT OUTPUT_QUIET ERROR_QUIET) - - if (DIFFERENT) - 
execute_process(COMMAND ${CMAKE_COMMAND} -E rename "${SOURCE}" "${DST}") - else() - execute_process(COMMAND ${CMAKE_COMMAND} -E remove "${SOURCE}") - endif() -endmacro() - -macro(eth_add_test NAME) - - # parse arguments here - set(commands) - set(current_command "") - foreach (arg ${ARGN}) - if (arg STREQUAL "ARGS") - if (current_command) - list(APPEND commands ${current_command}) - endif() - set(current_command "") - else () - set(current_command "${current_command} ${arg}") - endif() - endforeach(arg) - list(APPEND commands ${current_command}) - - message(STATUS "test: ${NAME} | ${commands}") - - # create tests - set(index 0) - list(LENGTH commands count) - while (index LESS count) - list(GET commands ${index} test_arguments) - - set(run_test "--run_test=${NAME}") - add_test(NAME "${NAME}.${index}" COMMAND testeth ${run_test} ${test_arguments}) - - math(EXPR index "${index} + 1") - endwhile(index LESS count) - - # add target to run them - add_custom_target("test.${NAME}" - DEPENDS testeth - WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR} - COMMAND ${CMAKE_COMMAND} -DETH_TEST_NAME="${NAME}" -DCTEST_COMMAND="${CTEST_COMMAND}" -P "${ETH_SCRIPTS_DIR}/runtest.cmake" - ) - -endmacro() - -# In Windows split repositories build we need to be checking whether or not -# Debug/Release or both versions were built for the config phase to run smoothly -macro(eth_check_library_link L) - if (${${L}_LIBRARY} AND ${${L}_LIBRARY} EQUAL "${L}_LIBRARY-NOTFOUND") - unset(${${L}_LIBRARY}) - endif() - if (${${L}_LIBRARY_DEBUG} AND ${${L}_LIBRARY_DEBUG} EQUAL "${L}_LIBRARY_DEBUG-NOTFOUND") - unset(${${L}_LIBRARY_DEBUG}) - endif() - if (${${L}_LIBRARY} AND ${${L}_LIBRARY_DEBUG}) - set(${L}_LIBRARIES optimized ${${L}_LIBRARY} debug ${${L}_LIBRARY_DEBUG}) - elseif (${${L}_LIBRARY}) - set(${L}_LIBRARIES ${${L}_LIBRARY}) - elseif (${${L}_LIBRARY_DEBUG}) - set(${L}_LIBRARIES ${${L}_LIBRARY_DEBUG}) - endif() -endmacro() - diff --git a/CMakeModules/jsonrpcstubHelper.cmake 
b/CMakeModules/jsonrpcstubHelper.cmake new file mode 100644 index 0000000000..66c1abe356 --- /dev/null +++ b/CMakeModules/jsonrpcstubHelper.cmake @@ -0,0 +1,25 @@ +# custom cmake scripts +set(ETH_CMAKE_DIR ${CMAKE_CURRENT_LIST_DIR}) +set(ETH_SCRIPTS_DIR ${ETH_CMAKE_DIR}/scripts) + +macro(jsonrpcstub_create EXECUTABLE SPEC SERVERNAME SERVERDIR SERVERFILENAME CLIENTNAME CLIENTDIR CLIENTFILENAME) + if (ETH_JSON_RPC_STUB) + add_custom_target(${SPEC}stub) + add_custom_command( + TARGET ${SPEC}stub + POST_BUILD + DEPENDS "${CMAKE_CURRENT_SOURCE_DIR}/${SPEC}" + WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} + COMMAND ${CMAKE_COMMAND} -DETH_SPEC_PATH="${CMAKE_CURRENT_SOURCE_DIR}/${SPEC}" -DETH_SOURCE_DIR="${CMAKE_SOURCE_DIR}" -DETH_CMAKE_DIR="${ETH_CMAKE_DIR}" + -DETH_CLIENT_DIR="${CLIENTDIR}" + -DETH_CLIENT_NAME=${CLIENTNAME} + -DETH_CLIENT_FILENAME=${CLIENTFILENAME} + -DETH_SERVER_DIR="${SERVERDIR}" + -DETH_SERVER_NAME=${SERVERNAME} + -DETH_SERVER_FILENAME=${SERVERFILENAME} + -DETH_JSON_RPC_STUB="${ETH_JSON_RPC_STUB}" + -P "${ETH_SCRIPTS_DIR}/jsonrpcstub.cmake" + ) + add_dependencies(${EXECUTABLE} ${SPEC}stub) + endif () +endmacro() \ No newline at end of file From e295ed3bbf99e857a1c3f67382a9da69a6d1a93b Mon Sep 17 00:00:00 2001 From: mfrankovi Date: Mon, 17 Apr 2023 13:40:56 +0200 Subject: [PATCH 126/162] chore: prune state_db manually --- libraries/cli/include/cli/config.hpp | 1 + libraries/cli/src/config.cpp | 4 +++ libraries/config/include/config/config.hpp | 1 + libraries/config/src/config.cpp | 12 ++++---- .../consensus/src/final_chain/final_chain.cpp | 28 +++++-------------- 5 files changed, 18 insertions(+), 28 deletions(-) diff --git a/libraries/cli/include/cli/config.hpp b/libraries/cli/include/cli/config.hpp index d2c91d75bf..99ed1cc28c 100644 --- a/libraries/cli/include/cli/config.hpp +++ b/libraries/cli/include/cli/config.hpp @@ -42,6 +42,7 @@ class Config { static constexpr const char* HELP = "help"; static constexpr const char* VERSION = "version"; static 
constexpr const char* WALLET = "wallet"; + static constexpr const char* PRUNE_STATE_DB = "prune-state-db"; static constexpr const char* NODE_COMMAND = "node"; static constexpr const char* ACCOUNT_COMMAND = "account"; diff --git a/libraries/cli/src/config.cpp b/libraries/cli/src/config.cpp index 2465a368f3..2a4c8fdf20 100644 --- a/libraries/cli/src/config.cpp +++ b/libraries/cli/src/config.cpp @@ -38,6 +38,8 @@ Config::Config(int argc, const char* argv[]) { bool destroy_db = false; bool rebuild_network = false; bool rebuild_db = false; + bool prune_state_db = false; + bool light_node = false; bool version = false; uint64_t rebuild_db_period = 0; @@ -129,6 +131,7 @@ Config::Config(int argc, const char* argv[]) { "Enables Test JsonRPC. Disabled by default"); node_command_options.add_options()(ENABLE_DEBUG, bpo::bool_switch(&enable_debug), "Enables Debug RPC interface. Disabled by default"); + node_command_options.add_options()(PRUNE_STATE_DB, bpo::bool_switch(&prune_state_db), "Prune state_db"); allowed_options.add(main_options); @@ -261,6 +264,7 @@ Config::Config(int argc, const char* argv[]) { } node_config_.db_config.db_revert_to_period = revert_to_period; node_config_.db_config.rebuild_db = rebuild_db; + node_config_.db_config.prune_state_db = prune_state_db; node_config_.db_config.rebuild_db_period = rebuild_db_period; node_config_.enable_test_rpc = enable_test_rpc; diff --git a/libraries/config/include/config/config.hpp b/libraries/config/include/config/config.hpp index c4c577e22d..9a4388d0cb 100644 --- a/libraries/config/include/config/config.hpp +++ b/libraries/config/include/config/config.hpp @@ -15,6 +15,7 @@ struct DBConfig { uint32_t db_max_open_files = 0; PbftPeriod db_revert_to_period = 0; bool rebuild_db = false; + bool prune_state_db = false; PbftPeriod rebuild_db_period = 0; }; diff --git a/libraries/config/src/config.cpp b/libraries/config/src/config.cpp index ddf67d0da4..91746e521d 100644 --- a/libraries/config/src/config.cpp +++ 
b/libraries/config/src/config.cpp @@ -98,13 +98,11 @@ FullNodeConfig::FullNodeConfig(const Json::Value &string_or_object, const Json:: } is_light_node = getConfigDataAsBoolean(root, {"is_light_node"}, true, is_light_node); - if (is_light_node) { - const auto min_light_node_history = (genesis.state.dpos.blocks_per_year * kDefaultLightNodeHistoryDays) / 365; - light_node_history = getConfigDataAsUInt(root, {"light_node_history"}, true, min_light_node_history); - if (light_node_history < min_light_node_history) { - throw ConfigException("Min. required light node history is " + std::to_string(min_light_node_history) + - " blocks (" + std::to_string(kDefaultLightNodeHistoryDays) + " days)"); - } + const auto min_light_node_history = (genesis.state.dpos.blocks_per_year * kDefaultLightNodeHistoryDays) / 365; + light_node_history = getConfigDataAsUInt(root, {"light_node_history"}, true, min_light_node_history); + if (light_node_history < min_light_node_history) { + throw ConfigException("Min. required light node history is " + std::to_string(min_light_node_history) + + " blocks (" + std::to_string(kDefaultLightNodeHistoryDays) + " days)"); } try { diff --git a/libraries/core_libs/consensus/src/final_chain/final_chain.cpp b/libraries/core_libs/consensus/src/final_chain/final_chain.cpp index c5ee4bbeaf..212a2a2127 100644 --- a/libraries/core_libs/consensus/src/final_chain/final_chain.cpp +++ b/libraries/core_libs/consensus/src/final_chain/final_chain.cpp @@ -20,11 +20,9 @@ class FinalChainImpl final : public FinalChain { const bool kLightNode = false; const uint64_t kLightNodeHistory = 0; const uint32_t kMaxLevelsPerPeriod; - const uint64_t kLightNodePruneOffset = 0; // It is not prepared to use more then 1 thread. 
Examine it if you want to change threads count boost::asio::thread_pool executor_thread_{1}; - boost::asio::thread_pool prune_thread_{1}; std::atomic num_executed_dag_blk_ = 0; std::atomic num_executed_trx_ = 0; @@ -61,10 +59,6 @@ class FinalChainImpl final : public FinalChain { kLightNode(config.is_light_node), kLightNodeHistory(config.light_node_history), kMaxLevelsPerPeriod(config.max_levels_per_period), - // This will provide a speific random offset based on node address for each node to prevent all light nodes - // performing prune at the same block height - kLightNodePruneOffset((*reinterpret_cast(node_addr.asBytes().data())) % - std::max(config.light_node_history, (uint64_t)1)), block_headers_cache_(config.final_chain_cache_in_blocks, [this](uint64_t blk) { return get_block_header(blk); }), block_hashes_cache_(config.final_chain_cache_in_blocks, [this](uint64_t blk) { return get_block_hash(blk); }), @@ -125,12 +119,14 @@ class FinalChainImpl final : public FinalChain { } delegation_delay_ = config.genesis.state.dpos.delegation_delay; + if (config.db_config.prune_state_db && last_blk_num.has_value() && *last_blk_num > kLightNodeHistory) { + LOG(log_si_) << "Pruning state db, this might take several minutes"; + prune(*last_blk_num - kLightNodeHistory); + LOG(log_si_) << "Pruning state db complete"; + } } - void stop() override { - executor_thread_.join(); - prune_thread_.join(); - } + void stop() override { executor_thread_.join(); } std::future> finalize( PeriodData&& new_blk, std::vector&& finalized_dag_blk_hashes, @@ -246,13 +242,6 @@ class FinalChainImpl final : public FinalChain { state_api_.create_snapshot(blk_header->number); } - if (kLightNode) { - // Actual history size will be between 100% and 105% of light_node_history_ to avoid deleting on every period - if ((((blk_header->number + kLightNodePruneOffset) % (std::max(kLightNodeHistory / 20, (uint64_t)1)) == 0)) && - blk_header->number > kLightNodeHistory) { - prune(blk_header->number - 
kLightNodeHistory); - } - } return result; } @@ -274,10 +263,7 @@ class FinalChainImpl final : public FinalChain { db_->compactColumn(DB::Columns::final_chain_blk_hash_by_number); db_->compactColumn(DB::Columns::final_chain_blk_number_by_hash); - boost::asio::post( - prune_thread_, - [this, to_keep = std::move(last_block_to_keep->state_root), to_prune = std::move(state_root_to_prune), - number = last_block_to_keep->number]() { state_api_.prune(to_keep, to_prune, number); }); + state_api_.prune(last_block_to_keep->state_root, state_root_to_prune, last_block_to_keep->number); } } From bda00c5d70b7c6f403b03a25f71c1772c5343c83 Mon Sep 17 00:00:00 2001 From: Dmytro Kostenko Date: Tue, 18 Apr 2023 11:02:04 +0200 Subject: [PATCH 127/162] feat: change how we process db versions changes --- libraries/core_libs/node/src/node.cpp | 8 ++++---- .../include/storage/migration/migration_base.hpp | 6 +++++- .../core_libs/storage/include/storage/storage.hpp | 2 ++ .../storage/src/migration/migration_manager.cpp | 6 +++--- libraries/core_libs/storage/src/storage.cpp | 11 +++++++---- 5 files changed, 21 insertions(+), 12 deletions(-) diff --git a/libraries/core_libs/node/src/node.cpp b/libraries/core_libs/node/src/node.cpp index b588b60184..b7c6292ec2 100644 --- a/libraries/core_libs/node/src/node.cpp +++ b/libraries/core_libs/node/src/node.cpp @@ -67,8 +67,8 @@ void FullNode::init() { conf_.db_config.db_max_open_files, conf_.db_config.db_max_snapshots, conf_.db_config.db_revert_to_period, node_addr, false); - if (db_->hasMinorVersionChanged()) { - LOG(log_si_) << "Minor DB version has changed. Rebuilding Db"; + if (db_->hasMajorVersionChanged()) { + LOG(log_si_) << "Major DB version has changed. 
Rebuilding Db"; conf_.db_config.rebuild_db = true; db_ = nullptr; old_db_ = std::make_shared(conf_.db_path, conf_.db_config.db_snapshot_each_n_pbft_block, @@ -77,12 +77,12 @@ void FullNode::init() { db_ = std::make_shared(conf_.db_path, conf_.db_config.db_snapshot_each_n_pbft_block, conf_.db_config.db_max_open_files, conf_.db_config.db_max_snapshots, conf_.db_config.db_revert_to_period, node_addr); + } else if (db_->hasMinorVersionChanged()) { + storage::migration::Manager(db_).applyAll(); } if (db_->getDagBlocksCount() == 0) { db_->setGenesisHash(conf_.genesis.genesisHash()); } - - storage::migration::Manager(db_).applyAll(); } LOG(log_nf_) << "DB initialized ..."; diff --git a/libraries/core_libs/storage/include/storage/migration/migration_base.hpp b/libraries/core_libs/storage/include/storage/migration/migration_base.hpp index b1ce371b01..6ea3f15dd7 100644 --- a/libraries/core_libs/storage/include/storage/migration/migration_base.hpp +++ b/libraries/core_libs/storage/include/storage/migration/migration_base.hpp @@ -1,4 +1,5 @@ #pragma once +#include "logger/logger.hpp" #include "storage/storage.hpp" namespace taraxa::storage::migration { @@ -11,8 +12,11 @@ class Base { virtual uint32_t dbVersion() = 0; bool isApplied() { return db_->lookup_int(id(), DB::Columns::migrations).has_value(); } - void apply() { + void apply(logger::Logger& log) { if (db_->getMajorVersion() != dbVersion()) { + LOG(log) << id() + << ": skip migration as it was made for different major db version. 
Could be removed from the code" + << std::endl; return; } migrate(); diff --git a/libraries/core_libs/storage/include/storage/storage.hpp b/libraries/core_libs/storage/include/storage/storage.hpp index 96f18a4b34..2abf2304a2 100644 --- a/libraries/core_libs/storage/include/storage/storage.hpp +++ b/libraries/core_libs/storage/include/storage/storage.hpp @@ -144,6 +144,7 @@ class DbStorage : public std::enable_shared_from_this { std::set snapshots_; uint32_t kMajorVersion_; + bool major_version_changed_ = false; bool minor_version_changed_ = false; auto handle(Column const& col) const { return handles_[col.ordinal_]; } @@ -307,6 +308,7 @@ class DbStorage : public std::enable_shared_from_this { void addProposalPeriodDagLevelsMapToBatch(uint64_t level, PbftPeriod period, Batch& write_batch); bool hasMinorVersionChanged() { return minor_version_changed_; } + bool hasMajorVersionChanged() { return major_version_changed_; } void compactColumn(Column const& column) { db_->CompactRange({}, handle(column), nullptr, nullptr); } diff --git a/libraries/core_libs/storage/src/migration/migration_manager.cpp b/libraries/core_libs/storage/src/migration/migration_manager.cpp index e249a65547..7f5dabd999 100644 --- a/libraries/core_libs/storage/src/migration/migration_manager.cpp +++ b/libraries/core_libs/storage/src/migration/migration_manager.cpp @@ -11,9 +11,9 @@ Manager::Manager(std::shared_ptr db, const addr_t& node_addr) : db_(d void Manager::applyAll() { for (const auto& m : migrations_) { if (!m->isApplied()) { - LOG(log_nf_) << "Applying migration " << m->id(); - m->apply(); - LOG(log_nf_) << "Migration applied " << m->id(); + LOG(log_si_) << "Applying migration " << m->id(); + m->apply(log_si_); + LOG(log_si_) << "Migration applied " << m->id(); } } } diff --git a/libraries/core_libs/storage/src/storage.cpp b/libraries/core_libs/storage/src/storage.cpp index eb8c29e329..19409118a5 100644 --- a/libraries/core_libs/storage/src/storage.cpp +++ 
b/libraries/core_libs/storage/src/storage.cpp @@ -79,16 +79,19 @@ DbStorage::DbStorage(fs::path const& path, uint32_t db_snapshot_each_n_pbft_bloc kMajorVersion_ = getStatusField(StatusDbField::DbMajorVersion); uint32_t minor_version = getStatusField(StatusDbField::DbMinorVersion); - if (kMajorVersion_ == 0 && minor_version == 0) { + auto save_db_versions = [&]() { saveStatusField(StatusDbField::DbMajorVersion, TARAXA_DB_MAJOR_VERSION); saveStatusField(StatusDbField::DbMinorVersion, TARAXA_DB_MINOR_VERSION); + }; + if (kMajorVersion_ == 0 && minor_version == 0) { + save_db_versions(); } else { if (kMajorVersion_ != TARAXA_DB_MAJOR_VERSION) { - throw DbException(string("Database version mismatch. Version on disk ") + - getFormattedVersion({kMajorVersion_, minor_version}) + - " Node version:" + getFormattedVersion({TARAXA_DB_MAJOR_VERSION, TARAXA_DB_MINOR_VERSION})); + major_version_changed_ = true; + save_db_versions(); } else if (minor_version != TARAXA_DB_MINOR_VERSION) { minor_version_changed_ = true; + save_db_versions(); } } } From 3a985ec594828d01bbd2480e0da7cd2c452fa76e Mon Sep 17 00:00:00 2001 From: Dmytro Kostenko Date: Tue, 18 Apr 2023 13:26:02 +0200 Subject: [PATCH 128/162] fix: update db version only if rebuild_db or all migrations was applied --- CMakeLists.txt | 2 +- libraries/core_libs/node/src/node.cpp | 2 ++ .../storage/include/storage/storage.hpp | 1 + libraries/core_libs/storage/src/storage.cpp | 23 ++++++++----------- 4 files changed, 13 insertions(+), 15 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 2f3582ef8a..5ff575d0dd 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -13,7 +13,7 @@ set(TARAXA_NET_VERSION 1) set(TARAXA_DB_MAJOR_VERSION 1) # Minor version should be modified when changes to the database are made in the tables that can be rebuilt from the # basic tables -set(TARAXA_DB_MINOR_VERSION 0) +set(TARAXA_DB_MINOR_VERSION 1) # Defines Taraxa library target. 
project(taraxa-node VERSION ${TARAXA_VERSION}) diff --git a/libraries/core_libs/node/src/node.cpp b/libraries/core_libs/node/src/node.cpp index b7c6292ec2..2e6002d89e 100644 --- a/libraries/core_libs/node/src/node.cpp +++ b/libraries/core_libs/node/src/node.cpp @@ -80,6 +80,8 @@ void FullNode::init() { } else if (db_->hasMinorVersionChanged()) { storage::migration::Manager(db_).applyAll(); } + db_->updateDbVersions(); + if (db_->getDagBlocksCount() == 0) { db_->setGenesisHash(conf_.genesis.genesisHash()); } diff --git a/libraries/core_libs/storage/include/storage/storage.hpp b/libraries/core_libs/storage/include/storage/storage.hpp index 2abf2304a2..8d6f00182d 100644 --- a/libraries/core_libs/storage/include/storage/storage.hpp +++ b/libraries/core_libs/storage/include/storage/storage.hpp @@ -176,6 +176,7 @@ class DbStorage : public std::enable_shared_from_this { void loadSnapshots(); void disableSnapshots(); void enableSnapshots(); + void updateDbVersions(); uint32_t getMajorVersion() const; std::unique_ptr getColumnIterator(const Column& c); diff --git a/libraries/core_libs/storage/src/storage.cpp b/libraries/core_libs/storage/src/storage.cpp index 19409118a5..aeb076e2b8 100644 --- a/libraries/core_libs/storage/src/storage.cpp +++ b/libraries/core_libs/storage/src/storage.cpp @@ -79,23 +79,18 @@ DbStorage::DbStorage(fs::path const& path, uint32_t db_snapshot_each_n_pbft_bloc kMajorVersion_ = getStatusField(StatusDbField::DbMajorVersion); uint32_t minor_version = getStatusField(StatusDbField::DbMinorVersion); - auto save_db_versions = [&]() { - saveStatusField(StatusDbField::DbMajorVersion, TARAXA_DB_MAJOR_VERSION); - saveStatusField(StatusDbField::DbMinorVersion, TARAXA_DB_MINOR_VERSION); - }; - if (kMajorVersion_ == 0 && minor_version == 0) { - save_db_versions(); - } else { - if (kMajorVersion_ != TARAXA_DB_MAJOR_VERSION) { - major_version_changed_ = true; - save_db_versions(); - } else if (minor_version != TARAXA_DB_MINOR_VERSION) { - minor_version_changed_ = 
true; - save_db_versions(); - } + if (kMajorVersion_ != 0 && kMajorVersion_ != TARAXA_DB_MAJOR_VERSION) { + major_version_changed_ = true; + } else if (minor_version != TARAXA_DB_MINOR_VERSION) { + minor_version_changed_ = true; } } +void DbStorage::updateDbVersions() { + saveStatusField(StatusDbField::DbMajorVersion, TARAXA_DB_MAJOR_VERSION); + saveStatusField(StatusDbField::DbMinorVersion, TARAXA_DB_MINOR_VERSION); +} + void DbStorage::rebuildColumns(const rocksdb::Options& options) { std::unique_ptr db; std::vector column_families; From f0a8f9f4c393d06615286c5314bf4b016cb10480 Mon Sep 17 00:00:00 2001 From: rjonczy Date: Wed, 19 Apr 2023 14:02:40 +0200 Subject: [PATCH 129/162] do not regenerate genesis --- charts/taraxa-node/Chart.yaml | 2 +- charts/taraxa-node/templates/consensus-node-configmap.yaml | 1 - .../taraxa-node/templates/consensus-node-light-configmap.yaml | 1 - 3 files changed, 1 insertion(+), 3 deletions(-) diff --git a/charts/taraxa-node/Chart.yaml b/charts/taraxa-node/Chart.yaml index b638b10387..bd384e984d 100644 --- a/charts/taraxa-node/Chart.yaml +++ b/charts/taraxa-node/Chart.yaml @@ -2,7 +2,7 @@ apiVersion: v2 appVersion: "1.0" description: Kubernetes helm chart for Taraxa blockchain full node implementation. name: taraxa-node -version: 0.3.9 +version: 0.3.10 keywords: - blockchain - taraxa diff --git a/charts/taraxa-node/templates/consensus-node-configmap.yaml b/charts/taraxa-node/templates/consensus-node-configmap.yaml index a751e901a8..ba7b05621f 100644 --- a/charts/taraxa-node/templates/consensus-node-configmap.yaml +++ b/charts/taraxa-node/templates/consensus-node-configmap.yaml @@ -99,7 +99,6 @@ data: echo "Cleaning up old config..." 
rm -rf $CONFIG_PATH - rm -rf $GENESIS_PATH echo "Generating config" INDEX=${HOSTNAME##*-} diff --git a/charts/taraxa-node/templates/consensus-node-light-configmap.yaml b/charts/taraxa-node/templates/consensus-node-light-configmap.yaml index b1fa283c97..7d4adc594c 100644 --- a/charts/taraxa-node/templates/consensus-node-light-configmap.yaml +++ b/charts/taraxa-node/templates/consensus-node-light-configmap.yaml @@ -99,7 +99,6 @@ data: echo "Cleaning up old config..." rm -rf $CONFIG_PATH - rm -rf $GENESIS_PATH echo "Generating config" INDEX=${HOSTNAME##*-} From e78e5193062809067a21d79eb7c33fae3be78075 Mon Sep 17 00:00:00 2001 From: mfrankovi Date: Thu, 20 Apr 2023 16:03:59 +0200 Subject: [PATCH 130/162] chore: check state root on syncing --- .../consensus/include/pbft/pbft_manager.hpp | 7 ++++ .../consensus/src/pbft/pbft_manager.cpp | 38 ++++++++++++------- 2 files changed, 32 insertions(+), 13 deletions(-) diff --git a/libraries/core_libs/consensus/include/pbft/pbft_manager.hpp b/libraries/core_libs/consensus/include/pbft/pbft_manager.hpp index 0685c1ce6c..c73c155118 100644 --- a/libraries/core_libs/consensus/include/pbft/pbft_manager.hpp +++ b/libraries/core_libs/consensus/include/pbft/pbft_manager.hpp @@ -454,6 +454,13 @@ class PbftManager : public std::enable_shared_from_this { */ bool validatePbftBlock(const std::shared_ptr &pbft_block) const; + /** + * @brief Validates pbft block state root. 
It checks if: + * @param pbft_block PBFT block + * @return true if pbft block is valid, otherwise false + */ + bool validatePbftBlockStateRoot(const std::shared_ptr &pbft_block) const; + /** * @brief If there are enough certify votes, push the vote PBFT block in PBFT chain * @param pbft_block PBFT block diff --git a/libraries/core_libs/consensus/src/pbft/pbft_manager.cpp b/libraries/core_libs/consensus/src/pbft/pbft_manager.cpp index b40c09abe6..6bd9270dc3 100644 --- a/libraries/core_libs/consensus/src/pbft/pbft_manager.cpp +++ b/libraries/core_libs/consensus/src/pbft/pbft_manager.cpp @@ -1315,20 +1315,9 @@ std::shared_ptr PbftManager::identifyLeaderBlock_(PbftRound round, Pb return empty_leader_block; } -bool PbftManager::validatePbftBlock(const std::shared_ptr &pbft_block) const { - if (!pbft_block) { - LOG(log_er_) << "Unable to validate pbft block - no block provided"; - return false; - } - - // Validates pbft_block's previous block hash against pbft chain - if (!pbft_chain_->checkPbftBlockValidation(pbft_block)) { - return false; - } - - auto const &pbft_block_hash = pbft_block->getBlockHash(); - +bool PbftManager::validatePbftBlockStateRoot(const std::shared_ptr &pbft_block) const { auto period = pbft_block->getPeriod(); + auto const &pbft_block_hash = pbft_block->getBlockHash(); { h256 prev_state_root_hash; if (period > final_chain_->delegation_delay()) { @@ -1345,6 +1334,25 @@ bool PbftManager::validatePbftBlock(const std::shared_ptr &pbft_block return false; } } + return true; +} + +bool PbftManager::validatePbftBlock(const std::shared_ptr &pbft_block) const { + if (!pbft_block) { + LOG(log_er_) << "Unable to validate pbft block - no block provided"; + return false; + } + + // Validates pbft_block's previous block hash against pbft chain + if (!pbft_chain_->checkPbftBlockValidation(pbft_block)) { + return false; + } + + auto const &pbft_block_hash = pbft_block->getBlockHash(); + + if (!validatePbftBlockStateRoot(pbft_block)) { + return false; + } // 
Validates reward votes if (!vote_mgr_->checkRewardVotes(pbft_block, false).first) { @@ -1649,6 +1657,10 @@ std::optional>>> PbftMan return std::nullopt; } + if (!validatePbftBlockStateRoot(period_data.pbft_blk)) { + return std::nullopt; + } + // Check reward votes auto reward_votes = vote_mgr_->checkRewardVotes(period_data.pbft_blk, true); if (!reward_votes.first) { From 9b1756293e0dbfc931fd11ad630072a1184c47e6 Mon Sep 17 00:00:00 2001 From: Dmytro Kostenko Date: Fri, 21 Apr 2023 10:16:00 +0200 Subject: [PATCH 131/162] fix: use of all passed gas on reverts. add check to revert_reason test --- submodules/taraxa-evm | 2 +- tests/final_chain_test.cpp | 7 +++++++ 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/submodules/taraxa-evm b/submodules/taraxa-evm index 3610c2557e..9eaa5f51ce 160000 --- a/submodules/taraxa-evm +++ b/submodules/taraxa-evm @@ -1 +1 @@ -Subproject commit 3610c2557efa4967c620984f532cd5b602ef4f5d +Subproject commit 9eaa5f51ce4dba40ad2c123bca058b2eab0cc948 diff --git a/tests/final_chain_test.cpp b/tests/final_chain_test.cpp index 8e5608620d..a15be2ae36 100644 --- a/tests/final_chain_test.cpp +++ b/tests/final_chain_test.cpp @@ -516,6 +516,13 @@ TEST_F(FinalChainTest, revert_reason) { EXPECT_THROW_WITH(dev::jsToInt(eth_json_rpc->eth_estimateGas(est)), std::exception, "evm: execution reverted: arg required"); EXPECT_THROW_WITH(eth_json_rpc->eth_call(est, "latest"), std::exception, "evm: execution reverted: arg required"); + + auto gas = 100000; + auto trx = std::make_shared(2, 0, 1, gas, dev::fromHex(call_data), sk, test_contract_addr); + auto result = advance({trx}, {0, 0, 1}); + auto receipt = result->trx_receipts.front(); + ASSERT_EQ(receipt.status_code, 0); // failed + ASSERT_GT(gas, receipt.gas_used); // we aren't spending all gas in such cases } } From d557974786196d80b7e961fd9011da9fbb60179e Mon Sep 17 00:00:00 2001 From: mfrankovi Date: Mon, 24 Apr 2023 16:42:27 +0200 Subject: [PATCH 132/162] fix: setting reward votes period --- 
.../src/vote_manager/vote_manager.cpp | 22 ++++++++++++------- 1 file changed, 14 insertions(+), 8 deletions(-) diff --git a/libraries/core_libs/consensus/src/vote_manager/vote_manager.cpp b/libraries/core_libs/consensus/src/vote_manager/vote_manager.cpp index 9b791c4ba7..8c37742755 100644 --- a/libraries/core_libs/consensus/src/vote_manager/vote_manager.cpp +++ b/libraries/core_libs/consensus/src/vote_manager/vote_manager.cpp @@ -31,19 +31,25 @@ VoteManager::VoteManager(const addr_t& node_addr, const PbftConfig& pbft_config, auto db_votes = db_->getAllTwoTPlusOneVotes(); - auto addVerifiedVotes = [this](const std::vector>& votes) { - bool reward_votes_info_set = false; + auto addVerifiedVotes = [this](const std::vector>& votes, bool set_reward_votes_info = false) { + bool rewards_info_already_set = false; for (const auto& vote : votes) { // Check if votes are unique per round, step & voter if (!isUniqueVote(vote).first) { continue; } - if (!reward_votes_info_set && vote->getType() == PbftVoteTypes::cert_vote) { - reward_votes_info_set = true; - reward_votes_block_hash_ = vote->getBlockHash(); - reward_votes_period_ = vote->getPeriod(); - reward_votes_round_ = vote->getRound(); + if (set_reward_votes_info && vote->getType() == PbftVoteTypes::cert_vote) { + if (!rewards_info_already_set) { + rewards_info_already_set = true; + reward_votes_block_hash_ = vote->getBlockHash(); + reward_votes_period_ = vote->getPeriod(); + reward_votes_round_ = vote->getRound(); + } else { + assert(reward_votes_block_hash_ == vote->getBlockHash()); + assert(reward_votes_period_ == vote->getPeriod()); + assert(reward_votes_round_ == vote->getRound()); + } } addVerifiedVote(vote); @@ -52,7 +58,7 @@ VoteManager::VoteManager(const addr_t& node_addr, const PbftConfig& pbft_config, }; // Load 2t+1 vote blocks votes - addVerifiedVotes(db_->getAllTwoTPlusOneVotes()); + addVerifiedVotes(db_->getAllTwoTPlusOneVotes(), true); // Load own votes const auto own_votes = db_->getOwnVerifiedVotes(); 
From 011f27e4e25973b88865f48925585e614ca77f1e Mon Sep 17 00:00:00 2001 From: rjonczy Date: Thu, 27 Apr 2023 15:14:50 +0200 Subject: [PATCH 133/162] empty commit to trigger ci From 0f6abd5fad871c1ea504f7f548132162740942f5 Mon Sep 17 00:00:00 2001 From: Matus Kysel Date: Thu, 27 Apr 2023 10:37:42 +0200 Subject: [PATCH 134/162] feat: implement trace_replayTransaction --- libraries/core_libs/network/rpc/Debug.cpp | 52 +++++++++++++------ libraries/core_libs/network/rpc/Debug.h | 5 +- .../core_libs/network/rpc/Debug.jsonrpc.json | 9 ++++ libraries/core_libs/network/rpc/DebugClient.h | 11 ++++ libraries/core_libs/network/rpc/DebugFace.h | 8 +++ submodules/taraxa-evm | 2 +- 6 files changed, 70 insertions(+), 17 deletions(-) diff --git a/libraries/core_libs/network/rpc/Debug.cpp b/libraries/core_libs/network/rpc/Debug.cpp index 1a79ed1de9..4a94f48a10 100644 --- a/libraries/core_libs/network/rpc/Debug.cpp +++ b/libraries/core_libs/network/rpc/Debug.cpp @@ -15,19 +15,13 @@ namespace taraxa::net { Json::Value Debug::debug_traceTransaction(const std::string& transaction_hash) { Json::Value res; try { + auto [trx, loc] = get_transaction_with_location(transaction_hash); + if (!trx || !loc) { + res["status"] = "Transaction not found"; + return res; + } if (auto node = full_node_.lock()) { - const auto hash = jsToFixed<32>(transaction_hash); - const auto trx = node->getDB()->getTransaction(hash); - if (!trx) { - res["status"] = "Transaction not found"; - return res; - } - const auto loc = node->getFinalChain()->transaction_location(hash); - if (!loc) { - res["status"] = "Transaction not found"; - return res; - } - return util::readJsonFromString(node->getFinalChain()->trace_trx(to_eth_trx(trx), loc->blk_n)); + return util::readJsonFromString(node->getFinalChain()->trace_trx(to_eth_trx(std::move(trx)), loc->blk_n - 1)); } } catch (std::exception& e) { res["status"] = e.what(); @@ -65,6 +59,25 @@ Json::Value Debug::trace_call(const Json::Value& call_params, const Json::Value& return 
res; } +Json::Value Debug::trace_replayTransaction(const std::string& transaction_hash, const Json::Value& trace_params) { + Json::Value res; + try { + auto params = parse_tracking_parms(trace_params); + auto [trx, loc] = get_transaction_with_location(transaction_hash); + if (!trx || !loc) { + res["status"] = "Transaction not found"; + return res; + } + if (auto node = full_node_.lock()) { + return util::readJsonFromString( + node->getFinalChain()->trace_trx(to_eth_trx(std::move(trx)), loc->blk_n - 1, std::move(params))); + } + } catch (std::exception& e) { + res["status"] = e.what(); + } + return res; +} + state_api::Tracing Debug::parse_tracking_parms(const Json::Value& json) const { state_api::Tracing ret; if (!json.isArray() || json.empty()) { @@ -91,13 +104,13 @@ state_api::EVMTransaction Debug::to_eth_trx(const Json::Value& json, EthBlockNum } if (!json["from"].empty()) { - trx.from = toAddress(json["from"].asString()); + trx.from = to_address(json["from"].asString()); } else { trx.from = ZeroAddress; } if (!json["to"].empty() && json["to"].asString() != "0x" && !json["to"].asString().empty()) { - trx.to = toAddress(json["to"].asString()); + trx.to = to_address(json["to"].asString()); } if (!json["value"].empty()) { @@ -144,7 +157,7 @@ EthBlockNumber Debug::parse_blk_num(const string& blk_num_str) { return jsToInt(blk_num_str); } -Address Debug::toAddress(const string& s) const { +Address Debug::to_address(const string& s) const { try { if (auto b = fromHex(s.substr(0, 2) == "0x" ? 
s.substr(2) : s, WhenError::Throw); b.size() == Address::size) { return Address(b); @@ -154,4 +167,13 @@ Address Debug::toAddress(const string& s) const { throw InvalidAddress(); } +std::pair, std::optional> +Debug::get_transaction_with_location(const std::string& transaction_hash) const { + if (auto node = full_node_.lock()) { + const auto hash = jsToFixed<32>(transaction_hash); + return {node->getDB()->getTransaction(hash), node->getFinalChain()->transaction_location(hash)}; + } + return {}; +} + } // namespace taraxa::net \ No newline at end of file diff --git a/libraries/core_libs/network/rpc/Debug.h b/libraries/core_libs/network/rpc/Debug.h index 936e193177..19923be9d6 100644 --- a/libraries/core_libs/network/rpc/Debug.h +++ b/libraries/core_libs/network/rpc/Debug.h @@ -36,13 +36,16 @@ class Debug : public DebugFace { virtual Json::Value debug_traceCall(const Json::Value& param1, const std::string& param2) override; virtual Json::Value trace_call(const Json::Value& param1, const Json::Value& param2, const std::string& param3) override; + virtual Json::Value trace_replayTransaction(const std::string& param1, const Json::Value& param2) override; private: state_api::EVMTransaction to_eth_trx(std::shared_ptr t) const; state_api::EVMTransaction to_eth_trx(const Json::Value& json, EthBlockNumber blk_num); EthBlockNumber parse_blk_num(const string& blk_num_str); state_api::Tracing parse_tracking_parms(const Json::Value& json) const; - Address toAddress(const string& s) const; + Address to_address(const string& s) const; + std::pair, std::optional> + get_transaction_with_location(const std::string& transaction_hash) const; std::weak_ptr full_node_; const uint64_t kGasLimit = ((uint64_t)1 << 53) - 1; diff --git a/libraries/core_libs/network/rpc/Debug.jsonrpc.json b/libraries/core_libs/network/rpc/Debug.jsonrpc.json index 8a54721da9..ba0b72454c 100644 --- a/libraries/core_libs/network/rpc/Debug.jsonrpc.json +++ b/libraries/core_libs/network/rpc/Debug.jsonrpc.json @@ 
-25,5 +25,14 @@ ], "order": [], "returns": {} + }, + { + "name": "trace_replayTransaction", + "params": [ + "", + [] + ], + "order": [], + "returns": {} } ] \ No newline at end of file diff --git a/libraries/core_libs/network/rpc/DebugClient.h b/libraries/core_libs/network/rpc/DebugClient.h index 05d3904bd9..7aa5afc3b5 100644 --- a/libraries/core_libs/network/rpc/DebugClient.h +++ b/libraries/core_libs/network/rpc/DebugClient.h @@ -45,6 +45,17 @@ class DebugClient : public jsonrpc::Client { else throw jsonrpc::JsonRpcException(jsonrpc::Errors::ERROR_CLIENT_INVALID_RESPONSE, result.toStyledString()); } + Json::Value trace_replayTransaction(const std::string& param1, + const Json::Value& param2) throw(jsonrpc::JsonRpcException) { + Json::Value p; + p.append(param1); + p.append(param2); + Json::Value result = this->CallMethod("trace_replayTransaction", p); + if (result.isObject()) + return result; + else + throw jsonrpc::JsonRpcException(jsonrpc::Errors::ERROR_CLIENT_INVALID_RESPONSE, result.toStyledString()); + } }; } // namespace net diff --git a/libraries/core_libs/network/rpc/DebugFace.h b/libraries/core_libs/network/rpc/DebugFace.h index 3411cecb00..086bf90673 100644 --- a/libraries/core_libs/network/rpc/DebugFace.h +++ b/libraries/core_libs/network/rpc/DebugFace.h @@ -22,6 +22,10 @@ class DebugFace : public ServerInterface { jsonrpc::Procedure("trace_call", jsonrpc::PARAMS_BY_POSITION, jsonrpc::JSON_OBJECT, "param1", jsonrpc::JSON_OBJECT, "param2", jsonrpc::JSON_ARRAY, "param3", jsonrpc::JSON_STRING, NULL), &taraxa::net::DebugFace::trace_callI); + this->bindAndAddMethod( + jsonrpc::Procedure("trace_replayTransaction", jsonrpc::PARAMS_BY_POSITION, jsonrpc::JSON_OBJECT, "param1", + jsonrpc::JSON_STRING, "param2", jsonrpc::JSON_ARRAY, NULL), + &taraxa::net::DebugFace::trace_replayTransactionI); } inline virtual void debug_traceTransactionI(const Json::Value& request, Json::Value& response) { @@ -33,9 +37,13 @@ class DebugFace : public ServerInterface { inline 
virtual void trace_callI(const Json::Value& request, Json::Value& response) { response = this->trace_call(request[0u], request[1u], request[2u].asString()); } + inline virtual void trace_replayTransactionI(const Json::Value& request, Json::Value& response) { + response = this->trace_replayTransaction(request[0u].asString(), request[1u]); + } virtual Json::Value debug_traceTransaction(const std::string& param1) = 0; virtual Json::Value debug_traceCall(const Json::Value& param1, const std::string& param2) = 0; virtual Json::Value trace_call(const Json::Value& param1, const Json::Value& param2, const std::string& param3) = 0; + virtual Json::Value trace_replayTransaction(const std::string& param1, const Json::Value& param2) = 0; }; } // namespace net diff --git a/submodules/taraxa-evm b/submodules/taraxa-evm index 9eaa5f51ce..983c116615 160000 --- a/submodules/taraxa-evm +++ b/submodules/taraxa-evm @@ -1 +1 @@ -Subproject commit 9eaa5f51ce4dba40ad2c123bca058b2eab0cc948 +Subproject commit 983c11661549954ad1d839347247511ff7759abe From c7ecdb98892409388e6bba2b138c9972defaaa70 Mon Sep 17 00:00:00 2001 From: Matus Kysel Date: Fri, 28 Apr 2023 08:16:25 +0200 Subject: [PATCH 135/162] feat: add support for tracking blocks --- .../include/final_chain/final_chain.hpp | 8 ++--- .../include/final_chain/state_api.hpp | 4 +-- .../consensus/src/final_chain/final_chain.cpp | 20 +++++------ .../consensus/src/final_chain/state_api.cpp | 10 +++--- libraries/core_libs/network/rpc/Debug.cpp | 33 ++++++++++++++++--- libraries/core_libs/network/rpc/Debug.h | 1 + .../core_libs/network/rpc/Debug.jsonrpc.json | 9 +++++ libraries/core_libs/network/rpc/DebugClient.h | 11 +++++++ libraries/core_libs/network/rpc/DebugFace.h | 8 +++++ submodules/taraxa-evm | 2 +- 10 files changed, 79 insertions(+), 27 deletions(-) diff --git a/libraries/core_libs/consensus/include/final_chain/final_chain.hpp b/libraries/core_libs/consensus/include/final_chain/final_chain.hpp index 30e97a429d..f2d7948c1f 100644 --- 
a/libraries/core_libs/consensus/include/final_chain/final_chain.hpp +++ b/libraries/core_libs/consensus/include/final_chain/final_chain.hpp @@ -177,14 +177,14 @@ class FinalChain { std::optional blk_n = {}) const = 0; /** - * @brief Trace execution of a new message call immediately without creating a transaction on the block chain. That + * @brief Trace execution of a new message call immediately without creating a transactions on the block chain. That * means that state would be reverted and not saved anywhere - * @param trx state_api::EVMTransaction + * @param trxs std::vector vector of transaction to trace * @param blk_n EthBlockNumber number of block we are getting state from * @return std::string */ - virtual std::string trace_trx(const state_api::EVMTransaction& trx, EthBlockNumber blk_n, - std::optional params = {}) const = 0; + virtual std::string trace(std::vector trx, EthBlockNumber blk_n, + std::optional params = {}) const = 0; /** * @brief total count of eligible votes are in DPOS precompiled contract diff --git a/libraries/core_libs/consensus/include/final_chain/state_api.hpp b/libraries/core_libs/consensus/include/final_chain/state_api.hpp index 5096eec020..c5349be356 100644 --- a/libraries/core_libs/consensus/include/final_chain/state_api.hpp +++ b/libraries/core_libs/consensus/include/final_chain/state_api.hpp @@ -38,8 +38,8 @@ class StateAPI { u256 get_account_storage(EthBlockNumber blk_num, const addr_t& addr, const u256& key) const; bytes get_code_by_address(EthBlockNumber blk_num, const addr_t& addr) const; ExecutionResult dry_run_transaction(EthBlockNumber blk_num, const EVMBlock& blk, const EVMTransaction& trx) const; - bytes trace_transaction(EthBlockNumber blk_num, const EVMBlock& blk, const EVMTransaction& trx, - std::optional params = {}) const; + bytes trace(EthBlockNumber blk_num, const EVMBlock& blk, const std::vector trx, + std::optional params = {}) const; StateDescriptor get_last_committed_state_descriptor() const; const 
StateTransitionResult& transition_state(const EVMBlock& block, const util::RangeView& transactions, diff --git a/libraries/core_libs/consensus/src/final_chain/final_chain.cpp b/libraries/core_libs/consensus/src/final_chain/final_chain.cpp index 212a2a2127..cd7d42cd97 100644 --- a/libraries/core_libs/consensus/src/final_chain/final_chain.cpp +++ b/libraries/core_libs/consensus/src/final_chain/final_chain.cpp @@ -407,17 +407,17 @@ class FinalChainImpl final : public FinalChain { trx); } - std::string trace_trx(const state_api::EVMTransaction& trx, EthBlockNumber blk_n, - std::optional params = {}) const override { + std::string trace(std::vector trxs, EthBlockNumber blk_n, + std::optional params = {}) const override { const auto blk_header = block_header(last_if_absent(blk_n)); - return dev::asString(state_api_.trace_transaction(blk_header->number, - { - blk_header->author, - blk_header->gas_limit, - blk_header->timestamp, - BlockHeader::difficulty(), - }, - trx, params)); + return dev::asString(state_api_.trace(blk_header->number, + { + blk_header->author, + blk_header->gas_limit, + blk_header->timestamp, + BlockHeader::difficulty(), + }, + trxs, params)); } uint64_t dpos_eligible_total_vote_count(EthBlockNumber blk_num) const override { diff --git a/libraries/core_libs/consensus/src/final_chain/state_api.cpp b/libraries/core_libs/consensus/src/final_chain/state_api.cpp index 7e18f6c408..6f0fa3a6a4 100644 --- a/libraries/core_libs/consensus/src/final_chain/state_api.cpp +++ b/libraries/core_libs/consensus/src/final_chain/state_api.cpp @@ -160,13 +160,13 @@ ExecutionResult StateAPI::dry_run_transaction(EthBlockNumber blk_num, const EVMB trx); } -bytes StateAPI::trace_transaction(EthBlockNumber blk_num, const EVMBlock& blk, const EVMTransaction& trx, - std::optional params) const { +bytes StateAPI::trace(EthBlockNumber blk_num, const EVMBlock& blk, const std::vector trxs, + std::optional params) const { if (params) { - return c_method_args_rlp(this_c_, blk_num, blk, 
trx, - *params); + return c_method_args_rlp(this_c_, blk_num, blk, trxs, + *params); } else { - return c_method_args_rlp(this_c_, blk_num, blk, trx); + return c_method_args_rlp(this_c_, blk_num, blk, trxs); } } diff --git a/libraries/core_libs/network/rpc/Debug.cpp b/libraries/core_libs/network/rpc/Debug.cpp index 4a94f48a10..3b506ebd69 100644 --- a/libraries/core_libs/network/rpc/Debug.cpp +++ b/libraries/core_libs/network/rpc/Debug.cpp @@ -21,7 +21,7 @@ Json::Value Debug::debug_traceTransaction(const std::string& transaction_hash) { return res; } if (auto node = full_node_.lock()) { - return util::readJsonFromString(node->getFinalChain()->trace_trx(to_eth_trx(std::move(trx)), loc->blk_n - 1)); + return util::readJsonFromString(node->getFinalChain()->trace({to_eth_trx(std::move(trx))}, loc->blk_n - 1)); } } catch (std::exception& e) { res["status"] = e.what(); @@ -35,7 +35,7 @@ Json::Value Debug::debug_traceCall(const Json::Value& call_params, const std::st const auto block = parse_blk_num(blk_num); auto trx = to_eth_trx(call_params, block); if (auto node = full_node_.lock()) { - return util::readJsonFromString(node->getFinalChain()->trace_trx(std::move(trx), block)); + return util::readJsonFromString(node->getFinalChain()->trace({std::move(trx)}, block)); } } catch (std::exception& e) { res["status"] = e.what(); @@ -48,10 +48,10 @@ Json::Value Debug::trace_call(const Json::Value& call_params, const Json::Value& Json::Value res; try { const auto block = parse_blk_num(blk_num); - auto trx = to_eth_trx(call_params, block); auto params = parse_tracking_parms(trace_params); if (auto node = full_node_.lock()) { - return util::readJsonFromString(node->getFinalChain()->trace_trx(std::move(trx), block, std::move(params))); + return util::readJsonFromString( + node->getFinalChain()->trace({to_eth_trx(call_params, block)}, block, std::move(params))); } } catch (std::exception& e) { res["status"] = e.what(); @@ -70,7 +70,30 @@ Json::Value Debug::trace_replayTransaction(const 
std::string& transaction_hash, } if (auto node = full_node_.lock()) { return util::readJsonFromString( - node->getFinalChain()->trace_trx(to_eth_trx(std::move(trx)), loc->blk_n - 1, std::move(params))); + node->getFinalChain()->trace({to_eth_trx(std::move(trx))}, loc->blk_n - 1, std::move(params))); + } + } catch (std::exception& e) { + res["status"] = e.what(); + } + return res; +} + +Json::Value Debug::trace_replayBlockTransactions(const std::string& block_num, const Json::Value& trace_params) { + Json::Value res; + try { + const auto block = parse_blk_num(block_num); + auto params = parse_tracking_parms(trace_params); + if (auto node = full_node_.lock()) { + auto transactions = node->getDB()->getPeriodTransactions(block); + if (!transactions.has_value() || transactions->empty()) { + res["status"] = "Block has no transactions"; + return res; + } + std::vector trxs; + trxs.reserve(transactions->size()); + std::transform(transactions->begin(), transactions->end(), std::back_inserter(trxs), + [this](auto t) { return to_eth_trx(std::move(t)); }); + return util::readJsonFromString(node->getFinalChain()->trace(std::move(trxs), block - 1, std::move(params))); } } catch (std::exception& e) { res["status"] = e.what(); diff --git a/libraries/core_libs/network/rpc/Debug.h b/libraries/core_libs/network/rpc/Debug.h index 19923be9d6..d318f9deee 100644 --- a/libraries/core_libs/network/rpc/Debug.h +++ b/libraries/core_libs/network/rpc/Debug.h @@ -37,6 +37,7 @@ class Debug : public DebugFace { virtual Json::Value trace_call(const Json::Value& param1, const Json::Value& param2, const std::string& param3) override; virtual Json::Value trace_replayTransaction(const std::string& param1, const Json::Value& param2) override; + virtual Json::Value trace_replayBlockTransactions(const std::string& param1, const Json::Value& param2) override; private: state_api::EVMTransaction to_eth_trx(std::shared_ptr t) const; diff --git a/libraries/core_libs/network/rpc/Debug.jsonrpc.json 
b/libraries/core_libs/network/rpc/Debug.jsonrpc.json index ba0b72454c..88c5411af1 100644 --- a/libraries/core_libs/network/rpc/Debug.jsonrpc.json +++ b/libraries/core_libs/network/rpc/Debug.jsonrpc.json @@ -34,5 +34,14 @@ ], "order": [], "returns": {} + }, + { + "name": "trace_replayBlockTransactions", + "params": [ + "", + [] + ], + "order": [], + "returns": {} } ] \ No newline at end of file diff --git a/libraries/core_libs/network/rpc/DebugClient.h b/libraries/core_libs/network/rpc/DebugClient.h index 7aa5afc3b5..f0faaf58ec 100644 --- a/libraries/core_libs/network/rpc/DebugClient.h +++ b/libraries/core_libs/network/rpc/DebugClient.h @@ -56,6 +56,17 @@ class DebugClient : public jsonrpc::Client { else throw jsonrpc::JsonRpcException(jsonrpc::Errors::ERROR_CLIENT_INVALID_RESPONSE, result.toStyledString()); } + Json::Value trace_replayBlockTransactions(const std::string& param1, + const Json::Value& param2) throw(jsonrpc::JsonRpcException) { + Json::Value p; + p.append(param1); + p.append(param2); + Json::Value result = this->CallMethod("trace_replayBlockTransactions", p); + if (result.isObject()) + return result; + else + throw jsonrpc::JsonRpcException(jsonrpc::Errors::ERROR_CLIENT_INVALID_RESPONSE, result.toStyledString()); + } }; } // namespace net diff --git a/libraries/core_libs/network/rpc/DebugFace.h b/libraries/core_libs/network/rpc/DebugFace.h index 086bf90673..fbfd16f363 100644 --- a/libraries/core_libs/network/rpc/DebugFace.h +++ b/libraries/core_libs/network/rpc/DebugFace.h @@ -26,6 +26,10 @@ class DebugFace : public ServerInterface { jsonrpc::Procedure("trace_replayTransaction", jsonrpc::PARAMS_BY_POSITION, jsonrpc::JSON_OBJECT, "param1", jsonrpc::JSON_STRING, "param2", jsonrpc::JSON_ARRAY, NULL), &taraxa::net::DebugFace::trace_replayTransactionI); + this->bindAndAddMethod( + jsonrpc::Procedure("trace_replayBlockTransactions", jsonrpc::PARAMS_BY_POSITION, jsonrpc::JSON_OBJECT, "param1", + jsonrpc::JSON_STRING, "param2", jsonrpc::JSON_ARRAY, NULL), + 
&taraxa::net::DebugFace::trace_replayBlockTransactionsI); } inline virtual void debug_traceTransactionI(const Json::Value& request, Json::Value& response) { @@ -40,10 +44,14 @@ class DebugFace : public ServerInterface { inline virtual void trace_replayTransactionI(const Json::Value& request, Json::Value& response) { response = this->trace_replayTransaction(request[0u].asString(), request[1u]); } + inline virtual void trace_replayBlockTransactionsI(const Json::Value& request, Json::Value& response) { + response = this->trace_replayBlockTransactions(request[0u].asString(), request[1u]); + } virtual Json::Value debug_traceTransaction(const std::string& param1) = 0; virtual Json::Value debug_traceCall(const Json::Value& param1, const std::string& param2) = 0; virtual Json::Value trace_call(const Json::Value& param1, const Json::Value& param2, const std::string& param3) = 0; virtual Json::Value trace_replayTransaction(const std::string& param1, const Json::Value& param2) = 0; + virtual Json::Value trace_replayBlockTransactions(const std::string& param1, const Json::Value& param2) = 0; }; } // namespace net diff --git a/submodules/taraxa-evm b/submodules/taraxa-evm index 983c116615..67b732df89 160000 --- a/submodules/taraxa-evm +++ b/submodules/taraxa-evm @@ -1 +1 @@ -Subproject commit 983c11661549954ad1d839347247511ff7759abe +Subproject commit 67b732df89bdded116837c03589d2da3a6b0f64c From 72e47567ade29e17cfbe5abb2606c61ce2375daf Mon Sep 17 00:00:00 2001 From: Matus Kysel Date: Tue, 2 May 2023 11:59:35 +0200 Subject: [PATCH 136/162] chore: add check for block 0 --- libraries/core_libs/network/rpc/Debug.cpp | 16 ++++++++++++---- submodules/taraxa-evm | 2 +- 2 files changed, 13 insertions(+), 5 deletions(-) diff --git a/libraries/core_libs/network/rpc/Debug.cpp b/libraries/core_libs/network/rpc/Debug.cpp index 3b506ebd69..4e5e93b315 100644 --- a/libraries/core_libs/network/rpc/Debug.cpp +++ b/libraries/core_libs/network/rpc/Debug.cpp @@ -12,6 +12,11 @@ using namespace 
jsonrpc; using namespace taraxa; namespace taraxa::net { + +inline EthBlockNumber get_ctx_block_num(EthBlockNumber block_number) { + return (block_number >= 1) ? block_number - 1 : 0; +} + Json::Value Debug::debug_traceTransaction(const std::string& transaction_hash) { Json::Value res; try { @@ -21,7 +26,8 @@ Json::Value Debug::debug_traceTransaction(const std::string& transaction_hash) { return res; } if (auto node = full_node_.lock()) { - return util::readJsonFromString(node->getFinalChain()->trace({to_eth_trx(std::move(trx))}, loc->blk_n - 1)); + return util::readJsonFromString( + node->getFinalChain()->trace({to_eth_trx(std::move(trx))}, get_ctx_block_num(loc->blk_n))); } } catch (std::exception& e) { res["status"] = e.what(); @@ -70,7 +76,7 @@ Json::Value Debug::trace_replayTransaction(const std::string& transaction_hash, } if (auto node = full_node_.lock()) { return util::readJsonFromString( - node->getFinalChain()->trace({to_eth_trx(std::move(trx))}, loc->blk_n - 1, std::move(params))); + node->getFinalChain()->trace({to_eth_trx(std::move(trx))}, get_ctx_block_num(loc->blk_n), std::move(params))); } } catch (std::exception& e) { res["status"] = e.what(); @@ -93,7 +99,8 @@ Json::Value Debug::trace_replayBlockTransactions(const std::string& block_num, c trxs.reserve(transactions->size()); std::transform(transactions->begin(), transactions->end(), std::back_inserter(trxs), [this](auto t) { return to_eth_trx(std::move(t)); }); - return util::readJsonFromString(node->getFinalChain()->trace(std::move(trxs), block - 1, std::move(params))); + return util::readJsonFromString( + node->getFinalChain()->trace(std::move(trxs), get_ctx_block_num(block), std::move(params))); } } catch (std::exception& e) { res["status"] = e.what(); @@ -108,7 +115,8 @@ state_api::Tracing Debug::parse_tracking_parms(const Json::Value& json) const { } for (const auto& obj : json) { if (obj.asString() == "trace") ret.trace = true; - if (obj.asString() == "stateDiff") ret.stateDiff = true; + // 
Disabled for now + // if (obj.asString() == "stateDiff") ret.stateDiff = true; if (obj.asString() == "vmTrace") ret.vmTrace = true; } return ret; diff --git a/submodules/taraxa-evm b/submodules/taraxa-evm index 67b732df89..6dafb2e792 160000 --- a/submodules/taraxa-evm +++ b/submodules/taraxa-evm @@ -1 +1 @@ -Subproject commit 67b732df89bdded116837c03589d2da3a6b0f64c +Subproject commit 6dafb2e79278cff343293b82ba25198924b8aed2 From d92796a0e89b9e450fde9eda8380eabb57c45f2b Mon Sep 17 00:00:00 2001 From: Matus Kysel Date: Tue, 2 May 2023 13:09:53 +0200 Subject: [PATCH 137/162] chore: simplify code --- .../core_libs/consensus/src/final_chain/state_api.cpp | 8 ++------ submodules/taraxa-evm | 2 +- 2 files changed, 3 insertions(+), 7 deletions(-) diff --git a/libraries/core_libs/consensus/src/final_chain/state_api.cpp b/libraries/core_libs/consensus/src/final_chain/state_api.cpp index 6f0fa3a6a4..7cf3e17657 100644 --- a/libraries/core_libs/consensus/src/final_chain/state_api.cpp +++ b/libraries/core_libs/consensus/src/final_chain/state_api.cpp @@ -162,12 +162,8 @@ ExecutionResult StateAPI::dry_run_transaction(EthBlockNumber blk_num, const EVMB bytes StateAPI::trace(EthBlockNumber blk_num, const EVMBlock& blk, const std::vector trxs, std::optional params) const { - if (params) { - return c_method_args_rlp(this_c_, blk_num, blk, trxs, - *params); - } else { - return c_method_args_rlp(this_c_, blk_num, blk, trxs); - } + return c_method_args_rlp(this_c_, blk_num, blk, trxs, + params); } StateDescriptor StateAPI::get_last_committed_state_descriptor() const { diff --git a/submodules/taraxa-evm b/submodules/taraxa-evm index 6dafb2e792..f12d621f69 160000 --- a/submodules/taraxa-evm +++ b/submodules/taraxa-evm @@ -1 +1 @@ -Subproject commit 6dafb2e79278cff343293b82ba25198924b8aed2 +Subproject commit f12d621f69f58a9fe17abf8f5064233a4b64e255 From 1ec921d75129bf6711e40227635e2d64400336f1 Mon Sep 17 00:00:00 2001 From: Matus Kysel Date: Wed, 3 May 2023 07:52:24 +0200 Subject: [PATCH 
138/162] chore: update evm --- submodules/taraxa-evm | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/submodules/taraxa-evm b/submodules/taraxa-evm index f12d621f69..88ab801e01 160000 --- a/submodules/taraxa-evm +++ b/submodules/taraxa-evm @@ -1 +1 @@ -Subproject commit f12d621f69f58a9fe17abf8f5064233a4b64e255 +Subproject commit 88ab801e01aa6ceb15a5dc21a289a23111594195 From db9c16cc66859014209ae460f413fcb9691df717 Mon Sep 17 00:00:00 2001 From: mfrankovi Date: Wed, 3 May 2023 08:49:11 +0200 Subject: [PATCH 139/162] chore: improve prune --- .../include/final_chain/state_api.hpp | 3 +-- .../consensus/src/dag/dag_manager.cpp | 3 +++ .../consensus/src/final_chain/final_chain.cpp | 20 ++++++++++------ .../consensus/src/final_chain/state_api.cpp | 5 ++-- submodules/taraxa-evm | 2 +- tests/full_node_test.cpp | 24 +++---------------- 6 files changed, 23 insertions(+), 34 deletions(-) diff --git a/libraries/core_libs/consensus/include/final_chain/state_api.hpp b/libraries/core_libs/consensus/include/final_chain/state_api.hpp index c5349be356..9d09067dbb 100644 --- a/libraries/core_libs/consensus/include/final_chain/state_api.hpp +++ b/libraries/core_libs/consensus/include/final_chain/state_api.hpp @@ -48,8 +48,7 @@ class StateAPI { const RewardsStats& rewards_stats = {}); void transition_state_commit(); void create_snapshot(PbftPeriod period); - void prune(const dev::h256& state_root_to_keep, const std::vector& state_root_to_prune, - EthBlockNumber blk_num); + void prune(const std::vector& state_root_to_keep, EthBlockNumber blk_num); // DPOS uint64_t dpos_eligible_total_vote_count(EthBlockNumber blk_num) const; diff --git a/libraries/core_libs/consensus/src/dag/dag_manager.cpp b/libraries/core_libs/consensus/src/dag/dag_manager.cpp index bfe041cdd4..82c91cdda0 100644 --- a/libraries/core_libs/consensus/src/dag/dag_manager.cpp +++ b/libraries/core_libs/consensus/src/dag/dag_manager.cpp @@ -287,6 +287,8 @@ void DagManager::clearLightNodeHistory() { // Actual 
history size will be between 100% and 110% of light_node_history_ to avoid deleting on every period if (((period_ % (std::max(light_node_history_ / 10, (uint64_t)1)) == 0)) && period_ > light_node_history_ && dag_expiry_level_ > max_levels_per_period_ + 1) { + // This will happen at most once a day so log a silent log + LOG(log_si_) << "Clear light node history"; const auto proposal_period = db_->getProposalPeriodForDagLevel(dag_expiry_level_ - max_levels_per_period_ - 1); assert(proposal_period); @@ -299,6 +301,7 @@ void DagManager::clearLightNodeHistory() { << " *proposal_period " << *proposal_period; LOG(log_tr_) << "Delete period history from: " << start << " to " << end; db_->clearPeriodDataHistory(end); + LOG(log_si_) << "Clear light node history completed"; } } diff --git a/libraries/core_libs/consensus/src/final_chain/final_chain.cpp b/libraries/core_libs/consensus/src/final_chain/final_chain.cpp index cd7d42cd97..ceb25d4d7b 100644 --- a/libraries/core_libs/consensus/src/final_chain/final_chain.cpp +++ b/libraries/core_libs/consensus/src/final_chain/final_chain.cpp @@ -119,9 +119,11 @@ class FinalChainImpl final : public FinalChain { } delegation_delay_ = config.genesis.state.dpos.delegation_delay; - if (config.db_config.prune_state_db && last_blk_num.has_value() && *last_blk_num > kLightNodeHistory) { + const auto kPruneblocksToKeep = kDagExpiryLevelLimit + kMaxLevelsPerPeriod + 1; + if ((config.db_config.prune_state_db || kLightNode) && last_blk_num.has_value() && + *last_blk_num > kPruneblocksToKeep) { LOG(log_si_) << "Pruning state db, this might take several minutes"; - prune(*last_blk_num - kLightNodeHistory); + prune(*last_blk_num - kPruneblocksToKeep); LOG(log_si_) << "Pruning state db complete"; } } @@ -246,13 +248,17 @@ class FinalChainImpl final : public FinalChain { } void prune(EthBlockNumber blk_n) override { - const auto last_block_to_keep = get_block_header(blk_n); + LOG(log_nf_) << "Pruning data older than " << blk_n; + auto 
last_block_to_keep = get_block_header(blk_n); if (last_block_to_keep) { - std::vector state_root_to_prune; - LOG(log_nf_) << "Pruning data older than " << blk_n; + auto block_to_keep = last_block_to_keep; + std::vector state_root_to_keep; + while (block_to_keep) { + state_root_to_keep.push_back(block_to_keep->state_root); + block_to_keep = get_block_header(block_to_keep->number + 1); + } auto block_to_prune = get_block_header(last_block_to_keep->number - 1); while (block_to_prune && block_to_prune->number > 0) { - state_root_to_prune.push_back(block_to_prune->state_root); db_->remove(DB::Columns::final_chain_blk_by_number, block_to_prune->number); db_->remove(DB::Columns::final_chain_blk_hash_by_number, block_to_prune->number); db_->remove(DB::Columns::final_chain_blk_number_by_hash, block_to_prune->hash); @@ -263,7 +269,7 @@ class FinalChainImpl final : public FinalChain { db_->compactColumn(DB::Columns::final_chain_blk_hash_by_number); db_->compactColumn(DB::Columns::final_chain_blk_number_by_hash); - state_api_.prune(last_block_to_keep->state_root, state_root_to_prune, last_block_to_keep->number); + state_api_.prune(state_root_to_keep, last_block_to_keep->number); } } diff --git a/libraries/core_libs/consensus/src/final_chain/state_api.cpp b/libraries/core_libs/consensus/src/final_chain/state_api.cpp index 7cf3e17657..b8d37375e7 100644 --- a/libraries/core_libs/consensus/src/final_chain/state_api.cpp +++ b/libraries/core_libs/consensus/src/final_chain/state_api.cpp @@ -204,9 +204,8 @@ void StateAPI::create_snapshot(PbftPeriod period) { err_h.check(); } -void StateAPI::prune(const dev::h256& state_root_to_keep, const std::vector& state_root_to_prune, - EthBlockNumber blk_num) { - return c_method_args_rlp(this_c_, state_root_to_keep, state_root_to_prune, blk_num); +void StateAPI::prune(const std::vector& state_root_to_keep, EthBlockNumber blk_num) { + return c_method_args_rlp(this_c_, state_root_to_keep, blk_num); } uint64_t 
StateAPI::dpos_eligible_total_vote_count(EthBlockNumber blk_num) const { diff --git a/submodules/taraxa-evm b/submodules/taraxa-evm index 88ab801e01..4bb83fc4c6 160000 --- a/submodules/taraxa-evm +++ b/submodules/taraxa-evm @@ -1 +1 @@ -Subproject commit 88ab801e01aa6ceb15a5dc21a289a23111594195 +Subproject commit 4bb83fc4c6f74cdf4951baeea80d400102e0147f diff --git a/tests/full_node_test.cpp b/tests/full_node_test.cpp index 0b0ce29a37..132bb33ae0 100644 --- a/tests/full_node_test.cpp +++ b/tests/full_node_test.cpp @@ -685,31 +685,13 @@ TEST_F(FullNodeTest, sync_five_nodes) { // Prune state_db of one node auto prune_node = nodes[nodes.size() - 1]; - const uint32_t min_blocks_to_prune = 50; + const uint32_t min_blocks_to_prune = 30; // This ensures that we never prune blocks that are over proposal period - ASSERT_HAPPENS({20s, 100ms}, [&](auto &ctx) { - const auto max_level = prune_node->getDagManager()->getMaxLevel(); - const auto proposal_period = prune_node->getDB()->getProposalPeriodForDagLevel(max_level); - ASSERT_TRUE(proposal_period.has_value()); - context.dummy_transaction(); - WAIT_EXPECT_TRUE(ctx, ((*proposal_period) > min_blocks_to_prune)) + ASSERT_HAPPENS({40s, 100ms}, [&](auto &ctx) { + WAIT_EXPECT_TRUE(ctx, (prune_node->getPbftChain()->getPbftChainSize() > min_blocks_to_prune + kMaxLevelsPerPeriod)) }); prune_node->getFinalChain()->prune(min_blocks_to_prune); context.assert_balances_synced(); - - // transfer some coins to pruned node ... 
- context.coin_transfer(0, prune_node->getAddress(), init_bal, false); - context.wait_all_transactions_known(); - - std::cout << "Waiting until transaction is executed" << std::endl; - auto trx_cnt = context.getIssuedTrxCount(); - ASSERT_HAPPENS({20s, 500ms}, [&](auto &ctx) { - for (size_t i = 0; i < nodes.size(); ++i) - WAIT_EXPECT_EQ(ctx, nodes[i]->getDB()->getNumTransactionExecuted(), trx_cnt) - }); - - // Check balances after prune"; - context.assert_balances_synced(); } TEST_F(FullNodeTest, insert_anchor_and_compute_order) { From 6728c0cf6051378199e9fb8f652ef6314090b782 Mon Sep 17 00:00:00 2001 From: Matus Kysel Date: Fri, 5 May 2023 14:34:55 +0200 Subject: [PATCH 140/162] feat: parallel execution of state pruing --- submodules/taraxa-evm | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/submodules/taraxa-evm b/submodules/taraxa-evm index 4bb83fc4c6..b147444796 160000 --- a/submodules/taraxa-evm +++ b/submodules/taraxa-evm @@ -1 +1 @@ -Subproject commit 4bb83fc4c6f74cdf4951baeea80d400102e0147f +Subproject commit b14744479634cb8f29b4f13f3a8ab77742c166d0 From 2959dcb34f489d83918101302f644a3f3f9cc918 Mon Sep 17 00:00:00 2001 From: Matus Kysel Date: Thu, 11 May 2023 08:05:44 +0200 Subject: [PATCH 141/162] chore: update evm repo --- submodules/taraxa-evm | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/submodules/taraxa-evm b/submodules/taraxa-evm index b147444796..c620c6fe72 160000 --- a/submodules/taraxa-evm +++ b/submodules/taraxa-evm @@ -1 +1 @@ -Subproject commit b14744479634cb8f29b4f13f3a8ab77742c166d0 +Subproject commit c620c6fe72301f6eef8d0fe24c5bdf8db8e9f39d From 985f7dc604d57d40ae75858a5ccbb0fe67adbe6b Mon Sep 17 00:00:00 2001 From: Dmytro Kostenko Date: Fri, 28 Apr 2023 11:09:48 +0200 Subject: [PATCH 142/162] refactor: rewards stats passing --- .../common/include/common/range_view.hpp | 7 +-- .../include/final_chain/rewards_stats.hpp | 34 +++++++------ .../include/final_chain/state_api.hpp | 4 +- 
.../consensus/src/final_chain/final_chain.cpp | 25 ++++++---- .../src/final_chain/rewards_stats.cpp | 50 +++++++++---------- .../consensus/src/final_chain/state_api.cpp | 7 +-- .../pbft_block/include/pbft/pbft_block.hpp | 23 +++++---- tests/final_chain_test.cpp | 31 ++++++++++-- tests/state_api_test.cpp | 6 +-- 9 files changed, 104 insertions(+), 83 deletions(-) diff --git a/libraries/common/include/common/range_view.hpp b/libraries/common/include/common/range_view.hpp index 5c93b77870..8f6cbf29d4 100644 --- a/libraries/common/include/common/range_view.hpp +++ b/libraries/common/include/common/range_view.hpp @@ -3,7 +3,7 @@ #include #include -namespace taraxa::util::range_view { +namespace taraxa::util { template struct RangeView { @@ -72,9 +72,4 @@ auto make_range_view(Seq const &seq) { return RangeView(seq); } -} // namespace taraxa::util::range_view - -namespace taraxa::util { -using range_view::make_range_view; -using range_view::RangeView; } // namespace taraxa::util diff --git a/libraries/core_libs/consensus/include/final_chain/rewards_stats.hpp b/libraries/core_libs/consensus/include/final_chain/rewards_stats.hpp index ee925e16db..c5ce0a0b1c 100644 --- a/libraries/core_libs/consensus/include/final_chain/rewards_stats.hpp +++ b/libraries/core_libs/consensus/include/final_chain/rewards_stats.hpp @@ -16,20 +16,25 @@ namespace taraxa { class RewardsStats { public: /** - * @brief Process PeriodData and returns vector of validators, who included provided block.transactions as first in - * dag block, e.g. 
returned validator on position 2 included transaction block.transactions[2] as first in his dag - * block + * @brief setting block_author_, max_votes_weight_ and calls processStats function * - * @param block * @param dpos_vote_count - votes count for previous block * @param committee_size - * @return vector of validators */ - std::vector processStats(const PeriodData& block, uint64_t dpos_vote_count, uint32_t committee_size); + RewardsStats(const PeriodData& block, uint64_t dpos_vote_count, uint32_t committee_size); HAS_RLP_FIELDS private: + /** + * @brief Process PeriodData and returns vector of validators, who included provided block.transactions as first in + * dag block, e.g. returned validator on position 2 included transaction block.transactions[2] as first in his dag + * block + * + * @param block + * @return vector of validators + */ + void processStats(const PeriodData& block); /** * @brief In case unique tx_hash is provided, it is mapped to it's validator's address + validator's unique txs count * is incremented. 
If provided tx_hash was already processed, nothing happens @@ -56,15 +61,6 @@ class RewardsStats { */ bool addVote(const std::shared_ptr& vote); - /** - * @brief Prepares reward statistics bases on period data data - * - * @param sync_blk - * @param dpos_vote_count - votes count for previous block - * @param committee_size - */ - void initStats(const PeriodData& sync_blk, uint64_t dpos_vote_count, uint32_t committee_size); - private: struct ValidatorStats { // count of rewardable(with 1 or more unique transactions) DAG blocks produced by this validator @@ -76,8 +72,14 @@ class RewardsStats { HAS_RLP_FIELDS }; + // Pbft block author + addr_t block_author_; + // Transactions validators: tx hash -> validator that included it as first in his block - std::unordered_map txs_validators_; + std::unordered_map validator_by_tx_hash_; + + // Vector with all transactions validators + std::vector txs_validators_; // Txs stats: validator -> ValidatorStats std::unordered_map validators_stats_; diff --git a/libraries/core_libs/consensus/include/final_chain/state_api.hpp b/libraries/core_libs/consensus/include/final_chain/state_api.hpp index 9d09067dbb..691494456f 100644 --- a/libraries/core_libs/consensus/include/final_chain/state_api.hpp +++ b/libraries/core_libs/consensus/include/final_chain/state_api.hpp @@ -43,9 +43,7 @@ class StateAPI { StateDescriptor get_last_committed_state_descriptor() const; const StateTransitionResult& transition_state(const EVMBlock& block, const util::RangeView& transactions, - const util::RangeView& transactions_validators = {}, - const util::RangeView& uncles = {}, - const RewardsStats& rewards_stats = {}); + const std::vector& rewards_stats = {}); void transition_state_commit(); void create_snapshot(PbftPeriod period); void prune(const std::vector& state_root_to_keep, EthBlockNumber blk_num); diff --git a/libraries/core_libs/consensus/src/final_chain/final_chain.cpp b/libraries/core_libs/consensus/src/final_chain/final_chain.cpp index 
ceb25d4d7b..53256f9afe 100644 --- a/libraries/core_libs/consensus/src/final_chain/final_chain.cpp +++ b/libraries/core_libs/consensus/src/final_chain/final_chain.cpp @@ -144,19 +144,26 @@ class FinalChainImpl final : public FinalChain { EthBlockNumber delegation_delay() const override { return delegation_delay_; } + std::vector prepare_rewards_stats_(const PeriodData& blk) { + std::vector rewards_stats; + uint64_t dpos_vote_count = kCommitteeSize; + + // Block zero + if (!blk.previous_block_cert_votes.empty()) [[likely]] { + dpos_vote_count = dpos_eligible_total_vote_count(blk.previous_block_cert_votes[0]->getPeriod() - 1); + } + + rewards_stats.emplace_back(blk, dpos_vote_count, kCommitteeSize); + + return rewards_stats; + } + std::shared_ptr finalize_(PeriodData&& new_blk, std::vector&& finalized_dag_blk_hashes, std::shared_ptr&& anchor) { auto batch = db_->createWriteBatch(); - RewardsStats rewards_stats; - uint64_t dpos_vote_count = kCommitteeSize; - // Block zero - if (!new_blk.previous_block_cert_votes.empty()) [[unlikely]] { - dpos_vote_count = dpos_eligible_total_vote_count(new_blk.previous_block_cert_votes[0]->getPeriod() - 1); - } - // returns list of validators for new_blk.transactions - const std::vector txs_validators = rewards_stats.processStats(new_blk, dpos_vote_count, kCommitteeSize); + auto rewards_stats = prepare_rewards_stats_(new_blk); block_applying_emitter_.emit(block_header()->number + 1); @@ -180,7 +187,7 @@ class FinalChainImpl final : public FinalChain { auto const& [exec_results, state_root, total_reward] = state_api_.transition_state({new_blk.pbft_blk->getBeneficiary(), kBlockGasLimit, new_blk.pbft_blk->getTimestamp(), BlockHeader::difficulty()}, - to_state_api_transactions(new_blk.transactions), txs_validators, {}, rewards_stats); + to_state_api_transactions(new_blk.transactions), rewards_stats); TransactionReceipts receipts; receipts.reserve(exec_results.size()); diff --git 
a/libraries/core_libs/consensus/src/final_chain/rewards_stats.cpp b/libraries/core_libs/consensus/src/final_chain/rewards_stats.cpp index 809a48d227..45f20a82ea 100644 --- a/libraries/core_libs/consensus/src/final_chain/rewards_stats.cpp +++ b/libraries/core_libs/consensus/src/final_chain/rewards_stats.cpp @@ -2,25 +2,33 @@ #include +#include "pbft/pbft_block.hpp" + namespace taraxa { +RewardsStats::RewardsStats(const PeriodData& block, uint64_t dpos_vote_count, uint32_t committee_size) + : block_author_(block.pbft_blk->getBeneficiary()), + max_votes_weight_(std::min(committee_size, dpos_vote_count)) { + processStats(block); +} + bool RewardsStats::addTransaction(const trx_hash_t& tx_hash, const addr_t& validator) { - auto found_tx = txs_validators_.find(tx_hash); + auto found_tx = validator_by_tx_hash_.find(tx_hash); // Already processed tx - if (found_tx != txs_validators_.end()) { + if (found_tx != validator_by_tx_hash_.end()) { return false; } // New tx - txs_validators_[tx_hash] = validator; + validator_by_tx_hash_[tx_hash] = validator; return true; } std::optional RewardsStats::getTransactionValidator(const trx_hash_t& tx_hash) { - auto found_tx = txs_validators_.find(tx_hash); - if (found_tx == txs_validators_.end()) { + auto found_tx = validator_by_tx_hash_.find(tx_hash); + if (found_tx == validator_by_tx_hash_.end()) { return {}; } @@ -52,12 +60,12 @@ std::set toTrxHashesSet(const SharedTransactions& transactions) { return block_transactions_hashes_; } -void RewardsStats::initStats(const PeriodData& sync_blk, uint64_t dpos_vote_count, uint32_t committee_size) { - txs_validators_.reserve(sync_blk.transactions.size()); - validators_stats_.reserve(std::max(sync_blk.dag_blocks.size(), sync_blk.previous_block_cert_votes.size())); - auto block_transactions_hashes_ = toTrxHashesSet(sync_blk.transactions); +void RewardsStats::processStats(const PeriodData& block) { + validator_by_tx_hash_.reserve(block.transactions.size()); + 
validators_stats_.reserve(std::max(block.dag_blocks.size(), block.previous_block_cert_votes.size())); + auto block_transactions_hashes_ = toTrxHashesSet(block.transactions); - for (const auto& dag_block : sync_blk.dag_blocks) { + for (const auto& dag_block : block.dag_blocks) { const addr_t& dag_block_author = dag_block.getSender(); bool has_unique_transactions = false; for (const auto& tx_hash : dag_block.getTrxs()) { @@ -78,34 +86,24 @@ void RewardsStats::initStats(const PeriodData& sync_blk, uint64_t dpos_vote_coun } } // total_unique_txs_count_ should be always equal to transactions count in block - assert(txs_validators_.size() == sync_blk.transactions.size()); + assert(validator_by_tx_hash_.size() == block.transactions.size()); - max_votes_weight_ = std::min(committee_size, dpos_vote_count); - for (const auto& vote : sync_blk.previous_block_cert_votes) { + for (const auto& vote : block.previous_block_cert_votes) { addVote(vote); } -} - -std::vector RewardsStats::processStats(const PeriodData& block, uint64_t dpos_vote_count, - uint32_t committee_size) { - initStats(block, dpos_vote_count, committee_size); - - // Dag blocks validators that included transactions to be executed as first in their blocks - std::vector txs_validators; - txs_validators.reserve(block.transactions.size()); + txs_validators_.reserve(block.transactions.size()); for (const auto& tx : block.transactions) { // Non-executed trxs auto tx_validator = getTransactionValidator(tx->getHash()); assert(tx_validator.has_value()); - txs_validators.push_back(*tx_validator); + txs_validators_.push_back(*tx_validator); } - - return txs_validators; } RLP_FIELDS_DEFINE(RewardsStats::ValidatorStats, dag_blocks_count_, vote_weight_) -RLP_FIELDS_DEFINE(RewardsStats, validators_stats_, total_dag_blocks_count_, total_votes_weight_, max_votes_weight_) +RLP_FIELDS_DEFINE(RewardsStats, block_author_, validators_stats_, txs_validators_, total_dag_blocks_count_, + total_votes_weight_, max_votes_weight_) } // 
namespace taraxa \ No newline at end of file diff --git a/libraries/core_libs/consensus/src/final_chain/state_api.cpp b/libraries/core_libs/consensus/src/final_chain/state_api.cpp index b8d37375e7..c73ceb2f4b 100644 --- a/libraries/core_libs/consensus/src/final_chain/state_api.cpp +++ b/libraries/core_libs/consensus/src/final_chain/state_api.cpp @@ -177,14 +177,11 @@ StateDescriptor StateAPI::get_last_committed_state_descriptor() const { const StateTransitionResult& StateAPI::transition_state(const EVMBlock& block, const util::RangeView& transactions, - const util::RangeView& transactions_validators, - const util::RangeView& uncles, - const RewardsStats& rewards_stats) { + const std::vector& rewards_stats) { result_buf_transition_state_.execution_results.clear(); rlp_enc_transition_state_.clear(); c_method_args_rlp( - this_c_, rlp_enc_transition_state_, result_buf_transition_state_, block, transactions, transactions_validators, - uncles, rewards_stats); + this_c_, rlp_enc_transition_state_, result_buf_transition_state_, block, transactions, rewards_stats); return result_buf_transition_state_; } diff --git a/libraries/types/pbft_block/include/pbft/pbft_block.hpp b/libraries/types/pbft_block/include/pbft/pbft_block.hpp index a830a3afa5..9aa3a35cbf 100644 --- a/libraries/types/pbft_block/include/pbft/pbft_block.hpp +++ b/libraries/types/pbft_block/include/pbft/pbft_block.hpp @@ -16,7 +16,7 @@ namespace taraxa { */ /** - * @brief The PbftBlockk class is a PBFT block class that includes PBFT block hash, previous PBFT block hash, DAG anchor + * @brief The PbftBlock class is a PBFT block class that includes PBFT block hash, previous PBFT block hash, DAG anchor * hash, DAG blocks ordering hash, period number, timestamp, proposer address, and proposer signature. 
*/ class PbftBlock { @@ -35,8 +35,8 @@ class PbftBlock { PbftBlock(const blk_hash_t& prev_blk_hash, const blk_hash_t& dag_blk_hash_as_pivot, const blk_hash_t& order_hash, const blk_hash_t& prev_state_root, PbftPeriod period, const addr_t& beneficiary, const secret_t& sk, std::vector&& reward_votes); - explicit PbftBlock(dev::RLP const& rlp); - explicit PbftBlock(bytes const& RLP); + explicit PbftBlock(const dev::RLP& rlp); + explicit PbftBlock(const bytes& RLP); /** * @brief Secure Hash Algorithm 3 @@ -77,33 +77,33 @@ class PbftBlock { * @param dag_blks DAG blocks hashes * @return PBFT block with DAG blocks in JSON */ - static Json::Value toJson(PbftBlock const& b, std::vector const& dag_blks); + static Json::Value toJson(const PbftBlock& b, const std::vector& dag_blks); /** * @brief Get PBFT block hash * @return PBFT block hash */ - auto const& getBlockHash() const { return block_hash_; } + const auto& getBlockHash() const { return block_hash_; } /** * @brief Get previous PBFT block hash * @return previous PBFT block hash */ - auto const& getPrevBlockHash() const { return prev_block_hash_; } + const auto& getPrevBlockHash() const { return prev_block_hash_; } /** * @brief Get DAG anchor hash for the finalized PBFT block * @return DAG anchor hash */ - auto const& getPivotDagBlockHash() const { return dag_block_hash_as_pivot_; } + const auto& getPivotDagBlockHash() const { return dag_block_hash_as_pivot_; } /** * @brief Get DAG blocks ordering hash * @return DAG blocks ordering hash */ - auto const& getOrderHash() const { return order_hash_; } + const auto& getOrderHash() const { return order_hash_; } - auto const& getPrevStateRoot() const { return prev_state_root_hash_; } + const auto& getPrevStateRoot() const { return prev_state_root_hash_; } /** * @brief Get period number @@ -121,7 +121,8 @@ class PbftBlock { * @brief Get PBFT block proposer address * @return PBFT block proposer address */ - auto const& getBeneficiary() const { return beneficiary_; } + const auto& 
getBeneficiary() const { return beneficiary_; } + const auto& getRewardVotes() const { return reward_votes_; } private: @@ -136,7 +137,7 @@ class PbftBlock { */ void checkUniqueRewardVotes(); }; -std::ostream& operator<<(std::ostream& strm, PbftBlock const& pbft_blk); +std::ostream& operator<<(std::ostream& strm, const PbftBlock& pbft_blk); /** @}*/ diff --git a/tests/final_chain_test.cpp b/tests/final_chain_test.cpp index a15be2ae36..781223a14b 100644 --- a/tests/final_chain_test.cpp +++ b/tests/final_chain_test.cpp @@ -49,12 +49,15 @@ struct FinalChainTest : WithDataDir { for (const auto& trx : trxs) { trx_hashes.emplace_back(trx->getHash()); } - DagBlock dag_blk({}, {}, {}, trx_hashes, {}, {}, secret_t::random()); + + auto proposer_keys = dev::KeyPair::create(); + DagBlock dag_blk({}, {}, {}, trx_hashes, {}, {}, proposer_keys.secret()); db->saveDagBlock(dag_blk); std::vector reward_votes_hashes; auto pbft_block = std::make_shared(kNullBlockHash, kNullBlockHash, kNullBlockHash, kNullBlockHash, expected_blk_num, - addr_t::random(), dev::KeyPair::create().secret(), std::move(reward_votes_hashes)); + addr_t::random(), proposer_keys.secret(), std::move(reward_votes_hashes)); + std::vector> votes; PeriodData period_data(pbft_block, votes); period_data.dag_blocks.push_back(dag_blk); @@ -62,7 +65,6 @@ struct FinalChainTest : WithDataDir { auto batch = db->createWriteBatch(); db->savePeriodData(period_data, batch); - db->commitWriteBatch(batch); auto result = SUT->finalize(std::move(period_data), {dag_blk.getHash()}).get(); @@ -447,6 +449,9 @@ TEST_F(FinalChainTest, failed_transaction_fee) { auto trx2_1 = std::make_shared(2, 101, 1, gas, dev::bytes(), sk, receiver); advance({trx1}); + auto blk = SUT->block_header(expected_blk_num); + auto proposer_balance = SUT->getBalance(blk->author); + EXPECT_EQ(proposer_balance.first, 21000); advance({trx2}); advance({trx3}); @@ -600,6 +605,26 @@ TEST_F(FinalChainTest, incorrect_estimation_regress) { } } +TEST_F(FinalChainTest, 
fee_rewards_distribution) { + auto sender_keys = dev::KeyPair::create(); + auto gas = 30000; + + const auto& receiver = dev::KeyPair::create().address(); + const auto& addr = sender_keys.address(); + const auto& sk = sender_keys.secret(); + cfg.genesis.state.initial_balances = {}; + cfg.genesis.state.initial_balances[addr] = 100000; + init(); + const auto gas_price = 1; + auto trx1 = std::make_shared(1, 100, gas_price, gas, dev::bytes(), sk, receiver); + + auto res = advance({trx1}); + auto gas_used = res->trx_receipts.front().gas_used; + auto blk = SUT->block_header(expected_blk_num); + auto proposer_balance = SUT->getBalance(blk->author); + EXPECT_EQ(proposer_balance.first, gas_used * gas_price); +} + // This test should be last as state_api isn't destructed correctly because of exception TEST_F(FinalChainTest, initial_validator_exceed_maximum_stake) { const dev::KeyPair key = dev::KeyPair::create(); diff --git a/tests/state_api_test.cpp b/tests/state_api_test.cpp index 49ce8fbe72..9d27f86220 100644 --- a/tests/state_api_test.cpp +++ b/tests/state_api_test.cpp @@ -25,9 +25,8 @@ struct TestBlock { h256 state_root; EVMBlock evm_block; vector transactions; - vector uncle_blocks; - RLP_FIELDS_DEFINE_INPLACE(hash, state_root, evm_block, transactions, uncle_blocks) + RLP_FIELDS_DEFINE_INPLACE(hash, state_root, evm_block, transactions) }; template @@ -202,8 +201,7 @@ TEST_F(StateAPITest, DISABLED_eth_mainnet_smoke) { progress_pct_log_threshold += 10; } auto const& test_block = test_blocks[blk_num]; - auto const& result = - SUT.transition_state(test_block.evm_block, test_block.transactions, {}, test_block.uncle_blocks); + auto const& result = SUT.transition_state(test_block.evm_block, test_block.transactions); ASSERT_EQ(result.state_root, test_block.state_root); SUT.transition_state_commit(); } From 363dc8ef406aeed45237acf2edf3c9121e490b10 Mon Sep 17 00:00:00 2001 From: Dmytro Kostenko Date: Fri, 5 May 2023 13:18:34 +0200 Subject: [PATCH 143/162] refector: move process 
rewards stats functionality to separate class --- libraries/core_libs/CMakeLists.txt | 6 +++ .../final_chain/contract_interface.hpp | 1 - .../include/final_chain/state_api.hpp | 4 +- .../block_stats.hpp} | 16 ++++---- .../include/rewards/rewards_stats.hpp | 17 ++++++++ .../consensus/src/final_chain/final_chain.cpp | 24 +++-------- .../consensus/src/final_chain/state_api.cpp | 2 +- .../block_stats.cpp} | 20 ++++----- .../consensus/src/rewards/rewards_stats.cpp | 41 +++++++++++++++++++ submodules/taraxa-evm | 2 +- 10 files changed, 91 insertions(+), 42 deletions(-) rename libraries/core_libs/consensus/include/{final_chain/rewards_stats.hpp => rewards/block_stats.hpp} (83%) create mode 100644 libraries/core_libs/consensus/include/rewards/rewards_stats.hpp rename libraries/core_libs/consensus/src/{final_chain/rewards_stats.cpp => rewards/block_stats.cpp} (81%) create mode 100644 libraries/core_libs/consensus/src/rewards/rewards_stats.cpp diff --git a/libraries/core_libs/CMakeLists.txt b/libraries/core_libs/CMakeLists.txt index b7bb353dcd..b5d3266a2c 100644 --- a/libraries/core_libs/CMakeLists.txt +++ b/libraries/core_libs/CMakeLists.txt @@ -15,11 +15,15 @@ file(GLOB_RECURSE STORAGE_SOURCES ${CMAKE_CURRENT_SOURCE_DIR}/storage/*.cpp) file(GLOB_RECURSE NODE_HEADERS ${CMAKE_CURRENT_SOURCE_DIR}/node/*.hpp) file(GLOB_RECURSE NODE_SOURCES ${CMAKE_CURRENT_SOURCE_DIR}/node/*.cpp) +file(GLOB_RECURSE REWARDS_HEADERS ${CMAKE_CURRENT_SOURCE_DIR}/rewards/*.hpp) +file(GLOB_RECURSE REWARDS_SOURCES ${CMAKE_CURRENT_SOURCE_DIR}/rewards/*.cpp) + set(HEADERS ${CONSENSUS_HEADERS} ${NETWORK_HEADERS} ${STORAGE_HEADERS} ${NODE_HEADERS} + ${REWARDS_HEADERS} ) set(SOURCES @@ -28,6 +32,7 @@ set(SOURCES ${STORAGE_SOURCES} ${NODE_SOURCES} ${GRAPHQL_GENERATED_SOURCES} + ${REWARDS_SOURCES} ) add_library(core_libs ${SOURCES} ${HEADERS}) @@ -36,6 +41,7 @@ target_include_directories(core_libs PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/consensu target_include_directories(core_libs PUBLIC 
${CMAKE_CURRENT_SOURCE_DIR}/network/include) target_include_directories(core_libs PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/node/include) target_include_directories(core_libs PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/storage/include) +target_include_directories(core_libs PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/rewards/include) target_include_directories(core_libs PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}) # GraphQL target_include_directories(core_libs PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/network/graphql/gen) diff --git a/libraries/core_libs/consensus/include/final_chain/contract_interface.hpp b/libraries/core_libs/consensus/include/final_chain/contract_interface.hpp index 9c4820edec..a4b7ddabb3 100644 --- a/libraries/core_libs/consensus/include/final_chain/contract_interface.hpp +++ b/libraries/core_libs/consensus/include/final_chain/contract_interface.hpp @@ -6,7 +6,6 @@ #include "common/types.hpp" #include "final_chain/final_chain.hpp" -#include "final_chain/rewards_stats.hpp" namespace taraxa::final_chain { class ContractInterface { diff --git a/libraries/core_libs/consensus/include/final_chain/state_api.hpp b/libraries/core_libs/consensus/include/final_chain/state_api.hpp index 691494456f..c30763f864 100644 --- a/libraries/core_libs/consensus/include/final_chain/state_api.hpp +++ b/libraries/core_libs/consensus/include/final_chain/state_api.hpp @@ -5,8 +5,8 @@ #include #include "common/range_view.hpp" -#include "final_chain/rewards_stats.hpp" #include "final_chain/state_api_data.hpp" +#include "rewards/block_stats.hpp" #include "storage/storage.hpp" namespace taraxa::state_api { @@ -43,7 +43,7 @@ class StateAPI { StateDescriptor get_last_committed_state_descriptor() const; const StateTransitionResult& transition_state(const EVMBlock& block, const util::RangeView& transactions, - const std::vector& rewards_stats = {}); + const std::vector& rewards_stats = {}); void transition_state_commit(); void create_snapshot(PbftPeriod period); void prune(const std::vector& state_root_to_keep, 
EthBlockNumber blk_num); diff --git a/libraries/core_libs/consensus/include/final_chain/rewards_stats.hpp b/libraries/core_libs/consensus/include/rewards/block_stats.hpp similarity index 83% rename from libraries/core_libs/consensus/include/final_chain/rewards_stats.hpp rename to libraries/core_libs/consensus/include/rewards/block_stats.hpp index c5ce0a0b1c..a3f0f057ca 100644 --- a/libraries/core_libs/consensus/include/final_chain/rewards_stats.hpp +++ b/libraries/core_libs/consensus/include/rewards/block_stats.hpp @@ -7,13 +7,13 @@ #include "pbft/period_data.hpp" #include "vote/vote.hpp" -namespace taraxa { +namespace taraxa::rewards { /** * @class RewardsStats * @brief RewardsStats contains rewards statistics for single pbft block */ -class RewardsStats { +class BlockStats { public: /** * @brief setting block_author_, max_votes_weight_ and calls processStats function @@ -21,18 +21,15 @@ class RewardsStats { * @param dpos_vote_count - votes count for previous block * @param committee_size */ - RewardsStats(const PeriodData& block, uint64_t dpos_vote_count, uint32_t committee_size); + BlockStats(const PeriodData& block, uint64_t dpos_vote_count, uint32_t committee_size); HAS_RLP_FIELDS private: /** - * @brief Process PeriodData and returns vector of validators, who included provided block.transactions as first in - * dag block, e.g. returned validator on position 2 included transaction block.transactions[2] as first in his dag - * block + * @brief Process PeriodData and save stats in class for future serialization. returns * * @param block - * @return vector of validators */ void processStats(const PeriodData& block); /** @@ -78,7 +75,8 @@ class RewardsStats { // Transactions validators: tx hash -> validator that included it as first in his block std::unordered_map validator_by_tx_hash_; - // Vector with all transactions validators + // Vector with all transactions validators, who included provided block.transactions as first in dag block, + // e.g. 
returned validator on position 2 included transaction block.transactions[2] as first in his dag block std::vector txs_validators_; // Txs stats: validator -> ValidatorStats @@ -94,4 +92,4 @@ class RewardsStats { uint64_t max_votes_weight_{0}; }; -} // namespace taraxa \ No newline at end of file +} // namespace taraxa::rewards \ No newline at end of file diff --git a/libraries/core_libs/consensus/include/rewards/rewards_stats.hpp b/libraries/core_libs/consensus/include/rewards/rewards_stats.hpp new file mode 100644 index 0000000000..062d6a7b8d --- /dev/null +++ b/libraries/core_libs/consensus/include/rewards/rewards_stats.hpp @@ -0,0 +1,17 @@ +#include "config/hardfork.hpp" +#include "rewards/block_stats.hpp" + +namespace taraxa::rewards { +class Stats { + public: + Stats(uint32_t committee_size, std::function&& dpos_eligible_total_vote_count); + + std::vector getStats(const PeriodData& current_blk); + + private: + BlockStats getBlockStats(const PeriodData& current_blk); + + const uint32_t kCommitteeSize; + const std::function dpos_eligible_total_vote_count_; +}; +} // namespace taraxa::rewards \ No newline at end of file diff --git a/libraries/core_libs/consensus/src/final_chain/final_chain.cpp b/libraries/core_libs/consensus/src/final_chain/final_chain.cpp index 53256f9afe..785ba45e5c 100644 --- a/libraries/core_libs/consensus/src/final_chain/final_chain.cpp +++ b/libraries/core_libs/consensus/src/final_chain/final_chain.cpp @@ -6,20 +6,21 @@ #include "common/constants.hpp" #include "common/thread_pool.hpp" #include "final_chain/cache.hpp" -#include "final_chain/rewards_stats.hpp" #include "final_chain/trie_common.hpp" +#include "rewards/rewards_stats.hpp" #include "vote/vote.hpp" namespace taraxa::final_chain { class FinalChainImpl final : public FinalChain { std::shared_ptr db_; - const uint32_t kCommitteeSize; const uint64_t kBlockGasLimit; StateAPI state_api_; const bool kLightNode = false; const uint64_t kLightNodeHistory = 0; const uint32_t 
kMaxLevelsPerPeriod; + const uint32_t kRewardsDistributionInterval = 100; + rewards::Stats rewards_; // It is not prepared to use more then 1 thread. Examine it if you want to change threads count boost::asio::thread_pool executor_thread_{1}; @@ -49,7 +50,6 @@ class FinalChainImpl final : public FinalChain { public: FinalChainImpl(const std::shared_ptr& db, const taraxa::FullNodeConfig& config, const addr_t& node_addr) : db_(db), - kCommitteeSize(config.genesis.pbft.committee_size), kBlockGasLimit(config.genesis.pbft.gas_limit), state_api_([this](auto n) { return block_hash(n).value_or(ZeroHash()); }, // config.genesis.state, config.opts_final_chain, @@ -59,6 +59,8 @@ class FinalChainImpl final : public FinalChain { kLightNode(config.is_light_node), kLightNodeHistory(config.light_node_history), kMaxLevelsPerPeriod(config.max_levels_per_period), + rewards_(config.genesis.pbft.committee_size, + [this](EthBlockNumber n) { return dpos_eligible_total_vote_count(n); }), block_headers_cache_(config.final_chain_cache_in_blocks, [this](uint64_t blk) { return get_block_header(blk); }), block_hashes_cache_(config.final_chain_cache_in_blocks, [this](uint64_t blk) { return get_block_hash(blk); }), @@ -144,26 +146,12 @@ class FinalChainImpl final : public FinalChain { EthBlockNumber delegation_delay() const override { return delegation_delay_; } - std::vector prepare_rewards_stats_(const PeriodData& blk) { - std::vector rewards_stats; - uint64_t dpos_vote_count = kCommitteeSize; - - // Block zero - if (!blk.previous_block_cert_votes.empty()) [[likely]] { - dpos_vote_count = dpos_eligible_total_vote_count(blk.previous_block_cert_votes[0]->getPeriod() - 1); - } - - rewards_stats.emplace_back(blk, dpos_vote_count, kCommitteeSize); - - return rewards_stats; - } - std::shared_ptr finalize_(PeriodData&& new_blk, std::vector&& finalized_dag_blk_hashes, std::shared_ptr&& anchor) { auto batch = db_->createWriteBatch(); - auto rewards_stats = prepare_rewards_stats_(new_blk); + auto 
rewards_stats = rewards_.getStats(new_blk); block_applying_emitter_.emit(block_header()->number + 1); diff --git a/libraries/core_libs/consensus/src/final_chain/state_api.cpp b/libraries/core_libs/consensus/src/final_chain/state_api.cpp index c73ceb2f4b..3e29cf8b97 100644 --- a/libraries/core_libs/consensus/src/final_chain/state_api.cpp +++ b/libraries/core_libs/consensus/src/final_chain/state_api.cpp @@ -177,7 +177,7 @@ StateDescriptor StateAPI::get_last_committed_state_descriptor() const { const StateTransitionResult& StateAPI::transition_state(const EVMBlock& block, const util::RangeView& transactions, - const std::vector& rewards_stats) { + const std::vector& rewards_stats) { result_buf_transition_state_.execution_results.clear(); rlp_enc_transition_state_.clear(); c_method_args_rlp( diff --git a/libraries/core_libs/consensus/src/final_chain/rewards_stats.cpp b/libraries/core_libs/consensus/src/rewards/block_stats.cpp similarity index 81% rename from libraries/core_libs/consensus/src/final_chain/rewards_stats.cpp rename to libraries/core_libs/consensus/src/rewards/block_stats.cpp index 45f20a82ea..2e6d1bfbeb 100644 --- a/libraries/core_libs/consensus/src/final_chain/rewards_stats.cpp +++ b/libraries/core_libs/consensus/src/rewards/block_stats.cpp @@ -1,18 +1,18 @@ -#include "final_chain/rewards_stats.hpp" +#include "rewards/block_stats.hpp" #include #include "pbft/pbft_block.hpp" -namespace taraxa { +namespace taraxa::rewards { -RewardsStats::RewardsStats(const PeriodData& block, uint64_t dpos_vote_count, uint32_t committee_size) +BlockStats::BlockStats(const PeriodData& block, uint64_t dpos_vote_count, uint32_t committee_size) : block_author_(block.pbft_blk->getBeneficiary()), max_votes_weight_(std::min(committee_size, dpos_vote_count)) { processStats(block); } -bool RewardsStats::addTransaction(const trx_hash_t& tx_hash, const addr_t& validator) { +bool BlockStats::addTransaction(const trx_hash_t& tx_hash, const addr_t& validator) { auto found_tx = 
validator_by_tx_hash_.find(tx_hash); // Already processed tx @@ -26,7 +26,7 @@ bool RewardsStats::addTransaction(const trx_hash_t& tx_hash, const addr_t& valid return true; } -std::optional RewardsStats::getTransactionValidator(const trx_hash_t& tx_hash) { +std::optional BlockStats::getTransactionValidator(const trx_hash_t& tx_hash) { auto found_tx = validator_by_tx_hash_.find(tx_hash); if (found_tx == validator_by_tx_hash_.end()) { return {}; @@ -35,7 +35,7 @@ std::optional RewardsStats::getTransactionValidator(const trx_hash_t& tx return {found_tx->second}; } -bool RewardsStats::addVote(const std::shared_ptr& vote) { +bool BlockStats::addVote(const std::shared_ptr& vote) { // Set valid cert vote to validator auto& validator_stats = validators_stats_[vote->getVoterAddr()]; assert(validator_stats.vote_weight_ == 0); @@ -60,7 +60,7 @@ std::set toTrxHashesSet(const SharedTransactions& transactions) { return block_transactions_hashes_; } -void RewardsStats::processStats(const PeriodData& block) { +void BlockStats::processStats(const PeriodData& block) { validator_by_tx_hash_.reserve(block.transactions.size()); validators_stats_.reserve(std::max(block.dag_blocks.size(), block.previous_block_cert_votes.size())); auto block_transactions_hashes_ = toTrxHashesSet(block.transactions); @@ -102,8 +102,8 @@ void RewardsStats::processStats(const PeriodData& block) { } } -RLP_FIELDS_DEFINE(RewardsStats::ValidatorStats, dag_blocks_count_, vote_weight_) -RLP_FIELDS_DEFINE(RewardsStats, block_author_, validators_stats_, txs_validators_, total_dag_blocks_count_, +RLP_FIELDS_DEFINE(BlockStats::ValidatorStats, dag_blocks_count_, vote_weight_) +RLP_FIELDS_DEFINE(BlockStats, block_author_, validators_stats_, txs_validators_, total_dag_blocks_count_, total_votes_weight_, max_votes_weight_) -} // namespace taraxa \ No newline at end of file +} // namespace taraxa::rewards \ No newline at end of file diff --git a/libraries/core_libs/consensus/src/rewards/rewards_stats.cpp 
b/libraries/core_libs/consensus/src/rewards/rewards_stats.cpp new file mode 100644 index 0000000000..fa201db1ce --- /dev/null +++ b/libraries/core_libs/consensus/src/rewards/rewards_stats.cpp @@ -0,0 +1,41 @@ +#include "rewards/rewards_stats.hpp" + +#include "storage/storage.hpp" + +namespace taraxa::rewards { +Stats::Stats(uint32_t committee_size, std::function&& dpos_eligible_total_vote_count) + : kCommitteeSize(committee_size), dpos_eligible_total_vote_count_(dpos_eligible_total_vote_count) {} + +// std::vector processBlockHardfork(const PeriodData& current_blk, uint32_t interval) { +// const auto current = current_blk.pbft_blk->getPeriod(); +// // skip for intermediate blocks +// if (current % interval != 0) { +// return {}; +// } + +// std::vector rewards_stats; +// rewards_stats.reserve(interval); +// // add rewards stats for (last_distribution_block, current_block) +// for (auto p = current - interval + 1; p < current; ++p) { +// auto blk = PeriodData(db_->getPeriodDataRaw(p)); +// rewards_stats.emplace_back(get_block_rewards_stats(blk)); +// } +// // add current block rewards stats +// rewards_stats.emplace_back(get_block_rewards_stats(current_blk)); + +// return rewards_stats; +// } + +BlockStats Stats::getBlockStats(const PeriodData& blk) { + uint64_t dpos_vote_count = kCommitteeSize; + + // Block zero + if (!blk.previous_block_cert_votes.empty()) [[likely]] { + dpos_vote_count = dpos_eligible_total_vote_count_(blk.previous_block_cert_votes[0]->getPeriod() - 1); + } + + return BlockStats{blk, dpos_vote_count, kCommitteeSize}; +} + +std::vector Stats::getStats(const PeriodData& current_blk) { return {getBlockStats(current_blk)}; } +} // namespace taraxa::rewards \ No newline at end of file diff --git a/submodules/taraxa-evm b/submodules/taraxa-evm index c620c6fe72..ffd207a4cb 160000 --- a/submodules/taraxa-evm +++ b/submodules/taraxa-evm @@ -1 +1 @@ -Subproject commit c620c6fe72301f6eef8d0fe24c5bdf8db8e9f39d +Subproject commit 
ffd207a4cb80919c1cdbf26f919b3a158ce497f3 From 184a7a25c0d350039afcaee180d66845342ad4f1 Mon Sep 17 00:00:00 2001 From: Dmytro Kostenko Date: Tue, 9 May 2023 12:39:19 +0200 Subject: [PATCH 144/162] feat: add option to change rewards distribution frequency with hardfork --- .../config_jsons/devnet/devnet_genesis.json | 4 + .../config_jsons/mainnet/mainnet_genesis.json | 4 + .../config_jsons/testnet/testnet_genesis.json | 4 + libraries/config/CMakeLists.txt | 4 +- libraries/config/include/config/hardfork.hpp | 13 +- .../config/include/config/state_config.hpp | 4 +- libraries/config/src/hardfork.cpp | 17 +- libraries/config/src/state_config.cpp | 4 +- .../consensus/include/rewards/block_stats.hpp | 4 +- .../include/rewards/rewards_stats.hpp | 43 +++- .../consensus/src/final_chain/final_chain.cpp | 4 +- .../consensus/src/rewards/rewards_stats.cpp | 80 +++++--- .../storage/include/storage/storage.hpp | 3 + libraries/core_libs/storage/src/storage.cpp | 10 + tests/CMakeLists.txt | 3 + tests/rewards_stats_test.cpp | 185 ++++++++++++++++++ .../test_util/include/test_util/test_util.hpp | 2 +- tests/test_util/src/test_util.cpp | 6 +- 18 files changed, 350 insertions(+), 44 deletions(-) create mode 100644 tests/rewards_stats_test.cpp diff --git a/libraries/cli/include/cli/config_jsons/devnet/devnet_genesis.json b/libraries/cli/include/cli/config_jsons/devnet/devnet_genesis.json index f86fc92377..ff7bf49bb2 100644 --- a/libraries/cli/include/cli/config_jsons/devnet/devnet_genesis.json +++ b/libraries/cli/include/cli/config_jsons/devnet/devnet_genesis.json @@ -226,6 +226,10 @@ "7e4aa664f71de4e9d0b4a6473d796372639bdcde": "0x1027e72f1f12813088000000", "ee1326fbf7d9322e5ea02c6fe5eb63535fceccd1": "0x52b7d2dcc80cd2e4000000" }, + "hardforks": { + "rewards_distribution_frequency": { + } + }, "gas_price": { "blocks": 200, "percentile": 60, diff --git a/libraries/cli/include/cli/config_jsons/mainnet/mainnet_genesis.json b/libraries/cli/include/cli/config_jsons/mainnet/mainnet_genesis.json 
index 5c9777aae6..a488289bce 100644 --- a/libraries/cli/include/cli/config_jsons/mainnet/mainnet_genesis.json +++ b/libraries/cli/include/cli/config_jsons/mainnet/mainnet_genesis.json @@ -243,6 +243,10 @@ } ] }, + "hardforks": { + "rewards_distribution_frequency": { + } + }, "initial_balances": { "723304d1357a2334fcf902aa3d232f5139080a1b": "0xd53323b7ca3737afbb45000", "b0800c7af0a6aec0ff8dbe01708bd8e300c6305b": "0x208b1d135e4a8000", diff --git a/libraries/cli/include/cli/config_jsons/testnet/testnet_genesis.json b/libraries/cli/include/cli/config_jsons/testnet/testnet_genesis.json index f07f92f352..7a7f30890f 100644 --- a/libraries/cli/include/cli/config_jsons/testnet/testnet_genesis.json +++ b/libraries/cli/include/cli/config_jsons/testnet/testnet_genesis.json @@ -1698,6 +1698,10 @@ "a903715b57d3bf62e098a6a643c6924d9bdacec4": "0x170a0f5040e50400000", "5bd47fef8e8dcb6677c2957ecd78b8232354f145": "0x191cf61eb2bec223400" }, + "hardforks": { + "rewards_distribution_frequency": { + } + }, "gas_price": { "blocks": 200, "percentile": 60, diff --git a/libraries/config/CMakeLists.txt b/libraries/config/CMakeLists.txt index 75616c110c..5df88e6af7 100644 --- a/libraries/config/CMakeLists.txt +++ b/libraries/config/CMakeLists.txt @@ -7,7 +7,7 @@ set(HEADERS include/config/dag_config.hpp include/config/pbft_config.hpp include/config/state_config.hpp - # include/config/hardfork.hpp + include/config/hardfork.hpp ) set(SOURCES @@ -18,7 +18,7 @@ set(SOURCES src/dag_config.cpp src/pbft_config.cpp src/state_config.cpp - # src/hardfork.cpp + src/hardfork.cpp ) # Configure file with version diff --git a/libraries/config/include/config/hardfork.hpp b/libraries/config/include/config/hardfork.hpp index e0ca9262a5..f0364b5dbc 100644 --- a/libraries/config/include/config/hardfork.hpp +++ b/libraries/config/include/config/hardfork.hpp @@ -5,7 +5,18 @@ #include "common/encoding_rlp.hpp" struct Hardforks { - uint64_t fix_genesis_fork_block = 0; + /* + * @brief key is block number at which 
change is applied and value is new distribution interval. + * Default distribution frequency is every block + * To change rewards distribution frequency we should add a new element in map below. + * For example {{101, 20}, {201, 10}} means: + * 1. for blocks [1,100] we are distributing rewards every block + * 2. for blocks [101, 200] rewards are distributed every 20 block. On blocks 120, 140, etc. + * 3. for blocks after 201 rewards are distributed every 10 block. On blocks 210, 220, etc. + */ + using RewardsDistributionMap = std::map; + RewardsDistributionMap rewards_distribution_frequency; + HAS_RLP_FIELDS }; diff --git a/libraries/config/include/config/state_config.hpp b/libraries/config/include/config/state_config.hpp index 1cbda1b401..15a0a6da18 100644 --- a/libraries/config/include/config/state_config.hpp +++ b/libraries/config/include/config/state_config.hpp @@ -6,7 +6,7 @@ #include "common/encoding_rlp.hpp" #include "common/types.hpp" #include "common/vrf_wrapper.hpp" -// #include "config/hardfork.hpp" +#include "config/hardfork.hpp" namespace taraxa::state_api { @@ -61,7 +61,7 @@ struct Config { EVMChainConfig evm_chain_config; BalanceMap initial_balances; DPOSConfig dpos; - // Hardforks hardforks; + Hardforks hardforks; HAS_RLP_FIELDS }; diff --git a/libraries/config/src/hardfork.cpp b/libraries/config/src/hardfork.cpp index 950558fc19..fc4f804c0e 100644 --- a/libraries/config/src/hardfork.cpp +++ b/libraries/config/src/hardfork.cpp @@ -2,14 +2,23 @@ Json::Value enc_json(const Hardforks& obj) { Json::Value json(Json::objectValue); - json["fix_genesis_fork_block"] = dev::toJS(obj.fix_genesis_fork_block); + + auto& rewards = json["rewards_distribution_frequency"]; + rewards = Json::objectValue; + for (auto i = obj.rewards_distribution_frequency.begin(); i != obj.rewards_distribution_frequency.end(); ++i) { + rewards[std::to_string(i->first)] = i->second; + } return json; } void dec_json(const Json::Value& json, Hardforks& obj) { - if (auto const& e = 
json["fix_genesis_fork_block"]) { - obj.fix_genesis_fork_block = dev::getUInt(e); + if (const auto& e = json["rewards_distribution_frequency"]) { + assert(e.isObject()); + + for (auto itr = e.begin(); itr != e.end(); ++itr) { + obj.rewards_distribution_frequency[itr.key().asUInt64()] = itr->asUInt64(); + } } } -RLP_FIELDS_DEFINE(Hardforks, fix_genesis_fork_block) +RLP_FIELDS_DEFINE(Hardforks, rewards_distribution_frequency) diff --git a/libraries/config/src/state_config.cpp b/libraries/config/src/state_config.cpp index 29278f78e8..a284a65e3e 100644 --- a/libraries/config/src/state_config.cpp +++ b/libraries/config/src/state_config.cpp @@ -22,14 +22,14 @@ void dec_json(const Json::Value& /*json*/, uint64_t chain_id, EVMChainConfig& ob void append_json(Json::Value& json, const Config& obj) { json["evm_chain_config"] = enc_json(obj.evm_chain_config); json["initial_balances"] = enc_json(obj.initial_balances); - // json["hardforks"] = enc_json(obj.hardforks); + json["hardforks"] = enc_json(obj.hardforks); json["dpos"] = enc_json(obj.dpos); } void dec_json(const Json::Value& json, Config& obj) { dec_json(json["evm_chain_config"], json["chain_id"].asUInt(), obj.evm_chain_config); dec_json(json["initial_balances"], obj.initial_balances); - // dec_json(json["hardforks"], obj.hardforks); + dec_json(json["hardforks"], obj.hardforks); dec_json(json["dpos"], obj.dpos); } diff --git a/libraries/core_libs/consensus/include/rewards/block_stats.hpp b/libraries/core_libs/consensus/include/rewards/block_stats.hpp index a3f0f057ca..2e80eff84b 100644 --- a/libraries/core_libs/consensus/include/rewards/block_stats.hpp +++ b/libraries/core_libs/consensus/include/rewards/block_stats.hpp @@ -15,6 +15,8 @@ namespace taraxa::rewards { */ class BlockStats { public: + // Needed for RLP + BlockStats() = default; /** * @brief setting block_author_, max_votes_weight_ and calls processStats function * @@ -58,7 +60,7 @@ class BlockStats { */ bool addVote(const std::shared_ptr& vote); - private: + 
protected: struct ValidatorStats { // count of rewardable(with 1 or more unique transactions) DAG blocks produced by this validator uint32_t dag_blocks_count_ = 0; diff --git a/libraries/core_libs/consensus/include/rewards/rewards_stats.hpp b/libraries/core_libs/consensus/include/rewards/rewards_stats.hpp index 062d6a7b8d..22fca73188 100644 --- a/libraries/core_libs/consensus/include/rewards/rewards_stats.hpp +++ b/libraries/core_libs/consensus/include/rewards/rewards_stats.hpp @@ -1,17 +1,54 @@ #include "config/hardfork.hpp" #include "rewards/block_stats.hpp" +#include "storage/storage.hpp" namespace taraxa::rewards { +/* + * @brief class that is managing rewards stats processing and hardforks(intervals changes) + * So intermediate blocks stats are stored in the vector in data(to restore on the node restart) + * and full list of interval stats is returned in the end of interval + */ class Stats { public: - Stats(uint32_t committee_size, std::function&& dpos_eligible_total_vote_count); + Stats(uint32_t committee_size, const Hardforks::RewardsDistributionMap& rdm, std::shared_ptr db, + std::function&& dpos_eligible_total_vote_count); - std::vector getStats(const PeriodData& current_blk); + /* + * @brief processing passed block and returns stats that should be processed at this block + * @param current_blk block to process + * @return vector that should be processed at current block + */ + std::vector processStats(const PeriodData& current_blk); - private: + protected: + /* + * @brief load current interval stats from database + */ + void loadFromDb(); + /* + * @brief returns rewards distribution frequency for specified period + */ + uint32_t getCurrentDistributionFrequency(uint64_t current_period) const; + /* + * @brief gets all needed data and makes(processes) BlocksStats + * @param current_blk block to process + * @return block statistics needed for rewards distribution + */ BlockStats getBlockStats(const PeriodData& current_blk); + /* + * @brief saves stats to 
database to not lose this data in case of node restart + */ + void saveBlockStats(uint64_t number, const BlockStats& stats); + /* + * @brief called on start of new rewards interval. clears blocks_stats_ collection + * and removes all data saved in db column + */ + void clear(); const uint32_t kCommitteeSize; + const Hardforks::RewardsDistributionMap kRewardsDistributionFrequency; + std::shared_ptr db_; const std::function dpos_eligible_total_vote_count_; + std::vector blocks_stats_; }; } // namespace taraxa::rewards \ No newline at end of file diff --git a/libraries/core_libs/consensus/src/final_chain/final_chain.cpp b/libraries/core_libs/consensus/src/final_chain/final_chain.cpp index 785ba45e5c..752e256d27 100644 --- a/libraries/core_libs/consensus/src/final_chain/final_chain.cpp +++ b/libraries/core_libs/consensus/src/final_chain/final_chain.cpp @@ -59,7 +59,7 @@ class FinalChainImpl final : public FinalChain { kLightNode(config.is_light_node), kLightNodeHistory(config.light_node_history), kMaxLevelsPerPeriod(config.max_levels_per_period), - rewards_(config.genesis.pbft.committee_size, + rewards_(config.genesis.pbft.committee_size, config.genesis.state.hardforks.rewards_distribution_frequency, db_, [this](EthBlockNumber n) { return dpos_eligible_total_vote_count(n); }), block_headers_cache_(config.final_chain_cache_in_blocks, [this](uint64_t blk) { return get_block_header(blk); }), @@ -151,7 +151,7 @@ class FinalChainImpl final : public FinalChain { std::shared_ptr&& anchor) { auto batch = db_->createWriteBatch(); - auto rewards_stats = rewards_.getStats(new_blk); + auto rewards_stats = rewards_.processStats(new_blk); block_applying_emitter_.emit(block_header()->number + 1); diff --git a/libraries/core_libs/consensus/src/rewards/rewards_stats.cpp b/libraries/core_libs/consensus/src/rewards/rewards_stats.cpp index fa201db1ce..1d4a03f5e5 100644 --- a/libraries/core_libs/consensus/src/rewards/rewards_stats.cpp +++ 
b/libraries/core_libs/consensus/src/rewards/rewards_stats.cpp @@ -3,28 +3,41 @@ #include "storage/storage.hpp" namespace taraxa::rewards { -Stats::Stats(uint32_t committee_size, std::function&& dpos_eligible_total_vote_count) - : kCommitteeSize(committee_size), dpos_eligible_total_vote_count_(dpos_eligible_total_vote_count) {} - -// std::vector processBlockHardfork(const PeriodData& current_blk, uint32_t interval) { -// const auto current = current_blk.pbft_blk->getPeriod(); -// // skip for intermediate blocks -// if (current % interval != 0) { -// return {}; -// } - -// std::vector rewards_stats; -// rewards_stats.reserve(interval); -// // add rewards stats for (last_distribution_block, current_block) -// for (auto p = current - interval + 1; p < current; ++p) { -// auto blk = PeriodData(db_->getPeriodDataRaw(p)); -// rewards_stats.emplace_back(get_block_rewards_stats(blk)); -// } -// // add current block rewards stats -// rewards_stats.emplace_back(get_block_rewards_stats(current_blk)); - -// return rewards_stats; -// } +Stats::Stats(uint32_t committee_size, const Hardforks::RewardsDistributionMap& rdm, std::shared_ptr db, + std::function&& dpos_eligible_total_vote_count) + : kCommitteeSize(committee_size), + kRewardsDistributionFrequency(rdm), + db_(std::move(db)), + dpos_eligible_total_vote_count_(dpos_eligible_total_vote_count) { + loadFromDb(); +} + +void Stats::loadFromDb() { + auto i = db_->getColumnIterator(DB::Columns::block_rewards_stats); + for (i->SeekToFirst(); i->Valid(); i->Next()) { + blocks_stats_.push_back(util::rlp_dec(dev::RLP(i->value().ToString()))); + } +} + +void Stats::saveBlockStats(uint64_t period, const BlockStats& stats) { + dev::RLPStream encoding; + stats.rlp(encoding); + + db_->insert(DB::Columns::block_rewards_stats, period, encoding.out()); +} + +uint32_t Stats::getCurrentDistributionFrequency(uint64_t current_block) const { + auto itr = kRewardsDistributionFrequency.upper_bound(current_block); + if 
(kRewardsDistributionFrequency.empty() || itr == kRewardsDistributionFrequency.begin()) { + return 1; + } + return (--itr)->second; +} + +void Stats::clear() { + blocks_stats_.clear(); + db_->deleteColumnData(DB::Columns::block_rewards_stats); +} BlockStats Stats::getBlockStats(const PeriodData& blk) { uint64_t dpos_vote_count = kCommitteeSize; @@ -37,5 +50,26 @@ BlockStats Stats::getBlockStats(const PeriodData& blk) { return BlockStats{blk, dpos_vote_count, kCommitteeSize}; } -std::vector Stats::getStats(const PeriodData& current_blk) { return {getBlockStats(current_blk)}; } +std::vector Stats::processStats(const PeriodData& current_blk) { + const auto current_period = current_blk.pbft_blk->getPeriod(); + const auto frequency = getCurrentDistributionFrequency(current_period); + + // Distribute rewards every block + if (frequency == 1) { + return {getBlockStats(current_blk)}; + } + + blocks_stats_.push_back(getBlockStats(current_blk)); + // Blocks between distribution. Process and save for future processing + if (current_period % frequency != 0) { + // Save to db, so in case of restart data could be just loaded for the period + saveBlockStats(current_period, *blocks_stats_.rbegin()); + return {}; + } + + std::vector res(std::move(blocks_stats_)); + clear(); + return res; +} + } // namespace taraxa::rewards \ No newline at end of file diff --git a/libraries/core_libs/storage/include/storage/storage.hpp b/libraries/core_libs/storage/include/storage/storage.hpp index 8d6f00182d..1dedb67234 100644 --- a/libraries/core_libs/storage/include/storage/storage.hpp +++ b/libraries/core_libs/storage/include/storage/storage.hpp @@ -121,6 +121,8 @@ class DbStorage : public std::enable_shared_from_this { COLUMN(final_chain_log_blooms_index); COLUMN_W_COMP(sortition_params_change, getIntComparator()); + COLUMN_W_COMP(block_rewards_stats, getIntComparator()); + #undef COLUMN #undef COLUMN_W_COMP }; @@ -177,6 +179,7 @@ class DbStorage : public std::enable_shared_from_this { void 
disableSnapshots(); void enableSnapshots(); void updateDbVersions(); + void deleteColumnData(const Column& c); uint32_t getMajorVersion() const; std::unique_ptr getColumnIterator(const Column& c); diff --git a/libraries/core_libs/storage/src/storage.cpp b/libraries/core_libs/storage/src/storage.cpp index aeb076e2b8..feb9d05f4b 100644 --- a/libraries/core_libs/storage/src/storage.cpp +++ b/libraries/core_libs/storage/src/storage.cpp @@ -91,6 +91,16 @@ void DbStorage::updateDbVersions() { saveStatusField(StatusDbField::DbMinorVersion, TARAXA_DB_MINOR_VERSION); } +void DbStorage::deleteColumnData(const Column& c) { + checkStatus(db_->DropColumnFamily(handle(c))); + + auto options = rocksdb::ColumnFamilyOptions(); + if (c.comparator_) { + options.comparator = c.comparator_; + } + checkStatus(db_->CreateColumnFamily(options, c.name(), &handles_[c.ordinal_])); +} + void DbStorage::rebuildColumns(const rocksdb::Options& options) { std::unique_ptr db; std::vector column_families; diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index b5b9bc2bf3..f9613497b3 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -69,6 +69,9 @@ add_executable(vote_test vote_test.cpp) target_link_libraries(vote_test test_util) add_test(vote_test ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/vote_test) +add_executable(rewards_stats_test rewards_stats_test.cpp) +target_link_libraries(rewards_stats_test test_util) +add_test(rewards_stats_test ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/rewards_stats_test) add_executable(tarcap_threadpool_test tarcap_threadpool_test.cpp) target_link_libraries(tarcap_threadpool_test test_util) diff --git a/tests/rewards_stats_test.cpp b/tests/rewards_stats_test.cpp new file mode 100644 index 0000000000..cce156c735 --- /dev/null +++ b/tests/rewards_stats_test.cpp @@ -0,0 +1,185 @@ +#include "rewards/rewards_stats.hpp" + +#include +#include +#include +#include + +#include "test_util/gtest.hpp" +#include "test_util/samples.hpp" + +namespace taraxa::core_tests { + +auto 
g_secret = dev::Secret("3800b2875669d9b2053c1aff9224ecfdc411423aac5b5a73d7a45ced1c3b9dcd", + dev::Secret::ConstructFromStringType::FromHex); +auto g_key_pair = Lazy([] { return dev::KeyPair(g_secret); }); + +struct RewardsStatsTest : NodesTest {}; + +class TestableRewardsStats : public rewards::Stats { + public: + TestableRewardsStats(const Hardforks::RewardsDistributionMap& rdm, std::shared_ptr db) + : rewards::Stats(100, rdm, db, [](auto) { return 100; }) {} + std::vector getStats() { return blocks_stats_; } +}; + +class TestableBlockStats : public rewards::BlockStats { + public: + const addr_t& getAuthor() const { return block_author_; } +}; + +TEST_F(RewardsStatsTest, defaultDistribution) { + auto db = std::make_shared(data_dir / "db"); + + std::vector> empty_votes; + auto rewards_stats = TestableRewardsStats({}, db); + + for (auto i = 1; i < 5; ++i) { + PeriodData block(make_simple_pbft_block(blk_hash_t(i), i), empty_votes); + auto stats = rewards_stats.processStats(block); + ASSERT_EQ(stats.size(), 1); + ASSERT_TRUE(rewards_stats.getStats().empty()); + } +} + +TEST_F(RewardsStatsTest, statsSaving) { + auto db = std::make_shared(data_dir / "db"); + + // distribute every 5 blocks + Hardforks::RewardsDistributionMap distribution{{0, 5}}; + + std::vector> empty_votes; + std::vector block_authors; + { + auto rewards_stats = TestableRewardsStats(distribution, db); + + for (auto i = 1; i < 5; ++i) { + auto kp = dev::KeyPair::create(); + block_authors.push_back(kp.address()); + + PeriodData block(make_simple_pbft_block(blk_hash_t(i), i, kp.secret()), empty_votes); + auto stats = rewards_stats.processStats(block); + ASSERT_EQ(rewards_stats.getStats().size(), block_authors.size()); + ASSERT_TRUE(stats.empty()); + } + } + { + // Load from db + auto rewards_stats = TestableRewardsStats(distribution, db); + auto stats = rewards_stats.getStats(); + ASSERT_EQ(rewards_stats.getStats().size(), block_authors.size()); + + for (size_t i = 0; i < stats.size(); ++i) { + auto 
stats_with_get = reinterpret_cast(&stats[i]); + ASSERT_EQ(stats_with_get->getAuthor(), block_authors[i]); + } + } +} + +TEST_F(RewardsStatsTest, statsCleaning) { + auto db = std::make_shared(data_dir / "db"); + + // distribute every 5 blocks + Hardforks::RewardsDistributionMap distribution{{0, 5}}; + + std::vector> empty_votes; + std::vector block_authors; + { + auto rewards_stats = TestableRewardsStats(distribution, db); + + for (auto i = 1; i < 5; ++i) { + auto kp = dev::KeyPair::create(); + block_authors.push_back(kp.address()); + + PeriodData block(make_simple_pbft_block(blk_hash_t(i), i, kp.secret()), empty_votes); + auto stats = rewards_stats.processStats(block); + ASSERT_EQ(rewards_stats.getStats().size(), block_authors.size()); + ASSERT_TRUE(stats.empty()); + } + + // Process block 5 after which we should have no stats elements in db + PeriodData block(make_simple_pbft_block(blk_hash_t(5), 5), empty_votes); + rewards_stats.processStats(block); + } + + // Load from db + auto rewards_stats = TestableRewardsStats(distribution, db); + ASSERT_TRUE(rewards_stats.getStats().empty()); +} + +TEST_F(RewardsStatsTest, statsProcessing) { + auto db = std::make_shared(data_dir / "db"); + // distribute every 10 blocks + auto rewards_stats = TestableRewardsStats({{0, 10}}, db); + + std::vector> empty_votes; + std::vector block_authors; + + // make blocks [1,9] and process them. 
output of processStats should be empty + for (auto i = 1; i < 10; ++i) { + auto kp = dev::KeyPair::create(); + block_authors.push_back(kp.address()); + + PeriodData block(make_simple_pbft_block(blk_hash_t(i), i, kp.secret()), empty_votes); + auto stats = rewards_stats.processStats(block); + ASSERT_TRUE(stats.empty()); + ASSERT_EQ(rewards_stats.getStats().size(), block_authors.size()); + } + + auto kp = dev::KeyPair::create(); + block_authors.push_back(kp.address()); + + PeriodData block(make_simple_pbft_block(blk_hash_t(10), 10, kp.secret()), empty_votes); + auto stats = rewards_stats.processStats(block); + ASSERT_EQ(stats.size(), block_authors.size()); + + for (size_t i = 0; i < stats.size(); ++i) { + auto stats_with_get = reinterpret_cast(&stats[i]); + ASSERT_EQ(stats_with_get->getAuthor(), block_authors[i]); + } + ASSERT_TRUE(rewards_stats.getStats().empty()); +} + +TEST_F(RewardsStatsTest, distributionChange) { + auto db = std::make_shared(data_dir / "db"); + + Hardforks::RewardsDistributionMap distribution{{6, 5}, {11, 2}}; + + auto rewards_stats = TestableRewardsStats(distribution, db); + + std::vector> empty_votes; + uint64_t period = 1; + for (; period <= 5; ++period) { + PeriodData block(make_simple_pbft_block(blk_hash_t(period), period), empty_votes); + auto stats = rewards_stats.processStats(block); + ASSERT_FALSE(stats.empty()); + } + { + // make blocks [1,9] and process them. 
output of processStats should be empty + for (; period < 10; ++period) { + PeriodData block(make_simple_pbft_block(blk_hash_t(period), period), empty_votes); + auto stats = rewards_stats.processStats(block); + ASSERT_TRUE(stats.empty()); + } + PeriodData block(make_simple_pbft_block(blk_hash_t(period), period), empty_votes); + auto stats = rewards_stats.processStats(block); + } + + PeriodData block(make_simple_pbft_block(blk_hash_t(period), period), empty_votes); + auto stats = rewards_stats.processStats(block); +} + +} // namespace taraxa::core_tests + +using namespace taraxa; +int main(int argc, char** argv) { + taraxa::static_init(); + + auto logging = logger::createDefaultLoggingConfig(); + logging.verbosity = logger::Verbosity::Error; + addr_t node_addr; + logging.InitLogging(node_addr); + + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} \ No newline at end of file diff --git a/tests/test_util/include/test_util/test_util.hpp b/tests/test_util/include/test_util/test_util.hpp index cdbd8659dc..951a22dec3 100644 --- a/tests/test_util/include/test_util/test_util.hpp +++ b/tests/test_util/include/test_util/test_util.hpp @@ -169,7 +169,7 @@ state_api::BalanceMap effective_initial_balances(const state_api::Config& cfg); u256 own_effective_genesis_bal(const FullNodeConfig& cfg); std::shared_ptr make_simple_pbft_block(const h256& hash, uint64_t period, - const h256& anchor_hash = kNullBlockHash); + const secret_t& pk = secret_t::random()); std::vector getOrderedDagBlocks(const std::shared_ptr& db); diff --git a/tests/test_util/src/test_util.cpp b/tests/test_util/src/test_util.cpp index 6599c4ea84..7c045c3df4 100644 --- a/tests/test_util/src/test_util.cpp +++ b/tests/test_util/src/test_util.cpp @@ -96,10 +96,10 @@ u256 own_effective_genesis_bal(const FullNodeConfig& cfg) { return effective_initial_balances(cfg.genesis.state)[dev::toAddress(dev::Secret(cfg.node_secret))]; } -std::shared_ptr make_simple_pbft_block(const h256& hash, uint64_t period, 
const h256& anchor_hash) { +std::shared_ptr make_simple_pbft_block(const h256& hash, uint64_t period, const secret_t& pk) { std::vector reward_votes_hashes; - return std::make_shared(hash, anchor_hash, kNullBlockHash, kNullBlockHash, period, addr_t(0), - secret_t::random(), std::move(reward_votes_hashes)); + return std::make_shared(hash, kNullBlockHash, kNullBlockHash, kNullBlockHash, period, addr_t(0), pk, + std::move(reward_votes_hashes)); } std::vector getOrderedDagBlocks(const std::shared_ptr& db) { From a2bfdee93b77f2e70c8bfbed1f1c1f98a82179e9 Mon Sep 17 00:00:00 2001 From: Matus Kysel Date: Fri, 12 May 2023 10:12:53 +0200 Subject: [PATCH 145/162] feat: clean LOG.old* files on db start --- .../storage/include/storage/storage.hpp | 4 +++ libraries/core_libs/storage/src/storage.cpp | 30 +++++++++++++++++-- 2 files changed, 31 insertions(+), 3 deletions(-) diff --git a/libraries/core_libs/storage/include/storage/storage.hpp b/libraries/core_libs/storage/include/storage/storage.hpp index 8d6f00182d..60986942e4 100644 --- a/libraries/core_libs/storage/include/storage/storage.hpp +++ b/libraries/core_libs/storage/include/storage/storage.hpp @@ -178,6 +178,10 @@ class DbStorage : public std::enable_shared_from_this { void enableSnapshots(); void updateDbVersions(); + // For removal of LOG.old.* files in the database + void removeOldLogFiles() const; + void removeFilesWithPattern(const std::string& directory, const std::regex& pattern) const; + uint32_t getMajorVersion() const; std::unique_ptr getColumnIterator(const Column& c); diff --git a/libraries/core_libs/storage/src/storage.cpp b/libraries/core_libs/storage/src/storage.cpp index aeb076e2b8..160529236a 100644 --- a/libraries/core_libs/storage/src/storage.cpp +++ b/libraries/core_libs/storage/src/storage.cpp @@ -4,6 +4,7 @@ #include #include #include +#include #include "config/version.hpp" #include "dag/sortition_params_manager.hpp" @@ -42,6 +43,8 @@ DbStorage::DbStorage(fs::path const& path, uint32_t 
db_snapshot_each_n_pbft_bloc } fs::create_directories(db_path_); + removeOldLogFiles(); + rocksdb::Options options; options.create_missing_column_families = true; options.create_if_missing = true; @@ -86,6 +89,26 @@ DbStorage::DbStorage(fs::path const& path, uint32_t db_snapshot_each_n_pbft_bloc } } +void DbStorage::removeOldLogFiles() const { + const std::regex filePattern("LOG\\.old\\.\\d+"); + removeFilesWithPattern(db_path_, filePattern); + removeFilesWithPattern(state_db_path_, filePattern); +} + +void DbStorage::removeFilesWithPattern(const std::string& directory, const std::regex& pattern) const { + try { + for (const auto& entry : std::filesystem::directory_iterator(directory)) { + const std::string& filename = entry.path().filename().string(); + if (std::regex_match(filename, pattern)) { + std::filesystem::remove(entry.path()); + LOG(log_dg_) << "Removed file: " << filename << std::endl; + } + } + } catch (const std::filesystem::filesystem_error& e) { + LOG(log_dg_) << "Error accessing directory: " << e.what() << std::endl; + } +} + void DbStorage::updateDbVersions() { saveStatusField(StatusDbField::DbMajorVersion, TARAXA_DB_MAJOR_VERSION); saveStatusField(StatusDbField::DbMinorVersion, TARAXA_DB_MINOR_VERSION); @@ -454,7 +477,8 @@ void DbStorage::clearPeriodDataHistory(PbftPeriod end_period) { auto start_slice = toSlice(start_period); auto end_slice = toSlice(end_period); for (auto period = start_period; period < end_period; period++) { - // Find transactions included in the old blocks and delete data related to these transactions to free disk space + // Find transactions included in the old blocks and delete data related to these transactions to free disk + // space auto trx_hashes_raw = lookup(period, DB::Columns::final_chain_transaction_hashes_by_blk_number); auto hashes_count = trx_hashes_raw.size() / trx_hash_t::size; for (uint32_t i = 0; i < hashes_count; i++) { @@ -471,8 +495,8 @@ void DbStorage::clearPeriodDataHistory(PbftPeriod end_period) { 
commitWriteBatch(write_batch); db_->DeleteRange(write_options_, handle(Columns::period_data), start_slice, end_slice); - // Deletion alone does not guarantee that the disk space is freed, these CompactRange methods actually compact the - // data in the database and free disk space + // Deletion alone does not guarantee that the disk space is freed, these CompactRange methods actually compact + // the data in the database and free disk space db_->CompactRange({}, handle(Columns::period_data), &start_slice, &end_slice); db_->CompactRange({}, handle(Columns::final_chain_receipt_by_trx_hash), nullptr, nullptr); db_->CompactRange({}, handle(Columns::final_chain_transaction_hashes_by_blk_number), nullptr, nullptr); From d964bafb279d4d9644404ff152cd5b34a736fd9b Mon Sep 17 00:00:00 2001 From: Matus Kysel Date: Fri, 12 May 2023 17:01:27 +0200 Subject: [PATCH 146/162] chore: increase timers for more stable test runs --- tests/network_test.cpp | 8 ++++---- tests/p2p_test.cpp | 2 +- tests/test_util/src/test_util.cpp | 2 +- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/tests/network_test.cpp b/tests/network_test.cpp index c8097b8440..d40c6d94ab 100644 --- a/tests/network_test.cpp +++ b/tests/network_test.cpp @@ -52,7 +52,7 @@ TEST_F(NetworkTest, transfer_block) { SharedTransactions transactions({g_signed_trx_samples[0], g_signed_trx_samples[1]}); nw2->getSpecificHandler()->onNewTransactions(std::move(transactions)); - EXPECT_HAPPENS({10s, 200ms}, [&](auto& ctx) { + EXPECT_HAPPENS({60s, 100ms}, [&](auto& ctx) { nw1->setPendingPeersToReady(); nw2->setPendingPeersToReady(); WAIT_EXPECT_EQ(ctx, nw1->getPeerCount(), 1) @@ -318,7 +318,7 @@ TEST_F(NetworkTest, transfer_transaction) { nw1->start(); nw2->start(); - EXPECT_HAPPENS({20s, 100ms}, [&](auto& ctx) { + EXPECT_HAPPENS({60s, 100ms}, [&](auto& ctx) { nw1->setPendingPeersToReady(); nw2->setPendingPeersToReady(); WAIT_EXPECT_EQ(ctx, nw1->getPeerCount(), 1) @@ -365,7 +365,7 @@ TEST_F(NetworkTest, save_network) { 
nw2->start(); nw3->start(); - EXPECT_HAPPENS({120s, 500ms}, [&](auto& ctx) { + EXPECT_HAPPENS({120s, 100ms}, [&](auto& ctx) { nw1->setPendingPeersToReady(); nw2->setPendingPeersToReady(); nw3->setPendingPeersToReady(); @@ -382,7 +382,7 @@ TEST_F(NetworkTest, save_network) { nw2->start(); nw3->start(); - EXPECT_HAPPENS({120s, 500ms}, [&](auto& ctx) { + EXPECT_HAPPENS({120s, 100ms}, [&](auto& ctx) { nw2->setPendingPeersToReady(); nw3->setPendingPeersToReady(); WAIT_EXPECT_EQ(ctx, nw2->getPeerCount(), 1) diff --git a/tests/p2p_test.cpp b/tests/p2p_test.cpp index 1e9dcf31be..9a215160a7 100644 --- a/tests/p2p_test.cpp +++ b/tests/p2p_test.cpp @@ -311,7 +311,7 @@ TEST_F(P2PTest, multiple_capabilities) { std::filesystem::remove_all("/tmp/nw3"); }; auto wait_for_connection = [](std::shared_ptr nw1, std::shared_ptr nw2) { - EXPECT_HAPPENS({15s, 500ms}, [&](auto &ctx) { + EXPECT_HAPPENS({60s, 100ms}, [&](auto &ctx) { nw1->setPendingPeersToReady(); nw2->setPendingPeersToReady(); WAIT_EXPECT_EQ(ctx, nw1->getPeerCount(), 1) diff --git a/tests/test_util/src/test_util.cpp b/tests/test_util/src/test_util.cpp index 7c045c3df4..7c13b18e86 100644 --- a/tests/test_util/src/test_util.cpp +++ b/tests/test_util/src/test_util.cpp @@ -302,7 +302,7 @@ std::vector NodesTest::make_node_cfgs(size_t total_count bool NodesTest::wait_connect(const std::vector>& nodes) { auto num_peers_connected = nodes.size() - 1; - return wait({30s, 1s}, [&](auto& ctx) { + return wait({60s, 100ms}, [&](auto& ctx) { for (const auto& node : nodes) { if (ctx.fail_if(node->getNetwork()->getPeerCount() < num_peers_connected)) { return; From 4bf032bb1cd72e5f91d74ee3b73721677174ee5a Mon Sep 17 00:00:00 2001 From: Matus Kysel Date: Mon, 15 May 2023 10:22:03 +0200 Subject: [PATCH 147/162] chore: rework how Host is destroyed --- libraries/aleth/libp2p/Host.cpp | 58 ++++++++++++++++++--------------- 1 file changed, 31 insertions(+), 27 deletions(-) diff --git a/libraries/aleth/libp2p/Host.cpp 
b/libraries/aleth/libp2p/Host.cpp index 4d626d95ec..1e773acf73 100644 --- a/libraries/aleth/libp2p/Host.cpp +++ b/libraries/aleth/libp2p/Host.cpp @@ -55,7 +55,7 @@ Host::Host(std::string _clientVersion, KeyPair const& kp, NetworkConfig _n, Tara handshake_ctx.port = m_listenPort; handshake_ctx.client_version = m_clientVersion; handshake_ctx.on_success = [this](auto const& id, auto const& rlp, auto frame_coder, auto socket) { - ba::post(strand_, [=, this, _ = shared_from_this(), rlp = rlp.data().cropped(0, rlp.actualSize()).toBytes(), + ba::post(strand_, [=, this, rlp = rlp.data().cropped(0, rlp.actualSize()).toBytes(), frame_coder = std::move(frame_coder)]() mutable { startPeerSession(id, RLP(rlp), std::move(frame_coder), socket); }); @@ -94,11 +94,15 @@ Host::Host(std::string _clientVersion, KeyPair const& kp, NetworkConfig _n, Tara } } LOG(m_logger) << "devp2p started. Node id: " << id(); - runAcceptor(); //!!! this needs to be post to session_ioc_ as main_loop_body handles peer/session related stuff // and it should not be execute for bootnodes, but it needs to bind with strand_ // as it touching same structures as discovery part !!! 
- ba::post(session_ioc_, [this] { ba::post(strand_, [this] { main_loop_body(); }); }); + ba::post(session_ioc_, [this] { + ba::post(strand_, [this] { + runAcceptor(); + main_loop_body(); + }); + }); } std::shared_ptr Host::make(std::string _clientVersion, CapabilitiesFactory const& cap_factory, KeyPair const& kp, @@ -116,10 +120,6 @@ std::shared_ptr Host::make(std::string _clientVersion, CapabilitiesFactory } Host::~Host() { - // reset io_context (allows manually polling network, below) - ioc_.stop(); - session_ioc_.restart(); - // shutdown acceptor from same executor ba::post(m_tcp4Acceptor.get_executor(), [this] { m_tcp4Acceptor.cancel(); @@ -136,9 +136,13 @@ Host::~Host() { s->disconnect(ClientQuit); } } - while (0 < session_ioc_.poll()) + // We need to poll both as strand_ is ioc_ + while (0 < session_ioc_.poll() + ioc_.poll()) ; save_state(); + + ioc_.restart(); + session_ioc_.restart(); } ba::io_context::count_type Host::do_work() { @@ -423,7 +427,7 @@ void Host::runAcceptor() { } else { // incoming connection; we don't yet know nodeid auto handshake = make_shared(handshake_ctx_, socket); - ba::post(strand_, [=, this, this_shared = shared_from_this()] { + ba::post(strand_, [=, this] { m_connecting.push_back(handshake); handshake->start(); }); @@ -462,24 +466,24 @@ void Host::connect(shared_ptr const& _p) { bi::tcp::endpoint ep(_p->get_endpoint()); cnetdetails << "Attempting connection to " << _p->id << "@" << ep << " from " << id(); auto socket = make_shared(bi::tcp::socket(make_strand(session_ioc_))); - socket->ref().async_connect( - ep, ba::bind_executor(strand_, [=, this, this_shared = shared_from_this()](boost::system::error_code const& ec) { - _p->m_lastAttempted = chrono::system_clock::now(); - _p->m_failedAttempts++; - - if (ec) { - cnetdetails << "Connection refused to node " << _p->id << "@" << ep << " (" << ec.message() << ")"; - // Manually set error (session not present) - _p->m_lastDisconnect = TCPError; - } else { - cnetdetails << "Starting RLPX 
handshake with " << _p->id << "@" << ep; - auto handshake = make_shared(handshake_ctx_, socket, _p->id); - m_connecting.push_back(handshake); - - handshake->start(); - } - m_pendingPeerConns.erase(nptr); - })); + socket->ref().async_connect(ep, ba::bind_executor(strand_, [=, this](boost::system::error_code const& ec) { + _p->m_lastAttempted = chrono::system_clock::now(); + _p->m_failedAttempts++; + + if (ec) { + cnetdetails << "Connection refused to node " << _p->id << "@" << ep << " (" + << ec.message() << ")"; + // Manually set error (session not present) + _p->m_lastDisconnect = TCPError; + } else { + cnetdetails << "Starting RLPX handshake with " << _p->id << "@" << ep; + auto handshake = make_shared(handshake_ctx_, socket, _p->id); + m_connecting.push_back(handshake); + + handshake->start(); + } + m_pendingPeerConns.erase(nptr); + })); } PeerSessionInfos Host::peerSessionInfos() const { From 02765ac4eff25622c2d6122275053285fb79930a Mon Sep 17 00:00:00 2001 From: mfrankovi Date: Wed, 10 May 2023 15:05:58 +0200 Subject: [PATCH 148/162] chore: transaction pool overflow improvements --- .../consensus/include/transaction/transaction_queue.hpp | 2 +- .../core_libs/consensus/src/transaction/transaction_manager.cpp | 2 +- .../core_libs/network/include/network/tarcap/taraxa_peer.hpp | 2 +- tests/network_test.cpp | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/libraries/core_libs/consensus/include/transaction/transaction_queue.hpp b/libraries/core_libs/consensus/include/transaction/transaction_queue.hpp index 2b1d167989..eff42ef513 100644 --- a/libraries/core_libs/consensus/include/transaction/transaction_queue.hpp +++ b/libraries/core_libs/consensus/include/transaction/transaction_queue.hpp @@ -144,7 +144,7 @@ class TransactionQueue { // If transactions are dropped within last kTransactionOverflowTimeLimit seconds, dag blocks with missing transactions // will not be treated as malicious - const std::chrono::seconds 
kTransactionOverflowTimeLimit{300}; + const std::chrono::seconds kTransactionOverflowTimeLimit{600}; // Limit when non proposable transactions expire const size_t kNonProposableTransactionsPeriodExpiryLimit = 10; diff --git a/libraries/core_libs/consensus/src/transaction/transaction_manager.cpp b/libraries/core_libs/consensus/src/transaction/transaction_manager.cpp index 725978795b..0e1c9ce7ac 100644 --- a/libraries/core_libs/consensus/src/transaction/transaction_manager.cpp +++ b/libraries/core_libs/consensus/src/transaction/transaction_manager.cpp @@ -380,7 +380,7 @@ std::optional TransactionManager::getBlockTransactions(DagBl transactions.emplace_back(std::move(trx)); } } else { - LOG(log_er_) << "Block " << blk.getHash() << " has missing transaction " << finalizedTransactions.second; + LOG(log_nf_) << "Block " << blk.getHash() << " has missing transaction " << finalizedTransactions.second; return std::nullopt; } diff --git a/libraries/core_libs/network/include/network/tarcap/taraxa_peer.hpp b/libraries/core_libs/network/include/network/tarcap/taraxa_peer.hpp index 8defcb5886..34df483adc 100644 --- a/libraries/core_libs/network/include/network/tarcap/taraxa_peer.hpp +++ b/libraries/core_libs/network/include/network/tarcap/taraxa_peer.hpp @@ -127,7 +127,7 @@ class TaraxaPeer : public boost::noncopyable { const uint64_t kMaxSuspiciousPacketPerMinute = 1000; // Performance extensive dag syncing is only allowed to be requested once each kDagSyncingLimit seconds - const uint64_t kDagSyncingLimit = 300; + const uint64_t kDagSyncingLimit = 60; // Packets stats for packets sent by *this TaraxaPeer PacketsStats sent_packets_stats_; diff --git a/tests/network_test.cpp b/tests/network_test.cpp index c8097b8440..c2b9091c80 100644 --- a/tests/network_test.cpp +++ b/tests/network_test.cpp @@ -1396,7 +1396,7 @@ TEST_F(NetworkTest, suspicious_packets) { TEST_F(NetworkTest, dag_syncing_limit) { network::tarcap::TaraxaPeer peer1, peer2; - const uint64_t dag_sync_limit = 300; + 
const uint64_t dag_sync_limit = 60; EXPECT_TRUE(peer1.dagSyncingAllowed()); peer1.peer_dag_synced_ = true; From 09a5da17c6d96b6ac2131e73f81b48570835bd5c Mon Sep 17 00:00:00 2001 From: mfrankovi Date: Wed, 12 Apr 2023 15:32:12 +0200 Subject: [PATCH 149/162] chore: improve sync-gossip transition --- .../consensus/src/pbft/pbft_manager.cpp | 19 ++++++++++++++----- .../get_pbft_sync_packet_handler.hpp | 2 +- .../get_pbft_sync_packet_handler.cpp | 8 ++++++-- .../packets_handlers/vote_packet_handler.cpp | 10 +++++----- 4 files changed, 26 insertions(+), 13 deletions(-) diff --git a/libraries/core_libs/consensus/src/pbft/pbft_manager.cpp b/libraries/core_libs/consensus/src/pbft/pbft_manager.cpp index 6bd9270dc3..e02c55c8fd 100644 --- a/libraries/core_libs/consensus/src/pbft/pbft_manager.cpp +++ b/libraries/core_libs/consensus/src/pbft/pbft_manager.cpp @@ -301,6 +301,11 @@ bool PbftManager::tryPushCertVotesBlock() { auto pbft_block = getValidPbftProposedBlock(current_pbft_period, certified_block_hash); if (!pbft_block) { LOG(log_er_) << "Invalid certified block " << certified_block_hash; + auto net = network_.lock(); + // If block/reward votes are missing but block is cert voted other nodes probably advanced, sync + if (net) { + net->restartSyncingPbft(); + } return false; } @@ -664,12 +669,16 @@ bool PbftManager::stateOperations_() { // Process synced blocks pushSyncedPbftBlocksIntoChain(); - // (Re)broadcast votes if needed - broadcastVotes(); + auto net = network_.lock(); + // Only broadcast votes and try to push cert voted block if node is not syncing + if (net && !net->pbft_syncing()) { + // (Re)broadcast votes if needed + broadcastVotes(); - // Check if there is 2t+1 cert votes for some valid block, if so - push it into the chain - if (tryPushCertVotesBlock()) { - return true; + // Check if there is 2t+1 cert votes for some valid block, if so - push it into the chain + if (tryPushCertVotesBlock()) { + return true; + } } // Check if there is 2t+1 next votes for some 
valid block, if so - advance round diff --git a/libraries/core_libs/network/include/network/tarcap/packets_handlers/get_pbft_sync_packet_handler.hpp b/libraries/core_libs/network/include/network/tarcap/packets_handlers/get_pbft_sync_packet_handler.hpp index 856bc52eca..bccf58638f 100644 --- a/libraries/core_libs/network/include/network/tarcap/packets_handlers/get_pbft_sync_packet_handler.hpp +++ b/libraries/core_libs/network/include/network/tarcap/packets_handlers/get_pbft_sync_packet_handler.hpp @@ -20,7 +20,7 @@ class GetPbftSyncPacketHandler final : public PacketHandler { std::shared_ptr vote_mgr, std::shared_ptr db, const addr_t& node_addr); - void sendPbftBlocks(dev::p2p::NodeID const& peer_id, PbftPeriod from_period, size_t blocks_to_transfer, + void sendPbftBlocks(const std::shared_ptr& peer, PbftPeriod from_period, size_t blocks_to_transfer, bool pbft_chain_synced); // Packet type that is processed by this handler diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/get_pbft_sync_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/get_pbft_sync_packet_handler.cpp index c5b0113f89..7d569f08ca 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/get_pbft_sync_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/get_pbft_sync_packet_handler.cpp @@ -59,12 +59,13 @@ void GetPbftSyncPacketHandler::process(const PacketData &packet_data, } LOG(log_tr_) << "Will send " << blocks_to_transfer << " PBFT blocks to " << packet_data.from_node_id_; - sendPbftBlocks(packet_data.from_node_id_, height_to_sync, blocks_to_transfer, pbft_chain_synced); + sendPbftBlocks(peer, height_to_sync, blocks_to_transfer, pbft_chain_synced); } // api for pbft syncing -void GetPbftSyncPacketHandler::sendPbftBlocks(dev::p2p::NodeID const &peer_id, PbftPeriod from_period, +void GetPbftSyncPacketHandler::sendPbftBlocks(const std::shared_ptr &peer, PbftPeriod from_period, size_t blocks_to_transfer, bool 
pbft_chain_synced) { + const auto &peer_id = peer->getId(); LOG(log_tr_) << "sendPbftBlocks: peer want to sync from pbft chain height " << from_period << ", will send at most " << blocks_to_transfer << " pbft blocks to " << peer_id; @@ -95,6 +96,9 @@ void GetPbftSyncPacketHandler::sendPbftBlocks(dev::p2p::NodeID const &peer_id, P } LOG(log_dg_) << "Sending PbftSyncPacket period " << block_period << " to " << peer_id; sealAndSend(peer_id, SubprotocolPacketType::PbftSyncPacket, std::move(s)); + if (pbft_chain_synced && last_block) { + peer->syncing_ = false; + } } } diff --git a/libraries/core_libs/network/src/tarcap/packets_handlers/vote_packet_handler.cpp b/libraries/core_libs/network/src/tarcap/packets_handlers/vote_packet_handler.cpp index 3051d87122..5810b0518a 100644 --- a/libraries/core_libs/network/src/tarcap/packets_handlers/vote_packet_handler.cpp +++ b/libraries/core_libs/network/src/tarcap/packets_handlers/vote_packet_handler.cpp @@ -40,6 +40,11 @@ void VotePacketHandler::process(const PacketData &packet_data, const std::shared LOG(log_dg_) << "Received PBFT vote " << vote->getHash(); } + // Update peer's max chain size + if (peer_chain_size.has_value() && *peer_chain_size > peer->pbft_chain_size_) { + peer->pbft_chain_size_ = *peer_chain_size; + } + const auto vote_hash = vote->getHash(); if (!isPbftRelevantVote(vote)) { @@ -74,11 +79,6 @@ void VotePacketHandler::process(const PacketData &packet_data, const std::shared // Do not mark it before, as peers have small caches of known votes. 
Only mark gossiping votes peer->markVoteAsKnown(vote_hash); onNewPbftVote(vote, pbft_block); - - // Update peer's max chain size - if (peer_chain_size.has_value() && vote->getVoter() == peer->getId() && *peer_chain_size > peer->pbft_chain_size_) { - peer->pbft_chain_size_ = *peer_chain_size; - } } void VotePacketHandler::onNewPbftVote(const std::shared_ptr &vote, const std::shared_ptr &block, From 73e281b69e0061ff2b63692be11e8d2b9513eece Mon Sep 17 00:00:00 2001 From: rjonczy Date: Tue, 16 May 2023 14:48:55 +0200 Subject: [PATCH 150/162] improvement to build process --- .circleci/config.yml | 140 +++++++++++++++++++++++++++++++++++-------- Dockerfile | 84 +------------------------- builder.Dockerfile | 81 +++++++++++++++++++++++++ 3 files changed, 196 insertions(+), 109 deletions(-) create mode 100644 builder.Dockerfile diff --git a/.circleci/config.yml b/.circleci/config.yml index 7e64d59226..7cd6c5d5a2 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -26,7 +26,6 @@ orbs: github-release: izumin5210/github-release@0.1.1 gcp-gcr: circleci/gcp-gcr@0.13.0 gcp-cli: circleci/gcp-cli@2.4.1 - docker-cache: cci-x/docker-registry-image-cache@0.2.12 helm-release: taraxa/helm-release@0.1.1 commands: @@ -328,39 +327,46 @@ commands: echo "export HELM_TEST_NAME=pr-${PR}" >> $BASH_ENV echo "export DOCKER_BRANCH_TAG=$(./scripts/docker_tag_from_branch.sh $CIRCLE_BRANCH)" >> $BASH_ENV echo "export GCP_IMAGE=gcr.io/${GOOGLE_PROJECT_ID}/${IMAGE}" >> $BASH_ENV - - run: - name: Clean old Images if disk available is lower than 15G - command: | - for time in 120 96 72 48 24 12 6 3 1 0 - do - if [ $(df /var/lib/docker --block-size=1073741824 --output=avail|grep -v Avail) -lt 15 ]; then - df /var/lib/docker --block-size=1073741824 --output=avail - echo "Pruning images older than ${time}h" - docker image prune -a --force --filter "until=${time}h" - fi - done build: - description: Builds docker images + description: Builds docker image (node) steps: - - run: - name: Build 
builder image - command: | - docker build -t ${IMAGE}:${VERSION} --target builder . - run: name: Checkout Submodules command: | git submodule sync git submodule update --init --recursive --jobs 8 - run: - name: Build ctest image + name: Compile and build binaries command: | docker build -t ${IMAGE}-ctest:${VERSION} --target build . - run: - name: Build taraxad image + name: Build final Docker image (${IMAGE}) command: | docker build -t ${IMAGE}:${VERSION} . + build_builder: + description: Builds docker image (builder) + steps: + - run: + name: Build builder image + command: | + docker build -t ${IMAGE}:${VERSION} -f builder.Dockerfile . + + tag_builder: + description: Tags docker images (builder) + steps: + - run: + name: Tag images + command: | + + if [[ ${CIRCLE_TAG} != "" ]];then + docker tag ${IMAGE}:${VERSION} taraxa/${IMAGE}:${CIRCLE_TAG} + docker tag ${IMAGE}:${VERSION} taraxa/${IMAGE}:latest + else + docker tag ${IMAGE}:${VERSION} taraxa/${IMAGE}:${VERSION} + fi + tag: description: Tags docker images steps: @@ -395,9 +401,9 @@ commands: # tag images for 'feature' branches (based on PRs) if [[ ${PR} != "false" ]]; then - docker tag ${IMAGE}:${VERSION} ${GCP_IMAGE}:pr-${PR}-${CIRCLE_BUILD_NUM} - docker tag ${IMAGE}:${VERSION} ${GCP_IMAGE}:pr-${PR}-${CIRCLE_SHA1} - docker tag ${IMAGE}:${VERSION} ${GCP_IMAGE}:pr-${PR}-${SHORT_GIT_HASH} + echo docker tag ${IMAGE}:${VERSION} ${GCP_IMAGE}:pr-${PR}-${CIRCLE_BUILD_NUM} + echo docker tag ${IMAGE}:${VERSION} ${GCP_IMAGE}:pr-${PR}-${CIRCLE_SHA1} + echo docker tag ${IMAGE}:${VERSION} ${GCP_IMAGE}:pr-${PR}-${SHORT_GIT_HASH} fi # tag images for release (based on tag) @@ -462,8 +468,15 @@ commands: - run: name: Push Images command: | - docker push taraxa/${IMAGE}:${CIRCLE_TAG} - docker push taraxa/${IMAGE}:latest + + if [[ ${CIRCLE_TAG} != "" ]];then + docker push taraxa/${IMAGE}:${CIRCLE_TAG} + docker push taraxa/${IMAGE}:latest + else + docker push taraxa/${IMAGE}:${VERSION} + fi + + test: description: Run tests @@ 
-691,6 +704,34 @@ jobs: - store_artifacts: path: tmp_docker + build-builder-docker-image: + environment: + - IMAGE: taraxa-builder + - CONAN_REVISIONS_ENABLED: 1 + machine: + image: ubuntu-2204:2022.04.2 + docker_layer_caching: true + resource_class: large + steps: + - checkout + - prepare + - run: + name: List images restored from DLC + command: | + docker images + - build_builder + - run: + name: List images to be saved in DLC + command: | + docker images + - run: + name: Show layers of taraxa-builder image + command: | + docker history taraxa-builder:${VERSION} + - tag_builder + - push_dockerhub + + release-docker-image: environment: - IMAGE: taraxa-node @@ -995,6 +1036,47 @@ workflows: - build-linux - build-mac + + # ### workflows for builder image ### # + # run this workflow for specific branches 'builder/*' + build-builder-docker-image: + when: + and: + - not: << pipeline.parameters.deploy_prnet >> + - not: << pipeline.parameters.redeploy_prnet >> + - not: << pipeline.parameters.cleanup_prnet >> + jobs: + - build-builder-docker-image: + filters: + branches: + only: + - /^builder\/.*/ + - /^chore\/builder-.*/ + - /^fix\/builder-.*/ + - /^feature\/builder-.*/ + context: + - DOCKERHUB + + # run this workflow for specific tags 'builder/*' + release-builder-docker-image: + when: + and: + - not: << pipeline.parameters.deploy_prnet >> + - not: << pipeline.parameters.redeploy_prnet >> + - not: << pipeline.parameters.cleanup_prnet >> + jobs: + - build-builder-docker-image: + filters: + branches: + ignore: /.*/ + tags: + only: /^builder-v\d+.\d+.\d+/ + context: + - DOCKERHUB + # ### workflows for builder image ### # + + + # ### workflows for taraxa-node image ### # # run this workflow for all branches apart those reserved for chart build-docker-image: when: @@ -1011,6 +1093,10 @@ workflows: - /^chore\/chart-.*/ - /^fix\/chart-.*/ - /^feature\/chart-.*/ + - /^builder\/.*/ + - /^chore\/builder-.*/ + - /^fix\/builder-.*/ + - /^feature\/builder-.*/ context: - TARAXA - GCP 
@@ -1035,7 +1121,9 @@ workflows: - K8S - GCR - DOCKERHUB + # ### workflows for taraxa-node image ### # + # ### workflows for taraxa-node helm chart ### # # run this workflow for branches specified below build-helm-chart: when: @@ -1061,7 +1149,6 @@ workflows: - /^fix\/chart-.*/ - /^feature\/chart-.*/ - # run this workflow for tags, like chart-vX.Y.Z release-helm-chart: jobs: @@ -1077,4 +1164,5 @@ workflows: branches: ignore: /.*/ tags: - only: /^chart-v\d+.\d+.\d+/ \ No newline at end of file + only: /^chart-v\d+.\d+.\d+/ + # ### workflows for taraxa-node helm chart ### # diff --git a/Dockerfile b/Dockerfile index 23997bd2bf..d347ffc8f0 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,91 +1,9 @@ -# Default output dir containing build artifacts ARG BUILD_OUTPUT_DIR=cmake-docker-build-debug -############################################# -# builder image - contains all dependencies # -############################################# -FROM ubuntu:22.04 as builder - - -# deps versions -ARG LLVM_VERSION=14 - -# Install standard packages -RUN apt-get update && DEBIAN_FRONTEND=noninteractive \ - apt-get install -y --no-install-recommends \ - tzdata \ - && apt-get install -y \ - tar \ - git \ - curl \ - wget \ - python3-pip \ - lsb-release \ - libgmp-dev \ - libmpfr-dev \ - libmicrohttpd-dev \ - software-properties-common \ - && rm -rf /var/lib/apt/lists/* - -# install solc for py_test if arch is not arm64 because it is not availiable -RUN \ -if [ `arch` != "aarch64" ]; \ -then \ - add-apt-repository ppa:ethereum/ethereum \ - && apt-get update \ - && apt install solc; \ -fi - -# install standart tools -RUN add-apt-repository ppa:ethereum/ethereum \ - && apt-get update \ - && apt-get install -y \ - clang-format-$LLVM_VERSION \ - clang-tidy-$LLVM_VERSION \ - llvm-$LLVM_VERSION \ - golang-go \ - ca-certificates \ - libtool \ - autoconf \ - binutils \ - cmake \ - ccache \ - # this libs are required for arm build by go part - libzstd-dev \ - libsnappy-dev \ - # replace this with 
conan dependency - rapidjson-dev \ - && rm -rf /var/lib/apt/lists/* - -ENV CXX="clang++-${LLVM_VERSION}" -ENV CC="clang-${LLVM_VERSION}" - -# HACK remove this when update to conan 2.0 -RUN ln -s /usr/bin/clang-${LLVM_VERSION} /usr/bin/clang -RUN ln -s /usr/bin/clang++-${LLVM_VERSION} /usr/bin/clang++ - -# Install conan -RUN pip3 install conan==1.59.0 - -ENV CONAN_REVISIONS_ENABLED=1 - -# Install conan deps -WORKDIR /opt/taraxa/ -COPY conanfile.py . - -RUN conan profile new clang --detect \ - && conan profile update settings.compiler=clang clang \ - && conan profile update settings.compiler.version=$LLVM_VERSION clang \ - && conan profile update settings.compiler.libcxx=libstdc++11 clang \ - && conan profile update settings.build_type=RelWithDebInfo clang \ - && conan profile update env.CC=clang-$LLVM_VERSION clang \ - && conan profile update env.CXX=clang++-$LLVM_VERSION clang \ - && conan install --build missing -pr=clang . - ################################################################### # Build stage - use builder image for actual build of taraxa node # ################################################################### -FROM builder as build +FROM taraxa/taraxa-builder:a0bf43231 as build # Default output dir containing build artifacts ARG BUILD_OUTPUT_DIR diff --git a/builder.Dockerfile b/builder.Dockerfile new file mode 100644 index 0000000000..1ecf5c8458 --- /dev/null +++ b/builder.Dockerfile @@ -0,0 +1,81 @@ +############################################# +# builder image - contains all dependencies # +############################################# +FROM ubuntu:22.04 + +# deps versions +ARG LLVM_VERSION=14 + +# Install standard packages +RUN apt-get update && DEBIAN_FRONTEND=noninteractive \ + apt-get install -y --no-install-recommends \ + tzdata \ + && apt-get install -y \ + tar \ + git \ + curl \ + wget \ + python3-pip \ + lsb-release \ + libgmp-dev \ + libmpfr-dev \ + libmicrohttpd-dev \ + software-properties-common \ + && rm -rf /var/lib/apt/lists/* + 
+# install solc for py_test if arch is not arm64 because it is not availiable +RUN \ +if [ `arch` != "aarch64" ]; \ +then \ + add-apt-repository ppa:ethereum/ethereum \ + && apt-get update \ + && apt install solc; \ +fi + +# install standart tools +RUN add-apt-repository ppa:ethereum/ethereum \ + && apt-get update \ + && apt-get install -y \ + clang-format-$LLVM_VERSION \ + clang-tidy-$LLVM_VERSION \ + llvm-$LLVM_VERSION \ + golang-go \ + ca-certificates \ + libtool \ + autoconf \ + binutils \ + cmake \ + ccache \ + # this libs are required for arm build by go part + libzstd-dev \ + libsnappy-dev \ + # replace this with conan dependency + rapidjson-dev \ + && rm -rf /var/lib/apt/lists/* + +ENV CXX="clang++-${LLVM_VERSION}" +ENV CC="clang-${LLVM_VERSION}" + +# HACK remove this when update to conan 2.0 +RUN ln -s /usr/bin/clang-${LLVM_VERSION} /usr/bin/clang +RUN ln -s /usr/bin/clang++-${LLVM_VERSION} /usr/bin/clang++ + +# Install conan +RUN pip3 install conan==1.59.0 + +ENV CONAN_REVISIONS_ENABLED=1 + +# Install conan deps +WORKDIR /opt/taraxa/ +COPY conanfile.py . + +RUN conan profile new clang --detect \ + && conan profile update settings.compiler=clang clang \ + && conan profile update settings.compiler.version=$LLVM_VERSION clang \ + && conan profile update settings.compiler.libcxx=libstdc++11 clang \ + && conan profile update settings.build_type=RelWithDebInfo clang \ + && conan profile update env.CC=clang-$LLVM_VERSION clang \ + && conan profile update env.CXX=clang++-$LLVM_VERSION clang \ + && conan install --build missing -pr=clang . 
+ + From 2247c918e427ce01dfdb15ea6ac7b0f92aacc22c Mon Sep 17 00:00:00 2001 From: rjonczy Date: Tue, 16 May 2023 15:06:45 +0200 Subject: [PATCH 151/162] fix to docker tags --- .circleci/config.yml | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 7cd6c5d5a2..9535867667 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -329,7 +329,7 @@ commands: echo "export GCP_IMAGE=gcr.io/${GOOGLE_PROJECT_ID}/${IMAGE}" >> $BASH_ENV build: - description: Builds docker image (node) + description: Builds docker image (${IMAGE}) steps: - run: name: Checkout Submodules @@ -346,7 +346,7 @@ commands: docker build -t ${IMAGE}:${VERSION} . build_builder: - description: Builds docker image (builder) + description: Builds docker image (${IMAGE}) steps: - run: name: Build builder image @@ -361,6 +361,7 @@ commands: command: | if [[ ${CIRCLE_TAG} != "" ]];then + TAG=$() docker tag ${IMAGE}:${VERSION} taraxa/${IMAGE}:${CIRCLE_TAG} docker tag ${IMAGE}:${VERSION} taraxa/${IMAGE}:latest else @@ -401,14 +402,15 @@ commands: # tag images for 'feature' branches (based on PRs) if [[ ${PR} != "false" ]]; then - echo docker tag ${IMAGE}:${VERSION} ${GCP_IMAGE}:pr-${PR}-${CIRCLE_BUILD_NUM} - echo docker tag ${IMAGE}:${VERSION} ${GCP_IMAGE}:pr-${PR}-${CIRCLE_SHA1} - echo docker tag ${IMAGE}:${VERSION} ${GCP_IMAGE}:pr-${PR}-${SHORT_GIT_HASH} + docker tag ${IMAGE}:${VERSION} ${GCP_IMAGE}:pr-${PR}-${CIRCLE_BUILD_NUM} + docker tag ${IMAGE}:${VERSION} ${GCP_IMAGE}:pr-${PR}-${CIRCLE_SHA1} + docker tag ${IMAGE}:${VERSION} ${GCP_IMAGE}:pr-${PR}-${SHORT_GIT_HASH} fi # tag images for release (based on tag) if [[ ${CIRCLE_TAG} != "" ]];then - docker tag ${IMAGE}:${VERSION} taraxa/${IMAGE}:${CIRCLE_TAG} + TAG=$(echo ${CIRCLE_TAG} | sed 's/^builder-//g') + docker tag ${IMAGE}:${VERSION} taraxa/${IMAGE}:${TAG} docker tag ${IMAGE}:${VERSION} taraxa/${IMAGE}:latest fi @@ -470,7 +472,8 @@ commands: command: | if [[ 
${CIRCLE_TAG} != "" ]];then - docker push taraxa/${IMAGE}:${CIRCLE_TAG} + TAG=$(echo ${CIRCLE_TAG} | sed 's/^builder-//g') + docker push taraxa/${IMAGE}:${TAG} docker push taraxa/${IMAGE}:latest else docker push taraxa/${IMAGE}:${VERSION} From dfbb266ca29584f7d3e52fa33f66cc48fc3cabd5 Mon Sep 17 00:00:00 2001 From: rjonczy Date: Tue, 16 May 2023 15:24:53 +0200 Subject: [PATCH 152/162] couple fixes --- .circleci/config.yml | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 9535867667..f9dd72f287 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -361,8 +361,8 @@ commands: command: | if [[ ${CIRCLE_TAG} != "" ]];then - TAG=$() - docker tag ${IMAGE}:${VERSION} taraxa/${IMAGE}:${CIRCLE_TAG} + TAG=$(echo ${CIRCLE_TAG} | sed 's/^builder-//g') + docker tag ${IMAGE}:${VERSION} taraxa/${IMAGE}:${TAG} docker tag ${IMAGE}:${VERSION} taraxa/${IMAGE}:latest else docker tag ${IMAGE}:${VERSION} taraxa/${IMAGE}:${VERSION} @@ -409,8 +409,7 @@ commands: # tag images for release (based on tag) if [[ ${CIRCLE_TAG} != "" ]];then - TAG=$(echo ${CIRCLE_TAG} | sed 's/^builder-//g') - docker tag ${IMAGE}:${VERSION} taraxa/${IMAGE}:${TAG} + docker tag ${IMAGE}:${VERSION} taraxa/${IMAGE}:${CIRCLE_TAG} docker tag ${IMAGE}:${VERSION} taraxa/${IMAGE}:latest fi From e94057c3fcefe50a95c0b3ed71411243ddbdf20d Mon Sep 17 00:00:00 2001 From: rjonczy Date: Tue, 16 May 2023 15:31:06 +0200 Subject: [PATCH 153/162] changed based image of builder --- Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index d347ffc8f0..f7dcc07e97 100644 --- a/Dockerfile +++ b/Dockerfile @@ -3,7 +3,7 @@ ARG BUILD_OUTPUT_DIR=cmake-docker-build-debug ################################################################### # Build stage - use builder image for actual build of taraxa node # ################################################################### -FROM taraxa/taraxa-builder:a0bf43231 as 
build +FROM taraxa/taraxa-builder:v0.1.0 as build # Default output dir containing build artifacts ARG BUILD_OUTPUT_DIR From b9dccdd2f1596820e17060cc92eca39059a89db6 Mon Sep 17 00:00:00 2001 From: rjonczy Date: Wed, 17 May 2023 15:18:59 +0200 Subject: [PATCH 154/162] build multiarch builder --- .circleci/config.yml | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index f9dd72f287..96739f8b19 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -346,12 +346,16 @@ commands: docker build -t ${IMAGE}:${VERSION} . build_builder: - description: Builds docker image (${IMAGE}) + description: Builds docker image steps: - run: - name: Build builder image - command: | - docker build -t ${IMAGE}:${VERSION} -f builder.Dockerfile . + name: Build builder image (multiarch) + command: | + # docker build -t ${IMAGE}:${VERSION} -f builder.Dockerfile . + docker buildx build \ + --output=type=local \ + --platform linux/arm64,linux/amd64 \ + --tag taraxa/${IMAGE}:{VERSION} tag_builder: description: Tags docker images (builder) @@ -730,8 +734,8 @@ jobs: name: Show layers of taraxa-builder image command: | docker history taraxa-builder:${VERSION} - - tag_builder - - push_dockerhub +# - tag_builder +# - push_dockerhub release-docker-image: From 2a2830192c9d655338b5f23f84b249b877bab9f1 Mon Sep 17 00:00:00 2001 From: rjonczy Date: Wed, 17 May 2023 15:23:13 +0200 Subject: [PATCH 155/162] fix --- .circleci/config.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 96739f8b19..9bb8233d3e 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -355,7 +355,8 @@ commands: docker buildx build \ --output=type=local \ --platform linux/arm64,linux/amd64 \ - --tag taraxa/${IMAGE}:{VERSION} + --tag taraxa/${IMAGE}:{VERSION} \ + -f builder.Dockerfile . 
tag_builder: description: Tags docker images (builder) From 1343feafb5bf5c4bd44c05cf6bd361fdcde43a0f Mon Sep 17 00:00:00 2001 From: rjonczy Date: Wed, 17 May 2023 15:29:18 +0200 Subject: [PATCH 156/162] fix --- .circleci/config.yml | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 9bb8233d3e..75124f455f 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -352,11 +352,7 @@ commands: name: Build builder image (multiarch) command: | # docker build -t ${IMAGE}:${VERSION} -f builder.Dockerfile . - docker buildx build \ - --output=type=local \ - --platform linux/arm64,linux/amd64 \ - --tag taraxa/${IMAGE}:{VERSION} \ - -f builder.Dockerfile . + docker buildx build --output=type=local --platform linux/arm64,linux/amd64 --tag taraxa/${IMAGE}:{VERSION} -f builder.Dockerfile . tag_builder: description: Tags docker images (builder) From d3b57e6acfd029555d442296c8b13626c317a1e2 Mon Sep 17 00:00:00 2001 From: rjonczy Date: Wed, 17 May 2023 15:30:30 +0200 Subject: [PATCH 157/162] fix --- .circleci/config.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.circleci/config.yml b/.circleci/config.yml index 75124f455f..e10ce753a9 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -351,6 +351,7 @@ commands: - run: name: Build builder image (multiarch) command: | + echo "hello" # docker build -t ${IMAGE}:${VERSION} -f builder.Dockerfile . docker buildx build --output=type=local --platform linux/arm64,linux/amd64 --tag taraxa/${IMAGE}:{VERSION} -f builder.Dockerfile . 
From e78d548e661bb02a8b68db0e5935634b6a06cd23 Mon Sep 17 00:00:00 2001 From: rjonczy Date: Wed, 17 May 2023 15:31:28 +0200 Subject: [PATCH 158/162] fix ident --- .circleci/config.yml | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index e10ce753a9..e4dd899dd4 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -349,11 +349,10 @@ commands: description: Builds docker image steps: - run: - name: Build builder image (multiarch) - command: | - echo "hello" - # docker build -t ${IMAGE}:${VERSION} -f builder.Dockerfile . - docker buildx build --output=type=local --platform linux/arm64,linux/amd64 --tag taraxa/${IMAGE}:{VERSION} -f builder.Dockerfile . + name: Build builder image (multiarch) + command: | + # docker build -t ${IMAGE}:${VERSION} -f builder.Dockerfile . + docker buildx build --output=type=local --platform linux/arm64,linux/amd64 --tag taraxa/${IMAGE}:{VERSION} -f builder.Dockerfile . tag_builder: description: Tags docker images (builder) From 686f8e92da8fdf3156ebd3b52b3ab52e989c95bb Mon Sep 17 00:00:00 2001 From: rjonczy Date: Wed, 17 May 2023 15:48:28 +0200 Subject: [PATCH 159/162] fix --- .circleci/config.yml | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index e4dd899dd4..ed025f573f 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -352,7 +352,11 @@ commands: name: Build builder image (multiarch) command: | # docker build -t ${IMAGE}:${VERSION} -f builder.Dockerfile . - docker buildx build --output=type=local --platform linux/arm64,linux/amd64 --tag taraxa/${IMAGE}:{VERSION} -f builder.Dockerfile . + docker buildx build \ + --output=type=local,dst=/tmp \ + --platform linux/arm64,linux/amd64 \ + --tag taraxa/${IMAGE}:{VERSION} \ + -f builder.Dockerfile . 
tag_builder: description: Tags docker images (builder) From 9691ab05c6b91dfca2feea8f6e1fdbed4a656f94 Mon Sep 17 00:00:00 2001 From: rjonczy Date: Wed, 17 May 2023 15:51:20 +0200 Subject: [PATCH 160/162] f --- .circleci/config.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index ed025f573f..8b45dc0d6b 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -353,7 +353,7 @@ commands: command: | # docker build -t ${IMAGE}:${VERSION} -f builder.Dockerfile . docker buildx build \ - --output=type=local,dst=/tmp \ + --output=type=local,dest=/tmp \ --platform linux/arm64,linux/amd64 \ --tag taraxa/${IMAGE}:{VERSION} \ -f builder.Dockerfile . From 7fd22409674b6498959eb1742c835552a935076c Mon Sep 17 00:00:00 2001 From: rjonczy Date: Wed, 17 May 2023 15:57:11 +0200 Subject: [PATCH 161/162] debug --- .circleci/config.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 8b45dc0d6b..a35e9889bb 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -355,7 +355,7 @@ commands: docker buildx build \ --output=type=local,dest=/tmp \ --platform linux/arm64,linux/amd64 \ - --tag taraxa/${IMAGE}:{VERSION} \ + --tag taraxa/${IMAGE}:123 \ -f builder.Dockerfile . tag_builder: From f33c8b2ecdc01617b921f857b7e9d3a61d51d437 Mon Sep 17 00:00:00 2001 From: rjonczy Date: Wed, 17 May 2023 16:10:49 +0200 Subject: [PATCH 162/162] added builder --- .circleci/config.yml | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/.circleci/config.yml b/.circleci/config.yml index a35e9889bb..1d1faae4e2 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -348,6 +348,11 @@ commands: build_builder: description: Builds docker image steps: + - run: + name: Create builder + command: | + docker buildx create --name taraxa-builder --use --bootstrap + - run: name: Build builder image (multiarch) command: |