Skip to content

Commit

Permalink
Merge pull request #2681 from Taraxa-project/dag_rewards_chg
Browse files Browse the repository at this point in the history
chore: dag rewards
  • Loading branch information
mfrankovi authored Feb 2, 2024
2 parents 61c6f09 + 20afac5 commit dc11f4f
Show file tree
Hide file tree
Showing 6 changed files with 156 additions and 9 deletions.
3 changes: 3 additions & 0 deletions libraries/config/include/config/hardfork.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -71,6 +71,9 @@ struct HardforksConfig {
// Aspen hardfork implements new yield curve
AspenHardfork aspen_hf;

// True once block_number has reached the Aspen hardfork part-one activation height
// (Aspen introduces the new yield curve — see comment on aspen_hf above).
bool isAspenHardforkPartOne(uint64_t block_number) const { return block_number >= aspen_hf.block_num_part_one; }
// True once block_number has reached the Aspen hardfork part-two activation height
// (part two switches dag-block reward accounting — see rewards::BlockStats).
bool isAspenHardforkPartTwo(uint64_t block_number) const { return block_number >= aspen_hf.block_num_part_two; }

HAS_RLP_FIELDS
};

Expand Down
20 changes: 18 additions & 2 deletions libraries/core_libs/consensus/include/rewards/block_stats.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -22,9 +22,10 @@ class BlockStats {
*
* @param dpos_vote_count - votes count for previous block
* @param committee_size
* @param aspen_dag_rewards - if true, dag-block rewards are computed with the Aspen hardfork (part two) rules
*/
BlockStats(const PeriodData& block, const std::vector<gas_t>& trxs_gas_used, uint64_t dpos_vote_count,
uint32_t committee_size);
uint32_t committee_size, const bool aspen_dag_rewards = false);

HAS_RLP_FIELDS

Expand All @@ -33,8 +34,23 @@ class BlockStats {
* @brief Process PeriodData and save stats in class for future serialization.
*
* @param block
* @param aspen_dag_rewards
*/
void processStats(const PeriodData& block);
void processStats(const PeriodData& block, const bool aspen_dag_rewards);

/**
* @brief Process Dag blocks and save stats in class for future serialization.
*
* @param block
*/
void processDagBlocks(const PeriodData& block);

/**
* @brief Process Dag blocks and save stats in class for future serialization, with Aspen hardfork changes.
*
* @param block
*/
void processDagBlocksAspen(const PeriodData& block);

/**
* @brief Prepare fee_by_trx_hash_ map with trx fee by trx hash
Expand Down
38 changes: 32 additions & 6 deletions libraries/core_libs/consensus/src/rewards/block_stats.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -7,11 +7,11 @@
namespace taraxa::rewards {

/**
 * @brief Builds per-block reward statistics: pre-computes per-transaction fees,
 *        then processes dag-block and cert-vote stats.
 *
 * NOTE: parameter renamed from `aspen_dag_reward` to `aspen_dag_rewards` to match
 * the declaration in block_stats.hpp.
 */
BlockStats::BlockStats(const PeriodData& block, const std::vector<gas_t>& trxs_gas_used, uint64_t dpos_vote_count,
                       uint32_t committee_size, const bool aspen_dag_rewards)
    : block_author_(block.pbft_blk->getBeneficiary()),
      // A block cannot collect more vote weight than the committee size allows
      max_votes_weight_(std::min<uint64_t>(committee_size, dpos_vote_count)) {
  initFeeByTrxHash(block.transactions, trxs_gas_used);
  processStats(block, aspen_dag_rewards);
}

void BlockStats::initFeeByTrxHash(const SharedTransactions& transactions, const std::vector<gas_t>& trxs_gas_used) {
Expand Down Expand Up @@ -67,13 +67,24 @@ std::set<trx_hash_t> toTrxHashesSet(const SharedTransactions& transactions) {
return block_transactions_hashes_;
}

void BlockStats::processStats(const PeriodData& block) {
void BlockStats::processStats(const PeriodData& block, const bool aspen_dag_rewards) {
// total unique transactions count should be always equal to transactions count in block
assert(fee_by_trx_hash_.size() == block.transactions.size());

validators_stats_.reserve(std::max(block.dag_blocks.size(), block.previous_block_cert_votes.size()));
auto block_transactions_hashes_ = toTrxHashesSet(block.transactions);
if (aspen_dag_rewards) {
processDagBlocksAspen(block);
} else {
processDagBlocks(block);
}

for (const auto& vote : block.previous_block_cert_votes) {
addVote(vote);
}
}

void BlockStats::processDagBlocks(const PeriodData& block) {
auto block_transactions_hashes_ = toTrxHashesSet(block.transactions);
for (const auto& dag_block : block.dag_blocks) {
const addr_t& dag_block_author = dag_block.getSender();
bool has_unique_transactions = false;
Expand All @@ -94,9 +105,24 @@ void BlockStats::processStats(const PeriodData& block) {
total_dag_blocks_count_ += 1;
}
}
}

for (const auto& vote : block.previous_block_cert_votes) {
addVote(vote);
void BlockStats::processDagBlocksAspen(const PeriodData& block) {
uint32_t min_difficulty = UINT32_MAX;
for (const auto& dag_block : block.dag_blocks) {
if (dag_block.getDifficulty() < min_difficulty) {
min_difficulty = dag_block.getDifficulty();
}
}
for (const auto& dag_block : block.dag_blocks) {
const addr_t& dag_block_author = dag_block.getSender();
if (dag_block.getDifficulty() == min_difficulty) {
validators_stats_[dag_block_author].dag_blocks_count_ += 1;
total_dag_blocks_count_ += 1;
}
for (const auto& tx_hash : dag_block.getTrxs()) {
addTransaction(tx_hash, dag_block_author);
}
}
}

Expand Down
3 changes: 2 additions & 1 deletion libraries/core_libs/consensus/src/rewards/rewards_stats.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -53,7 +53,8 @@ BlockStats Stats::getBlockStats(const PeriodData& blk, const std::vector<gas_t>&
return BlockStats{blk, {}, dpos_vote_count, kCommitteeSize};
}

return BlockStats{blk, trxs_fees, dpos_vote_count, kCommitteeSize};
const auto aspen_hf_part_two = kHardforksConfig.isAspenHardforkPartTwo(blk.pbft_blk->getPeriod());
return BlockStats{blk, trxs_fees, dpos_vote_count, kCommitteeSize, aspen_hf_part_two};
}

std::vector<BlockStats> Stats::processStats(const PeriodData& current_blk, const std::vector<gas_t>& trxs_gas_used,
Expand Down
2 changes: 2 additions & 0 deletions libraries/core_libs/node/src/node.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -431,6 +431,8 @@ void FullNode::rebuildDb() {
}
stop_async = true;
fut.wait();
// Handles the race case if some blocks are still in the queue
pbft_mgr_->pushSyncedPbftBlocksIntoChain();
LOG(log_si_) << "Rebuild completed";
}

Expand Down
99 changes: 99 additions & 0 deletions tests/rewards_stats_test.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -227,6 +227,105 @@ TEST_F(RewardsStatsTest, feeRewards) {
}
}

// Verifies dag-block reward accounting on both sides of Aspen HF part two.
// Pre-HF: blocks carrying at least one unique (first-seen) transaction are rewarded.
// Post-HF: blocks whose difficulty equals the period minimum are rewarded.
// Fee rewards must be identical in both modes.
// NOTE: removed unused locals `HardforksConfig hfc` / `hfc.aspen_hf.block_num_part_two = 4;`
// — the stats objects below construct their HardforksConfig inline.
TEST_F(RewardsStatsTest, dagBlockRewards) {
  auto db = std::make_shared<DbStorage>(data_dir / "db");
  auto batch = db->createWriteBatch();

  std::vector<std::shared_ptr<Vote>> empty_votes;

  // Create two reward stats to test before and after aspen hardfork part 2
  // (period 5 is processed, so part-two activation at 6 => pre-HF path, at 4 => post-HF path)
  rewards::Stats pre_aspen_reward_stats(100, HardforksConfig{0, 0, {}, {}, MagnoliaHardfork{0, 0}, AspenHardfork{1, 6}},
                                        db, [](auto) { return 100; });
  rewards::Stats post_aspen_reward_stats(
      100, HardforksConfig{0, 0, {}, {}, MagnoliaHardfork{0, 0}, AspenHardfork{1, 4}}, db, [](auto) { return 100; });

  // Create pbft block with 5 dag blocks
  auto dag_key1 = dev::KeyPair::create();
  auto dag_key2 = dev::KeyPair::create();
  auto dag_key3 = dev::KeyPair::create();
  auto dag_key4 = dev::KeyPair::create();
  auto dag_key5 = dev::KeyPair::create();
  // Fixed vrf secret so the per-block difficulties asserted below are deterministic
  vrf_wrapper::vrf_sk_t vrfs(
      "854821a22e1841f79f0a62409197e930eb347c05ede6456b82b07ec36acbd2fce86c6f2cd1e076ddf8eaf48cee078bd68b74063c3e229b1a"
      "5e993c791bdb56d6");
  auto trxs = samples::createSignedTrxSamples(1, 3, g_secret);

  PeriodData block(make_simple_pbft_block(blk_hash_t(1), 5), empty_votes);
  SortitionParams sortition_params(0xfff, 16, 21, 23, 0x64);

  // Blocks 1, 2 and 5 carry first-seen transactions; blocks 3 and 4 repeat earlier ones.
  vdf_sortition::VdfSortition vdf1(sortition_params, vrfs,
                                   vrf_wrapper::VrfSortitionBase::makeVrfInput(1, blk_hash_t(1)), 1, 1);
  DagBlock dag_blk1({}, {}, {}, {trxs[0]->getHash()}, 0, vdf1, dag_key1.secret());
  block.dag_blocks.push_back(dag_blk1);

  vdf_sortition::VdfSortition vdf2(sortition_params, vrfs,
                                   vrf_wrapper::VrfSortitionBase::makeVrfInput(1, blk_hash_t(2)), 1, 1);
  DagBlock dag_blk2({}, {}, {}, {trxs[1]->getHash()}, 0, vdf2, dag_key2.secret());
  block.dag_blocks.push_back(dag_blk2);

  vdf_sortition::VdfSortition vdf3(sortition_params, vrfs,
                                   vrf_wrapper::VrfSortitionBase::makeVrfInput(1, blk_hash_t(3)), 1, 1);
  DagBlock dag_blk3({}, {}, {}, {trxs[0]->getHash()}, 0, vdf3, dag_key3.secret());
  block.dag_blocks.push_back(dag_blk3);

  vdf_sortition::VdfSortition vdf4(sortition_params, vrfs,
                                   vrf_wrapper::VrfSortitionBase::makeVrfInput(1, blk_hash_t(4)), 1, 1);
  DagBlock dag_blk4({}, {}, {}, {trxs[1]->getHash()}, 0, vdf4, dag_key4.secret());
  block.dag_blocks.push_back(dag_blk4);

  vdf_sortition::VdfSortition vdf5(sortition_params, vrfs,
                                   vrf_wrapper::VrfSortitionBase::makeVrfInput(1, blk_hash_t(5)), 1, 1);
  DagBlock dag_blk5({}, {}, {}, {trxs[2]->getHash()}, 0, vdf5, dag_key5.secret());
  block.dag_blocks.push_back(dag_blk5);
  block.transactions = trxs;

  // Pin the difficulties: blocks 3 and 5 are at the minimum (16), the rest at 17.
  ASSERT_EQ(dag_blk1.getDifficulty(), 17);
  ASSERT_EQ(dag_blk2.getDifficulty(), 17);
  ASSERT_EQ(dag_blk3.getDifficulty(), 16);
  ASSERT_EQ(dag_blk4.getDifficulty(), 17);
  ASSERT_EQ(dag_blk5.getDifficulty(), 16);

  std::vector<size_t> gas_used{10, 20, 30};

  // Process rewards before aspen hf, expect dag_blocks_count to match blocks that include unique transactions which is
  // blocks 1, 2 and 5
  auto stats = pre_aspen_reward_stats.processStats(block, gas_used, batch);
  ASSERT_EQ(stats.size(), 1);
  auto stats_with_get = reinterpret_cast<TestableBlockStats*>(&stats[0]);
  ASSERT_EQ(stats_with_get->getValidatorStats().size(), 3);
  ASSERT_TRUE(stats_with_get->getValidatorStats().contains(dev::toAddress(dag_key1.pub())));
  ASSERT_TRUE(stats_with_get->getValidatorStats().contains(dev::toAddress(dag_key2.pub())));
  ASSERT_TRUE(stats_with_get->getValidatorStats().contains(dev::toAddress(dag_key5.pub())));
  ASSERT_EQ(stats_with_get->getValidatorStats().find(dev::toAddress(dag_key1.pub()))->second.dag_blocks_count_, 1);
  ASSERT_EQ(stats_with_get->getValidatorStats().find(dev::toAddress(dag_key2.pub()))->second.dag_blocks_count_, 1);
  ASSERT_EQ(stats_with_get->getValidatorStats().find(dev::toAddress(dag_key5.pub()))->second.dag_blocks_count_, 1);

  // Process rewards after aspen hf, expect dag_blocks_count to match blocks with smallest difficulty which is blocks 3
  // and 5 Verify fees rewards to be the same before and after the HF
  auto post_stats = post_aspen_reward_stats.processStats(block, gas_used, batch);
  ASSERT_EQ(post_stats.size(), 1);
  auto post_stats_with_get = reinterpret_cast<TestableBlockStats*>(&post_stats[0]);
  ASSERT_EQ(post_stats_with_get->getValidatorStats().size(), 4);
  ASSERT_TRUE(post_stats_with_get->getValidatorStats().contains(dev::toAddress(dag_key1.pub())));
  ASSERT_TRUE(post_stats_with_get->getValidatorStats().contains(dev::toAddress(dag_key2.pub())));
  ASSERT_TRUE(post_stats_with_get->getValidatorStats().contains(dev::toAddress(dag_key3.pub())));
  ASSERT_TRUE(post_stats_with_get->getValidatorStats().contains(dev::toAddress(dag_key5.pub())));
  ASSERT_EQ(post_stats_with_get->getValidatorStats().find(dev::toAddress(dag_key1.pub()))->second.dag_blocks_count_, 0);
  ASSERT_EQ(post_stats_with_get->getValidatorStats().find(dev::toAddress(dag_key2.pub()))->second.dag_blocks_count_, 0);
  ASSERT_EQ(post_stats_with_get->getValidatorStats().find(dev::toAddress(dag_key3.pub()))->second.dag_blocks_count_, 1);
  ASSERT_EQ(post_stats_with_get->getValidatorStats().find(dev::toAddress(dag_key5.pub()))->second.dag_blocks_count_, 1);

  // Fee rewards must not change across the hardfork; block 3's author got no fees
  // (its transaction was already accounted to block 1's author).
  ASSERT_EQ(stats_with_get->getValidatorStats().find(dev::toAddress(dag_key1.pub()))->second.fees_rewards_,
            post_stats_with_get->getValidatorStats().find(dev::toAddress(dag_key1.pub()))->second.fees_rewards_);
  ASSERT_EQ(stats_with_get->getValidatorStats().find(dev::toAddress(dag_key2.pub()))->second.fees_rewards_,
            post_stats_with_get->getValidatorStats().find(dev::toAddress(dag_key2.pub()))->second.fees_rewards_);
  ASSERT_EQ(stats_with_get->getValidatorStats().find(dev::toAddress(dag_key5.pub()))->second.fees_rewards_,
            post_stats_with_get->getValidatorStats().find(dev::toAddress(dag_key5.pub()))->second.fees_rewards_);
  ASSERT_EQ(post_stats_with_get->getValidatorStats().find(dev::toAddress(dag_key3.pub()))->second.fees_rewards_, 0);
}

} // namespace taraxa::core_tests

using namespace taraxa;
Expand Down

0 comments on commit dc11f4f

Please sign in to comment.