MOVEONLY: DisconnectedBlockTransactions from txmempool to validation
This is only used by validation, so it should live there.
Recommended to review with --color-moved=dimmed_zebra
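For reviewers: since this is a pure code move, git's move-detection coloring makes checking it straightforward; for example, running git show db20e51 --color-moved=dimmed_zebra dims code that was moved verbatim between files, so any unintended change stands out.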
glozow committed Sep 4, 2023
1 parent 6006e7b commit db20e51
Showing 2 changed files with 78 additions and 78 deletions.
77 changes: 0 additions & 77 deletions src/txmempool.h
@@ -837,81 +837,4 @@ class CCoinsViewMemPool : public CCoinsViewBacked
void PackageAddTransaction(const CTransactionRef& tx);
};

/**
* DisconnectedBlockTransactions
* During the reorg, it's desirable to re-add previously confirmed transactions
* to the mempool, so that anything not re-confirmed in the new chain is
* available to be mined. However, it's more efficient to wait until the reorg
* is complete and process all still-unconfirmed transactions at that time,
* since we expect most confirmed transactions to (typically) still be
* confirmed in the new chain, and re-accepting to the memory pool is expensive
* (and therefore better to not do in the middle of reorg-processing).
* Instead, store the disconnected transactions (in order!) as we go, remove any
* that are included in blocks in the new chain, and then process the remaining
* still-unconfirmed transactions at the end.
*/
struct DisconnectedBlockTransactions {
uint64_t cachedInnerUsage = 0;
std::list<CTransactionRef> queuedTx;
using Queue = decltype(queuedTx);
std::unordered_map<uint256, Queue::iterator, SaltedTxidHasher> iters_by_txid;

// It's almost certainly a logic bug if we don't clear out queuedTx before
// destruction, as we add to it while disconnecting blocks, and then we
// need to re-process remaining transactions to ensure mempool consistency.
// For now, assert() that we've emptied out this object on destruction.
// This assert() can always be removed if the reorg-processing code were
// to be refactored such that this assumption is no longer true (for
// instance if there was some other way we cleaned up the mempool after a
// reorg, besides draining this object).
~DisconnectedBlockTransactions() { assert(queuedTx.empty()); }

size_t DynamicMemoryUsage() const {
// std::list has 3 pointers per entry
return cachedInnerUsage + memusage::DynamicUsage(iters_by_txid) + memusage::MallocUsage(3 * sizeof(void*)) * queuedTx.size();
}

void addTransaction(const CTransactionRef& tx)
{
// Add new transactions to the end.
auto it = queuedTx.insert(queuedTx.end(), tx);
iters_by_txid.emplace(tx->GetHash(), it);
cachedInnerUsage += RecursiveDynamicUsage(tx);
}

// Remove entries that are in this block.
void removeForBlock(const std::vector<CTransactionRef>& vtx)
{
// Short-circuit in the common case of a block being added to the tip
if (queuedTx.empty()) {
return;
}
for (const auto& tx : vtx) {
auto iter = iters_by_txid.find(tx->GetHash());
if (iter != iters_by_txid.end()) {
auto list_iter = iter->second;
iters_by_txid.erase(iter);
cachedInnerUsage -= RecursiveDynamicUsage(*list_iter);
queuedTx.erase(list_iter);
}
}
}

// Remove the first entry and update memory usage.
void remove_first()
{
cachedInnerUsage -= RecursiveDynamicUsage(queuedTx.front());
iters_by_txid.erase(queuedTx.front()->GetHash());
queuedTx.pop_front();
}

void clear()
{
cachedInnerUsage = 0;
iters_by_txid.clear();
queuedTx.clear();
}
};

#endif // BITCOIN_TXMEMPOOL_H
79 changes: 78 additions & 1 deletion src/validation.h
@@ -51,7 +51,6 @@ class CBlockTreeDB;
class CTxMemPool;
class ChainstateManager;
struct ChainTxData;
struct DisconnectedBlockTransactions;
struct PrecomputedTransactionData;
struct LockPoints;
struct AssumeutxoData;
@@ -233,6 +232,84 @@ struct PackageMempoolAcceptResult
: m_tx_results{ {wtxid, result} } {}
};


/**
* DisconnectedBlockTransactions
* During the reorg, it's desirable to re-add previously confirmed transactions
* to the mempool, so that anything not re-confirmed in the new chain is
* available to be mined. However, it's more efficient to wait until the reorg
* is complete and process all still-unconfirmed transactions at that time,
* since we expect most confirmed transactions to (typically) still be
* confirmed in the new chain, and re-accepting to the memory pool is expensive
* (and therefore better to not do in the middle of reorg-processing).
* Instead, store the disconnected transactions (in order!) as we go, remove any
* that are included in blocks in the new chain, and then process the remaining
* still-unconfirmed transactions at the end.
*/
struct DisconnectedBlockTransactions {
uint64_t cachedInnerUsage = 0;
std::list<CTransactionRef> queuedTx;
using Queue = decltype(queuedTx);
std::unordered_map<uint256, Queue::iterator, SaltedTxidHasher> iters_by_txid;

// It's almost certainly a logic bug if we don't clear out queuedTx before
// destruction, as we add to it while disconnecting blocks, and then we
// need to re-process remaining transactions to ensure mempool consistency.
// For now, assert() that we've emptied out this object on destruction.
// This assert() can always be removed if the reorg-processing code were
// to be refactored such that this assumption is no longer true (for
// instance if there was some other way we cleaned up the mempool after a
// reorg, besides draining this object).
~DisconnectedBlockTransactions() { assert(queuedTx.empty()); }

size_t DynamicMemoryUsage() const {
// std::list has 3 pointers per entry
return cachedInnerUsage + memusage::DynamicUsage(iters_by_txid) + memusage::MallocUsage(3 * sizeof(void*)) * queuedTx.size();
}

void addTransaction(const CTransactionRef& tx)
{
// Add new transactions to the end.
auto it = queuedTx.insert(queuedTx.end(), tx);
iters_by_txid.emplace(tx->GetHash(), it);
cachedInnerUsage += RecursiveDynamicUsage(tx);
}

// Remove entries that are in this block.
void removeForBlock(const std::vector<CTransactionRef>& vtx)
{
// Short-circuit in the common case of a block being added to the tip
if (queuedTx.empty()) {
return;
}
for (const auto& tx : vtx) {
auto iter = iters_by_txid.find(tx->GetHash());
if (iter != iters_by_txid.end()) {
auto list_iter = iter->second;
iters_by_txid.erase(iter);
cachedInnerUsage -= RecursiveDynamicUsage(*list_iter);
queuedTx.erase(list_iter);
}
}
}

// Remove the first entry and update memory usage.
void remove_first()
{
cachedInnerUsage -= RecursiveDynamicUsage(queuedTx.front());
iters_by_txid.erase(queuedTx.front()->GetHash());
queuedTx.pop_front();
}

void clear()
{
cachedInnerUsage = 0;
iters_by_txid.clear();
queuedTx.clear();
}
};

/**
* Try to add a transaction to the mempool. This is an internal function and is exposed only for testing.
* Client code should use ChainstateManager::ProcessTransaction()
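
To make the documented lifecycle concrete, here is a minimal sketch (not part of this commit) of the usage pattern the struct's comment describes: queue transactions while disconnecting blocks, drop anything re-confirmed by the new chain, then drain the remainder back toward the mempool. MaybeResubmitToMempool and ReorgUsageSketch are hypothetical names for illustration only; the real reorg-processing code in validation differs in detail.

// Hypothetical helper (illustration only): re-evaluates a disconnected
// transaction for mempool acceptance.
void MaybeResubmitToMempool(const CTransactionRef& tx);

// Sketch of the reorg flow described in DisconnectedBlockTransactions'
// comment; assumes the surrounding Bitcoin Core headers for CBlock and
// CTransactionRef.
void ReorgUsageSketch(DisconnectedBlockTransactions& disconnectpool,
                      const std::vector<CBlock>& disconnected_blocks,
                      const std::vector<CBlock>& connected_blocks)
{
    // 1. While walking back the old chain, record each disconnected
    //    transaction (in order) so it can be reconsidered later.
    for (const CBlock& block : disconnected_blocks) {
        for (const CTransactionRef& tx : block.vtx) {
            disconnectpool.addTransaction(tx);
        }
    }

    // 2. While connecting the new chain, drop transactions that were
    //    re-confirmed; they no longer need mempool re-acceptance.
    for (const CBlock& block : connected_blocks) {
        disconnectpool.removeForBlock(block.vtx);
    }

    // 3. After the reorg completes, the remaining entries are still
    //    unconfirmed: hand them back for mempool evaluation and drain
    //    the queue so the destructor's assert(queuedTx.empty()) holds.
    while (!disconnectpool.queuedTx.empty()) {
        MaybeResubmitToMempool(disconnectpool.queuedTx.front());
        disconnectpool.remove_first();
    }
    disconnectpool.clear();
}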
