Skip to content

Commit

Permalink
- Removed the memory copy of metadata from the blockchain db and made…
Browse files Browse the repository at this point in the history
… the backend responsible for providing fast access to the chain metadata. - A memory copy of the metadata was added to lmdb_db, and the backend was made responsible for restoring the memory copy and keeping it in sync with the persistent stored version. - Removed all unused memory chain metadata functions and parameters from the blockchain db, mempool, consensus manager and difficulty adjustment manager. - The validation train, block validators and transaction validators were updated to not need access to the chain metadata as it can be obtained from the blockchain backend. - Modified the rewind_to_height function to update the metadata with the set of rewind db transactions ensuring it does not get updated when the db transaction fails. - Added fetch_metadata to blockchain backend trait and provided implementations for lmdb_db, memory_db and mock_db. (#1666)

Merge pull request #1666

- Removed the memory copy of metadata from the blockchain db and made the backend responsible for providing fast access to the chain metadata. - A memory copy of the metadata was added to lmdb_db, and the backend was made responsible for restoring the memory copy and keeping it in sync with the persistent stored version. - Removed all unused memory chain metadata functions and parameters from the blockchain db, mempool, consensus manager and difficulty adjustment manager. - The validation train, block validators and transaction validators were updated to not need access to the chain metadata as it can be obtained from the blockchain backend. - Modified the rewind_to_height function to update the metadata with the set of rewind db transactions ensuring it does not get updated when the db transaction fails. - Added fetch_metadata to blockchain backend trait and provided implementations for lmdb_db, memory_db and mock_db.

* pull/1666/head:
  - Removed the memory copy of metadata from the blockchain db and made the backend responsible for providing fast access to the chain metadata. - A memory copy of the metadata was added to lmdb_db, and the backend was made responsible for restoring the memory copy and keeping it in sync with the persistent stored version. - Removed all unused memory chain metadata functions and parameters from the blockchain db, mempool, consensus manager and difficulty adjustment manager. - The validation train, block validators and transaction validators were updated to not need access to the chain metadata as it can be obtained from the blockchain backend. - Modified the rewind_to_height function to update the metadata with the set of rewind db transactions ensuring it does not get updated when the db transaction fails. - Added fetch_metadata to blockchain backend trait and provided implementations for lmdb_db, memory_db and mock_db.
  • Loading branch information
sdbondi committed Apr 3, 2020
2 parents 63e1cb9 + 65da071 commit e262239
Show file tree
Hide file tree
Showing 15 changed files with 319 additions and 416 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -234,10 +234,9 @@ where T: BlockchainBackend + 'static
Ok(NodeCommsResponse::NewBlock(block))
},
NodeCommsRequest::GetTargetDifficulty(pow_algo) => {
let (db, metadata) = &self.blockchain_db.db_and_metadata_read_access()?;
let db = &self.blockchain_db.db_read_access()?;
Ok(NodeCommsResponse::TargetDifficulty(
self.consensus_manager
.get_target_difficulty(metadata, &**db, *pow_algo)?,
self.consensus_manager.get_target_difficulty(&**db, *pow_algo)?,
))
},
}
Expand Down
268 changes: 48 additions & 220 deletions base_layer/core/src/chain_storage/blockchain_database.rs

Large diffs are not rendered by default.

118 changes: 109 additions & 9 deletions base_layer/core/src/chain_storage/lmdb_db/lmdb_db.rs
Original file line number Diff line number Diff line change
Expand Up @@ -21,10 +21,22 @@
// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

use crate::{
blocks::{blockheader::BlockHeader, Block},
blocks::{
blockheader::{BlockHash, BlockHeader},
Block,
},
chain_storage::{
blockchain_database::BlockchainBackend,
db_transaction::{DbKey, DbKeyValuePair, DbTransaction, DbValue, MetadataValue, MmrTree, WriteOperation},
db_transaction::{
DbKey,
DbKeyValuePair,
DbTransaction,
DbValue,
MetadataKey,
MetadataValue,
MmrTree,
WriteOperation,
},
error::ChainStorageError,
lmdb_db::{
lmdb::{lmdb_delete, lmdb_exists, lmdb_for_each, lmdb_get, lmdb_insert, lmdb_len, lmdb_replace},
Expand All @@ -42,7 +54,9 @@ use crate::{
LMDB_DB_UTXO_MMR_CP_BACKEND,
},
memory_db::MemDbVec,
ChainMetadata,
},
proof_of_work::Difficulty,
transactions::{
transaction::{TransactionKernel, TransactionOutput},
types::{HashDigest, HashOutput},
Expand Down Expand Up @@ -76,6 +90,7 @@ where D: Digest
{
env: Arc<Environment>,
metadata_db: DatabaseRef,
mem_metadata: ChainMetadata, // Memory copy of stored metadata
headers_db: DatabaseRef,
block_hashes_db: DatabaseRef,
utxos_db: DatabaseRef,
Expand Down Expand Up @@ -122,12 +137,23 @@ where D: Digest + Send + Sync
.db()
.clone(),
);
// Restore memory metadata
let env = store.env();
let metadata_db = store
.get_handle(LMDB_DB_METADATA)
.ok_or_else(|| ChainStorageError::CriticalError)?
.db()
.clone();
let metadata = ChainMetadata {
height_of_longest_chain: fetch_chain_height(&env, &metadata_db)?,
best_block: fetch_best_block(&env, &metadata_db)?,
pruning_horizon: fetch_pruning_horizon(&env, &metadata_db)?,
accumulated_difficulty: fetch_accumulated_work(&env, &metadata_db)?,
};

Ok(Self {
metadata_db: store
.get_handle(LMDB_DB_METADATA)
.ok_or_else(|| ChainStorageError::CriticalError)?
.db()
.clone(),
metadata_db,
mem_metadata: metadata,
headers_db: store
.get_handle(LMDB_DB_HEADERS)
.ok_or_else(|| ChainStorageError::CriticalError)?
Expand Down Expand Up @@ -172,7 +198,7 @@ where D: Digest + Send + Sync
range_proof_mmr: MmrCache::new(MemDbVec::new(), range_proof_checkpoints.clone(), mmr_cache_config)?,
range_proof_checkpoints,
curr_range_proof_checkpoint: MerkleCheckPoint::new(Vec::new(), Bitmap::create()),
env: store.env(),
env,
})
}

Expand Down Expand Up @@ -276,13 +302,15 @@ where D: Digest + Send + Sync
// changes committed to the backend databases. CreateMmrCheckpoint and RewindMmr txns will be performed after these
// txns have been successfully applied.
fn apply_mmr_and_storage_txs(&mut self, tx: &DbTransaction) -> Result<(), ChainStorageError> {
let mut update_mem_metadata = false;
let txn = WriteTransaction::new(self.env.clone()).map_err(|e| ChainStorageError::AccessError(e.to_string()))?;
{
for op in tx.operations.iter() {
match op {
WriteOperation::Insert(insert) => match insert {
DbKeyValuePair::Metadata(k, v) => {
lmdb_replace(&txn, &self.metadata_db, &(k.clone() as u32), &v)?;
update_mem_metadata = true;
},
DbKeyValuePair::BlockHeader(k, v) => {
if lmdb_exists(&self.env, &self.headers_db, &k)? {
Expand Down Expand Up @@ -389,7 +417,18 @@ where D: Digest + Send + Sync
}
}
}
txn.commit().map_err(|e| ChainStorageError::AccessError(e.to_string()))
txn.commit()
.map_err(|e| ChainStorageError::AccessError(e.to_string()))?;

if update_mem_metadata {
self.mem_metadata = ChainMetadata {
height_of_longest_chain: fetch_chain_height(&self.env, &self.metadata_db)?,
best_block: fetch_best_block(&self.env, &self.metadata_db)?,
pruning_horizon: fetch_pruning_horizon(&self.env, &self.metadata_db)?,
accumulated_difficulty: fetch_accumulated_work(&self.env, &self.metadata_db)?,
};
}
Ok(())
}

// Returns the leaf index of the hash. If the hash is in the newly added hashes it returns the future MMR index for
Expand Down Expand Up @@ -645,6 +684,67 @@ where D: Digest + Send + Sync
Ok(None)
}
}

/// Returns the metadata of the chain.
///
/// Served straight from the in-memory copy (`mem_metadata`), which this backend restores from
/// the metadata db on construction and refreshes after every committed transaction that wrote
/// a metadata key — so no db read is required on this hot path.
fn fetch_metadata(&self) -> Result<ChainMetadata, ChainStorageError> {
    Ok(self.mem_metadata.clone())
}
}

// Fetches the chain height from the provided metadata db.
//
// Returns `Ok(None)` when the key is absent or the stored entry is not a
// `MetadataValue::ChainHeight`; propagates any db access error.
fn fetch_chain_height(env: &Environment, db: &Database) -> Result<Option<u64>, ChainStorageError> {
    let k = MetadataKey::ChainHeight;
    let val: Option<MetadataValue> = lmdb_get(&env, &db, &(k as u32))?;
    // Match the stored MetadataValue directly — wrapping it into DbValue::Metadata just to
    // destructure it again added no information.
    Ok(match val {
        Some(MetadataValue::ChainHeight(height)) => height,
        _ => None,
    })
}

// Fetches the best block hash from the provided metadata db.
//
// Returns `Ok(None)` when the key is absent or the stored entry is not a
// `MetadataValue::BestBlock`; propagates any db access error.
fn fetch_best_block(env: &Environment, db: &Database) -> Result<Option<BlockHash>, ChainStorageError> {
    let k = MetadataKey::BestBlock;
    let val: Option<MetadataValue> = lmdb_get(&env, &db, &(k as u32))?;
    // Match the stored MetadataValue directly — the intermediate DbValue::Metadata wrapper
    // used previously was immediately unwrapped again and served no purpose.
    Ok(match val {
        Some(MetadataValue::BestBlock(best_block)) => best_block,
        _ => None,
    })
}

// Fetches the accumulated work from the provided metadata db.
//
// Returns `Ok(None)` when the key is absent or the stored entry is not a
// `MetadataValue::AccumulatedWork`; propagates any db access error.
fn fetch_accumulated_work(env: &Environment, db: &Database) -> Result<Option<Difficulty>, ChainStorageError> {
    let k = MetadataKey::AccumulatedWork;
    let val: Option<MetadataValue> = lmdb_get(&env, &db, &(k as u32))?;
    // Match the stored MetadataValue directly instead of round-tripping it through
    // DbValue::Metadata only to pattern-match it back out.
    Ok(match val {
        Some(MetadataValue::AccumulatedWork(accumulated_work)) => accumulated_work,
        _ => None,
    })
}

// Fetches the pruning horizon from the provided metadata db.
//
// Unlike the other metadata getters this is infallible-by-default: a missing or mismatched
// entry yields the fallback value rather than `None`.
fn fetch_pruning_horizon(env: &Environment, db: &Database) -> Result<u64, ChainStorageError> {
    let k = MetadataKey::PruningHorizon;
    let val: Option<MetadataValue> = lmdb_get(&env, &db, &(k as u32))?;
    // Match the stored MetadataValue directly — the DbValue::Metadata wrapper added nothing.
    Ok(match val {
        Some(MetadataValue::PruningHorizon(pruning_horizon)) => pruning_horizon,
        // NOTE(review): 2880 looks like a default pruning horizon in blocks — confirm this
        // matches the consensus-level default rather than duplicating a magic number here.
        _ => 2880,
    })
}

// Calculated the new checkpoint count after rewinding a set number of steps back.
Expand Down
79 changes: 76 additions & 3 deletions base_layer/core/src/chain_storage/memory_db/memory_db.rs
Original file line number Diff line number Diff line change
Expand Up @@ -23,13 +23,24 @@
//! This is a memory-based blockchain database, generally only useful for testing purposes
use crate::{
blocks::{Block, BlockHeader},
blocks::{blockheader::BlockHash, Block, BlockHeader},
chain_storage::{
blockchain_database::BlockchainBackend,
db_transaction::{DbKey, DbKeyValuePair, DbTransaction, DbValue, MetadataValue, MmrTree, WriteOperation},
db_transaction::{
DbKey,
DbKeyValuePair,
DbTransaction,
DbValue,
MetadataKey,
MetadataValue,
MmrTree,
WriteOperation,
},
error::ChainStorageError,
memory_db::MemDbVec,
ChainMetadata,
},
proof_of_work::Difficulty,
transactions::{
transaction::{TransactionKernel, TransactionOutput},
types::HashOutput,
Expand Down Expand Up @@ -95,7 +106,7 @@ where D: Digest
}

impl<D> MemoryDatabase<D>
where D: Digest
where D: Digest + Send + Sync
{
pub fn new(mmr_cache_config: MmrCacheConfig) -> Self {
let utxo_checkpoints = MemDbVec::<MerkleCheckPoint>::new();
Expand Down Expand Up @@ -133,6 +144,58 @@ where D: Digest
.read()
.map_err(|e| ChainStorageError::AccessError(e.to_string()))
}

// Fetches the chain metadata chain height.
// Yields Ok(None) when the key is missing or holds an unexpected variant.
fn fetch_chain_height(&self) -> Result<Option<u64>, ChainStorageError> {
    let stored = self.fetch(&DbKey::Metadata(MetadataKey::ChainHeight))?;
    match stored {
        Some(DbValue::Metadata(MetadataValue::ChainHeight(height))) => Ok(height),
        _ => Ok(None),
    }
}

// Fetches the chain metadata best block hash.
// Yields Ok(None) when the key is missing or holds an unexpected variant.
fn fetch_best_block(&self) -> Result<Option<BlockHash>, ChainStorageError> {
    let stored = self.fetch(&DbKey::Metadata(MetadataKey::BestBlock))?;
    match stored {
        Some(DbValue::Metadata(MetadataValue::BestBlock(best_block))) => Ok(best_block),
        _ => Ok(None),
    }
}

// Fetches the chain metadata accumulated work.
// Yields Ok(None) when the key is missing or holds an unexpected variant.
fn fetch_accumulated_work(&self) -> Result<Option<Difficulty>, ChainStorageError> {
    let stored = self.fetch(&DbKey::Metadata(MetadataKey::AccumulatedWork))?;
    match stored {
        Some(DbValue::Metadata(MetadataValue::AccumulatedWork(work))) => Ok(work),
        _ => Ok(None),
    }
}

// Fetches the chain metadata pruning horizon.
// Falls back to 2880 when the key is missing or holds an unexpected variant.
fn fetch_pruning_horizon(&self) -> Result<u64, ChainStorageError> {
    let stored = self.fetch(&DbKey::Metadata(MetadataKey::PruningHorizon))?;
    match stored {
        Some(DbValue::Metadata(MetadataValue::PruningHorizon(horizon))) => Ok(horizon),
        _ => Ok(2880),
    }
}
}

impl<D> BlockchainBackend for MemoryDatabase<D>
Expand Down Expand Up @@ -451,6 +514,16 @@ where D: Digest + Send + Sync
Ok(None)
}
}

/// Returns the metadata of the chain, assembled from the individually stored metadata entries.
fn fetch_metadata(&self) -> Result<ChainMetadata, ChainStorageError> {
    // Read each entry in the same order as before so the first failing lookup
    // determines which error is reported.
    let height_of_longest_chain = self.fetch_chain_height()?;
    let best_block = self.fetch_best_block()?;
    let pruning_horizon = self.fetch_pruning_horizon()?;
    let accumulated_difficulty = self.fetch_accumulated_work()?;
    Ok(ChainMetadata {
        height_of_longest_chain,
        best_block,
        pruning_horizon,
        accumulated_difficulty,
    })
}
}

impl<D> Clone for MemoryDatabase<D>
Expand Down
14 changes: 4 additions & 10 deletions base_layer/core/src/consensus/consensus_manager.rs
Original file line number Diff line number Diff line change
Expand Up @@ -30,7 +30,7 @@ use crate::{
},
Block,
},
chain_storage::{BlockchainBackend, ChainMetadata, ChainStorageError},
chain_storage::{BlockchainBackend, ChainStorageError},
consensus::{emission::EmissionSchedule, network::Network, ConsensusConstants},
proof_of_work::{DiffAdjManager, DiffAdjManagerError, Difficulty, DifficultyAdjustmentError, PowAlgorithm},
transactions::tari_amount::MicroTari,
Expand Down Expand Up @@ -115,14 +115,13 @@ impl ConsensusManager {
/// Returns the estimated target difficulty for the specified PoW algorithm at the chain tip.
pub fn get_target_difficulty<B: BlockchainBackend>(
&self,
metadata: &ChainMetadata,
db: &B,
pow_algo: PowAlgorithm,
) -> Result<Difficulty, ConsensusManagerError>
{
match self.access_diff_adj()?.as_ref() {
Some(v) => v
.get_target_difficulty(metadata, db, pow_algo)
.get_target_difficulty(db, pow_algo)
.map_err(ConsensusManagerError::DifficultyAdjustmentManagerError),
None => Err(ConsensusManagerError::MissingDifficultyAdjustmentManager),
}
Expand All @@ -145,15 +144,10 @@ impl ConsensusManager {
}

/// Returns the median timestamp of the past 11 blocks at the chain tip.
pub fn get_median_timestamp<B: BlockchainBackend>(
&self,
metadata: &ChainMetadata,
db: &B,
) -> Result<EpochTime, ConsensusManagerError>
{
pub fn get_median_timestamp<B: BlockchainBackend>(&self, db: &B) -> Result<EpochTime, ConsensusManagerError> {
match self.access_diff_adj()?.as_ref() {
Some(v) => v
.get_median_timestamp(metadata, db)
.get_median_timestamp(db)
.map_err(ConsensusManagerError::DifficultyAdjustmentManagerError),
None => Err(ConsensusManagerError::MissingDifficultyAdjustmentManager),
}
Expand Down
7 changes: 5 additions & 2 deletions base_layer/core/src/helpers/mock_backend.rs
Original file line number Diff line number Diff line change
Expand Up @@ -19,11 +19,10 @@
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
// USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//

use crate::{
blocks::{Block, BlockHeader},
chain_storage::{BlockchainBackend, ChainStorageError, DbKey, DbTransaction, DbValue, MmrTree},
chain_storage::{BlockchainBackend, ChainMetadata, ChainStorageError, DbKey, DbTransaction, DbValue, MmrTree},
transactions::{
transaction::{TransactionKernel, TransactionOutput},
types::HashOutput,
Expand Down Expand Up @@ -113,4 +112,8 @@ impl BlockchainBackend for MockBackend {
fn fetch_last_header(&self) -> Result<Option<BlockHeader>, ChainStorageError> {
unimplemented!()
}

/// Returns the metadata of the chain.
///
/// NOTE(review): like the other `MockBackend` methods, this deliberately panics if called —
/// implement it only when a test actually needs chain metadata from the mock.
fn fetch_metadata(&self) -> Result<ChainMetadata, ChainStorageError> {
    unimplemented!()
}
}
4 changes: 2 additions & 2 deletions base_layer/core/src/mempool/mempool_storage.rs
Original file line number Diff line number Diff line change
Expand Up @@ -83,9 +83,9 @@ where T: BlockchainBackend
tx.body.kernels()[0].excess_sig.get_signature().to_hex()
);
// The transaction is already internally consistent
let (db, metadata) = self.blockchain_db.db_and_metadata_read_access()?;
let db = self.blockchain_db.db_read_access()?;

match self.validator.validate(&tx, &db, &metadata) {
match self.validator.validate(&tx, &db) {
Ok(()) => {
self.unconfirmed_pool.insert(tx)?;
Ok(TxStorageResponse::UnconfirmedPool)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -102,9 +102,9 @@ where T: BlockchainBackend
// We don't care about txs that appeared in valid blocks. Those txs will time out in the orphan
// pool and remove themselves.
for (tx_key, tx) in self.txs_by_signature.iter() {
let (db, metadata) = self.blockchain_db.db_and_metadata_read_access()?;
let db = self.blockchain_db.db_read_access()?;

match self.validator.validate(&tx, &db, &metadata) {
match self.validator.validate(&tx, &db) {
Ok(()) => {
trace!(
target: LOG_TARGET,
Expand Down
Loading

0 comments on commit e262239

Please sign in to comment.