Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[DNM] PIBD Task / Issue Tracker #3695

Merged
merged 22 commits into from
Oct 18, 2022
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
22 commits
Select commit Hold shift + click to select a range
89730b7
[PIBD_IMPL] Introduce PIBD state into sync workflow (#3685)
yeastplume Jan 12, 2022
009a02e
add pibd receive messages to network, and basic calls to desegmenter …
yeastplume Jan 17, 2022
41a86b4
[PIBD_IMPL] PIBD Desegmenter State (#3688)
yeastplume Jan 20, 2022
436bacf
[PIBD_IMPL] Bitmap accumulator reconstruction + TxHashset set reconst…
yeastplume Jan 25, 2022
24202f0
[PIBD_IMPL] PMMR Reassembly from Segments (#3690)
yeastplume Jan 28, 2022
169e106
[PIBD_IMPL] PIBD tree sync via network and kill/resume functionality …
yeastplume Feb 8, 2022
3ea233d
[PIBD_IMPL] Finalize PIBD download and move state to chain validation…
yeastplume Feb 15, 2022
5630cf2
[PIBD_IMPL] PIBD Stats + Retry on validation errors (#3694)
yeastplume Feb 17, 2022
bf48e52
[PIBD_IMPL] Update number of simultaneous peer requests for segments …
yeastplume Feb 24, 2022
21b1ac5
[PIBD_IMPL] Thread simplification + More TUI Updates + Stop State Pro…
yeastplume Mar 1, 2022
b08a6dd
revert to previous method of applying segments (#3699)
yeastplume Mar 2, 2022
09d6f41
fix for deadlock issue (#3700)
yeastplume Mar 9, 2022
50450ba
update Cargo.lock for next release
yeastplume Mar 22, 2022
6a7b66b
[PIBD_IMPL] Catch-Up functionality + Fixes based on testing (#3702)
yeastplume Mar 30, 2022
eda31ab
documentation updates + todo fixes (#3703)
yeastplume Apr 1, 2022
aa2a2a9
add pibd abort timeout case (#3704)
yeastplume Apr 5, 2022
5efd70a
[PIBD_IMPL] BitmapAccumulator Serialization Fix (#3705)
yeastplume Apr 20, 2022
41f3aaf
Merge DNSSeed scope changes into pibd impl branch (#3708)
yeastplume May 16, 2022
a441b78
move all PIBD-related constants into pibd_params modules (#3711)
yeastplume Jun 3, 2022
e13c9d1
merge from master (thiserror conversion update)
yeastplume Jul 14, 2022
6412fd1
Merge branch 'master' into pibd_impl
yeastplume Jul 28, 2022
3524b70
remove potential double read lock during compaction
yeastplume Sep 1, 2022
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
36 changes: 18 additions & 18 deletions Cargo.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

2 changes: 1 addition & 1 deletion api/src/handlers/chain_api.rs
Original file line number Diff line number Diff line change
Expand Up @@ -80,7 +80,7 @@ impl ChainResetHandler {
pub fn reset_chain_head(&self, hash: Hash) -> Result<(), Error> {
let chain = w(&self.chain)?;
let header = chain.get_block_header(&hash)?;
chain.reset_chain_head(&header)?;
chain.reset_chain_head(&header, true)?;

// Reset the sync status and clear out any sync error.
w(&self.sync_state)?.reset();
Expand Down
129 changes: 106 additions & 23 deletions chain/src/chain.rs
Original file line number Diff line number Diff line change
Expand Up @@ -226,7 +226,14 @@ impl Chain {
/// Reset both head and header_head to the provided header.
/// Handles simple rewind and more complex fork scenarios.
/// Used by the reset_chain_head owner api endpoint.
pub fn reset_chain_head<T: Into<Tip>>(&self, head: T) -> Result<(), Error> {
/// Caller can choose not to rewind headers, which can be used
/// during PIBD scenarios where it's desirable to restart the PIBD process
/// without re-downloading the header chain
pub fn reset_chain_head<T: Into<Tip>>(
&self,
head: T,
rewind_headers: bool,
) -> Result<(), Error> {
let head = head.into();

let mut header_pmmr = self.header_pmmr.write();
Expand All @@ -247,19 +254,44 @@ impl Chain {
},
)?;

// If the rewind of full blocks was successful then we can rewind the header MMR.
// Rewind and reapply headers to reset the header MMR.
txhashset::header_extending(&mut header_pmmr, &mut batch, |ext, batch| {
self.rewind_and_apply_header_fork(&header, ext, batch)?;
batch.save_header_head(&head)?;
Ok(())
})?;
if rewind_headers {
// If the rewind of full blocks was successful then we can rewind the header MMR.
// Rewind and reapply headers to reset the header MMR.
txhashset::header_extending(&mut header_pmmr, &mut batch, |ext, batch| {
self.rewind_and_apply_header_fork(&header, ext, batch)?;
batch.save_header_head(&head)?;
Ok(())
})?;
}

batch.commit()?;

Ok(())
}

/// Reset the prune lists.
/// When PIBD resets and rolls back the entire chain, the prune list
/// needs to be wiped manually, as it's currently not included as part
/// of rewind.
pub fn reset_prune_lists(&self) -> Result<(), Error> {
	// Acquire the header PMMR and txhashset locks, then open a db batch,
	// mirroring the locking pattern used elsewhere in this file.
	let mut header_pmmr = self.header_pmmr.write();
	let mut txhashset = self.txhashset.write();
	let mut batch = self.store.batch()?;

	// Run inside an extension so the reset goes through the usual
	// txhashset extension machinery.
	txhashset::extending(&mut header_pmmr, &mut txhashset, &mut batch, |ext, _batch| {
		ext.extension.reset_prune_lists();
		Ok(())
	})?;
	Ok(())
}

/// Reset PIBD head back to genesis, so a subsequent PIBD run restarts
/// the download from scratch.
pub fn reset_pibd_head(&self) -> Result<(), Error> {
	let batch = self.store.batch()?;
	batch.save_pibd_head(&self.genesis().into())?;
	// Explicitly commit the batch: other batch users in this file
	// (e.g. reset_chain_head) commit after writing, and an uncommitted
	// write transaction is discarded when the batch is dropped.
	batch.commit()?;
	Ok(())
}

/// Are we running with archive_mode enabled?
pub fn archive_mode(&self) -> bool {
self.archive_mode
Expand All @@ -275,6 +307,11 @@ impl Chain {
self.txhashset.clone()
}

/// Returns an owned copy of the genesis block header this chain was
/// initialized with.
pub fn genesis(&self) -> BlockHeader {
	self.genesis.to_owned()
}

/// Shared store instance.
pub fn store(&self) -> Arc<store::ChainStore> {
self.store.clone()
Expand Down Expand Up @@ -665,8 +702,15 @@ impl Chain {
// ensure the view is consistent.
txhashset::extending_readonly(&mut header_pmmr, &mut txhashset, |ext, batch| {
self.rewind_and_apply_fork(&header, ext, batch)?;
ext.extension
.validate(&self.genesis, fast_validation, &NoStatus, &header)?;
ext.extension.validate(
&self.genesis,
fast_validation,
&NoStatus,
None,
None,
&header,
None,
)?;
Ok(())
})
}
Expand Down Expand Up @@ -867,21 +911,22 @@ impl Chain {

/// instantiate desegmenter (in same lazy fashion as segmenter, though this should not be as
/// expensive an operation)
pub fn desegmenter(&self, archive_header: &BlockHeader) -> Result<Desegmenter, Error> {
pub fn desegmenter(
&self,
archive_header: &BlockHeader,
) -> Result<Arc<RwLock<Option<Desegmenter>>>, Error> {
// Use our cached desegmenter if we have one and the associated header matches.
if let Some(d) = self.pibd_desegmenter.read().as_ref() {
if let Some(d) = self.pibd_desegmenter.write().as_ref() {
if d.header() == archive_header {
return Ok(d.clone());
return Ok(self.pibd_desegmenter.clone());
}
}
// If no desegmenter or headers don't match init
// TODO: (Check whether we can do this.. we *should* be able to modify this as the desegmenter
// is in flight and we cross a horizon boundary, but needs more thinking)

let desegmenter = self.init_desegmenter(archive_header)?;
let mut cache = self.pibd_desegmenter.write();
*cache = Some(desegmenter.clone());

return Ok(desegmenter);
Ok(self.pibd_desegmenter.clone())
}

/// initialize a desegmenter, which is capable of extending the hashset by appending
Expand All @@ -898,6 +943,7 @@ impl Chain {
self.txhashset(),
self.header_pmmr.clone(),
header.clone(),
self.genesis.clone(),
self.store.clone(),
))
}
Expand All @@ -923,6 +969,17 @@ impl Chain {
self.get_header_by_height(txhashset_height)
}

/// Return the Block Header at the txhashset horizon, considering only the
/// contents of the header PMMR.
pub fn txhashset_archive_header_header_only(&self) -> Result<BlockHeader, Error> {
	let head = self.header_head()?;
	let interval = global::txhashset_archive_interval();
	// Step back from the header head by the state-sync threshold,
	// saturating at zero near the start of the chain.
	let candidate = head
		.height
		.saturating_sub(global::state_sync_threshold() as u64);
	// Round down to the nearest archive-interval boundary
	// (candidate % interval <= candidate, so this cannot underflow).
	let archive_height = candidate - candidate % interval;
	self.get_header_by_height(archive_height)
}

// Special handling to make sure the whole kernel set matches each of its
// roots in each block header, without truncation. We go back header by
// header, rewind and check each root. This fixes a potential weakness in
Expand Down Expand Up @@ -1028,7 +1085,7 @@ impl Chain {
txhashset_data: File,
status: &dyn TxHashsetWriteStatus,
) -> Result<bool, Error> {
status.on_setup();
status.on_setup(None, None, None, None);

// Initial check whether this txhashset is needed or not
let fork_point = self.fork_point()?;
Expand Down Expand Up @@ -1068,7 +1125,7 @@ impl Chain {

let header_pmmr = self.header_pmmr.read();
let batch = self.store.batch()?;
txhashset.verify_kernel_pos_index(&self.genesis, &header_pmmr, &batch)?;
txhashset.verify_kernel_pos_index(&self.genesis, &header_pmmr, &batch, None, None)?;
}

// all good, prepare a new batch and update all the required records
Expand All @@ -1087,7 +1144,7 @@ impl Chain {
// Validate the extension, generating the utxo_sum and kernel_sum.
// Full validation, including rangeproofs and kernel signature verification.
let (utxo_sum, kernel_sum) =
extension.validate(&self.genesis, false, status, &header)?;
extension.validate(&self.genesis, false, status, None, None, &header, None)?;

// Save the block_sums (utxo_sum, kernel_sum) to the db for use later.
batch.save_block_sums(
Expand Down Expand Up @@ -1161,6 +1218,7 @@ impl Chain {
fn remove_historical_blocks(
&self,
header_pmmr: &txhashset::PMMRHandle<BlockHeader>,
archive_header: BlockHeader,
batch: &store::Batch<'_>,
) -> Result<(), Error> {
if self.archive_mode() {
Expand All @@ -1181,7 +1239,6 @@ impl Chain {
// TODO: Check this, compaction selects a different horizon
// block from txhashset horizon/PIBD segmenter when using
// Automated testing chain
let archive_header = self.txhashset_archive_header()?;
if archive_header.height < cutoff {
cutoff = archive_header.height;
horizon = head.height - archive_header.height;
Expand Down Expand Up @@ -1241,6 +1298,10 @@ impl Chain {
}
}

// Retrieve archive header here, so as not to attempt a read
// lock while removing historical blocks
let archive_header = self.txhashset_archive_header()?;

// Take a write lock on the txhashet and start a new writeable db batch.
let header_pmmr = self.header_pmmr.read();
let mut txhashset = self.txhashset.write();
Expand All @@ -1260,7 +1321,7 @@ impl Chain {

// If we are not in archival mode remove historical blocks from the db.
if !self.archive_mode() {
self.remove_historical_blocks(&header_pmmr, &batch)?;
self.remove_historical_blocks(&header_pmmr, archive_header, &batch)?;
}

// Make sure our output_pos index is consistent with the UTXO set.
Expand Down Expand Up @@ -1616,9 +1677,31 @@ fn setup_head(
// Note: We are rewinding and validating against a writeable extension.
// If validation is successful we will truncate the backend files
// to match the provided block header.
let header = batch.get_block_header(&head.last_block_h)?;
let mut pibd_in_progress = false;
let header = {
let head = batch.get_block_header(&head.last_block_h)?;
let pibd_tip = store.pibd_head()?;
let pibd_head = batch.get_block_header(&pibd_tip.last_block_h)?;
if pibd_head.height > head.height {
pibd_in_progress = true;
pibd_head
} else {
head
}
};

let res = txhashset::extending(header_pmmr, txhashset, &mut batch, |ext, batch| {
// If we're still downloading via PIBD, don't worry about sums and validations just yet
// We still want to rewind to the last completed block to ensure a consistent state
if pibd_in_progress {
debug!(
"init: PIBD appears to be in progress at height {}, hash {}, not validating, will attempt to continue",
header.height,
header.hash()
);
return Ok(());
}

pipe::rewind_and_apply_fork(&header, ext, batch, &|_| Ok(()))?;

let extension = &mut ext.extension;
Expand Down
7 changes: 7 additions & 0 deletions chain/src/error.rs
Original file line number Diff line number Diff line change
Expand Up @@ -177,12 +177,19 @@ pub enum Error {
/// Conversion
source: segment::SegmentError,
},
/// We've decided to halt the PIBD process due to lack of supporting peers or
/// otherwise failing to progress for a certain amount of time
#[error("Aborting PIBD error")]
AbortingPIBDError,
/// The segmenter is associated to a different block header
#[error("Segmenter header mismatch")]
SegmenterHeaderMismatch,
/// Segment height not within allowed range
#[error("Invalid segment height")]
InvalidSegmentHeight,
/// Other issue with segment
#[error("Invalid segment: {0}")]
InvalidSegment(String),
}

impl Error {
Expand Down
1 change: 1 addition & 0 deletions chain/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -38,6 +38,7 @@ use grin_util as util;
mod chain;
mod error;
pub mod linked_list;
pub mod pibd_params;
pub mod pipe;
pub mod store;
pub mod txhashset;
Expand Down
Loading