[PIBD_IMPL] Small TODO Cleanup #3703

Merged 1 commit on Apr 1, 2022
chain/src/chain.rs (9 changes: 6 additions & 3 deletions)
@@ -226,6 +226,9 @@ impl Chain {
/// Reset both head and header_head to the provided header.
/// Handles simple rewind and more complex fork scenarios.
/// Used by the reset_chain_head owner api endpoint.
+ /// Caller can choose not to rewind headers, which can be used
+ /// during PIBD scenarios where it's desirable to restart the PIBD process
+ /// without re-downloading the header chain
pub fn reset_chain_head<T: Into<Tip>>(
&self,
head: T,
@@ -266,7 +269,9 @@ impl Chain {
Ok(())
}

- /// Reset prune lists (when PIBD resets)
+ /// Reset prune lists (when PIBD resets and rolls back the
+ /// entire chain, the prune list needs to be manually wiped
+ /// as it's currently not included as part of rewind)
pub fn reset_prune_lists(&self) -> Result<(), Error> {
let mut header_pmmr = self.header_pmmr.write();
let mut txhashset = self.txhashset.write();
@@ -917,8 +922,6 @@ impl Chain {
}
}

- // TODO: (Check whether we can do this.. we *should* be able to modify this as the desegmenter
- // is in flight and we cross a horizon boundary, but needs more thinking)
let desegmenter = self.init_desegmenter(archive_header)?;
let mut cache = self.pibd_desegmenter.write();
*cache = Some(desegmenter.clone());
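
For orientation, a minimal sketch of how a PIBD restart could combine the two calls above. The full reset_chain_head signature is truncated in this diff, so the boolean rewind_headers parameter and the surrounding names are assumptions for illustration, not the PR's exact code:

fn restart_pibd(chain: &Chain, genesis: &BlockHeader) -> Result<(), Error> {
    // Reset the body head back to genesis without rewinding the header
    // MMR, keeping the already-downloaded header chain intact
    // (the trailing `false` stands in for an assumed rewind_headers flag).
    chain.reset_chain_head(Tip::from_header(genesis), false)?;
    // Rewind does not currently touch the prune list, so wipe it
    // explicitly before PIBD re-applies segments.
    chain.reset_prune_lists()?;
    Ok(())
}
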
chain/src/store.rs (12 changes: 2 additions & 10 deletions)
@@ -17,9 +17,9 @@
use crate::core::consensus::HeaderDifficultyInfo;
use crate::core::core::hash::{Hash, Hashed};
use crate::core::core::{Block, BlockHeader, BlockSums};
+ use crate::core::global;
use crate::core::pow::Difficulty;
use crate::core::ser::{DeserializationMode, ProtocolVersion, Readable, Writeable};
- use crate::core::{genesis, global, global::ChainTypes};
use crate::linked_list::MultiIndex;
use crate::types::{CommitPos, Tip};
use crate::util::secp::pedersen::Commitment;
@@ -83,17 +83,9 @@ impl ChainStore {
"PIBD_HEAD".to_owned()
});

- // todo: fix duplication in batch below
match res {
Ok(r) => Ok(r),
- Err(_) => {
- let gen = match global::get_chain_type() {
- ChainTypes::Mainnet => genesis::genesis_main(),
- ChainTypes::Testnet => genesis::genesis_test(),
- _ => genesis::genesis_dev(),
- };
- Ok(Tip::from_header(&gen.header))
- }
+ Err(_) => Ok(Tip::from_header(&global::get_genesis_block().header)),
}
}

chain/src/txhashset/desegmenter.rs (86 changes: 13 additions & 73 deletions)
@@ -139,7 +139,7 @@ impl Desegmenter {

/// Check progress, update status if needed, returns true if all required
/// segments are in place
- pub fn check_progress(&self, status: Arc<SyncState>) -> bool {
+ pub fn check_progress(&self, status: Arc<SyncState>) -> Result<bool, Error> {
let mut latest_block_height = 0;

let local_output_mmr_size;
@@ -183,9 +183,9 @@

// TODO: Unwraps
let tip = Tip::from_header(&h);
- let batch = self.store.batch().unwrap();
- batch.save_pibd_head(&tip).unwrap();
- batch.commit().unwrap();
+ let batch = self.store.batch()?;
+ batch.save_pibd_head(&tip)?;
+ batch.commit()?;

status.update_pibd_progress(
false,
@@ -200,11 +200,11 @@
&& self.bitmap_cache.is_some()
{
// All is complete
- return true;
+ return Ok(true);
}
}

- false
+ Ok(false)
}

/// Once the PIBD set is downloaded, we need to ensure that the respective leaf sets
@@ -223,11 +223,8 @@
Ok(())
}

- /// TODO: This is largely copied from chain.rs txhashset_write and related functions,
- /// the idea being that these will eventually be broken out to perform validation while
- /// segments are still being downloaded and applied. Current validation logic is all tied up
- /// around unzipping, so re-developing this logic separate from the txhashset version
- /// will to allow this to happen more cleanly
+ /// This is largely copied from chain.rs txhashset_write and related functions,
+ /// the idea being that the txhashset version will eventually be removed
pub fn validate_complete_state(
&self,
status: Arc<SyncState>,
@@ -239,7 +236,7 @@
txhashset.roots().validate(&self.archive_header)?;
}

- // TODO: Keep track of this in the DB so we can pick up where we left off if needed
+ // TODO: Possibly Keep track of this in the DB so we can pick up where we left off if needed
let last_rangeproof_validation_pos = 0;

// Validate kernel history
@@ -348,7 +345,7 @@
{
// Save the new head to the db and rebuild the header by height index.
let tip = Tip::from_header(&self.archive_header);
- // TODO: Throw error

batch.save_body_head(&tip)?;

// Reset the body tail to the body head after a txhashset write
@@ -372,8 +369,7 @@
}

/// Apply next set of segments that are ready to be appended to their respective trees,
- /// and kick off any validations that can happen. TODO: figure out where and how
- /// this should be called considering any thread blocking implications
+ /// and kick off any validations that can happen.
pub fn apply_next_segments(&mut self) -> Result<(), Error> {
let next_bmp_idx = self.next_required_bitmap_segment_index();
if let Some(bmp_idx) = next_bmp_idx {
@@ -561,10 +557,6 @@

/// 'Finalize' the bitmap accumulator, storing an in-memory copy of the bitmap for
/// use in further validation and setting the accumulator on the underlying txhashset
- /// TODO: Could be called automatically when we have the calculated number of
- /// required segments for the archive header
- /// TODO: Accumulator will likely need to be stored locally to deal with server
- /// being shut down and restarted
pub fn finalize_bitmap(&mut self) -> Result<(), Error> {
trace!(
"pibd_desegmenter: finalizing and caching bitmap - accumulator root: {}",
@@ -630,58 +622,6 @@ impl Desegmenter {
}
}

- /// Apply a list of segments, in a single extension
- pub fn _apply_segments(
- &mut self,
- output_segments: Vec<Segment<OutputIdentifier>>,
- rp_segments: Vec<Segment<RangeProof>>,
- kernel_segments: Vec<Segment<TxKernel>>,
- ) -> Result<(), Error> {
- let t = self.txhashset.clone();
- let s = self.store.clone();
- let mut header_pmmr = self.header_pmmr.write();
- let mut txhashset = t.write();
- let mut batch = s.batch()?;
- txhashset::extending(
- &mut header_pmmr,
- &mut txhashset,
- &mut batch,
- |ext, _batch| {
- let extension = &mut ext.extension;
- // outputs
- for segment in output_segments {
- let id = segment.identifier().idx;
- if let Err(e) = extension.apply_output_segment(segment) {
- debug!("pibd_desegmenter: applying output segment at idx {}", id);
- error!("Error applying output segment {}, {}", id, e);
- break;
- }
- }
- for segment in rp_segments {
- let id = segment.identifier().idx;
- if let Err(e) = extension.apply_rangeproof_segment(segment) {
- debug!(
- "pibd_desegmenter: applying rangeproof segment at idx {}",
- id
- );
- error!("Error applying rangeproof segment {}, {}", id, e);
- break;
- }
- }
- for segment in kernel_segments {
- let id = segment.identifier().idx;
- if let Err(e) = extension.apply_kernel_segment(segment) {
- debug!("pibd_desegmenter: applying kernel segment at idx {}", id);
- error!("Error applying kernel segment {}, {}", id, e);
- break;
- }
- }
- Ok(())
- },
- )?;
- Ok(())
- }

/// Whether our list already contains this bitmap segment
fn has_bitmap_segment_with_id(&self, seg_id: SegmentIdentifier) -> bool {
self.bitmap_segment_cache
@@ -798,6 +738,8 @@ impl Desegmenter {
// Special case here. If the mmr size is 1, this is a fresh chain
// with naught but a humble genesis block. We need segment 0, (and
// also need to skip the genesis block when applying the segment)
+ // note this is implementation-specific, the code for creating
+ // a new chain creates the genesis block pmmr entries by default

let mut cur_segment_count = if local_output_mmr_size == 1 {
0
@@ -856,8 +798,6 @@ impl Desegmenter {
}

/// Whether our list already contains this rangeproof segment
- /// TODO: Refactor all these similar functions, but will require some time
- /// refining traits
fn has_rangeproof_segment_with_id(&self, seg_id: SegmentIdentifier) -> bool {
self.rangeproof_segment_cache
.iter()
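
With check_progress now returning Result<bool, Error>, the former unwrap() panics surface as recoverable errors at the call site. A minimal consumer sketch, assuming the desegmenter and sync_state handles from this diff:

match desegmenter.check_progress(sync_state.clone()) {
    // All required segments are in place; move on to
    // check_update_leaf_set_state and validate_complete_state.
    Ok(true) => {}
    // Still missing segments; keep requesting them from peers.
    Ok(false) => {}
    // DB errors (e.g. failing to save the PIBD head) now propagate
    // instead of panicking inside check_progress.
    Err(e) => error!("pibd: progress check failed: {}", e),
}
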
chain/src/txhashset/txhashset.rs (4 changes: 1 addition & 3 deletions)
@@ -1295,8 +1295,6 @@ impl<'a> Extension<'a> {
.leaf_idx_iter(BitmapAccumulator::chunk_start_idx(min_idx)),
size,
)
- // TODO: will need to set bitmap cache here if it's ever needed
- // outside of PIBD sync
}

/// Sets the bitmap accumulator (as received during PIBD sync)
@@ -1402,7 +1400,7 @@ impl<'a> Extension<'a> {
/// Apply an output segment to the output PMMR. must be called in order
/// Sort and apply hashes and leaves within a segment to output pmmr, skipping over
/// genesis position.
- /// TODO NB: Would like to make this more generic but the hard casting of pmmrs
+ /// NB: Would like to make this more generic but the hard casting of pmmrs
/// held by this struct makes it awkward to do so

pub fn apply_output_segment(
core/src/global.rs (12 changes: 11 additions & 1 deletion)
@@ -22,7 +22,8 @@ use crate::consensus::{
DMA_WINDOW, GRIN_BASE, INITIAL_DIFFICULTY, KERNEL_WEIGHT, MAX_BLOCK_WEIGHT, OUTPUT_WEIGHT,
PROOFSIZE, SECOND_POW_EDGE_BITS, STATE_SYNC_THRESHOLD,
};
- use crate::core::block::HeaderVersion;
+ use crate::core::block::{Block, HeaderVersion};
+ use crate::genesis;
use crate::pow::{
self, new_cuckaroo_ctx, new_cuckarood_ctx, new_cuckaroom_ctx, new_cuckarooz_ctx,
new_cuckatoo_ctx, no_cuckaroo_ctx, PoWContext, Proof,
@@ -201,6 +202,15 @@ pub fn get_chain_type() -> ChainTypes {
})
}

+ /// Return genesis block for the active chain type
+ pub fn get_genesis_block() -> Block {
+ match get_chain_type() {
+ ChainTypes::Mainnet => genesis::genesis_main(),
+ ChainTypes::Testnet => genesis::genesis_test(),
+ _ => genesis::genesis_dev(),
+ }
+ }

/// One time initialization of the global future time limit
/// Will panic if we attempt to re-initialize this (via OneTime).
pub fn init_global_future_time_limit(new_ftl: u64) {
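
A minimal usage sketch of the new helper, mirroring how the store.rs hunk above replaces its per-caller ChainTypes match:

// Resolve the genesis block for whichever chain type is active,
// then derive a Tip from its header (as the PIBD head fallback does).
let genesis = global::get_genesis_block();
let tip = Tip::from_header(&genesis.header);
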
servers/src/grin/sync/state_sync.rs (4 changes: 2 additions & 2 deletions)
@@ -171,7 +171,7 @@ impl StateSync {
let desegmenter = self.chain.desegmenter(&archive_header).unwrap();
// All segments in, validate
if let Some(d) = desegmenter.read().as_ref() {
- if d.check_progress(self.sync_state.clone()) {
+ if let Ok(true) = d.check_progress(self.sync_state.clone()) {
if let Err(e) = d.check_update_leaf_set_state() {
error!("error updating PIBD leaf set: {}", e);
self.sync_state.update_pibd_progress(
@@ -263,7 +263,7 @@ impl StateSync {
// requests we want to send to peers
let mut next_segment_ids = vec![];
if let Some(d) = desegmenter.write().as_mut() {
- if d.check_progress(self.sync_state.clone()) {
+ if let Ok(true) = d.check_progress(self.sync_state.clone()) {
return true;
}
// Figure out the next segments we need
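
One caveat on the pattern above: if let Ok(true) treats a failed progress check the same as incomplete progress, silently discarding the error. A sketch of an alternative that logs the failure first (the logging macro and surrounding names are assumed):

let done = match d.check_progress(self.sync_state.clone()) {
    Ok(done) => done,
    Err(e) => {
        // Surface the underlying DB error rather than dropping it,
        // then fall back to treating progress as incomplete.
        warn!("pibd: check_progress failed: {}", e);
        false
    }
};
if done {
    return true;
}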