Merge #1267: Simplify Esplora::update_local_chain and add tests
c5afbaa ci: update zstd-sys version to work with MSRV 1.63 (Steve Myers)
929b5dd refactor(esplora): better variable naming and docs (志宇)
216648b chore(esplora): Clarify consistency guarantees (LLFourn)
63fa710 fix(esplora): reuse returned height instead of zipping (志宇)
6f824cf test(esplora): introduce test cases for `update_local_chain` (志宇)
f05e850 feat(esplora): greatly simplify `update_local_chain` (志宇)

Pull request description:

  Fixes #1199

  ### Description

  After a second look at the `update_local_chain` implementations, it was clear that they were overcomplicated. This PR simplifies the `EsploraExt::update_local_chain` method(s) of the `bdk_esplora` crate and adds a whole bunch of tests. A rough sketch of how the (unchanged) public API is driven is shown below.
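
  A hedged usage sketch for context: the helper `sync_chain`, the `relevant_heights` argument, and the use of `anyhow` are illustrative choices for this sketch only, not part of this PR. It also assumes `LocalChain::tip` returns a `CheckPoint` and that the involved error types convert into `anyhow::Error`.

```rust
use bdk_chain::local_chain::LocalChain;
use bdk_esplora::EsploraExt;

/// Illustrative helper: bring `chain` up to date against an Esplora backend.
/// `relevant_heights` would typically be the confirmation heights of transactions
/// the wallet already knows about.
fn sync_chain(
    client: &impl EsploraExt,
    chain: &mut LocalChain,
    relevant_heights: impl IntoIterator<Item = u32>,
) -> anyhow::Result<()> {
    // Ask Esplora for a chain update anchored at our current tip.
    let update = client.update_local_chain(chain.tip(), relevant_heights)?;
    // The update is built so that it connects to the local chain, so applying it
    // should not fail in the absence of a deep re-org.
    chain.apply_update(update)?;
    Ok(())
}
```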

  ### Notes to the reviewers

  The description of #1199 is very brief; however, @danielabrozzoni pointed out potentially problematic logic around `ASSUME_FINAL_DEPTH`. The logic was indeed convoluted, and it did not need to be. This PR removes the need for `ASSUME_FINAL_DEPTH` altogether.

  ### Changelog notice

  Fixed
  - Simplified `EsploraExt::update_local_chain` logic.

  ### Checklists

  #### All Submissions:

  * [x] I've signed all my commits
  * [x] I followed the [contribution guidelines](https://github.com/bitcoindevkit/bdk/blob/master/CONTRIBUTING.md)
  * [x] I ran `cargo fmt` and `cargo clippy` before committing

  #### Bugfixes:

  ~* [ ] This pull request breaks the existing API~
  ~* [ ] I've added tests to reproduce the issue which are now passing~ (there are now lots of tests though)
  * [x] I'm linking the issue being fixed by this PR

ACKs for top commit:
  evanlinjin:
    ACK c5afbaa
  notmandatory:
    utACK c5afbaa

Tree-SHA512: f588493a4643f0f68d3181f27adf91793d4e336be6e853a26289e0916ed83169e1067260b75e627f190842f691ec095e668c0b799ca80e7da2849dd28de32754
notmandatory committed Jan 31, 2024
2 parents 070fffb + c5afbaa commit 8ad52f7
Showing 5 changed files with 297 additions and 168 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/cont_integration.yml
@@ -30,7 +30,7 @@ jobs:
- name: Pin dependencies for MSRV
if: matrix.rust.version == '1.63.0'
run: |
cargo update -p zip --precise "0.6.2"
cargo update -p zstd-sys --precise "2.0.8+zstd.1.5.5"
cargo update -p time --precise "0.3.20"
cargo update -p jobserver --precise "0.1.26"
cargo update -p home --precise "0.5.5"
127 changes: 45 additions & 82 deletions crates/esplora/src/async_ext.rs
Expand Up @@ -2,14 +2,14 @@ use async_trait::async_trait;
use bdk_chain::collections::btree_map;
use bdk_chain::{
bitcoin::{BlockHash, OutPoint, ScriptBuf, Txid},
collections::{BTreeMap, BTreeSet},
collections::BTreeMap,
local_chain::{self, CheckPoint},
BlockId, ConfirmationTimeHeightAnchor, TxGraph,
};
use esplora_client::{Error, TxStatus};
use futures::{stream::FuturesOrdered, TryStreamExt};

use crate::{anchor_from_status, ASSUME_FINAL_DEPTH};
use crate::anchor_from_status;

/// Trait to extend the functionality of [`esplora_client::AsyncClient`].
///
@@ -26,6 +26,12 @@ pub trait EsploraAsyncExt {
///
/// The result of this method can be applied to [`LocalChain::apply_update`].
///
/// ## Consistency
///
/// The chain update returned is guaranteed to be consistent as long as there is no *large* re-org
/// during the call. The size of re-org we can tolerate is server-dependent, but will be at
/// least 10 blocks.
///
/// [`LocalChain`]: bdk_chain::local_chain::LocalChain
/// [`LocalChain::tip`]: bdk_chain::local_chain::LocalChain::tip
/// [`LocalChain::apply_update`]: bdk_chain::local_chain::LocalChain::apply_update
@@ -85,103 +91,60 @@ impl EsploraAsyncExt for esplora_client::AsyncClient {
local_tip: CheckPoint,
request_heights: impl IntoIterator<IntoIter = impl Iterator<Item = u32> + Send> + Send,
) -> Result<local_chain::Update, Error> {
let request_heights = request_heights.into_iter().collect::<BTreeSet<_>>();
let new_tip_height = self.get_height().await?;

// atomically fetch blocks from esplora
let mut fetched_blocks = {
let heights = (0..=new_tip_height).rev();
let hashes = self
.get_blocks(Some(new_tip_height))
.await?
.into_iter()
.map(|b| b.id);
heights.zip(hashes).collect::<BTreeMap<u32, BlockHash>>()
};
// Fetch latest N (server dependent) blocks from Esplora. The server guarantees these are
// consistent.
let mut fetched_blocks = self
.get_blocks(None)
.await?
.into_iter()
.map(|b| (b.time.height, b.id))
.collect::<BTreeMap<u32, BlockHash>>();
let new_tip_height = fetched_blocks
.keys()
.last()
.copied()
.expect("must have atleast one block");

// fetch heights that the caller is interested in
// Fetch blocks of heights that the caller is interested in, skipping blocks that are
// already fetched when constructing `fetched_blocks`.
for height in request_heights {
// do not fetch blocks higher than remote tip
if height > new_tip_height {
continue;
}
// only fetch what is missing
if let btree_map::Entry::Vacant(entry) = fetched_blocks.entry(height) {
let hash = self.get_block_hash(height).await?;
entry.insert(hash);
// ❗The return value of `get_block_hash` is not strictly guaranteed to be consistent
// with the chain at the time of `get_blocks` above (there could have been a deep
// re-org). Since `get_blocks` returns 10 (or so) blocks we are assuming that it's
// not possible to have a re-org deeper than that.
entry.insert(self.get_block_hash(height).await?);
}
}

// find the earliest point of agreement between local chain and fetched chain
let earliest_agreement_cp = {
let mut earliest_agreement_cp = Option::<CheckPoint>::None;

let local_tip_height = local_tip.height();
for local_cp in local_tip.iter() {
let local_block = local_cp.block_id();

// the updated hash (block hash at this height after the update), can either be:
// 1. a block that already existed in `fetched_blocks`
// 2. a block that exists locally and at least has a depth of ASSUME_FINAL_DEPTH
// 3. otherwise we can freshly fetch the block from remote, which is safe as it
// is guaranteed that this would be at or below ASSUME_FINAL_DEPTH from the
// remote tip
let updated_hash = match fetched_blocks.entry(local_block.height) {
btree_map::Entry::Occupied(entry) => *entry.get(),
btree_map::Entry::Vacant(entry) => *entry.insert(
if local_tip_height - local_block.height >= ASSUME_FINAL_DEPTH {
local_block.hash
} else {
self.get_block_hash(local_block.height).await?
},
),
};

// since we may introduce blocks below the point of agreement, we cannot break
// here unconditionally - we only break if we guarantee there are no new heights
// below our current local checkpoint
if local_block.hash == updated_hash {
earliest_agreement_cp = Some(local_cp);

let first_new_height = *fetched_blocks
.keys()
.next()
.expect("must have at least one new block");
if first_new_height >= local_block.height {
break;
}
}
// Ensure `fetched_blocks` can create an update that connects with the original chain by
// finding a "Point of Agreement".
for (height, local_hash) in local_tip.iter().map(|cp| (cp.height(), cp.hash())) {
if height > new_tip_height {
continue;
}

earliest_agreement_cp
};

let tip = {
// first checkpoint to use for the update chain
let first_cp = match earliest_agreement_cp {
Some(cp) => cp,
None => {
let (&height, &hash) = fetched_blocks
.iter()
.next()
.expect("must have at least one new block");
CheckPoint::new(BlockId { height, hash })
let fetched_hash = match fetched_blocks.entry(height) {
btree_map::Entry::Occupied(entry) => *entry.get(),
btree_map::Entry::Vacant(entry) => {
*entry.insert(self.get_block_hash(height).await?)
}
};
// transform fetched chain into the update chain
fetched_blocks
// we exclude anything at or below the first cp of the update chain otherwise
// building the chain will fail
.split_off(&(first_cp.height() + 1))
.into_iter()
.map(|(height, hash)| BlockId { height, hash })
.fold(first_cp, |prev_cp, block| {
prev_cp.push(block).expect("must extend checkpoint")
})
};

// We have found point of agreement so the update will connect!
if fetched_hash == local_hash {
break;
}
}

Ok(local_chain::Update {
tip,
tip: CheckPoint::from_block_ids(fetched_blocks.into_iter().map(BlockId::from))
.expect("must be in height order"),
introduce_older_blocks: true,
})
}
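
The core of the rewrite above is the loop that searches for a "point of agreement" between the fetched blocks and the local checkpoints. Below is a network-free distillation of that loop, with `Height`, `Hash`, and `fetch_hash` standing in for `u32`, `BlockHash`, and `get_block_hash`; this is an illustration written for this summary, not code from the PR.

```rust
use std::collections::BTreeMap;

// Stand-in types so the sketch needs no bitcoin or network dependencies.
type Height = u32;
type Hash = [u8; 32];

/// Walk the local checkpoints from the tip downwards and stop at the first height where
/// the locally known hash matches the remotely fetched one (the "point of agreement").
/// Every hash inspected on the way down is inserted into `fetched_blocks`, which is what
/// guarantees that the resulting update connects to the local chain.
fn extend_until_point_of_agreement(
    fetched_blocks: &mut BTreeMap<Height, Hash>,
    local_checkpoints: impl IntoIterator<Item = (Height, Hash)>, // tip first, descending
    new_tip_height: Height,
    mut fetch_hash: impl FnMut(Height) -> Hash, // stand-in for `get_block_hash`
) {
    for (height, local_hash) in local_checkpoints {
        // Local blocks above the remote tip cannot appear in the update; skip them.
        if height > new_tip_height {
            continue;
        }
        // Reuse an already-fetched hash where possible, otherwise ask the server once.
        let fetched_hash = *fetched_blocks
            .entry(height)
            .or_insert_with(|| fetch_hash(height));
        // Hashes agree, so the update will connect to the local chain; we are done.
        if fetched_hash == local_hash {
            break;
        }
    }
}
```

Because the walk always starts from the freshly fetched tip and only descends until agreement, no `ASSUME_FINAL_DEPTH` heuristic is needed. The blocking implementation below follows the same pattern.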
125 changes: 43 additions & 82 deletions crates/esplora/src/blocking_ext.rs
@@ -1,15 +1,15 @@
use std::thread::JoinHandle;

use bdk_chain::collections::btree_map;
use bdk_chain::collections::{BTreeMap, BTreeSet};
use bdk_chain::collections::BTreeMap;
use bdk_chain::{
bitcoin::{BlockHash, OutPoint, ScriptBuf, Txid},
local_chain::{self, CheckPoint},
BlockId, ConfirmationTimeHeightAnchor, TxGraph,
};
use esplora_client::{Error, TxStatus};

use crate::{anchor_from_status, ASSUME_FINAL_DEPTH};
use crate::anchor_from_status;

/// Trait to extend the functionality of [`esplora_client::BlockingClient`].
///
@@ -24,6 +24,12 @@ pub trait EsploraExt {
///
/// The result of this method can be applied to [`LocalChain::apply_update`].
///
/// ## Consistency
///
/// The chain update returned is guaranteed to be consistent as long as there is no *large* re-org
/// during the call. The size of re-org we can tolerate is server-dependent, but will be at
/// least 10 blocks.
///
/// [`LocalChain`]: bdk_chain::local_chain::LocalChain
/// [`LocalChain::tip`]: bdk_chain::local_chain::LocalChain::tip
/// [`LocalChain::apply_update`]: bdk_chain::local_chain::LocalChain::apply_update
@@ -78,102 +84,57 @@ impl EsploraExt for esplora_client::BlockingClient {
local_tip: CheckPoint,
request_heights: impl IntoIterator<Item = u32>,
) -> Result<local_chain::Update, Error> {
let request_heights = request_heights.into_iter().collect::<BTreeSet<_>>();
let new_tip_height = self.get_height()?;

// atomically fetch blocks from esplora
let mut fetched_blocks = {
let heights = (0..=new_tip_height).rev();
let hashes = self
.get_blocks(Some(new_tip_height))?
.into_iter()
.map(|b| b.id);
heights.zip(hashes).collect::<BTreeMap<u32, BlockHash>>()
};
// Fetch latest N (server dependent) blocks from Esplora. The server guarantees these are
// consistent.
let mut fetched_blocks = self
.get_blocks(None)?
.into_iter()
.map(|b| (b.time.height, b.id))
.collect::<BTreeMap<u32, BlockHash>>();
let new_tip_height = fetched_blocks
.keys()
.last()
.copied()
.expect("must atleast have one block");

// fetch heights that the caller is interested in
// Fetch blocks of heights that the caller is interested in, skipping blocks that are
// already fetched when constructing `fetched_blocks`.
for height in request_heights {
// do not fetch blocks higher than remote tip
if height > new_tip_height {
continue;
}
// only fetch what is missing
if let btree_map::Entry::Vacant(entry) = fetched_blocks.entry(height) {
let hash = self.get_block_hash(height)?;
entry.insert(hash);
// ❗The return value of `get_block_hash` is not strictly guaranteed to be consistent
// with the chain at the time of `get_blocks` above (there could have been a deep
// re-org). Since `get_blocks` returns 10 (or so) blocks we are assuming that it's
// not possible to have a re-org deeper than that.
entry.insert(self.get_block_hash(height)?);
}
}

// find the earliest point of agreement between local chain and fetched chain
let earliest_agreement_cp = {
let mut earliest_agreement_cp = Option::<CheckPoint>::None;

let local_tip_height = local_tip.height();
for local_cp in local_tip.iter() {
let local_block = local_cp.block_id();

// the updated hash (block hash at this height after the update), can either be:
// 1. a block that already existed in `fetched_blocks`
// 2. a block that exists locally and at least has a depth of ASSUME_FINAL_DEPTH
// 3. otherwise we can freshly fetch the block from remote, which is safe as it
// is guaranteed that this would be at or below ASSUME_FINAL_DEPTH from the
// remote tip
let updated_hash = match fetched_blocks.entry(local_block.height) {
btree_map::Entry::Occupied(entry) => *entry.get(),
btree_map::Entry::Vacant(entry) => *entry.insert(
if local_tip_height - local_block.height >= ASSUME_FINAL_DEPTH {
local_block.hash
} else {
self.get_block_hash(local_block.height)?
},
),
};

// since we may introduce blocks below the point of agreement, we cannot break
// here unconditionally - we only break if we guarantee there are no new heights
// below our current local checkpoint
if local_block.hash == updated_hash {
earliest_agreement_cp = Some(local_cp);

let first_new_height = *fetched_blocks
.keys()
.next()
.expect("must have at least one new block");
if first_new_height >= local_block.height {
break;
}
}
// Ensure `fetched_blocks` can create an update that connects with the original chain by
// finding a "Point of Agreement".
for (height, local_hash) in local_tip.iter().map(|cp| (cp.height(), cp.hash())) {
if height > new_tip_height {
continue;
}

earliest_agreement_cp
};

let tip = {
// first checkpoint to use for the update chain
let first_cp = match earliest_agreement_cp {
Some(cp) => cp,
None => {
let (&height, &hash) = fetched_blocks
.iter()
.next()
.expect("must have at least one new block");
CheckPoint::new(BlockId { height, hash })
}
let fetched_hash = match fetched_blocks.entry(height) {
btree_map::Entry::Occupied(entry) => *entry.get(),
btree_map::Entry::Vacant(entry) => *entry.insert(self.get_block_hash(height)?),
};
// transform fetched chain into the update chain
fetched_blocks
// we exclude anything at or below the first cp of the update chain otherwise
// building the chain will fail
.split_off(&(first_cp.height() + 1))
.into_iter()
.map(|(height, hash)| BlockId { height, hash })
.fold(first_cp, |prev_cp, block| {
prev_cp.push(block).expect("must extend checkpoint")
})
};

// We have found point of agreement so the update will connect!
if fetched_hash == local_hash {
break;
}
}

Ok(local_chain::Update {
tip,
tip: CheckPoint::from_block_ids(fetched_blocks.into_iter().map(BlockId::from))
.expect("must be in height order"),
introduce_older_blocks: true,
})
}
2 changes: 0 additions & 2 deletions crates/esplora/src/lib.rs
@@ -31,8 +31,6 @@ mod async_ext;
#[cfg(feature = "async")]
pub use async_ext::*;

const ASSUME_FINAL_DEPTH: u32 = 15;

fn anchor_from_status(status: &TxStatus) -> Option<ConfirmationTimeHeightAnchor> {
if let TxStatus {
block_height: Some(height),