diff --git a/.gitignore b/.gitignore
index 95285763a..f3ee3a8e4 100644
--- a/.gitignore
+++ b/.gitignore
@@ -7,3 +7,5 @@ Cargo.lock
 
 # Example persisted files.
 *.db
+bdk_wallet_esplora_async_example.dat
+bdk_wallet_esplora_blocking_example.dat
diff --git a/crates/bdk/src/wallet/coin_selection.rs b/crates/bdk/src/wallet/coin_selection.rs
index 5122a1493..f2e4324cb 100644
--- a/crates/bdk/src/wallet/coin_selection.rs
+++ b/crates/bdk/src/wallet/coin_selection.rs
@@ -219,8 +219,6 @@ impl CoinSelectionResult {
 pub trait CoinSelectionAlgorithm: core::fmt::Debug {
     /// Perform the coin selection
     ///
-    /// - `database`: a reference to the wallet's database that can be used to lookup additional
-    ///               details for a specific UTXO
     /// - `required_utxos`: the utxos that must be spent regardless of `target_amount` with their
     ///                     weight cost
     /// - `optional_utxos`: the remaining available utxos to satisfy `target_amount` with their
diff --git a/crates/bdk/src/wallet/error.rs b/crates/bdk/src/wallet/error.rs
index 46cf8ef3c..f94c645dd 100644
--- a/crates/bdk/src/wallet/error.rs
+++ b/crates/bdk/src/wallet/error.rs
@@ -46,7 +46,7 @@ impl std::error::Error for MiniscriptPsbtError {}
 #[derive(Debug)]
 /// Error returned from [`TxBuilder::finish`]
 ///
-/// [`TxBuilder::finish`]: crate::wallet::tx_builder::TxBuilder::finish
+/// [`TxBuilder::finish`]: super::tx_builder::TxBuilder::finish
 pub enum CreateTxError<P> {
     /// There was a problem with the descriptors passed in
     Descriptor(DescriptorError),
@@ -248,9 +248,7 @@ impl<P> From<coin_selection::Error> for CreateTxError<P> {
 impl<P: core::fmt::Display + core::fmt::Debug> std::error::Error for CreateTxError<P> {}
 
 #[derive(Debug)]
-/// Error returned from [`Wallet::build_fee_bump`]
-///
-/// [`Wallet::build_fee_bump`]: super::Wallet::build_fee_bump
+/// Error returned from [`crate::Wallet::build_fee_bump`]
 pub enum BuildFeeBumpError {
     /// Happens when trying to spend an UTXO that is not in the internal database
     UnknownUtxo(OutPoint),
diff --git a/crates/bdk/src/wallet/mod.rs b/crates/bdk/src/wallet/mod.rs
index bd33a9047..2fd259d79 100644
--- a/crates/bdk/src/wallet/mod.rs
+++ b/crates/bdk/src/wallet/mod.rs
@@ -20,6 +20,7 @@ use alloc::{
     vec::Vec,
 };
 pub use bdk_chain::keychain::Balance;
+use bdk_chain::spk_client::{FullScanRequest, SyncRequest};
 use bdk_chain::{
     indexed_tx_graph,
     keychain::{self, KeychainTxOutIndex},
@@ -28,7 +29,7 @@ use bdk_chain::{
     },
     tx_graph::{CanonicalTx, TxGraph},
     Append, BlockId, ChainPosition, ConfirmationTime, ConfirmationTimeHeightAnchor, FullTxOut,
-    IndexedTxGraph, Persist, PersistBackend,
+    IndexedTxGraph, Persist, PersistBackend, SpkIterator,
 };
 use bitcoin::secp256k1::{All, Secp256k1};
 use bitcoin::sighash::{EcdsaSighashType, TapSighashType};
@@ -42,6 +43,7 @@ use core::fmt;
 use core::ops::Deref;
 use descriptor::error::Error as DescriptorError;
 use miniscript::psbt::{PsbtExt, PsbtInputExt, PsbtInputSatisfier};
+use miniscript::{Descriptor, DescriptorPublicKey};
 
 use bdk_chain::tx_graph::CalculateFeeError;
 
@@ -942,7 +944,7 @@ impl<D> Wallet<D> {
     /// # let mut wallet: Wallet<()> = todo!();
     /// # let txid:Txid = todo!();
     /// let tx = wallet.get_tx(txid).expect("transaction").tx_node.tx;
-    /// let fee = wallet.calculate_fee(tx).expect("fee");
+    /// let fee = wallet.calculate_fee(&tx).expect("fee");
     /// ```
     ///
     /// ```rust, no_run
@@ -973,7 +975,7 @@ impl<D> Wallet<D> {
     /// # let mut wallet: Wallet<()> = todo!();
     /// # let txid:Txid = todo!();
     /// let tx = wallet.get_tx(txid).expect("transaction").tx_node.tx;
-    /// let fee_rate = wallet.calculate_fee_rate(tx).expect("fee rate");
+    /// let fee_rate = wallet.calculate_fee_rate(&tx).expect("fee rate");
     /// ```
     ///
     /// ```rust, no_run
@@ -981,8 +983,8 @@ impl<D> Wallet<D> {
     /// # use bdk::Wallet;
     /// # let mut wallet: Wallet<()> = todo!();
     /// # let mut psbt: PartiallySignedTransaction = todo!();
-    /// let tx = &psbt.clone().extract_tx();
-    /// let fee_rate = wallet.calculate_fee_rate(tx).expect("fee rate");
+    /// let tx = psbt.clone().extract_tx();
+    /// let fee_rate = wallet.calculate_fee_rate(&tx).expect("fee rate");
     /// ```
     /// [`insert_txout`]: Self::insert_txout
     pub fn calculate_fee_rate(&self, tx: &Transaction) -> Result<FeeRate, CalculateFeeError> {
@@ -1003,8 +1005,8 @@ impl<D> Wallet<D> {
     /// # use bdk::Wallet;
     /// # let mut wallet: Wallet<()> = todo!();
     /// # let txid:Txid = todo!();
-    /// let tx = wallet.get_tx(txid).expect("transaction").tx_node.tx;
-    /// let (sent, received) = wallet.sent_and_received(tx);
+    /// let tx = wallet.get_tx(txid).expect("tx exists").tx_node.tx;
+    /// let (sent, received) = wallet.sent_and_received(&tx);
     /// ```
     ///
     /// ```rust, no_run
@@ -1065,7 +1067,7 @@ impl<D> Wallet<D> {
     pub fn get_tx(
         &self,
         txid: Txid,
-    ) -> Option<CanonicalTx<'_, Transaction, ConfirmationTimeHeightAnchor>> {
+    ) -> Option<CanonicalTx<'_, Arc<Transaction>, ConfirmationTimeHeightAnchor>> {
         let graph = self.indexed_graph.graph();
 
         Some(CanonicalTx {
@@ -1128,18 +1130,13 @@ impl<D> Wallet<D> {
                 // anchor tx to checkpoint with lowest height that is >= position's height
                 let anchor = self
                     .chain
-                    .blocks()
-                    .range(height..)
-                    .next()
+                    .query_from(height)
                     .ok_or(InsertTxError::ConfirmationHeightCannotBeGreaterThanTip {
                         tip_height: self.chain.tip().height(),
                         tx_height: height,
                     })
-                    .map(|(&anchor_height, &hash)| ConfirmationTimeHeightAnchor {
-                        anchor_block: BlockId {
-                            height: anchor_height,
-                            hash,
-                        },
+                    .map(|anchor_cp| ConfirmationTimeHeightAnchor {
+                        anchor_block: anchor_cp.block_id(),
                         confirmation_height: height,
                         confirmation_time: time,
                     })?;
@@ -1167,7 +1164,8 @@ impl<D> Wallet<D> {
     /// Iterate over the transactions in the wallet.
     pub fn transactions(
         &self,
-    ) -> impl Iterator<Item = CanonicalTx<'_, Transaction, ConfirmationTimeHeightAnchor>> + '_ {
+    ) -> impl Iterator<Item = CanonicalTx<'_, Arc<Transaction>, ConfirmationTimeHeightAnchor>> + '_
+    {
         self.indexed_graph
             .graph()
             .list_chain_txs(&self.chain, self.chain.tip().block_id())
@@ -1670,6 +1668,7 @@ impl<D> Wallet<D> {
         let mut tx = graph
             .get_tx(txid)
             .ok_or(BuildFeeBumpError::TransactionNotFound(txid))?
+            .as_ref()
             .clone();
 
         let pos = graph
@@ -1739,7 +1738,7 @@ impl<D> Wallet<D> {
                                 sequence: Some(txin.sequence),
                                 psbt_input: Box::new(psbt::Input {
                                     witness_utxo: Some(txout.clone()),
-                                    non_witness_utxo: Some(prev_tx.clone()),
+                                    non_witness_utxo: Some(prev_tx.as_ref().clone()),
                                     ..Default::default()
                                 }),
                             },
@@ -2295,7 +2294,7 @@ impl<D> Wallet<D> {
                 psbt_input.witness_utxo = Some(prev_tx.output[prev_output.vout as usize].clone());
             }
             if !desc.is_taproot() && (!desc.is_witness() || !only_witness_utxo) {
-                psbt_input.non_witness_utxo = Some(prev_tx.clone());
+                psbt_input.non_witness_utxo = Some(prev_tx.as_ref().clone());
             }
         }
         Ok(psbt_input)
@@ -2500,6 +2499,31 @@ impl<D> Wallet<D> {
             .batch_insert_relevant_unconfirmed(unconfirmed_txs);
         self.persist.stage(ChangeSet::from(indexed_graph_changeset));
     }
+
+    /// Create a [`SyncRequest`] for this wallet for all revealed spks.
+    ///
+    /// This is the first step when performing a spk-based wallet sync: the returned [`SyncRequest`]
+    /// collects all revealed script pubkeys from the wallet's keychains, which are needed to start a
+    /// blockchain sync with a spk-based blockchain client.
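+    ///
+    /// A minimal sketch of the intended flow, with the blockchain-client side elided since it is
+    /// not part of this crate:
+    ///
+    /// ```rust, no_run
+    /// # use bdk::Wallet;
+    /// # let wallet: Wallet<()> = todo!();
+    /// let mut request = wallet.sync_revealed_spks_request();
+    /// // Optionally log each revealed spk as a blockchain client drains the request.
+    /// request.inspect_spks(|index, spk| println!("syncing spk {}: {:?}", index, spk));
+    /// ```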
+    pub fn sync_revealed_spks_request(&self) -> SyncRequest {
+        let chain_tip = self.local_chain().tip();
+        self.spk_index().sync_revealed_spks_request(chain_tip)
+    }
+
+    /// Create a [`FullScanRequest`] for this wallet.
+    ///
+    /// This is the first step when performing a spk-based wallet full scan: the returned
+    /// [`FullScanRequest`] collects iterators over the wallet's keychain script pubkeys, which are
+    /// needed to start a blockchain full scan with a spk-based blockchain client.
+    ///
+    /// This operation is generally only used when importing or restoring a previously used wallet
+    /// in which the list of used scripts is not known.
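+    ///
+    /// A minimal sketch, again with the blockchain-client side elided:
+    ///
+    /// ```rust, no_run
+    /// # use bdk::Wallet;
+    /// # let wallet: Wallet<()> = todo!();
+    /// let mut request = wallet.full_scan_request();
+    /// // Optionally log each spk as a blockchain client walks the keychains.
+    /// request.inspect_spks(|keychain, index, _spk| println!("scanning {:?} index {}", keychain, index));
+    /// ```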
+    pub fn full_scan_request(
+        &self,
+    ) -> FullScanRequest<KeychainKind, SpkIterator<Descriptor<DescriptorPublicKey>>> {
+        let chain_tip = self.local_chain().tip();
+        self.spk_index().full_scan_request(chain_tip)
+    }
 }
 
 impl<D> AsRef<bdk_chain::tx_graph::TxGraph<ConfirmationTimeHeightAnchor>> for Wallet<D> {
diff --git a/crates/bdk/tests/wallet.rs b/crates/bdk/tests/wallet.rs
index e367b0bb5..b31b44bb2 100644
--- a/crates/bdk/tests/wallet.rs
+++ b/crates/bdk/tests/wallet.rs
@@ -208,12 +208,12 @@ fn test_get_funded_wallet_sent_and_received() {
 
     let mut tx_amounts: Vec<(Txid, (u64, u64))> = wallet
         .transactions()
-        .map(|ct| (ct.tx_node.txid, wallet.sent_and_received(ct.tx_node.tx)))
+        .map(|ct| (ct.tx_node.txid, wallet.sent_and_received(&ct.tx_node)))
         .collect();
     tx_amounts.sort_by(|a1, a2| a1.0.cmp(&a2.0));
 
     let tx = wallet.get_tx(txid).expect("transaction").tx_node.tx;
-    let (sent, received) = wallet.sent_and_received(tx);
+    let (sent, received) = wallet.sent_and_received(&tx);
 
     // The funded wallet contains a tx with a 76_000 sats input and two outputs, one spending 25_000
     // to a foreign address and one returning 50_000 back to the wallet as change. The remaining 1000
@@ -227,7 +227,7 @@ fn test_get_funded_wallet_tx_fees() {
     let (wallet, txid) = get_funded_wallet(get_test_wpkh());
 
     let tx = wallet.get_tx(txid).expect("transaction").tx_node.tx;
-    let tx_fee = wallet.calculate_fee(tx).expect("transaction fee");
+    let tx_fee = wallet.calculate_fee(&tx).expect("transaction fee");
 
     // The funded wallet contains a tx with a 76_000 sats input and two outputs, one spending 25_000
     // to a foreign address and one returning 50_000 back to the wallet as change. The remaining 1000
@@ -240,7 +240,9 @@ fn test_get_funded_wallet_tx_fee_rate() {
     let (wallet, txid) = get_funded_wallet(get_test_wpkh());
 
     let tx = wallet.get_tx(txid).expect("transaction").tx_node.tx;
-    let tx_fee_rate = wallet.calculate_fee_rate(tx).expect("transaction fee rate");
+    let tx_fee_rate = wallet
+        .calculate_fee_rate(&tx)
+        .expect("transaction fee rate");
 
     // The funded wallet contains a tx with a 76_000 sats input and two outputs, one spending 25_000
     // to a foreign address and one returning 50_000 back to the wallet as change. The remaining 1000
@@ -1307,7 +1309,7 @@ fn test_add_foreign_utxo_where_outpoint_doesnt_match_psbt_input() {
             .add_foreign_utxo(
                 utxo2.outpoint,
                 psbt::Input {
-                    non_witness_utxo: Some(tx1),
+                    non_witness_utxo: Some(tx1.as_ref().clone()),
                     ..Default::default()
                 },
                 satisfaction_weight
@@ -1320,7 +1322,7 @@ fn test_add_foreign_utxo_where_outpoint_doesnt_match_psbt_input() {
             .add_foreign_utxo(
                 utxo2.outpoint,
                 psbt::Input {
-                    non_witness_utxo: Some(tx2),
+                    non_witness_utxo: Some(tx2.as_ref().clone()),
                     ..Default::default()
                 },
                 satisfaction_weight
@@ -1384,7 +1386,7 @@ fn test_add_foreign_utxo_only_witness_utxo() {
         let mut builder = builder.clone();
         let tx2 = wallet2.get_tx(txid2).unwrap().tx_node.tx;
         let psbt_input = psbt::Input {
-            non_witness_utxo: Some(tx2.clone()),
+            non_witness_utxo: Some(tx2.as_ref().clone()),
             ..Default::default()
         };
         builder
@@ -3050,7 +3052,8 @@ fn test_taproot_sign_using_non_witness_utxo() {
     let mut psbt = builder.finish().unwrap();
 
     psbt.inputs[0].witness_utxo = None;
-    psbt.inputs[0].non_witness_utxo = Some(wallet.get_tx(prev_txid).unwrap().tx_node.tx.clone());
+    psbt.inputs[0].non_witness_utxo =
+        Some(wallet.get_tx(prev_txid).unwrap().tx_node.as_ref().clone());
     assert!(
         psbt.inputs[0].non_witness_utxo.is_some(),
         "Previous tx should be present in the database"
diff --git a/crates/bitcoind_rpc/tests/test_emitter.rs b/crates/bitcoind_rpc/tests/test_emitter.rs
index 2161db0df..97946da99 100644
--- a/crates/bitcoind_rpc/tests/test_emitter.rs
+++ b/crates/bitcoind_rpc/tests/test_emitter.rs
@@ -57,12 +57,15 @@ pub fn test_sync_local_chain() -> anyhow::Result<()> {
     }
 
     assert_eq!(
-        local_chain.blocks(),
-        &exp_hashes
+        local_chain
+            .iter_checkpoints()
+            .map(|cp| (cp.height(), cp.hash()))
+            .collect::<BTreeSet<_>>(),
+        exp_hashes
             .iter()
             .enumerate()
             .map(|(i, hash)| (i as u32, *hash))
-            .collect(),
+            .collect::<BTreeSet<_>>(),
         "final local_chain state is unexpected",
     );
 
@@ -110,12 +113,15 @@ pub fn test_sync_local_chain() -> anyhow::Result<()> {
     }
 
     assert_eq!(
-        local_chain.blocks(),
-        &exp_hashes
+        local_chain
+            .iter_checkpoints()
+            .map(|cp| (cp.height(), cp.hash()))
+            .collect::<BTreeSet<_>>(),
+        exp_hashes
             .iter()
             .enumerate()
             .map(|(i, hash)| (i as u32, *hash))
-            .collect(),
+            .collect::<BTreeSet<_>>(),
         "final local_chain state is unexpected after reorg",
     );
 
diff --git a/crates/chain/Cargo.toml b/crates/chain/Cargo.toml
index 0a77708a1..6c5a59915 100644
--- a/crates/chain/Cargo.toml
+++ b/crates/chain/Cargo.toml
@@ -15,7 +15,7 @@ readme = "README.md"
 [dependencies]
 # For no-std, remember to enable the bitcoin/no-std feature
 bitcoin = { version = "0.30.0", default-features = false }
-serde_crate = { package = "serde", version = "1", optional = true, features = ["derive"] }
+serde_crate = { package = "serde", version = "1", optional = true, features = ["derive", "rc"] }
 
 # Use hashbrown as a feature flag to have HashSet and HashMap from it.
 hashbrown = { version = "0.9.1", optional = true, features = ["serde"] }
diff --git a/crates/chain/src/keychain/txout_index.rs b/crates/chain/src/keychain/txout_index.rs
index 79f98fad2..8cb8b3a0c 100644
--- a/crates/chain/src/keychain/txout_index.rs
+++ b/crates/chain/src/keychain/txout_index.rs
@@ -5,12 +5,15 @@ use crate::{
     spk_iter::BIP32_MAX_INDEX,
     SpkIterator, SpkTxOutIndex,
 };
-use bitcoin::{OutPoint, Script, Transaction, TxOut, Txid};
+use alloc::vec::Vec;
+use bitcoin::{OutPoint, Script, ScriptBuf, Transaction, TxOut, Txid};
 use core::{
     fmt::Debug,
     ops::{Bound, RangeBounds},
 };
 
+use crate::local_chain::CheckPoint;
+use crate::spk_client::{FullScanRequest, SyncRequest};
 use crate::Append;
 
 const DEFAULT_LOOKAHEAD: u32 = 25;
@@ -110,13 +113,13 @@ pub struct KeychainTxOutIndex<K> {
     lookahead: u32,
 }
 
-impl<K> Default for KeychainTxOutIndex<K> {
+impl<K: Clone + Ord + Debug + Send> Default for KeychainTxOutIndex<K> {
     fn default() -> Self {
         Self::new(DEFAULT_LOOKAHEAD)
     }
 }
 
-impl<K: Clone + Ord + Debug> Indexer for KeychainTxOutIndex<K> {
+impl<K: Clone + Ord + Debug + Send + 'static> Indexer for KeychainTxOutIndex<K> {
     type ChangeSet = super::ChangeSet<K>;
 
     fn index_txout(&mut self, outpoint: OutPoint, txout: &TxOut) -> Self::ChangeSet {
@@ -134,20 +137,20 @@ impl<K: Clone + Ord + Debug> Indexer for KeychainTxOutIndex<K> {
         changeset
     }
 
-    fn initial_changeset(&self) -> Self::ChangeSet {
-        super::ChangeSet(self.last_revealed.clone())
-    }
-
     fn apply_changeset(&mut self, changeset: Self::ChangeSet) {
         self.apply_changeset(changeset)
     }
 
+    fn initial_changeset(&self) -> Self::ChangeSet {
+        super::ChangeSet(self.last_revealed.clone())
+    }
+
     fn is_tx_relevant(&self, tx: &bitcoin::Transaction) -> bool {
         self.inner.is_relevant(tx)
     }
 }
 
-impl<K> KeychainTxOutIndex<K> {
+impl<K: Clone + Ord + Debug + Send> KeychainTxOutIndex<K> {
     /// Construct a [`KeychainTxOutIndex`] with the given `lookahead`.
     ///
     /// The `lookahead` is the number of script pubkeys to derive and cache from the internal
@@ -169,7 +172,7 @@ impl<K> KeychainTxOutIndex<K> {
 }
 
 /// Methods that are *re-exposed* from the internal [`SpkTxOutIndex`].
-impl<K: Clone + Ord + Debug> KeychainTxOutIndex<K> {
+impl<K: Clone + Ord + Debug + Send> KeychainTxOutIndex<K> {
     /// Return a reference to the internal [`SpkTxOutIndex`].
     ///
     /// **WARNING:** The internal index will contain lookahead spks. Refer to
@@ -291,7 +294,7 @@ impl<K: Clone + Ord + Debug> KeychainTxOutIndex<K> {
     }
 }
 
-impl<K: Clone + Ord + Debug> KeychainTxOutIndex<K> {
+impl<K: Clone + Ord + Debug + Send + 'static> KeychainTxOutIndex<K> {
     /// Return a reference to the internal map of keychain to descriptors.
     pub fn keychains(&self) -> &BTreeMap<K, Descriptor<DescriptorPublicKey>> {
         &self.keychains
@@ -669,6 +672,43 @@ impl<K: Clone + Ord + Debug> KeychainTxOutIndex<K> {
             .collect()
     }
 
+    /// Create a [`SyncRequest`] for this [`KeychainTxOutIndex`] for all revealed spks.
+    ///
+    /// This is the first step when performing a spk-based wallet sync: the returned [`SyncRequest`]
+    /// collects all revealed script pubkeys needed to start a blockchain sync with a spk-based
+    /// blockchain client. A [`CheckPoint`] representing the current chain tip must be provided.
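+    ///
+    /// A minimal compile-only sketch with an empty index and a placeholder genesis checkpoint
+    /// (an all-zero block hash):
+    ///
+    /// ```
+    /// # use bdk_chain::keychain::KeychainTxOutIndex;
+    /// # use bdk_chain::local_chain::CheckPoint;
+    /// # use bdk_chain::BlockId;
+    /// # use bitcoin::hashes::Hash;
+    /// # use bitcoin::BlockHash;
+    /// let index = KeychainTxOutIndex::<&'static str>::default();
+    /// let chain_tip = CheckPoint::new(BlockId { height: 0, hash: BlockHash::all_zeros() });
+    /// let mut request = index.sync_revealed_spks_request(chain_tip);
+    /// assert!(request.take_spks().next().is_none());
+    /// ```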
+    pub fn sync_revealed_spks_request(&self, chain_tip: CheckPoint) -> SyncRequest {
+        // Sync all revealed SPKs
+        let spks = self
+            .revealed_spks()
+            .map(|(_keychain, index, spk)| (index, ScriptBuf::from(spk)))
+            .collect::<Vec<(u32, ScriptBuf)>>();
+
+        let mut req = SyncRequest::new(chain_tip);
+        req.add_spks(spks);
+        req
+    }
+
+    /// Create a [`FullScanRequest`] for this [`KeychainTxOutIndex`].
+    ///
+    /// This is the first step when performing a spk-based full scan: the returned [`FullScanRequest`]
+    /// collects iterators over the index's keychain script pubkeys, which are needed to start a
+    /// blockchain full scan with a spk-based blockchain client. A [`CheckPoint`] representing the
+    /// current chain tip must be provided.
+    ///
+    /// This operation is generally only used when importing or restoring previously used keychains
+    /// in which the list of used scripts is not known.
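+    ///
+    /// A minimal compile-only sketch with an empty index:
+    ///
+    /// ```
+    /// # use bdk_chain::keychain::KeychainTxOutIndex;
+    /// # use bdk_chain::local_chain::CheckPoint;
+    /// # use bdk_chain::BlockId;
+    /// # use bitcoin::hashes::Hash;
+    /// # use bitcoin::BlockHash;
+    /// let index = KeychainTxOutIndex::<&'static str>::default();
+    /// let chain_tip = CheckPoint::new(BlockId { height: 0, hash: BlockHash::all_zeros() });
+    /// let _request = index.full_scan_request(chain_tip);
+    /// ```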
+    pub fn full_scan_request(
+        &self,
+        chain_tip: CheckPoint,
+    ) -> FullScanRequest<K, SpkIterator<Descriptor<DescriptorPublicKey>>> {
+        let spks_by_keychain: BTreeMap<K, SpkIterator<Descriptor<DescriptorPublicKey>>> =
+            self.all_unbounded_spk_iters();
+
+        let mut req = FullScanRequest::new(chain_tip);
+        req.add_spks_by_keychain(spks_by_keychain);
+        req
+    }
+
     /// Applies the derivation changeset to the [`KeychainTxOutIndex`], extending the number of
     /// derived scripts per keychain, as specified in the `changeset`.
     pub fn apply_changeset(&mut self, changeset: super::ChangeSet<K>) {
diff --git a/crates/chain/src/lib.rs b/crates/chain/src/lib.rs
index 206566971..9d2cb06eb 100644
--- a/crates/chain/src/lib.rs
+++ b/crates/chain/src/lib.rs
@@ -52,6 +52,9 @@ mod spk_iter;
 #[cfg(feature = "miniscript")]
 pub use spk_iter::*;
 
+/// Helper types for use with spk-based blockchain clients.
+pub mod spk_client;
+
 #[allow(unused_imports)]
 #[macro_use]
 extern crate alloc;
diff --git a/crates/chain/src/local_chain.rs b/crates/chain/src/local_chain.rs
index 9be62dee3..5d6034ff3 100644
--- a/crates/chain/src/local_chain.rs
+++ b/crates/chain/src/local_chain.rs
@@ -34,6 +34,20 @@ struct CPInner {
     prev: Option<Arc<CPInner>>,
 }
 
+impl PartialEq for CheckPoint {
+    fn eq(&self, other: &Self) -> bool {
+        let mut self_cps = self.iter().map(|cp| cp.block_id());
+        let mut other_cps = other.iter().map(|cp| cp.block_id());
+        loop {
+            match (self_cps.next(), other_cps.next()) {
+                (Some(self_cp), Some(other_cp)) if self_cp == other_cp => continue,
+                (None, None) => break true,
+                _ => break false,
+            }
+        }
+    }
+}
+
 impl CheckPoint {
     /// Construct a new base block at the front of a linked list.
     pub fn new(block: BlockId) -> Self {
@@ -148,6 +162,23 @@ impl CheckPoint {
     pub fn iter(&self) -> CheckPointIter {
         self.clone().into_iter()
     }
+
+    /// Query for the checkpoint at `height`.
+    ///
+    /// Returns `None` if there is no checkpoint at `height`.
+    pub fn query(&self, height: u32) -> Option<Self> {
+        self.iter()
+            // optimization to avoid iterating the entire chain if we do not get a direct hit
+            .take_while(|cp| cp.height() >= height)
+            .find(|cp| cp.height() == height)
+    }
+
+    /// Query for the checkpoint with the lowest height that is greater than or equal to `height`.
+    ///
+    /// Returns `None` if no checkpoint has a height greater than or equal to `height`.
+    pub fn query_from(&self, height: u32) -> Option<Self> {
+        self.iter().take_while(|cp| cp.height() >= height).last()
+    }
 }
 
 /// Iterates over checkpoints backwards.
@@ -188,7 +219,7 @@ impl IntoIterator for CheckPoint {
 /// Script-pubkey based syncing mechanisms may not introduce transactions in a chronological order
 /// so some updates require introducing older blocks (to anchor older transactions). For
 /// script-pubkey based syncing, `introduce_older_blocks` would typically be `true`.
-#[derive(Debug, Clone)]
+#[derive(Debug, Clone, PartialEq)]
 pub struct Update {
     /// The update chain's new tip.
     pub tip: CheckPoint,
@@ -202,21 +233,19 @@ pub struct Update {
 }
 
 /// This is a local implementation of [`ChainOracle`].
-#[derive(Debug, Clone)]
+#[derive(Debug, Clone, PartialEq)]
 pub struct LocalChain {
     tip: CheckPoint,
-    index: BTreeMap<u32, BlockHash>,
-}
-
-impl PartialEq for LocalChain {
-    fn eq(&self, other: &Self) -> bool {
-        self.index == other.index
-    }
 }
 
+// TODO: Figure out whether we can get rid of this
 impl From<LocalChain> for BTreeMap<u32, BlockHash> {
     fn from(value: LocalChain) -> Self {
-        value.index
+        value
+            .tip
+            .iter()
+            .map(|cp| (cp.height(), cp.hash()))
+            .collect()
     }
 }
 
@@ -228,18 +257,16 @@ impl ChainOracle for LocalChain {
         block: BlockId,
         chain_tip: BlockId,
     ) -> Result<Option<bool>, Self::Error> {
-        if block.height > chain_tip.height {
-            return Ok(None);
+        let chain_tip_cp = match self.tip.query(chain_tip.height) {
+            // we can only determine whether `block` is in chain of `chain_tip` if `chain_tip` can
+            // be identified in chain
+            Some(cp) if cp.hash() == chain_tip.hash => cp,
+            _ => return Ok(None),
+        };
+        match chain_tip_cp.query(block.height) {
+            Some(cp) => Ok(Some(cp.hash() == block.hash)),
+            None => Ok(None),
         }
-        Ok(
-            match (
-                self.index.get(&block.height),
-                self.index.get(&chain_tip.height),
-            ) {
-                (Some(cp), Some(tip_cp)) => Some(*cp == block.hash && *tip_cp == chain_tip.hash),
-                _ => None,
-            },
-        )
     }
 
     fn get_chain_tip(&self) -> Result<BlockId, Self::Error> {
@@ -250,7 +277,7 @@ impl ChainOracle for LocalChain {
 impl LocalChain {
     /// Get the genesis hash.
     pub fn genesis_hash(&self) -> BlockHash {
-        self.index.get(&0).copied().expect("must have genesis hash")
+        self.tip.query(0).expect("genesis must exist").hash()
     }
 
     /// Construct [`LocalChain`] from genesis `hash`.
@@ -259,7 +286,6 @@ impl LocalChain {
         let height = 0;
         let chain = Self {
             tip: CheckPoint::new(BlockId { height, hash }),
-            index: core::iter::once((height, hash)).collect(),
         };
         let changeset = chain.initial_changeset();
         (chain, changeset)
@@ -276,7 +302,6 @@ impl LocalChain {
         let (mut chain, _) = Self::from_genesis_hash(genesis_hash);
         chain.apply_changeset(&changeset)?;
 
-        debug_assert!(chain._check_index_is_consistent_with_tip());
         debug_assert!(chain._check_changeset_is_applied(&changeset));
 
         Ok(chain)
@@ -284,18 +309,11 @@ impl LocalChain {
 
     /// Construct a [`LocalChain`] from a given `checkpoint` tip.
     pub fn from_tip(tip: CheckPoint) -> Result<Self, MissingGenesisError> {
-        let mut chain = Self {
-            tip,
-            index: BTreeMap::new(),
-        };
-        chain.reindex(0);
-
-        if chain.index.get(&0).copied().is_none() {
+        let genesis_cp = tip.iter().last().expect("must have at least one element");
+        if genesis_cp.height() != 0 {
             return Err(MissingGenesisError);
         }
-
-        debug_assert!(chain._check_index_is_consistent_with_tip());
-        Ok(chain)
+        Ok(Self { tip })
     }
 
     /// Constructs a [`LocalChain`] from a [`BTreeMap`] of height to [`BlockHash`].
@@ -303,12 +321,11 @@ impl LocalChain {
     /// The [`BTreeMap`] enforces the height order. However, the caller must ensure the blocks are
     /// all of the same chain.
     pub fn from_blocks(blocks: BTreeMap<u32, BlockHash>) -> Result<Self, MissingGenesisError> {
-        if !blocks.contains_key(&0) {
+        if blocks.get(&0).is_none() {
             return Err(MissingGenesisError);
         }
 
         let mut tip: Option<CheckPoint> = None;
-
         for block in &blocks {
             match tip {
                 Some(curr) => {
@@ -321,13 +338,9 @@ impl LocalChain {
             }
         }
 
-        let chain = Self {
-            index: blocks,
+        Ok(Self {
             tip: tip.expect("already checked to have genesis"),
-        };
-
-        debug_assert!(chain._check_index_is_consistent_with_tip());
-        Ok(chain)
+        })
     }
 
     /// Get the highest checkpoint.
@@ -494,9 +507,7 @@ impl LocalChain {
                 None => LocalChain::from_blocks(extension)?.tip(),
             };
             self.tip = new_tip;
-            self.reindex(start_height);
 
-            debug_assert!(self._check_index_is_consistent_with_tip());
             debug_assert!(self._check_changeset_is_applied(changeset));
         }
 
@@ -509,16 +520,16 @@ impl LocalChain {
     ///
     /// Replacing the block hash of an existing checkpoint will result in an error.
     pub fn insert_block(&mut self, block_id: BlockId) -> Result<ChangeSet, AlterCheckPointError> {
-        if let Some(&original_hash) = self.index.get(&block_id.height) {
+        if let Some(original_cp) = self.tip.query(block_id.height) {
+            let original_hash = original_cp.hash();
             if original_hash != block_id.hash {
                 return Err(AlterCheckPointError {
                     height: block_id.height,
                     original_hash,
                     update_hash: Some(block_id.hash),
                 });
-            } else {
-                return Ok(ChangeSet::default());
             }
+            return Ok(ChangeSet::default());
         }
 
         let mut changeset = ChangeSet::default();
@@ -542,33 +553,41 @@ impl LocalChain {
     /// This will fail with [`MissingGenesisError`] if the caller attempts to disconnect from the
     /// genesis block.
     pub fn disconnect_from(&mut self, block_id: BlockId) -> Result<ChangeSet, MissingGenesisError> {
-        if self.index.get(&block_id.height) != Some(&block_id.hash) {
-            return Ok(ChangeSet::default());
-        }
-
-        let changeset = self
-            .index
-            .range(block_id.height..)
-            .map(|(&height, _)| (height, None))
-            .collect::<ChangeSet>();
-        self.apply_changeset(&changeset).map(|_| changeset)
-    }
-
-    /// Reindex the heights in the chain from (and including) `from` height
-    fn reindex(&mut self, from: u32) {
-        let _ = self.index.split_off(&from);
-        for cp in self.iter_checkpoints() {
-            if cp.height() < from {
+        let mut remove_from = Option::<CheckPoint>::None;
+        let mut changeset = ChangeSet::default();
+        for cp in self.tip().iter() {
+            let cp_id = cp.block_id();
+            if cp_id.height < block_id.height {
                 break;
             }
-            self.index.insert(cp.height(), cp.hash());
+            changeset.insert(cp_id.height, None);
+            if cp_id == block_id {
+                remove_from = Some(cp);
+            }
         }
+        self.tip = match remove_from.map(|cp| cp.prev()) {
+            // The checkpoint below the earliest checkpoint to remove will be the new tip.
+            Some(Some(new_tip)) => new_tip,
+            // If there is no checkpoint below the earliest checkpoint to remove, it means the
+            // "earliest checkpoint to remove" is the genesis block. We disallow removing the
+            // genesis block.
+            Some(None) => return Err(MissingGenesisError),
+            // If there is nothing to remove, we return an empty changeset.
+            None => return Ok(ChangeSet::default()),
+        };
+        Ok(changeset)
     }
 
     /// Derives an initial [`ChangeSet`], meaning that it can be applied to an empty chain to
     /// recover the current chain.
     pub fn initial_changeset(&self) -> ChangeSet {
-        self.index.iter().map(|(k, v)| (*k, Some(*v))).collect()
+        self.tip
+            .iter()
+            .map(|cp| {
+                let block_id = cp.block_id();
+                (block_id.height, Some(block_id.hash))
+            })
+            .collect()
     }
 
     /// Iterate over checkpoints in descending height order.
@@ -578,28 +597,43 @@ impl LocalChain {
         }
     }
 
-    /// Get a reference to the internal index mapping the height to block hash.
-    pub fn blocks(&self) -> &BTreeMap<u32, BlockHash> {
-        &self.index
-    }
-
-    fn _check_index_is_consistent_with_tip(&self) -> bool {
-        let tip_history = self
-            .tip
-            .iter()
-            .map(|cp| (cp.height(), cp.hash()))
-            .collect::<BTreeMap<_, _>>();
-        self.index == tip_history
-    }
-
     fn _check_changeset_is_applied(&self, changeset: &ChangeSet) -> bool {
-        for (height, exp_hash) in changeset {
-            if self.index.get(height) != exp_hash.as_ref() {
-                return false;
+        let mut curr_cp = self.tip.clone();
+        for (height, exp_hash) in changeset.iter().rev() {
+            match curr_cp.query(*height) {
+                Some(query_cp) => {
+                    if query_cp.height() != *height || Some(query_cp.hash()) != *exp_hash {
+                        return false;
+                    }
+                    curr_cp = query_cp;
+                }
+                None => {
+                    if exp_hash.is_some() {
+                        return false;
+                    }
+                }
             }
         }
         true
     }
+
+    /// Query for the checkpoint at the given `height` (if it exists).
+    ///
+    /// This is a shorthand for calling [`CheckPoint::query`] on the [`tip`].
+    ///
+    /// [`tip`]: LocalChain::tip
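+    ///
+    /// A small doctest sketch, using an all-zero genesis hash purely as a placeholder:
+    ///
+    /// ```
+    /// # use bdk_chain::local_chain::LocalChain;
+    /// # use bitcoin::hashes::Hash;
+    /// # use bitcoin::BlockHash;
+    /// let (chain, _) = LocalChain::from_genesis_hash(BlockHash::all_zeros());
+    /// assert_eq!(chain.query(0).map(|cp| cp.height()), Some(0));
+    /// assert!(chain.query(1).is_none());
+    /// ```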
+    pub fn query(&self, height: u32) -> Option<CheckPoint> {
+        self.tip.query(height)
+    }
+
+    /// Query for the checkpoint with the lowest height that is greater than or equal to `height`.
+    ///
+    /// This is a shorthand for calling [`CheckPoint::query_from`] on the [`tip`].
+    ///
+    /// [`tip`]: LocalChain::tip
+    pub fn query_from(&self, height: u32) -> Option<CheckPoint> {
+        self.tip.query_from(height)
+    }
 }
 
 /// An error which occurs when a [`LocalChain`] is constructed without a genesis checkpoint.
diff --git a/crates/chain/src/spk_client.rs b/crates/chain/src/spk_client.rs
new file mode 100644
index 000000000..e52a6f3d8
--- /dev/null
+++ b/crates/chain/src/spk_client.rs
@@ -0,0 +1,212 @@
+use crate::collections::BTreeMap;
+use crate::local_chain::CheckPoint;
+use crate::{local_chain, ConfirmationTimeHeightAnchor, TxGraph};
+use alloc::{boxed::Box, sync::Arc, vec::Vec};
+use bitcoin::{OutPoint, ScriptBuf, Txid};
+use core::default::Default;
+use core::fmt::Debug;
+
+type InspectSpkFn = Arc<Box<dyn Fn(u32, &ScriptBuf) + Send + Sync + 'static>>;
+type InspectKeychainSpkFn<K> = Arc<Box<dyn Fn(K, u32, &ScriptBuf) + Send + Sync + 'static>>;
+type InspectTxidFn = Arc<Box<dyn Fn(&Txid) + Send + Sync + 'static>>;
+type InspectOutPointFn = Arc<Box<dyn Fn(&OutPoint) + Send + Sync + 'static>>;
+
+/// Data required to perform a spk-based blockchain client sync.
+///
+/// A client sync fetches relevant chain data for a known list of scripts, transaction ids and
+/// outpoints. The sync process also updates the chain from the given [`CheckPoint`].
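+///
+/// A minimal construction sketch, using a placeholder genesis checkpoint (all-zero block hash) and
+/// an empty placeholder script:
+///
+/// ```
+/// # use bdk_chain::local_chain::CheckPoint;
+/// # use bdk_chain::spk_client::SyncRequest;
+/// # use bdk_chain::BlockId;
+/// # use bitcoin::hashes::Hash;
+/// # use bitcoin::{BlockHash, ScriptBuf};
+/// let chain_tip = CheckPoint::new(BlockId { height: 0, hash: BlockHash::all_zeros() });
+/// let mut request = SyncRequest::new(chain_tip);
+/// request.add_spks([(0, ScriptBuf::new())]);
+/// request.inspect_spks(|index, spk| println!("syncing spk {}: {:?}", index, spk));
+/// // A blockchain client would then drain the request via `take_spks`, `take_txids`, etc.
+/// let _spks: Vec<_> = request.take_spks().collect();
+/// ```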
+pub struct SyncRequest {
+    /// A checkpoint for the current [`LocalChain::tip`].
+    /// The sync process will return a new chain update that extends this tip.
+    ///
+    /// [`LocalChain::tip`]: local_chain::LocalChain::tip
+    pub chain_tip: CheckPoint,
+    /// Transactions that spend from or to these indexed script pubkeys.
+    spks: Vec<(u32, ScriptBuf)>,
+    /// Transactions with these txids.
+    txids: Vec<Txid>,
+    /// Transactions that contain these outpoints or spend from these outpoints.
+    outpoints: Vec<OutPoint>,
+    /// An optional callback function to inspect sync'd spks.
+    inspect_spks: Option<InspectSpkFn>,
+    /// An optional callback function to inspect sync'd txids.
+    inspect_txids: Option<InspectTxidFn>,
+    /// An optional callback function to inspect sync'd outpoints.
+    inspect_outpoints: Option<InspectOutPointFn>,
+}
+
+fn null_inspect_spks(_index: u32, _spk: &ScriptBuf) {}
+fn null_inspect_keychain_spks<K>(_keychain: K, _index: u32, _spk: &ScriptBuf) {}
+fn null_inspect_txids(_txid: &Txid) {}
+fn null_inspect_outpoints(_outpoint: &OutPoint) {}
+
+impl SyncRequest {
+    /// Create a new [`SyncRequest`] from the current chain tip [`CheckPoint`].
+    pub fn new(chain_tip: CheckPoint) -> Self {
+        Self {
+            chain_tip,
+            spks: Default::default(),
+            txids: Default::default(),
+            outpoints: Default::default(),
+            inspect_spks: Default::default(),
+            inspect_txids: Default::default(),
+            inspect_outpoints: Default::default(),
+        }
+    }
+
+    /// Add [`ScriptBuf`]s to be sync'd with this request.
+    pub fn add_spks(&mut self, spks: impl IntoIterator<Item = (u32, ScriptBuf)>) {
+        self.spks.extend(spks.into_iter())
+    }
+
+    /// Take the [`ScriptBuf`]s to be sync'd with this request.
+    pub fn take_spks(&mut self) -> impl Iterator<Item = (u32, ScriptBuf)> {
+        let spks = core::mem::take(&mut self.spks);
+        let inspect = self
+            .inspect_spks
+            .take()
+            .unwrap_or(Arc::new(Box::new(null_inspect_spks)));
+        spks.into_iter()
+            .inspect(move |(index, spk)| inspect(*index, spk))
+    }
+
+    /// Add a function that will be called for each [`ScriptBuf`] sync'd in this request.
+    pub fn inspect_spks(&mut self, inspect: impl Fn(u32, &ScriptBuf) + Send + Sync + 'static) {
+        self.inspect_spks = Some(Arc::new(Box::new(inspect)))
+    }
+
+    /// Add [`Txid`]s to be sync'd with this request.
+    pub fn add_txids(&mut self, txids: impl IntoIterator<Item = Txid>) {
+        self.txids.extend(txids.into_iter())
+    }
+
+    /// Take the [`Txid`]s to be sync'd with this request.
+    pub fn take_txids(&mut self) -> impl Iterator<Item = Txid> {
+        let txids = core::mem::take(&mut self.txids);
+        let inspect = self
+            .inspect_txids
+            .clone()
+            .unwrap_or(Arc::new(Box::new(null_inspect_txids)));
+        txids.into_iter().inspect(move |t| inspect(t))
+    }
+
+    /// Add a function that will be called for each [`Txid`] sync'd in this request.
+    pub fn inspect_txids(&mut self, inspect: impl Fn(&Txid) + Send + Sync + 'static) {
+        self.inspect_txids = Some(Arc::new(Box::new(inspect)))
+    }
+
+    /// Add [`OutPoint`]s to be sync'd with this request.
+    pub fn add_outpoints(&mut self, outpoints: impl IntoIterator<Item = OutPoint>) {
+        self.outpoints.extend(outpoints.into_iter())
+    }
+
+    /// Take the [`OutPoint`]s to be sync'd with this request.
+    pub fn take_outpoints(&mut self) -> impl Iterator<Item = OutPoint> {
+        let outpoints = core::mem::take(&mut self.outpoints);
+        let inspect = self
+            .inspect_outpoints
+            .take()
+            .unwrap_or(Arc::new(Box::new(null_inspect_outpoints)));
+        outpoints.into_iter().inspect(move |o| inspect(o))
+    }
+
+    /// Add a function that will be called for each [`OutPoint`] sync'd in this request.
+    pub fn inspect_outpoints(&mut self, inspect: impl Fn(&OutPoint) + Send + Sync + 'static) {
+        self.inspect_outpoints = Some(Arc::new(Box::new(inspect)))
+    }
+}
+
+/// Data returned from a spk-based blockchain client sync.
+///
+/// See also [`SyncRequest`].
+pub struct SyncResult {
+    /// The update to apply to the receiving [`TxGraph`].
+    pub graph_update: TxGraph<ConfirmationTimeHeightAnchor>,
+    /// The update to apply to the receiving [`LocalChain`](local_chain::LocalChain).
+    pub chain_update: local_chain::Update,
+}
+
+/// Data required to perform a spk-based blockchain client full scan.
+///
+/// A client full scan iterates through all the scripts for the given keychains, fetching relevant
+/// data until some stop gap number of consecutive scripts yields no data. This operation is
+/// generally only used when importing or restoring previously used keychains in which the list of
+/// used scripts is not known. The full scan process also updates the chain from the given [`CheckPoint`].
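+///
+/// A minimal construction sketch with a single placeholder keychain (`"external"`) and one
+/// placeholder spk:
+///
+/// ```
+/// # use std::collections::BTreeMap;
+/// # use bdk_chain::local_chain::CheckPoint;
+/// # use bdk_chain::spk_client::FullScanRequest;
+/// # use bdk_chain::BlockId;
+/// # use bitcoin::hashes::Hash;
+/// # use bitcoin::{BlockHash, ScriptBuf};
+/// let chain_tip = CheckPoint::new(BlockId { height: 0, hash: BlockHash::all_zeros() });
+/// let mut request = FullScanRequest::new(chain_tip);
+/// let mut spks_by_keychain = BTreeMap::new();
+/// spks_by_keychain.insert("external", vec![(0_u32, ScriptBuf::new())].into_iter());
+/// request.add_spks_by_keychain(spks_by_keychain);
+/// ```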
+pub struct FullScanRequest<K, I> {
+    /// A checkpoint for the current [`LocalChain::tip`].
+    /// The full scan process will return a new chain update that extends this tip.
+    ///
+    /// [`LocalChain::tip`]: local_chain::LocalChain::tip
+    pub chain_tip: CheckPoint,
+    /// Iterators of script pubkeys indexed by the keychain index.
+    spks_by_keychain: BTreeMap<K, I>,
+    /// An optional callback function to inspect scanned spks.
+    inspect_spks: Option<InspectKeychainSpkFn<K>>,
+}
+
+impl<K: Ord + Clone + Send + Debug + 'static, I: Iterator<Item = (u32, ScriptBuf)> + Send>
+    FullScanRequest<K, I>
+{
+    /// Create a new [`FullScanRequest`] from the current chain tip [`CheckPoint`].
+    pub fn new(chain_tip: CheckPoint) -> Self {
+        Self {
+            chain_tip,
+            spks_by_keychain: Default::default(),
+            inspect_spks: Default::default(),
+        }
+    }
+
+    /// Add a map of keychains to iterators of indexed [`ScriptBuf`]s to be scanned with this
+    /// request.
+    ///
+    /// Adding a map with a keychain that has already been added will overwrite that keychain's
+    /// previously added [`ScriptBuf`] iterator.
+    pub fn add_spks_by_keychain(&mut self, spks_by_keychain: BTreeMap<K, I>) {
+        self.spks_by_keychain.extend(spks_by_keychain)
+    }
+
+    /// Take the map of keychains to indexed [`ScriptBuf`] iterators to be scanned with this request.
+    pub fn take_spks_by_keychain(
+        &mut self,
+    ) -> BTreeMap<
+        K,
+        Box<impl IntoIterator<IntoIter = impl Iterator<Item = (u32, ScriptBuf)> + Send> + Send>,
+    > {
+        let spks = core::mem::take(&mut self.spks_by_keychain);
+        let inspect = self
+            .inspect_spks
+            .clone()
+            .unwrap_or(Arc::new(Box::new(null_inspect_keychain_spks)));
+
+        spks.into_iter()
+            .map(move |(k, spk_iter)| {
+                let inspect = inspect.clone();
+                let keychain = k.clone();
+                let spk_iter_inspected =
+                    Box::new(spk_iter.inspect(move |(i, spk)| inspect(keychain.clone(), *i, spk)));
+                (k, spk_iter_inspected)
+            })
+            .collect()
+    }
+
+    /// Add a function that will be called for each [`ScriptBuf`] scanned in this request.
+    pub fn inspect_spks(&mut self, inspect: impl Fn(K, u32, &ScriptBuf) + Send + Sync + 'static) {
+        self.inspect_spks = Some(Arc::new(Box::new(inspect)))
+    }
+}
+
+/// Data returned from a spk-based blockchain client full scan.
+///
+/// See also [`FullScanRequest`].
+pub struct FullScanResult<K: Ord + Clone + Send + Debug> {
+    /// The update to apply to the receiving [`TxGraph`].
+    pub graph_update: TxGraph<ConfirmationTimeHeightAnchor>,
+    /// The update to apply to the receiving [`LocalChain`](local_chain::LocalChain).
+    pub chain_update: local_chain::Update,
+    /// Last active indices for the corresponding keychains (`K`).
+    pub last_active_indices: BTreeMap<K, u32>,
+}
diff --git a/crates/chain/src/tx_graph.rs b/crates/chain/src/tx_graph.rs
index 34cbccf5c..4a7538cab 100644
--- a/crates/chain/src/tx_graph.rs
+++ b/crates/chain/src/tx_graph.rs
@@ -1,26 +1,27 @@
 //! Module for structures that store and traverse transactions.
 //!
-//! [`TxGraph`] contains transactions and indexes them so you can easily traverse the graph of those transactions.
-//! `TxGraph` is *monotone* in that you can always insert a transaction -- it doesn't care whether that
-//! transaction is in the current best chain or whether it conflicts with any of the
-//! existing transactions or what order you insert the transactions. This means that you can always
-//! combine two [`TxGraph`]s together, without resulting in inconsistencies.
-//! Furthermore, there is currently no way to delete a transaction.
+//! [`TxGraph`] contains transactions and indexes them so you can easily traverse the graph of
+//! those transactions. `TxGraph` is *monotone* in that you can always insert a transaction -- it
+//! does not care whether that transaction is in the current best chain or whether it conflicts with
+//! any of the existing transactions or what order you insert the transactions. This means that you
+//! can always combine two [`TxGraph`]s together, without resulting in inconsistencies. Furthermore,
+//! there is currently no way to delete a transaction.
 //!
-//! Transactions can be either whole or partial (i.e., transactions for which we only
-//! know some outputs, which we usually call "floating outputs"; these are usually inserted
-//! using the [`insert_txout`] method.).
+//! Transactions can be either whole or partial (i.e., transactions for which we only know some
+//! outputs, which we usually call "floating outputs"; these are usually inserted using the
+//! [`insert_txout`] method.).
 //!
-//! The graph contains transactions in the form of [`TxNode`]s. Each node contains the
-//! txid, the transaction (whole or partial), the blocks it's anchored in (see the [`Anchor`]
-//! documentation for more details), and the timestamp of the last time we saw
-//! the transaction as unconfirmed.
+//! The graph contains transactions in the form of [`TxNode`]s. Each node contains the txid, the
+//! transaction (whole or partial), the blocks that it is anchored to (see the [`Anchor`]
+//! documentation for more details), and the timestamp of the last time we saw the transaction as
+//! unconfirmed.
 //!
 //! Conflicting transactions are allowed to coexist within a [`TxGraph`]. This is useful for
 //! identifying and traversing conflicts and descendants of a given transaction. Some [`TxGraph`]
-//! methods only consider "canonical" (i.e., in the best chain or in mempool) transactions,
-//! we decide which transactions are canonical based on anchors `last_seen_unconfirmed`;
-//! see the [`try_get_chain_position`] documentation for more details.
+//! methods only consider transactions that are "canonical" (i.e., in the best chain or in mempool).
+//! We decide which transactions are canonical based on the transaction's anchors and the
+//! `last_seen` (as unconfirmed) timestamp; see the [`try_get_chain_position`] documentation for
+//! more details.
 //!
 //! The [`ChangeSet`] reports changes made to a [`TxGraph`]; it can be used to either save to
 //! persistent storage, or to be applied to another [`TxGraph`].
@@ -30,10 +31,22 @@
 //!
 //! # Applying changes
 //!
-//! Methods that apply changes to [`TxGraph`] will return [`ChangeSet`].
-//! [`ChangeSet`] can be applied back to a [`TxGraph`] or be used to inform persistent storage
+//! Methods that change the state of [`TxGraph`] will return [`ChangeSet`]s.
+//! [`ChangeSet`]s can be applied back to a [`TxGraph`] or be used to inform persistent storage
 //! of the changes to [`TxGraph`].
 //!
+//! # Generics
+//!
+//! Anchors are represented as generics within `TxGraph<A>`. To make use of all functionality of the
+//! `TxGraph`, anchors (`A`) should implement [`Anchor`].
+//!
+//! Anchors are made generic so that different types of data can record how a transaction is
+//! *anchored* to a given block. For example, a custom [`Anchor`] type could store a Merkle proof of
+//! the transaction's inclusion in the confirmation block. The minimal [`Anchor`] type is a plain
+//! [`BlockId`], which simply records the height and hash of the block that contains the
+//! transaction. Note that a transaction can be contained in multiple conflicting blocks (by nature
+//! of the Bitcoin network).
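+//!
+//! A hedged sketch of such a custom anchor type (`ProofAnchor` and its `proof_bytes` field are
+//! purely illustrative and not part of this crate):
+//!
+//! ```
+//! # use bdk_chain::{Anchor, BlockId};
+//! #[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
+//! struct ProofAnchor {
+//!     anchor_block: BlockId,
+//!     proof_bytes: Vec<u8>,
+//! }
+//!
+//! impl Anchor for ProofAnchor {
+//!     fn anchor_block(&self) -> BlockId {
+//!         self.anchor_block
+//!     }
+//! }
+//! ```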
+//!
 //! ```
 //! # use bdk_chain::BlockId;
 //! # use bdk_chain::tx_graph::TxGraph;
@@ -76,10 +89,11 @@
 //! [`insert_txout`]: TxGraph::insert_txout
 
 use crate::{
-    collections::*, keychain::Balance, local_chain::LocalChain, Anchor, Append, BlockId,
-    ChainOracle, ChainPosition, FullTxOut,
+    collections::*, keychain::Balance, Anchor, Append, BlockId, ChainOracle, ChainPosition,
+    FullTxOut,
 };
 use alloc::collections::vec_deque::VecDeque;
+use alloc::sync::Arc;
 use alloc::vec::Vec;
 use bitcoin::{OutPoint, Script, Transaction, TxOut, Txid};
 use core::fmt::{self, Formatter};
@@ -122,7 +136,7 @@ pub struct TxNode<'a, T, A> {
     /// Txid of the transaction.
     pub txid: Txid,
     /// A partial or full representation of the transaction.
-    pub tx: &'a T,
+    pub tx: T,
     /// The blocks that the transaction is "anchored" in.
     pub anchors: &'a BTreeSet<A>,
     /// The last-seen unix timestamp of the transaction as unconfirmed.
@@ -133,7 +147,7 @@ impl<'a, T, A> Deref for TxNode<'a, T, A> {
     type Target = T;
 
     fn deref(&self) -> &Self::Target {
-        self.tx
+        &self.tx
     }
 }
 
@@ -143,7 +157,7 @@ impl<'a, T, A> Deref for TxNode<'a, T, A> {
 /// outputs).
 #[derive(Clone, Debug, PartialEq)]
 enum TxNodeInternal {
-    Whole(Transaction),
+    Whole(Arc<Transaction>),
     Partial(BTreeMap<u32, TxOut>),
 }
 
@@ -198,6 +212,7 @@ impl<A> TxGraph<A> {
     pub fn all_txouts(&self) -> impl Iterator<Item = (OutPoint, &TxOut)> {
         self.txs.iter().flat_map(|(txid, (tx, _, _))| match tx {
             TxNodeInternal::Whole(tx) => tx
+                .as_ref()
                 .output
                 .iter()
                 .enumerate()
@@ -229,13 +244,13 @@ impl<A> TxGraph<A> {
     }
 
     /// Iterate over all full transactions in the graph.
-    pub fn full_txs(&self) -> impl Iterator<Item = TxNode<'_, Transaction, A>> {
+    pub fn full_txs(&self) -> impl Iterator<Item = TxNode<'_, Arc<Transaction>, A>> {
         self.txs
             .iter()
             .filter_map(|(&txid, (tx, anchors, last_seen))| match tx {
                 TxNodeInternal::Whole(tx) => Some(TxNode {
                     txid,
-                    tx,
+                    tx: tx.clone(),
                     anchors,
                     last_seen_unconfirmed: *last_seen,
                 }),
@@ -248,16 +263,16 @@ impl<A> TxGraph<A> {
     /// Refer to [`get_txout`] for getting a specific [`TxOut`].
     ///
     /// [`get_txout`]: Self::get_txout
-    pub fn get_tx(&self, txid: Txid) -> Option<&Transaction> {
+    pub fn get_tx(&self, txid: Txid) -> Option<Arc<Transaction>> {
         self.get_tx_node(txid).map(|n| n.tx)
     }
 
     /// Get a transaction node by txid. This only returns `Some` for full transactions.
-    pub fn get_tx_node(&self, txid: Txid) -> Option<TxNode<'_, Transaction, A>> {
+    pub fn get_tx_node(&self, txid: Txid) -> Option<TxNode<'_, Arc<Transaction>, A>> {
         match &self.txs.get(&txid)? {
             (TxNodeInternal::Whole(tx), anchors, last_seen) => Some(TxNode {
                 txid,
-                tx,
+                tx: tx.clone(),
                 anchors,
                 last_seen_unconfirmed: *last_seen,
             }),
@@ -268,7 +283,7 @@ impl<A> TxGraph<A> {
     /// Obtains a single tx output (if any) at the specified outpoint.
     pub fn get_txout(&self, outpoint: OutPoint) -> Option<&TxOut> {
         match &self.txs.get(&outpoint.txid)?.0 {
-            TxNodeInternal::Whole(tx) => tx.output.get(outpoint.vout as usize),
+            TxNodeInternal::Whole(tx) => tx.as_ref().output.get(outpoint.vout as usize),
             TxNodeInternal::Partial(txouts) => txouts.get(&outpoint.vout),
         }
     }
@@ -279,6 +294,7 @@ impl<A> TxGraph<A> {
     pub fn tx_outputs(&self, txid: Txid) -> Option<BTreeMap<u32, &TxOut>> {
         Some(match &self.txs.get(&txid)?.0 {
             TxNodeInternal::Whole(tx) => tx
+                .as_ref()
                 .output
                 .iter()
                 .enumerate()
@@ -356,16 +372,15 @@ impl<A> TxGraph<A> {
         &self,
         txid: Txid,
     ) -> impl DoubleEndedIterator<Item = (u32, &HashSet<Txid>)> + '_ {
-        let start = OutPoint { txid, vout: 0 };
-        let end = OutPoint {
-            txid,
-            vout: u32::MAX,
-        };
+        let start = OutPoint::new(txid, 0);
+        let end = OutPoint::new(txid, u32::MAX);
         self.spends
             .range(start..=end)
             .map(|(outpoint, spends)| (outpoint.vout, spends))
     }
+}
 
+impl<A: Clone + Ord> TxGraph<A> {
     /// Creates an iterator that filters and maps ancestor transactions.
     ///
     /// The iterator starts with the ancestors of the supplied `tx` (ancestor transactions of `tx`
@@ -379,13 +394,10 @@ impl<A> TxGraph<A> {
     ///
     /// The supplied closure returns an `Option<T>`, allowing the caller to map each `Transaction`
     /// it visits and decide whether to visit ancestors.
-    pub fn walk_ancestors<'g, F, O>(
-        &'g self,
-        tx: &'g Transaction,
-        walk_map: F,
-    ) -> TxAncestors<'g, A, F>
+    pub fn walk_ancestors<'g, T, F, O>(&'g self, tx: T, walk_map: F) -> TxAncestors<'g, A, F>
     where
-        F: FnMut(usize, &'g Transaction) -> Option<O> + 'g,
+        T: Into<Arc<Transaction>>,
+        F: FnMut(usize, Arc<Transaction>) -> Option<O> + 'g,
     {
         TxAncestors::new_exclude_root(self, tx, walk_map)
     }
@@ -406,7 +418,9 @@ impl<A> TxGraph<A> {
     {
         TxDescendants::new_exclude_root(self, txid, walk_map)
     }
+}
 
+impl<A> TxGraph<A> {
     /// Creates an iterator that both filters and maps conflicting transactions (this includes
     /// descendants of directly-conflicting transactions, which are also considered conflicts).
     ///
@@ -419,7 +433,7 @@ impl<A> TxGraph<A> {
     where
         F: FnMut(usize, Txid) -> Option<O> + 'g,
     {
-        let txids = self.direct_conflitcs(tx).map(|(_, txid)| txid);
+        let txids = self.direct_conflicts(tx).map(|(_, txid)| txid);
         TxDescendants::from_multiple_include_root(self, txids, walk_map)
     }
 
@@ -430,7 +444,7 @@ impl<A> TxGraph<A> {
     /// Note that this only returns directly conflicting txids and won't include:
     /// - descendants of conflicting transactions (which are technically also conflicting)
     /// - transactions conflicting with the given transaction's ancestors
-    pub fn direct_conflitcs<'g>(
+    pub fn direct_conflicts<'g>(
         &'g self,
         tx: &'g Transaction,
     ) -> impl Iterator<Item = (usize, Txid)> + '_ {
@@ -467,9 +481,7 @@ impl<A: Clone + Ord> TxGraph<A> {
         new_graph.apply_changeset(self.initial_changeset().map_anchors(f));
         new_graph
     }
-}
 
-impl<A: Clone + Ord> TxGraph<A> {
     /// Construct a new [`TxGraph`] from a list of transactions.
     pub fn new(txs: impl IntoIterator<Item = Transaction>) -> Self {
         let mut new = Self::default();
@@ -506,9 +518,10 @@ impl<A: Clone + Ord> TxGraph<A> {
     /// The [`ChangeSet`] returned will be empty if `tx` already exists.
     pub fn insert_tx(&mut self, tx: Transaction) -> ChangeSet<A> {
         let mut update = Self::default();
-        update
-            .txs
-            .insert(tx.txid(), (TxNodeInternal::Whole(tx), BTreeSet::new(), 0));
+        update.txs.insert(
+            tx.txid(),
+            (TxNodeInternal::Whole(tx.into()), BTreeSet::new(), 0),
+        );
         self.apply_update(update)
     }
 
@@ -567,7 +580,8 @@ impl<A: Clone + Ord> TxGraph<A> {
 
     /// Applies [`ChangeSet`] to [`TxGraph`].
     pub fn apply_changeset(&mut self, changeset: ChangeSet<A>) {
-        for tx in changeset.txs {
+        for wrapped_tx in changeset.txs {
+            let tx = wrapped_tx.as_ref();
             let txid = tx.txid();
 
             tx.input
@@ -582,18 +596,20 @@ impl<A: Clone + Ord> TxGraph<A> {
 
             match self.txs.get_mut(&txid) {
                 Some((tx_node @ TxNodeInternal::Partial(_), _, _)) => {
-                    *tx_node = TxNodeInternal::Whole(tx);
+                    *tx_node = TxNodeInternal::Whole(wrapped_tx.clone());
                 }
                 Some((TxNodeInternal::Whole(tx), _, _)) => {
                     debug_assert_eq!(
-                        tx.txid(),
+                        tx.as_ref().txid(),
                         txid,
                         "tx should produce txid that is same as key"
                     );
                 }
                 None => {
-                    self.txs
-                        .insert(txid, (TxNodeInternal::Whole(tx), BTreeSet::new(), 0));
+                    self.txs.insert(
+                        txid,
+                        (TxNodeInternal::Whole(wrapped_tx), BTreeSet::new(), 0),
+                    );
                 }
             }
         }
@@ -630,7 +646,7 @@ impl<A: Clone + Ord> TxGraph<A> {
     /// The [`ChangeSet`] would be the set difference between `update` and `self` (transactions that
     /// exist in `update` but not in `self`).
     pub(crate) fn determine_changeset(&self, update: TxGraph<A>) -> ChangeSet<A> {
-        let mut changeset = ChangeSet::default();
+        let mut changeset = ChangeSet::<A>::default();
 
         for (&txid, (update_tx_node, _, update_last_seen)) in &update.txs {
             let prev_last_seen: u64 = match (self.txs.get(&txid), update_tx_node) {
@@ -680,69 +696,6 @@ impl<A: Clone + Ord> TxGraph<A> {
 }
 
 impl<A: Anchor> TxGraph<A> {
-    /// Find missing block heights of `chain`.
-    ///
-    /// This works by scanning through anchors, and seeing whether the anchor block of the anchor
-    /// exists in the [`LocalChain`]. The returned iterator does not output duplicate heights.
-    pub fn missing_heights<'a>(&'a self, chain: &'a LocalChain) -> impl Iterator<Item = u32> + 'a {
-        // Map of txids to skip.
-        //
-        // Usually, if a height of a tx anchor is missing from the chain, we would want to return
-        // this height in the iterator. The exception is when the tx is confirmed in chain. All the
-        // other missing-height anchors of this tx can be skipped.
-        //
-        // * Some(true)  => skip all anchors of this txid
-        // * Some(false) => do not skip anchors of this txid
-        // * None        => we do not know whether we can skip this txid
-        let mut txids_to_skip = HashMap::<Txid, bool>::new();
-
-        // Keeps track of the last height emitted so we don't double up.
-        let mut last_height_emitted = Option::<u32>::None;
-
-        self.anchors
-            .iter()
-            .filter(move |(_, txid)| {
-                let skip = *txids_to_skip.entry(*txid).or_insert_with(|| {
-                    let tx_anchors = match self.txs.get(txid) {
-                        Some((_, anchors, _)) => anchors,
-                        None => return true,
-                    };
-                    let mut has_missing_height = false;
-                    for anchor_block in tx_anchors.iter().map(Anchor::anchor_block) {
-                        match chain.blocks().get(&anchor_block.height) {
-                            None => {
-                                has_missing_height = true;
-                                continue;
-                            }
-                            Some(chain_hash) => {
-                                if chain_hash == &anchor_block.hash {
-                                    return true;
-                                }
-                            }
-                        }
-                    }
-                    !has_missing_height
-                });
-                #[cfg(feature = "std")]
-                debug_assert!({
-                    println!("txid={} skip={}", txid, skip);
-                    true
-                });
-                !skip
-            })
-            .filter_map(move |(a, _)| {
-                let anchor_block = a.anchor_block();
-                if Some(anchor_block.height) != last_height_emitted
-                    && !chain.blocks().contains_key(&anchor_block.height)
-                {
-                    last_height_emitted = Some(anchor_block.height);
-                    Some(anchor_block.height)
-                } else {
-                    None
-                }
-            })
-    }
-
     /// Get the position of the transaction in `chain` with tip `chain_tip`.
     ///
     /// Chain data is fetched from `chain`, a [`ChainOracle`] implementation.
@@ -791,10 +744,10 @@ impl<A: Anchor> TxGraph<A> {
             TxNodeInternal::Whole(tx) => {
                 // A coinbase tx that is not anchored in the best chain cannot be unconfirmed and
                 // should always be filtered out.
-                if tx.is_coin_base() {
+                if tx.as_ref().is_coin_base() {
                     return Ok(None);
                 }
-                tx
+                tx.clone()
             }
             TxNodeInternal::Partial(_) => {
                 // Partial transactions (outputs only) cannot have conflicts.
@@ -811,8 +764,8 @@ impl<A: Anchor> TxGraph<A> {
         // First of all, we retrieve all our ancestors. Since we're using `new_include_root`, the
         // resulting array will also include `tx`
         let unconfirmed_ancestor_txs =
-            TxAncestors::new_include_root(self, tx, |_, ancestor_tx: &Transaction| {
-                let tx_node = self.get_tx_node(ancestor_tx.txid())?;
+            TxAncestors::new_include_root(self, tx.clone(), |_, ancestor_tx: Arc<Transaction>| {
+                let tx_node = self.get_tx_node(ancestor_tx.as_ref().txid())?;
                 // We're filtering the ancestors to keep only the unconfirmed ones (= no anchors in
                 // the best chain)
                 for block in tx_node.anchors {
@@ -828,8 +781,10 @@ impl<A: Anchor> TxGraph<A> {
 
         // We determine our tx's last seen, which is the max between our last seen,
         // and our unconf descendants' last seen.
-        let unconfirmed_descendants_txs =
-            TxDescendants::new_include_root(self, tx.txid(), |_, descendant_txid: Txid| {
+        let unconfirmed_descendants_txs = TxDescendants::new_include_root(
+            self,
+            tx.as_ref().txid(),
+            |_, descendant_txid: Txid| {
                 let tx_node = self.get_tx_node(descendant_txid)?;
                 // We're filtering the ancestors to keep only the unconfirmed ones (= no anchors in
                 // the best chain)
@@ -841,8 +796,9 @@ impl<A: Anchor> TxGraph<A> {
                     }
                 }
                 Some(Ok(tx_node))
-            })
-            .collect::<Result<Vec<_>, C::Error>>()?;
+            },
+        )
+        .collect::<Result<Vec<_>, C::Error>>()?;
 
         let tx_last_seen = unconfirmed_descendants_txs
             .iter()
@@ -853,7 +809,8 @@ impl<A: Anchor> TxGraph<A> {
         // Now we traverse our ancestors and consider all their conflicts
         for tx_node in unconfirmed_ancestor_txs {
             // We retrieve all the transactions conflicting with this specific ancestor
-            let conflicting_txs = self.walk_conflicts(tx_node.tx, |_, txid| self.get_tx_node(txid));
+            let conflicting_txs =
+                self.walk_conflicts(tx_node.tx.as_ref(), |_, txid| self.get_tx_node(txid));
 
             // If a conflicting tx is in the best chain, or has `last_seen` higher than this ancestor, then
             // this tx cannot exist in the best chain
@@ -867,7 +824,7 @@ impl<A: Anchor> TxGraph<A> {
                     return Ok(None);
                 }
                 if conflicting_tx.last_seen_unconfirmed == *last_seen
-                    && conflicting_tx.txid() > tx.txid()
+                    && conflicting_tx.as_ref().txid() > tx.as_ref().txid()
                 {
                     // Conflicting tx has priority if txid of conflicting tx > txid of original tx
                     return Ok(None);
@@ -960,7 +917,7 @@ impl<A: Anchor> TxGraph<A> {
         &'a self,
         chain: &'a C,
         chain_tip: BlockId,
-    ) -> impl Iterator<Item = Result<CanonicalTx<'a, Transaction, A>, C::Error>> {
+    ) -> impl Iterator<Item = Result<CanonicalTx<'a, Arc<Transaction>, A>, C::Error>> {
         self.full_txs().filter_map(move |tx| {
             self.try_get_chain_position(chain, chain_tip, tx.txid)
                 .map(|v| {
@@ -982,7 +939,7 @@ impl<A: Anchor> TxGraph<A> {
         &'a self,
         chain: &'a C,
         chain_tip: BlockId,
-    ) -> impl Iterator<Item = CanonicalTx<'a, Transaction, A>> {
+    ) -> impl Iterator<Item = CanonicalTx<'a, Arc<Transaction>, A>> {
         self.try_list_chain_txs(chain, chain_tip)
             .map(|r| r.expect("oracle is infallible"))
     }
@@ -1021,7 +978,7 @@ impl<A: Anchor> TxGraph<A> {
                         None => return Ok(None),
                     };
 
-                    let txout = match tx_node.tx.output.get(op.vout as usize) {
+                    let txout = match tx_node.tx.as_ref().output.get(op.vout as usize) {
                         Some(txout) => txout.clone(),
                         None => return Ok(None),
                     };
@@ -1043,7 +1000,7 @@ impl<A: Anchor> TxGraph<A> {
                             txout,
                             chain_position,
                             spent_by,
-                            is_on_coinbase: tx_node.tx.is_coin_base(),
+                            is_on_coinbase: tx_node.tx.as_ref().is_coin_base(),
                         },
                     )))
                 },
@@ -1209,7 +1166,7 @@ impl<A: Anchor> TxGraph<A> {
 #[must_use]
 pub struct ChangeSet<A = ()> {
     /// Added transactions.
-    pub txs: BTreeSet<Transaction>,
+    pub txs: BTreeSet<Arc<Transaction>>,
     /// Added txouts.
     pub txouts: BTreeMap<OutPoint, TxOut>,
     /// Added anchors.
@@ -1247,8 +1204,6 @@ impl<A> ChangeSet<A> {
     ///
     /// This is useful if you want to find which heights you need to fetch data about in order to
     /// confirm or exclude these anchors.
-    ///
-    /// See also: [`TxGraph::missing_heights`]
     pub fn anchor_heights(&self) -> impl Iterator<Item = u32> + '_
     where
         A: Anchor,
@@ -1263,24 +1218,6 @@ impl<A> ChangeSet<A> {
                 !duplicate
             })
     }
-
-    /// Returns an iterator for the [`anchor_heights`] in this changeset that are not included in
-    /// `local_chain`. This tells you which heights you need to include in `local_chain` in order
-    /// for it to conclusively act as a [`ChainOracle`] for the transaction anchors this changeset
-    /// will add.
-    ///
-    /// [`ChainOracle`]: crate::ChainOracle
-    /// [`anchor_heights`]: Self::anchor_heights
-    pub fn missing_heights_from<'a>(
-        &'a self,
-        local_chain: &'a LocalChain,
-    ) -> impl Iterator<Item = u32> + 'a
-    where
-        A: Anchor,
-    {
-        self.anchor_heights()
-            .filter(move |height| !local_chain.blocks().contains_key(height))
-    }
 }
 
 impl<A: Ord> Append for ChangeSet<A> {
@@ -1345,7 +1282,7 @@ impl<A> AsRef<TxGraph<A>> for TxGraph<A> {
 pub struct TxAncestors<'g, A, F> {
     graph: &'g TxGraph<A>,
     visited: HashSet<Txid>,
-    queue: VecDeque<(usize, &'g Transaction)>,
+    queue: VecDeque<(usize, Arc<Transaction>)>,
     filter_map: F,
 }
 
@@ -1353,13 +1290,13 @@ impl<'g, A, F> TxAncestors<'g, A, F> {
     /// Creates a `TxAncestors` that includes the starting `Transaction` when iterating.
     pub(crate) fn new_include_root(
         graph: &'g TxGraph<A>,
-        tx: &'g Transaction,
+        tx: impl Into<Arc<Transaction>>,
         filter_map: F,
     ) -> Self {
         Self {
             graph,
             visited: Default::default(),
-            queue: [(0, tx)].into(),
+            queue: [(0, tx.into())].into(),
             filter_map,
         }
     }
@@ -1367,7 +1304,7 @@ impl<'g, A, F> TxAncestors<'g, A, F> {
     /// Creates a `TxAncestors` that excludes the starting `Transaction` when iterating.
     pub(crate) fn new_exclude_root(
         graph: &'g TxGraph<A>,
-        tx: &'g Transaction,
+        tx: impl Into<Arc<Transaction>>,
         filter_map: F,
     ) -> Self {
         let mut ancestors = Self {
@@ -1376,7 +1313,7 @@ impl<'g, A, F> TxAncestors<'g, A, F> {
             queue: Default::default(),
             filter_map,
         };
-        ancestors.populate_queue(1, tx);
+        ancestors.populate_queue(1, tx.into());
         ancestors
     }
 
@@ -1389,12 +1326,13 @@ impl<'g, A, F> TxAncestors<'g, A, F> {
         filter_map: F,
     ) -> Self
     where
-        I: IntoIterator<Item = &'g Transaction>,
+        I: IntoIterator,
+        I::Item: Into<Arc<Transaction>>,
     {
         Self {
             graph,
             visited: Default::default(),
-            queue: txs.into_iter().map(|tx| (0, tx)).collect(),
+            queue: txs.into_iter().map(|tx| (0, tx.into())).collect(),
             filter_map,
         }
     }
@@ -1408,7 +1346,8 @@ impl<'g, A, F> TxAncestors<'g, A, F> {
         filter_map: F,
     ) -> Self
     where
-        I: IntoIterator<Item = &'g Transaction>,
+        I: IntoIterator,
+        I::Item: Into<Arc<Transaction>>,
     {
         let mut ancestors = Self {
             graph,
@@ -1417,12 +1356,12 @@ impl<'g, A, F> TxAncestors<'g, A, F> {
             filter_map,
         };
         for tx in txs {
-            ancestors.populate_queue(1, tx);
+            ancestors.populate_queue(1, tx.into());
         }
         ancestors
     }
 
-    fn populate_queue(&mut self, depth: usize, tx: &'g Transaction) {
+    fn populate_queue(&mut self, depth: usize, tx: Arc<Transaction>) {
         let ancestors = tx
             .input
             .iter()
@@ -1436,7 +1375,7 @@ impl<'g, A, F> TxAncestors<'g, A, F> {
 
 impl<'g, A, F, O> Iterator for TxAncestors<'g, A, F>
 where
-    F: FnMut(usize, &'g Transaction) -> Option<O>,
+    F: FnMut(usize, Arc<Transaction>) -> Option<O>,
 {
     type Item = O;
 
@@ -1445,7 +1384,7 @@ where
             // we have exhausted all paths when queue is empty
             let (ancestor_depth, tx) = self.queue.pop_front()?;
             // ignore paths when user filters them out
-            let item = match (self.filter_map)(ancestor_depth, tx) {
+            let item = match (self.filter_map)(ancestor_depth, tx.clone()) {
                 Some(item) => item,
                 None => continue,
             };
diff --git a/crates/chain/tests/test_indexed_tx_graph.rs b/crates/chain/tests/test_indexed_tx_graph.rs
index 41b1d4d3e..0fd2a71b8 100644
--- a/crates/chain/tests/test_indexed_tx_graph.rs
+++ b/crates/chain/tests/test_indexed_tx_graph.rs
@@ -1,13 +1,13 @@
 #[macro_use]
 mod common;
 
-use std::collections::BTreeSet;
+use std::{collections::BTreeSet, sync::Arc};
 
 use bdk_chain::{
     indexed_tx_graph::{self, IndexedTxGraph},
     keychain::{self, Balance, KeychainTxOutIndex},
     local_chain::LocalChain,
-    tx_graph, BlockId, ChainPosition, ConfirmationHeightAnchor,
+    tx_graph, ChainPosition, ConfirmationHeightAnchor,
 };
 use bitcoin::{secp256k1::Secp256k1, OutPoint, Script, ScriptBuf, Transaction, TxIn, TxOut};
 use miniscript::Descriptor;
@@ -66,7 +66,7 @@ fn insert_relevant_txs() {
 
     let changeset = indexed_tx_graph::ChangeSet {
         graph: tx_graph::ChangeSet {
-            txs: txs.clone().into(),
+            txs: txs.iter().cloned().map(Arc::new).collect(),
             ..Default::default()
         },
         indexer: keychain::ChangeSet([((), 9_u32)].into()),
@@ -80,7 +80,6 @@ fn insert_relevant_txs() {
     assert_eq!(graph.initial_changeset(), changeset,);
 }
 
-#[test]
 /// Ensure consistency IndexedTxGraph list_* and balance methods. These methods lists
 /// relevant txouts and utxos from the information fetched from a ChainOracle (here a LocalChain).
 ///
@@ -108,7 +107,7 @@ fn insert_relevant_txs() {
 ///
 /// Finally Add more blocks to local chain until tx1 coinbase maturity hits.
 /// Assert maturity at coinbase maturity inflection height. Block height 98 and 99.
-
+#[test]
 fn test_list_owned_txouts() {
     // Create Local chains
     let local_chain = LocalChain::from_blocks((0..150).map(|i| (i as u32, h!("random"))).collect())
@@ -213,10 +212,8 @@ fn test_list_owned_txouts() {
             (
                 *tx,
                 local_chain
-                    .blocks()
-                    .get(&height)
-                    .cloned()
-                    .map(|hash| BlockId { height, hash })
+                    .query(height)
+                    .map(|cp| cp.block_id())
                     .map(|anchor_block| ConfirmationHeightAnchor {
                         anchor_block,
                         confirmation_height: anchor_block.height,
@@ -231,9 +228,8 @@ fn test_list_owned_txouts() {
         |height: u32,
          graph: &IndexedTxGraph<ConfirmationHeightAnchor, KeychainTxOutIndex<String>>| {
             let chain_tip = local_chain
-                .blocks()
-                .get(&height)
-                .map(|&hash| BlockId { height, hash })
+                .query(height)
+                .map(|cp| cp.block_id())
                 .unwrap_or_else(|| panic!("block must exist at {}", height));
             let txouts = graph
                 .graph()
diff --git a/crates/chain/tests/test_local_chain.rs b/crates/chain/tests/test_local_chain.rs
index c1a1cd7f9..b601a17f9 100644
--- a/crates/chain/tests/test_local_chain.rs
+++ b/crates/chain/tests/test_local_chain.rs
@@ -528,6 +528,45 @@ fn checkpoint_from_block_ids() {
     }
 }
 
+#[test]
+fn checkpoint_query() {
+    struct TestCase {
+        chain: LocalChain,
+        /// The heights we want to call [`CheckPoint::query`] with, represented as an inclusive
+        /// range.
+        ///
+        /// If a [`CheckPoint`] exists at that height, we expect [`CheckPoint::query`] to return
+        /// it. If not, [`CheckPoint::query`] should return `None`.
+        query_range: (u32, u32),
+    }
+
+    let test_cases = [
+        TestCase {
+            chain: local_chain![(0, h!("_")), (1, h!("A"))],
+            query_range: (0, 2),
+        },
+        TestCase {
+            chain: local_chain![(0, h!("_")), (2, h!("B")), (3, h!("C"))],
+            query_range: (0, 3),
+        },
+    ];
+
+    for t in test_cases.into_iter() {
+        let tip = t.chain.tip();
+        for h in t.query_range.0..=t.query_range.1 {
+            let query_result = tip.query(h);
+            let exp_hash = t.chain.query(h).map(|cp| cp.hash());
+            match query_result {
+                Some(cp) => {
+                    assert_eq!(Some(cp.hash()), exp_hash);
+                    assert_eq!(cp.height(), h);
+                }
+                None => assert!(exp_hash.is_none()),
+            }
+        }
+    }
+}
+
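As a side note for reviewers, the behaviour exercised above can be restated with a small standalone model (illustrative only; it does not use the real `CheckPoint`/`LocalChain` types): querying walks back from the tip and returns the entry at exactly the requested height, or `None` if no checkpoint sits there.

// Minimal model of the `query` semantics asserted in the test above. The real types walk
// a checkpoint linked list; here a sorted slice of (height, hash) pairs stands in for it.
fn query_model(chain: &[(u32, &'static str)], height: u32) -> Option<(u32, &'static str)> {
    chain
        .iter()
        .rev() // start from the tip, as a checkpoint traversal would
        .take_while(|(h, _)| *h >= height)
        .find(|(h, _)| *h == height)
        .copied()
}

fn main() {
    let chain = [(0, "genesis"), (2, "B"), (3, "C")];
    assert_eq!(query_model(&chain, 3), Some((3, "C"))); // the tip itself
    assert_eq!(query_model(&chain, 2), Some((2, "B"))); // an existing checkpoint
    assert_eq!(query_model(&chain, 1), None); // no checkpoint at this height
    assert_eq!(query_model(&chain, 9), None); // above the tip
}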
 #[test]
 fn local_chain_apply_header_connected_to() {
     fn header_from_prev_blockhash(prev_blockhash: BlockHash) -> Header {
diff --git a/crates/chain/tests/test_tx_graph.rs b/crates/chain/tests/test_tx_graph.rs
index 37e8c7192..11ac8032a 100644
--- a/crates/chain/tests/test_tx_graph.rs
+++ b/crates/chain/tests/test_tx_graph.rs
@@ -13,6 +13,7 @@ use bitcoin::{
 use common::*;
 use core::iter;
 use rand::RngCore;
+use std::sync::Arc;
 use std::vec;
 
 #[test]
@@ -119,7 +120,7 @@ fn insert_txouts() {
         assert_eq!(
             graph.insert_tx(update_txs.clone()),
             ChangeSet {
-                txs: [update_txs.clone()].into(),
+                txs: [Arc::new(update_txs.clone())].into(),
                 ..Default::default()
             }
         );
@@ -143,7 +144,7 @@ fn insert_txouts() {
     assert_eq!(
         changeset,
         ChangeSet {
-            txs: [update_txs.clone()].into(),
+            txs: [Arc::new(update_txs.clone())].into(),
             txouts: update_ops.clone().into(),
             anchors: [(conf_anchor, update_txs.txid()), (unconf_anchor, h!("tx2"))].into(),
             last_seen: [(h!("tx2"), 1000000)].into()
@@ -194,7 +195,7 @@ fn insert_txouts() {
     assert_eq!(
         graph.initial_changeset(),
         ChangeSet {
-            txs: [update_txs.clone()].into(),
+            txs: [Arc::new(update_txs.clone())].into(),
             txouts: update_ops.into_iter().chain(original_ops).collect(),
             anchors: [(conf_anchor, update_txs.txid()), (unconf_anchor, h!("tx2"))].into(),
             last_seen: [(h!("tx2"), 1000000)].into()
@@ -276,7 +277,10 @@ fn insert_tx_can_retrieve_full_tx_from_graph() {
 
     let mut graph = TxGraph::<()>::default();
     let _ = graph.insert_tx(tx.clone());
-    assert_eq!(graph.get_tx(tx.txid()), Some(&tx));
+    assert_eq!(
+        graph.get_tx(tx.txid()).map(|tx| tx.as_ref().clone()),
+        Some(tx)
+    );
 }
 
 #[test]
@@ -643,7 +647,7 @@ fn test_walk_ancestors() {
         ..common::new_tx(0)
     };
 
-    let mut graph = TxGraph::<BlockId>::new(vec![
+    let mut graph = TxGraph::<BlockId>::new([
         tx_a0.clone(),
         tx_b0.clone(),
         tx_b1.clone(),
@@ -664,17 +668,17 @@ fn test_walk_ancestors() {
 
     let ancestors = [
         graph
-            .walk_ancestors(&tx_c0, |depth, tx| Some((depth, tx)))
+            .walk_ancestors(tx_c0.clone(), |depth, tx| Some((depth, tx)))
             .collect::<Vec<_>>(),
         graph
-            .walk_ancestors(&tx_d0, |depth, tx| Some((depth, tx)))
+            .walk_ancestors(tx_d0.clone(), |depth, tx| Some((depth, tx)))
             .collect::<Vec<_>>(),
         graph
-            .walk_ancestors(&tx_e0, |depth, tx| Some((depth, tx)))
+            .walk_ancestors(tx_e0.clone(), |depth, tx| Some((depth, tx)))
             .collect::<Vec<_>>(),
         // Only traverse unconfirmed ancestors of tx_e0 this time
         graph
-            .walk_ancestors(&tx_e0, |depth, tx| {
+            .walk_ancestors(tx_e0.clone(), |depth, tx| {
                 let tx_node = graph.get_tx_node(tx.txid())?;
                 for block in tx_node.anchors {
                     match local_chain.is_block_in_chain(block.anchor_block(), tip.block_id()) {
@@ -701,8 +705,14 @@ fn test_walk_ancestors() {
         vec![(1, &tx_d1), (2, &tx_c2), (2, &tx_c3), (3, &tx_b2)],
     ];
 
-    for (txids, expected_txids) in ancestors.iter().zip(expected_ancestors.iter()) {
-        assert_eq!(txids, expected_txids);
+    for (txids, expected_txids) in ancestors.into_iter().zip(expected_ancestors) {
+        assert_eq!(
+            txids,
+            expected_txids
+                .into_iter()
+                .map(|(i, tx)| (i, Arc::new(tx.clone())))
+                .collect::<Vec<_>>()
+        );
     }
 }
 
@@ -1048,139 +1058,6 @@ fn test_changeset_last_seen_append() {
     }
 }
 
-#[test]
-fn test_missing_blocks() {
-    /// An anchor implementation for testing, made up of `(the_anchor_block, random_data)`.
-    #[derive(Debug, Clone, Eq, PartialEq, PartialOrd, Ord, core::hash::Hash)]
-    struct TestAnchor(BlockId);
-
-    impl Anchor for TestAnchor {
-        fn anchor_block(&self) -> BlockId {
-            self.0
-        }
-    }
-
-    struct Scenario<'a> {
-        name: &'a str,
-        graph: TxGraph<TestAnchor>,
-        chain: LocalChain,
-        exp_heights: &'a [u32],
-    }
-
-    const fn new_anchor(height: u32, hash: BlockHash) -> TestAnchor {
-        TestAnchor(BlockId { height, hash })
-    }
-
-    fn new_scenario<'a>(
-        name: &'a str,
-        graph_anchors: &'a [(Txid, TestAnchor)],
-        chain: &'a [(u32, BlockHash)],
-        exp_heights: &'a [u32],
-    ) -> Scenario<'a> {
-        Scenario {
-            name,
-            graph: {
-                let mut g = TxGraph::default();
-                for (txid, anchor) in graph_anchors {
-                    let _ = g.insert_anchor(*txid, anchor.clone());
-                }
-                g
-            },
-            chain: {
-                let (mut c, _) = LocalChain::from_genesis_hash(h!("genesis"));
-                for (height, hash) in chain {
-                    let _ = c.insert_block(BlockId {
-                        height: *height,
-                        hash: *hash,
-                    });
-                }
-                c
-            },
-            exp_heights,
-        }
-    }
-
-    fn run(scenarios: &[Scenario]) {
-        for scenario in scenarios {
-            let Scenario {
-                name,
-                graph,
-                chain,
-                exp_heights,
-            } = scenario;
-
-            let heights = graph.missing_heights(chain).collect::<Vec<_>>();
-            assert_eq!(&heights, exp_heights, "scenario: {}", name);
-        }
-    }
-
-    run(&[
-        new_scenario(
-            "2 txs with the same anchor (2:B) which is missing from chain",
-            &[
-                (h!("tx_1"), new_anchor(2, h!("B"))),
-                (h!("tx_2"), new_anchor(2, h!("B"))),
-            ],
-            &[(1, h!("A")), (3, h!("C"))],
-            &[2],
-        ),
-        new_scenario(
-            "2 txs with different anchors at the same height, one of the anchors is missing",
-            &[
-                (h!("tx_1"), new_anchor(2, h!("B1"))),
-                (h!("tx_2"), new_anchor(2, h!("B2"))),
-            ],
-            &[(1, h!("A")), (2, h!("B1"))],
-            &[],
-        ),
-        new_scenario(
-            "tx with 2 anchors of same height which are missing from the chain",
-            &[
-                (h!("tx"), new_anchor(3, h!("C1"))),
-                (h!("tx"), new_anchor(3, h!("C2"))),
-            ],
-            &[(1, h!("A")), (4, h!("D"))],
-            &[3],
-        ),
-        new_scenario(
-            "tx with 2 anchors at the same height, chain has this height but does not match either anchor",
-            &[
-                (h!("tx"), new_anchor(4, h!("D1"))),
-                (h!("tx"), new_anchor(4, h!("D2"))),
-            ],
-            &[(4, h!("D3")), (5, h!("E"))],
-            &[],
-        ),
-        new_scenario(
-            "tx with 2 anchors at different heights, one anchor exists in chain, should return nothing",
-            &[
-                (h!("tx"), new_anchor(3, h!("C"))),
-                (h!("tx"), new_anchor(4, h!("D"))),
-            ],
-            &[(4, h!("D")), (5, h!("E"))],
-            &[],
-        ),
-        new_scenario(
-            "tx with 2 anchors at different heights, first height is already in chain with different hash, iterator should only return 2nd height",
-            &[
-                (h!("tx"), new_anchor(5, h!("E1"))),
-                (h!("tx"), new_anchor(6, h!("F1"))),
-            ],
-            &[(4, h!("D")), (5, h!("E")), (7, h!("G"))],
-            &[6],
-        ),
-        new_scenario(
-            "tx with 2 anchors at different heights, neither height is in chain, both heights should be returned",
-            &[
-                (h!("tx"), new_anchor(3, h!("C"))),
-                (h!("tx"), new_anchor(4, h!("D"))),
-            ],
-            &[(1, h!("A")), (2, h!("B"))],
-            &[3, 4],
-        ),
-    ]);
-}
-
 #[test]
 /// The `map_anchors` allow a caller to pass a function to reconstruct the [`TxGraph`] with any [`Anchor`],
 /// even though the function is non-deterministic.
diff --git a/crates/esplora/src/async_ext.rs b/crates/esplora/src/async_ext.rs
index 4d6e0dfa8..86e469d0c 100644
--- a/crates/esplora/src/async_ext.rs
+++ b/crates/esplora/src/async_ext.rs
@@ -1,5 +1,10 @@
+use std::collections::BTreeSet;
+use std::fmt::Debug;
+
 use async_trait::async_trait;
 use bdk_chain::collections::btree_map;
+use bdk_chain::spk_client::{FullScanRequest, FullScanResult, SyncRequest, SyncResult};
+use bdk_chain::Anchor;
 use bdk_chain::{
     bitcoin::{BlockHash, OutPoint, ScriptBuf, TxOut, Txid},
     collections::BTreeMap,
@@ -22,260 +27,258 @@ type Error = Box<esplora_client::Error>;
 #[cfg_attr(target_arch = "wasm32", async_trait(?Send))]
 #[cfg_attr(not(target_arch = "wasm32"), async_trait)]
 pub trait EsploraAsyncExt {
-    /// Prepare a [`LocalChain`] update with blocks fetched from Esplora.
-    ///
-    /// * `local_tip` is the previous tip of [`LocalChain::tip`].
-    /// * `request_heights` is the block heights that we are interested in fetching from Esplora.
-    ///
-    /// The result of this method can be applied to [`LocalChain::apply_update`].
-    ///
-    /// ## Consistency
-    ///
-    /// The chain update returned is guaranteed to be consistent as long as there is not a *large* re-org
-    /// during the call. The size of re-org we can tollerate is server dependent but will be at
-    /// least 10.
-    ///
-    /// [`LocalChain`]: bdk_chain::local_chain::LocalChain
-    /// [`LocalChain::tip`]: bdk_chain::local_chain::LocalChain::tip
-    /// [`LocalChain::apply_update`]: bdk_chain::local_chain::LocalChain::apply_update
-    async fn update_local_chain(
-        &self,
-        local_tip: CheckPoint,
-        request_heights: impl IntoIterator<IntoIter = impl Iterator<Item = u32> + Send> + Send,
-    ) -> Result<local_chain::Update, Error>;
-
-    /// Full scan the keychain scripts specified with the blockchain (via an Esplora client) and
-    /// returns a [`TxGraph`] and a map of last active indices.
+    /// Scan keychain scripts for transactions against Esplora, returning an update that can be
+    /// applied to the receiving structures.
     ///
-    /// * `keychain_spks`: keychains that we want to scan transactions for
+    /// The full scan for each keychain stops after a gap of `stop_gap` script pubkeys with no
+    /// associated transactions. `parallel_requests` specifies the max number of HTTP requests to
+    /// make in parallel.
     ///
-    /// The full scan for each keychain stops after a gap of `stop_gap` script pubkeys with no associated
-    /// transactions. `parallel_requests` specifies the max number of HTTP requests to make in
-    /// parallel.
-    async fn full_scan<K: Ord + Clone + Send>(
+    /// [`LocalChain::tip`]: local_chain::LocalChain::tip
+    async fn full_scan<
+        K: Ord + Clone + Send + Debug + 'static,
+        I: Iterator<Item = (u32, ScriptBuf)> + Send,
+    >(
         &self,
-        keychain_spks: BTreeMap<
-            K,
-            impl IntoIterator<IntoIter = impl Iterator<Item = (u32, ScriptBuf)> + Send> + Send,
-        >,
+        request: FullScanRequest<K, I>,
         stop_gap: usize,
         parallel_requests: usize,
-    ) -> Result<(TxGraph<ConfirmationTimeHeightAnchor>, BTreeMap<K, u32>), Error>;
+    ) -> Result<FullScanResult<K>, Error>;
 
     /// Sync a set of scripts with the blockchain (via an Esplora client) for the data
     /// specified and return a [`TxGraph`].
     ///
-    /// * `misc_spks`: scripts that we want to sync transactions for
-    /// * `txids`: transactions for which we want updated [`ConfirmationTimeHeightAnchor`]s
-    /// * `outpoints`: transactions associated with these outpoints (residing, spending) that we
-    ///     want to include in the update
-    ///
     /// If the scripts to sync are unknown, such as when restoring or importing a keychain that
     /// may include scripts that have been used, use [`full_scan`] with the keychain.
     ///
+    /// [`LocalChain::tip`]: local_chain::LocalChain::tip
     /// [`full_scan`]: EsploraAsyncExt::full_scan
     async fn sync(
         &self,
-        misc_spks: impl IntoIterator<IntoIter = impl Iterator<Item = ScriptBuf> + Send> + Send,
-        txids: impl IntoIterator<IntoIter = impl Iterator<Item = Txid> + Send> + Send,
-        outpoints: impl IntoIterator<IntoIter = impl Iterator<Item = OutPoint> + Send> + Send,
+        request: SyncRequest,
         parallel_requests: usize,
-    ) -> Result<TxGraph<ConfirmationTimeHeightAnchor>, Error>;
+    ) -> Result<SyncResult, Error>;
 }
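To make the `stop_gap` rule in the `full_scan` docs above concrete, here is a minimal standalone sketch (a model only, not part of this patch); it mirrors the `past_gap_limit` check that appears later in this file.

// Scanning a keychain stops once the last derived index is more than `stop_gap` indices
// past the last index that had transactions, or reaches `stop_gap` when none were active.
fn past_gap_limit(last_index: u32, last_active_index: Option<u32>, stop_gap: usize) -> bool {
    match last_active_index {
        Some(i) => last_index > i.saturating_add(stop_gap as u32),
        None => last_index >= stop_gap as u32,
    }
}

fn main() {
    // stop_gap = 3 and last activity at index 5: index 9 is past the gap, index 8 is not.
    assert!(past_gap_limit(9, Some(5), 3));
    assert!(!past_gap_limit(8, Some(5), 3));
    // No activity yet: scanning stops once the derived index reaches stop_gap.
    assert!(past_gap_limit(3, None, 3));
    assert!(!past_gap_limit(2, None, 3));
}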
 
 #[cfg_attr(target_arch = "wasm32", async_trait(?Send))]
 #[cfg_attr(not(target_arch = "wasm32"), async_trait)]
 impl EsploraAsyncExt for esplora_client::AsyncClient {
-    async fn update_local_chain(
+    async fn full_scan<
+        K: Ord + Clone + Send + Debug + 'static,
+        I: Iterator<Item = (u32, ScriptBuf)> + Send,
+    >(
         &self,
-        local_tip: CheckPoint,
-        request_heights: impl IntoIterator<IntoIter = impl Iterator<Item = u32> + Send> + Send,
-    ) -> Result<local_chain::Update, Error> {
-        // Fetch latest N (server dependent) blocks from Esplora. The server guarantees these are
-        // consistent.
-        let mut fetched_blocks = self
-            .get_blocks(None)
-            .await?
-            .into_iter()
-            .map(|b| (b.time.height, b.id))
-            .collect::<BTreeMap<u32, BlockHash>>();
-        let new_tip_height = fetched_blocks
-            .keys()
-            .last()
-            .copied()
-            .expect("must have atleast one block");
-
-        // Fetch blocks of heights that the caller is interested in, skipping blocks that are
-        // already fetched when constructing `fetched_blocks`.
-        for height in request_heights {
-            // do not fetch blocks higher than remote tip
-            if height > new_tip_height {
-                continue;
-            }
-            // only fetch what is missing
-            if let btree_map::Entry::Vacant(entry) = fetched_blocks.entry(height) {
-                // ❗The return value of `get_block_hash` is not strictly guaranteed to be consistent
-                // with the chain at the time of `get_blocks` above (there could have been a deep
-                // re-org). Since `get_blocks` returns 10 (or so) blocks we are assuming that it's
-                // not possible to have a re-org deeper than that.
-                entry.insert(self.get_block_hash(height).await?);
-            }
-        }
-
-        // Ensure `fetched_blocks` can create an update that connects with the original chain by
-        // finding a "Point of Agreement".
-        for (height, local_hash) in local_tip.iter().map(|cp| (cp.height(), cp.hash())) {
-            if height > new_tip_height {
-                continue;
-            }
-
-            let fetched_hash = match fetched_blocks.entry(height) {
-                btree_map::Entry::Occupied(entry) => *entry.get(),
-                btree_map::Entry::Vacant(entry) => {
-                    *entry.insert(self.get_block_hash(height).await?)
-                }
-            };
-
-            // We have found point of agreement so the update will connect!
-            if fetched_hash == local_hash {
-                break;
-            }
-        }
+        mut request: FullScanRequest<K, I>,
+        stop_gap: usize,
+        parallel_requests: usize,
+    ) -> Result<FullScanResult<K>, Error> {
+        let update_blocks = init_chain_update(self, &request.chain_tip).await?;
+        let (graph_update, last_active_indices) = full_scan_for_index_and_graph(
+            self,
+            request.take_spks_by_keychain(),
+            stop_gap,
+            parallel_requests,
+        )
+        .await?;
+        let chain_update = finalize_chain_update(
+            self,
+            &request.chain_tip,
+            graph_update.all_anchors(),
+            update_blocks,
+        )
+        .await?;
 
-        Ok(local_chain::Update {
-            tip: CheckPoint::from_block_ids(fetched_blocks.into_iter().map(BlockId::from))
-                .expect("must be in height order"),
-            introduce_older_blocks: true,
+        Ok(FullScanResult {
+            graph_update,
+            chain_update,
+            last_active_indices,
         })
     }
 
-    async fn full_scan<K: Ord + Clone + Send>(
+    async fn sync(
         &self,
-        keychain_spks: BTreeMap<
-            K,
-            impl IntoIterator<IntoIter = impl Iterator<Item = (u32, ScriptBuf)> + Send> + Send,
-        >,
-        stop_gap: usize,
+        mut request: SyncRequest,
         parallel_requests: usize,
-    ) -> Result<(TxGraph<ConfirmationTimeHeightAnchor>, BTreeMap<K, u32>), Error> {
-        type TxsOfSpkIndex = (u32, Vec<esplora_client::Tx>);
-        let parallel_requests = Ord::max(parallel_requests, 1);
-        let mut graph = TxGraph::<ConfirmationTimeHeightAnchor>::default();
-        let mut last_active_indexes = BTreeMap::<K, u32>::new();
-
-        for (keychain, spks) in keychain_spks {
-            let mut spks = spks.into_iter();
-            let mut last_index = Option::<u32>::None;
-            let mut last_active_index = Option::<u32>::None;
-
-            loop {
-                let handles = spks
-                    .by_ref()
-                    .take(parallel_requests)
-                    .map(|(spk_index, spk)| {
-                        let client = self.clone();
-                        async move {
-                            let mut last_seen = None;
-                            let mut spk_txs = Vec::new();
-                            loop {
-                                let txs = client.scripthash_txs(&spk, last_seen).await?;
-                                let tx_count = txs.len();
-                                last_seen = txs.last().map(|tx| tx.txid);
-                                spk_txs.extend(txs);
-                                if tx_count < 25 {
-                                    break Result::<_, Error>::Ok((spk_index, spk_txs));
-                                }
-                            }
-                        }
-                    })
-                    .collect::<FuturesOrdered<_>>();
+    ) -> Result<SyncResult, Error> {
+        let update_blocks = init_chain_update(self, &request.chain_tip).await?;
+        let graph_update = sync_for_index_and_graph(
+            self,
+            request.take_spks().map(|(_i, spk)| spk),
+            request.take_txids(),
+            request.take_outpoints(),
+            parallel_requests,
+        )
+        .await?;
+        let chain_update = finalize_chain_update(
+            self,
+            &request.chain_tip,
+            graph_update.all_anchors(),
+            update_blocks,
+        )
+        .await?;
+        Ok(SyncResult {
+            graph_update,
+            chain_update,
+        })
+    }
+}
 
-                if handles.is_empty() {
-                    break;
-                }
+/// Create the initial chain update.
+///
+/// This atomically fetches the latest blocks from Esplora and additional blocks to ensure the
+/// update can connect to `local_tip`.
+///
+/// We want to do this before fetching transactions and anchors, because we cannot fetch the
+/// latest blocks and transactions atomically, and the checkpoint tip is used to determine the
+/// last-scanned block (for block-based chain-sources). It is therefore better to be conservative
+/// when setting the tip (use an earlier tip rather than a later one); otherwise the caller may
+/// accidentally skip blocks when alternating between chain-sources.
+#[doc(hidden)]
+pub async fn init_chain_update(
+    client: &esplora_client::AsyncClient,
+    local_tip: &CheckPoint,
+) -> Result<BTreeMap<u32, BlockHash>, Error> {
+    // Fetch latest N (server dependent) blocks from Esplora. The server guarantees these are
+    // consistent.
+    let mut fetched_blocks = client
+        .get_blocks(None)
+        .await?
+        .into_iter()
+        .map(|b| (b.time.height, b.id))
+        .collect::<BTreeMap<u32, BlockHash>>();
+    let new_tip_height = fetched_blocks
+        .keys()
+        .last()
+        .copied()
+        .expect("must atleast have one block");
 
-                for (index, txs) in handles.try_collect::<Vec<TxsOfSpkIndex>>().await? {
-                    last_index = Some(index);
-                    if !txs.is_empty() {
-                        last_active_index = Some(index);
-                    }
-                    for tx in txs {
-                        let _ = graph.insert_tx(tx.to_tx());
-                        if let Some(anchor) = anchor_from_status(&tx.status) {
-                            let _ = graph.insert_anchor(tx.txid, anchor);
-                        }
+    // Ensure `fetched_blocks` can create an update that connects with the original chain by
+    // finding a "Point of Agreement".
+    for (height, local_hash) in local_tip.iter().map(|cp| (cp.height(), cp.hash())) {
+        if height > new_tip_height {
+            continue;
+        }
 
-                        let previous_outputs = tx.vin.iter().filter_map(|vin| {
-                            let prevout = vin.prevout.as_ref()?;
-                            Some((
-                                OutPoint {
-                                    txid: vin.txid,
-                                    vout: vin.vout,
-                                },
-                                TxOut {
-                                    script_pubkey: prevout.scriptpubkey.clone(),
-                                    value: prevout.value,
-                                },
-                            ))
-                        });
-
-                        for (outpoint, txout) in previous_outputs {
-                            let _ = graph.insert_txout(outpoint, txout);
-                        }
-                    }
-                }
+        let fetched_hash = match fetched_blocks.entry(height) {
+            btree_map::Entry::Occupied(entry) => *entry.get(),
+            btree_map::Entry::Vacant(entry) => *entry.insert(client.get_block_hash(height).await?),
+        };
 
-                let last_index = last_index.expect("Must be set since handles wasn't empty.");
-                let past_gap_limit = if let Some(i) = last_active_index {
-                    last_index > i.saturating_add(stop_gap as u32)
-                } else {
-                    last_index >= stop_gap as u32
-                };
-                if past_gap_limit {
-                    break;
-                }
+        // We have found point of agreement so the update will connect!
+        if fetched_hash == local_hash {
+            break;
+        }
+    }
+
+    Ok(fetched_blocks)
+}
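For reviewers, the "Point of Agreement" loop above can be summarised with a small self-contained model. The names (`find_point_of_agreement`, `fetch_hash`) and the string "hashes" are invented purely for illustration; only the control flow mirrors the code above.

use std::collections::BTreeMap;

// `local` is the wallet's checkpoints from tip downwards, `fetched` the server's recent
// blocks; `fetch_hash` stands in for `get_block_hash`.
fn find_point_of_agreement(
    local: &[(u32, &'static str)],
    fetched: &mut BTreeMap<u32, &'static str>,
    mut fetch_hash: impl FnMut(u32) -> &'static str,
    new_tip_height: u32,
) -> Option<u32> {
    for &(height, local_hash) in local {
        if height > new_tip_height {
            continue;
        }
        // Fetch the block hash on demand if we do not already have it.
        let fetched_hash = *fetched.entry(height).or_insert_with(|| fetch_hash(height));
        if fetched_hash == local_hash {
            return Some(height); // the update will connect here
        }
    }
    None
}

fn main() {
    let mut fetched = BTreeMap::from([(5, "E"), (6, "F")]);
    // The local chain is stale at height 6 but agrees with the server at height 4.
    let local = [(6, "F-stale"), (4, "D"), (0, "G")];
    let server = BTreeMap::from([(6, "F"), (5, "E"), (4, "D"), (0, "G")]);
    let agreement = find_point_of_agreement(&local, &mut fetched, |h| server[&h], 6);
    assert_eq!(agreement, Some(4));
    // Height 4 was fetched on demand and added to the update blocks.
    assert_eq!(fetched.get(&4), Some(&"D"));
}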
+
+/// Fetches missing checkpoints and finalizes the [`local_chain::Update`].
+///
+/// A checkpoint is considered "missing" if an anchor (of `anchors`) points to a height without an
+/// existing checkpoint/block under `local_tip` or `update_blocks`.
+#[doc(hidden)]
+pub async fn finalize_chain_update<A: Anchor>(
+    client: &esplora_client::AsyncClient,
+    local_tip: &CheckPoint,
+    anchors: &BTreeSet<(A, Txid)>,
+    mut update_blocks: BTreeMap<u32, BlockHash>,
+) -> Result<local_chain::Update, Error> {
+    let update_tip_height = update_blocks
+        .keys()
+        .last()
+        .copied()
+        .expect("must atleast have one block");
+
+    // We want to have a corresponding checkpoint per anchor height. We iterate the heights of
+    // anchors backwards, comparing each height against our `local_tip`'s chain and our current
+    // set of `update_blocks` to see if a corresponding checkpoint already exists.
+    let anchor_heights = anchors
+        .iter()
+        .map(|(a, _)| a.anchor_block().height)
+        // filter out duplicate heights
+        .filter({
+            let mut prev_height = Option::<u32>::None;
+            move |h| match prev_height.replace(*h) {
+                None => true,
+                Some(prev_h) => prev_h != *h,
             }
+        })
+        // filter out heights that surpass the update tip
+        .filter(|h| *h <= update_tip_height)
+        .rev();
 
-            if let Some(last_active_index) = last_active_index {
-                last_active_indexes.insert(keychain, last_active_index);
+    // We keep track of a checkpoint node of `local_tip` to make traversing the linked-list of
+    // checkpoints more efficient.
+    let mut curr_cp = local_tip.clone();
+
+    for h in anchor_heights {
+        if let Some(cp) = curr_cp.query_from(h) {
+            curr_cp = cp.clone();
+            if cp.height() == h {
+                // Blocks that already exist in the checkpoint linked-list are also stored in
+                // `update_blocks` because we want to keep the higher checkpoints of `local_chain`.
+                update_blocks.insert(h, cp.hash());
+                continue;
             }
         }
-
-        Ok((graph, last_active_indexes))
+        if let btree_map::Entry::Vacant(entry) = update_blocks.entry(h) {
+            entry.insert(client.get_block_hash(h).await?);
+        }
     }
 
-    async fn sync(
-        &self,
-        misc_spks: impl IntoIterator<IntoIter = impl Iterator<Item = ScriptBuf> + Send> + Send,
-        txids: impl IntoIterator<IntoIter = impl Iterator<Item = Txid> + Send> + Send,
-        outpoints: impl IntoIterator<IntoIter = impl Iterator<Item = OutPoint> + Send> + Send,
-        parallel_requests: usize,
-    ) -> Result<TxGraph<ConfirmationTimeHeightAnchor>, Error> {
-        let mut graph = self
-            .full_scan(
-                [(
-                    (),
-                    misc_spks
-                        .into_iter()
-                        .enumerate()
-                        .map(|(i, spk)| (i as u32, spk)),
-                )]
-                .into(),
-                usize::MAX,
-                parallel_requests,
-            )
-            .await
-            .map(|(g, _)| g)?;
-
-        let mut txids = txids.into_iter();
+    Ok(local_chain::Update {
+        tip: CheckPoint::from_block_ids(
+            update_blocks
+                .into_iter()
+                .map(|(height, hash)| BlockId { height, hash }),
+        )
+        .expect("must be in order"),
+        introduce_older_blocks: true,
+    })
+}
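The consecutive-duplicate filter above assumes, as the code does, that the anchor set iterates in ascending height order, so remembering only the previous height is enough to drop duplicate heights without allocating a set. A standalone sketch of that pattern (names are illustrative):

// Drop consecutive duplicates from an already-sorted sequence of heights.
fn dedup_sorted_heights(heights: impl IntoIterator<Item = u32>) -> Vec<u32> {
    let mut prev_height = Option::<u32>::None;
    heights
        .into_iter()
        .filter(move |h| match prev_height.replace(*h) {
            None => true,
            Some(prev_h) => prev_h != *h,
        })
        .collect()
}

fn main() {
    assert_eq!(dedup_sorted_heights([2, 2, 3, 5, 5, 5, 9]), vec![2, 3, 5, 9]);
}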
+
+/// This performs a full scan to get an update for the [`TxGraph`] and
+/// [`KeychainTxOutIndex`](bdk_chain::keychain::KeychainTxOutIndex).
+#[doc(hidden)]
+pub async fn full_scan_for_index_and_graph<K: Ord + Clone + Send>(
+    client: &esplora_client::AsyncClient,
+    keychain_spks: BTreeMap<
+        K,
+        Box<impl IntoIterator<IntoIter = impl Iterator<Item = (u32, ScriptBuf)> + Send> + Send>,
+    >,
+    stop_gap: usize,
+    parallel_requests: usize,
+) -> Result<(TxGraph<ConfirmationTimeHeightAnchor>, BTreeMap<K, u32>), Error> {
+    type TxsOfSpkIndex = (u32, Vec<esplora_client::Tx>);
+    let parallel_requests = Ord::max(parallel_requests, 1);
+    let mut graph = TxGraph::<ConfirmationTimeHeightAnchor>::default();
+    let mut last_active_indexes = BTreeMap::<K, u32>::new();
+
+    for (keychain, spks) in keychain_spks {
+        let mut spks = spks.into_iter();
+        let mut last_index = Option::<u32>::None;
+        let mut last_active_index = Option::<u32>::None;
+
         loop {
-            let handles = txids
+            let handles = spks
                 .by_ref()
                 .take(parallel_requests)
-                .filter(|&txid| graph.get_tx(txid).is_none())
-                .map(|txid| {
-                    let client = self.clone();
-                    async move { client.get_tx_status(&txid).await.map(|s| (txid, s)) }
+                .map(|(spk_index, spk)| {
+                    let client = client.clone();
+                    async move {
+                        let mut last_seen = None;
+                        let mut spk_txs = Vec::new();
+                        loop {
+                            let txs = client.scripthash_txs(&spk, last_seen).await?;
+                            let tx_count = txs.len();
+                            last_seen = txs.last().map(|tx| tx.txid);
+                            spk_txs.extend(txs);
+                            if tx_count < 25 {
+                                break Result::<_, Error>::Ok((spk_index, spk_txs));
+                            }
+                        }
+                    }
                 })
                 .collect::<FuturesOrdered<_>>();
 
@@ -283,38 +286,130 @@ impl EsploraAsyncExt for esplora_client::AsyncClient {
                 break;
             }
 
-            for (txid, status) in handles.try_collect::<Vec<(Txid, TxStatus)>>().await? {
-                if let Some(anchor) = anchor_from_status(&status) {
-                    let _ = graph.insert_anchor(txid, anchor);
+            for (index, txs) in handles.try_collect::<Vec<TxsOfSpkIndex>>().await? {
+                last_index = Some(index);
+                if !txs.is_empty() {
+                    last_active_index = Some(index);
+                }
+                for tx in txs {
+                    let _ = graph.insert_tx(tx.to_tx());
+                    if let Some(anchor) = anchor_from_status(&tx.status) {
+                        let _ = graph.insert_anchor(tx.txid, anchor);
+                    }
+
+                    let previous_outputs = tx.vin.iter().filter_map(|vin| {
+                        let prevout = vin.prevout.as_ref()?;
+                        Some((
+                            OutPoint {
+                                txid: vin.txid,
+                                vout: vin.vout,
+                            },
+                            TxOut {
+                                script_pubkey: prevout.scriptpubkey.clone(),
+                                value: prevout.value,
+                            },
+                        ))
+                    });
+
+                    for (outpoint, txout) in previous_outputs {
+                        let _ = graph.insert_txout(outpoint, txout);
+                    }
                 }
             }
+
+            let last_index = last_index.expect("Must be set since handles wasn't empty.");
+            let past_gap_limit = if let Some(i) = last_active_index {
+                last_index > i.saturating_add(stop_gap as u32)
+            } else {
+                last_index >= stop_gap as u32
+            };
+            if past_gap_limit {
+                break;
+            }
         }
 
-        for op in outpoints.into_iter() {
-            if graph.get_tx(op.txid).is_none() {
-                if let Some(tx) = self.get_tx(&op.txid).await? {
-                    let _ = graph.insert_tx(tx);
-                }
-                let status = self.get_tx_status(&op.txid).await?;
-                if let Some(anchor) = anchor_from_status(&status) {
-                    let _ = graph.insert_anchor(op.txid, anchor);
-                }
+        if let Some(last_active_index) = last_active_index {
+            last_active_indexes.insert(keychain, last_active_index);
+        }
+    }
+
+    Ok((graph, last_active_indexes))
+}
+
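The inner loop above keeps requesting `scripthash_txs` pages keyed by the last txid seen until a short page (fewer than 25 transactions) signals the end. A generic standalone sketch of that paging pattern follows; the closure stands in for the HTTP call and the names are illustrative only.

// Keep fetching pages keyed by the last item seen until a short page ends the stream.
const PAGE_SIZE: usize = 25; // matches the Esplora scripthash_txs page size assumed above

fn collect_pages(mut fetch: impl FnMut(Option<u32>) -> Vec<u32>) -> Vec<u32> {
    let mut all = Vec::new();
    let mut last_seen = None;
    loop {
        let page = fetch(last_seen);
        let page_len = page.len();
        last_seen = page.last().copied();
        all.extend(page);
        if page_len < PAGE_SIZE {
            break;
        }
    }
    all
}

fn main() {
    // Fake source with 30 items: the first page returns 25, the second the remaining 5.
    let items: Vec<u32> = (0..30).collect();
    let fetched = collect_pages(|last| {
        let start = last.map(|v| v as usize + 1).unwrap_or(0);
        items[start..].iter().copied().take(PAGE_SIZE).collect()
    });
    assert_eq!(fetched, items);
}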
+#[doc(hidden)]
+pub async fn sync_for_index_and_graph(
+    client: &esplora_client::AsyncClient,
+    misc_spks: impl IntoIterator<IntoIter = impl Iterator<Item = ScriptBuf> + Send> + Send,
+    txids: impl IntoIterator<IntoIter = impl Iterator<Item = Txid> + Send> + Send,
+    outpoints: impl IntoIterator<IntoIter = impl Iterator<Item = OutPoint> + Send> + Send,
+    parallel_requests: usize,
+) -> Result<TxGraph<ConfirmationTimeHeightAnchor>, Error> {
+    let mut graph = full_scan_for_index_and_graph(
+        client,
+        [(
+            (),
+            Box::new(
+                misc_spks
+                    .into_iter()
+                    .enumerate()
+                    .map(|(i, spk)| (i as u32, spk)),
+            ),
+        )]
+        .into(),
+        usize::MAX,
+        parallel_requests,
+    )
+    .await
+    .map(|(g, _)| g)?;
+
+    let mut txids = txids.into_iter();
+    loop {
+        let handles = txids
+            .by_ref()
+            .take(parallel_requests)
+            .filter(|&txid| graph.get_tx(txid).is_none())
+            .map(|txid| {
+                let client = client.clone();
+                async move { client.get_tx_status(&txid).await.map(|s| (txid, s)) }
+            })
+            .collect::<FuturesOrdered<_>>();
+
+        if handles.is_empty() {
+            break;
+        }
+
+        for (txid, status) in handles.try_collect::<Vec<(Txid, TxStatus)>>().await? {
+            if let Some(anchor) = anchor_from_status(&status) {
+                let _ = graph.insert_anchor(txid, anchor);
             }
+        }
+    }
 
-            if let Some(op_status) = self.get_output_status(&op.txid, op.vout as _).await? {
-                if let Some(txid) = op_status.txid {
-                    if graph.get_tx(txid).is_none() {
-                        if let Some(tx) = self.get_tx(&txid).await? {
-                            let _ = graph.insert_tx(tx);
-                        }
-                        let status = self.get_tx_status(&txid).await?;
-                        if let Some(anchor) = anchor_from_status(&status) {
-                            let _ = graph.insert_anchor(txid, anchor);
-                        }
+    for op in outpoints.into_iter() {
+        if graph.get_tx(op.txid).is_none() {
+            if let Some(tx) = client.get_tx(&op.txid).await? {
+                let _ = graph.insert_tx(tx);
+            }
+            let status = client.get_tx_status(&op.txid).await?;
+            if let Some(anchor) = anchor_from_status(&status) {
+                let _ = graph.insert_anchor(op.txid, anchor);
+            }
+        }
+
+        if let Some(op_status) = client.get_output_status(&op.txid, op.vout as _).await? {
+            if let Some(txid) = op_status.txid {
+                if graph.get_tx(txid).is_none() {
+                    if let Some(tx) = client.get_tx(&txid).await? {
+                        let _ = graph.insert_tx(tx);
+                    }
+                    let status = client.get_tx_status(&txid).await?;
+                    if let Some(anchor) = anchor_from_status(&status) {
+                        let _ = graph.insert_anchor(txid, anchor);
                     }
                 }
             }
         }
-        Ok(graph)
     }
+
+    Ok(graph)
 }
diff --git a/crates/esplora/src/blocking_ext.rs b/crates/esplora/src/blocking_ext.rs
index 993e33ac0..da56250a8 100644
--- a/crates/esplora/src/blocking_ext.rs
+++ b/crates/esplora/src/blocking_ext.rs
@@ -1,7 +1,12 @@
+use std::collections::BTreeSet;
+use std::fmt::Debug;
 use std::thread::JoinHandle;
+use std::usize;
 
 use bdk_chain::collections::btree_map;
 use bdk_chain::collections::BTreeMap;
+use bdk_chain::spk_client::{FullScanRequest, FullScanResult, SyncRequest, SyncResult};
+use bdk_chain::Anchor;
 use bdk_chain::{
     bitcoin::{BlockHash, OutPoint, ScriptBuf, TxOut, Txid},
     local_chain::{self, CheckPoint},
@@ -12,7 +17,7 @@ use esplora_client::TxStatus;
 use crate::anchor_from_status;
 
 /// [`esplora_client::Error`]
-type Error = Box<esplora_client::Error>;
+pub type Error = Box<esplora_client::Error>;
 
 /// Trait to extend the functionality of [`esplora_client::BlockingClient`].
 ///
@@ -20,298 +25,388 @@ type Error = Box<esplora_client::Error>;
 ///
 /// [crate-level documentation]: crate
 pub trait EsploraExt {
-    /// Prepare a [`LocalChain`] update with blocks fetched from Esplora.
+    /// Scan keychain scripts for transactions against Esplora, returning an update that can be
+    /// applied to the receiving structures.
     ///
-    /// * `local_tip` is the previous tip of [`LocalChain::tip`].
-    /// * `request_heights` is the block heights that we are interested in fetching from Esplora.
+    /// The full scan for each keychain stops after a gap of `stop_gap` script pubkeys with no
+    /// associated transactions. `parallel_requests` specifies the max number of HTTP requests to
+    /// make in parallel.
     ///
-    /// The result of this method can be applied to [`LocalChain::apply_update`].
-    ///
-    /// ## Consistency
-    ///
-    /// The chain update returned is guaranteed to be consistent as long as there is not a *large* re-org
-    /// during the call. The size of re-org we can tollerate is server dependent but will be at
-    /// least 10.
-    ///
-    /// [`LocalChain`]: bdk_chain::local_chain::LocalChain
-    /// [`LocalChain::tip`]: bdk_chain::local_chain::LocalChain::tip
-    /// [`LocalChain::apply_update`]: bdk_chain::local_chain::LocalChain::apply_update
-    fn update_local_chain(
-        &self,
-        local_tip: CheckPoint,
-        request_heights: impl IntoIterator<Item = u32>,
-    ) -> Result<local_chain::Update, Error>;
-
-    /// Full scan the keychain scripts specified with the blockchain (via an Esplora client) and
-    /// returns a [`TxGraph`] and a map of last active indices.
-    ///
-    /// * `keychain_spks`: keychains that we want to scan transactions for
-    ///
-    /// The full scan for each keychain stops after a gap of `stop_gap` script pubkeys with no associated
-    /// transactions. `parallel_requests` specifies the max number of HTTP requests to make in
-    /// parallel.
-    fn full_scan<K: Ord + Clone>(
+    /// [`LocalChain::tip`]: local_chain::LocalChain::tip
+    fn full_scan<
+        K: Ord + Clone + Send + Debug + 'static,
+        I: Iterator<Item = (u32, ScriptBuf)> + Send,
+    >(
         &self,
-        keychain_spks: BTreeMap<K, impl IntoIterator<Item = (u32, ScriptBuf)>>,
+        request: FullScanRequest<K, I>,
         stop_gap: usize,
         parallel_requests: usize,
-    ) -> Result<(TxGraph<ConfirmationTimeHeightAnchor>, BTreeMap<K, u32>), Error>;
+    ) -> Result<FullScanResult<K>, Error>;
 
     /// Sync a set of scripts with the blockchain (via an Esplora client) for the data
     /// specified and return a [`TxGraph`].
     ///
-    /// * `misc_spks`: scripts that we want to sync transactions for
-    /// * `txids`: transactions for which we want updated [`ConfirmationTimeHeightAnchor`]s
-    /// * `outpoints`: transactions associated with these outpoints (residing, spending) that we
-    ///     want to include in the update
-    ///
     /// If the scripts to sync are unknown, such as when restoring or importing a keychain that
     /// may include scripts that have been used, use [`full_scan`] with the keychain.
     ///
     /// [`full_scan`]: EsploraExt::full_scan
+    fn sync(&self, request: SyncRequest, parallel_requests: usize) -> Result<SyncResult, Error>;
+}
+
+impl EsploraExt for esplora_client::BlockingClient {
+    fn full_scan<
+        K: Ord + Clone + Send + Debug + 'static,
+        I: Iterator<Item = (u32, ScriptBuf)> + Send,
+    >(
+        &self,
+        mut request: FullScanRequest<K, I>,
+        stop_gap: usize,
+        parallel_requests: usize,
+    ) -> Result<FullScanResult<K>, Error> {
+        let update_blocks = init_chain_update_blocking(self, &request.chain_tip)?;
+        let (graph_update, last_active_indices) = full_scan_for_index_and_graph_blocking(
+            self,
+            request.take_spks_by_keychain(),
+            stop_gap,
+            parallel_requests,
+        )?;
+        let chain_update = finalize_chain_update_blocking(
+            self,
+            &request.chain_tip,
+            graph_update.all_anchors(),
+            update_blocks,
+        )?;
+        Ok(FullScanResult {
+            graph_update,
+            chain_update,
+            last_active_indices,
+        })
+    }
+
     fn sync(
         &self,
-        misc_spks: impl IntoIterator<Item = ScriptBuf>,
-        txids: impl IntoIterator<Item = Txid>,
-        outpoints: impl IntoIterator<Item = OutPoint>,
+        mut request: SyncRequest,
         parallel_requests: usize,
-    ) -> Result<TxGraph<ConfirmationTimeHeightAnchor>, Error>;
+    ) -> Result<SyncResult, Error> {
+        let update_blocks = init_chain_update_blocking(self, &request.chain_tip)?;
+        let graph_update = sync_for_index_and_graph_blocking(
+            self,
+            request.take_spks().map(|(_i, spk)| spk),
+            request.take_txids(),
+            request.take_outpoints(),
+            parallel_requests,
+        )?;
+        let chain_update = finalize_chain_update_blocking(
+            self,
+            &request.chain_tip,
+            graph_update.all_anchors(),
+            update_blocks,
+        )?;
+        Ok(SyncResult {
+            graph_update,
+            chain_update,
+        })
+    }
 }
 
-impl EsploraExt for esplora_client::BlockingClient {
-    fn update_local_chain(
-        &self,
-        local_tip: CheckPoint,
-        request_heights: impl IntoIterator<Item = u32>,
-    ) -> Result<local_chain::Update, Error> {
-        // Fetch latest N (server dependent) blocks from Esplora. The server guarantees these are
-        // consistent.
-        let mut fetched_blocks = self
-            .get_blocks(None)?
-            .into_iter()
-            .map(|b| (b.time.height, b.id))
-            .collect::<BTreeMap<u32, BlockHash>>();
-        let new_tip_height = fetched_blocks
-            .keys()
-            .last()
-            .copied()
-            .expect("must atleast have one block");
-
-        // Fetch blocks of heights that the caller is interested in, skipping blocks that are
-        // already fetched when constructing `fetched_blocks`.
-        for height in request_heights {
-            // do not fetch blocks higher than remote tip
-            if height > new_tip_height {
-                continue;
-            }
-            // only fetch what is missing
-            if let btree_map::Entry::Vacant(entry) = fetched_blocks.entry(height) {
-                // ❗The return value of `get_block_hash` is not strictly guaranteed to be consistent
-                // with the chain at the time of `get_blocks` above (there could have been a deep
-                // re-org). Since `get_blocks` returns 10 (or so) blocks we are assuming that it's
-                // not possible to have a re-org deeper than that.
-                entry.insert(self.get_block_hash(height)?);
-            }
+/// Create the initial chain update.
+///
+/// This atomically fetches the latest blocks from Esplora and additional blocks to ensure the
+/// update can connect to the `local_tip`.
+///
+/// We want to do this before fetching transactions and anchors because we cannot fetch the latest
+/// blocks and transactions atomically, and the checkpoint tip is used to determine the
+/// last-scanned block (for block-based chain sources). It is therefore better to be conservative
+/// when setting the tip (use an earlier tip rather than a later one); otherwise the caller may
+/// accidentally skip blocks when alternating between chain sources.
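+///
+/// The returned map of height to [`BlockHash`] is later passed to
+/// [`finalize_chain_update_blocking`] once the update's transaction anchors are known.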
+#[doc(hidden)]
+pub fn init_chain_update_blocking(
+    client: &esplora_client::BlockingClient,
+    local_tip: &CheckPoint,
+) -> Result<BTreeMap<u32, BlockHash>, Error> {
+    // Fetch latest N (server dependent) blocks from Esplora. The server guarantees these are
+    // consistent.
+    let mut fetched_blocks = client
+        .get_blocks(None)?
+        .into_iter()
+        .map(|b| (b.time.height, b.id))
+        .collect::<BTreeMap<u32, BlockHash>>();
+    let new_tip_height = fetched_blocks
+        .keys()
+        .last()
+        .copied()
+        .expect("must atleast have one block");
+
+    // Ensure `fetched_blocks` can create an update that connects with the original chain by
+    // finding a "Point of Agreement".
+    for (height, local_hash) in local_tip.iter().map(|cp| (cp.height(), cp.hash())) {
+        if height > new_tip_height {
+            continue;
         }
 
-        // Ensure `fetched_blocks` can create an update that connects with the original chain by
-        // finding a "Point of Agreement".
-        for (height, local_hash) in local_tip.iter().map(|cp| (cp.height(), cp.hash())) {
-            if height > new_tip_height {
-                continue;
+        let fetched_hash = match fetched_blocks.entry(height) {
+            btree_map::Entry::Occupied(entry) => *entry.get(),
+            btree_map::Entry::Vacant(entry) => *entry.insert(client.get_block_hash(height)?),
+        };
+
+        // We have found a point of agreement, so the update will connect!
+        if fetched_hash == local_hash {
+            break;
+        }
+    }
+
+    Ok(fetched_blocks)
+}
+
+/// Fetches missing checkpoints and finalizes the [`local_chain::Update`].
+///
+/// A checkpoint is considered "missing" if an anchor (of `anchors`) points to a height without an
+/// existing checkpoint/block under `local_tip` or `update_blocks`.
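+///
+/// The returned [`local_chain::Update`] contains all of `update_blocks` plus any checkpoints
+/// fetched for anchor heights, and can be applied to the caller's local chain.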
+#[doc(hidden)]
+pub fn finalize_chain_update_blocking<A: Anchor>(
+    client: &esplora_client::BlockingClient,
+    local_tip: &CheckPoint,
+    anchors: &BTreeSet<(A, Txid)>,
+    mut update_blocks: BTreeMap<u32, BlockHash>,
+) -> Result<local_chain::Update, Error> {
+    let update_tip_height = update_blocks
+        .keys()
+        .last()
+        .copied()
+        .expect("must atleast have one block");
+
+    // We want to have a corresponding checkpoint per anchor height. We iterate the anchor heights
+    // backwards, comparing each height against our `local_tip`'s chain and our current set of
+    // `update_blocks` to see if a corresponding checkpoint already exists.
+    let anchor_heights = anchors
+        .iter()
+        .map(|(a, _)| a.anchor_block().height)
+        // filter out duplicate heights
+        .filter({
+            let mut prev_height = Option::<u32>::None;
+            move |h| match prev_height.replace(*h) {
+                None => true,
+                Some(prev_h) => prev_h != *h,
             }
+        })
+        // filter out heights that surpass the update tip
+        .filter(|h| *h <= update_tip_height)
+        .rev();
 
-            let fetched_hash = match fetched_blocks.entry(height) {
-                btree_map::Entry::Occupied(entry) => *entry.get(),
-                btree_map::Entry::Vacant(entry) => *entry.insert(self.get_block_hash(height)?),
-            };
+    // We keep track of a checkpoint node of `local_tip` to make traversing the linked-list of
+    // checkpoints more efficient.
+    let mut curr_cp = local_tip.clone();
 
-            // We have found point of agreement so the update will connect!
-            if fetched_hash == local_hash {
-                break;
+    for h in anchor_heights {
+        if let Some(cp) = curr_cp.query_from(h) {
+            curr_cp = cp.clone();
+            if cp.height() == h {
+                // Blocks that already exist in the checkpoint linked list are also stored in
+                // `update_blocks` because we want to keep the higher checkpoints of `local_chain`.
+                update_blocks.insert(h, cp.hash());
+                continue;
             }
         }
-
-        Ok(local_chain::Update {
-            tip: CheckPoint::from_block_ids(fetched_blocks.into_iter().map(BlockId::from))
-                .expect("must be in height order"),
-            introduce_older_blocks: true,
-        })
+        if let btree_map::Entry::Vacant(entry) = update_blocks.entry(h) {
+            entry.insert(client.get_block_hash(h)?);
+        }
     }
 
-    fn full_scan<K: Ord + Clone>(
-        &self,
-        keychain_spks: BTreeMap<K, impl IntoIterator<Item = (u32, ScriptBuf)>>,
-        stop_gap: usize,
-        parallel_requests: usize,
-    ) -> Result<(TxGraph<ConfirmationTimeHeightAnchor>, BTreeMap<K, u32>), Error> {
-        type TxsOfSpkIndex = (u32, Vec<esplora_client::Tx>);
-        let parallel_requests = Ord::max(parallel_requests, 1);
-        let mut graph = TxGraph::<ConfirmationTimeHeightAnchor>::default();
-        let mut last_active_indexes = BTreeMap::<K, u32>::new();
-
-        for (keychain, spks) in keychain_spks {
-            let mut spks = spks.into_iter();
-            let mut last_index = Option::<u32>::None;
-            let mut last_active_index = Option::<u32>::None;
-
-            loop {
-                let handles = spks
-                    .by_ref()
-                    .take(parallel_requests)
-                    .map(|(spk_index, spk)| {
-                        std::thread::spawn({
-                            let client = self.clone();
-                            move || -> Result<TxsOfSpkIndex, Error> {
-                                let mut last_seen = None;
-                                let mut spk_txs = Vec::new();
-                                loop {
-                                    let txs = client.scripthash_txs(&spk, last_seen)?;
-                                    let tx_count = txs.len();
-                                    last_seen = txs.last().map(|tx| tx.txid);
-                                    spk_txs.extend(txs);
-                                    if tx_count < 25 {
-                                        break Ok((spk_index, spk_txs));
-                                    }
+    Ok(local_chain::Update {
+        tip: CheckPoint::from_block_ids(
+            update_blocks
+                .into_iter()
+                .map(|(height, hash)| BlockId { height, hash }),
+        )
+        .expect("must be in order"),
+        introduce_older_blocks: true,
+    })
+}
+
+/// This performs a full scan to get an update for the [`TxGraph`] and
+/// [`KeychainTxOutIndex`](bdk_chain::keychain::KeychainTxOutIndex).
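+///
+/// This is the blocking helper backing [`EsploraExt::full_scan`].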
+#[doc(hidden)]
+pub fn full_scan_for_index_and_graph_blocking<K: Ord + Clone + Send>(
+    client: &esplora_client::BlockingClient,
+    keychain_spks: BTreeMap<
+        K,
+        Box<impl IntoIterator<IntoIter = impl Iterator<Item = (u32, ScriptBuf)> + Send> + Send>,
+    >,
+    stop_gap: usize,
+    parallel_requests: usize,
+) -> Result<(TxGraph<ConfirmationTimeHeightAnchor>, BTreeMap<K, u32>), Error> {
+    type TxsOfSpkIndex = (u32, Vec<esplora_client::Tx>);
+    let parallel_requests = Ord::max(parallel_requests, 1);
+    let mut tx_graph = TxGraph::<ConfirmationTimeHeightAnchor>::default();
+    let mut last_active_indices = BTreeMap::<K, u32>::new();
+
+    for (keychain, spks) in keychain_spks {
+        let mut spks = spks.into_iter();
+        let mut last_index = Option::<u32>::None;
+        let mut last_active_index = Option::<u32>::None;
+
+        loop {
+            let handles = spks
+                .by_ref()
+                .take(parallel_requests)
+                .map(|(spk_index, spk)| {
+                    std::thread::spawn({
+                        let client = client.clone();
+                        move || -> Result<TxsOfSpkIndex, Error> {
+                            let mut last_seen = None;
+                            let mut spk_txs = Vec::new();
+                            loop {
+                                let txs = client.scripthash_txs(&spk, last_seen)?;
+                                let tx_count = txs.len();
+                                last_seen = txs.last().map(|tx| tx.txid);
+                                spk_txs.extend(txs);
+                                if tx_count < 25 {
+                                    break Ok((spk_index, spk_txs));
                                 }
                             }
-                        })
+                        }
                     })
-                    .collect::<Vec<JoinHandle<Result<TxsOfSpkIndex, Error>>>>();
+                })
+                .collect::<Vec<JoinHandle<Result<TxsOfSpkIndex, Error>>>>();
 
-                if handles.is_empty() {
-                    break;
-                }
+            if handles.is_empty() {
+                break;
+            }
 
-                for handle in handles {
-                    let (index, txs) = handle.join().expect("thread must not panic")?;
-                    last_index = Some(index);
-                    if !txs.is_empty() {
-                        last_active_index = Some(index);
+            for handle in handles {
+                let (index, txs) = handle.join().expect("thread must not panic")?;
+                last_index = Some(index);
+                if !txs.is_empty() {
+                    last_active_index = Some(index);
+                }
+                for tx in txs {
+                    let _ = tx_graph.insert_tx(tx.to_tx());
+                    if let Some(anchor) = anchor_from_status(&tx.status) {
+                        let _ = tx_graph.insert_anchor(tx.txid, anchor);
                     }
-                    for tx in txs {
-                        let _ = graph.insert_tx(tx.to_tx());
-                        if let Some(anchor) = anchor_from_status(&tx.status) {
-                            let _ = graph.insert_anchor(tx.txid, anchor);
-                        }
 
-                        let previous_outputs = tx.vin.iter().filter_map(|vin| {
-                            let prevout = vin.prevout.as_ref()?;
-                            Some((
-                                OutPoint {
-                                    txid: vin.txid,
-                                    vout: vin.vout,
-                                },
-                                TxOut {
-                                    script_pubkey: prevout.scriptpubkey.clone(),
-                                    value: prevout.value,
-                                },
-                            ))
-                        });
-
-                        for (outpoint, txout) in previous_outputs {
-                            let _ = graph.insert_txout(outpoint, txout);
-                        }
-                    }
-                }
+                    let previous_outputs = tx.vin.iter().filter_map(|vin| {
+                        let prevout = vin.prevout.as_ref()?;
+                        Some((
+                            OutPoint {
+                                txid: vin.txid,
+                                vout: vin.vout,
+                            },
+                            TxOut {
+                                script_pubkey: prevout.scriptpubkey.clone(),
+                                value: prevout.value,
+                            },
+                        ))
+                    });
 
-                let last_index = last_index.expect("Must be set since handles wasn't empty.");
-                let past_gap_limit = if let Some(i) = last_active_index {
-                    last_index > i.saturating_add(stop_gap as u32)
-                } else {
-                    last_index >= stop_gap as u32
-                };
-                if past_gap_limit {
-                    break;
+                    for (outpoint, txout) in previous_outputs {
+                        let _ = tx_graph.insert_txout(outpoint, txout);
+                    }
                 }
             }
 
-            if let Some(last_active_index) = last_active_index {
-                last_active_indexes.insert(keychain, last_active_index);
+            let last_index = last_index.expect("Must be set since handles wasn't empty.");
+            let past_gap_limit = if let Some(i) = last_active_index {
+                last_index > i.saturating_add(stop_gap as u32)
+            } else {
+                last_index >= stop_gap as u32
+            };
+            if past_gap_limit {
+                break;
             }
         }
 
-        Ok((graph, last_active_indexes))
+        if let Some(last_active_index) = last_active_index {
+            last_active_indices.insert(keychain, last_active_index);
+        }
     }
 
-    fn sync(
-        &self,
-        misc_spks: impl IntoIterator<Item = ScriptBuf>,
-        txids: impl IntoIterator<Item = Txid>,
-        outpoints: impl IntoIterator<Item = OutPoint>,
-        parallel_requests: usize,
-    ) -> Result<TxGraph<ConfirmationTimeHeightAnchor>, Error> {
-        let mut graph = self
-            .full_scan(
-                [(
-                    (),
+    Ok((tx_graph, last_active_indices))
+}
+
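+/// Sync the given spks, txids and outpoints against Esplora and return a [`TxGraph`] update.
+///
+/// This is the blocking helper backing [`EsploraExt::sync`].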
+#[doc(hidden)]
+pub fn sync_for_index_and_graph_blocking(
+    client: &esplora_client::BlockingClient,
+    misc_spks: impl IntoIterator<IntoIter = impl Iterator<Item = ScriptBuf> + Send> + Send,
+    txids: impl IntoIterator<Item = Txid>,
+    outpoints: impl IntoIterator<Item = OutPoint>,
+    parallel_requests: usize,
+) -> Result<TxGraph<ConfirmationTimeHeightAnchor>, Error> {
+    let (mut tx_graph, _) = full_scan_for_index_and_graph_blocking(
+        client,
+        {
+            let mut keychains = BTreeMap::new();
+            keychains.insert(
+                (),
+                Box::new(
                     misc_spks
                         .into_iter()
                         .enumerate()
                         .map(|(i, spk)| (i as u32, spk)),
-                )]
-                .into(),
-                usize::MAX,
-                parallel_requests,
-            )
-            .map(|(g, _)| g)?;
-
-        let mut txids = txids.into_iter();
-        loop {
-            let handles = txids
-                .by_ref()
-                .take(parallel_requests)
-                .filter(|&txid| graph.get_tx(txid).is_none())
-                .map(|txid| {
-                    std::thread::spawn({
-                        let client = self.clone();
-                        move || {
-                            client
-                                .get_tx_status(&txid)
-                                .map_err(Box::new)
-                                .map(|s| (txid, s))
-                        }
-                    })
+                ),
+            );
+            keychains
+        },
+        usize::MAX,
+        parallel_requests,
+    )?;
+
+    let mut txids = txids.into_iter();
+    loop {
+        let handles = txids
+            .by_ref()
+            .take(parallel_requests)
+            .filter(|&txid| tx_graph.get_tx(txid).is_none())
+            .map(|txid| {
+                std::thread::spawn({
+                    let client = client.clone();
+                    move || {
+                        client
+                            .get_tx_status(&txid)
+                            .map_err(Box::new)
+                            .map(|s| (txid, s))
+                    }
                 })
-                .collect::<Vec<JoinHandle<Result<(Txid, TxStatus), Error>>>>();
+            })
+            .collect::<Vec<JoinHandle<Result<(Txid, TxStatus), Error>>>>();
 
-            if handles.is_empty() {
-                break;
-            }
+        if handles.is_empty() {
+            break;
+        }
 
-            for handle in handles {
-                let (txid, status) = handle.join().expect("thread must not panic")?;
-                if let Some(anchor) = anchor_from_status(&status) {
-                    let _ = graph.insert_anchor(txid, anchor);
-                }
+        for handle in handles {
+            let (txid, status) = handle.join().expect("thread must not panic")?;
+            if let Some(anchor) = anchor_from_status(&status) {
+                let _ = tx_graph.insert_anchor(txid, anchor);
             }
         }
+    }
 
-        for op in outpoints {
-            if graph.get_tx(op.txid).is_none() {
-                if let Some(tx) = self.get_tx(&op.txid)? {
-                    let _ = graph.insert_tx(tx);
-                }
-                let status = self.get_tx_status(&op.txid)?;
-                if let Some(anchor) = anchor_from_status(&status) {
-                    let _ = graph.insert_anchor(op.txid, anchor);
-                }
+    for op in outpoints {
+        if tx_graph.get_tx(op.txid).is_none() {
+            if let Some(tx) = client.get_tx(&op.txid)? {
+                let _ = tx_graph.insert_tx(tx);
             }
+            let status = client.get_tx_status(&op.txid)?;
+            if let Some(anchor) = anchor_from_status(&status) {
+                let _ = tx_graph.insert_anchor(op.txid, anchor);
+            }
+        }
 
-            if let Some(op_status) = self.get_output_status(&op.txid, op.vout as _)? {
-                if let Some(txid) = op_status.txid {
-                    if graph.get_tx(txid).is_none() {
-                        if let Some(tx) = self.get_tx(&txid)? {
-                            let _ = graph.insert_tx(tx);
-                        }
-                        let status = self.get_tx_status(&txid)?;
-                        if let Some(anchor) = anchor_from_status(&status) {
-                            let _ = graph.insert_anchor(txid, anchor);
-                        }
+        if let Some(op_status) = client.get_output_status(&op.txid, op.vout as _)? {
+            if let Some(txid) = op_status.txid {
+                if tx_graph.get_tx(txid).is_none() {
+                    if let Some(tx) = client.get_tx(&txid)? {
+                        let _ = tx_graph.insert_tx(tx);
+                    }
+                    let status = client.get_tx_status(&txid)?;
+                    if let Some(anchor) = anchor_from_status(&status) {
+                        let _ = tx_graph.insert_anchor(txid, anchor);
                     }
                 }
             }
         }
-        Ok(graph)
     }
+
+    Ok(tx_graph)
 }
diff --git a/crates/esplora/tests/async_ext.rs b/crates/esplora/tests/async_ext.rs
index 6c3c9cf1f..d81cb1261 100644
--- a/crates/esplora/tests/async_ext.rs
+++ b/crates/esplora/tests/async_ext.rs
@@ -1,15 +1,188 @@
+use bdk_chain::bitcoin::hashes::Hash;
+use bdk_chain::local_chain::LocalChain;
+use bdk_chain::BlockId;
 use bdk_esplora::EsploraAsyncExt;
 use electrsd::bitcoind::anyhow;
 use electrsd::bitcoind::bitcoincore_rpc::RpcApi;
 use esplora_client::{self, Builder};
-use std::collections::{BTreeMap, HashSet};
+use std::collections::{BTreeMap, BTreeSet, HashSet};
 use std::str::FromStr;
 use std::thread::sleep;
 use std::time::Duration;
 
 use bdk_chain::bitcoin::{Address, Amount, Txid};
+use bdk_chain::spk_client::{FullScanRequest, SyncRequest};
 use bdk_testenv::TestEnv;
 
+macro_rules! h {
+    ($index:literal) => {{
+        bdk_chain::bitcoin::hashes::Hash::hash($index.as_bytes())
+    }};
+}
+
+/// Ensure that the update does not remove heights from the original chain and that all anchor
+/// heights are included.
+#[tokio::test]
+pub async fn test_finalize_chain_update() -> anyhow::Result<()> {
+    struct TestCase<'a> {
+        name: &'a str,
+        /// Initial blockchain height to start the env with.
+        initial_env_height: u32,
+        /// Initial checkpoint heights to start with.
+        initial_cps: &'a [u32],
+        /// The final blockchain height of the env.
+        final_env_height: u32,
+        /// The anchors to test with: `(height, txid)`. Only the anchor height is specified (not
+        /// the block hash) as we can fetch the hash from the env.
+        anchors: &'a [(u32, Txid)],
+    }
+
+    let test_cases = [
+        TestCase {
+            name: "chain_extends",
+            initial_env_height: 60,
+            initial_cps: &[59, 60],
+            final_env_height: 90,
+            anchors: &[],
+        },
+        TestCase {
+            name: "introduce_older_heights",
+            initial_env_height: 50,
+            initial_cps: &[10, 15],
+            final_env_height: 50,
+            anchors: &[(11, h!("A")), (14, h!("B"))],
+        },
+        TestCase {
+            name: "introduce_older_heights_after_chain_extends",
+            initial_env_height: 50,
+            initial_cps: &[10, 15],
+            final_env_height: 100,
+            anchors: &[(11, h!("A")), (14, h!("B"))],
+        },
+    ];
+
+    for (i, t) in test_cases.into_iter().enumerate() {
+        println!("[{}] running test case: {}", i, t.name);
+
+        let env = TestEnv::new()?;
+        let base_url = format!("http://{}", &env.electrsd.esplora_url.clone().unwrap());
+        let client = Builder::new(base_url.as_str()).build_async()?;
+
+        // set env to `initial_env_height`
+        if let Some(to_mine) = t
+            .initial_env_height
+            .checked_sub(env.make_checkpoint_tip().height())
+        {
+            env.mine_blocks(to_mine as _, None)?;
+        }
+        while client.get_height().await? < t.initial_env_height {
+            std::thread::sleep(Duration::from_millis(10));
+        }
+
+        // craft initial `local_chain`
+        let local_chain = {
+            let (mut chain, _) = LocalChain::from_genesis_hash(env.genesis_hash()?);
+            let chain_tip = chain.tip();
+            let update_blocks = bdk_esplora::init_chain_update(&client, &chain_tip).await?;
+            let update_anchors = t
+                .initial_cps
+                .iter()
+                .map(|&height| -> anyhow::Result<_> {
+                    Ok((
+                        BlockId {
+                            height,
+                            hash: env.bitcoind.client.get_block_hash(height as _)?,
+                        },
+                        Txid::all_zeros(),
+                    ))
+                })
+                .collect::<anyhow::Result<BTreeSet<_>>>()?;
+            let chain_update = bdk_esplora::finalize_chain_update(
+                &client,
+                &chain_tip,
+                &update_anchors,
+                update_blocks,
+            )
+            .await?;
+            chain.apply_update(chain_update)?;
+            chain
+        };
+        println!("local chain height: {}", local_chain.tip().height());
+
+        // extend env chain
+        if let Some(to_mine) = t
+            .final_env_height
+            .checked_sub(env.make_checkpoint_tip().height())
+        {
+            env.mine_blocks(to_mine as _, None)?;
+        }
+        while client.get_height().await? < t.final_env_height {
+            std::thread::sleep(Duration::from_millis(10));
+        }
+
+        // craft update
+        let update = {
+            let local_tip = local_chain.tip();
+            let update_blocks = bdk_esplora::init_chain_update(&client, &local_tip).await?;
+            let update_anchors = t
+                .anchors
+                .iter()
+                .map(|&(height, txid)| -> anyhow::Result<_> {
+                    Ok((
+                        BlockId {
+                            height,
+                            hash: env.bitcoind.client.get_block_hash(height as _)?,
+                        },
+                        txid,
+                    ))
+                })
+                .collect::<anyhow::Result<_>>()?;
+            bdk_esplora::finalize_chain_update(&client, &local_tip, &update_anchors, update_blocks)
+                .await?
+        };
+
+        // apply update
+        let mut updated_local_chain = local_chain.clone();
+        updated_local_chain.apply_update(update)?;
+        println!(
+            "updated local chain height: {}",
+            updated_local_chain.tip().height()
+        );
+
+        assert!(
+            {
+                let initial_heights = local_chain
+                    .iter_checkpoints()
+                    .map(|cp| cp.height())
+                    .collect::<BTreeSet<_>>();
+                let updated_heights = updated_local_chain
+                    .iter_checkpoints()
+                    .map(|cp| cp.height())
+                    .collect::<BTreeSet<_>>();
+                updated_heights.is_superset(&initial_heights)
+            },
+            "heights from the initial chain must all be in the updated chain",
+        );
+
+        assert!(
+            {
+                let exp_anchor_heights = t
+                    .anchors
+                    .iter()
+                    .map(|(h, _)| *h)
+                    .chain(t.initial_cps.iter().copied())
+                    .collect::<BTreeSet<_>>();
+                let anchor_heights = updated_local_chain
+                    .iter_checkpoints()
+                    .map(|cp| cp.height())
+                    .collect::<BTreeSet<_>>();
+                anchor_heights.is_superset(&exp_anchor_heights)
+            },
+            "anchor heights must all be in updated chain",
+        );
+    }
+
+    Ok(())
+}
+
 #[tokio::test]
 pub async fn test_update_tx_graph_without_keychain() -> anyhow::Result<()> {
     let env = TestEnv::new()?;
@@ -52,21 +225,42 @@ pub async fn test_update_tx_graph_without_keychain() -> anyhow::Result<()> {
         sleep(Duration::from_millis(10))
     }
 
-    let graph_update = client
-        .sync(
-            misc_spks.into_iter(),
-            vec![].into_iter(),
-            vec![].into_iter(),
-            1,
-        )
-        .await?;
+    // use a full checkpoint linked list (since this is not what we are testing)
+    let cp_tip = env.make_checkpoint_tip();
 
+    let mut request = SyncRequest::new(cp_tip.clone());
+    request.add_spks(
+        misc_spks
+            .into_iter()
+            .enumerate()
+            .map(|(i, spk)| (i as u32, spk)),
+    );
+    let sync_response = client.sync(request, 1).await?;
+
+    assert!(
+        {
+            let update_cps = sync_response
+                .chain_update
+                .tip
+                .iter()
+                .map(|cp| cp.block_id())
+                .collect::<BTreeSet<_>>();
+            let superset_cps = cp_tip
+                .iter()
+                .map(|cp| cp.block_id())
+                .collect::<BTreeSet<_>>();
+            superset_cps.is_superset(&update_cps)
+        },
+        "update should not alter original checkpoint tip since we already started with all checkpoints",
+    );
+
+    let graph_update = sync_response.graph_update;
     // Check to see if we have the floating txouts available from our two created transactions'
     // previous outputs in order to calculate transaction fees.
     for tx in graph_update.full_txs() {
         // Retrieve the calculated fee from `TxGraph`, which will panic if we do not have the
         // floating txouts available from the transactions' previous outputs.
-        let fee = graph_update.calculate_fee(tx.tx).expect("Fee must exist");
+        let fee = graph_update.calculate_fee(&tx.tx).expect("Fee must exist");
 
         // Retrieve the fee in the transaction data from `bitcoind`.
         let tx_fee = env
@@ -116,11 +310,10 @@ pub async fn test_async_update_tx_graph_gap_limit() -> anyhow::Result<()> {
         .into_iter()
         .map(|s| Address::from_str(s).unwrap().assume_checked())
         .collect();
-    let spks: Vec<_> = addresses
+    let spks = addresses
         .iter()
         .enumerate()
-        .map(|(i, addr)| (i as u32, addr.script_pubkey()))
-        .collect();
+        .map(|(i, addr)| (i as u32, addr.script_pubkey()));
     let mut keychains = BTreeMap::new();
     keychains.insert(0, spks);
 
@@ -140,14 +333,29 @@ pub async fn test_async_update_tx_graph_gap_limit() -> anyhow::Result<()> {
         sleep(Duration::from_millis(10))
     }
 
+    // use a full checkpoint linked list (since this is not what we are testing)
+    let cp_tip = env.make_checkpoint_tip();
+
     // A scan with a gap limit of 2 won't find the transaction, but a scan with a gap limit of 3
     // will.
-    let (graph_update, active_indices) = client.full_scan(keychains.clone(), 2, 1).await?;
-    assert!(graph_update.full_txs().next().is_none());
-    assert!(active_indices.is_empty());
-    let (graph_update, active_indices) = client.full_scan(keychains.clone(), 3, 1).await?;
-    assert_eq!(graph_update.full_txs().next().unwrap().txid, txid_4th_addr);
-    assert_eq!(active_indices[&0], 3);
+    let mut request = FullScanRequest::new(cp_tip.clone());
+    request.add_spks_by_keychain(keychains.clone());
+    let full_scan_response = client.full_scan(request, 2, 1).await?;
+    assert!(full_scan_response.graph_update.full_txs().next().is_none());
+    assert!(full_scan_response.last_active_indices.is_empty());
+    let mut request = FullScanRequest::new(cp_tip.clone());
+    request.add_spks_by_keychain(keychains.clone());
+    let full_scan_response = client.full_scan(request, 3, 1).await?;
+    assert_eq!(
+        full_scan_response
+            .graph_update
+            .full_txs()
+            .next()
+            .unwrap()
+            .txid,
+        txid_4th_addr
+    );
+    assert_eq!(full_scan_response.last_active_indices[&0], 3);
 
     // Now receive a coin on the last address.
     let txid_last_addr = env.bitcoind.client.send_to_address(
@@ -167,16 +375,28 @@ pub async fn test_async_update_tx_graph_gap_limit() -> anyhow::Result<()> {
 
     // A scan with gap limit 4 won't find the second transaction, but a scan with gap limit 5 will.
     // The last active index won't be updated in the first case but will be in the second one.
-    let (graph_update, active_indices) = client.full_scan(keychains.clone(), 4, 1).await?;
-    let txs: HashSet<_> = graph_update.full_txs().map(|tx| tx.txid).collect();
+    let mut request = FullScanRequest::new(cp_tip.clone());
+    request.add_spks_by_keychain(keychains.clone());
+    let full_scan_response = client.full_scan(request, 4, 1).await?;
+    let txs: HashSet<_> = full_scan_response
+        .graph_update
+        .full_txs()
+        .map(|tx| tx.txid)
+        .collect();
     assert_eq!(txs.len(), 1);
     assert!(txs.contains(&txid_4th_addr));
-    assert_eq!(active_indices[&0], 3);
-    let (graph_update, active_indices) = client.full_scan(keychains, 5, 1).await?;
-    let txs: HashSet<_> = graph_update.full_txs().map(|tx| tx.txid).collect();
+    assert_eq!(full_scan_response.last_active_indices[&0], 3);
+    let mut request = FullScanRequest::new(cp_tip);
+    request.add_spks_by_keychain(keychains);
+    let full_scan_response = client.full_scan(request, 5, 1).await?;
+    let txs: HashSet<_> = full_scan_response
+        .graph_update
+        .full_txs()
+        .map(|tx| tx.txid)
+        .collect();
     assert_eq!(txs.len(), 2);
     assert!(txs.contains(&txid_4th_addr) && txs.contains(&txid_last_addr));
-    assert_eq!(active_indices[&0], 9);
+    assert_eq!(full_scan_response.last_active_indices[&0], 9);
 
     Ok(())
 }
diff --git a/crates/esplora/tests/blocking_ext.rs b/crates/esplora/tests/blocking_ext.rs
index 6225a6a6b..17ddbc8de 100644
--- a/crates/esplora/tests/blocking_ext.rs
+++ b/crates/esplora/tests/blocking_ext.rs
@@ -1,15 +1,18 @@
+use bdk_chain::bitcoin::hashes::Hash;
 use bdk_chain::local_chain::LocalChain;
 use bdk_chain::BlockId;
 use bdk_esplora::EsploraExt;
+use bitcoin::ScriptBuf;
 use electrsd::bitcoind::anyhow;
 use electrsd::bitcoind::bitcoincore_rpc::RpcApi;
-use esplora_client::{self, Builder};
+use esplora_client::{self, BlockHash, Builder};
 use std::collections::{BTreeMap, BTreeSet, HashSet};
 use std::str::FromStr;
 use std::thread::sleep;
 use std::time::Duration;
 
 use bdk_chain::bitcoin::{Address, Amount, Txid};
+use bdk_chain::spk_client::{FullScanRequest, SyncRequest};
 use bdk_testenv::TestEnv;
 
 macro_rules! h {
@@ -26,6 +29,173 @@ macro_rules! local_chain {
     }};
 }
 
+/// Ensure that the update does not remove heights from the original chain and that all anchor
+/// heights are included.
+#[test]
+pub fn test_finalize_chain_update() -> anyhow::Result<()> {
+    struct TestCase<'a> {
+        name: &'a str,
+        /// Initial blockchain height to start the env with.
+        initial_env_height: u32,
+        /// Initial checkpoint heights to start with.
+        initial_cps: &'a [u32],
+        /// The final blockchain height of the env.
+        final_env_height: u32,
+        /// The anchors to test with: `(height, txid)`. Only the anchor height is specified (not
+        /// the block hash) as we can fetch the hash from the env.
+        anchors: &'a [(u32, Txid)],
+    }
+
+    let test_cases = [
+        TestCase {
+            name: "chain_extends",
+            initial_env_height: 60,
+            initial_cps: &[59, 60],
+            final_env_height: 90,
+            anchors: &[],
+        },
+        TestCase {
+            name: "introduce_older_heights",
+            initial_env_height: 50,
+            initial_cps: &[10, 15],
+            final_env_height: 50,
+            anchors: &[(11, h!("A")), (14, h!("B"))],
+        },
+        TestCase {
+            name: "introduce_older_heights_after_chain_extends",
+            initial_env_height: 50,
+            initial_cps: &[10, 15],
+            final_env_height: 100,
+            anchors: &[(11, h!("A")), (14, h!("B"))],
+        },
+    ];
+
+    for (i, t) in test_cases.into_iter().enumerate() {
+        println!("[{}] running test case: {}", i, t.name);
+
+        let env = TestEnv::new()?;
+        let base_url = format!("http://{}", &env.electrsd.esplora_url.clone().unwrap());
+        let client = Builder::new(base_url.as_str()).build_blocking()?;
+
+        // set env to `initial_env_height`
+        if let Some(to_mine) = t
+            .initial_env_height
+            .checked_sub(env.make_checkpoint_tip().height())
+        {
+            env.mine_blocks(to_mine as _, None)?;
+        }
+        while client.get_height()? < t.initial_env_height {
+            std::thread::sleep(Duration::from_millis(10));
+        }
+
+        // craft initial `local_chain`
+        let local_chain = {
+            let (mut chain, _) = LocalChain::from_genesis_hash(env.genesis_hash()?);
+            let chain_tip = chain.tip();
+            let update_blocks = bdk_esplora::init_chain_update_blocking(&client, &chain_tip)?;
+            let update_anchors = t
+                .initial_cps
+                .iter()
+                .map(|&height| -> anyhow::Result<_> {
+                    Ok((
+                        BlockId {
+                            height,
+                            hash: env.bitcoind.client.get_block_hash(height as _)?,
+                        },
+                        Txid::all_zeros(),
+                    ))
+                })
+                .collect::<anyhow::Result<BTreeSet<_>>>()?;
+            let chain_update = bdk_esplora::finalize_chain_update_blocking(
+                &client,
+                &chain_tip,
+                &update_anchors,
+                update_blocks,
+            )?;
+            chain.apply_update(chain_update)?;
+            chain
+        };
+        println!("local chain height: {}", local_chain.tip().height());
+
+        // extend env chain
+        if let Some(to_mine) = t
+            .final_env_height
+            .checked_sub(env.make_checkpoint_tip().height())
+        {
+            env.mine_blocks(to_mine as _, None)?;
+        }
+        while client.get_height()? < t.final_env_height {
+            std::thread::sleep(Duration::from_millis(10));
+        }
+
+        // craft update
+        let update = {
+            let local_tip = local_chain.tip();
+            let update_blocks = bdk_esplora::init_chain_update_blocking(&client, &local_tip)?;
+            let update_anchors = t
+                .anchors
+                .iter()
+                .map(|&(height, txid)| -> anyhow::Result<_> {
+                    Ok((
+                        BlockId {
+                            height,
+                            hash: env.bitcoind.client.get_block_hash(height as _)?,
+                        },
+                        txid,
+                    ))
+                })
+                .collect::<anyhow::Result<_>>()?;
+            bdk_esplora::finalize_chain_update_blocking(
+                &client,
+                &local_tip,
+                &update_anchors,
+                update_blocks,
+            )?
+        };
+
+        // apply update
+        let mut updated_local_chain = local_chain.clone();
+        updated_local_chain.apply_update(update)?;
+        println!(
+            "updated local chain height: {}",
+            updated_local_chain.tip().height()
+        );
+
+        assert!(
+            {
+                let initial_heights = local_chain
+                    .iter_checkpoints()
+                    .map(|cp| cp.height())
+                    .collect::<BTreeSet<_>>();
+                let updated_heights = updated_local_chain
+                    .iter_checkpoints()
+                    .map(|cp| cp.height())
+                    .collect::<BTreeSet<_>>();
+                updated_heights.is_superset(&initial_heights)
+            },
+            "heights from the initial chain must all be in the updated chain",
+        );
+
+        assert!(
+            {
+                let exp_anchor_heights = t
+                    .anchors
+                    .iter()
+                    .map(|(h, _)| *h)
+                    .chain(t.initial_cps.iter().copied())
+                    .collect::<BTreeSet<_>>();
+                let anchor_heights = updated_local_chain
+                    .iter_checkpoints()
+                    .map(|cp| cp.height())
+                    .collect::<BTreeSet<_>>();
+                anchor_heights.is_superset(&exp_anchor_heights)
+            },
+            "anchor heights must all be in updated chain",
+        );
+    }
+
+    Ok(())
+}
+
 #[test]
 pub fn test_update_tx_graph_without_keychain() -> anyhow::Result<()> {
     let env = TestEnv::new()?;
@@ -68,19 +238,44 @@ pub fn test_update_tx_graph_without_keychain() -> anyhow::Result<()> {
         sleep(Duration::from_millis(10))
     }
 
-    let graph_update = client.sync(
-        misc_spks.into_iter(),
-        vec![].into_iter(),
-        vec![].into_iter(),
-        1,
-    )?;
+    // use a full checkpoint linked list (since this is not what we are testing)
+    let cp_tip = env.make_checkpoint_tip();
+
+    let mut request = SyncRequest::new(cp_tip.clone());
+    request.add_spks(
+        misc_spks
+            .into_iter()
+            .enumerate()
+            .map(|(i, spk)| (i as u32, spk))
+            .collect::<Vec<(u32, ScriptBuf)>>(),
+    );
+
+    let result = client.sync(request, 1)?;
+
+    assert!(
+        {
+            let update_cps = result
+                .chain_update
+                .tip
+                .iter()
+                .map(|cp| cp.block_id())
+                .collect::<BTreeSet<_>>();
+            let superset_cps = cp_tip
+                .iter()
+                .map(|cp| cp.block_id())
+                .collect::<BTreeSet<_>>();
+            superset_cps.is_superset(&update_cps)
+        },
+        "update should not alter original checkpoint tip since we already started with all checkpoints",
+    );
 
+    let graph_update = result.graph_update;
     // Check to see if we have the floating txouts available from our two created transactions'
     // previous outputs in order to calculate transaction fees.
     for tx in graph_update.full_txs() {
         // Retrieve the calculated fee from `TxGraph`, which will panic if we do not have the
         // floating txouts available from the transactions' previous outputs.
-        let fee = graph_update.calculate_fee(tx.tx).expect("Fee must exist");
+        let fee = graph_update.calculate_fee(&tx.tx).expect("Fee must exist");
 
         // Retrieve the fee in the transaction data from `bitcoind`.
         let tx_fee = env
@@ -131,11 +326,10 @@ pub fn test_update_tx_graph_gap_limit() -> anyhow::Result<()> {
         .into_iter()
         .map(|s| Address::from_str(s).unwrap().assume_checked())
         .collect();
-    let spks: Vec<_> = addresses
+    let spks = addresses
         .iter()
         .enumerate()
-        .map(|(i, addr)| (i as u32, addr.script_pubkey()))
-        .collect();
+        .map(|(i, addr)| (i as u32, addr.script_pubkey()));
     let mut keychains = BTreeMap::new();
     keychains.insert(0, spks);
 
@@ -155,14 +349,29 @@ pub fn test_update_tx_graph_gap_limit() -> anyhow::Result<()> {
         sleep(Duration::from_millis(10))
     }
 
+    // use a full checkpoint linked list (since this is not what we are testing)
+    let cp_tip = env.make_checkpoint_tip();
+
     // A scan with a gap limit of 2 won't find the transaction, but a scan with a gap limit of 3
     // will.
-    let (graph_update, active_indices) = client.full_scan(keychains.clone(), 2, 1)?;
-    assert!(graph_update.full_txs().next().is_none());
-    assert!(active_indices.is_empty());
-    let (graph_update, active_indices) = client.full_scan(keychains.clone(), 3, 1)?;
-    assert_eq!(graph_update.full_txs().next().unwrap().txid, txid_4th_addr);
-    assert_eq!(active_indices[&0], 3);
+    let mut request = FullScanRequest::new(cp_tip.clone());
+    request.add_spks_by_keychain(keychains.clone());
+    let full_scan_response = client.full_scan(request, 2, 1)?;
+    assert!(full_scan_response.graph_update.full_txs().next().is_none());
+    assert!(full_scan_response.last_active_indices.is_empty());
+    let mut request = FullScanRequest::new(cp_tip.clone());
+    request.add_spks_by_keychain(keychains.clone());
+    let full_scan_response = client.full_scan(request, 3, 1)?;
+    assert_eq!(
+        full_scan_response
+            .graph_update
+            .full_txs()
+            .next()
+            .unwrap()
+            .txid,
+        txid_4th_addr
+    );
+    assert_eq!(full_scan_response.last_active_indices[&0], 3);
 
     // Now receive a coin on the last address.
     let txid_last_addr = env.bitcoind.client.send_to_address(
@@ -182,16 +391,28 @@ pub fn test_update_tx_graph_gap_limit() -> anyhow::Result<()> {
 
     // A scan with gap limit 4 won't find the second transaction, but a scan with gap limit 5 will.
     // The last active index won't be updated in the first case but will be in the second one.
-    let (graph_update, active_indices) = client.full_scan(keychains.clone(), 4, 1)?;
-    let txs: HashSet<_> = graph_update.full_txs().map(|tx| tx.txid).collect();
+    let mut request = FullScanRequest::new(cp_tip.clone());
+    request.add_spks_by_keychain(keychains.clone());
+    let full_scan_response = client.full_scan(request, 4, 1)?;
+    let txs: HashSet<_> = full_scan_response
+        .graph_update
+        .full_txs()
+        .map(|tx| tx.txid)
+        .collect();
     assert_eq!(txs.len(), 1);
     assert!(txs.contains(&txid_4th_addr));
-    assert_eq!(active_indices[&0], 3);
-    let (graph_update, active_indices) = client.full_scan(keychains, 5, 1)?;
-    let txs: HashSet<_> = graph_update.full_txs().map(|tx| tx.txid).collect();
+    assert_eq!(full_scan_response.last_active_indices[&0], 3);
+    let mut request = FullScanRequest::new(cp_tip.clone());
+    request.add_spks_by_keychain(keychains.clone());
+    let full_scan_response = client.full_scan(request, 5, 1)?;
+    let txs: HashSet<_> = full_scan_response
+        .graph_update
+        .full_txs()
+        .map(|tx| tx.txid)
+        .collect();
     assert_eq!(txs.len(), 2);
     assert!(txs.contains(&txid_4th_addr) && txs.contains(&txid_last_addr));
-    assert_eq!(active_indices[&0], 9);
+    assert_eq!(full_scan_response.last_active_indices[&0], 9);
 
     Ok(())
 }
@@ -317,14 +538,38 @@ fn update_local_chain() -> anyhow::Result<()> {
     for (i, t) in test_cases.into_iter().enumerate() {
         println!("Case {}: {}", i, t.name);
         let mut chain = t.chain;
+        let cp_tip = chain.tip();
 
-        let update = client
-            .update_local_chain(chain.tip(), t.request_heights.iter().copied())
-            .map_err(|err| {
-                anyhow::format_err!("[{}:{}] `update_local_chain` failed: {}", i, t.name, err)
+        let new_blocks =
+            bdk_esplora::init_chain_update_blocking(&client, &cp_tip).map_err(|err| {
+                anyhow::format_err!("[{}:{}] `init_chain_update` failed: {}", i, t.name, err)
             })?;
 
-        let update_blocks = update
+        let mock_anchors = t
+            .request_heights
+            .iter()
+            .map(|&h| {
+                let anchor_blockhash: BlockHash = bdk_chain::bitcoin::hashes::Hash::hash(
+                    &format!("hash_at_height_{}", h).into_bytes(),
+                );
+                let txid: Txid = bdk_chain::bitcoin::hashes::Hash::hash(
+                    &format!("txid_at_height_{}", h).into_bytes(),
+                );
+                let anchor = BlockId {
+                    height: h,
+                    hash: anchor_blockhash,
+                };
+                (anchor, txid)
+            })
+            .collect::<BTreeSet<_>>();
+
+        let chain_update = bdk_esplora::finalize_chain_update_blocking(
+            &client,
+            &cp_tip,
+            &mock_anchors,
+            new_blocks,
+        )?;
+        let update_blocks = chain_update
             .tip
             .iter()
             .map(|cp| cp.block_id())
@@ -346,22 +591,23 @@ fn update_local_chain() -> anyhow::Result<()> {
             )
             .collect::<BTreeSet<_>>();
 
-        assert_eq!(
-            update_blocks, exp_update_blocks,
+        assert!(
+            update_blocks.is_superset(&exp_update_blocks),
             "[{}:{}] unexpected update",
-            i, t.name
+            i,
+            t.name
         );
 
         let _ = chain
-            .apply_update(update)
+            .apply_update(chain_update)
             .unwrap_or_else(|err| panic!("[{}:{}] update failed to apply: {}", i, t.name, err));
 
         // all requested heights must exist in the final chain
         for height in t.request_heights {
             let exp_blockhash = blocks.get(height).expect("block must exist in bitcoind");
             assert_eq!(
-                chain.blocks().get(height),
-                Some(exp_blockhash),
+                chain.query(*height).map(|cp| cp.hash()),
+                Some(*exp_blockhash),
                 "[{}:{}] block {}:{} must exist in final chain",
                 i,
                 t.name,
diff --git a/crates/testenv/src/lib.rs b/crates/testenv/src/lib.rs
index b836387c1..030878f46 100644
--- a/crates/testenv/src/lib.rs
+++ b/crates/testenv/src/lib.rs
@@ -1,7 +1,11 @@
-use bdk_chain::bitcoin::{
-    address::NetworkChecked, block::Header, hash_types::TxMerkleNode, hashes::Hash,
-    secp256k1::rand::random, Address, Amount, Block, BlockHash, CompactTarget, ScriptBuf,
-    ScriptHash, Transaction, TxIn, TxOut, Txid,
+use bdk_chain::{
+    bitcoin::{
+        address::NetworkChecked, block::Header, hash_types::TxMerkleNode, hashes::Hash,
+        secp256k1::rand::random, Address, Amount, Block, BlockHash, CompactTarget, ScriptBuf,
+        ScriptHash, Transaction, TxIn, TxOut, Txid,
+    },
+    local_chain::CheckPoint,
+    BlockId,
 };
 use bitcoincore_rpc::{
     bitcoincore_rpc_json::{GetBlockTemplateModes, GetBlockTemplateRules},
@@ -234,6 +238,24 @@ impl TestEnv {
             .send_to_address(address, amount, None, None, None, None, None, None)?;
         Ok(txid)
     }
+
+    /// Create a checkpoint linked list of all the blocks in the chain.
+    pub fn make_checkpoint_tip(&self) -> CheckPoint {
+        CheckPoint::from_block_ids((0_u32..).map_while(|height| {
+            self.bitcoind
+                .client
+                .get_block_hash(height as u64)
+                .ok()
+                .map(|hash| BlockId { height, hash })
+        }))
+        .expect("must craft tip")
+    }
+
+    /// Get the genesis hash of the blockchain.
+    pub fn genesis_hash(&self) -> anyhow::Result<BlockHash> {
+        let hash = self.bitcoind.client.get_block_hash(0)?;
+        Ok(hash)
+    }
 }
 
 #[cfg(test)]
diff --git a/example-crates/example_bitcoind_rpc_polling/src/main.rs b/example-crates/example_bitcoind_rpc_polling/src/main.rs
index 88b83067b..0a016cbe9 100644
--- a/example-crates/example_bitcoind_rpc_polling/src/main.rs
+++ b/example-crates/example_bitcoind_rpc_polling/src/main.rs
@@ -216,7 +216,7 @@ fn main() -> anyhow::Result<()> {
                             &*chain,
                             synced_to.block_id(),
                             graph.index.outpoints().iter().cloned(),
-                            |(k, _), _| k == &Keychain::Internal,
+                            |(k, _), _| k == &Keychain::Internal { account: 0 },
                         )
                     };
                     println!(
@@ -344,7 +344,7 @@ fn main() -> anyhow::Result<()> {
                             &*chain,
                             synced_to.block_id(),
                             graph.index.outpoints().iter().cloned(),
-                            |(k, _), _| k == &Keychain::Internal,
+                            |(k, _), _| k == &Keychain::Internal { account: 0 },
                         )
                     };
                     println!(
diff --git a/example-crates/example_cli/src/lib.rs b/example-crates/example_cli/src/lib.rs
index 4989c08c6..39d96a634 100644
--- a/example-crates/example_cli/src/lib.rs
+++ b/example-crates/example_cli/src/lib.rs
@@ -175,15 +175,15 @@ pub enum TxOutCmd {
     Debug, Clone, Copy, PartialOrd, Ord, PartialEq, Eq, serde::Deserialize, serde::Serialize,
 )]
 pub enum Keychain {
-    External,
-    Internal,
+    External { account: u32 },
+    Internal { account: u32 },
 }
 
 impl core::fmt::Display for Keychain {
     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
         match self {
-            Keychain::External => write!(f, "external"),
-            Keychain::Internal => write!(f, "internal"),
+            Keychain::External { account } => write!(f, "external[{}]", account),
+            Keychain::Internal { account } => write!(f, "internal[{}]", account),
         }
     }
 }
@@ -247,10 +247,15 @@ where
         script_pubkey: address.script_pubkey(),
     }];
 
-    let internal_keychain = if graph.index.keychains().get(&Keychain::Internal).is_some() {
-        Keychain::Internal
+    let internal_keychain = if graph
+        .index
+        .keychains()
+        .get(&Keychain::Internal { account: 0 })
+        .is_some()
+    {
+        Keychain::Internal { account: 0 }
     } else {
-        Keychain::External
+        Keychain::External { account: 0 }
     };
 
     let ((change_index, change_script), change_changeset) =
@@ -463,7 +468,8 @@ where
                         _ => unreachable!("only these two variants exist in match arm"),
                     };
 
-                    let ((spk_i, spk), index_changeset) = spk_chooser(index, &Keychain::External);
+                    let ((spk_i, spk), index_changeset) =
+                        spk_chooser(index, &Keychain::External { account: 0 });
                     let db = &mut *db.lock().unwrap();
                     db.stage_and_commit(C::from((
                         local_chain::ChangeSet::default(),
@@ -482,8 +488,8 @@ where
                 }
                 AddressCmd::List { change } => {
                     let target_keychain = match change {
-                        true => Keychain::Internal,
-                        false => Keychain::External,
+                        true => Keychain::Internal { account: 0 },
+                        false => Keychain::External { account: 0 },
                     };
                     for (spk_i, spk) in index.revealed_keychain_spks(&target_keychain) {
                         let address = Address::from_script(spk, network)
@@ -516,7 +522,7 @@ where
                 chain,
                 chain.get_chain_tip()?,
                 graph.index.outpoints().iter().cloned(),
-                |(k, _), _| k == &Keychain::Internal,
+                |(k, _), _| k == &Keychain::Internal { account: 0 },
             )?;
 
             let confirmed_total = balance.confirmed + balance.immature;
@@ -689,7 +695,7 @@ where
 
     let (descriptor, mut keymap) =
         Descriptor::<DescriptorPublicKey>::parse_descriptor(&secp, &args.descriptor)?;
-    index.add_keychain(Keychain::External, descriptor);
+    index.add_keychain(Keychain::External { account: 0 }, descriptor);
 
     if let Some((internal_descriptor, internal_keymap)) = args
         .change_descriptor
@@ -698,7 +704,7 @@ where
         .transpose()?
     {
         keymap.extend(internal_keymap);
-        index.add_keychain(Keychain::Internal, internal_descriptor);
+        index.add_keychain(Keychain::Internal { account: 0 }, internal_descriptor);
     }
 
     let mut db_backend = match Store::<C>::open_or_create_new(db_magic, &args.db_path) {
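For reference, a minimal, self-contained sketch of how the account-indexed `Keychain` variants from this change behave. The enum and `Display` impl are copied from the diff above; the serde derives are dropped so the snippet compiles without extra dependencies, and the `main` body is purely illustrative.

#[derive(Debug, Clone, Copy, PartialOrd, Ord, PartialEq, Eq)]
pub enum Keychain {
    External { account: u32 },
    Internal { account: u32 },
}

impl core::fmt::Display for Keychain {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            Keychain::External { account } => write!(f, "external[{}]", account),
            Keychain::Internal { account } => write!(f, "internal[{}]", account),
        }
    }
}

fn main() {
    // The added `account` field keeps the derived Ord/Eq, so the variants still work as
    // ordered map keys; the example crates only ever use account 0.
    let change = Keychain::Internal { account: 0 };
    assert!(Keychain::External { account: 0 } < change);
    println!("{}", change); // prints "internal[0]"
}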
diff --git a/example-crates/example_esplora/src/main.rs b/example-crates/example_esplora/src/main.rs
index e92205706..d4d2f32ad 100644
--- a/example-crates/example_esplora/src/main.rs
+++ b/example-crates/example_esplora/src/main.rs
@@ -1,9 +1,9 @@
 use std::{
-    collections::{BTreeMap, BTreeSet},
     io::{self, Write},
     sync::Mutex,
 };
 
+use bdk_chain::spk_client::{FullScanRequest, SyncRequest};
 use bdk_chain::{
     bitcoin::{constants::genesis_block, Address, Network, OutPoint, ScriptBuf, Txid},
     indexed_tx_graph::{self, IndexedTxGraph},
@@ -60,6 +60,7 @@ enum EsploraCommands {
         esplora_args: EsploraArgs,
     },
 }
+
 impl EsploraCommands {
     fn esplora_args(&self) -> EsploraArgs {
         match self {
@@ -82,7 +83,7 @@ impl EsploraArgs {
             Network::Bitcoin => "https://blockstream.info/api",
             Network::Testnet => "https://blockstream.info/testnet/api",
             Network::Regtest => "http://localhost:3002",
-            Network::Signet => "https://mempool.space/signet/api",
+            Network::Signet => "http://signet.bitcoindevkit.net",
             _ => panic!("unsupported network"),
         });
 
@@ -149,59 +150,64 @@ fn main() -> anyhow::Result<()> {
     };
 
     let client = esplora_cmd.esplora_args().client(args.network)?;
-    // Prepare the `IndexedTxGraph` update based on whether we are scanning or syncing.
+    // Prepare the `IndexedTxGraph` and `LocalChain` updates based on whether we are scanning or
+    // syncing.
+    //
     // Scanning: We are iterating through spks of all keychains and scanning for transactions for
     //   each spk. We start with the lowest derivation index spk and stop scanning after `stop_gap`
     //   number of consecutive spks have no transaction history. A scan is typically only
     //   needed when restoring a wallet; it is a special case. Applications should use "sync"
     //   style updates after an initial scan.
+    //
     // Syncing: We only check for specified spks, utxos and txids to update their confirmation
     //   status or fetch missing transactions.
-    let indexed_tx_graph_changeset = match &esplora_cmd {
+    let (local_chain_changeset, indexed_tx_graph_changeset) = match &esplora_cmd {
         EsploraCommands::Scan {
             stop_gap,
             scan_options,
             ..
         } => {
+            let local_tip = chain.lock().expect("mutex must not be poisoned").tip();
             let keychain_spks = graph
                 .lock()
                 .expect("mutex must not be poisoned")
                 .index
-                .all_unbounded_spk_iters()
-                .into_iter()
-                // This `map` is purely for logging.
-                .map(|(keychain, iter)| {
-                    let mut first = true;
-                    let spk_iter = iter.inspect(move |(i, _)| {
-                        if first {
-                            eprint!("\nscanning {}: ", keychain);
-                            first = false;
-                        }
-                        eprint!("{} ", i);
-                        // Flush early to ensure we print at every iteration.
-                        let _ = io::stderr().flush();
-                    });
-                    (keychain, spk_iter)
-                })
-                .collect::<BTreeMap<_, _>>();
+                .all_unbounded_spk_iters();
 
             // The client scans keychain spks for transaction histories, stopping after `stop_gap`
             // is reached. It returns a result containing a `TxGraph` update (`graph_update`), a
             // chain update (`chain_update`) and the last active spk derivation index of each
             // keychain (`last_active_indices`).
-            let (graph_update, last_active_indices) = client
-                .full_scan(keychain_spks, *stop_gap, scan_options.parallel_requests)
+            let mut request = FullScanRequest::new(local_tip);
+            request.add_spks_by_keychain(keychain_spks);
+            request.inspect_spks(move |k, i, spk| {
+                println!(
+                    "{:?}[{}]: {}",
+                    k,
+                    i,
+                    Address::from_script(spk, args.network).unwrap()
+                );
+            });
+            println!("Scanning... ");
+            let result = client
+                .full_scan(request, *stop_gap, scan_options.parallel_requests)
                 .context("scanning for transactions")?;
+            println!("done. ");
 
             let mut graph = graph.lock().expect("mutex must not be poisoned");
+            let mut chain = chain.lock().expect("mutex must not be poisoned");
             // Because we did a stop-gap-based scan we are likely to have some updates to our
             // derivation indices. Usually before a scan you are on a fresh wallet with no
             // addresses derived, so we need to derive up to the last active addresses the scan
             // found before adding the transactions.
-            let (_, index_changeset) = graph.index.reveal_to_target_multi(&last_active_indices);
-            let mut indexed_tx_graph_changeset = graph.apply_update(graph_update);
-            indexed_tx_graph_changeset.append(index_changeset.into());
-            indexed_tx_graph_changeset
+            (chain.apply_update(result.chain_update)?, {
+                let (_, index_changeset) = graph
+                    .index
+                    .reveal_to_target_multi(&result.last_active_indices);
+                let mut indexed_tx_graph_changeset = graph.apply_update(result.graph_update);
+                indexed_tx_graph_changeset.append(index_changeset.into());
+                indexed_tx_graph_changeset
+            })
         }
         EsploraCommands::Sync {
             mut unused_spks,
@@ -223,16 +229,18 @@ fn main() -> anyhow::Result<()> {
             }
 
             // Spks, outpoints and txids we want updates on will be accumulated here.
-            let mut spks: Box<dyn Iterator<Item = ScriptBuf>> = Box::new(core::iter::empty());
+            let mut spks: Box<dyn Iterator<Item = (u32, ScriptBuf)>> =
+                Box::new(core::iter::empty());
             let mut outpoints: Box<dyn Iterator<Item = OutPoint>> = Box::new(core::iter::empty());
             let mut txids: Box<dyn Iterator<Item = Txid>> = Box::new(core::iter::empty());
 
+            let local_tip = chain.lock().expect("mutex must not be poisoned").tip();
+
             // Get a short lock on the structures to get spks, utxos, and txs that we are interested
             // in.
             {
                 let graph = graph.lock().unwrap();
                 let chain = chain.lock().unwrap();
-                let chain_tip = chain.tip().block_id();
 
                 if *all_spks {
                     let all_spks = graph
@@ -240,12 +248,7 @@ fn main() -> anyhow::Result<()> {
                         .revealed_spks()
                         .map(|(k, i, spk)| (k, i, spk.to_owned()))
                         .collect::<Vec<_>>();
-                    spks = Box::new(spks.chain(all_spks.into_iter().map(|(k, i, spk)| {
-                        eprintln!("scanning {}:{}", k, i);
-                        // Flush early to ensure we print at every iteration.
-                        let _ = io::stderr().flush();
-                        spk
-                    })));
+                    spks = Box::new(spks.chain(all_spks.into_iter().map(|(_k, i, spk)| (i, spk))));
                 }
                 if unused_spks {
                     let unused_spks = graph
@@ -253,17 +256,8 @@ fn main() -> anyhow::Result<()> {
                         .unused_spks()
                         .map(|(k, i, spk)| (k, i, spk.to_owned()))
                         .collect::<Vec<_>>();
-                    spks = Box::new(spks.chain(unused_spks.into_iter().map(|(k, i, spk)| {
-                        eprintln!(
-                            "Checking if address {} {}:{} has been used",
-                            Address::from_script(&spk, args.network).unwrap(),
-                            k,
-                            i,
-                        );
-                        // Flush early to ensure we print at every iteration.
-                        let _ = io::stderr().flush();
-                        spk
-                    })));
+                    spks =
+                        Box::new(spks.chain(unused_spks.into_iter().map(|(_k, i, spk)| (i, spk))));
                 }
                 if utxos {
                     // We want to search for whether the UTXO is spent, and spent by which
@@ -272,7 +266,7 @@ fn main() -> anyhow::Result<()> {
                     let init_outpoints = graph.index.outpoints().iter().cloned();
                     let utxos = graph
                         .graph()
-                        .filter_chain_unspents(&*chain, chain_tip, init_outpoints)
+                        .filter_chain_unspents(&*chain, local_tip.block_id(), init_outpoints)
                         .map(|(_, utxo)| utxo)
                         .collect::<Vec<_>>();
                     outpoints = Box::new(
@@ -295,7 +289,7 @@ fn main() -> anyhow::Result<()> {
                     // `EsploraExt::update_tx_graph_without_keychain`.
                     let unconfirmed_txids = graph
                         .graph()
-                        .list_chain_txs(&*chain, chain_tip)
+                        .list_chain_txs(&*chain, local_tip.block_id())
                         .filter(|canonical_tx| !canonical_tx.chain_position.is_confirmed())
                         .map(|canonical_tx| canonical_tx.tx_node.txid)
                         .collect::<Vec<Txid>>();
@@ -307,44 +301,35 @@ fn main() -> anyhow::Result<()> {
                 }
             }
 
-            let graph_update =
-                client.sync(spks, txids, outpoints, scan_options.parallel_requests)?;
+            let mut request = SyncRequest::new(local_tip);
+            request.add_spks(spks);
+            request.inspect_spks(move |i, spk| {
+                println!(
+                    "[{}]: {}",
+                    i,
+                    Address::from_script(spk, args.network).unwrap()
+                );
+            });
+            request.add_txids(txids);
+            request.add_outpoints(outpoints);
+            println!("Syncing... ");
+            let result = client
+                .sync(request, scan_options.parallel_requests)
+                .context("syncing transactions")?;
+            println!("done. ");
 
-            graph.lock().unwrap().apply_update(graph_update)
+            (
+                chain.lock().unwrap().apply_update(result.chain_update)?,
+                graph.lock().unwrap().apply_update(result.graph_update),
+            )
         }
     };
 
     println!();
 
-    // Now that we're done updating the `IndexedTxGraph`, it's time to update the `LocalChain`! We
-    // want the `LocalChain` to have data about all the anchors in the `TxGraph` - for this reason,
-    // we want retrieve the blocks at the heights of the newly added anchors that are missing from
-    // our view of the chain.
-    let (missing_block_heights, tip) = {
-        let chain = &*chain.lock().unwrap();
-        let missing_block_heights = indexed_tx_graph_changeset
-            .graph
-            .missing_heights_from(chain)
-            .collect::<BTreeSet<_>>();
-        let tip = chain.tip();
-        (missing_block_heights, tip)
-    };
-
-    println!("prev tip: {}", tip.height());
-    println!("missing block heights: {:?}", missing_block_heights);
-
-    // Here, we actually fetch the missing blocks and create a `local_chain::Update`.
-    let chain_changeset = {
-        let chain_update = client
-            .update_local_chain(tip, missing_block_heights)
-            .context("scanning for blocks")?;
-        println!("new tip: {}", chain_update.tip.height());
-        chain.lock().unwrap().apply_update(chain_update)?
-    };
-
     // We persist the changes
     let mut db = db.lock().unwrap();
-    db.stage((chain_changeset, indexed_tx_graph_changeset));
+    db.stage((local_chain_changeset, indexed_tx_graph_changeset));
     db.commit()?;
     Ok(())
 }
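As a quick reference for the scan path above, the same flow condensed into a fragment. This is not standalone code: it assumes the `chain`, `graph` and `client` values set up earlier in example_esplora, CLI-provided `stop_gap` and `parallel_requests`, and the `FullScanRequest` API introduced by this change; the per-spk logging is omitted.

// Build a full-scan request from our current chain tip and every keychain's
// unbounded spk iterator; the client derives further spks until `stop_gap`
// consecutive unused ones are seen.
let keychain_spks = graph.lock().unwrap().index.all_unbounded_spk_iters();
let mut request = FullScanRequest::new(chain.lock().unwrap().tip());
request.add_spks_by_keychain(keychain_spks);

let result = client.full_scan(request, stop_gap, parallel_requests)?;

// The result bundles a chain update, a tx-graph update and the last active
// derivation index per keychain, so both structures are updated from it.
let chain_changeset = chain.lock().unwrap().apply_update(result.chain_update)?;
let mut graph = graph.lock().unwrap();
let (_, index_changeset) = graph.index.reveal_to_target_multi(&result.last_active_indices);
let mut graph_changeset = graph.apply_update(result.graph_update);
graph_changeset.append(index_changeset.into());
// (chain_changeset, graph_changeset) is then staged and committed to the store.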
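The corresponding fragment for the sync path, under the same assumptions and using the new `SyncRequest` type: only the spks, txids and outpoints the caller already knows about are checked, and no new addresses are derived.

// Build a sync request from the current chain tip, then feed it the script
// pubkeys, txids and outpoints whose status we want refreshed.
let mut request = SyncRequest::new(chain.lock().unwrap().tip());
request.add_spks(spks);           // impl Iterator<Item = (u32, ScriptBuf)>
request.add_txids(txids);         // unconfirmed txids to re-check
request.add_outpoints(outpoints); // utxos to check for spends

let result = client.sync(request, parallel_requests)?;

// A sync returns the same kind of update pair as a full scan, just without
// last-active indices, since no new spks were derived.
let chain_changeset = chain.lock().unwrap().apply_update(result.chain_update)?;
let graph_changeset = graph.lock().unwrap().apply_update(result.graph_update);
// Stage and commit (chain_changeset, graph_changeset) as before.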
diff --git a/example-crates/wallet_esplora_async/Cargo.toml b/example-crates/wallet_esplora_async/Cargo.toml
index c588a87aa..8e71ea993 100644
--- a/example-crates/wallet_esplora_async/Cargo.toml
+++ b/example-crates/wallet_esplora_async/Cargo.toml
@@ -11,3 +11,5 @@ bdk_esplora = { path = "../../crates/esplora", features = ["async-https"] }
 bdk_file_store = { path = "../../crates/file_store" }
 tokio = { version = "1", features = ["rt", "rt-multi-thread", "macros"] }
 anyhow = "1"
+env_logger = { version = "0.10", default-features = false, features = ["humantime"] }
+log = "0.4.20"
diff --git a/example-crates/wallet_esplora_async/src/main.rs b/example-crates/wallet_esplora_async/src/main.rs
index 690cd87e2..e8dc950be 100644
--- a/example-crates/wallet_esplora_async/src/main.rs
+++ b/example-crates/wallet_esplora_async/src/main.rs
@@ -1,5 +1,6 @@
-use std::{io::Write, str::FromStr};
+use std::str::FromStr;
 
+use bdk::chain::spk_client::FullScanRequest;
 use bdk::{
     bitcoin::{Address, Network},
     wallet::{AddressIndex, Update},
@@ -15,17 +16,15 @@ const PARALLEL_REQUESTS: usize = 5;
 
 #[tokio::main]
 async fn main() -> Result<(), anyhow::Error> {
-    let db_path = std::env::temp_dir().join("bdk-esplora-async-example");
+    //let db_path = std::env::temp_dir().join("bdk-esplora-async-example");
+    let db_path = "bdk-esplora-async-example";
     let db = Store::<bdk::wallet::ChangeSet>::open_or_create_new(DB_MAGIC.as_bytes(), db_path)?;
     let external_descriptor = "wpkh(tprv8ZgxMBicQKsPdy6LMhUtFHAgpocR8GC6QmwMSFpZs7h6Eziw3SpThFfczTDh5rW2krkqffa11UpX3XkeTTB2FvzZKWXqPY54Y6Rq4AQ5R8L/84'/1'/0'/0/*)";
     let internal_descriptor = "wpkh(tprv8ZgxMBicQKsPdy6LMhUtFHAgpocR8GC6QmwMSFpZs7h6Eziw3SpThFfczTDh5rW2krkqffa11UpX3XkeTTB2FvzZKWXqPY54Y6Rq4AQ5R8L/84'/1'/0'/1/*)";
+    let network = Network::Signet;
 
-    let mut wallet = Wallet::new_or_load(
-        external_descriptor,
-        Some(internal_descriptor),
-        db,
-        Network::Testnet,
-    )?;
+    let mut wallet =
+        Wallet::new_or_load(external_descriptor, Some(internal_descriptor), db, network)?;
 
     let address = wallet.try_get_address(AddressIndex::New)?;
     println!("Generated Address: {}", address);
@@ -33,39 +32,33 @@ async fn main() -> Result<(), anyhow::Error> {
     let balance = wallet.get_balance();
     println!("Wallet balance before syncing: {} sats", balance.total());
 
-    print!("Syncing...");
-    let client =
-        esplora_client::Builder::new("https://blockstream.info/testnet/api").build_async()?;
+    let client = esplora_client::Builder::new("http://signet.bitcoindevkit.net").build_async()?;
 
     let prev_tip = wallet.latest_checkpoint();
-    let keychain_spks = wallet
-        .all_unbounded_spk_iters()
-        .into_iter()
-        .map(|(k, k_spks)| {
-            let mut once = Some(());
-            let mut stdout = std::io::stdout();
-            let k_spks = k_spks
-                .inspect(move |(spk_i, _)| match once.take() {
-                    Some(_) => print!("\nScanning keychain [{:?}]", k),
-                    None => print!(" {:<3}", spk_i),
-                })
-                .inspect(move |_| stdout.flush().expect("must flush"));
-            (k, k_spks)
-        })
-        .collect();
-    let (update_graph, last_active_indices) = client
-        .full_scan(keychain_spks, STOP_GAP, PARALLEL_REQUESTS)
+    let keychain_spks = wallet.all_unbounded_spk_iters();
+
+    let mut request = FullScanRequest::new(prev_tip);
+    request.add_spks_by_keychain(keychain_spks);
+    request.inspect_spks(move |k, i, spk| {
+        println!(
+            "{:?}[{}]: {}",
+            k,
+            i,
+            Address::from_script(spk, network).unwrap()
+        );
+    });
+    println!("Scanning... ");
+    let result = client
+        .full_scan(request, STOP_GAP, PARALLEL_REQUESTS)
         .await?;
-    let missing_heights = update_graph.missing_heights(wallet.local_chain());
-    let chain_update = client.update_local_chain(prev_tip, missing_heights).await?;
+    println!("done. ");
     let update = Update {
-        last_active_indices,
-        graph: update_graph,
-        chain: Some(chain_update),
+        last_active_indices: result.last_active_indices,
+        graph: result.graph_update,
+        chain: Some(result.chain_update),
     };
     wallet.apply_update(update)?;
     wallet.commit()?;
-    println!();
 
     let balance = wallet.get_balance();
     println!("Wallet balance after syncing: {} sats", balance.total());
@@ -78,8 +71,8 @@ async fn main() -> Result<(), anyhow::Error> {
         std::process::exit(0);
     }
 
-    let faucet_address = Address::from_str("mkHS9ne12qx9pS9VojpwU5xtRd4T7X7ZUt")?
-        .require_network(Network::Testnet)?;
+    let faucet_address =
+        Address::from_str("mkHS9ne12qx9pS9VojpwU5xtRd4T7X7ZUt")?.require_network(network)?;
 
     let mut tx_builder = wallet.build_tx();
     tx_builder
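At the `Wallet` level the same simplification applies: the chain update now arrives inside the scan result, so the examples no longer compute `missing_heights` or call `update_local_chain` separately. A condensed sketch of the new flow, assuming the `wallet`, `client` and constants from the async example above:

// Scan with a FullScanRequest seeded from the wallet's latest checkpoint ...
let mut request = FullScanRequest::new(wallet.latest_checkpoint());
request.add_spks_by_keychain(wallet.all_unbounded_spk_iters());
let result = client.full_scan(request, STOP_GAP, PARALLEL_REQUESTS).await?;

// ... and fold the whole result into a single wallet update before persisting.
wallet.apply_update(Update {
    last_active_indices: result.last_active_indices,
    graph: result.graph_update,
    chain: Some(result.chain_update),
})?;
wallet.commit()?;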
diff --git a/example-crates/wallet_esplora_blocking/Cargo.toml b/example-crates/wallet_esplora_blocking/Cargo.toml
index 0679bd8f3..5d6ed9b00 100644
--- a/example-crates/wallet_esplora_blocking/Cargo.toml
+++ b/example-crates/wallet_esplora_blocking/Cargo.toml
@@ -11,3 +11,5 @@ bdk = { path = "../../crates/bdk" }
 bdk_esplora = { path = "../../crates/esplora", features = ["blocking"] }
 bdk_file_store = { path = "../../crates/file_store" }
 anyhow = "1"
+env_logger = { version = "0.10", default-features = false, features = ["humantime"] }
+log = "0.4.20"
diff --git a/example-crates/wallet_esplora_blocking/src/main.rs b/example-crates/wallet_esplora_blocking/src/main.rs
index 73bfdd559..ee2a4d2cd 100644
--- a/example-crates/wallet_esplora_blocking/src/main.rs
+++ b/example-crates/wallet_esplora_blocking/src/main.rs
@@ -3,8 +3,9 @@ const SEND_AMOUNT: u64 = 1000;
 const STOP_GAP: usize = 5;
 const PARALLEL_REQUESTS: usize = 1;
 
-use std::{io::Write, str::FromStr};
+use std::str::FromStr;
 
+use bdk::chain::spk_client::FullScanRequest;
 use bdk::{
     bitcoin::{Address, Network},
     wallet::{AddressIndex, Update},
@@ -14,17 +15,15 @@ use bdk_esplora::{esplora_client, EsploraExt};
 use bdk_file_store::Store;
 
 fn main() -> Result<(), anyhow::Error> {
-    let db_path = std::env::temp_dir().join("bdk-esplora-example");
+    // let db_path = std::env::temp_dir().join("bdk-esplora-example");
+    let db_path = "bdk-esplora-blocking-example";
     let db = Store::<bdk::wallet::ChangeSet>::open_or_create_new(DB_MAGIC.as_bytes(), db_path)?;
     let external_descriptor = "wpkh(tprv8ZgxMBicQKsPdy6LMhUtFHAgpocR8GC6QmwMSFpZs7h6Eziw3SpThFfczTDh5rW2krkqffa11UpX3XkeTTB2FvzZKWXqPY54Y6Rq4AQ5R8L/84'/1'/0'/0/*)";
     let internal_descriptor = "wpkh(tprv8ZgxMBicQKsPdy6LMhUtFHAgpocR8GC6QmwMSFpZs7h6Eziw3SpThFfczTDh5rW2krkqffa11UpX3XkeTTB2FvzZKWXqPY54Y6Rq4AQ5R8L/84'/1'/0'/1/*)";
+    let network = Network::Signet;
 
-    let mut wallet = Wallet::new_or_load(
-        external_descriptor,
-        Some(internal_descriptor),
-        db,
-        Network::Testnet,
-    )?;
+    let mut wallet =
+        Wallet::new_or_load(external_descriptor, Some(internal_descriptor), db, network)?;
 
     let address = wallet.try_get_address(AddressIndex::New)?;
     println!("Generated Address: {}", address);
@@ -32,38 +31,30 @@ fn main() -> Result<(), anyhow::Error> {
     let balance = wallet.get_balance();
     println!("Wallet balance before syncing: {} sats", balance.total());
 
-    print!("Syncing...");
     let client =
-        esplora_client::Builder::new("https://blockstream.info/testnet/api").build_blocking()?;
-
-    let prev_tip = wallet.latest_checkpoint();
-    let keychain_spks = wallet
-        .all_unbounded_spk_iters()
-        .into_iter()
-        .map(|(k, k_spks)| {
-            let mut once = Some(());
-            let mut stdout = std::io::stdout();
-            let k_spks = k_spks
-                .inspect(move |(spk_i, _)| match once.take() {
-                    Some(_) => print!("\nScanning keychain [{:?}]", k),
-                    None => print!(" {:<3}", spk_i),
-                })
-                .inspect(move |_| stdout.flush().expect("must flush"));
-            (k, k_spks)
-        })
-        .collect();
-
-    let (update_graph, last_active_indices) =
-        client.full_scan(keychain_spks, STOP_GAP, PARALLEL_REQUESTS)?;
-    let missing_heights = update_graph.missing_heights(wallet.local_chain());
-    let chain_update = client.update_local_chain(prev_tip, missing_heights)?;
-    let update = Update {
-        last_active_indices,
-        graph: update_graph,
-        chain: Some(chain_update),
-    };
-
-    wallet.apply_update(update)?;
+        esplora_client::Builder::new("http://signet.bitcoindevkit.net").build_blocking()?;
+
+    let keychain_spks = wallet.all_unbounded_spk_iters();
+
+    let mut request = FullScanRequest::new(wallet.latest_checkpoint());
+    request.add_spks_by_keychain(keychain_spks);
+    request.inspect_spks(move |k, i, spk| {
+        println!(
+            "{:?}[{}]: {}",
+            k,
+            i,
+            Address::from_script(spk, network).unwrap()
+        );
+    });
+    println!("Scanning...");
+    let result = client.full_scan(request, STOP_GAP, PARALLEL_REQUESTS)?;
+    println!("done. ");
+
+    wallet.apply_update(Update {
+        last_active_indices: result.last_active_indices,
+        graph: result.graph_update,
+        chain: Some(result.chain_update),
+    })?;
     wallet.commit()?;
     println!();