• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

bitcoindevkit / bdk / 5582722505

pending completion
5582722505

Pull #1002

github

web-flow
Merge 98a52d0cb into 81c761339
Pull Request #1002: Implement linked-list `LocalChain` and add rpc-chain module/example

945 of 945 new or added lines in 10 files covered. (100.0%)

8019 of 10332 relevant lines covered (77.61%)

5036.23 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

0.0
/crates/esplora/src/async_ext.rs
1
use async_trait::async_trait;
2
use bdk_chain::collections::btree_map;
3
use bdk_chain::{
4
    bitcoin::{BlockHash, OutPoint, Script, Txid},
5
    collections::BTreeMap,
6
    local_chain::CheckPoint,
7
    BlockId, ConfirmationTimeAnchor, TxGraph,
8
};
9
use esplora_client::{Error, TxStatus};
10
use futures::{stream::FuturesOrdered, TryStreamExt};
11

12
use crate::{anchor_from_status, ASSUME_FINAL_DEPTH};
13

14
/// Trait to extend the functionality of [`esplora_client::AsyncClient`].
///
/// Refer to [crate-level documentation] for more.
///
/// [crate-level documentation]: crate
#[cfg_attr(target_arch = "wasm32", async_trait(?Send))]
#[cfg_attr(not(target_arch = "wasm32"), async_trait)]
pub trait EsploraAsyncExt {
    /// Prepare a [`LocalChain`] update with blocks fetched from Esplora.
    ///
    /// * `prev_tip` is the previous tip of [`LocalChain::tip`].
    /// * `get_heights` is the block heights that we are interested in fetching from Esplora.
    ///
    /// The result of this method can be applied to [`LocalChain::update`].
    ///
    /// [`LocalChain`]: bdk_chain::local_chain::LocalChain
    /// [`LocalChain::tip`]: bdk_chain::local_chain::LocalChain::tip
    /// [`LocalChain::update`]: bdk_chain::local_chain::LocalChain::update
    #[allow(clippy::result_large_err)]
    async fn update_local_chain(
        &self,
        prev_tip: Option<CheckPoint>,
        get_heights: impl IntoIterator<IntoIter = impl Iterator<Item = u32> + Send> + Send,
    ) -> Result<CheckPoint, Error>;

    /// Scan Esplora for the data specified and return a [`TxGraph`] and a map of last active
    /// indices.
    ///
    /// * `keychain_spks`: keychains that we want to scan transactions for
    /// * `txids`: transactions for which we want updated [`ConfirmationTimeAnchor`]s
    /// * `outpoints`: transactions associated with these outpoints (residing, spending) that we
    ///     want to include in the update
    ///
    /// The scan for each keychain stops after a gap of `stop_gap` script pubkeys with no associated
    /// transactions. `parallel_requests` specifies the max number of HTTP requests to make in
    /// parallel.
    #[allow(clippy::result_large_err)]
    async fn update_tx_graph<K: Ord + Clone + Send>(
        &self,
        keychain_spks: BTreeMap<
            K,
            impl IntoIterator<IntoIter = impl Iterator<Item = (u32, Script)> + Send> + Send,
        >,
        txids: impl IntoIterator<IntoIter = impl Iterator<Item = Txid> + Send> + Send,
        outpoints: impl IntoIterator<IntoIter = impl Iterator<Item = OutPoint> + Send> + Send,
        stop_gap: usize,
        parallel_requests: usize,
    ) -> Result<(TxGraph<ConfirmationTimeAnchor>, BTreeMap<K, u32>), Error>;

    /// Convenience method to call [`update_tx_graph`] without requiring a keychain.
    ///
    /// Delegates to [`update_tx_graph`] with a single unit (`()`) keychain whose spk indices are
    /// the enumeration positions of `misc_spks`, and discards the returned last-active-index map.
    ///
    /// [`update_tx_graph`]: EsploraAsyncExt::update_tx_graph
    #[allow(clippy::result_large_err)]
    async fn update_tx_graph_without_keychain(
        &self,
        misc_spks: impl IntoIterator<IntoIter = impl Iterator<Item = Script> + Send> + Send,
        txids: impl IntoIterator<IntoIter = impl Iterator<Item = Txid> + Send> + Send,
        outpoints: impl IntoIterator<IntoIter = impl Iterator<Item = OutPoint> + Send> + Send,
        parallel_requests: usize,
    ) -> Result<TxGraph<ConfirmationTimeAnchor>, Error> {
        self.update_tx_graph(
            // The unit type serves as the sole "keychain"; spks are indexed by their
            // position in `misc_spks`.
            [(
                (),
                misc_spks
                    .into_iter()
                    .enumerate()
                    .map(|(i, spk)| (i as u32, spk)),
            )]
            .into(),
            txids,
            outpoints,
            // There is no derivation gap to respect here, so pass an effectively
            // unbounded stop gap: every provided spk gets scanned.
            usize::MAX,
            parallel_requests,
        )
        .await
        .map(|(g, _)| g)
    }
}
92

93
#[cfg_attr(target_arch = "wasm32", async_trait(?Send))]
94
#[cfg_attr(not(target_arch = "wasm32"), async_trait)]
95
impl EsploraAsyncExt for esplora_client::AsyncClient {
96
    async fn update_local_chain(
×
97
        &self,
×
98
        prev_tip: Option<CheckPoint>,
×
99
        get_heights: impl IntoIterator<IntoIter = impl Iterator<Item = u32> + Send> + Send,
×
100
    ) -> Result<CheckPoint, Error> {
×
101
        let new_tip_height = self.get_height().await?;
×
102

103
        // If esplora returns a tip height that is lower than our previous tip, then checkpoints do
104
        // not need updating. We just return the previous tip and use that as the point of
105
        // agreement.
106
        if let Some(prev_tip) = prev_tip.as_ref() {
×
107
            if new_tip_height < prev_tip.height() {
×
108
                return Ok(prev_tip.clone());
×
109
            }
×
110
        }
×
111

112
        // Fetch new block IDs that are to be included in the update. This includes:
113
        // 1. Atomically fetched most-recent blocks so we have a consistent view even during reorgs.
114
        // 2. Heights the caller is interested in (as specified in `get_heights`).
115
        let mut new_blocks = {
×
116
            let heights = (0..=new_tip_height).rev();
×
117
            let hashes = self
×
118
                .get_blocks(Some(new_tip_height))
×
119
                .await?
×
120
                .into_iter()
×
121
                .map(|b| b.id);
×
122

×
123
            let mut new_blocks = heights.zip(hashes).collect::<BTreeMap<u32, BlockHash>>();
×
124

125
            for height in get_heights {
×
126
                // do not fetch blocks higher than known tip
127
                if height > new_tip_height {
×
128
                    continue;
×
129
                }
×
130
                if let btree_map::Entry::Vacant(entry) = new_blocks.entry(height) {
×
131
                    let hash = self.get_block_hash(height).await?;
×
132
                    entry.insert(hash);
×
133
                }
×
134
            }
135

136
            new_blocks
×
137
        };
138

139
        // Determine the checkpoint to start building our update tip from.
140
        let first_cp = match prev_tip {
×
141
            Some(old_tip) => {
×
142
                let old_tip_height = old_tip.height();
×
143
                let mut earliest_agreement_cp = Option::<CheckPoint>::None;
×
144

145
                for old_cp in old_tip.iter() {
×
146
                    let old_block = old_cp.block_id();
×
147

148
                    let new_hash = match new_blocks.entry(old_block.height) {
×
149
                        btree_map::Entry::Vacant(entry) => *entry.insert(
×
150
                            if old_tip_height - old_block.height >= ASSUME_FINAL_DEPTH {
×
151
                                old_block.hash
×
152
                            } else {
153
                                self.get_block_hash(old_block.height).await?
×
154
                            },
155
                        ),
156
                        btree_map::Entry::Occupied(entry) => *entry.get(),
×
157
                    };
158

159
                    // Since we may introduce blocks below the point of agreement, we cannot break
160
                    // here unconditionally. We only break if we guarantee there are no new heights
161
                    // below our current.
162
                    if old_block.hash == new_hash {
×
163
                        earliest_agreement_cp = Some(old_cp);
×
164

×
165
                        let first_new_height = *new_blocks
×
166
                            .keys()
×
167
                            .next()
×
168
                            .expect("must have atleast one new block");
×
169
                        if first_new_height <= old_block.height {
×
170
                            break;
×
171
                        }
×
172
                    }
×
173
                }
174

175
                earliest_agreement_cp
×
176
            }
177
            None => None,
×
178
        }
179
        .unwrap_or_else(|| {
×
180
            let (&height, &hash) = new_blocks
×
181
                .iter()
×
182
                .next()
×
183
                .expect("must have atleast one new block");
×
184
            CheckPoint::new(BlockId { height, hash })
×
185
        });
×
186

×
187
        let new_tip = new_blocks
×
188
            .split_off(&(first_cp.height() + 1))
×
189
            .into_iter()
×
190
            .map(|(height, hash)| BlockId { height, hash })
×
191
            .fold(first_cp, |prev_cp, block| {
×
192
                prev_cp
×
193
                    .extend_with_blocks(core::iter::once(block))
×
194
                    .expect("must extend checkpoint")
×
195
            });
×
196

×
197
        Ok(new_tip)
×
198
    }
×
199

200
    async fn update_tx_graph<K: Ord + Clone + Send>(
×
201
        &self,
×
202
        keychain_spks: BTreeMap<
×
203
            K,
×
204
            impl IntoIterator<IntoIter = impl Iterator<Item = (u32, Script)> + Send> + Send,
×
205
        >,
×
206
        txids: impl IntoIterator<IntoIter = impl Iterator<Item = Txid> + Send> + Send,
×
207
        outpoints: impl IntoIterator<IntoIter = impl Iterator<Item = OutPoint> + Send> + Send,
×
208
        stop_gap: usize,
×
209
        parallel_requests: usize,
×
210
    ) -> Result<(TxGraph<ConfirmationTimeAnchor>, BTreeMap<K, u32>), Error> {
×
211
        type TxsOfSpkIndex = (u32, Vec<esplora_client::Tx>);
212
        let parallel_requests = Ord::max(parallel_requests, 1);
×
213
        let mut graph = TxGraph::<ConfirmationTimeAnchor>::default();
×
214
        let mut last_active_indexes = BTreeMap::<K, u32>::new();
×
215

216
        for (keychain, spks) in keychain_spks {
×
217
            let mut spks = spks.into_iter();
×
218
            let mut last_index = Option::<u32>::None;
×
219
            let mut last_active_index = Option::<u32>::None;
×
220

221
            loop {
×
222
                let handles = spks
×
223
                    .by_ref()
×
224
                    .take(parallel_requests)
×
225
                    .map(|(spk_index, spk)| {
×
226
                        let client = self.clone();
×
227
                        async move {
×
228
                            let mut last_seen = None;
×
229
                            let mut spk_txs = Vec::new();
×
230
                            loop {
231
                                let txs = client.scripthash_txs(&spk, last_seen).await?;
×
232
                                let tx_count = txs.len();
×
233
                                last_seen = txs.last().map(|tx| tx.txid);
×
234
                                spk_txs.extend(txs);
×
235
                                if tx_count < 25 {
×
236
                                    break Result::<_, Error>::Ok((spk_index, spk_txs));
×
237
                                }
×
238
                            }
239
                        }
×
240
                    })
×
241
                    .collect::<FuturesOrdered<_>>();
×
242

×
243
                if handles.is_empty() {
×
244
                    break;
×
245
                }
×
246

247
                for (index, txs) in handles.try_collect::<Vec<TxsOfSpkIndex>>().await? {
×
248
                    last_index = Some(index);
×
249
                    if !txs.is_empty() {
×
250
                        last_active_index = Some(index);
×
251
                    }
×
252
                    for tx in txs {
×
253
                        let _ = graph.insert_tx(tx.to_tx());
×
254
                        if let Some(anchor) = anchor_from_status(&tx.status) {
×
255
                            let _ = graph.insert_anchor(tx.txid, anchor);
×
256
                        }
×
257
                    }
258
                }
259

260
                if last_index > last_active_index.map(|i| i + stop_gap as u32) {
×
261
                    break;
×
262
                }
×
263
            }
264

265
            if let Some(last_active_index) = last_active_index {
×
266
                last_active_indexes.insert(keychain, last_active_index);
×
267
            }
×
268
        }
269

270
        let mut txids = txids.into_iter();
×
271
        loop {
×
272
            let handles = txids
×
273
                .by_ref()
×
274
                .take(parallel_requests)
×
275
                .filter(|&txid| graph.get_tx(txid).is_none())
×
276
                .map(|txid| {
×
277
                    let client = self.clone();
×
278
                    async move { client.get_tx_status(&txid).await.map(|s| (txid, s)) }
×
279
                })
×
280
                .collect::<FuturesOrdered<_>>();
×
281
            // .collect::<Vec<JoinHandle<Result<(Txid, TxStatus), Error>>>>();
×
282

×
283
            if handles.is_empty() {
×
284
                break;
×
285
            }
×
286

287
            for (txid, status) in handles.try_collect::<Vec<(Txid, TxStatus)>>().await? {
×
288
                if let Some(anchor) = anchor_from_status(&status) {
×
289
                    let _ = graph.insert_anchor(txid, anchor);
×
290
                }
×
291
            }
292
        }
293

294
        for op in outpoints.into_iter() {
×
295
            if graph.get_tx(op.txid).is_none() {
×
296
                if let Some(tx) = self.get_tx(&op.txid).await? {
×
297
                    let _ = graph.insert_tx(tx);
×
298
                }
×
299
                let status = self.get_tx_status(&op.txid).await?;
×
300
                if let Some(anchor) = anchor_from_status(&status) {
×
301
                    let _ = graph.insert_anchor(op.txid, anchor);
×
302
                }
×
303
            }
×
304

305
            if let Some(op_status) = self.get_output_status(&op.txid, op.vout as _).await? {
×
306
                if let Some(txid) = op_status.txid {
×
307
                    if graph.get_tx(txid).is_none() {
×
308
                        if let Some(tx) = self.get_tx(&txid).await? {
×
309
                            let _ = graph.insert_tx(tx);
×
310
                        }
×
311
                        let status = self.get_tx_status(&txid).await?;
×
312
                        if let Some(anchor) = anchor_from_status(&status) {
×
313
                            let _ = graph.insert_anchor(txid, anchor);
×
314
                        }
×
315
                    }
×
316
                }
×
317
            }
×
318
        }
319

320
        Ok((graph, last_active_indexes))
×
321
    }
×
322
}
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2025 Coveralls, Inc