
bitcoindevkit / bdk · build 9737955023

01 Jul 2024 04:01AM UTC · coverage: 83.182% (+0.2%) from 83.029%

Pull Request #1478: Make `bdk_esplora` more modular
Merge d05135804 into 22368ab7b (committed via GitHub by web-flow)

358 of 422 new or added lines in 2 files covered (84.83%)

213 existing lines in 7 files now uncovered

11148 of 13402 relevant lines covered (83.18%)

16721.16 hits per line

Source file: /crates/esplora/src/async_ext.rs (94.32% covered)
use std::collections::BTreeSet;
use std::usize;

use async_trait::async_trait;
use bdk_chain::spk_client::{FullScanRequest, FullScanResult, SyncRequest, SyncResult};
use bdk_chain::{
    bitcoin::{BlockHash, OutPoint, ScriptBuf, TxOut, Txid},
    collections::BTreeMap,
    local_chain::CheckPoint,
    BlockId, ConfirmationTimeHeightAnchor, TxGraph,
};
use bdk_chain::{Anchor, Indexed};
use esplora_client::{Amount, Tx, TxStatus};
use futures::{stream::FuturesOrdered, TryStreamExt};

use crate::anchor_from_status;

/// [`esplora_client::Error`]
type Error = Box<esplora_client::Error>;

/// Trait to extend the functionality of [`esplora_client::AsyncClient`].
///
/// Refer to [crate-level documentation](crate) for more.
#[cfg_attr(target_arch = "wasm32", async_trait(?Send))]
#[cfg_attr(not(target_arch = "wasm32"), async_trait)]
pub trait EsploraAsyncExt {
    /// Scan keychain scripts for transactions against Esplora, returning an update that can be
    /// applied to the receiving structures.
    ///
    /// `request` provides the data required to perform a script-pubkey-based full scan
    /// (see [`FullScanRequest`]). The full scan for each keychain (`K`) stops after a gap of
    /// `stop_gap` script pubkeys with no associated transactions. `parallel_requests` specifies
    /// the maximum number of HTTP requests to make in parallel.
    ///
    /// Refer to [crate-level docs](crate) for more.
    async fn full_scan<K: Ord + Clone + Send>(
        &self,
        request: FullScanRequest<K>,
        stop_gap: usize,
        parallel_requests: usize,
    ) -> Result<FullScanResult<K>, Error>;

    /// Sync a set of scripts, txids, and/or outpoints against Esplora.
    ///
    /// `request` provides the data required to perform a script-pubkey-based sync (see
    /// [`SyncRequest`]). `parallel_requests` specifies the maximum number of HTTP requests to make
    /// in parallel.
    ///
    /// Refer to [crate-level docs](crate) for more.
    async fn sync(
        &self,
        request: SyncRequest,
        parallel_requests: usize,
    ) -> Result<SyncResult, Error>;

    /// Populate the `tx_graph` with transactions and associated [`ConfirmationTimeHeightAnchor`]s
    /// by scanning `keychain_spks` against Esplora.
    ///
    /// `keychain_spks` is an *unbounded* indexed-[`ScriptBuf`] iterator that represents scripts
    /// derived from a keychain. Scanning stops after a gap of `stop_gap` consecutive scripts with
    /// no transaction history. `parallel_requests` specifies the maximum number of HTTP requests
    /// to make in parallel.
    ///
    /// The last active keychain index (if any) is returned. This is the keychain index of the last
    /// script that contains a non-empty transaction history.
    ///
    /// Refer to [crate-level docs](crate) for more.
    async fn populate_using_keychain_spks<I: Iterator<Item = Indexed<ScriptBuf>> + Send>(
        &self,
        tx_graph: &mut TxGraph<ConfirmationTimeHeightAnchor>,
        keychain_spks: I,
        stop_gap: usize,
        parallel_requests: usize,
    ) -> Result<Option<u32>, Error>;

    /// Populate the `tx_graph` with transactions and associated [`ConfirmationTimeHeightAnchor`]s
    /// by scanning `spks` against Esplora.
    ///
    /// Unlike with [`EsploraAsyncExt::populate_using_keychain_spks`], `spks` must be *bounded* as
    /// all contained scripts will be scanned. `parallel_requests` specifies the maximum number of
    /// HTTP requests to make in parallel.
    ///
    /// Refer to [crate-level docs](crate) for more.
    async fn populate_using_spks<I: IntoIterator<Item = ScriptBuf> + Send>(
        &self,
        tx_graph: &mut TxGraph<ConfirmationTimeHeightAnchor>,
        spks: I,
        parallel_requests: usize,
    ) -> Result<(), Error>
    where
        I::IntoIter: ExactSizeIterator + Send;

    /// Populate the `tx_graph` with transactions and associated [`ConfirmationTimeHeightAnchor`]s
    /// by scanning `txids` against Esplora.
    ///
    /// `parallel_requests` specifies the maximum number of HTTP requests to make in parallel.
    ///
    /// Refer to [crate-level docs](crate) for more.
    async fn populate_using_txids<I: IntoIterator<Item = Txid> + Send>(
        &self,
        tx_graph: &mut TxGraph<ConfirmationTimeHeightAnchor>,
        txids: I,
        parallel_requests: usize,
    ) -> Result<(), Error>
    where
        I::IntoIter: ExactSizeIterator + Send;

    /// Populate the `tx_graph` with the transactions in which the `outpoints` reside, any
    /// transactions that spend them, and the associated [`ConfirmationTimeHeightAnchor`]s, by
    /// scanning `outpoints` against Esplora.
    ///
    /// `parallel_requests` specifies the maximum number of HTTP requests to make in parallel.
    ///
    /// Refer to [crate-level docs](crate) for more.
    async fn populate_using_outpoints<I: IntoIterator<Item = OutPoint> + Send>(
        &self,
        tx_graph: &mut TxGraph<ConfirmationTimeHeightAnchor>,
        outpoints: I,
        parallel_requests: usize,
    ) -> Result<(), Error>
    where
        I::IntoIter: ExactSizeIterator + Send;
}
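
From the caller's side, `full_scan` and `sync` take a prepared request plus the `stop_gap` / `parallel_requests` tuning knobs and return an update to apply to the receiving structures. A minimal sketch of driving `full_scan`, assuming the `FullScanRequest`, `LocalChain` and `TxGraph` are built and persisted elsewhere (the helper name, the parameter values and the Esplora URL are placeholders, not part of this file):

use bdk_chain::local_chain::LocalChain;
use bdk_chain::spk_client::FullScanRequest;
use bdk_chain::{ConfirmationTimeHeightAnchor, TxGraph};
use bdk_esplora::EsploraAsyncExt;

/// Sketch only: drive a full scan and apply the result to the receiving structures.
async fn full_scan_example<K: Ord + Clone + Send>(
    request: FullScanRequest<K>,
    local_chain: &mut LocalChain,
    tx_graph: &mut TxGraph<ConfirmationTimeHeightAnchor>,
) -> anyhow::Result<()> {
    // Example URL; any Esplora instance works.
    let client = esplora_client::Builder::new("https://blockstream.info/api").build_async()?;

    // Stop each keychain scan after 20 consecutive unused scripts and keep at most
    // 5 HTTP requests in flight.
    let result = client.full_scan(request, 20, 5).await?;

    // Apply the updates to the receiving structures.
    local_chain.apply_update(result.chain_update)?;
    let _graph_changeset = tx_graph.apply_update(result.graph_update);
    // `result.last_active_indices` reports the last used derivation index per keychain.
    Ok(())
}

`sync` is driven the same way with a `SyncRequest`, and its `SyncResult` carries only the chain and graph updates.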

#[cfg_attr(target_arch = "wasm32", async_trait(?Send))]
#[cfg_attr(not(target_arch = "wasm32"), async_trait)]
impl EsploraAsyncExt for esplora_client::AsyncClient {
    async fn full_scan<K: Ord + Clone + Send>(
        &self,
        request: FullScanRequest<K>,
        stop_gap: usize,
        parallel_requests: usize,
    ) -> Result<FullScanResult<K>, Error> {
        let latest_blocks = fetch_latest_blocks(self).await?;
        let mut graph_update = TxGraph::default();
        let mut last_active_indices = BTreeMap::<K, u32>::new();
        for (keychain, keychain_spks) in request.spks_by_keychain {
            if let Some(last_active_index) = self
                .populate_using_keychain_spks(
                    &mut graph_update,
                    keychain_spks,
                    stop_gap,
                    parallel_requests,
                )
                .await?
            {
                last_active_indices.insert(keychain, last_active_index);
            }
        }
        let chain_update = chain_update(
            self,
            &latest_blocks,
            &request.chain_tip,
            graph_update.all_anchors(),
        )
        .await?;
        Ok(FullScanResult {
            chain_update,
            graph_update,
            last_active_indices,
        })
    }

    async fn sync(
        &self,
        request: SyncRequest,
        parallel_requests: usize,
    ) -> Result<SyncResult, Error> {
        let latest_blocks = fetch_latest_blocks(self).await?;
        let mut graph_update = TxGraph::default();
        self.populate_using_spks(&mut graph_update, request.spks, parallel_requests)
            .await?;
        self.populate_using_txids(&mut graph_update, request.txids, parallel_requests)
            .await?;
        self.populate_using_outpoints(&mut graph_update, request.outpoints, parallel_requests)
            .await?;
        let chain_update = chain_update(
            self,
            &latest_blocks,
            &request.chain_tip,
            graph_update.all_anchors(),
        )
        .await?;
        Ok(SyncResult {
            chain_update,
            graph_update,
        })
    }

    async fn populate_using_keychain_spks<I: Iterator<Item = Indexed<ScriptBuf>> + Send>(
        &self,
        tx_graph: &mut TxGraph<ConfirmationTimeHeightAnchor>,
        mut keychain_spks: I,
        stop_gap: usize,
        parallel_requests: usize,
    ) -> Result<Option<u32>, Error> {
        type TxsOfSpkIndex = (u32, Vec<esplora_client::Tx>);

        let mut last_index = Option::<u32>::None;
        let mut last_active_index = Option::<u32>::None;

        loop {
            let handles = keychain_spks
                .by_ref()
                .take(parallel_requests)
                .map(|(spk_index, spk)| {
                    let client = self.clone();
                    async move {
                        let mut last_seen = None;
                        let mut spk_txs = Vec::new();
                        loop {
                            let txs = client.scripthash_txs(&spk, last_seen).await?;
                            let tx_count = txs.len();
                            last_seen = txs.last().map(|tx| tx.txid);
                            spk_txs.extend(txs);
                            if tx_count < 25 {
                                break Result::<_, Error>::Ok((spk_index, spk_txs));
                            }
                        }
                    }
                })
                .collect::<FuturesOrdered<_>>();

            if handles.is_empty() {
                break;
            }

            for (index, txs) in handles.try_collect::<Vec<TxsOfSpkIndex>>().await? {
                last_index = Some(index);
                if !txs.is_empty() {
                    last_active_index = Some(index);
                }
                for tx in txs {
                    let _ = tx_graph.insert_tx(tx.to_tx());
                    if let Some(anchor) = anchor_from_status(&tx.status) {
                        let _ = tx_graph.insert_anchor(tx.txid, anchor);
                    }

                    let previous_outputs = tx.vin.iter().filter_map(|vin| {
                        let prevout = vin.prevout.as_ref()?;
                        Some((
                            OutPoint {
                                txid: vin.txid,
                                vout: vin.vout,
                            },
                            TxOut {
                                script_pubkey: prevout.scriptpubkey.clone(),
                                value: Amount::from_sat(prevout.value),
                            },
                        ))
                    });

                    for (outpoint, txout) in previous_outputs {
                        let _ = tx_graph.insert_txout(outpoint, txout);
                    }
                }
            }

            let last_index = last_index.expect("Must be set since handles wasn't empty.");
            let gap_limit_reached = if let Some(i) = last_active_index {
                last_index >= i.saturating_add(stop_gap as u32)
            } else {
                last_index + 1 >= stop_gap as u32
            };
            if gap_limit_reached {
                break;
            }
        }

        Ok(last_active_index)
    }
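
Concretely, with `stop_gap = 3`: if the last script with history is at index 5, scanning continues until a processed batch ends at index 8 or later (indices 6, 7 and 8 all empty), since `last_index >= last_active_index + stop_gap` then holds; if no script has any history at all, scanning stops once index 2 has been checked, because `last_index + 1 >= stop_gap`.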

    async fn populate_using_spks<I: IntoIterator<Item = ScriptBuf> + Send>(
        &self,
        tx_graph: &mut TxGraph<ConfirmationTimeHeightAnchor>,
        spks: I,
        parallel_requests: usize,
    ) -> Result<(), Error>
    where
        I::IntoIter: ExactSizeIterator + Send,
    {
        self.populate_using_keychain_spks(
            tx_graph,
            spks.into_iter().enumerate().map(|(i, spk)| (i as u32, spk)),
            usize::MAX,
            parallel_requests,
        )
        .await
        .map(|_| ())
    }

    async fn populate_using_txids<I: IntoIterator<Item = Txid> + Send>(
        &self,
        tx_graph: &mut TxGraph<ConfirmationTimeHeightAnchor>,
        txids: I,
        parallel_requests: usize,
    ) -> Result<(), Error>
    where
        I::IntoIter: ExactSizeIterator + Send,
    {
        enum EsploraResp {
            TxStatus(TxStatus),
            Tx(Option<Tx>),
        }

        let mut txids = txids.into_iter();
        loop {
            let handles = txids
                .by_ref()
                .take(parallel_requests)
                .map(|txid| {
                    let client = self.clone();
                    let tx_already_exists = tx_graph.get_tx(txid).is_some();
                    async move {
                        if tx_already_exists {
                            client
                                .get_tx_status(&txid)
                                .await
                                .map(|s| (txid, EsploraResp::TxStatus(s)))
                        } else {
                            client
                                .get_tx_info(&txid)
                                .await
                                .map(|t| (txid, EsploraResp::Tx(t)))
                        }
                    }
                })
                .collect::<FuturesOrdered<_>>();

            if handles.is_empty() {
                break;
            }

            for (txid, resp) in handles.try_collect::<Vec<_>>().await? {
                match resp {
                    EsploraResp::TxStatus(status) => {
                        if let Some(anchor) = anchor_from_status(&status) {
                            let _ = tx_graph.insert_anchor(txid, anchor);
                        }
                    }
                    EsploraResp::Tx(Some(tx_info)) => {
                        let _ = tx_graph.insert_tx(tx_info.to_tx());
                        if let Some(anchor) = anchor_from_status(&tx_info.status) {
                            let _ = tx_graph.insert_anchor(txid, anchor);
                        }
                    }
                    _ => continue,
                }
            }
        }
        Ok(())
    }

    async fn populate_using_outpoints<I: IntoIterator<Item = OutPoint> + Send>(
        &self,
        tx_graph: &mut TxGraph<ConfirmationTimeHeightAnchor>,
        outpoints: I,
        parallel_requests: usize,
    ) -> Result<(), Error>
    where
        I::IntoIter: ExactSizeIterator + Send,
    {
        let outpoints = outpoints.into_iter().collect::<Vec<_>>();

        // make sure txs exist in the graph and tx statuses are updated
        // TODO: We should maintain a tx cache (like we do with Electrum).
        self.populate_using_txids(
            tx_graph,
            outpoints.iter().map(|op| op.txid),
            parallel_requests,
        )
        .await?;

        // get outpoint spend-statuses
        let mut outpoints = outpoints.into_iter();
        let mut missing_txs = Vec::<Txid>::with_capacity(outpoints.len());
        loop {
            let handles = outpoints
                .by_ref()
                .take(parallel_requests)
                .map(|op| {
                    let client = self.clone();
                    async move { client.get_output_status(&op.txid, op.vout as _).await }
                })
                .collect::<FuturesOrdered<_>>();

            if handles.is_empty() {
                break;
            }

            for op_status in handles.try_collect::<Vec<_>>().await?.into_iter().flatten() {
                let spend_txid = match op_status.txid {
                    Some(txid) => txid,
                    None => continue,
                };
                if tx_graph.get_tx(spend_txid).is_none() {
                    missing_txs.push(spend_txid);
                }
                if let Some(spend_status) = op_status.status {
                    if let Some(spend_anchor) = anchor_from_status(&spend_status) {
                        let _ = tx_graph.insert_anchor(spend_txid, spend_anchor);
                    }
                }
            }
        }

        self.populate_using_txids(tx_graph, missing_txs, parallel_requests)
            .await?;
        Ok(())
    }
}
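
The three `populate_using_*` methods are the modular building blocks behind `sync`: each call only adds data for the inputs it is given, so a caller can combine them directly when a targeted update is enough. A minimal sketch, assuming the scripts, txids and outpoints of interest are already known (the helper name and the parallelism value are arbitrary, not part of this file):

use bdk_chain::bitcoin::{OutPoint, ScriptBuf, Txid};
use bdk_chain::{ConfirmationTimeHeightAnchor, TxGraph};
use bdk_esplora::EsploraAsyncExt;

/// Sketch only: build a transaction-graph update from an arbitrary mix of
/// scripts, txids and outpoints, without going through `full_scan`/`sync`.
async fn targeted_update(
    client: &esplora_client::AsyncClient,
    spks: Vec<ScriptBuf>,
    txids: Vec<Txid>,
    outpoints: Vec<OutPoint>,
) -> Result<TxGraph<ConfirmationTimeHeightAnchor>, Box<esplora_client::Error>> {
    let mut graph_update = TxGraph::default();
    // Up to 5 HTTP requests in flight for each phase.
    client
        .populate_using_spks(&mut graph_update, spks, 5)
        .await?;
    client
        .populate_using_txids(&mut graph_update, txids, 5)
        .await?;
    client
        .populate_using_outpoints(&mut graph_update, outpoints, 5)
        .await?;
    Ok(graph_update)
}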

/// Fetch latest blocks from Esplora in an atomic call.
///
/// We want to do this before fetching transactions and anchors as we cannot fetch latest blocks AND
/// transactions atomically, and the checkpoint tip is used to determine the last-scanned block (for
/// block-based chain-sources). Therefore it's better to be conservative when setting the tip (use
/// an earlier tip rather than a later tip), otherwise the caller may accidentally skip blocks when
/// alternating between chain-sources.
async fn fetch_latest_blocks(
    client: &esplora_client::AsyncClient,
) -> Result<BTreeMap<u32, BlockHash>, Error> {
    Ok(client
        .get_blocks(None)
        .await?
        .into_iter()
        .map(|b| (b.time.height, b.id))
        .collect())
}

/// Used instead of [`esplora_client::AsyncClient::get_block_hash`].
///
/// This first checks the previously fetched `latest_blocks` before fetching from Esplora again.
async fn fetch_block(
    client: &esplora_client::AsyncClient,
    latest_blocks: &BTreeMap<u32, BlockHash>,
    height: u32,
) -> Result<Option<BlockHash>, Error> {
    if let Some(&hash) = latest_blocks.get(&height) {
        return Ok(Some(hash));
    }

    // We avoid fetching blocks higher than the previously fetched `latest_blocks` as the local
    // chain tip is used to signal the last-synced-up-to-height.
    let &tip_height = latest_blocks
        .keys()
        .last()
        .expect("must have at least one entry");
    if height > tip_height {
        return Ok(None);
    }

    Ok(Some(client.get_block_hash(height).await?))
}

/// Create the [`local_chain::Update`].
///
/// We want to have a corresponding checkpoint per anchor height. However, checkpoints fetched
/// should not surpass `latest_blocks`.
async fn chain_update<A: Anchor>(
    client: &esplora_client::AsyncClient,
    latest_blocks: &BTreeMap<u32, BlockHash>,
    local_tip: &CheckPoint,
    anchors: &BTreeSet<(A, Txid)>,
) -> Result<CheckPoint, Error> {
    let mut point_of_agreement = None;
    let mut conflicts = vec![];
    for local_cp in local_tip.iter() {
        let remote_hash = match fetch_block(client, latest_blocks, local_cp.height()).await? {
            Some(hash) => hash,
            None => continue,
        };
        if remote_hash == local_cp.hash() {
            point_of_agreement = Some(local_cp.clone());
            break;
        } else {
            // It is not strictly necessary to include all the conflicted heights (we only need the
            // first one), but it seems prudent to make sure the updated chain's heights are a
            // superset of the existing chain after update.
            conflicts.push(BlockId {
                height: local_cp.height(),
                hash: remote_hash,
            });
        }
    }

    let mut tip = point_of_agreement.expect("remote esplora should have same genesis block");

    tip = tip
        .extend(conflicts.into_iter().rev())
        .expect("evicted are in order");

    for anchor in anchors {
        let height = anchor.0.anchor_block().height;
        if tip.get(height).is_none() {
            let hash = match fetch_block(client, latest_blocks, height).await? {
                Some(hash) => hash,
                None => continue,
            };
            tip = tip.insert(BlockId { height, hash });
        }
    }

    // Insert the most recent blocks at the tip to make sure we update the tip and make the update
    // robust.
    for (&height, &hash) in latest_blocks.iter() {
        tip = tip.insert(BlockId { height, hash });
    }

    Ok(tip)
}

#[cfg(test)]
mod test {
    use std::{collections::BTreeSet, time::Duration};

    use bdk_chain::{
        bitcoin::{hashes::Hash, Txid},
        local_chain::LocalChain,
        BlockId,
    };
    use bdk_testenv::{anyhow, bitcoincore_rpc::RpcApi, TestEnv};
    use esplora_client::Builder;

    use crate::async_ext::{chain_update, fetch_latest_blocks};

    macro_rules! h {
        ($index:literal) => {{
            bdk_chain::bitcoin::hashes::Hash::hash($index.as_bytes())
        }};
    }

    /// Ensure that update does not remove heights (from original), and all anchor heights are included.
    #[tokio::test]
    pub async fn test_finalize_chain_update() -> anyhow::Result<()> {
        struct TestCase<'a> {
            name: &'a str,
            /// Initial blockchain height to start the env with.
            initial_env_height: u32,
            /// Initial checkpoint heights to start with.
            initial_cps: &'a [u32],
            /// The final blockchain height of the env.
            final_env_height: u32,
            /// The anchors to test with: `(height, txid)`. Only the height is provided as we can fetch
            /// the blockhash from the env.
            anchors: &'a [(u32, Txid)],
        }

        let test_cases = [
            TestCase {
                name: "chain_extends",
                initial_env_height: 60,
                initial_cps: &[59, 60],
                final_env_height: 90,
                anchors: &[],
            },
            TestCase {
                name: "introduce_older_heights",
                initial_env_height: 50,
                initial_cps: &[10, 15],
                final_env_height: 50,
                anchors: &[(11, h!("A")), (14, h!("B"))],
            },
            TestCase {
                name: "introduce_older_heights_after_chain_extends",
                initial_env_height: 50,
                initial_cps: &[10, 15],
                final_env_height: 100,
                anchors: &[(11, h!("A")), (14, h!("B"))],
            },
        ];

        for (i, t) in test_cases.into_iter().enumerate() {
            println!("[{}] running test case: {}", i, t.name);

            let env = TestEnv::new()?;
            let base_url = format!("http://{}", &env.electrsd.esplora_url.clone().unwrap());
            let client = Builder::new(base_url.as_str()).build_async()?;

            // set env to `initial_env_height`
            if let Some(to_mine) = t
                .initial_env_height
                .checked_sub(env.make_checkpoint_tip().height())
            {
                env.mine_blocks(to_mine as _, None)?;
            }
            while client.get_height().await? < t.initial_env_height {
                std::thread::sleep(Duration::from_millis(10));
            }

            // craft initial `local_chain`
            let local_chain = {
                let (mut chain, _) = LocalChain::from_genesis_hash(env.genesis_hash()?);
                // force `chain_update_blocking` to add all checkpoints in `t.initial_cps`
                let anchors = t
                    .initial_cps
                    .iter()
                    .map(|&height| -> anyhow::Result<_> {
                        Ok((
                            BlockId {
                                height,
                                hash: env.bitcoind.client.get_block_hash(height as _)?,
                            },
                            Txid::all_zeros(),
                        ))
                    })
                    .collect::<anyhow::Result<BTreeSet<_>>>()?;
                let update = chain_update(
                    &client,
                    &fetch_latest_blocks(&client).await?,
                    &chain.tip(),
                    &anchors,
                )
                .await?;
                chain.apply_update(update)?;
                chain
            };
            println!("local chain height: {}", local_chain.tip().height());

            // extend env chain
            if let Some(to_mine) = t
                .final_env_height
                .checked_sub(env.make_checkpoint_tip().height())
            {
                env.mine_blocks(to_mine as _, None)?;
            }
            while client.get_height().await? < t.final_env_height {
                std::thread::sleep(Duration::from_millis(10));
            }

            // craft update
            let update = {
                let anchors = t
                    .anchors
                    .iter()
                    .map(|&(height, txid)| -> anyhow::Result<_> {
                        Ok((
                            BlockId {
                                height,
                                hash: env.bitcoind.client.get_block_hash(height as _)?,
                            },
                            txid,
                        ))
                    })
                    .collect::<anyhow::Result<_>>()?;
                chain_update(
                    &client,
                    &fetch_latest_blocks(&client).await?,
                    &local_chain.tip(),
                    &anchors,
                )
                .await?
            };

            // apply update
            let mut updated_local_chain = local_chain.clone();
            updated_local_chain.apply_update(update)?;
            println!(
                "updated local chain height: {}",
                updated_local_chain.tip().height()
            );

            assert!(
                {
                    let initial_heights = local_chain
                        .iter_checkpoints()
                        .map(|cp| cp.height())
                        .collect::<BTreeSet<_>>();
                    let updated_heights = updated_local_chain
                        .iter_checkpoints()
                        .map(|cp| cp.height())
                        .collect::<BTreeSet<_>>();
                    updated_heights.is_superset(&initial_heights)
                },
                "heights from the initial chain must all be in the updated chain",
            );

            assert!(
                {
                    let exp_anchor_heights = t
                        .anchors
                        .iter()
                        .map(|(h, _)| *h)
                        .chain(t.initial_cps.iter().copied())
                        .collect::<BTreeSet<_>>();
                    let anchor_heights = updated_local_chain
                        .iter_checkpoints()
                        .map(|cp| cp.height())
                        .collect::<BTreeSet<_>>();
                    anchor_heights.is_superset(&exp_anchor_heights)
                },
                "anchor heights must all be in updated chain",
            );
        }

        Ok(())
    }
}