• Home
  • Features
  • Pricing
  • Docs
  • Announcements
  • Sign In

bitcoindevkit / bdk / 10529454656

23 Aug 2024 04:58PM UTC coverage: 82.011% (+0.2%) from 81.848%
10529454656

Pull #1569

github

web-flow
Merge 2f21b5835 into 9e6ac72a6
Pull Request #1569: `bdk_core` WIP WIP WIP

495 of 556 new or added lines in 15 files covered. (89.03%)

5 existing lines in 4 files now uncovered.

11215 of 13675 relevant lines covered (82.01%)

13750.38 hits per line

Source File
Press 'n' to go to next uncovered line, 'b' for previous

92.32
/crates/esplora/src/async_ext.rs
1
use std::collections::{BTreeSet, HashSet};
2

3
use async_trait::async_trait;
4
use bdk_chain::spk_client::{FullScanRequest, FullScanResult, SyncRequest, SyncResult};
5
use bdk_chain::{
6
    bitcoin::{BlockHash, OutPoint, ScriptBuf, Txid},
7
    collections::BTreeMap,
8
    BlockId, ConfirmationBlockTime,
9
};
10
use bdk_chain::{tx_graph, Anchor, CheckPoint, Indexed};
11
use futures::{stream::FuturesOrdered, TryStreamExt};
12

13
use crate::{insert_anchor_from_status, insert_prevouts};
14

15
/// [`esplora_client::Error`]
///
/// Boxed so that `Result<T, Error>` stays pointer-sized instead of carrying the full
/// error payload in every return value.
type Error = Box<esplora_client::Error>;
17

18
/// Trait to extend the functionality of [`esplora_client::AsyncClient`].
///
/// Refer to [crate-level documentation](crate) for more.
#[cfg_attr(target_arch = "wasm32", async_trait(?Send))]
#[cfg_attr(not(target_arch = "wasm32"), async_trait)]
pub trait EsploraAsyncExt {
    /// Scan keychain scripts for transactions against Esplora, returning an update that can be
    /// applied to the receiving structures.
    ///
    /// `request` provides the data required to perform a script-pubkey-based full scan
    /// (see [`FullScanRequest`]). The full scan for each keychain (`K`) stops after a gap of
    /// `stop_gap` script pubkeys with no associated transactions. `parallel_requests` specifies
    /// the maximum number of HTTP requests to make in parallel.
    ///
    /// # Errors
    ///
    /// Fails if any of the underlying Esplora HTTP requests fail.
    ///
    /// Refer to [crate-level docs](crate) for more.
    async fn full_scan<K: Ord + Clone + Send, R: Into<FullScanRequest<K>> + Send>(
        &self,
        request: R,
        stop_gap: usize,
        parallel_requests: usize,
    ) -> Result<FullScanResult<K>, Error>;

    /// Sync a set of scripts, txids, and/or outpoints against Esplora.
    ///
    /// `request` provides the data required to perform a script-pubkey-based sync (see
    /// [`SyncRequest`]). `parallel_requests` specifies the maximum number of HTTP requests to make
    /// in parallel.
    ///
    /// # Errors
    ///
    /// Fails if any of the underlying Esplora HTTP requests fail.
    ///
    /// Refer to [crate-level docs](crate) for more.
    async fn sync<I: Send, R: Into<SyncRequest<I>> + Send>(
        &self,
        request: R,
        parallel_requests: usize,
    ) -> Result<SyncResult, Error>;
}
53

54
#[cfg_attr(target_arch = "wasm32", async_trait(?Send))]
55
#[cfg_attr(not(target_arch = "wasm32"), async_trait)]
56
impl EsploraAsyncExt for esplora_client::AsyncClient {
57
    async fn full_scan<K: Ord + Clone + Send, R: Into<FullScanRequest<K>> + Send>(
58
        &self,
59
        request: R,
60
        stop_gap: usize,
61
        parallel_requests: usize,
62
    ) -> Result<FullScanResult<K>, Error> {
4✔
63
        let mut request = request.into();
4✔
64
        let keychains = request.keychains();
4✔
65

4✔
66
        let chain_tip = request.chain_tip();
4✔
67
        let latest_blocks = if chain_tip.is_some() {
4✔
68
            Some(fetch_latest_blocks(self).await?)
4✔
69
        } else {
4✔
70
            None
4✔
71
        };
4✔
72

4✔
73
        let mut graph_update = tx_graph::Update::<ConfirmationBlockTime>::default();
4✔
74
        let mut inserted_txs = HashSet::<Txid>::new();
4✔
75
        let mut last_active_indices = BTreeMap::<K, u32>::new();
4✔
76
        for keychain in keychains {
8✔
77
            let keychain_spks = request.iter_spks(keychain.clone());
4✔
78
            let (update, last_active_index) = fetch_txs_with_keychain_spks(
4✔
79
                self,
4✔
80
                &mut inserted_txs,
4✔
81
                keychain_spks,
4✔
82
                stop_gap,
4✔
83
                parallel_requests,
4✔
84
            )
4✔
85
            .await?;
60✔
86
            graph_update.extend(update);
4✔
87
            if let Some(last_active_index) = last_active_index {
4✔
88
                last_active_indices.insert(keychain, last_active_index);
3✔
89
            }
3✔
90
        }
4✔
91

4✔
92
        let chain_update = match (chain_tip, latest_blocks) {
4✔
93
            (Some(chain_tip), Some(latest_blocks)) => {
4✔
94
                Some(chain_update(self, &latest_blocks, &chain_tip, &graph_update.anchors).await?)
4✔
95
            }
4✔
96
            _ => None,
4✔
97
        };
4✔
98

4✔
99
        Ok(FullScanResult {
4✔
100
            chain_update,
4✔
101
            graph_update,
4✔
102
            last_active_indices,
4✔
103
        })
4✔
104
    }
4✔
105

106
    async fn sync<I: Send, R: Into<SyncRequest<I>> + Send>(
107
        &self,
108
        request: R,
109
        parallel_requests: usize,
110
    ) -> Result<SyncResult, Error> {
1✔
111
        let mut request = request.into();
1✔
112

1✔
113
        let chain_tip = request.chain_tip();
1✔
114
        let latest_blocks = if chain_tip.is_some() {
1✔
115
            Some(fetch_latest_blocks(self).await?)
1✔
116
        } else {
1✔
117
            None
1✔
118
        };
1✔
119

1✔
120
        let mut graph_update = tx_graph::Update::<ConfirmationBlockTime>::default();
1✔
121
        let mut inserted_txs = HashSet::<Txid>::new();
1✔
122
        graph_update.extend(
1✔
123
            fetch_txs_with_spks(
1✔
124
                self,
1✔
125
                &mut inserted_txs,
1✔
126
                request.iter_spks(),
1✔
127
                parallel_requests,
1✔
128
            )
1✔
129
            .await?,
4✔
130
        );
1✔
131
        graph_update.extend(
1✔
132
            fetch_txs_with_txids(
1✔
133
                self,
1✔
134
                &mut inserted_txs,
1✔
135
                request.iter_txids(),
1✔
136
                parallel_requests,
1✔
137
            )
1✔
138
            .await?,
1✔
139
        );
1✔
140
        graph_update.extend(
1✔
141
            fetch_txs_with_outpoints(
1✔
142
                self,
1✔
143
                &mut inserted_txs,
1✔
144
                request.iter_outpoints(),
1✔
145
                parallel_requests,
1✔
146
            )
1✔
147
            .await?,
1✔
148
        );
1✔
149

1✔
150
        let chain_update = match (chain_tip, latest_blocks) {
1✔
151
            (Some(chain_tip), Some(latest_blocks)) => {
1✔
152
                Some(chain_update(self, &latest_blocks, &chain_tip, &graph_update.anchors).await?)
1✔
153
            }
1✔
154
            _ => None,
1✔
155
        };
1✔
156

1✔
157
        Ok(SyncResult {
1✔
158
            chain_update,
1✔
159
            graph_update,
1✔
160
        })
1✔
161
    }
1✔
162
}
163

164
/// Fetch latest blocks from Esplora in an atomic call.
165
///
166
/// We want to do this before fetching transactions and anchors as we cannot fetch latest blocks AND
167
/// transactions atomically, and the checkpoint tip is used to determine last-scanned block (for
168
/// block-based chain-sources). Therefore it's better to be conservative when setting the tip (use
169
/// an earlier tip rather than a later tip) otherwise the caller may accidentally skip blocks when
170
/// alternating between chain-sources.
171
async fn fetch_latest_blocks(
16✔
172
    client: &esplora_client::AsyncClient,
16✔
173
) -> Result<BTreeMap<u32, BlockHash>, Error> {
16✔
174
    Ok(client
11✔
175
        .get_blocks(None)
11✔
176
        .await?
11✔
177
        .into_iter()
11✔
178
        .map(|b| (b.time.height, b.id))
110✔
179
        .collect())
11✔
180
}
11✔
181

182
/// Used instead of [`esplora_client::BlockingClient::get_block_hash`].
183
///
184
/// This first checks the previously fetched `latest_blocks` before fetching from Esplora again.
185
async fn fetch_block(
28✔
186
    client: &esplora_client::AsyncClient,
28✔
187
    latest_blocks: &BTreeMap<u32, BlockHash>,
28✔
188
    height: u32,
28✔
189
) -> Result<Option<BlockHash>, Error> {
28✔
190
    if let Some(&hash) = latest_blocks.get(&height) {
22✔
191
        return Ok(Some(hash));
9✔
192
    }
13✔
193

13✔
194
    // We avoid fetching blocks higher than previously fetched `latest_blocks` as the local chain
13✔
195
    // tip is used to signal for the last-synced-up-to-height.
13✔
196
    let &tip_height = latest_blocks
13✔
197
        .keys()
13✔
198
        .last()
13✔
199
        .expect("must have atleast one entry");
13✔
200
    if height > tip_height {
13✔
201
        return Ok(None);
×
202
    }
13✔
203

13✔
204
    Ok(Some(client.get_block_hash(height).await?))
13✔
205
}
22✔
206

207
/// Create the [`local_chain::Update`].
208
///
209
/// We want to have a corresponding checkpoint per anchor height. However, checkpoints fetched
210
/// should not surpass `latest_blocks`.
211
async fn chain_update<A: Anchor>(
11✔
212
    client: &esplora_client::AsyncClient,
11✔
213
    latest_blocks: &BTreeMap<u32, BlockHash>,
11✔
214
    local_tip: &CheckPoint,
11✔
215
    anchors: &BTreeSet<(A, Txid)>,
11✔
216
) -> Result<CheckPoint, Error> {
11✔
217
    let mut point_of_agreement = None;
11✔
218
    let mut conflicts = vec![];
11✔
219
    for local_cp in local_tip.iter() {
11✔
220
        let remote_hash = match fetch_block(client, latest_blocks, local_cp.height()).await? {
11✔
221
            Some(hash) => hash,
11✔
222
            None => continue,
×
223
        };
224
        if remote_hash == local_cp.hash() {
11✔
225
            point_of_agreement = Some(local_cp.clone());
11✔
226
            break;
11✔
227
        } else {
×
228
            // it is not strictly necessary to include all the conflicted heights (we do need the
×
229
            // first one) but it seems prudent to make sure the updated chain's heights are a
×
230
            // superset of the existing chain after update.
×
231
            conflicts.push(BlockId {
×
232
                height: local_cp.height(),
×
233
                hash: remote_hash,
×
234
            });
×
235
        }
×
236
    }
237

238
    let mut tip = point_of_agreement.expect("remote esplora should have same genesis block");
11✔
239

11✔
240
    tip = tip
11✔
241
        .extend(conflicts.into_iter().rev())
11✔
242
        .expect("evicted are in order");
11✔
243

244
    for anchor in anchors {
27✔
245
        let height = anchor.0.anchor_block().height;
16✔
246
        if tip.get(height).is_none() {
16✔
247
            let hash = match fetch_block(client, latest_blocks, height).await? {
11✔
248
                Some(hash) => hash,
11✔
249
                None => continue,
×
250
            };
251
            tip = tip.insert(BlockId { height, hash });
11✔
252
        }
5✔
253
    }
254

255
    // insert the most recent blocks at the tip to make sure we update the tip and make the update
256
    // robust.
257
    for (&height, &hash) in latest_blocks.iter() {
110✔
258
        tip = tip.insert(BlockId { height, hash });
110✔
259
    }
110✔
260

261
    Ok(tip)
11✔
262
}
11✔
263

264
/// Fetch transactions and associated [`ConfirmationBlockTime`]s by scanning
265
/// `keychain_spks` against Esplora.
266
///
267
/// `keychain_spks` is an *unbounded* indexed-[`ScriptBuf`] iterator that represents scripts
268
/// derived from a keychain. The scanning logic stops after a `stop_gap` number of consecutive
269
/// scripts with no transaction history is reached. `parallel_requests` specifies the maximum
270
/// number of HTTP requests to make in parallel.
271
///
272
/// A [`TxGraph`] (containing the fetched transactions and anchors) and the last active
273
/// keychain index (if any) is returned. The last active keychain index is the keychain's last
274
/// script pubkey that contains a non-empty transaction history.
275
///
276
/// Refer to [crate-level docs](crate) for more.
277
async fn fetch_txs_with_keychain_spks<I: Iterator<Item = Indexed<ScriptBuf>> + Send>(
5✔
278
    client: &esplora_client::AsyncClient,
5✔
279
    inserted_txs: &mut HashSet<Txid>,
5✔
280
    mut keychain_spks: I,
5✔
281
    stop_gap: usize,
5✔
282
    parallel_requests: usize,
5✔
283
) -> Result<(tx_graph::Update<ConfirmationBlockTime>, Option<u32>), Error> {
5✔
284
    type TxsOfSpkIndex = (u32, Vec<esplora_client::Tx>);
5✔
285

5✔
286
    let mut update = tx_graph::Update::<ConfirmationBlockTime>::default();
5✔
287
    let mut last_index = Option::<u32>::None;
5✔
288
    let mut last_active_index = Option::<u32>::None;
5✔
289

290
    loop {
34✔
291
        let handles = keychain_spks
34✔
292
            .by_ref()
34✔
293
            .take(parallel_requests)
34✔
294
            .map(|(spk_index, spk)| {
34✔
295
                let client = client.clone();
32✔
296
                async move {
32✔
297
                    let mut last_seen = None;
32✔
298
                    let mut spk_txs = Vec::new();
32✔
299
                    loop {
300
                        let txs = client.scripthash_txs(&spk, last_seen).await?;
32✔
301
                        let tx_count = txs.len();
32✔
302
                        last_seen = txs.last().map(|tx| tx.txid);
32✔
303
                        spk_txs.extend(txs);
32✔
304
                        if tx_count < 25 {
32✔
305
                            break Result::<_, Error>::Ok((spk_index, spk_txs));
32✔
306
                        }
×
307
                    }
308
                }
32✔
309
            })
34✔
310
            .collect::<FuturesOrdered<_>>();
34✔
311

34✔
312
        if handles.is_empty() {
34✔
313
            break;
2✔
314
        }
32✔
315

316
        for (index, txs) in handles.try_collect::<Vec<TxsOfSpkIndex>>().await? {
64✔
317
            last_index = Some(index);
32✔
318
            if !txs.is_empty() {
32✔
319
                last_active_index = Some(index);
6✔
320
            }
26✔
321
            for tx in txs {
38✔
322
                if inserted_txs.insert(tx.txid) {
6✔
323
                    update.txs.push(tx.to_tx().into());
6✔
324
                }
6✔
325
                insert_anchor_from_status(&mut update, tx.txid, tx.status);
6✔
326
                insert_prevouts(&mut update, tx.vin);
6✔
327
            }
328
        }
329

330
        let last_index = last_index.expect("Must be set since handles wasn't empty.");
32✔
331
        let gap_limit_reached = if let Some(i) = last_active_index {
32✔
332
            last_index >= i.saturating_add(stop_gap as u32)
20✔
333
        } else {
334
            last_index + 1 >= stop_gap as u32
12✔
335
        };
336
        if gap_limit_reached {
32✔
337
            break;
3✔
338
        }
29✔
339
    }
340

341
    Ok((update, last_active_index))
5✔
342
}
5✔
343

344
/// Fetch transactions and associated [`ConfirmationBlockTime`]s by scanning `spks`
345
/// against Esplora.
346
///
347
/// Unlike with [`EsploraAsyncExt::fetch_txs_with_keychain_spks`], `spks` must be *bounded* as
348
/// all contained scripts will be scanned. `parallel_requests` specifies the maximum number of
349
/// HTTP requests to make in parallel.
350
///
351
/// Refer to [crate-level docs](crate) for more.
352
async fn fetch_txs_with_spks<I: IntoIterator<Item = ScriptBuf> + Send>(
1✔
353
    client: &esplora_client::AsyncClient,
1✔
354
    inserted_txs: &mut HashSet<Txid>,
1✔
355
    spks: I,
1✔
356
    parallel_requests: usize,
1✔
357
) -> Result<tx_graph::Update<ConfirmationBlockTime>, Error>
1✔
358
where
1✔
359
    I::IntoIter: Send,
1✔
360
{
1✔
361
    fetch_txs_with_keychain_spks(
1✔
362
        client,
1✔
363
        inserted_txs,
1✔
364
        spks.into_iter().enumerate().map(|(i, spk)| (i as u32, spk)),
2✔
365
        usize::MAX,
1✔
366
        parallel_requests,
1✔
367
    )
1✔
368
    .await
4✔
369
    .map(|(update, _)| update)
1✔
370
}
1✔
371

372
/// Fetch transactions and associated [`ConfirmationBlockTime`]s by scanning `txids`
373
/// against Esplora.
374
///
375
/// `parallel_requests` specifies the maximum number of HTTP requests to make in parallel.
376
///
377
/// Refer to [crate-level docs](crate) for more.
378
async fn fetch_txs_with_txids<I: IntoIterator<Item = Txid> + Send>(
3✔
379
    client: &esplora_client::AsyncClient,
3✔
380
    inserted_txs: &mut HashSet<Txid>,
3✔
381
    txids: I,
3✔
382
    parallel_requests: usize,
3✔
383
) -> Result<tx_graph::Update<ConfirmationBlockTime>, Error>
3✔
384
where
3✔
385
    I::IntoIter: Send,
3✔
386
{
3✔
387
    let mut update = tx_graph::Update::<ConfirmationBlockTime>::default();
3✔
388
    // Only fetch for non-inserted txs.
3✔
389
    let mut txids = txids
3✔
390
        .into_iter()
3✔
391
        .filter(|txid| !inserted_txs.contains(txid))
3✔
392
        .collect::<Vec<Txid>>()
3✔
393
        .into_iter();
3✔
394
    loop {
3✔
395
        let handles = txids
3✔
396
            .by_ref()
3✔
397
            .take(parallel_requests)
3✔
398
            .map(|txid| {
3✔
399
                let client = client.clone();
×
NEW
400
                async move { client.get_tx_info(&txid).await.map(|t| (txid, t)) }
×
401
            })
3✔
402
            .collect::<FuturesOrdered<_>>();
3✔
403

3✔
404
        if handles.is_empty() {
3✔
405
            break;
3✔
406
        }
×
407

NEW
408
        for (txid, tx_info) in handles.try_collect::<Vec<_>>().await? {
×
NEW
409
            if let Some(tx_info) = tx_info {
×
NEW
410
                if inserted_txs.insert(txid) {
×
NEW
411
                    update.txs.push(tx_info.to_tx().into());
×
412
                }
×
NEW
413
                insert_anchor_from_status(&mut update, txid, tx_info.status);
×
NEW
414
                insert_prevouts(&mut update, tx_info.vin);
×
UNCOV
415
            }
×
416
        }
417
    }
418
    Ok(update)
3✔
419
}
3✔
420

421
/// Fetch transactions and [`ConfirmationBlockTime`]s that contain and spend the provided
422
/// `outpoints`.
423
///
424
/// `parallel_requests` specifies the maximum number of HTTP requests to make in parallel.
425
///
426
/// Refer to [crate-level docs](crate) for more.
427
async fn fetch_txs_with_outpoints<I: IntoIterator<Item = OutPoint> + Send>(
1✔
428
    client: &esplora_client::AsyncClient,
1✔
429
    inserted_txs: &mut HashSet<Txid>,
1✔
430
    outpoints: I,
1✔
431
    parallel_requests: usize,
1✔
432
) -> Result<tx_graph::Update<ConfirmationBlockTime>, Error>
1✔
433
where
1✔
434
    I::IntoIter: Send,
1✔
435
{
1✔
436
    let outpoints = outpoints.into_iter().collect::<Vec<_>>();
1✔
437
    let mut update = tx_graph::Update::<ConfirmationBlockTime>::default();
1✔
438

1✔
439
    // make sure txs exists in graph and tx statuses are updated
1✔
440
    // TODO: We should maintain a tx cache (like we do with Electrum).
1✔
441
    update.extend(
1✔
442
        fetch_txs_with_txids(
1✔
443
            client,
1✔
444
            inserted_txs,
1✔
445
            outpoints.iter().copied().map(|op| op.txid),
1✔
446
            parallel_requests,
1✔
447
        )
1✔
NEW
448
        .await?,
×
449
    );
450

451
    // get outpoint spend-statuses
452
    let mut outpoints = outpoints.into_iter();
1✔
453
    let mut missing_txs = Vec::<Txid>::with_capacity(outpoints.len());
1✔
454
    loop {
1✔
455
        let handles = outpoints
1✔
456
            .by_ref()
1✔
457
            .take(parallel_requests)
1✔
458
            .map(|op| {
1✔
459
                let client = client.clone();
×
460
                async move { client.get_output_status(&op.txid, op.vout as _).await }
×
461
            })
1✔
462
            .collect::<FuturesOrdered<_>>();
1✔
463

1✔
464
        if handles.is_empty() {
1✔
465
            break;
1✔
466
        }
×
467

468
        for op_status in handles.try_collect::<Vec<_>>().await?.into_iter().flatten() {
×
469
            let spend_txid = match op_status.txid {
×
470
                Some(txid) => txid,
×
471
                None => continue,
×
472
            };
NEW
473
            if !inserted_txs.contains(&spend_txid) {
×
474
                missing_txs.push(spend_txid);
×
475
            }
×
476
            if let Some(spend_status) = op_status.status {
×
NEW
477
                insert_anchor_from_status(&mut update, spend_txid, spend_status);
×
478
            }
×
479
        }
480
    }
481

482
    update
1✔
483
        .extend(fetch_txs_with_txids(client, inserted_txs, missing_txs, parallel_requests).await?);
1✔
484
    Ok(update)
1✔
485
}
1✔
486

487
#[cfg(test)]
mod test {
    use std::{collections::BTreeSet, time::Duration};

    use bdk_chain::{
        bitcoin::{hashes::Hash, Txid},
        local_chain::LocalChain,
        BlockId,
    };
    use bdk_testenv::{anyhow, bitcoincore_rpc::RpcApi, TestEnv};
    use esplora_client::Builder;

    use crate::async_ext::{chain_update, fetch_latest_blocks};

    // Hash arbitrary bytes into a deterministic test-only id (txid, blockhash, ...).
    macro_rules! h {
        ($index:literal) => {{
            bdk_chain::bitcoin::hashes::Hash::hash($index.as_bytes())
        }};
    }

    /// Ensure that update does not remove heights (from original), and all anchor heights are included.
    #[tokio::test]
    pub async fn test_finalize_chain_update() -> anyhow::Result<()> {
        struct TestCase<'a> {
            name: &'a str,
            /// Initial blockchain height to start the env with.
            initial_env_height: u32,
            /// Initial checkpoint heights to start with.
            initial_cps: &'a [u32],
            /// The final blockchain height of the env.
            final_env_height: u32,
            /// The anchors to test with: `(height, txid)`. Only the height is provided as we can fetch
            /// the blockhash from the env.
            anchors: &'a [(u32, Txid)],
        }

        let test_cases = [
            TestCase {
                name: "chain_extends",
                initial_env_height: 60,
                initial_cps: &[59, 60],
                final_env_height: 90,
                anchors: &[],
            },
            TestCase {
                name: "introduce_older_heights",
                initial_env_height: 50,
                initial_cps: &[10, 15],
                final_env_height: 50,
                anchors: &[(11, h!("A")), (14, h!("B"))],
            },
            TestCase {
                name: "introduce_older_heights_after_chain_extends",
                initial_env_height: 50,
                initial_cps: &[10, 15],
                final_env_height: 100,
                anchors: &[(11, h!("A")), (14, h!("B"))],
            },
        ];

        for (i, t) in test_cases.into_iter().enumerate() {
            println!("[{}] running test case: {}", i, t.name);

            let env = TestEnv::new()?;
            let base_url = format!("http://{}", &env.electrsd.esplora_url.clone().unwrap());
            let client = Builder::new(base_url.as_str()).build_async()?;

            // set env to `initial_env_height`
            if let Some(to_mine) = t
                .initial_env_height
                .checked_sub(env.make_checkpoint_tip().height())
            {
                env.mine_blocks(to_mine as _, None)?;
            }
            // Fix: use the async sleep — `std::thread::sleep` would block the tokio runtime
            // thread while waiting for electrs to index the new blocks.
            while client.get_height().await? < t.initial_env_height {
                tokio::time::sleep(Duration::from_millis(10)).await;
            }

            // craft initial `local_chain`
            let local_chain = {
                let (mut chain, _) = LocalChain::from_genesis_hash(env.genesis_hash()?);
                // force `chain_update_blocking` to add all checkpoints in `t.initial_cps`
                let anchors = t
                    .initial_cps
                    .iter()
                    .map(|&height| -> anyhow::Result<_> {
                        Ok((
                            BlockId {
                                height,
                                hash: env.bitcoind.client.get_block_hash(height as _)?,
                            },
                            Txid::all_zeros(),
                        ))
                    })
                    .collect::<anyhow::Result<BTreeSet<_>>>()?;
                let update = chain_update(
                    &client,
                    &fetch_latest_blocks(&client).await?,
                    &chain.tip(),
                    &anchors,
                )
                .await?;
                chain.apply_update(update)?;
                chain
            };
            println!("local chain height: {}", local_chain.tip().height());

            // extend env chain
            if let Some(to_mine) = t
                .final_env_height
                .checked_sub(env.make_checkpoint_tip().height())
            {
                env.mine_blocks(to_mine as _, None)?;
            }
            while client.get_height().await? < t.final_env_height {
                tokio::time::sleep(Duration::from_millis(10)).await;
            }

            // craft update
            let update = {
                let anchors = t
                    .anchors
                    .iter()
                    .map(|&(height, txid)| -> anyhow::Result<_> {
                        Ok((
                            BlockId {
                                height,
                                hash: env.bitcoind.client.get_block_hash(height as _)?,
                            },
                            txid,
                        ))
                    })
                    .collect::<anyhow::Result<_>>()?;
                chain_update(
                    &client,
                    &fetch_latest_blocks(&client).await?,
                    &local_chain.tip(),
                    &anchors,
                )
                .await?
            };

            // apply update
            let mut updated_local_chain = local_chain.clone();
            updated_local_chain.apply_update(update)?;
            println!(
                "updated local chain height: {}",
                updated_local_chain.tip().height()
            );

            assert!(
                {
                    let initial_heights = local_chain
                        .iter_checkpoints()
                        .map(|cp| cp.height())
                        .collect::<BTreeSet<_>>();
                    let updated_heights = updated_local_chain
                        .iter_checkpoints()
                        .map(|cp| cp.height())
                        .collect::<BTreeSet<_>>();
                    updated_heights.is_superset(&initial_heights)
                },
                "heights from the initial chain must all be in the updated chain",
            );

            assert!(
                {
                    let exp_anchor_heights = t
                        .anchors
                        .iter()
                        .map(|(h, _)| *h)
                        .chain(t.initial_cps.iter().copied())
                        .collect::<BTreeSet<_>>();
                    let anchor_heights = updated_local_chain
                        .iter_checkpoints()
                        .map(|cp| cp.height())
                        .collect::<BTreeSet<_>>();
                    anchor_heights.is_superset(&exp_anchor_heights)
                },
                "anchor heights must all be in updated chain",
            );
        }

        Ok(())
    }
}
STATUS · Troubleshooting · Open an Issue · Sales · Support · CAREERS · ENTERPRISE · START FREE · SCHEDULE DEMO
ANNOUNCEMENTS · TWITTER · TOS & SLA · Supported CI Services · What's a CI service? · Automated Testing

© 2025 Coveralls, Inc